From cf7641068d8249e4e7b07744d3a86773c0339542 Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Mon, 12 Jun 2006 21:09:28 +0200 Subject: [PATCH 01/74] ndb_dd_advance test cases --- mysql-test/r/ndb_dd_advance.result | 1095 +++++++++++++++++++++++++++ mysql-test/r/ndb_dd_advance2.result | 760 +++++++++++++++++++ mysql-test/t/ndb_dd_advance.test | 630 +++++++++++++++ mysql-test/t/ndb_dd_advance2.test | 726 ++++++++++++++++++ 4 files changed, 3211 insertions(+) create mode 100644 mysql-test/r/ndb_dd_advance.result create mode 100644 mysql-test/r/ndb_dd_advance2.result create mode 100755 mysql-test/t/ndb_dd_advance.test create mode 100755 mysql-test/t/ndb_dd_advance2.test diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result new file mode 100644 index 00000000000..810fc62c942 --- /dev/null +++ b/mysql-test/r/ndb_dd_advance.result @@ -0,0 +1,1095 @@ +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +**** Test Setup Section **** +CREATE LOGFILE GROUP log_group1 +ADD UNDOFILE './log_group1/undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE table_space1 +ADD DATAFILE './table_space1/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 +(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +CREATE TABLE test.t2 +(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) +ENGINE=NDB; + +**** Data load for first test **** +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); +INSERT INTO test.t2 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + +*** Test 1 Section Begins *** +SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +COUNT(*) +1 +SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +pk2 b2 c2 pk1 b c +4 4 4 4 4 4 +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); +COUNT(*) 
+1 +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); +COUNT(*) +1 +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +b c +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +46 46 +47 47 +48 48 +49 49 +50 50 +51 51 +52 52 +53 53 +54 54 +55 55 +56 56 +57 57 +58 58 +59 59 +60 60 +61 61 +62 62 +63 63 +64 64 +65 65 +66 66 +67 67 +68 68 +69 69 +70 70 +71 71 +72 72 +73 73 +74 74 +75 75 + +*** Setup for test 2 **** +DELETE FROM test.t1; +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); + +**** Test Section 2 **** +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +b c +1 1 +2 2 +3 3 +4 4 +5 5 +6 6 +7 7 +8 8 +9 9 +10 10 +11 11 +12 12 +13 13 +14 14 +15 15 +16 16 +17 17 +18 18 +19 19 +20 20 +21 21 +22 22 +23 23 +24 24 +25 25 +26 26 +27 27 +28 28 +29 29 +30 30 +31 31 +32 32 +33 33 +34 34 +35 35 +36 36 +37 37 +38 38 +39 39 +40 40 +41 41 +42 42 +43 43 +44 44 +45 45 +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; +COUNT(*) +45 +SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; +COUNT(*) +75 +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk2` int(11) NOT NULL, + `b2` int(11) NOT NULL, + `c2` int(11) NOT NULL, + PRIMARY KEY (`pk2`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk1` int(11) NOT NULL, + `b` int(11) NOT NULL, + `c` int(11) NOT NULL, + PRIMARY KEY (`pk1`) +) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `pk2` int(11) NOT NULL, + `b2` int(11) NOT NULL, + `c2` int(11) NOT NULL, + PRIMARY KEY (`pk2`) +) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=NDBCLUSTER; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `pk1` int(11) NOT NULL, + `b` int(11) NOT NULL, + `c` int(11) NOT NULL, + PRIMARY KEY (`pk1`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 + +DROP TABLE test.t1; +DROP TABLE test.t2; +*** Setup for Test Section 3 *** +CREATE TABLE test.t1 ( +usr_id INT unsigned NOT NULL, +uniq_id INT unsigned NOT NULL AUTO_INCREMENT, +start_num INT unsigned NOT NULL DEFAULT 1, +increment INT unsigned NOT NULL DEFAULT 1, +PRIMARY KEY (uniq_id), +INDEX usr_uniq_idx (usr_id, uniq_id), +INDEX uniq_usr_idx (uniq_id, usr_id)) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +CREATE TABLE test.t2 ( +id INT unsigned NOT NULL DEFAULT 0, +usr2_id INT unsigned NOT NULL DEFAULT 0, +max INT unsigned NOT NULL DEFAULT 0, +c_amount INT unsigned NOT NULL DEFAULT 0, +d_max INT unsigned NOT NULL DEFAULT 0, +d_num INT unsigned NOT NULL DEFAULT 0, +orig_time INT unsigned NOT NULL DEFAULT 0, +c_time INT 
unsigned NOT NULL DEFAULT 0, +active ENUM ("no","yes") NOT NULL, +PRIMARY KEY (id,usr2_id), +INDEX id_idx (id), +INDEX usr2_idx (usr2_id)) +ENGINE=NDB; +INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); + +**** Test Section 3 **** +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; +usr_id uniq_id increment usr2_id c_amount max +3 4 84676 NULL NULL NULL +INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; +usr_id uniq_id increment usr2_id c_amount max +3 4 84676 3 6000 3000 + +DROP TABLE test.t1; +DROP TABLE test.t2; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; +DROP TABLESPACE table_space1 +ENGINE = NDB; +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLESPACE ts2 +ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) +ENGINE=NDB; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a` int(11) NOT NULL, + `b` int(11) DEFAULT NULL, + PRIMARY KEY (`a`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +INSERT INTO t1 VALUES (1,1); +INSERT INTO t1 VALUES (2,2); +SELECT * FROM t1; +a b +1 1 +2 2 +INSERT INTO t2(a,b) SELECT * FROM t1; +SELECT * FROM t2; +a b +1 1 +2 2 +TRUNCATE t1; +TRUNCATE t2; +INSERT INTO t2 VALUES (3,3); +INSERT INTO t2 VALUES (4,4); +INSERT INTO t1(a,b) SELECT * FROM t2; +SELECT * FROM t1; +a b +3 3 +4 4 +DROP TABLE t1, t2; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts2 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +DROP DATABASE IF EXISTS test; +CREATE LOGFILE GROUP lg +ADD UNDOFILE './undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts +ADD DATAFILE './datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE DATABASE test; +CREATE TABLE test.t ( +a smallint NOT NULL, +b int NOT NULL, +c bigint NOT NULL, +d char(10), +e TEXT, +f VARCHAR(255), +PRIMARY KEY(a) +) TABLESPACE ts STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); +SHOW CREATE TABLE test.t; +Table Create Table +t CREATE TABLE `t` ( + `a` 
smallint(6) NOT NULL, + `b` int(11) NOT NULL, + `c` bigint(20) NOT NULL, + `d` char(10) DEFAULT NULL, + `e` text, + `f` varchar(255) DEFAULT NULL, + PRIMARY KEY (`a`), + KEY `d` (`d`), + KEY `f` (`f`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SELECT * FROM test.t order by a; +a b c d e f +1 2 3 aaa1 bbb1 ccccc1 +2 3 4 aaa2 bbb2 ccccc2 +3 4 5 aaa3 bbb3 ccccc3 +4 5 6 aaa4 bbb4 ccccc4 +5 6 7 aaa5 bbb5 ccccc5 +6 7 8 aaa6 bbb6 ccccc6 +7 8 9 aaa7 bbb7 ccccc7 +8 9 10 aaa8 bbb8 ccccc8 +9 10 11 aaa9 bbb9 ccccc9 +10 11 12 aaa10 bbb10 ccccc10 +11 12 13 aaa11 bbb11 ccccc11 +12 13 14 aaa12 bbb12 ccccc12 +13 14 15 aaa13 bbb13 ccccc13 +14 15 16 aaa14 bbb14 ccccc14 +15 16 17 aaa15 bbb15 ccccc15 +16 17 18 aaa16 bbb16 ccccc16 +17 18 19 aaa17 bbb17 ccccc17 +18 19 20 aaa18 bbb18 ccccc18 +19 20 21 aaa19 bbb19 ccccc19 +20 21 22 aaa20 bbb20 ccccc20 +21 22 23 aaa21 bbb21 ccccc21 +22 23 24 aaa22 bbb22 ccccc22 +23 24 25 aaa23 bbb23 ccccc23 +24 25 26 aaa24 bbb24 ccccc24 +25 26 27 aaa25 bbb25 ccccc25 +26 27 28 aaa26 bbb26 ccccc26 +27 28 29 aaa27 bbb27 ccccc27 +28 29 30 aaa28 bbb28 ccccc28 +29 30 31 aaa29 bbb29 ccccc29 +30 31 32 aaa30 bbb30 ccccc30 +31 32 33 aaa31 bbb31 ccccc31 +32 33 34 aaa32 bbb32 ccccc32 +33 34 35 aaa33 bbb33 ccccc33 +34 35 36 aaa34 bbb34 ccccc34 +35 36 37 aaa35 bbb35 ccccc35 +36 37 38 aaa36 bbb36 ccccc36 +37 38 39 aaa37 bbb37 ccccc37 +38 39 40 aaa38 bbb38 ccccc38 +39 40 41 aaa39 bbb39 ccccc39 +40 41 42 aaa40 bbb40 ccccc40 +41 42 43 aaa41 bbb41 ccccc41 +42 43 44 aaa42 bbb42 ccccc42 +43 44 45 aaa43 bbb43 ccccc43 +44 45 46 aaa44 bbb44 ccccc44 +45 46 47 aaa45 bbb45 ccccc45 +46 47 48 aaa46 bbb46 ccccc46 +47 48 49 aaa47 bbb47 ccccc47 +48 49 50 aaa48 bbb48 ccccc48 +49 50 51 aaa49 bbb49 ccccc49 +50 51 52 aaa50 bbb50 ccccc50 +51 52 53 aaa51 bbb51 ccccc51 +52 53 54 aaa52 bbb52 ccccc52 +53 54 55 aaa53 bbb53 ccccc53 +54 55 56 aaa54 bbb54 ccccc54 +55 56 57 aaa55 bbb55 ccccc55 +56 57 58 aaa56 bbb56 ccccc56 +57 58 59 aaa57 bbb57 ccccc57 +58 59 60 aaa58 bbb58 ccccc58 +59 60 61 aaa59 bbb59 ccccc59 +60 61 62 aaa60 bbb60 ccccc60 +61 62 63 aaa61 bbb61 ccccc61 +62 63 64 aaa62 bbb62 ccccc62 +63 64 65 aaa63 bbb63 ccccc63 +64 65 66 aaa64 bbb64 ccccc64 +65 66 67 aaa65 bbb65 ccccc65 +66 67 68 aaa66 bbb66 ccccc66 +67 68 69 aaa67 bbb67 ccccc67 +68 69 70 aaa68 bbb68 ccccc68 +69 70 71 aaa69 bbb69 ccccc69 +70 71 72 aaa70 bbb70 ccccc70 +71 72 73 aaa71 bbb71 ccccc71 +72 73 74 aaa72 bbb72 ccccc72 +73 74 75 aaa73 bbb73 ccccc73 +74 75 76 aaa74 bbb74 ccccc74 +75 76 77 aaa75 bbb75 ccccc75 +76 77 78 aaa76 bbb76 ccccc76 +77 78 79 aaa77 bbb77 ccccc77 +78 79 80 aaa78 bbb78 ccccc78 +79 80 81 aaa79 bbb79 ccccc79 +80 81 82 aaa80 bbb80 ccccc80 +81 82 83 aaa81 bbb81 ccccc81 +82 83 84 aaa82 bbb82 ccccc82 +83 84 85 aaa83 bbb83 ccccc83 +84 85 86 aaa84 bbb84 ccccc84 +85 86 87 aaa85 bbb85 ccccc85 +86 87 88 aaa86 bbb86 ccccc86 +87 88 89 aaa87 bbb87 ccccc87 +88 89 90 aaa88 bbb88 ccccc88 +89 90 91 aaa89 bbb89 ccccc89 +90 91 92 aaa90 bbb90 ccccc90 +91 92 93 aaa91 bbb91 ccccc91 +92 93 94 aaa92 bbb92 ccccc92 +93 94 95 aaa93 bbb93 ccccc93 +94 95 96 aaa94 bbb94 ccccc94 +95 96 97 aaa95 bbb95 ccccc95 +96 97 98 aaa96 bbb96 ccccc96 +97 98 99 aaa97 bbb97 ccccc97 +98 99 100 aaa98 bbb98 ccccc98 +99 100 101 aaa99 bbb99 ccccc99 +100 101 102 aaa100 bbb100 ccccc100 +DROP TABLE test.t; +USE test; +show tables; +Tables_in_test +t +SELECT * FROM test.t order by a; +a b c d e f +1 2 3 aaa1 bbb1 ccccc1 +2 3 4 aaa2 bbb2 ccccc2 +3 4 5 aaa3 bbb3 ccccc3 +4 5 6 aaa4 bbb4 ccccc4 +5 6 7 aaa5 bbb5 ccccc5 +6 7 8 aaa6 bbb6 ccccc6 +7 8 9 aaa7 bbb7 ccccc7 +8 9 10 aaa8 bbb8 ccccc8 
+9 10 11 aaa9 bbb9 ccccc9 +10 11 12 aaa10 bbb10 ccccc10 +11 12 13 aaa11 bbb11 ccccc11 +12 13 14 aaa12 bbb12 ccccc12 +13 14 15 aaa13 bbb13 ccccc13 +14 15 16 aaa14 bbb14 ccccc14 +15 16 17 aaa15 bbb15 ccccc15 +16 17 18 aaa16 bbb16 ccccc16 +17 18 19 aaa17 bbb17 ccccc17 +18 19 20 aaa18 bbb18 ccccc18 +19 20 21 aaa19 bbb19 ccccc19 +20 21 22 aaa20 bbb20 ccccc20 +21 22 23 aaa21 bbb21 ccccc21 +22 23 24 aaa22 bbb22 ccccc22 +23 24 25 aaa23 bbb23 ccccc23 +24 25 26 aaa24 bbb24 ccccc24 +25 26 27 aaa25 bbb25 ccccc25 +26 27 28 aaa26 bbb26 ccccc26 +27 28 29 aaa27 bbb27 ccccc27 +28 29 30 aaa28 bbb28 ccccc28 +29 30 31 aaa29 bbb29 ccccc29 +30 31 32 aaa30 bbb30 ccccc30 +31 32 33 aaa31 bbb31 ccccc31 +32 33 34 aaa32 bbb32 ccccc32 +33 34 35 aaa33 bbb33 ccccc33 +34 35 36 aaa34 bbb34 ccccc34 +35 36 37 aaa35 bbb35 ccccc35 +36 37 38 aaa36 bbb36 ccccc36 +37 38 39 aaa37 bbb37 ccccc37 +38 39 40 aaa38 bbb38 ccccc38 +39 40 41 aaa39 bbb39 ccccc39 +40 41 42 aaa40 bbb40 ccccc40 +41 42 43 aaa41 bbb41 ccccc41 +42 43 44 aaa42 bbb42 ccccc42 +43 44 45 aaa43 bbb43 ccccc43 +44 45 46 aaa44 bbb44 ccccc44 +45 46 47 aaa45 bbb45 ccccc45 +46 47 48 aaa46 bbb46 ccccc46 +47 48 49 aaa47 bbb47 ccccc47 +48 49 50 aaa48 bbb48 ccccc48 +49 50 51 aaa49 bbb49 ccccc49 +50 51 52 aaa50 bbb50 ccccc50 +51 52 53 aaa51 bbb51 ccccc51 +52 53 54 aaa52 bbb52 ccccc52 +53 54 55 aaa53 bbb53 ccccc53 +54 55 56 aaa54 bbb54 ccccc54 +55 56 57 aaa55 bbb55 ccccc55 +56 57 58 aaa56 bbb56 ccccc56 +57 58 59 aaa57 bbb57 ccccc57 +58 59 60 aaa58 bbb58 ccccc58 +59 60 61 aaa59 bbb59 ccccc59 +60 61 62 aaa60 bbb60 ccccc60 +61 62 63 aaa61 bbb61 ccccc61 +62 63 64 aaa62 bbb62 ccccc62 +63 64 65 aaa63 bbb63 ccccc63 +64 65 66 aaa64 bbb64 ccccc64 +65 66 67 aaa65 bbb65 ccccc65 +66 67 68 aaa66 bbb66 ccccc66 +67 68 69 aaa67 bbb67 ccccc67 +68 69 70 aaa68 bbb68 ccccc68 +69 70 71 aaa69 bbb69 ccccc69 +70 71 72 aaa70 bbb70 ccccc70 +71 72 73 aaa71 bbb71 ccccc71 +72 73 74 aaa72 bbb72 ccccc72 +73 74 75 aaa73 bbb73 ccccc73 +74 75 76 aaa74 bbb74 ccccc74 +75 76 77 aaa75 bbb75 ccccc75 +76 77 78 aaa76 bbb76 ccccc76 +77 78 79 aaa77 bbb77 ccccc77 +78 79 80 aaa78 bbb78 ccccc78 +79 80 81 aaa79 bbb79 ccccc79 +80 81 82 aaa80 bbb80 ccccc80 +81 82 83 aaa81 bbb81 ccccc81 +82 83 84 aaa82 bbb82 ccccc82 +83 84 85 aaa83 bbb83 ccccc83 +84 85 86 aaa84 bbb84 ccccc84 +85 86 87 aaa85 bbb85 ccccc85 +86 87 88 aaa86 bbb86 ccccc86 +87 88 89 aaa87 bbb87 ccccc87 +88 89 90 aaa88 bbb88 ccccc88 +89 90 91 aaa89 bbb89 ccccc89 +90 91 92 aaa90 bbb90 ccccc90 +91 92 93 aaa91 bbb91 ccccc91 +92 93 94 aaa92 bbb92 ccccc92 +93 94 95 aaa93 bbb93 ccccc93 +94 95 96 aaa94 bbb94 ccccc94 +95 96 97 aaa95 bbb95 ccccc95 +96 97 98 aaa96 bbb96 ccccc96 +97 98 99 aaa97 bbb97 ccccc97 +98 99 100 aaa98 bbb98 ccccc98 +99 100 101 aaa99 bbb99 ccccc99 +100 101 102 aaa100 bbb100 ccccc100 +DROP TABLE test.t; +DROP DATABASE test; +ALTER TABLESPACE ts +DROP DATAFILE './datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +DROP DATABASE IF EXISTS test; +Warnings: +Note 1008 Can't drop database 'test'; database doesn't exist +DROP table IF EXISTS test.t1; +Warnings: +Note 1051 Unknown table 't1' +DROP table IF EXISTS test.t2; +Warnings: +Note 1051 Unknown table 't2' +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLESPACE ts2 +ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; 
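+# Cross-check (an illustrative sketch, not part of the recorded result): the
+# disk-data objects created above are visible in the standard dictionary view
+# INFORMATION_SCHEMA.FILES before any table is placed in them.
+SELECT FILE_NAME, FILE_TYPE, TABLESPACE_NAME, LOGFILE_GROUP_NAME
+FROM INFORMATION_SCHEMA.FILES WHERE ENGINE = 'ndbcluster';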
+CREATE DATABASE test; +CREATE TABLE test.t1 ( +a1 smallint NOT NULL, +a2 int NOT NULL, +a3 bigint NOT NULL, +a4 char(10), +a5 decimal(5,1), +a6 time, +a7 date, +a8 datetime, +a9 VARCHAR(255), +a10 blob, +PRIMARY KEY(a1) +) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` smallint(6) NOT NULL, + `a2` int(11) NOT NULL, + `a3` bigint(20) NOT NULL, + `a4` char(10) DEFAULT NULL, + `a5` decimal(5,1) DEFAULT NULL, + `a6` time DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` datetime DEFAULT NULL, + `a9` varchar(255) DEFAULT NULL, + `a10` blob, + PRIMARY KEY (`a1`), + KEY `a2` (`a2`), + KEY `a3` (`a3`), + KEY `a8` (`a8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +CREATE TABLE test.t2 ( +b1 smallint NOT NULL, +b2 int NOT NULL, +b3 bigint NOT NULL, +b4 char(10), +b5 decimal(5,1), +b6 time, +b7 date, +b8 datetime, +b9 VARCHAR(255), +b10 blob, +PRIMARY KEY(b1) +) ENGINE=NDB; +ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `b1` smallint(6) NOT NULL, + `b2` int(11) NOT NULL, + `b3` bigint(20) NOT NULL, + `b4` char(10) DEFAULT NULL, + `b5` decimal(5,1) DEFAULT NULL, + `b6` time DEFAULT NULL, + `b7` date DEFAULT NULL, + `b8` datetime DEFAULT NULL, + `b9` varchar(255) DEFAULT NULL, + `b10` blob, + PRIMARY KEY (`b1`), + KEY `b2` (`b2`), + KEY `b3` (`b3`), + KEY `b8` (`b8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SELECT * FROM test.t1; +a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +SELECT * FROM test.t2; +b1 b2 b3 b4 
b5 b6 b7 b8 b9 b10 +19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data +8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; +COUNT(a1) a1 COUNT(a1)*a1 +1 1 1 +1 2 2 +1 3 3 +1 4 4 +1 5 5 +1 6 6 +1 7 7 +1 8 8 +1 9 9 +1 10 10 +1 11 11 +1 12 12 +1 13 13 +1 14 14 +1 15 15 +1 16 16 +1 17 17 +1 18 18 +1 19 19 +1 20 20 +SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; +COUNT(a2) (a2+1) COUNT(a2)*(a2+0) +1 3 2 +1 4 3 +1 5 4 +1 6 5 +1 7 6 +1 8 7 +1 9 8 +1 10 9 +1 11 10 +1 12 11 +1 13 12 +1 14 13 +1 15 14 +1 16 15 +1 17 16 +1 18 17 +1 19 18 +1 20 19 +1 21 20 +1 22 21 +DROP TABLE test.t1; +DROP TABLE test.t2; +create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); +select distinct a from test.t1 group by b,a having a > 2 order by a desc; +a +4 +3 +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; +a c +4 NULL +3 NULL +select distinct a from test.t1 group by b,a having a > 2 order by a asc; +a +3 +4 +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; +a c +3 NULL +4 NULL +drop table test.t1; +create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); +select * from test.t1 where a >= '1'; +a +1 +2 +3 +1 +1 +2 +3 +select distinct a from test.t1 order by a desc; +a +3 +2 +1 +select distinct a from test.t1 where a >= '1' order by a desc; +a +3 +2 +1 +select distinct a 
from test.t1 where a >= '1' order by a asc; +a +1 +2 +3 +drop table test.t1; +CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; +INSERT INTO test.t1 (email, infoID, dateentered) VALUES +('test1@testdomain.com', 1, '2002-07-30 22:56:38'), +('test1@testdomain.com', 1, '2002-07-27 22:58:16'), +('test2@testdomain.com', 1, '2002-06-19 15:22:19'), +('test2@testdomain.com', 2, '2002-06-18 14:23:47'), +('test3@testdomain.com', 1, '2002-05-19 22:17:32'); +INSERT INTO test.t2(infoID, shipcode) VALUES +(1, 'Z001'), +(2, 'R002'); +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID; +email shipcode +test1@testdomain.com Z001 +test2@testdomain.com R002 +test2@testdomain.com Z001 +test3@testdomain.com Z001 +SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; +email +test1@testdomain.com +test2@testdomain.com +test3@testdomain.com +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; +email shipcode +test1@testdomain.com Z001 +test2@testdomain.com Z001 +test2@testdomain.com R002 +test3@testdomain.com Z001 +drop table test.t1,test.t2; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts2 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +DROP TABLE IF EXISTS test.t; +Warnings: +Note 1051 Unknown table 't' +create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; +insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t order by f1; +f1 f2 f3 +111111 aaaaaa 1 +222222 bbbbbb 2 +select f1,f2 from test.t order by f2; +f1 f2 +111111 aaaaaa +222222 bbbbbb +select f2 from test.t order by f2; +f2 +aaaaaa +bbbbbb +select f1,f2 from test.t order by f1; +f1 f2 +111111 aaaaaa +222222 bbbbbb +drop table test.t; +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts +ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, 
+ `a3` text +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` blob, + `a3` text, + PRIMARY KEY (`a1`) +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` blob, + `a3` text, + PRIMARY KEY (`a1`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +Warnings: +Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) DEFAULT NULL, + `a2` blob, + `a3` text +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), +ADD INDEX (a7), ADD INDEX (a8); +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`), + KEY `a2` (`a2`), + KEY `a3` (`a3`), + KEY `a5` (`a5`), + KEY `a6` (`a6`), + KEY `a7` (`a7`), + KEY `a8` (`a8`) +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL, + `a6` bigint(20) DEFAULT NULL, + `a7` date DEFAULT NULL, + `a8` time DEFAULT NULL, + `a9` datetime DEFAULT NULL, + `a10` tinytext, + `a11` mediumtext, + `a12` longtext, + `a13` text, + `a14` blob, + PRIMARY KEY (`a1`) +) 
ENGINE=ndbcluster DEFAULT CHARSET=latin1 +ALTER TABLE test.t1 DROP a14; +ALTER TABLE test.t1 DROP a13; +ALTER TABLE test.t1 DROP a12; +ALTER TABLE test.t1 DROP a11; +ALTER TABLE test.t1 DROP a10; +ALTER TABLE test.t1 DROP a9; +ALTER TABLE test.t1 DROP a8; +ALTER TABLE test.t1 DROP a7; +ALTER TABLE test.t1 DROP a6; +ALTER TABLE test.t1 DROP PRIMARY KEY; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` float DEFAULT NULL, + `a3` double DEFAULT NULL, + `a4` bit(1) DEFAULT NULL, + `a5` tinyint(4) DEFAULT NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 +DROP TABLE test.t1; +ALTER TABLESPACE ts +DROP DATAFILE './table_space/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result new file mode 100644 index 00000000000..d14dc78a366 --- /dev/null +++ b/mysql-test/r/ndb_dd_advance2.result @@ -0,0 +1,760 @@ +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +DROP TABLE IF EXISTS test.t3; +DROP DATABASE IF EXISTS test; +***** +**** Copy data from table in one table space to table in different table space +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLESPACE ts2 +ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE DATABASE test; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts2 STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` varchar(256) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a1` int(11) NOT NULL, + `a2` varchar(256) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); +INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); +SELECT * FROM test.t1; +a1 a2 a3 +1 111111 aaaaaaaa +2 222222 bbbbbbbb +INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; +SELECT * FROM test.t2; +a1 a2 a3 +1 111111 aaaaaaaa +2 222222 bbbbbbbb +DROP TABLE test.t1, test.t2; +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @d2 = 
'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +select length(@x0),length(@b1),length(@d1) from dual; +length(@x0) length(@b1) length(@d1) +256 2256 3000 +select length(@x0),length(@b2),length(@d2) from dual; +length(@x0) length(@b2) length(@d2) +256 20000 30000 +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) +TABLESPACE ts2 STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a1` int(11) NOT NULL, + `a2` varchar(5000) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +SHOW CREATE TABLE test.t2; +Table Create Table +t2 CREATE TABLE `t2` ( + `a1` int(11) NOT NULL, + `a2` varchar(5000) DEFAULT NULL, + `a3` blob, + PRIMARY KEY (`a1`) +) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 +INSERT INTO test.t1 VALUES (1,@vc1,@d1); +INSERT INTO test.t1 VALUES (2,@vc2,@b1); +INSERT INTO test.t1 VALUES (3,@vc3,@d2); +INSERT INTO test.t1 VALUES (4,@vc4,@b2); +SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) +FROM test.t1 WHERE a1=1; +a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3) +1 200 aa 3000 dd1 +SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) +FROM test.t1 where a1=2; +a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) +2 500 bb 2256 b1b +INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; +SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) +FROM test.t2 WHERE a1=1; +a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3) +1 200 aa 3000 dd1 +SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) +FROM test.t2 where a1=2; +a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) +2 500 bb 2256 b1b +DROP TABLE test.t1, test.t2; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +ALTER TABLESPACE ts2 +DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts2 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +**** Insert, Update, Delete from NDB table with BLOB fields +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @vc5 = repeat('d', 5000); +set @bb1 = repeat('1', 2000); +set @bb2 = repeat('2', 5000); +set @bb3 = repeat('3', 10000); +set @bb4 = repeat('4', 40000); +set @bb5 = repeat('5', 50000); +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; +length(@vc1) length(@vc2) length(@vc3) length(@vc4) length(@vc5) +200 500 1000 4000 5000 +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; +length(@bb1) length(@bb2) length(@bb3) length(@bb4) length(@bb5) +2000 5000 10000 40000 50000 +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +INSERT INTO test.t1 VALUES 
(1,@vc1,@bb1); +INSERT INTO test.t1 VALUES (2,@vc2,@bb2); +INSERT INTO test.t1 VALUES (3,@vc3,@bb3); +INSERT INTO test.t1 VALUES (4,@vc4,@bb4); +INSERT INTO test.t1 VALUES (5,@vc5,@bb5); +UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; +SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) +FROM test.t1 WHERE a1=1; +a1 length(a2) substr(a2,4998,2) length(a3) substr(a3,49997,3) +1 5000 dd 50000 555 +UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; +SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) +FROM test.t1 WHERE a1=2; +a1 length(a2) substr(a2,3998,2) length(a3) substr(a3,39997,3) +2 4000 dd 40000 444 +UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3; +SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3) +FROM test.t1 WHERE a1=3; +a1 length(a2) substr(a2,498,2) length(a3) substr(a3,3997,3) +3 500 bb 5000 222 +UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4; +SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3) +FROM test.t1 WHERE a1=4; +a1 length(a2) substr(a2,998,2) length(a3) substr(a3,9997,3) +4 1000 cc 10000 333 +UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5; +SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3) +FROM test.t1 WHERE a1=5; +a1 length(a2) substr(a2,198,2) length(a3) substr(a3,1997,3) +5 200 aa 2000 111 +DELETE FROM test.t1 where a1=5; +SELECT count(*) from test.t1; +count(*) +4 +DELETE FROM test.t1 where a1=4; +SELECT count(*) from test.t1; +count(*) +3 +DELETE FROM test.t1 where a1=3; +SELECT count(*) from test.t1; +count(*) +2 +DELETE FROM test.t1 where a1=2; +SELECT count(*) from test.t1; +count(*) +1 +DELETE FROM test.t1 where a1=1; +SELECT count(*) from test.t1; +count(*) +0 +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; +DROP TABLESPACE ts1 ENGINE NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +**** Create Stored procedures that use disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB// +CREATE PROCEDURE test.sp1() +BEGIN +INSERT INTO test.t1 values (1,'111111','aaaaaaaa'); +END// +CALL test.sp1(); +SELECT * FROM test.t1; +a1 a2 a3 +1 111111 aaaaaaaa +CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB) +BEGIN +UPDATE test.t1 SET a2=vc, a3=blb where a1=n; +END// +CALL test.sp2(1,'222222','bbbbbbbb'); +SELECT * FROM test.t1; +a1 a2 a3 +1 222222 bbbbbbbb +DELETE FROM test.t1; +DROP PROCEDURE test.sp1; +DROP PROCEDURE test.sp2; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create function that operate on disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE FUNCTION test.fn1(n INT) RETURNS INT +BEGIN +DECLARE v INT; +SELECT a1 INTO v FROM test.t1 WHERE a1=n; +RETURN v; +END// +CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB +BEGIN 
+DECLARE vv BLOB; +UPDATE test.t1 SET a3=blb where a1=n; +SELECT a3 INTO vv FROM test.t1 WHERE a1=n; +RETURN vv; +END// +SELECT test.fn1(10) FROM DUAL; +test.fn1(10) +10 +SELECT test.fn2(50, 'new BLOB content') FROM DUAL; +test.fn2(50, 'new BLOB content') +new BLOB content +DELETE FROM test.t1; +DROP FUNCTION test.fn1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create triggers that operate on disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW +BEGIN +if isnull(new.a2) then +set new.a2:= 'trg1 works on a2 field'; +end if; +if isnull(new.a3) then +set new.a3:= 'trg1 works on a3 field'; +end if; +end// +insert into test.t1 (a1) values (1)// +insert into test.t1 (a1,a2) values (2, 'ccccccc')// +select * from test.t1// +a1 a2 a3 +1 trg1 works on a2 field trg1 works on a3 field +2 ccccccc trg1 works on a3 field +DELETE FROM test.t1; +DROP TRIGGER test.trg1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create, update views that operate on disk based tables +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE VIEW test.v1 AS SELECT * FROM test.t1; +SELECT * FROM test.v1 order by a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +3 aaaaa3 bbbbb3 +4 aaaaa4 bbbbb4 +5 aaaaa5 bbbbb5 +6 aaaaa6 bbbbb6 +7 aaaaa7 bbbbb7 +8 aaaaa8 bbbbb8 +9 aaaaa9 bbbbb9 +10 aaaaa10 bbbbb10 +CHECK TABLE test.v1, test.t1; +Table Op Msg_type Msg_text +test.v1 check status OK +test.t1 check note The storage engine for the table doesn't support check +UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5; +SELECT * FROM test.v1 order by a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +3 aaaaa3 bbbbb3 +4 aaaaa4 bbbbb4 +5 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz bbbbb5 +6 aaaaa6 bbbbb6 +7 aaaaa7 bbbbb7 +8 aaaaa8 bbbbb8 +9 aaaaa9 bbbbb9 +10 aaaaa10 bbbbb10 +DROP VIEW test.v1; +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create and use disk based table that use auto inc +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa10 bbbbb10 +2 aaaaa9 bbbbb9 +3 aaaaa8 bbbbb8 +4 aaaaa7 bbbbb7 +5 aaaaa6 bbbbb6 +6 
aaaaa5 bbbbb5 +7 aaaaa4 bbbbb4 +8 aaaaa3 bbbbb3 +9 aaaaa2 bbbbb2 +10 aaaaa1 bbbbb1 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create test that use transaction (commit, rollback) +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +SET AUTOCOMMIT=0; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); +COMMIT; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); +ROLLBACK; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +DELETE FROM test.t1; +DROP TABLE test.t1; +SET AUTOCOMMIT=1; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +START TRANSACTION; +INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); +COMMIT; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +START TRANSACTION; +INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); +ROLLBACK; +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create test that uses locks +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +drop table if exists test.t1; +CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +LOCK TABLES test.t1 write; +INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); +INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); +SELECT * FROM test.t1 ORDER BY a1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +SELECT * FROM test.t1; +a1 a2 a3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); +UNLOCK TABLES; +INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); +SELECT * FROM test.t1; +a1 a2 a3 +3 aaaaa3 bbbbb3 +1 aaaaa1 bbbbb1 +2 aaaaa2 bbbbb2 +4 aaaaa3 bbbbb3 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create large disk base table, do random queries, check cache hits +***** +set @vc1 = repeat('a', 200); +SELECT @vc1 FROM DUAL; +@vc1 +aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa +set @vc2 = repeat('b', 500); +set @vc3 = repeat('b', 998); +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set 
@b1 = concat(@b1,@x0); +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +select length(@x0),length(@b1),length(@d1) from dual; +length(@x0) length(@b1) length(@d1) +256 2256 3000 +select length(@x0),length(@b2),length(@d2) from dual; +length(@x0) length(@b2) length(@d2) +256 20000 30000 +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +INSERT INTO test.t1 values(1,@vc1,@d1); +INSERT INTO test.t1 values(2,@vc2,@d2); +explain SELECT * from test.t1 WHERE a1 = 1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 +SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) +FROM test.t1 WHERE a1=1; +a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) +1 200 3000 dd1 +SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) +FROM test.t1 where a1=2; +a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) +2 500 30000 dd2 +UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; +UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; +SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) +FROM test.t1 where a1=1; +a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) +1 500 30000 dd2 +SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) +FROM test.t1 where a1=2; +a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) +2 200 3000 dd1 +SHOW VARIABLES LIKE 'have_query_cache'; +Variable_name Value +have_query_cache YES +SHOW STATUS LIKE 'Qcache%'; +Variable_name Value +Qcache_free_blocks 0 +Qcache_free_memory 0 +Qcache_hits 0 +Qcache_inserts 0 +Qcache_lowmem_prunes 0 +Qcache_not_cached 0 +Qcache_queries_in_cache 0 +Qcache_total_blocks 0 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +SELECT COUNT(*) from test.t1; +COUNT(*) +100 +SELECT SUM(a1) from test.t1; +SUM(a1) +5050 +SELECT MIN(a1) from test.t1; +MIN(a1) +1 +SELECT MAX(a1) from test.t1; +MAX(a1) +100 
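+# Note: the populate step for t1 is not echoed here (COUNT(*) is 100 with no
+# INSERTs recorded, so it presumably ran under --disable_query_log). A
+# plausible loader, assuming NOW() and USER() supplied a4 and a5, would be:
+# --disable_query_log
+# let $i = 100;
+# while ($i)
+# {
+#   eval INSERT INTO test.t1 VALUES ($i, concat('aaaaaaaaaaaaaaaa', $i),
+#        concat('bbbbbbbbbbbbbbbbbb', $i), NOW(), USER());
+#   dec $i;
+# }
+# --enable_query_log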
+SELECT a5 from test.t1 where a1=50; +a5 +root@localhost +SELECT * from test.t1; +a1 a2 a3 a4 a5 +80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-12 root@localhost +92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-12 root@localhost +95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-12 root@localhost +7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-12 root@localhost +28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-12 root@localhost +9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-12 root@localhost +3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-12 root@localhost +87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-12 root@localhost +60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-12 root@localhost +43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-12 root@localhost +77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-12 root@localhost +73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-12 root@localhost +76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-12 root@localhost +10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-12 root@localhost +85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-12 root@localhost +70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-12 root@localhost +41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-12 root@localhost +20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-12 root@localhost +18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-12 root@localhost +82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-12 root@localhost +57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-12 root@localhost +53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-12 root@localhost +52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-12 root@localhost +79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-12 root@localhost +72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-12 root@localhost +100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-12 root@localhost +91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-12 root@localhost +15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-12 root@localhost +31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-12 root@localhost +36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-12 root@localhost +34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-12 root@localhost +62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-12 root@localhost +6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-12 root@localhost +23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-12 root@localhost +50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-12 root@localhost +63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-12 root@localhost +93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-12 root@localhost +35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-12 root@localhost +19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-12 root@localhost +84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-12 root@localhost +12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-12 root@localhost +88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-12 root@localhost +5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-12 root@localhost +51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-12 root@localhost +97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-12 root@localhost +54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-12 root@localhost +59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-12 root@localhost +27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-12 root@localhost +32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-12 root@localhost +21 aaaaaaaaaaaaaaaa21 
bbbbbbbbbbbbbbbbbb21 2006-06-12 root@localhost +94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-12 root@localhost +61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-12 root@localhost +83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-12 root@localhost +39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-12 root@localhost +22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-12 root@localhost +14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-12 root@localhost +86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-12 root@localhost +66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-12 root@localhost +11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-12 root@localhost +74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-12 root@localhost +48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-12 root@localhost +44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-12 root@localhost +75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-12 root@localhost +42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-12 root@localhost +64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-12 root@localhost +1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-12 root@localhost +68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-12 root@localhost +25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-12 root@localhost +13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-12 root@localhost +56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-12 root@localhost +40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-12 root@localhost +81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-12 root@localhost +46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-12 root@localhost +89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-12 root@localhost +55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-12 root@localhost +47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-12 root@localhost +26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-12 root@localhost +17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-12 root@localhost +29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-12 root@localhost +37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-12 root@localhost +16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-12 root@localhost +69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-12 root@localhost +99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-12 root@localhost +24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-12 root@localhost +90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-12 root@localhost +67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-12 root@localhost +65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-12 root@localhost +45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-12 root@localhost +2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-12 root@localhost +96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-12 root@localhost +78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-12 root@localhost +98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-12 root@localhost +38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-12 root@localhost +58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-12 root@localhost +71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-12 root@localhost +30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-12 root@localhost +33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-12 root@localhost +49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-12 root@localhost +4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-12 root@localhost +8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-12 root@localhost +DROP TABLE 
test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test
new file mode 100755
index 00000000000..a41cd5862ec
--- /dev/null
+++ b/mysql-test/t/ndb_dd_advance.test
@@ -0,0 +1,630 @@
+##############################################################
+# Author: JBM
+# Date: 2006-01-12
+# Purpose: To test using ndb memory and disk tables together.
+##############################################################
+
+##############################################################
+# Author: Nikolay
+# Date: 2006-05-12
+# Purpose: To test using ndb memory and disk tables together.
+#
+# Select from disk into memory table
+# Select from memory table into disk table
+# Create test that loads data, use mysql dump to dump data, drop table,
+# create table and load from mysql dump.
+# Use group by asc and desc; Use having; Use order by
+# ALTER Tests (Meta data testing):
+# ALTER from InnoDB to Cluster Disk Data
+# ALTER from MyISAM to Cluster Disk Data
+# ALTER from Cluster Disk Data to InnoDB
+# ALTER from Cluster Disk Data to MyISAM
+# ALTER DD Tables and add columns
+# ALTER DD Tables and add Indexes
+# ALTER DD Tables and drop columns
+#
+##############################################################
+
+-- source include/have_ndb.inc
+-- source include/not_embedded.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+--enable_warnings
+
+############ Test Setup Section #############
+-- echo **** Test Setup Section ****
+
+CREATE LOGFILE GROUP log_group1
+ADD UNDOFILE './log_group1/undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+
+CREATE TABLESPACE table_space1
+ADD DATAFILE './table_space1/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+
+
+CREATE TABLE test.t1
+(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+
+CREATE TABLE test.t2
+(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL)
+ENGINE=NDB;
+
+--echo
+##################### Data load for first test ####################
+--echo **** Data load for first test ****
+
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+
+
+INSERT INTO test.t2 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + +--echo +##################### Test 1 Section Begins ############### +--echo *** Test 1 Section Begins *** +SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +--echo +####################### Test 1 Section End ################ + +##################### Setup for test 2 #################### +--echo *** Setup for test 2 **** +DELETE FROM test.t1; +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); +--echo +############################# Test Section 2 ############### +--echo **** Test Section 2 **** +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; +SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; +SHOW CREATE TABLE test.t2; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +SHOW CREATE TABLE test.t2; +ALTER TABLE test.t1 ENGINE=NDBCLUSTER; +SHOW CREATE TABLE test.t1; +--echo +######################### End Test Section 2 ################# +DROP TABLE test.t1; +DROP TABLE test.t2; +##################### Setup for Test Section 3 ############### +--echo *** Setup for Test Section 3 *** +CREATE TABLE test.t1 ( + usr_id INT unsigned NOT NULL, + uniq_id INT unsigned NOT NULL AUTO_INCREMENT, + start_num INT unsigned NOT NULL DEFAULT 1, + increment INT unsigned NOT NULL DEFAULT 1, + PRIMARY KEY (uniq_id), + INDEX usr_uniq_idx (usr_id, uniq_id), + INDEX uniq_usr_idx (uniq_id, usr_id)) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; + + +CREATE TABLE test.t2 ( + id INT unsigned NOT NULL DEFAULT 0, + usr2_id INT unsigned NOT NULL DEFAULT 0, + max INT unsigned NOT NULL DEFAULT 0, + c_amount INT unsigned NOT NULL DEFAULT 0, + d_max INT unsigned NOT NULL DEFAULT 0, + d_num INT unsigned NOT NULL DEFAULT 0, + orig_time INT unsigned NOT NULL DEFAULT 0, + c_time INT unsigned NOT NULL DEFAULT 0, + active ENUM ("no","yes") NOT NULL, + PRIMARY KEY (id,usr2_id), + INDEX id_idx (id), + INDEX usr2_idx (usr2_id)) +ENGINE=NDB; + +INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); + +--echo +###################### Test Section 3 ###################### +--echo **** Test Section 3 **** 
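+# Note: uniq_id is AUTO_INCREMENT, so the 14 rows inserted above should get
+# uniq_id values 1..14, and the first join below probes uniq_id = 4 while
+# test.t2 is still empty, so the t2 columns are expected to come back NULL.
+# A quick sanity check on the generated keys (kept commented out so the
+# recorded result file is not affected; "14" is inferred from the insert
+# above, not stated by the original test):
+# SELECT MIN(uniq_id), MAX(uniq_id), COUNT(*) FROM test.t1;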
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+
+INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes');
+
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+--echo
+####################### End Section 3 #########################
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+ALTER TABLESPACE table_space1
+DROP DATAFILE './table_space1/datafile.dat'
+ENGINE = NDB;
+
+DROP TABLESPACE table_space1
+ENGINE = NDB;
+
+DROP LOGFILE GROUP log_group1
+ENGINE =NDB;
+
+####################### Section 4 #########################
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLESPACE ts2
+ ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+
+### Select from disk into memory table ###
+
+ CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int)
+ ENGINE=NDB;
+
+ SHOW CREATE TABLE t1;
+ SHOW CREATE TABLE t2;
+
+ INSERT INTO t1 VALUES (1,1);
+ INSERT INTO t1 VALUES (2,2);
+ SELECT * FROM t1;
+ INSERT INTO t2(a,b) SELECT * FROM t1;
+ SELECT * FROM t2;
+
+### Select from memory table into disk table ###
+
+ TRUNCATE t1;
+ TRUNCATE t2;
+ INSERT INTO t2 VALUES (3,3);
+ INSERT INTO t2 VALUES (4,4);
+ INSERT INTO t1(a,b) SELECT * FROM t2;
+ SELECT * FROM t1;
+
+ DROP TABLE t1, t2;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts1 ENGINE NDB;
+
+ ALTER TABLESPACE ts2
+ DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts2 ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that loads data, use mysql dump to dump data, drop table,
+#### create table and load from mysql dump.
+
+ DROP DATABASE IF EXISTS test;
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts
+ ADD DATAFILE './datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+CREATE DATABASE test;
+
+CREATE TABLE test.t (
+ a smallint NOT NULL,
+ b int NOT NULL,
+ c bigint NOT NULL,
+ d char(10),
+ e TEXT,
+ f VARCHAR(255),
+ PRIMARY KEY(a)
+) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+ SHOW CREATE TABLE test.t;
+
+# insert records into tables
+
+ let $1=100;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1");
+ dec $1;
+ }
+ enable_query_log;
+
+ SELECT * FROM test.t order by a;
+--exec $MYSQL_DUMP --skip-comments --databases test > $MYSQLTEST_VARDIR/tmp/t_dump.sql
+DROP TABLE test.t;
+--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/t_dump.sql
+USE test;
+show tables;
+
+SELECT * FROM test.t order by a;
+
+ DROP TABLE test.t;
+ DROP DATABASE test;
+
+ ALTER TABLESPACE ts
+ DROP DATAFILE './datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### BUG#18856 test case commented out
+##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load
+##### data back into the table.
+
+# CREATE LOGFILE GROUP lg
+# ADD UNDOFILE './undofile.dat'
+# INITIAL_SIZE 16M
+# UNDO_BUFFER_SIZE = 1M
+# ENGINE=NDB;
+
+# CREATE TABLESPACE ts
+# ADD DATAFILE './datafile.dat'
+# USE LOGFILE GROUP lg
+# INITIAL_SIZE 12M
+# ENGINE NDB;
+
+#CREATE DATABASE test;
+
+#CREATE TABLE test.t (
+# a smallint NOT NULL,
+# b int NOT NULL,
+# c bigint NOT NULL,
+# d char(10),
+# e TEXT,
+# f VARCHAR(255),
+# PRIMARY KEY(a)
+#) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+# SHOW CREATE TABLE test.t;
+
+# insert records into tables
+
+# let $1=100;
+# disable_query_log;
+# while ($1)
+# {
+# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1");
+# dec $1;
+# }
+# enable_query_log;
+
+# SELECT * FROM test.t order by a;
+
+# SELECT * INTO OUTFILE 't_backup' FROM test.t;
+# TRUNCATE test.t;
+
+#'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting
+#transaction.
+
+# SELECT count(*) FROM test.t;
+# LOAD DATA INFILE 't_backup' INTO TABLE test.t;
+
+# SELECT * FROM test.t order by a;
+
+# DROP TABLE test.t;
+# DROP DATABASE test;
+
+# ALTER TABLESPACE ts
+# DROP DATAFILE './datafile.dat'
+# ENGINE NDB;
+# DROP TABLESPACE ts ENGINE NDB;
+# DROP LOGFILE GROUP lg
+# ENGINE=NDB;
+
+#### Use group by asc and desc; Use having; Use order by.
#### + + DROP DATABASE IF EXISTS test; + DROP table IF EXISTS test.t1; + DROP table IF EXISTS test.t2; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLESPACE ts2 + ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +CREATE DATABASE test; + +CREATE TABLE test.t1 ( + a1 smallint NOT NULL, + a2 int NOT NULL, + a3 bigint NOT NULL, + a4 char(10), + a5 decimal(5,1), + a6 time, + a7 date, + a8 datetime, + a9 VARCHAR(255), + a10 blob, + PRIMARY KEY(a1) +) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); + SHOW CREATE TABLE test.t1; + +CREATE TABLE test.t2 ( + b1 smallint NOT NULL, + b2 int NOT NULL, + b3 bigint NOT NULL, + b4 char(10), + b5 decimal(5,1), + b6 time, + b7 date, + b8 datetime, + b9 VARCHAR(255), + b10 blob, + PRIMARY KEY(b1) +) ENGINE=NDB; + + ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); + SHOW CREATE TABLE test.t2; + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + dec $1; +} +enable_query_log; + +SELECT * FROM test.t1; +SELECT * FROM test.t2; +SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; +SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; + +DROP TABLE test.t1; +DROP TABLE test.t2; + +create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + +insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); +select distinct a from test.t1 group by b,a having a > 2 order by a desc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; +select distinct a from test.t1 group by b,a having a > 2 order by a asc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; +drop table test.t1; + +create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); +select * from test.t1 where a >= '1'; +select distinct a from test.t1 order by a desc; +select distinct a from test.t1 where a >= '1' order by a desc; +select distinct a from test.t1 where a >= '1' order by a asc; +drop table test.t1; + +CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; + +INSERT INTO test.t1 (email, infoID, dateentered) VALUES + ('test1@testdomain.com', 1, '2002-07-30 22:56:38'), + ('test1@testdomain.com', 1, '2002-07-27 22:58:16'), + ('test2@testdomain.com', 1, '2002-06-19 15:22:19'), + ('test2@testdomain.com', 2, '2002-06-18 14:23:47'), + ('test3@testdomain.com', 1, '2002-05-19 22:17:32'); + +INSERT INTO test.t2(infoID, shipcode) VALUES + (1, 'Z001'), + (2, 'R002'); + +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID; +SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered 
DESC; +drop table test.t1,test.t2; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE NDB; + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts2 ENGINE NDB; + DROP LOGFILE GROUP lg + ENGINE=NDB; +#################################################################### + + +#### Customer posted order by test case + +DROP TABLE IF EXISTS test.t; +create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; +insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t order by f1; +select f1,f2 from test.t order by f2; +select f2 from test.t order by f2; +select f1,f2 from test.t order by f1; +drop table test.t; + +################## ALTER Tests (Meta data testing) #################### + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts + ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +#### Try to ALTER from InnoDB to Cluster Disk Data + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from MyISAM to Cluster Disk Data + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from Cluster Disk Data to InnoDB + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from Cluster Disk Data to MyISAM + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER DD Tables and add columns + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; + +SHOW CREATE TABLE test.t1; + +#### Try to ALTER DD Tables and add Indexes + +ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), +ADD INDEX (a7), ADD INDEX (a8); + +SHOW CREATE TABLE test.t1; + +DROP TABLE test.t1; + +#### Try to ALTER DD Tables and drop columns + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; + +SHOW CREATE TABLE test.t1; + +ALTER TABLE test.t1 DROP a14; +ALTER TABLE test.t1 DROP a13; +ALTER TABLE test.t1 DROP a12; +ALTER TABLE test.t1 DROP a11; +ALTER TABLE test.t1 DROP a10; +ALTER TABLE test.t1 DROP a9; +ALTER TABLE test.t1 DROP a8; +ALTER TABLE test.t1 DROP a7; +ALTER TABLE test.t1 DROP a6; +ALTER TABLE test.t1 DROP PRIMARY KEY; + +SHOW CREATE TABLE test.t1; + +DROP TABLE test.t1; + + ALTER 
TABLESPACE ts
+ DROP DATAFILE './table_space/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts ENGINE NDB;
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+####################### End section 4 #########################
+#End 5.1 test case
+
diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test
new file mode 100755
index 00000000000..6d23ddf90c0
--- /dev/null
+++ b/mysql-test/t/ndb_dd_advance2.test
@@ -0,0 +1,726 @@
+##############################################################
+# Author: Nikolay
+# Date: 2006-04-01
+# Purpose: Specific Blob and Varchar testing using disk tables.
+##############################################################
+# Create Stored procedures that use disk based tables.
+# Create functions that operate on disk based tables.
+# Create triggers that operate on disk based tables.
+# Create views that operate on disk based tables.
+# Try to create FK constraints on disk based tables.
+# Create and use disk based table that uses auto increment.
+# Create test that uses transactions (commit, rollback)
+# Create large disk based table, do random queries, check cache hits, do same
+# query 10 times, check cache hits.
+# Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), ... other built-in
+# SQL functions
+# Create test that uses locks.
+# Create test using truncate.
+##############################################################
+
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+DROP TABLE IF EXISTS test.t3;
+DROP DATABASE IF EXISTS test;
+--enable_warnings
+
+#### Copy data from table in one table space to table in different table space. ####
+--echo *****
+--echo **** Copy data from table in one table space to table in different table space
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLESPACE ts2
+ ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE DATABASE test;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+
+ SHOW CREATE TABLE test.t1;
+ SHOW CREATE TABLE test.t2;
+
+ INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa');
+ INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb');
+ SELECT * FROM test.t1;
+ INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+ SELECT * FROM test.t2;
+
+ DROP TABLE test.t1, test.t2;
+
+ # populate BLOB field with large data
+
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+
+# x0 size 256
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+
+# b1 length 2000+256
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+# d1 length 3000
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+
+# b2 length 20000
+set @b2 = 'b2';
+set @b2 =
concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +# d2 length 30000 +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); + +select length(@x0),length(@b1),length(@d1) from dual; +select length(@x0),length(@b2),length(@d2) from dual; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + SHOW CREATE TABLE test.t1; + SHOW CREATE TABLE test.t2; + + INSERT INTO test.t1 VALUES (1,@vc1,@d1); + INSERT INTO test.t1 VALUES (2,@vc2,@b1); + INSERT INTO test.t1 VALUES (3,@vc3,@d2); + INSERT INTO test.t1 VALUES (4,@vc4,@b2); + + SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 WHERE a1=1; + SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) + FROM test.t1 where a1=2; + + INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; + SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) + FROM test.t2 WHERE a1=1; + SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) + FROM test.t2 where a1=2; + + + DROP TABLE test.t1, test.t2; + + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE NDB; + + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts2 ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Insert, Update, Delete from NDB table with BLOB fields #### +--echo ***** +--echo **** Insert, Update, Delete from NDB table with BLOB fields +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @vc5 = repeat('d', 5000); + +set @bb1 = repeat('1', 2000); +set @bb2 = repeat('2', 5000); +set @bb3 = repeat('3', 10000); +set @bb4 = repeat('4', 40000); +set @bb5 = repeat('5', 50000); + +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +# CREATE TABLE test.t2 (a1 int NOT NULL, a2 VARCHAR(5000), a3 BLOB) +# TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 VALUES (1,@vc1,@bb1); + INSERT INTO test.t1 VALUES (2,@vc2,@bb2); + INSERT INTO test.t1 VALUES (3,@vc3,@bb3); + INSERT INTO test.t1 VALUES (4,@vc4,@bb4); + INSERT INTO test.t1 VALUES (5,@vc5,@bb5); + + UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; + SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) + FROM test.t1 WHERE a1=1; + + UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; + SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) + FROM test.t1 WHERE a1=2; + + UPDATE test.t1 SET 
a2=@vc2, a3=@bb2 WHERE a1=3;
+ SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3)
+ FROM test.t1 WHERE a1=3;
+
+ UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4;
+ SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3)
+ FROM test.t1 WHERE a1=4;
+
+ UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5;
+ SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3)
+ FROM test.t1 WHERE a1=5;
+
+ DELETE FROM test.t1 where a1=5;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=4;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=3;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=2;
+ SELECT count(*) from test.t1;
+ DELETE FROM test.t1 where a1=1;
+ SELECT count(*) from test.t1;
+
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts1 ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+##### Create Stored procedures that use disk based tables #####
+--echo *****
+--echo **** Create Stored procedures that use disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+delimiter //;
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB//
+ CREATE PROCEDURE test.sp1()
+ BEGIN
+ INSERT INTO test.t1 values (1,'111111','aaaaaaaa');
+ END//
+delimiter ;//
+
+ CALL test.sp1();
+ SELECT * FROM test.t1;
+
+delimiter //;
+ CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB)
+ BEGIN
+ UPDATE test.t1 SET a2=vc, a3=blb where a1=n;
+ END//
+delimiter ;//
+
+ CALL test.sp2(1,'222222','bbbbbbbb');
+ SELECT * FROM test.t1;
+
+ DELETE FROM test.t1;
+ DROP PROCEDURE test.sp1;
+ DROP PROCEDURE test.sp2;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create functions that operate on disk based tables ####
+--echo *****
+--echo ***** Create functions that operate on disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=100;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1");
+ dec $1;
+ }
+ enable_query_log;
+
+ delimiter //;
+ CREATE FUNCTION test.fn1(n INT) RETURNS INT
+ BEGIN
+ DECLARE v INT;
+ SELECT a1 INTO v FROM test.t1 WHERE a1=n;
+ RETURN v;
+ END//
+ delimiter ;//
+
+delimiter //;
+ CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB
+ BEGIN
+ DECLARE vv BLOB;
+ UPDATE test.t1 SET a3=blb where a1=n;
+ SELECT a3 INTO vv FROM test.t1 WHERE a1=n;
+ RETURN vv;
+ END//
+ delimiter ;//
+
+ SELECT test.fn1(10) FROM DUAL;
+ SELECT test.fn2(50, 'new BLOB content') FROM DUAL;
+
+ DELETE FROM test.t1;
+ DROP FUNCTION test.fn1;
+ DROP FUNCTION test.fn2;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create triggers that
operate on disk based tables ####
+--echo *****
+--echo ***** Create triggers that operate on disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
+ delimiter //;
+ CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW
+ BEGIN
+ if isnull(new.a2) then
+ set new.a2:= 'trg1 works on a2 field';
+ end if;
+ if isnull(new.a3) then
+ set new.a3:= 'trg1 works on a3 field';
+ end if;
+ end//
+ insert into test.t1 (a1) values (1)//
+ insert into test.t1 (a1,a2) values (2, 'ccccccc')//
+ select * from test.t1//
+ delimiter ;//
+
+ DELETE FROM test.t1;
+ DROP TRIGGER test.trg1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create and update views that operate on disk based tables ####
+--echo *****
+--echo ***** Create and update views that operate on disk based tables
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=10;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1");
+ dec $1;
+ }
+ enable_query_log;
+ CREATE VIEW test.v1 AS SELECT * FROM test.t1;
+ SELECT * FROM test.v1 order by a1;
+ CHECK TABLE test.v1, test.t1;
+
+ UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5;
+ SELECT * FROM test.v1 order by a1;
+
+ DROP VIEW test.v1;
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create and use disk based table that uses auto inc ####
+--echo *****
+--echo ***** Create and use disk based table that uses auto inc
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ let $1=10;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t1 values(NULL, "aaaaa$1", "bbbbb$1");
+ dec $1;
+ }
+ enable_query_log;
+ SELECT * FROM test.t1 ORDER BY a1;
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that uses transactions (commit, rollback) ####
+--echo *****
+--echo ***** Create test that uses transactions (commit, rollback)
+--echo *****
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ SET AUTOCOMMIT=0;
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+ COMMIT;
+ SELECT * FROM test.t1 ORDER BY a1;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ ROLLBACK;
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+ SET AUTOCOMMIT=1;
+
+# Now do the same thing with START TRANSACTION without using AUTOCOMMIT.
+
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+
+ START TRANSACTION;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+ COMMIT;
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ START TRANSACTION;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ ROLLBACK;
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that uses locks ####
+--echo *****
+--echo ***** Create test that uses locks
+--echo *****
+
+ connect (con1,localhost,root,,);
+ connect (con2,localhost,root,,);
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+# connection con1;
+--disable_warnings
+ drop table if exists test.t1;
+ CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+--enable_warnings
+
+ LOCK TABLES test.t1 write;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ SELECT * FROM test.t1 ORDER BY a1;
+
+ connection con2;
+ SELECT * FROM test.t1;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+
+ connection con1;
+ UNLOCK TABLES;
+
+ connection con2;
+ INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+ SELECT * FROM test.t1;
+ DELETE FROM test.t1;
+ DROP TABLE test.t1;
+
+ #connection default;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+ DROP TABLESPACE ts1 ENGINE=NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create large disk based table, do random queries, check cache hits ####
+--echo *****
+--echo ***** Create large disk based table, do random queries, check cache hits
+--echo *****
+
+set @vc1 = repeat('a', 200);
+SELECT @vc1 FROM DUAL;
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('b', 998);
+
+# x0 size 256
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+
+# b1 length 2000+256 (blob part aligned)
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+# d1 length 3000
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+
+# b2 length 20000
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 =
concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +# d2 length 30000 +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); + +select length(@x0),length(@b1),length(@d1) from dual; +select length(@x0),length(@b2),length(@d2) from dual; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 values(1,@vc1,@d1); + INSERT INTO test.t1 values(2,@vc2,@d2); + explain SELECT * from test.t1 WHERE a1 = 1; + + SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 WHERE a1=1; + SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) + FROM test.t1 where a1=2; + + UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; + UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; + + SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) + FROM test.t1 where a1=1; + SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 where a1=2; + + SHOW VARIABLES LIKE 'have_query_cache'; + SHOW STATUS LIKE 'Qcache%'; + + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE #### +--echo ***** +--echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=100; + disable_query_log; + while ($1) + { + eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", NOW(), USER()); + dec $1; + } + enable_query_log; + + SELECT COUNT(*) from test.t1; + SELECT SUM(a1) from test.t1; + SELECT MIN(a1) from test.t1; + SELECT MAX(a1) from test.t1; + SELECT a5 from test.t1 where a1=50; + + + SELECT * from test.t1; + + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + + +#End 5.1 test case + From f42797991c3e9e215b5ca28e2007dc88ca9c65aa Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Tue, 13 Jun 2006 06:21:16 +0200 Subject: [PATCH 02/74] ndb_dd_advance2.result: ndb_dd_advance2 test cases ndb_dd_advance2.test: ndb_dd_advance test cases --- mysql-test/r/ndb_dd_advance2.result | 212 ++++++++++++++-------------- mysql-test/t/ndb_dd_advance2.test | 10 +- 2 files changed, 111 insertions(+), 111 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result index d14dc78a366..bceebe34807 
100644 --- a/mysql-test/r/ndb_dd_advance2.result +++ b/mysql-test/r/ndb_dd_advance2.result @@ -506,18 +506,18 @@ SELECT * FROM test.t1 ORDER BY a1; a1 a2 a3 1 aaaaa1 bbbbb1 2 aaaaa2 bbbbb2 -SELECT * FROM test.t1; +SELECT * FROM test.t1 ORDER BY a1; a1 a2 a3 1 aaaaa1 bbbbb1 2 aaaaa2 bbbbb2 INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); UNLOCK TABLES; INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); -SELECT * FROM test.t1; +SELECT * FROM test.t1 ORDER BY a1; a1 a2 a3 -3 aaaaa3 bbbbb3 1 aaaaa1 bbbbb1 2 aaaaa2 bbbbb2 +3 aaaaa3 bbbbb3 4 aaaaa3 bbbbb3 DELETE FROM test.t1; DROP TABLE test.t1; @@ -581,11 +581,11 @@ explain SELECT * from test.t1 WHERE a1 = 1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) -FROM test.t1 WHERE a1=1; +FROM test.t1 WHERE a1=1 ORDER BY a1; a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) 1 200 3000 dd1 SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) -FROM test.t1 where a1=2; +FROM test.t1 where a1=2 ORDER BY a1; a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) 2 500 30000 dd2 UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; @@ -649,108 +649,108 @@ MAX(a1) SELECT a5 from test.t1 where a1=50; a5 root@localhost -SELECT * from test.t1; +SELECT * from test.t1 order by a1; a1 a2 a3 a4 a5 -80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-12 root@localhost -92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-12 root@localhost -95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-12 root@localhost -7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-12 root@localhost -28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-12 root@localhost -9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-12 root@localhost -3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-12 root@localhost -87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-12 root@localhost -60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-12 root@localhost -43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-12 root@localhost -77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-12 root@localhost -73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-12 root@localhost -76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-12 root@localhost -10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-12 root@localhost -85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-12 root@localhost -70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-12 root@localhost -41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-12 root@localhost -20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-12 root@localhost -18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-12 root@localhost -82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-12 root@localhost -57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-12 root@localhost -53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-12 root@localhost -52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-12 root@localhost -79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-12 root@localhost -72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-12 root@localhost -100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-12 root@localhost -91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-12 root@localhost -15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-12 root@localhost -31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-12 root@localhost -36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-12 root@localhost -34 aaaaaaaaaaaaaaaa34 
bbbbbbbbbbbbbbbbbb34 2006-06-12 root@localhost -62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-12 root@localhost -6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-12 root@localhost -23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-12 root@localhost -50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-12 root@localhost -63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-12 root@localhost -93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-12 root@localhost -35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-12 root@localhost -19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-12 root@localhost -84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-12 root@localhost -12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-12 root@localhost -88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-12 root@localhost -5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-12 root@localhost -51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-12 root@localhost -97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-12 root@localhost -54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-12 root@localhost -59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-12 root@localhost -27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-12 root@localhost -32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-12 root@localhost -21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-12 root@localhost -94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-12 root@localhost -61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-12 root@localhost -83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-12 root@localhost -39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-12 root@localhost -22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-12 root@localhost -14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-12 root@localhost -86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-12 root@localhost -66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-12 root@localhost -11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-12 root@localhost -74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-12 root@localhost -48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-12 root@localhost -44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-12 root@localhost -75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-12 root@localhost -42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-12 root@localhost -64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-12 root@localhost -1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-12 root@localhost -68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-12 root@localhost -25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-12 root@localhost -13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-12 root@localhost -56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-12 root@localhost -40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-12 root@localhost -81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-12 root@localhost -46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-12 root@localhost -89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-12 root@localhost -55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-12 root@localhost -47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-12 root@localhost -26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-12 root@localhost -17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-12 root@localhost -29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-12 root@localhost -37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-12 root@localhost -16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-12 root@localhost -69 
aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-12 root@localhost -99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-12 root@localhost -24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-12 root@localhost -90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-12 root@localhost -67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-12 root@localhost -65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-12 root@localhost -45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-12 root@localhost -2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-12 root@localhost -96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-12 root@localhost -78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-12 root@localhost -98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-12 root@localhost -38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-12 root@localhost -58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-12 root@localhost -71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-12 root@localhost -30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-12 root@localhost -33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-12 root@localhost -49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-12 root@localhost -4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-12 root@localhost -8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-12 root@localhost +1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-13 root@localhost +2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-13 root@localhost +3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-13 root@localhost +4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-13 root@localhost +5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-13 root@localhost +6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-13 root@localhost +7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-13 root@localhost +8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-13 root@localhost +9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-13 root@localhost +10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-13 root@localhost +11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-13 root@localhost +12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-13 root@localhost +13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-13 root@localhost +14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-13 root@localhost +15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-13 root@localhost +16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-13 root@localhost +17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-13 root@localhost +18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-13 root@localhost +19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-13 root@localhost +20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-13 root@localhost +21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-13 root@localhost +22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-13 root@localhost +23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-13 root@localhost +24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-13 root@localhost +25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-13 root@localhost +26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-13 root@localhost +27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-13 root@localhost +28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-13 root@localhost +29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-13 root@localhost +30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-13 root@localhost +31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-13 root@localhost +32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-13 root@localhost +33 aaaaaaaaaaaaaaaa33 
bbbbbbbbbbbbbbbbbb33 2006-06-13 root@localhost +34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-13 root@localhost +35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-13 root@localhost +36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-13 root@localhost +37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-13 root@localhost +38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-13 root@localhost +39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-13 root@localhost +40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-13 root@localhost +41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-13 root@localhost +42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-13 root@localhost +43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-13 root@localhost +44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-13 root@localhost +45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-13 root@localhost +46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-13 root@localhost +47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-13 root@localhost +48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-13 root@localhost +49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-13 root@localhost +50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-13 root@localhost +51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-13 root@localhost +52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-13 root@localhost +53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-13 root@localhost +54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-13 root@localhost +55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-13 root@localhost +56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-13 root@localhost +57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-13 root@localhost +58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-13 root@localhost +59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-13 root@localhost +60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-13 root@localhost +61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-13 root@localhost +62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-13 root@localhost +63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-13 root@localhost +64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-13 root@localhost +65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-13 root@localhost +66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-13 root@localhost +67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-13 root@localhost +68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-13 root@localhost +69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-13 root@localhost +70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-13 root@localhost +71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-13 root@localhost +72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-13 root@localhost +73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-13 root@localhost +74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-13 root@localhost +75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-13 root@localhost +76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-13 root@localhost +77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-13 root@localhost +78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-13 root@localhost +79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-13 root@localhost +80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-13 root@localhost +81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-13 root@localhost +82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-13 root@localhost +83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-13 root@localhost +84 
aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-13 root@localhost +85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-13 root@localhost +86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-13 root@localhost +87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-13 root@localhost +88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-13 root@localhost +89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-13 root@localhost +90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-13 root@localhost +91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-13 root@localhost +92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-13 root@localhost +93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-13 root@localhost +94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-13 root@localhost +95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-13 root@localhost +96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-13 root@localhost +97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-13 root@localhost +98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-13 root@localhost +99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-13 root@localhost +100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-13 root@localhost DROP TABLE test.t1; ALTER TABLESPACE ts1 DROP DATAFILE './table_space1/datafile.dat' diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test index 6d23ddf90c0..77a697d0bfd 100755 --- a/mysql-test/t/ndb_dd_advance2.test +++ b/mysql-test/t/ndb_dd_advance2.test @@ -564,7 +564,7 @@ delimiter //; SELECT * FROM test.t1 ORDER BY a1; connection con2; - SELECT * FROM test.t1; + SELECT * FROM test.t1 ORDER BY a1; INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); connection con1; @@ -572,7 +572,7 @@ delimiter //; connection con2; INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); - SELECT * FROM test.t1; + SELECT * FROM test.t1 ORDER BY a1; DELETE FROM test.t1; DROP TABLE test.t1; @@ -648,9 +648,9 @@ select length(@x0),length(@b2),length(@d2) from dual; explain SELECT * from test.t1 WHERE a1 = 1; SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) - FROM test.t1 WHERE a1=1; + FROM test.t1 WHERE a1=1 ORDER BY a1; SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) - FROM test.t1 where a1=2; + FROM test.t1 where a1=2 ORDER BY a1; UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; @@ -709,7 +709,7 @@ select length(@x0),length(@b2),length(@d2) from dual; SELECT a5 from test.t1 where a1=50; - SELECT * from test.t1; + SELECT * from test.t1 order by a1; DROP TABLE test.t1; From de0252b1295841ec521f0e3a82cba59c757cac5a Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Tue, 13 Jun 2006 21:18:28 +0200 Subject: [PATCH 03/74] Many files: ndb_dd_advance test cases --- mysql-test/r/ndb_dd_advance.result | 7 ------- mysql-test/r/ndb_dd_advance2.result | 15 --------------- mysql-test/t/ndb_dd_advance.test | 10 +++++----- mysql-test/t/ndb_dd_advance2.test | 7 ++----- 4 files changed, 7 insertions(+), 32 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result index 810fc62c942..7e82f6db97d 100644 --- a/mysql-test/r/ndb_dd_advance.result +++ b/mysql-test/r/ndb_dd_advance.result @@ -370,7 +370,6 @@ DROP DATAFILE './table_space2/datafile.dat' DROP TABLESPACE ts2 ENGINE NDB; DROP LOGFILE GROUP lg ENGINE=NDB; -DROP DATABASE IF EXISTS test; CREATE LOGFILE GROUP lg ADD UNDOFILE './undofile.dat' INITIAL_SIZE 16M @@ -381,7 +380,6 @@ ADD DATAFILE './datafile.dat' USE LOGFILE GROUP lg 
INITIAL_SIZE 12M ENGINE NDB; -CREATE DATABASE test; CREATE TABLE test.t ( a smallint NOT NULL, b int NOT NULL, @@ -615,16 +613,12 @@ a b c d e f 99 100 101 aaa99 bbb99 ccccc99 100 101 102 aaa100 bbb100 ccccc100 DROP TABLE test.t; -DROP DATABASE test; ALTER TABLESPACE ts DROP DATAFILE './datafile.dat' ENGINE NDB; DROP TABLESPACE ts ENGINE NDB; DROP LOGFILE GROUP lg ENGINE=NDB; -DROP DATABASE IF EXISTS test; -Warnings: -Note 1008 Can't drop database 'test'; database doesn't exist DROP table IF EXISTS test.t1; Warnings: Note 1051 Unknown table 't1' @@ -646,7 +640,6 @@ ADD DATAFILE './table_space2/datafile.dat' USE LOGFILE GROUP lg INITIAL_SIZE 12M ENGINE NDB; -CREATE DATABASE test; CREATE TABLE test.t1 ( a1 smallint NOT NULL, a2 int NOT NULL, diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result index bceebe34807..2f624105aeb 100644 --- a/mysql-test/r/ndb_dd_advance2.result +++ b/mysql-test/r/ndb_dd_advance2.result @@ -1,7 +1,6 @@ DROP TABLE IF EXISTS test.t1; DROP TABLE IF EXISTS test.t2; DROP TABLE IF EXISTS test.t3; -DROP DATABASE IF EXISTS test; ***** **** Copy data from table in one table space to table in different table space ***** @@ -20,7 +19,6 @@ ADD DATAFILE './table_space2/datafile.dat' USE LOGFILE GROUP lg INITIAL_SIZE 12M ENGINE NDB; -CREATE DATABASE test; CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) @@ -598,19 +596,6 @@ SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) FROM test.t1 where a1=2; a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) 2 200 3000 dd1 -SHOW VARIABLES LIKE 'have_query_cache'; -Variable_name Value -have_query_cache YES -SHOW STATUS LIKE 'Qcache%'; -Variable_name Value -Qcache_free_blocks 0 -Qcache_free_memory 0 -Qcache_hits 0 -Qcache_inserts 0 -Qcache_lowmem_prunes 0 -Qcache_not_cached 0 -Qcache_queries_in_cache 0 -Qcache_total_blocks 0 DELETE FROM test.t1; DROP TABLE test.t1; ALTER TABLESPACE ts1 diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test index a41cd5862ec..5938ea31c73 100755 --- a/mysql-test/t/ndb_dd_advance.test +++ b/mysql-test/t/ndb_dd_advance.test @@ -269,7 +269,7 @@ ENGINE =NDB; #### Create test that loads data, use mysql dump to dump data, drop table, #### create table and load from mysql dump. - DROP DATABASE IF EXISTS test; +# DROP DATABASE IF EXISTS test; CREATE LOGFILE GROUP lg ADD UNDOFILE './undofile.dat' @@ -283,7 +283,7 @@ ENGINE =NDB; INITIAL_SIZE 12M ENGINE NDB; -CREATE DATABASE test; +# CREATE DATABASE test; CREATE TABLE test.t ( a smallint NOT NULL, @@ -319,7 +319,7 @@ show tables; SELECT * FROM test.t order by a; DROP TABLE test.t; - DROP DATABASE test; +# DROP DATABASE test; ALTER TABLESPACE ts DROP DATAFILE './datafile.dat' @@ -395,7 +395,7 @@ SELECT * FROM test.t order by a; #### Use group by asc and dec; Use having; Use order by. 
####

- DROP DATABASE IF EXISTS test;
+# DROP DATABASE IF EXISTS test;
 DROP table IF EXISTS test.t1;
 DROP table IF EXISTS test.t2;
@@ -417,7 +417,7 @@ SELECT * FROM test.t order by a;
 INITIAL_SIZE 12M
 ENGINE NDB;

-CREATE DATABASE test;
+# CREATE DATABASE test;

 CREATE TABLE test.t1 (
 a1 smallint NOT NULL,
diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test
index 77a697d0bfd..01f2d3d12b5 100755
--- a/mysql-test/t/ndb_dd_advance2.test
+++ b/mysql-test/t/ndb_dd_advance2.test
@@ -23,7 +23,6 @@
 DROP TABLE IF EXISTS test.t1;
 DROP TABLE IF EXISTS test.t2;
 DROP TABLE IF EXISTS test.t3;
-DROP DATABASE IF EXISTS test;
 --enable_warnings
 #### Copy data from table in one table space to table in different table space.
 ####
@@ -49,8 +48,6 @@ DROP DATABASE IF EXISTS test;
 INITIAL_SIZE 12M
 ENGINE NDB;

- CREATE DATABASE test;
-
 CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
 TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
 CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
@@ -660,8 +657,8 @@ select length(@x0),length(@b2),length(@d2) from dual;
 SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
 FROM test.t1 where a1=2;
- SHOW VARIABLES LIKE 'have_query_cache';
- SHOW STATUS LIKE 'Qcache%';
+ #SHOW VARIABLES LIKE 'have_query_cache';
+ #SHOW STATUS LIKE 'Qcache%';
 DELETE FROM test.t1;
 DROP TABLE test.t1;

From d8d42948b2baad475754abc260dfed7ac3012452 Mon Sep 17 00:00:00 2001
From: "holyfoot@deer.(none)" <>
Date: Mon, 19 Jun 2006 22:11:01 +0500
Subject: [PATCH 04/74] bug #20318 (ctype_ucs2_def test fails with embedded)

There were two problems with charsets in the embedded server:

1. mysys/charset.c - the default_charset_info variable defined there is
modified by both server and client code (particularly when the
--default-charset option is handled). In the embedded server, both
codelines end up modifying the same variable, so a separate
default_client_charset_info was created for the client code.

2. The initialization of mysql->charset and mysql->options.charset was
not done properly for the embedded server; the necessary calls have
been added.
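As a rough illustration of the code path this touches: the charset name
that mysql_init_character_set() resolves comes in through the ordinary
C API. The sketch below is illustrative only and is not part of the
patch; host, user, and password are placeholders, and an embedded build
would call mysql_server_init() before mysql_init():

#include <mysql.h>
#include <stdio.h>

int main(void)
{
  /* mysql_init() now seeds mysql->charset from
     default_client_charset_info rather than the server's
     default_charset_info. */
  MYSQL *conn= mysql_init(NULL);
  if (conn == NULL)
    return 1;

  /* This name is resolved by mysql_init_character_set() inside
     mysql_real_connect(); with libmysqld it is then pushed into the
     server-side THD by thd_init_client_charset(). */
  mysql_options(conn, MYSQL_SET_CHARSET_NAME, "utf8");

  /* Placeholder connection parameters. */
  if (!mysql_real_connect(conn, "localhost", "user", "password",
                          "test", 0, NULL, 0))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(conn));
    mysql_close(conn);
    return 1;
  }
  printf("connection charset: %s\n", mysql_character_set_name(conn));
  mysql_close(conn);
  return 0;
}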
---
 include/sql_common.h | 1 +
 libmysqld/lib_sql.cc | 7 ++++
 libmysqld/libmysqld.c | 48 ++-----------------------
 sql-common/client.c | 84 ++++++++++++++++++++++++-------------------
 sql/sql_parse.cc | 59 ++++++++++++++++--------------
 5 files changed, 91 insertions(+), 108 deletions(-)

diff --git a/include/sql_common.h b/include/sql_common.h
index c07a4a831bb..9fc8d4f457b 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -22,6 +22,7 @@ extern const char *not_error_sqlstate;
 extern "C" {
 #endif

+extern CHARSET_INFO *default_client_charset_info;
 MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
                            my_bool default_value, uint server_capabilities);
 void free_rows(MYSQL_DATA *cur);
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index bf8c17a71af..56f4200e695 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -41,6 +41,8 @@ static const char *fake_groups[] = { "server", "embedded", 0 };
 int check_user(THD *thd, enum enum_server_command command,
                const char *passwd, uint passwd_len, const char *db,
                bool check_count);
+void thd_init_client_charset(THD *thd, uint cs_number);
+
 C_MODE_START
 #include
 #undef ER
@@ -532,10 +534,13 @@ err:
   return NULL;
 }

+
 #ifdef NO_EMBEDDED_ACCESS_CHECKS
 int check_embedded_connection(MYSQL *mysql)
 {
   THD *thd= (THD*)mysql->thd;
+  thd_init_client_charset(thd, mysql->charset->number);
+  thd->update_charset();
   thd->host= (char*)my_localhost;
   thd->host_or_ip= thd->host;
   thd->user= my_strdup(mysql->user, MYF(0));
@@ -551,6 +556,8 @@ int check_embedded_connection(MYSQL *mysql)
   char scramble_buff[SCRAMBLE_LENGTH];
   int passwd_len;

+  thd_init_client_charset(thd, mysql->charset->number);
+  thd->update_charset();
   if (mysql->options.client_ip)
   {
     thd->host= my_strdup(mysql->options.client_ip, MYF(0));
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index 6fa41fb3fd0..a2bd4242c3d 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -85,49 +85,7 @@ static void end_server(MYSQL *mysql)
 }

-static int mysql_init_charset(MYSQL *mysql)
-{
-  char charset_name_buff[16], *charset_name;
-
-  if ((charset_name=mysql->options.charset_name))
-  {
-    const char *save=charsets_dir;
-    if (mysql->options.charset_dir)
-      charsets_dir=mysql->options.charset_dir;
-    mysql->charset=get_charset_by_name(mysql->options.charset_name,
-                                       MYF(MY_WME));
-    charsets_dir=save;
-  }
-  else if (mysql->server_language)
-  {
-    charset_name=charset_name_buff;
-    sprintf(charset_name,"%d",mysql->server_language); /* In case of errors */
-    mysql->charset=get_charset((uint8) mysql->server_language, MYF(MY_WME));
-  }
-  else
-    mysql->charset=default_charset_info;
-
-  if (!mysql->charset)
-  {
-    mysql->net.last_errno=CR_CANT_READ_CHARSET;
-    strmov(mysql->net.sqlstate, "HY0000");
-    if (mysql->options.charset_dir)
-      sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
-              charset_name ? charset_name : "unknown",
-              mysql->options.charset_dir);
-    else
-    {
-      char cs_dir_name[FN_REFLEN];
-      get_charsets_dir(cs_dir_name);
-      sprintf(mysql->net.last_error,ER(mysql->net.last_errno),
-              charset_name ?
charset_name : "unknown", - cs_dir_name); - } - return mysql->net.last_errno; - } - return 0; -} - +int mysql_init_character_set(MYSQL *mysql); MYSQL * STDCALL mysql_real_connect(MYSQL *mysql,const char *host, const char *user, @@ -203,10 +161,10 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, init_embedded_mysql(mysql, client_flag, db_name); - if (check_embedded_connection(mysql)) + if (mysql_init_character_set(mysql)) goto error; - if (mysql_init_charset(mysql)) + if (check_embedded_connection(mysql)) goto error; /* Send client information for access check */ diff --git a/sql-common/client.c b/sql-common/client.c index 3a598832253..ea8baeeffc7 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -133,6 +133,8 @@ static void mysql_close_free(MYSQL *mysql); static int wait_for_data(my_socket fd, uint timeout); #endif +CHARSET_INFO *default_client_charset_info = &my_charset_latin1; + /**************************************************************************** A modified version of connect(). my_connect() allows you to specify @@ -1424,7 +1426,7 @@ mysql_init(MYSQL *mysql) bzero((char*) (mysql),sizeof(*(mysql))); mysql->options.connect_timeout= CONNECT_TIMEOUT; mysql->last_used_con= mysql->next_slave= mysql->master = mysql; - mysql->charset=default_charset_info; + mysql->charset=default_client_charset_info; strmov(mysql->net.sqlstate, not_error_sqlstate); /* By default, we are a replication pivot. The caller must reset it @@ -1537,6 +1539,50 @@ static MYSQL_METHODS client_methods= #endif }; +C_MODE_START +int mysql_init_character_set(MYSQL *mysql) +{ + NET *net= &mysql->net; + /* Set character set */ + if (!mysql->options.charset_name && + !(mysql->options.charset_name= + my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + return 1; + + { + const char *save= charsets_dir; + if (mysql->options.charset_dir) + charsets_dir=mysql->options.charset_dir; + mysql->charset=get_charset_by_csname(mysql->options.charset_name, + MY_CS_PRIMARY, MYF(MY_WME)); + charsets_dir= save; + } + + if (!mysql->charset) + { + net->last_errno=CR_CANT_READ_CHARSET; + strmov(net->sqlstate, unknown_sqlstate); + if (mysql->options.charset_dir) + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + mysql->options.charset_dir); + else + { + char cs_dir_name[FN_REFLEN]; + get_charsets_dir(cs_dir_name); + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + cs_dir_name); + } + return 1; + } + return 0; +} +C_MODE_END + + MYSQL * STDCALL CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, @@ -1875,42 +1921,8 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, goto error; } - /* Set character set */ - if (!mysql->options.charset_name && - !(mysql->options.charset_name= - my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + if (mysql_init_character_set(mysql)) goto error; - - { - const char *save= charsets_dir; - if (mysql->options.charset_dir) - charsets_dir=mysql->options.charset_dir; - mysql->charset=get_charset_by_csname(mysql->options.charset_name, - MY_CS_PRIMARY, MYF(MY_WME)); - charsets_dir= save; - } - - if (!mysql->charset) - { - net->last_errno=CR_CANT_READ_CHARSET; - strmov(net->sqlstate, unknown_sqlstate); - if (mysql->options.charset_dir) - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - mysql->options.charset_dir); - else 
- { - char cs_dir_name[FN_REFLEN]; - get_charsets_dir(cs_dir_name); - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - cs_dir_name); - } - goto error; - } - /* Save connection information */ if (!my_multi_malloc(MYF(0), diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 51ef3f31b26..4c0221c9e9c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -667,6 +667,37 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } +void thd_init_client_charset(THD *thd, uint cs_number) +{ + /* + Use server character set and collation if + - opt_character_set_client_handshake is not set + - client has not specified a character set + - client character set is the same as the servers + - client character set doesn't exists in server + */ + if (!opt_character_set_client_handshake || + !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !my_strcasecmp(&my_charset_latin1, + global_system_variables.character_set_client->name, + thd->variables.character_set_client->name)) + { + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.character_set_results= + global_system_variables.character_set_results; + } + else + { + thd->variables.character_set_results= + thd->variables.collation_connection= + thd->variables.character_set_client; + } +} + + /* Perform handshake, authorize client and update thd ACL variables. SYNOPSIS @@ -809,33 +840,7 @@ static int check_connection(THD *thd) thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - /* - Use server character set and collation if - - opt_character_set_client_handshake is not set - - client has not specified a character set - - client character set is the same as the servers - - client character set doesn't exists in server - */ - if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= - get_charset((uint) net->read_pos[8], MYF(0))) || - !my_strcasecmp(&my_charset_latin1, - global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) - { - thd->variables.character_set_client= - global_system_variables.character_set_client; - thd->variables.collation_connection= - global_system_variables.collation_connection; - thd->variables.character_set_results= - global_system_variables.character_set_results; - } - else - { - thd->variables.character_set_results= - thd->variables.collation_connection= - thd->variables.character_set_client; - } + thd_init_client_charset(thd, (uint) net->read_pos[8]); thd->update_charset(); end= (char*) net->read_pos+32; } From 4259e697985777b8b4ac40a92d987e221151b7c4 Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Tue, 20 Jun 2006 19:18:34 +0200 Subject: [PATCH 05/74] ndb_dd_advance2 test --- mysql-test/r/ndb_dd_advance2.result | 200 ++++++++++++++-------------- mysql-test/t/ndb_dd_advance2.test | 2 +- 2 files changed, 101 insertions(+), 101 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result index 2f624105aeb..00490fbc32b 100644 --- a/mysql-test/r/ndb_dd_advance2.result +++ b/mysql-test/r/ndb_dd_advance2.result @@ -636,106 +636,106 @@ a5 root@localhost SELECT * from test.t1 order by a1; a1 a2 a3 
a4 a5 -1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-13 root@localhost -2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-13 root@localhost -3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-13 root@localhost -4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-13 root@localhost -5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-13 root@localhost -6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-13 root@localhost -7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-13 root@localhost -8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-13 root@localhost -9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-13 root@localhost -10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-13 root@localhost -11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-13 root@localhost -12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-13 root@localhost -13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-13 root@localhost -14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-13 root@localhost -15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-13 root@localhost -16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-13 root@localhost -17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-13 root@localhost -18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-13 root@localhost -19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-13 root@localhost -20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-13 root@localhost -21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-13 root@localhost -22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-13 root@localhost -23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-13 root@localhost -24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-13 root@localhost -25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-13 root@localhost -26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-13 root@localhost -27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-13 root@localhost -28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-13 root@localhost -29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-13 root@localhost -30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-13 root@localhost -31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-13 root@localhost -32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-13 root@localhost -33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-13 root@localhost -34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-13 root@localhost -35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-13 root@localhost -36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-13 root@localhost -37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-13 root@localhost -38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-13 root@localhost -39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-13 root@localhost -40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-13 root@localhost -41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-13 root@localhost -42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-13 root@localhost -43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-13 root@localhost -44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-13 root@localhost -45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-13 root@localhost -46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-13 root@localhost -47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-13 root@localhost -48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-13 root@localhost -49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-13 root@localhost -50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-13 root@localhost -51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-13 root@localhost -52 
aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-13 root@localhost -53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-13 root@localhost -54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-13 root@localhost -55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-13 root@localhost -56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-13 root@localhost -57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-13 root@localhost -58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-13 root@localhost -59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-13 root@localhost -60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-13 root@localhost -61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-13 root@localhost -62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-13 root@localhost -63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-13 root@localhost -64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-13 root@localhost -65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-13 root@localhost -66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-13 root@localhost -67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-13 root@localhost -68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-13 root@localhost -69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-13 root@localhost -70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-13 root@localhost -71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-13 root@localhost -72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-13 root@localhost -73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-13 root@localhost -74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-13 root@localhost -75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-13 root@localhost -76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-13 root@localhost -77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-13 root@localhost -78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-13 root@localhost -79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-13 root@localhost -80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-13 root@localhost -81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-13 root@localhost -82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-13 root@localhost -83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-13 root@localhost -84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-13 root@localhost -85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-13 root@localhost -86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-13 root@localhost -87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-13 root@localhost -88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-13 root@localhost -89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-13 root@localhost -90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-13 root@localhost -91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-13 root@localhost -92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-13 root@localhost -93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-13 root@localhost -94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-13 root@localhost -95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-13 root@localhost -96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-13 root@localhost -97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-13 root@localhost -98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-13 root@localhost -99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-13 root@localhost -100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-13 root@localhost +1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-20 root@localhost +2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-20 
root@localhost +3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-20 root@localhost +4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-20 root@localhost +5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-20 root@localhost +6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-20 root@localhost +7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-20 root@localhost +8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-20 root@localhost +9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-20 root@localhost +10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-20 root@localhost +11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-20 root@localhost +12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-20 root@localhost +13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-20 root@localhost +14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-20 root@localhost +15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-20 root@localhost +16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-20 root@localhost +17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-20 root@localhost +18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-20 root@localhost +19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-20 root@localhost +20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-20 root@localhost +21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-20 root@localhost +22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-20 root@localhost +23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-20 root@localhost +24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-20 root@localhost +25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-20 root@localhost +26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-20 root@localhost +27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-20 root@localhost +28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-20 root@localhost +29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-20 root@localhost +30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-20 root@localhost +31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-20 root@localhost +32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-20 root@localhost +33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-20 root@localhost +34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-20 root@localhost +35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-20 root@localhost +36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-20 root@localhost +37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-20 root@localhost +38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-20 root@localhost +39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-20 root@localhost +40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-20 root@localhost +41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-20 root@localhost +42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-20 root@localhost +43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-20 root@localhost +44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-20 root@localhost +45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-20 root@localhost +46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-20 root@localhost +47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-20 root@localhost +48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-20 root@localhost +49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-20 root@localhost +50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-20 root@localhost +51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-20 root@localhost +52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-20 root@localhost +53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-20 
root@localhost +54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-20 root@localhost +55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-20 root@localhost +56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-20 root@localhost +57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-20 root@localhost +58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-20 root@localhost +59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-20 root@localhost +60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-20 root@localhost +61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-20 root@localhost +62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-20 root@localhost +63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-20 root@localhost +64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-20 root@localhost +65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-20 root@localhost +66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-20 root@localhost +67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-20 root@localhost +68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-20 root@localhost +69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-20 root@localhost +70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-20 root@localhost +71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-20 root@localhost +72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-20 root@localhost +73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-20 root@localhost +74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-20 root@localhost +75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-20 root@localhost +76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-20 root@localhost +77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-20 root@localhost +78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-20 root@localhost +79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-20 root@localhost +80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-20 root@localhost +81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-20 root@localhost +82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-20 root@localhost +83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-20 root@localhost +84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-20 root@localhost +85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-20 root@localhost +86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-20 root@localhost +87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-20 root@localhost +88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-20 root@localhost +89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-20 root@localhost +90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-20 root@localhost +91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-20 root@localhost +92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-20 root@localhost +93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-20 root@localhost +94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-20 root@localhost +95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-20 root@localhost +96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-20 root@localhost +97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-20 root@localhost +98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-20 root@localhost +99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-20 root@localhost +100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-20 root@localhost DROP TABLE test.t1; ALTER TABLESPACE ts1 DROP DATAFILE './table_space1/datafile.dat' diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test index 01f2d3d12b5..c10a009c224 100755 --- a/mysql-test/t/ndb_dd_advance2.test 
+++ b/mysql-test/t/ndb_dd_advance2.test @@ -694,7 +694,7 @@ select length(@x0),length(@b2),length(@d2) from dual; disable_query_log; while ($1) { - eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", NOW(), USER()); + eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER()); dec $1; } enable_query_log; From 7d83b7e48a83ad37b18dad3ac1f2cc6e0318fd94 Mon Sep 17 00:00:00 2001 From: "joerg@mysql.com" <> Date: Thu, 22 Jun 2006 14:28:05 +0200 Subject: [PATCH 06/74] Improved fix for bug#18516 (also 19353): 1) Rename the old shell tool "mysql_upgrade", to avoid a name collision. 2) Improve the spec file, to explicitly use a temporary socket. --- scripts/Makefile.am | 6 ++--- ...ysql_upgrade.sh => mysql_upgrade_shell.sh} | 0 support-files/mysql.spec.sh | 22 +++++++++++++++---- 3 files changed, 21 insertions(+), 7 deletions(-) rename scripts/{mysql_upgrade.sh => mysql_upgrade_shell.sh} (100%) diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 0f68b484f41..a339ebc5b8f 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -32,7 +32,7 @@ bin_SCRIPTS = @server_scripts@ \ mysqldumpslow \ mysql_explain_log \ mysql_tableinfo \ - mysql_upgrade \ + mysql_upgrade_shell \ mysqld_multi \ mysql_create_system_tables @@ -60,7 +60,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \ mysql_explain_log.sh \ mysqld_multi.sh \ mysql_tableinfo.sh \ - mysql_upgrade.sh \ + mysql_upgrade_shell.sh \ mysqld_safe.sh \ mysql_create_system_tables.sh @@ -89,7 +89,7 @@ CLEANFILES = @server_scripts@ \ mysqldumpslow \ mysql_explain_log \ mysql_tableinfo \ - mysql_upgrade \ + mysql_upgrade_shell \ mysqld_multi \ make_win_src_distribution \ mysql_create_system_tables diff --git a/scripts/mysql_upgrade.sh b/scripts/mysql_upgrade_shell.sh similarity index 100% rename from scripts/mysql_upgrade.sh rename to scripts/mysql_upgrade_shell.sh diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index abd29b6014a..2f66d64b289 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -490,11 +490,19 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir # So ensure the server is isolated as much as possible, and start it so that # passwords are not checked. # See the related change in the start script "/etc/init.d/mysql". -chmod 700 $mysql_datadir -%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables -%{_bindir}/mysql_upgrade +if type mktemp >/dev/null 2>&1 +then + mysql_tmp_sockdir=`mktemp -dt` +else + PID=$$ + mysql_tmp_sockdir=/tmp/mysql-$PID + ( umask 077 ; mkdir $mysql_tmp_sockdir ) +fi +chown %{mysqld_user}:%{mysqld_group} $mysql_tmp_sockdir +%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables --socket=$mysql_tmp_sockdir/upgrade.sock +%{_bindir}/mysql_upgrade --socket=$mysql_tmp_sockdir/upgrade.sock %{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables -chmod 755 $mysql_datadir +rm -fr $mysql_tmp_sockdir # Change permissions again to fix any new files. chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir @@ -732,6 +740,12 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Thu Jun 22 2006 Joerg Bruehe + +- Close a gap of the previous version by explicitly using + a newly created temporary directory for the socket to be used + in the "mysql_upgrade" operation, overriding any local setting. 
+
 * Tue Jun 20 2006 Joerg Bruehe
 - To run "mysql_upgrade", we need a running server;

From 36cea7d4fe44bcaffbd434e143b3a72ea62b98fc Mon Sep 17 00:00:00 2001
From: "holyfoot@deer.(none)" <>
Date: Thu, 22 Jun 2006 22:11:27 +0500
Subject: [PATCH 07/74] bug #10166 (Signed byte values cause data to be padded)

The AsBinary function used to return the VARCHAR data type with binary
collation. This can cause problems for clients that treat such data
differently from the BLOB type, so AsBinary now returns BLOB.

---
 mysql-test/r/gis.result | 10 ++++++++++
 mysql-test/t/gis.test | 7 +++++++
 sql/item_geofunc.h | 2 ++
 3 files changed, 19 insertions(+)

diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result
index bf2f3e2bf03..f7066e7edca 100644
--- a/mysql-test/r/gis.result
+++ b/mysql-test/r/gis.result
@@ -661,3 +661,13 @@ POINT(10 10)
 select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440))));
 (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440))))
 POINT(10 10)
+create table t1 (g GEOMETRY);
+select * from t1;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def test t1 t1 g g 255 4294967295 0 Y 144 0 63
+g
+select asbinary(g) from t1;
+Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr
+def asbinary(g) 252 8192 0 Y 128 0 63
+asbinary(g)
+drop table t1;
diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test
index 3eb17f3a484..b66b97c2c41 100644
--- a/mysql-test/t/gis.test
+++ b/mysql-test/t/gis.test
@@ -364,3 +364,10 @@ select (asWKT(geomfromwkb((0x000000000140240000000000004024000000000000))));
 select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440))));

 # End of 4.1 tests
+
+--enable_metadata
+create table t1 (g GEOMETRY);
+select * from t1;
+select asbinary(g) from t1;
+--disable_metadata
+drop table t1;
diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h
index 5f060416ff3..a466b606dc1 100644
--- a/sql/item_geofunc.h
+++ b/sql/item_geofunc.h
@@ -32,6 +32,7 @@ public:
   Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {}
   Item_geometry_func(List &list) :Item_str_func(list) {}
   void fix_length_and_dec();
+  enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; }
 };

 class Item_func_geometry_from_text: public Item_geometry_func
@@ -67,6 +68,7 @@ public:
   Item_func_as_wkb(Item *a): Item_geometry_func(a) {}
   const char *func_name() const { return "aswkb"; }
   String *val_str(String *);
+  enum_field_types field_type() const { return MYSQL_TYPE_BLOB; }
 };

 class Item_func_geometry_type: public Item_str_func

From 671a1be31bd701202c6ddb48cb4538562780741e Mon Sep 17 00:00:00 2001
From: "joerg@mysql.com" <>
Date: Fri, 23 Jun 2006 12:17:11 +0200
Subject: [PATCH 08/74] The binary "mysql_upgrade" must be included in the distribution.
(bug#18516 + 19353) --- scripts/make_binary_distribution.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 36c941ef6aa..74b1993882e 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -134,7 +134,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \ client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \ client/mysqldump$BS client/mysqlimport$BS \ client/mysqltest$BS client/mysqlcheck$BS \ - client/mysqlbinlog$BS \ + client/mysqlbinlog$BS client/mysql_upgrade$BS \ tests/mysql_client_test$BS \ libmysqld/examples/mysql_client_test_embedded$BS \ libmysqld/examples/mysqltest_embedded$BS \ From d672814403017c4b163e4e0120c52673c918d0c4 Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Sat, 24 Jun 2006 01:21:08 +0200 Subject: [PATCH 09/74] updated test case ndb_dd_advance --- mysql-test/r/ndb_dd_advance.result | 86 +++++++++++++++--------------- mysql-test/t/ndb_dd_advance.test | 14 ++--- 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result index 7e82f6db97d..1586508cc88 100644 --- a/mysql-test/r/ndb_dd_advance.result +++ b/mysql-test/r/ndb_dd_advance.result @@ -341,12 +341,12 @@ t2 CREATE TABLE `t2` ( ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 INSERT INTO t1 VALUES (1,1); INSERT INTO t1 VALUES (2,2); -SELECT * FROM t1; +SELECT * FROM t1 order by a; a b 1 1 2 2 INSERT INTO t2(a,b) SELECT * FROM t1; -SELECT * FROM t2; +SELECT * FROM t2 order by a; a b 1 1 2 2 @@ -355,7 +355,7 @@ TRUNCATE t2; INSERT INTO t2 VALUES (3,3); INSERT INTO t2 VALUES (4,4); INSERT INTO t1(a,b) SELECT * FROM t2; -SELECT * FROM t1; +SELECT * FROM t1 order by a; a b 3 3 4 4 @@ -704,50 +704,50 @@ t2 CREATE TABLE `t2` ( KEY `b3` (`b3`), KEY `b8` (`b8`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SELECT * FROM test.t1; +SELECT * FROM test.t1 order by a1; a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 -19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data -12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data -5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data -14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data -11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data 1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data -13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data 2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data -4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data -8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data -7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data -9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data 3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data -10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data -20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data -18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data -15 
16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data 6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data -17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data 16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data -SELECT * FROM test.t2; +17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data +19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data +20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data +SELECT * FROM test.t2 order by b1; b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 -19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data -12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data +4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data 5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data +6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data +7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data +8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data +9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data +10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data +11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data +12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data +13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data +14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data +15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data +16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data +17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data +18 19 3000000016 aaa16 35.2 
04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data +19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data +20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data 21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data 22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data -14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data -11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data -13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data -4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data -8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data -7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data -9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data -3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data -10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data -20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data -18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data -15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data -6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data -17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data -16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; COUNT(a1) a1 COUNT(a1)*a1 1 1 1 @@ -817,14 +817,14 @@ a c drop table test.t1; create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); -select * from test.t1 where a >= '1'; +select * from test.t1 where a >= '1' order by a; a 1 +1 +1 +2 2 3 -1 -1 -2 3 select distinct a from test.t1 order by a desc; a @@ -853,11 +853,11 @@ INSERT INTO test.t1 (email, infoID, dateentered) VALUES INSERT INTO test.t2(infoID, shipcode) VALUES (1, 'Z001'), (2, 'R002'); -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email; email shipcode test1@testdomain.com Z001 -test2@testdomain.com R002 test2@testdomain.com Z001 +test2@testdomain.com R002 test3@testdomain.com Z001 SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; email diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test index 5938ea31c73..2579c3ea1d0 100755 --- a/mysql-test/t/ndb_dd_advance.test +++ b/mysql-test/t/ndb_dd_advance.test @@ -236,9 +236,9 @@ ENGINE =NDB; INSERT INTO t1 VALUES (1,1); INSERT INTO t1 VALUES (2,2); - SELECT * FROM t1; + SELECT * FROM t1 order by a; INSERT INTO t2(a,b) SELECT * FROM t1; - SELECT * FROM t2; + SELECT * FROM t2 order by a; ### Select from disk into memory table ### @@ -247,7 +247,7 @@ ENGINE =NDB; INSERT INTO t2 VALUES (3,3); INSERT INTO t2 VALUES (4,4); INSERT INTO t1(a,b) SELECT * FROM t2; - SELECT * FROM t1; + SELECT * FROM t1 order by a; DROP TABLE 
t1, t2;
@@ -463,8 +463,8 @@ while ($1)
 }
 enable_query_log;
-SELECT * FROM test.t1;
-SELECT * FROM test.t2;
+SELECT * FROM test.t1 order by a1;
+SELECT * FROM test.t2 order by b1;
 SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1;
 SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2;
@@ -482,7 +482,7 @@ drop table test.t1;
 create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
 insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3');
-select * from test.t1 where a >= '1';
+select * from test.t1 where a >= '1' order by a;
 select distinct a from test.t1 order by a desc;
 select distinct a from test.t1 where a >= '1' order by a desc;
 select distinct a from test.t1 where a >= '1' order by a asc;
@@ -502,7 +502,7 @@ INSERT INTO test.t2(infoID, shipcode) VALUES
 (1, 'Z001'),
 (2, 'R002');
-SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID;
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email;
 SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC;
 SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC;
 drop table test.t1,test.t2;

From d27a15a81c8806ff6d41ccd0303909f0c5a9f174 Mon Sep 17 00:00:00 2001
From: "ingo@mysql.com" <>
Date: Mon, 26 Jun 2006 19:14:35 +0200
Subject: [PATCH 10/74] Bug#16986 - Deadlock condition with MyISAM tables

Addendum fixes after changing the condition variable for the global
read lock. The stress test suite revealed some deadlocks. Some were
related to the new condition variable (COND_global_read_lock) and some
were general problems with the global read lock.

It is now necessary to signal COND_global_read_lock whenever
COND_refresh is signalled.

Before every operation that requires a write lock, we must wait for any
pending global read lock to be released. However, we must not wait if
the thread already holds table locks through LOCK TABLES. After setting
a global read lock, a thread waits until all write locks are released.
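The signalling rule can be reduced to a stripped-down pthreads sketch.
Only the variable and function names below echo the server; everything
else is illustrative and not taken from the source:

#include <pthread.h>
#include <stdio.h>

/* Two waiter groups, each using its own mutex/condition pair for the
   same event, as with COND_refresh (used with LOCK_open) and
   COND_global_read_lock (used with LOCK_global_read_lock). */
static pthread_mutex_t LOCK_open= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t COND_refresh= PTHREAD_COND_INITIALIZER;
static pthread_mutex_t LOCK_global_read_lock= PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t COND_global_read_lock= PTHREAD_COND_INITIALIZER;
static int something_happened= 0;

/* Counterpart of broadcast_refresh(): a state change must wake BOTH
   groups, or a waiter on the other pair sleeps forever. */
static void broadcast_refresh(void)
{
  pthread_cond_broadcast(&COND_refresh);
  pthread_cond_broadcast(&COND_global_read_lock);
}

static void *refresh_waiter(void *arg)
{
  pthread_mutex_lock(&LOCK_open);
  while (!something_happened)
    pthread_cond_wait(&COND_refresh, &LOCK_open);
  pthread_mutex_unlock(&LOCK_open);
  puts("refresh waiter woke up");
  return arg;
}

static void *global_read_lock_waiter(void *arg)
{
  pthread_mutex_lock(&LOCK_global_read_lock);
  while (!something_happened)
    pthread_cond_wait(&COND_global_read_lock, &LOCK_global_read_lock);
  pthread_mutex_unlock(&LOCK_global_read_lock);
  puts("global read lock waiter woke up");
  return arg;
}

int main(void)
{
  pthread_t t1, t2;
  pthread_create(&t1, NULL, refresh_waiter, NULL);
  pthread_create(&t2, NULL, global_read_lock_waiter, NULL);

  /* Set the flag under both mutexes, then wake both groups. */
  pthread_mutex_lock(&LOCK_open);
  pthread_mutex_lock(&LOCK_global_read_lock);
  something_happened= 1;
  pthread_mutex_unlock(&LOCK_global_read_lock);
  pthread_mutex_unlock(&LOCK_open);
  broadcast_refresh();

  pthread_join(t1, NULL);
  pthread_join(t2, NULL);
  return 0;
}

Dropping either broadcast in broadcast_refresh() leaves one of the two
waiters asleep forever, which is exactly the class of deadlock the
stress suite hit.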
--- mysql-test/r/lock_multi.result | 15 +++++ mysql-test/t/lock_multi.test | 50 ++++++++++++++++ mysys/thr_lock.c | 2 + sql/lock.cc | 44 ++++++++++++-- sql/mysql_priv.h | 1 + sql/sql_base.cc | 14 ++--- sql/sql_handler.cc | 11 ++-- sql/sql_insert.cc | 26 +-------- sql/sql_parse.cc | 102 +++++++++++++++++++++++++++------ sql/sql_table.cc | 9 +-- 10 files changed, 208 insertions(+), 66 deletions(-) diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result index 2188d58e526..c80108f723a 100644 --- a/mysql-test/r/lock_multi.result +++ b/mysql-test/r/lock_multi.result @@ -67,6 +67,21 @@ Select_priv N use test; use test; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; + FLUSH TABLES WITH READ LOCK; +CREATE TABLE t2 (c1 int); +UNLOCK TABLES; +UNLOCK TABLES; +DROP TABLE t1, t2; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; + FLUSH TABLES WITH READ LOCK; +CREATE TABLE t2 AS SELECT * FROM t1; +ERROR HY000: Table 't2' was not locked with LOCK TABLES +UNLOCK TABLES; +UNLOCK TABLES; +DROP TABLE t1; create table t1 (f1 int(12) unsigned not null auto_increment, primary key(f1)) engine=innodb; lock tables t1 write; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; // diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index 905d0699e6a..627c33b3d82 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -142,6 +142,7 @@ disconnect con2; --error ER_DB_DROP_EXISTS DROP DATABASE mysqltest_1; +# # Bug#16986 - Deadlock condition with MyISAM tables # connection locker; @@ -170,6 +171,55 @@ connection locker; use test; # connection default; +# +# Test if CREATE TABLE with LOCK TABLE deadlocks. +# +connection writer; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; +# +# This waits until t1 is unlocked. +connection locker; +send FLUSH TABLES WITH READ LOCK; +--sleep 1 +# +# This must not block. +connection writer; +CREATE TABLE t2 (c1 int); +UNLOCK TABLES; +# +# This awakes now. +connection locker; +reap; +UNLOCK TABLES; +# +connection default; +DROP TABLE t1, t2; +# +# Test if CREATE TABLE SELECT with LOCK TABLE deadlocks. +# +connection writer; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; +# +# This waits until t1 is unlocked. +connection locker; +send FLUSH TABLES WITH READ LOCK; +--sleep 1 +# +# This must not block. +connection writer; +--error 1100 +CREATE TABLE t2 AS SELECT * FROM t1; +UNLOCK TABLES; +# +# This awakes now. +connection locker; +reap; +UNLOCK TABLES; +# +connection default; +DROP TABLE t1; # # Bug #17264: MySQL Server freeze diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c index f5a8b618949..51df50a4926 100644 --- a/mysys/thr_lock.c +++ b/mysys/thr_lock.c @@ -204,6 +204,8 @@ static void check_locks(THR_LOCK *lock, const char *where, { if ((int) data->type == (int) TL_READ_NO_INSERT) count++; + /* Protect against infinite loop. 
*/ + DBUG_ASSERT(count <= lock->read_no_write_count); } if (count != lock->read_no_write_count) { diff --git a/sql/lock.cc b/sql/lock.cc index 71384fe7fc6..97a080c5634 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -905,7 +905,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list) if (table_list->table) { hash_delete(&open_cache, (byte*) table_list->table); - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } } @@ -997,9 +997,9 @@ end: (default 0, which will unlock all tables) NOTES - One must have a lock on LOCK_open when calling this - This function will send a COND_refresh signal to inform other threads - that the name locks are removed + One must have a lock on LOCK_open when calling this. + This function will broadcast refresh signals to inform other threads + that the name locks are removed. RETURN 0 ok @@ -1013,7 +1013,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list, table != last_table; table= table->next_local) unlock_table_name(thd,table); - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } @@ -1304,3 +1304,37 @@ bool make_global_read_lock_block_commit(THD *thd) } +/* + Broadcast COND_refresh and COND_global_read_lock. + + SYNOPSIS + broadcast_refresh() + void No parameters. + + DESCRIPTION + Due to a bug in a threading library it could happen that a signal + did not reach its target. A condition for this was that the same + condition variable was used with different mutexes in + pthread_cond_wait(). Some time ago we changed LOCK_open to + LOCK_global_read_lock in global read lock handling. So COND_refresh + was used with LOCK_open and LOCK_global_read_lock. + + We did now also change from COND_refresh to COND_global_read_lock + in global read lock handling. But now it is necessary to signal + both conditions at the same time. + + NOTE + When signalling COND_global_read_lock within the global read lock + handling, it is not necessary to also signal COND_refresh. 
+ + RETURN + void +*/ + +void broadcast_refresh(void) +{ + VOID(pthread_cond_broadcast(&COND_refresh)); + VOID(pthread_cond_broadcast(&COND_global_read_lock)); +} + + diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 6d39f2f7440..54f3d652af4 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1342,6 +1342,7 @@ void start_waiting_global_read_lock(THD *thd); bool make_global_read_lock_block_commit(THD *thd); bool set_protect_against_global_read_lock(void); void unset_protect_against_global_read_lock(void); +void broadcast_refresh(void); /* Lock based on name */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index ba9fa6f6c80..9adf3fe35c0 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -530,7 +530,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) if (found_old_table) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } if (!lock_in_use) VOID(pthread_mutex_unlock(&LOCK_open)); @@ -1035,7 +1035,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) } *prev=0; // Notify any 'refresh' threads - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); return start; } @@ -1577,7 +1577,7 @@ bool reopen_table(TABLE *table,bool locked) if (table->triggers) table->triggers->set_table(table); - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); error=0; end: @@ -1678,7 +1678,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) { my_afree((gptr) tables); } - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); *prev=0; DBUG_RETURN(error); } @@ -1715,7 +1715,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, } } if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -1807,7 +1807,7 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name) } *prev=0; if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); if (thd->locked_tables && thd->locked_tables->table_count == 0) { my_free((gptr) thd->locked_tables,MYF(0)); @@ -5249,7 +5249,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, Signal any thread waiting for tables to be freed to reopen their tables */ - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); DBUG_PRINT("info", ("Waiting for refresh signal")); if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed) { diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 1cd7778a053..0193d4d5355 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -254,7 +254,8 @@ err: DESCRIPTION Though this function takes a list of tables, only the first list entry - will be closed. Broadcasts a COND_refresh condition. + will be closed. + Broadcasts refresh if it closed the table. RETURN FALSE ok @@ -291,7 +292,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables) if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } VOID(pthread_mutex_unlock(&LOCK_open)); } @@ -608,7 +609,7 @@ err0: tables are closed (if MYSQL_HA_FLUSH_ALL) is set. If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set, all HANDLER tables marked for flush are closed. - Broadcasts a COND_refresh condition, for every table closed. 
+ Broadcasts refresh for every table closed. NOTE Since mysql_ha_flush() is called when the base table has to be closed, @@ -704,7 +705,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, MYSQL_HA_REOPEN_ON_USAGE mark for reopen. DESCRIPTION - Broadcasts a COND_refresh condition, for every table closed. + Broadcasts refresh if it closed the table. The caller must lock LOCK_open. RETURN @@ -742,7 +743,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } DBUG_RETURN(0); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 8ffc6f53a43..15c7f91ba83 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1349,18 +1349,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) */ if (! (tmp= find_handler(thd, table_list))) { - /* - Avoid that a global read lock steps in while we are creating the - new thread. It would block trying to open the table. Hence, the - DI thread and this thread would wait until after the global - readlock is gone. Since the insert thread needs to wait for a - global read lock anyway, we do it right now. Note that - wait_if_global_read_lock() sets a protection against a new - global read lock when it succeeds. This needs to be released by - start_waiting_global_read_lock(). - */ - if (wait_if_global_read_lock(thd, 0, 1)) - goto err; if (!(tmp=new delayed_insert())) { my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert)); @@ -1401,11 +1389,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_cond_wait(&tmp->cond_client,&tmp->mutex); } pthread_mutex_unlock(&tmp->mutex); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); thd->proc_info="got old table"; if (tmp->thd.killed) { @@ -1441,11 +1424,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) err1: thd->fatal_error(); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. 
- */ - start_waiting_global_read_lock(thd); err: pthread_mutex_unlock(&LOCK_delayed_create); DBUG_RETURN(0); // Continue with normal insert @@ -2676,7 +2654,7 @@ bool select_create::send_eof() hash_delete(&open_cache,(byte*) table); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } lock=0; table=0; @@ -2705,7 +2683,7 @@ void select_create::abort() quick_rm_table(table_type, create_table->db, create_table->table_name); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } else if (!create_info->table_existed) close_temporary_table(thd, create_table->db, create_table->table_name); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ba5c2ebf484..169fe219263 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2337,17 +2337,37 @@ static void reset_one_shot_variables(THD *thd) } -/**************************************************************************** -** mysql_execute_command -** Execute command saved in thd and current_lex->sql_command -****************************************************************************/ +/* + Execute command saved in thd and current_lex->sql_command + + SYNOPSIS + mysql_execute_command() + thd Thread handle + + IMPLEMENTATION + + Before every operation that can request a write lock for a table + wait if a global read lock exists. However do not wait if this + thread has locked tables already. No new locks can be requested + until the other locks are released. The thread that requests the + global read lock waits for write locked tables to become unlocked. + + Note that wait_if_global_read_lock() sets a protection against a new + global read lock when it succeeds. This needs to be released by + start_waiting_global_read_lock() after the operation. + + RETURN + FALSE OK + TRUE Error +*/ bool mysql_execute_command(THD *thd) { - bool res= FALSE; - int result= 0; - LEX *lex= thd->lex; + bool res= FALSE; + bool need_start_waiting= FALSE; // have protection against global read lock + int result= 0; + LEX *lex= thd->lex; /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ SELECT_LEX *select_lex= &lex->select_lex; /* first table of first SELECT_LEX */ @@ -2832,7 +2852,8 @@ mysql_execute_command(THD *thd) TABLE in the same way. That way we avoid that a new table is created during a gobal read lock. */ - if (wait_if_global_read_lock(thd, 0, 1)) + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) { res= 1; goto end_with_restore_list; @@ -2857,7 +2878,7 @@ mysql_execute_command(THD *thd) { update_non_unique_table_error(create_table, "CREATE", duplicate); res= 1; - goto end_with_restart_wait; + goto end_with_restore_list; } } /* If we create merge table, we have to test tables in merge, too */ @@ -2873,7 +2894,7 @@ mysql_execute_command(THD *thd) { update_non_unique_table_error(tab, "CREATE", duplicate); res= 1; - goto end_with_restart_wait; + goto end_with_restore_list; } } } @@ -2915,13 +2936,6 @@ mysql_execute_command(THD *thd) send_ok(thd); } -end_with_restart_wait: - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. 
- */ - start_waiting_global_read_lock(thd); - /* put tables back for PS rexecuting */ end_with_restore_list: lex->link_first_table_back(create_table, link_to_local); @@ -3039,6 +3053,13 @@ end_with_restore_list: goto error; else { + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_alter_table(thd, select_lex->db, lex->name, &lex->create_info, @@ -3296,6 +3317,14 @@ end_with_restore_list: break; /* Skip first table, which is the table we are inserting in */ select_lex->context.table_list= first_table->next_local; + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, lex->update_list, lex->value_list, lex->duplicates, lex->ignore); @@ -3319,6 +3348,14 @@ end_with_restore_list: select_lex->options|= SELECT_NO_UNLOCK; unit->set_limit(select_lex); + + if (! thd->locked_tables && + ! (need_start_waiting= ! wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + if (!(res= open_and_lock_tables(thd, all_tables))) { /* Skip first table, which is the table we are inserting in */ @@ -3395,6 +3432,14 @@ end_with_restore_list: break; DBUG_ASSERT(select_lex->offset_limit == 0); unit->set_limit(select_lex); + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + res = mysql_delete(thd, all_tables, select_lex->where, &select_lex->order_list, unit->select_limit_cnt, select_lex->options, @@ -3408,6 +3453,13 @@ end_with_restore_list: (TABLE_LIST *)thd->lex->auxilliary_table_list.first; multi_delete *result; + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + if ((res= multi_delete_precheck(thd, all_tables))) break; @@ -4974,10 +5026,22 @@ end_with_restore_list: if (lex->sql_command != SQLCOM_CALL && lex->sql_command != SQLCOM_EXECUTE && uc_update_queries[lex->sql_command]<2) thd->row_count_func= -1; - DBUG_RETURN(res || thd->net.report_error); + + goto end; error: - DBUG_RETURN(1); + res= TRUE; + +end: + if (need_start_waiting) + { + /* + Release the protection against the global read lock and wake + everyone, who might want to set a global read lock. 
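+      This pairs with the successful wait_if_global_read_lock() call
+      that set need_start_waiting earlier in this function.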
+    */
+    start_waiting_global_read_lock(thd);
+  }
+  DBUG_RETURN(res || thd->net.report_error);
 }
 
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 275cfbaa088..49f84aed966 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -1674,8 +1674,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
       my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias);
       DBUG_RETURN(TRUE);
     }
-  if (wait_if_global_read_lock(thd, 0, 1))
-    DBUG_RETURN(TRUE);
   VOID(pthread_mutex_lock(&LOCK_open));
   if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE))
   {
@@ -1743,7 +1741,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name,
 
 end:
   VOID(pthread_mutex_unlock(&LOCK_open));
-  start_waiting_global_read_lock(thd);
   thd->proc_info="After create";
   DBUG_RETURN(error);
 
@@ -1923,7 +1920,7 @@ void close_cached_table(THD *thd, TABLE *table)
   thd->open_tables=unlink_open_table(thd,thd->open_tables,table);
 
   /* When lock on LOCK_open is freed other threads can continue */
-  pthread_cond_broadcast(&COND_refresh);
+  broadcast_refresh();
   DBUG_VOID_RETURN;
 }
 
@@ -3894,7 +3891,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
     if (error)
     {
       VOID(pthread_mutex_unlock(&LOCK_open));
-      VOID(pthread_cond_broadcast(&COND_refresh));
+      broadcast_refresh();
       goto err;
     }
     thd->proc_info="end";
@@ -3904,7 +3901,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name,
     Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE);
     mysql_bin_log.write(&qinfo);
   }
-  VOID(pthread_cond_broadcast(&COND_refresh));
+  broadcast_refresh();
   VOID(pthread_mutex_unlock(&LOCK_open));
 #ifdef HAVE_BERKELEY_DB
   if (old_db_type == DB_TYPE_BERKELEY_DB)

From 8728fbbc6ccf34b448b5cea1f00969fac54b58c6 Mon Sep 17 00:00:00 2001
From: "ingo@mysql.com" <>
Date: Mon, 26 Jun 2006 20:57:18 +0200
Subject: [PATCH 11/74] Bug#16218 - Crash on insert delayed

Bug#17294 - INSERT DELAYED putting an \n before data
Bug#16611 - INSERT DELAYED corrupts data
Bug#13707 - Server crash with INSERT DELAYED on MyISAM table
Combined as Bug#16218.

INSERT DELAYED crashed in 5.0 on a table with a varchar that could be
NULL and was created pre-5.0 (Bugs 16218 and 13707). INSERT DELAYED
corrupted data in 5.0 on a table with varchar fields that was created
pre-5.0 (Bugs 17294 and 16611).

In the case of INSERT DELAYED the open table is copied from the
delayed insert thread to be able to create a record for the queue.
When copying the fields, a method was used that converted old varchar
fields to new varchar fields and did not set up some pointers into the
record buffer of the table.

The field conversion was responsible for the misinterpretation of the
record contents by the delayed insert thread. The wrong pointer setup
was responsible for the crashes.

For Bug 13707 (Server crash with INSERT DELAYED on MyISAM table) I
fixed the above-mentioned method to set up one of the pointers. For
Bug 16218 I set up the other pointers too.

But when looking at the corruptions I became aware that converting the
field type was entirely wrong for INSERT DELAYED. The copied table is
used to create a record that is to be sent to the delayed insert
thread, and that thread can interpret the record correctly only if all
field types are the same in both table objects.

So I revoked the fix for Bug 13707 and changed the new_field() method
so that it can suppress conversions.

No test case, as this is a migration problem: one needs to create a
table with 4.x and use it with 5.x. I added two test scripts to the
bug report.
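
A hedged sketch of the call contract this patch introduces (the
variable names are illustrative; the signature is the patched one):

    /*
      Copy a field for the delayed insert thread's local table.
      keep_type == TRUE clones the field as-is, without the old
      VARCHAR conversion, so both threads interpret the shared
      record buffer with identical field types.
    */
    Field *copy_field=
      org_field->new_field(client_thd->mem_root, copy,
                           /* keep_type */ TRUE);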
--- sql/field.cc | 31 ++++++++---------- sql/field.h | 7 ++-- sql/sql_insert.cc | 79 +++++++++++++++++++++++++++++++++++++++------- sql/sql_select.cc | 5 +-- sql/sql_trigger.cc | 3 +- sql/table.cc | 3 +- 6 files changed, 91 insertions(+), 37 deletions(-) diff --git a/sql/field.cc b/sql/field.cc index bdf84c588e1..ff858813ca6 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1515,7 +1515,8 @@ bool Field::optimize_range(uint idx, uint part) } -Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table) +Field *Field::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type __attribute__((unused))) { Field *tmp; if (!(tmp= (Field*) memdup_root(root,(char*) this,size_of()))) @@ -1540,7 +1541,7 @@ Field *Field::new_key_field(MEM_ROOT *root, struct st_table *new_table, uint new_null_bit) { Field *tmp; - if ((tmp= new_field(root, new_table))) + if ((tmp= new_field(root, new_table, table == new_table))) { tmp->ptr= new_ptr; tmp->null_ptr= new_null_ptr; @@ -6224,29 +6225,21 @@ uint Field_string::max_packed_col_length(uint max_length) } -Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table) +Field *Field_string::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type) { Field *new_field; - if (type() != MYSQL_TYPE_VAR_STRING || table == new_table) - return Field::new_field(root, new_table); + if (type() != MYSQL_TYPE_VAR_STRING || keep_type) + return Field::new_field(root, new_table, keep_type); /* Old VARCHAR field which should be modified to a VARCHAR on copy This is done to ensure that ALTER TABLE will convert old VARCHAR fields to now VARCHAR fields. */ - if ((new_field= new Field_varstring(field_length, maybe_null(), - field_name, new_table, charset()))) - { - /* - delayed_insert::get_local_table() needs a ptr copied from old table. - This is what other new_field() methods do too. The above method of - Field_varstring sets ptr to NULL. - */ - new_field->ptr= ptr; - } - return new_field; + return new Field_varstring(field_length, maybe_null(), + field_name, new_table, charset()); } /**************************************************************************** @@ -6738,9 +6731,11 @@ int Field_varstring::cmp_binary(const char *a_ptr, const char *b_ptr, } -Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table) +Field *Field_varstring::new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type) { - Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table); + Field_varstring *res= (Field_varstring*) Field::new_field(root, new_table, + keep_type); if (res) res->length_bytes= length_bytes; return res; diff --git a/sql/field.h b/sql/field.h index f4d27e46877..18fcd20c97b 100644 --- a/sql/field.h +++ b/sql/field.h @@ -211,7 +211,8 @@ public: */ virtual bool can_be_compared_as_longlong() const { return FALSE; } virtual void free() {} - virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table); + virtual Field *new_field(MEM_ROOT *root, struct st_table *new_table, + bool keep_type); virtual Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, uint new_null_bit); @@ -1033,7 +1034,7 @@ public: enum_field_types real_type() const { return FIELD_TYPE_STRING; } bool has_charset(void) const { return charset() == &my_charset_bin ? 
FALSE : TRUE; } - Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); }; @@ -1105,7 +1106,7 @@ public: enum_field_types real_type() const { return MYSQL_TYPE_VARCHAR; } bool has_charset(void) const { return charset() == &my_charset_bin ? FALSE : TRUE; } - Field *new_field(MEM_ROOT *root, struct st_table *new_table); + Field *new_field(MEM_ROOT *root, struct st_table *new_table, bool keep_type); Field *new_key_field(MEM_ROOT *root, struct st_table *new_table, char *new_ptr, uchar *new_null_ptr, uint new_null_bit); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 320b8e1df9d..1fdfb2c8cfa 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -17,6 +17,44 @@ /* Insert of records */ +/* + INSERT DELAYED + + Insert delayed is distinguished from a normal insert by lock_type == + TL_WRITE_DELAYED instead of TL_WRITE. It first tries to open a + "delayed" table (delayed_get_table()), but falls back to + open_and_lock_tables() on error and proceeds as normal insert then. + + Opening a "delayed" table means to find a delayed insert thread that + has the table open already. If this fails, a new thread is created and + waited for to open and lock the table. + + If accessing the thread succeeded, in + delayed_insert::get_local_table() the table of the thread is copied + for local use. A copy is required because the normal insert logic + works on a target table, but the other threads table object must not + be used. The insert logic uses the record buffer to create a record. + And the delayed insert thread uses the record buffer to pass the + record to the table handler. So there must be different objects. Also + the copied table is not included in the lock, so that the statement + can proceed even if the real table cannot be accessed at this moment. + + Copying a table object is not a trivial operation. Besides the TABLE + object there are the field pointer array, the field objects and the + record buffer. After copying the field objects, their pointers into + the record must be "moved" to point to the new record buffer. + + After this setup the normal insert logic is used. Only that for + delayed inserts write_delayed() is called instead of write_record(). + It inserts the rows into a queue and signals the delayed insert thread + instead of writing directly to the table. + + The delayed insert thread awakes from the signal. It locks the table, + inserts the rows from the queue, unlocks the table, and waits for the + next signal. It does normally live until a FLUSH TABLES or SHUTDOWN. + +*/ + #include "mysql_priv.h" #include "sp_head.h" #include "sql_trigger.h" @@ -1466,6 +1504,7 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) my_ptrdiff_t adjust_ptrs; Field **field,**org_field, *found_next_number_field; TABLE *copy; + DBUG_ENTER("delayed_insert::get_local_table"); /* First request insert thread to get a lock */ status=1; @@ -1489,31 +1528,47 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) } } + /* + Allocate memory for the TABLE object, the field pointers array, and + one record buffer of reclength size. Normally a table has three + record buffers of rec_buff_length size, which includes alignment + bytes. Since the table copy is used for creating one record only, + the other record buffers and alignment are unnecessary. 
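+
+    Layout of the single allocation, as set up just below (sketch):
+
+      [ TABLE copy ][ Field *field[fields + 1] ][ record, reclength bytes ]
+        ^ copy       ^ (Field**) (copy + 1)      ^ (byte*) (field + fields + 1)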
+ */ client_thd->proc_info="allocating local table"; copy= (TABLE*) client_thd->alloc(sizeof(*copy)+ (table->s->fields+1)*sizeof(Field**)+ table->s->reclength); if (!copy) goto error; + + /* Copy the TABLE object. */ *copy= *table; copy->s= ©->share_not_to_be_used; // No name hashing bzero((char*) ©->s->name_hash,sizeof(copy->s->name_hash)); /* We don't need to change the file handler here */ - field=copy->field=(Field**) (copy+1); - copy->record[0]=(byte*) (field+table->s->fields+1); - memcpy((char*) copy->record[0],(char*) table->record[0],table->s->reclength); + /* Assign the pointers for the field pointers array and the record. */ + field= copy->field= (Field**) (copy + 1); + copy->record[0]= (byte*) (field + table->s->fields + 1); + memcpy((char*) copy->record[0], (char*) table->record[0], + table->s->reclength); - /* Make a copy of all fields */ + /* + Make a copy of all fields. + The copied fields need to point into the copied record. This is done + by copying the field objects with their old pointer values and then + "move" the pointers by the distance between the original and copied + records. That way we preserve the relative positions in the records. + */ + adjust_ptrs= PTR_BYTE_DIFF(copy->record[0], table->record[0]); - adjust_ptrs=PTR_BYTE_DIFF(copy->record[0],table->record[0]); - - found_next_number_field=table->found_next_number_field; - for (org_field=table->field ; *org_field ; org_field++,field++) + found_next_number_field= table->found_next_number_field; + for (org_field= table->field; *org_field; org_field++, field++) { - if (!(*field= (*org_field)->new_field(client_thd->mem_root,copy))) - return 0; + if (!(*field= (*org_field)->new_field(client_thd->mem_root, copy, 1))) + DBUG_RETURN(0); (*field)->orig_table= copy; // Remove connection (*field)->move_field(adjust_ptrs); // Point at copy->record[0] if (*org_field == found_next_number_field) @@ -1540,14 +1595,14 @@ TABLE *delayed_insert::get_local_table(THD* client_thd) /* Adjust lock_count. This table object is not part of a lock. */ copy->lock_count= 0; - return copy; + DBUG_RETURN(copy); /* Got fatal error */ error: tables_in_use--; status=1; pthread_cond_signal(&cond); // Inform thread about abort - return 0; + DBUG_RETURN(0); } diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 465f41fa8de..d25f1291a7b 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8217,7 +8217,8 @@ Field* create_tmp_field_from_field(THD *thd, Field* org_field, org_field->field_name, table, org_field->charset()); else - new_field= org_field->new_field(thd->mem_root, table); + new_field= org_field->new_field(thd->mem_root, table, + table == org_field->table); if (new_field) { if (item) @@ -13072,7 +13073,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param, saved value */ Field *field= item->field; - item->result_field=field->new_field(thd->mem_root,field->table); + item->result_field=field->new_field(thd->mem_root,field->table, 1); char *tmp=(char*) sql_alloc(field->pack_length()+1); if (!tmp) goto err; diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index f943b014118..91910227ec7 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -736,7 +736,8 @@ bool Table_triggers_list::prepare_record1_accessors(TABLE *table) QQ: it is supposed that it is ok to use this function for field cloning... 
*/ - if (!(*old_fld= (*fld)->new_field(&table->mem_root, table))) + if (!(*old_fld= (*fld)->new_field(&table->mem_root, table, + table == (*fld)->table))) return 1; (*old_fld)->move_field((my_ptrdiff_t)(table->record[1] - table->record[0])); diff --git a/sql/table.cc b/sql/table.cc index 8e23bea2540..8f6b5ecf1eb 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -802,7 +802,8 @@ int openfrm(THD *thd, const char *name, const char *alias, uint db_stat, if (!(field->flags & BLOB_FLAG)) { // Create a new field field=key_part->field=field->new_field(&outparam->mem_root, - outparam); + outparam, + outparam == field->table); field->field_length=key_part->length; } } From c72a4dce143d734e9d0917e838ea930d06c8b16b Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Mon, 26 Jun 2006 22:33:46 +0200 Subject: [PATCH 12/74] updated test case ndb_dd_advance --- mysql-test/r/ndb_dd_advance.result | 4 ++-- mysql-test/t/ndb_dd_advance.test | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result index 1586508cc88..09fe75805d5 100644 --- a/mysql-test/r/ndb_dd_advance.result +++ b/mysql-test/r/ndb_dd_advance.result @@ -853,11 +853,11 @@ INSERT INTO test.t1 (email, infoID, dateentered) VALUES INSERT INTO test.t2(infoID, shipcode) VALUES (1, 'Z001'), (2, 'R002'); -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; email shipcode test1@testdomain.com Z001 -test2@testdomain.com Z001 test2@testdomain.com R002 +test2@testdomain.com Z001 test3@testdomain.com Z001 SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; email diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test index 2579c3ea1d0..e882ec794c1 100755 --- a/mysql-test/t/ndb_dd_advance.test +++ b/mysql-test/t/ndb_dd_advance.test @@ -502,7 +502,7 @@ INSERT INTO test.t2(infoID, shipcode) VALUES (1, 'Z001'), (2, 'R002'); -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; drop table test.t1,test.t2; From 117b76a562431090ca9f0b29702f921872065970 Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jun 2006 00:47:52 +0400 Subject: [PATCH 13/74] A fix and a test case for Bug#19022 "Memory bug when switching db during trigger execution" Bug#17199 "Problem when view calls function from another database." Bug#18444 "Fully qualified stored function names don't work correctly in SELECT statements" Documentation note: this patch introduces a change in behaviour of prepared statements. This patch adds a few new invariants with regard to how THD::db should be used. These invariants should be preserved in future: - one should never refer to THD::db by pointer and always make a deep copy (strmake, strdup) - one should never compare two databases by pointer, but use strncmp or my_strncasecmp - TABLE_LIST object table->db should be always initialized in the parser or by creator of the object. 
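
A hedged sketch of the first two invariants (the variable names are
illustrative):

    /* Deep copy; never alias the THD::db pointer: */
    char *db_copy= thd->strmake(thd->db, thd->db_length);

    /* Compare by value, never by pointer: */
    if (my_strcasecmp(system_charset_info, db1, db2) == 0)
      ; /* same database */

The third invariant is the one with user-visible effects, described
next.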
For prepared statements it means that if the current database is changed after a statement is prepared, the database that was current at prepare remains active. This also means that you can not prepare a statement that implicitly refers to the current database if the latter is not set. This is not documented, and therefore needs documentation. This is NOT a change in behavior for almost all SQL statements except: - ALTER TABLE t1 RENAME t2 - OPTIMIZE TABLE t1 - ANALYZE TABLE t1 - TRUNCATE TABLE t1 -- until this patch t1 or t2 could be evaluated at the first execution of prepared statement. CURRENT_DATABASE() still works OK and is evaluated at every execution of prepared statement. Note, that in stored routines this is not an issue as the default database is the database of the stored procedure and "use" statement is prohibited in stored routines. This patch makes obsolete the use of check_db_used (it was never used in the old code too) and all other places that check for table->db and assign it from THD::db if it's NULL, except the parser. How this patch was created: THD::{db,db_length} were replaced with a LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were manually checked and: - if the place uses thd->db by pointer, it was fixed to make a deep copy - if a place compared two db pointers, it was fixed to compare them by value (via strcmp/my_strcasecmp, whatever was approproate) Then this intermediate patch was used to write a smaller patch that does the same thing but without a rename. TODO in 5.1: - remove check_db_used - deploy THD::set_db in mysql_change_db See also comments to individual files. --- mysql-test/r/create.result | 2 +- mysql-test/r/ps.result | 105 +++++++++++++++++++ mysql-test/r/sp.result | 46 ++++++++ mysql-test/t/create.test | 2 +- mysql-test/t/ps.test | 118 +++++++++++++++++++++ mysql-test/t/sp.test | 46 ++++++++ sql/item_strfunc.cc | 4 +- sql/log_event.cc | 20 ++-- sql/slave.cc | 16 +-- sql/slave.h | 4 - sql/sp.cc | 107 +++++++++++-------- sql/sp.h | 14 +-- sql/sp_head.cc | 66 ++++-------- sql/sp_head.h | 10 -- sql/sql_class.h | 43 +++++++- sql/sql_db.cc | 11 +- sql/sql_insert.cc | 15 ++- sql/sql_lex.h | 5 + sql/sql_parse.cc | 210 ++++++++++++++++--------------------- sql/sql_table.cc | 3 +- sql/sql_trigger.cc | 9 +- sql/sql_udf.cc | 6 +- sql/sql_view.cc | 9 +- sql/sql_yacc.yy | 47 ++++++++- sql/tztime.cc | 6 +- 25 files changed, 634 insertions(+), 290 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 27a6c8a9d03..c5b77ea4925 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -607,7 +607,7 @@ create database mysqltest; use mysqltest; drop database mysqltest; create table test.t1 like x; -ERROR 42000: Incorrect database name 'NULL' +ERROR 3D000: No database selected drop table if exists test.t1; create database mysqltest; use mysqltest; diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index abebfc8cd93..3ce2f5169e2 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -1158,3 +1158,108 @@ Warnings: Error 1146 Table 'test.t4' doesn't exist deallocate prepare stmt; drop table t1, t2, t3; +create database mysqltest_long_database_name_to_thrash_heap; +use test; +create table t1 (i int); +prepare stmt from "alter table test.t1 rename t1"; +use mysqltest_long_database_name_to_thrash_heap; +execute stmt; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +prepare stmt from "alter table test.t1 rename t1"; +use test; +execute stmt; 
+show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +deallocate prepare stmt; +use mysqltest_long_database_name_to_thrash_heap; +prepare stmt_create from "create table t1 (i int)"; +prepare stmt_insert from "insert into t1 (i) values (1)"; +prepare stmt_update from "update t1 set i=2"; +prepare stmt_delete from "delete from t1 where i=2"; +prepare stmt_select from "select * from t1"; +prepare stmt_alter from "alter table t1 add column (b int)"; +prepare stmt_alter1 from "alter table t1 drop column b"; +prepare stmt_analyze from "analyze table t1"; +prepare stmt_optimize from "optimize table t1"; +prepare stmt_show from "show tables like 't1'"; +prepare stmt_truncate from "truncate table t1"; +prepare stmt_drop from "drop table t1"; +drop table t1; +use test; +execute stmt_create; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +use test; +execute stmt_insert; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +i +1 +execute stmt_update; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +i +2 +execute stmt_delete; +execute stmt_select; +i +execute stmt_alter; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +Field Type Null Key Default Extra +i int(11) YES NULL +b int(11) YES NULL +execute stmt_alter1; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +Field Type Null Key Default Extra +i int(11) YES NULL +execute stmt_analyze; +Table Op Msg_type Msg_text +mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date +execute stmt_optimize; +Table Op Msg_type Msg_text +mysqltest_long_database_name_to_thrash_heap.t1 optimize status Table is already up to date +execute stmt_show; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +execute stmt_truncate; +execute stmt_drop; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +drop database mysqltest_long_database_name_to_thrash_heap; +prepare stmt_create from "create table t1 (i int)"; +ERROR 3D000: No database selected +prepare stmt_insert from "insert into t1 (i) values (1)"; +ERROR 3D000: No database selected +prepare stmt_update from "update t1 set i=2"; +ERROR 3D000: No database selected +prepare stmt_delete from "delete from t1 where i=2"; +ERROR 3D000: No database selected +prepare stmt_select from "select * from t1"; +ERROR 3D000: No database selected +prepare stmt_alter from "alter table t1 add column (b int)"; +ERROR 3D000: No database selected +prepare stmt_alter1 from "alter table t1 drop column b"; +ERROR 3D000: No database selected +prepare stmt_analyze from "analyze table t1"; +ERROR 3D000: No database selected +prepare stmt_optimize from "optimize table t1"; +ERROR 3D000: No database selected +prepare stmt_show from "show tables like 't1'"; +ERROR 3D000: No database selected +prepare stmt_truncate from "truncate table t1"; +ERROR 3D000: No database selected +prepare stmt_drop from "drop table t1"; +ERROR 3D000: No database selected +create temporary table t1 (i int); +ERROR 3D000: No database selected +use test; diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index ff378f1f43b..a2b36b11a2e 100644 --- a/mysql-test/r/sp.result +++ 
b/mysql-test/r/sp.result @@ -4990,4 +4990,50 @@ CALL bug18037_p2()| DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +use test| +create table t3 (i int)| +insert into t3 values (1), (2)| +create database mysqltest1| +use mysqltest1| +create function bug17199() returns varchar(2) deterministic return 'ok'| +use test| +select *, mysqltest1.bug17199() from t3| +i mysqltest1.bug17199() +1 ok +2 ok +use mysqltest1| +create function bug18444(i int) returns int no sql deterministic return i + 1| +use test| +select mysqltest1.bug18444(i) from t3| +mysqltest1.bug18444(i) +2 +3 +drop database mysqltest1| +create database mysqltest1 charset=utf8| +create database mysqltest2 charset=utf8| +create procedure mysqltest1.p1() +begin +-- alters the default collation of database test +alter database character set koi8r; +end| +use mysqltest1| +call p1()| +show create database mysqltest1| +Database Create Database +mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */ +show create database mysqltest2| +Database Create Database +mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */ +alter database mysqltest1 character set utf8| +use mysqltest2| +call mysqltest1.p1()| +show create database mysqltest1| +Database Create Database +mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */ +show create database mysqltest2| +Database Create Database +mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */ +drop database mysqltest1| +drop database mysqltest2| +use test| drop table t1,t2; diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index e22c2b5c426..07edbf206fe 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -517,7 +517,7 @@ DROP TABLE t12913; create database mysqltest; use mysqltest; drop database mysqltest; ---error 1102 +--error ER_NO_DB_ERROR create table test.t1 like x; --disable_warnings drop table if exists test.t1; diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index e3f3e37cd4c..ff66b265fae 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -1146,4 +1146,122 @@ execute stmt; execute stmt; deallocate prepare stmt; drop table t1, t2, t3; + +# +# Bug#17199 "Table not found" error occurs if the query contains a call +# to a function from another database. +# Test prepared statements- related behaviour. +# +# +# ALTER TABLE RENAME and Prepared Statements: wrong DB name buffer was used +# in ALTER ... RENAME which caused memory corruption in prepared statements. +# No need to fix this problem in 4.1 as ALTER TABLE is not allowed in +# Prepared Statements in 4.1. +# +create database mysqltest_long_database_name_to_thrash_heap; +use test; +create table t1 (i int); +prepare stmt from "alter table test.t1 rename t1"; +use mysqltest_long_database_name_to_thrash_heap; +execute stmt; +show tables like 't1'; +prepare stmt from "alter table test.t1 rename t1"; +use test; +execute stmt; +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +deallocate prepare stmt; +# +# Check that a prepared statement initializes its current database at +# PREPARE, and then works correctly even if the current database has been +# changed. 
+# +use mysqltest_long_database_name_to_thrash_heap; +# Necessary for preparation of INSERT/UPDATE/DELETE to succeed +prepare stmt_create from "create table t1 (i int)"; +prepare stmt_insert from "insert into t1 (i) values (1)"; +prepare stmt_update from "update t1 set i=2"; +prepare stmt_delete from "delete from t1 where i=2"; +prepare stmt_select from "select * from t1"; +prepare stmt_alter from "alter table t1 add column (b int)"; +prepare stmt_alter1 from "alter table t1 drop column b"; +prepare stmt_analyze from "analyze table t1"; +prepare stmt_optimize from "optimize table t1"; +prepare stmt_show from "show tables like 't1'"; +prepare stmt_truncate from "truncate table t1"; +prepare stmt_drop from "drop table t1"; +# Drop the table that was used to prepare INSERT/UPDATE/DELETE: we will +# create a new one by executing stmt_create +drop table t1; +# Switch the current database +use test; +# Check that all prepared statements operate on the database that was +# active at PREPARE +execute stmt_create; +# should return empty set +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +use test; +execute stmt_insert; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_update; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_delete; +execute stmt_select; +execute stmt_alter; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_alter1; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_analyze; +execute stmt_optimize; +execute stmt_show; +execute stmt_truncate; +execute stmt_drop; +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +# +# Attempt a statement PREPARE when there is no current database: +# is expected to return an error. +# +drop database mysqltest_long_database_name_to_thrash_heap; +--error ER_NO_DB_ERROR +prepare stmt_create from "create table t1 (i int)"; +--error ER_NO_DB_ERROR +prepare stmt_insert from "insert into t1 (i) values (1)"; +--error ER_NO_DB_ERROR +prepare stmt_update from "update t1 set i=2"; +--error ER_NO_DB_ERROR +prepare stmt_delete from "delete from t1 where i=2"; +--error ER_NO_DB_ERROR +prepare stmt_select from "select * from t1"; +--error ER_NO_DB_ERROR +prepare stmt_alter from "alter table t1 add column (b int)"; +--error ER_NO_DB_ERROR +prepare stmt_alter1 from "alter table t1 drop column b"; +--error ER_NO_DB_ERROR +prepare stmt_analyze from "analyze table t1"; +--error ER_NO_DB_ERROR +prepare stmt_optimize from "optimize table t1"; +--error ER_NO_DB_ERROR +prepare stmt_show from "show tables like 't1'"; +--error ER_NO_DB_ERROR +prepare stmt_truncate from "truncate table t1"; +--error ER_NO_DB_ERROR +prepare stmt_drop from "drop table t1"; +# +# The above has automatically deallocated all our statements. +# +# Attempt to CREATE a temporary table when no DB used: it should fail +# This proves that no table can be used without explicit specification of +# its database if there is no current database. 
+# +--error ER_NO_DB_ERROR +create temporary table t1 (i int); +# +# Restore the old environemnt +# +use test; # End of 5.0 tests diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 1d21a5da187..c0dd785a8ce 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -5888,6 +5888,52 @@ DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +# +# Bug#17199: "Table not found" error occurs if the query contains a call +# to a function from another database. +# See also ps.test for an additional test case for this bug. +# +use test| +create table t3 (i int)| +insert into t3 values (1), (2)| +create database mysqltest1| +use mysqltest1| +create function bug17199() returns varchar(2) deterministic return 'ok'| +use test| +select *, mysqltest1.bug17199() from t3| +# +# Bug#18444: Fully qualified stored function names don't work correctly +# in select statements +# +use mysqltest1| +create function bug18444(i int) returns int no sql deterministic return i + 1| +use test| +select mysqltest1.bug18444(i) from t3| +drop database mysqltest1| +# +# Check that current database has no influence to a stored procedure +# +create database mysqltest1 charset=utf8| +create database mysqltest2 charset=utf8| +create procedure mysqltest1.p1() +begin +-- alters the default collation of database test + alter database character set koi8r; +end| +use mysqltest1| +call p1()| +show create database mysqltest1| +show create database mysqltest2| +alter database mysqltest1 character set utf8| +use mysqltest2| +call mysqltest1.p1()| +show create database mysqltest1| +show create database mysqltest2| +drop database mysqltest1| +drop database mysqltest2| +# +# Restore the old environemnt +use test| # # BUG#NNNN: New bug synopsis diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index ce9897afeed..3f728958df1 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1640,13 +1640,13 @@ String *Item_func_database::val_str(String *str) { DBUG_ASSERT(fixed == 1); THD *thd= current_thd; - if (!thd->db) + if (thd->db == NULL) { null_value= 1; return 0; } else - str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info); + str->copy(thd->db, thd->db_length, system_charset_info); return str; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 266d6b064bd..9d6b223f2d7 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1856,9 +1856,10 @@ end: don't suffer from these assignments to 0 as DROP TEMPORARY TABLE uses the db.table syntax. 
*/ - thd->db= thd->catalog= 0; // prevent db from being freed + thd->catalog= 0; + thd->reset_db(NULL, 0); // prevent db from being freed thd->query= 0; // just to be sure - thd->query_length= thd->db_length =0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); @@ -2845,7 +2846,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, TABLE_LIST tables; bzero((char*) &tables,sizeof(tables)); - tables.db = thd->db; + tables.db= thd->strmake(thd->db, thd->db_length); tables.alias = tables.table_name = (char*) table_name; tables.lock_type = TL_WRITE; tables.updating= 1; @@ -2940,7 +2941,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, ex.skip_lines = skip_lines; List field_list; thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables); - set_fields(thd->db, field_list, &thd->main_lex.select_lex.context); + set_fields(tables.db, field_list, &thd->main_lex.select_lex.context); thd->variables.pseudo_thread_id= thread_id; List set_fields; if (net) @@ -2987,11 +2988,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, error: thd->net.vio = 0; - char *save_db= thd->db; + const char *remember_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= thd->catalog= 0; + thd->catalog= 0; + thd->reset_db(NULL, 0); thd->query= 0; - thd->query_length= thd->db_length= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); if (thd->query_error) @@ -3008,7 +3010,7 @@ error: } slave_print_error(rli,sql_errno,"\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", - err, (char*)table_name, print_slave_db_safe(save_db)); + err, (char*)table_name, print_slave_db_safe(remember_db)); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); return 1; } @@ -3018,7 +3020,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", { slave_print_error(rli,ER_UNKNOWN_ERROR, "\ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", - (char*)table_name, print_slave_db_safe(save_db)); + (char*)table_name, print_slave_db_safe(remember_db)); return 1; } diff --git a/sql/slave.cc b/sql/slave.cc index caeefc1ad3c..d884e54d60d 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1581,9 +1581,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, // save old db in case we are creating in a different database save_db = thd->db; save_db_length= thd->db_length; - thd->db = (char*)db; - DBUG_ASSERT(thd->db != 0); - thd->db_length= strlen(thd->db); + DBUG_ASSERT(db != 0); + thd->reset_db((char*)db, strlen(db)); mysql_parse(thd, thd->query, packet_len); // run create table thd->db = save_db; // leave things the way the were before thd->db_length= save_db_length; @@ -3704,8 +3703,9 @@ err: sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety - thd->query_length= thd->db_length= 0; + thd->query= 0; // extra safety + thd->query_length= 0; + thd->reset_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (mysql) { @@ -3912,8 +3912,10 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ should already have done these assignments (each event which sets these variables is supposed to set them to 0 before terminating)). 
*/ - thd->query= thd->db= thd->catalog= 0; - thd->query_length= thd->db_length= 0; + thd->catalog= 0; + thd->reset_db(NULL, 0); + thd->query= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&rli->run_lock); diff --git a/sql/slave.h b/sql/slave.h index 040ce4eaf85..ebbb1e64df5 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -526,10 +526,6 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock, MASTER_INFO* mi, bool high_priority); -/* If fd is -1, dump to NET */ -int mysql_table_dump(THD* thd, const char* db, - const char* tbl_name, int fd = -1); - /* retrieve table from master and copy to slave*/ int fetch_master_table(THD* thd, const char* db_name, const char* table_name, MASTER_INFO* mi, MYSQL* mysql, bool overwrite); diff --git a/sql/sp.cc b/sql/sp.cc index cae7a56fa57..553465ebff8 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -404,7 +404,8 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { LEX *old_lex= thd->lex, newlex; String defstr; - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; ulong old_sql_mode= thd->variables.sql_mode; ha_rows old_select_limit= thd->variables.select_limit; @@ -450,9 +451,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, goto end; } - dbchanged= FALSE; - if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb), - 1, &dbchanged))) + if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged))) goto end; lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); @@ -462,14 +461,14 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { sp_head *sp= newlex.sphead; - if (dbchanged && (ret= mysql_change_db(thd, olddb, 1))) + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) goto end; delete sp; ret= SP_PARSE_ERROR; } else { - if (dbchanged && (ret= mysql_change_db(thd, olddb, 1))) + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) goto end; *sphp= newlex.sphead; (*sphp)->set_definer(&definer_user_name, &definer_host_name); @@ -505,15 +504,14 @@ db_create_routine(THD *thd, int type, sp_head *sp) int ret; TABLE *table; char definer[USER_HOST_BUFF_SIZE]; - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; DBUG_ENTER("db_create_routine"); DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length, sp->m_name.str)); - dbchanged= FALSE; - if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb), - 0, &dbchanged))) + if ((ret= sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged))) { ret= SP_NO_DB_ERROR; goto done; @@ -641,7 +639,7 @@ db_create_routine(THD *thd, int type, sp_head *sp) done: close_thread_tables(thd); if (dbchanged) - (void)mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); DBUG_RETURN(ret); } @@ -1814,49 +1812,76 @@ create_string(THD *thd, String *buf, } -// -// Utilities... -// + +/* + Change the current database if needed. + + SYNOPSIS + sp_use_new_db() + thd thread handle + + new_db new database name (a string and its length) + + old_db [IN] str points to a buffer where to store the old + database, length contains the size of the buffer + [OUT] if old db was not NULL, its name is copied + to the buffer pointed at by str and length is updated + accordingly. Otherwise str[0] is set to '\0' and length + is set to 0. 
The out parameter should be used only if + the database name has been changed (see dbchangedp). + + dbchangedp [OUT] is set to TRUE if the current database is changed, + FALSE otherwise. A database is not changed if the old + name is the same as the new one, both names are empty, + or an error has occurred. + + RETURN VALUE + 0 success + 1 access denied or out of memory (the error message is + set in THD) +*/ int -sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen, +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, bool no_access_check, bool *dbchangedp) { - bool changeit; + int ret; + static char empty_c_string[1]= {0}; /* used for not defined db */ DBUG_ENTER("sp_use_new_db"); - DBUG_PRINT("enter", ("newdb: %s", newdb)); + DBUG_PRINT("enter", ("newdb: %s", new_db.str)); - if (! newdb) - newdb= (char *)""; - if (thd->db && thd->db[0]) + /* + Set new_db to an empty string if it's NULL, because mysql_change_db + requires a non-NULL argument. + new_db.str can be NULL only if we're restoring the old database after + execution of a stored procedure and there were no current database + selected. The stored procedure itself must always have its database + initialized. + */ + if (new_db.str == NULL) + new_db.str= empty_c_string; + + if (thd->db) { - if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0) - changeit= 0; - else - { - changeit= 1; - strnmov(olddb, thd->db, olddblen); - } + old_db->length= (strmake(old_db->str, thd->db, old_db->length) - + old_db->str); } else - { // thd->db empty - if (newdb[0]) - changeit= 1; - else - changeit= 0; - olddb[0] = '\0'; + { + old_db->str[0]= '\0'; + old_db->length= 0; } - if (!changeit) + + /* Don't change the database if the new name is the same as the old one. */ + if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0) { *dbchangedp= FALSE; DBUG_RETURN(0); } - else - { - int ret= mysql_change_db(thd, newdb, no_access_check); - if (! ret) - *dbchangedp= TRUE; - DBUG_RETURN(ret); - } + ret= mysql_change_db(thd, new_db.str, no_access_check); + + *dbchangedp= ret == 0; + DBUG_RETURN(ret); } + diff --git a/sql/sp.h b/sql/sp.h index 2587a9b115a..631b8a87aa2 100644 --- a/sql/sp.h +++ b/sql/sp.h @@ -104,15 +104,15 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first); TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup); void close_proc_table(THD *thd, Open_tables_state *backup); -// -// Utilities... -// -// Do a "use newdb". The current db is stored at olddb. -// If newdb is the same as the current one, nothing is changed. -// dbchangedp is set to true if the db was actually changed. +/* + Do a "use new_db". The current db is stored at old_db. If new_db is the + same as the current one, nothing is changed. dbchangedp is set to true if + the db was actually changed. +*/ + int -sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax, +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, bool no_access_check, bool *dbchangedp); #endif /* _SP_H_ */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 3b29a841966..02eed207f55 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -376,24 +376,6 @@ sp_name::init_qname(THD *thd) m_name.length, m_name.str); } -sp_name * -sp_name_current_db_new(THD *thd, LEX_STRING name) -{ - sp_name *qname; - - if (! 
thd->db) - qname= new sp_name(name); - else - { - LEX_STRING db; - - db.length= strlen(thd->db); - db.str= thd->strmake(thd->db, db.length); - qname= new sp_name(db, name); - } - qname->init_qname(thd); - return qname; -} /* Check that the name 'ident' is ok. It's assumed to be an 'ident' @@ -504,27 +486,20 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) /* During parsing, we must use thd->mem_root */ MEM_ROOT *root= thd->mem_root; - /* We have to copy strings to get them into the right memroot */ - if (name) - { - m_db.length= name->m_db.length; - if (name->m_db.length == 0) - m_db.str= NULL; - else - m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); - m_name.length= name->m_name.length; - m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + DBUG_ASSERT(name); + /* Must be initialized in the parser */ + DBUG_ASSERT(name->m_db.str && name->m_db.length); - if (name->m_qname.length == 0) - name->init_qname(thd); - m_qname.length= name->m_qname.length; - m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); - } - else if (thd->db) - { - m_db.length= thd->db_length; - m_db.str= strmake_root(root, thd->db, m_db.length); - } + /* We have to copy strings to get them into the right memroot */ + m_db.length= name->m_db.length; + m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); + m_name.length= name->m_name.length; + m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + + if (name->m_qname.length == 0) + name->init_qname(thd); + m_qname.length= name->m_qname.length; + m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); if (m_param_begin && m_param_end) { @@ -933,7 +908,8 @@ bool sp_head::execute(THD *thd) { DBUG_ENTER("sp_head::execute"); - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; sp_rcontext *ctx; bool err_status= FALSE; @@ -980,10 +956,8 @@ sp_head::execute(THD *thd) m_first_instance->m_last_cached_sp == this) || (m_recursion_level + 1 == m_next_cached_sp->m_recursion_level)); - dbchanged= FALSE; if (m_db.length && - (err_status= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0, - &dbchanged))) + (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged))) goto done; if ((ctx= thd->spcont)) @@ -1155,10 +1129,10 @@ sp_head::execute(THD *thd) { /* No access check when changing back to where we came from. - (It would generate an error from mysql_change_db() when olddb=="") + (It would generate an error from mysql_change_db() when old_db=="") */ if (! 
thd->killed) - err_status|= mysql_change_db(thd, olddb, 1); + err_status|= mysql_change_db(thd, old_db.str, 1); } m_flags&= ~IS_INVOKED; DBUG_PRINT("info", @@ -1816,9 +1790,6 @@ sp_head::reset_thd_mem_root(THD *thd) (ulong) &mem_root, (ulong) &thd->mem_root)); free_list= thd->free_list; // Keep the old list thd->free_list= NULL; // Start a new one - /* Copy the db, since substatements will point to it */ - m_thd_db= thd->db; - thd->db= thd->strmake(thd->db, thd->db_length); m_thd= thd; DBUG_VOID_RETURN; } @@ -1834,7 +1805,6 @@ sp_head::restore_thd_mem_root(THD *thd) DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", (ulong) &mem_root, (ulong) &thd->mem_root)); thd->free_list= flist; // Restore the old one - thd->db= m_thd_db; // Restore the original db pointer thd->mem_root= m_thd_root; m_thd= NULL; DBUG_VOID_RETURN; diff --git a/sql/sp_head.h b/sql/sp_head.h index d5f49d8a964..073cca2cd12 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -61,13 +61,6 @@ public: */ LEX_STRING m_sroutines_key; - sp_name(LEX_STRING name) - : m_name(name) - { - m_db.str= m_qname.str= m_sroutines_key.str= 0; - m_db.length= m_qname.length= m_sroutines_key.length= 0; - } - sp_name(LEX_STRING db, LEX_STRING name) : m_db(db), m_name(name) { @@ -101,8 +94,6 @@ public: {} }; -sp_name * -sp_name_current_db_new(THD *thd, LEX_STRING name); bool check_routine_name(LEX_STRING name); @@ -355,7 +346,6 @@ private: MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root THD *m_thd; // Set if we have reset mem_root - char *m_thd_db; // Original thd->db pointer sp_pcontext *m_pcont; // Parse context List m_lex; // Temp. store for the other lex diff --git a/sql/sql_class.h b/sql/sql_class.h index 0ddba0e6f05..b63f88d7210 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1570,6 +1570,47 @@ public: void restore_sub_statement_state(Sub_statement_state *backup); void set_n_backup_active_arena(Query_arena *set, Query_arena *backup); void restore_active_arena(Query_arena *set, Query_arena *backup); + + /* + Initialize the current database from a NULL-terminated string with length + */ + void set_db(const char *new_db, uint new_db_len) + { + if (new_db) + { + /* Do not reallocate memory if current chunk is big enough. */ + if (db && db_length >= new_db_len) + memcpy(db, new_db, new_db_len+1); + else + { + safeFree(db); + db= my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)); + } + db_length= db ? new_db_len: 0; + } + } + void reset_db(char *new_db, uint new_db_len) + { + db= new_db; + db_length= new_db_len; + } + /* + Copy the current database to the argument. Use the current arena to + allocate memory for a deep copy: current database may be freed after + a statement is parsed but before it's executed. 
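+
+    A minimal usage sketch (the variables are illustrative):
+
+      char *db;
+      uint db_len;
+      if (thd->copy_db_to(&db, &db_len))
+        return TRUE;   /* no current database, ER_NO_DB_ERROR was sent */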
+ */ + bool copy_db_to(char **p_db, uint *p_db_length) + { + if (db == NULL) + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + return TRUE; + } + *p_db= strmake(db, db_length); + if (p_db_length) + *p_db_length= db_length; + return FALSE; + } }; @@ -1915,7 +1956,7 @@ typedef struct st_sort_buffer { class Table_ident :public Sql_alloc { - public: +public: LEX_STRING db; LEX_STRING table; SELECT_LEX_UNIT *sel; diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 4caa0076c60..348d43dc702 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -773,8 +773,7 @@ exit: { if (!(thd->slave_thread)) /* a slave thread will free it itself */ x_free(thd->db); - thd->db= 0; - thd->db_length= 0; + thd->reset_db(NULL, 0); } exit2: VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); @@ -1186,14 +1185,10 @@ end: { if (!(thd->slave_thread)) my_free(dbname, MYF(0)); - thd->db= NULL; - thd->db_length= 0; + thd->reset_db(NULL, 0); } else - { - thd->db= dbname; // THD::~THD will free this - thd->db_length= db_length; - } + thd->reset_db(dbname, db_length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!no_access_check) sctx->db_access= db_access; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 26f3b6f5faa..9979b484292 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -298,9 +298,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, { if (thd->locked_tables) { - if (find_locked_table(thd, - table_list->db ? table_list->db : thd->db, - table_list->table_name)) + DBUG_ASSERT(table_list->db); /* Must be set in the parser */ + if (find_locked_table(thd, table_list->db, table_list->table_name)) { my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0), table_list->table_name); @@ -1332,8 +1331,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("delayed_get_table"); - if (!table_list->db) - table_list->db=thd->db; + /* Must be set in the parser */ + DBUG_ASSERT(table_list->db); /* Find the thread which handles this table. */ if (!(tmp=find_handler(thd,table_list))) @@ -1372,15 +1371,15 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_mutex_lock(&LOCK_thread_count); thread_count++; pthread_mutex_unlock(&LOCK_thread_count); - if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) || - !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME)))) + tmp->thd.set_db(table_list->db, strlen(table_list->db)); + tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME)); + if (tmp->thd.db == NULL || tmp->thd.query == NULL) { delete tmp; my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); goto err1; } tmp->table_list= *table_list; // Needed to open table - tmp->table_list.db= tmp->thd.db; tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query; tmp->lock(); pthread_mutex_lock(&tmp->mutex); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 6b5c6ddca60..e736aa13fa2 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -758,6 +758,11 @@ public: *this= *state; } + /* + Direct addition to the list of query tables. + If you are using this function, you must ensure that the table + object, in particular table->db member, is initialized. 
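+
+    A hedged usage sketch, combining this with THD::copy_db_to() from
+    the same patch (the error handling shown is illustrative):
+
+      if (thd->copy_db_to(&table->db, &table->db_length))
+        return TRUE;              /* no current database selected */
+      lex->add_to_query_tables(table);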
+ */ void add_to_query_tables(TABLE_LIST *table) { *(table->prev_global= query_tables_last)= table; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 213a7730824..fcdd5d91c44 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -93,8 +93,6 @@ const char *xa_state_names[]={ "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED" }; -static char empty_c_string[1]= {0}; // Used for not defined 'db' - #ifdef __WIN__ static void test_signal(int sig_ptr) { @@ -300,8 +298,7 @@ int check_user(THD *thd, enum enum_server_command command, thd->db is saved in caller and needs to be freed by caller if this function returns 0 */ - thd->db= 0; - thd->db_length= 0; + thd->reset_db(NULL, 0); if (mysql_change_db(thd, db, FALSE)) { /* Send the error to the client */ @@ -341,9 +338,8 @@ int check_user(THD *thd, enum enum_server_command command, if connect failed. Also in case of 'CHANGE USER' failure, current database will be switched to 'no database selected'. */ - thd->db= 0; - thd->db_length= 0; - + thd->reset_db(NULL, 0); + USER_RESOURCES ur; int res= acl_getroot(thd, &ur, passwd, passwd_len); #ifndef EMBEDDED_LIBRARY @@ -1316,19 +1312,6 @@ end: DBUG_RETURN(0); } - /* This works because items are allocated with sql_alloc() */ - -void free_items(Item *item) -{ - Item *next; - DBUG_ENTER("free_items"); - for (; item ; item=next) - { - next=item->next; - item->delete_self(); - } - DBUG_VOID_RETURN; -} /* This works because items are allocated with sql_alloc() */ @@ -1340,7 +1323,26 @@ void cleanup_items(Item *item) DBUG_VOID_RETURN; } -int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) +/* + Handle COM_TABLE_DUMP command + + SYNOPSIS + mysql_table_dump + thd thread handle + db database name or an empty string. If empty, + the current database of the connection is used + tbl_name name of the table to dump + + NOTES + This function is written to handle one specific command only. + + RETURN VALUE + 0 success + 1 error, the error message is set in THD +*/ + +static +int mysql_table_dump(THD* thd, char* db, char* tbl_name) { TABLE* table; TABLE_LIST* table_list; @@ -1377,7 +1379,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) goto err; } net_flush(&thd->net); - if ((error= table->file->dump(thd,fd))) + if ((error= table->file->dump(thd,-1))) my_error(ER_GET_ERRNO, MYF(0), error); err: @@ -1627,7 +1629,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } tbl_name= strmake(db, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - mysql_table_dump(thd, db, tbl_name, -1); + mysql_table_dump(thd, db, tbl_name); break; } case COM_CHANGE_USER: @@ -1801,11 +1803,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS], &LOCK_status); bzero((char*) &table_list,sizeof(table_list)); - if (!(table_list.db=thd->db)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + if (thd->copy_db_to(&table_list.db, 0)) break; - } pend= strend(packet); thd->convert_string(&conv_name, system_charset_info, packet, (uint) (pend-packet), thd->charset()); @@ -2152,6 +2151,34 @@ void log_slow_statement(THD *thd) } +/* + Create a TABLE_LIST object for an INFORMATION_SCHEMA table. 
+ + SYNOPSIS + prepare_schema_table() + thd thread handle + lex current lex + table_ident table alias if it's used + schema_table_idx the type of the INFORMATION_SCHEMA table to be + created + + DESCRIPTION + This function is used in the parser to convert a SHOW or DESCRIBE + table_name command to a SELECT from INFORMATION_SCHEMA. + It prepares a SELECT_LEX and a TABLE_LIST object to represent the + given command as a SELECT parse tree. + + NOTES + Due to the way this function works with memory and LEX it cannot + be used outside the parser (parse tree transformations outside + the parser break PS and SP). + + RETURN VALUE + 0 success + 1 out of memory or SHOW commands are not allowed + in this version of the server. +*/ + int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, enum enum_schema_tables schema_table_idx) { @@ -2179,13 +2206,13 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, DBUG_RETURN(1); #else { - char *db= lex->select_lex.db ? lex->select_lex.db : thd->db; - if (!db) + char *db; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, 0)) { - my_message(ER_NO_DB_ERROR, - ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ + DBUG_RETURN(1); } + db= lex->select_lex.db; remove_escape(db); // Fix escaped '_' if (check_db_name(db)) { @@ -2202,11 +2229,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, db); DBUG_RETURN(1); } - /* - We need to do a copy to make this prepared statement safe if this - was thd->db - */ - lex->select_lex.db= thd->strdup(db); break; } #endif @@ -2739,8 +2761,8 @@ mysql_execute_command(THD *thd) case SQLCOM_LOAD_MASTER_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (!first_table->db) - first_table->db= thd->db; + DBUG_ASSERT(first_table->db); /* Must be set in the parser */ + if (check_access(thd, CREATE_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) @@ -2988,25 +3010,8 @@ end_with_restore_list: my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name); goto error; } - if (!select_lex->db) - { - /* - In the case of ALTER TABLE ... RENAME we should supply the - default database if the new name is not explicitly qualified - by a database. (Bug #11493) - */ - if (lex->alter_info.flags & ALTER_RENAME) - { - if (! thd->db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - goto error; - } - select_lex->db= thd->db; - } - else - select_lex->db= first_table->db; - } + /* Must be set in the parser */ + DBUG_ASSERT(select_lex->db); if (check_access(thd, ALTER_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table)) || @@ -3685,12 +3690,8 @@ end_with_restore_list: } case SQLCOM_ALTER_DB: { - char *db= lex->name ? 
lex->name : thd->db; - if (!db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - break; - } + char *db= lex->name; + DBUG_ASSERT(db); /* Must be set in the parser */ if (!strip_sp(db) || check_db_name(db)) { my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); @@ -4139,23 +4140,11 @@ end_with_restore_list: case SQLCOM_CREATE_SPFUNCTION: { uint namelen; - char *name, *db; + char *name; int result; DBUG_ASSERT(lex->sphead != 0); - - if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0]) - { - if (!thd->db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - delete lex->sphead; - lex->sphead= 0; - goto error; - } - lex->sphead->m_db.length= strlen(thd->db); - lex->sphead->m_db.str= thd->db; - } + DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */ if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0, is_schema_db(lex->sphead->m_db.str))) @@ -4272,41 +4261,27 @@ end_with_restore_list: } #endif /* NO_EMBEDDED_ACCESS_CHECKS */ - /* - We need to copy name and db in order to use them for - check_routine_access which is called after lex->sphead has - been deleted. - */ - name= thd->strdup(name); - lex->sphead->m_db.str= db= thd->strmake(lex->sphead->m_db.str, - lex->sphead->m_db.length); res= (result= lex->sphead->create(thd)); if (result == SP_OK) { - /* - We must cleanup the unit and the lex here because - sp_grant_privileges calls (indirectly) db_find_routine, - which in turn may call MYSQLparse with THD::lex. - TODO: fix db_find_routine to use a temporary lex. - */ - lex->unit.cleanup(); - delete lex->sphead; - lex->sphead= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS /* only add privileges if really neccessary */ if (sp_automatic_privileges && !opt_noacl && check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS, - db, name, + lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1)) { close_thread_tables(thd); - if (sp_grant_privileges(thd, db, name, + if (sp_grant_privileges(thd, lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE)) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_PROC_AUTO_GRANT_FAIL, ER(ER_PROC_AUTO_GRANT_FAIL)); } #endif + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; send_ok(thd); } else @@ -4721,7 +4696,8 @@ end_with_restore_list: view_store_options(thd, first_table, &buff); buff.append(STRING_WITH_LEN("VIEW ")); /* Test if user supplied a db (ie: we did not use thd->db) */ - if (first_table->db != thd->db && first_table->db[0]) + if (first_table->db && first_table->db[0] && + (thd->db == NULL || strcmp(first_table->db, thd->db))) { append_identifier(thd, &buff, first_table->db, first_table->db_length); @@ -5244,7 +5220,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, (want_access & ~EXTRA_ACL) && thd->db) tables->grant.privilege= want_access; - else if (tables->db && tables->db == thd->db) + else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0) { if (found && !grant_option) // db already checked tables->grant.privilege=found_access; @@ -5392,22 +5368,25 @@ bool check_merge_table_access(THD *thd, char *db, static bool check_db_used(THD *thd,TABLE_LIST *tables) { + char *current_db= NULL; for (; tables; tables= tables->next_global) { - if (!tables->db) + if (tables->db == NULL) { - if (!(tables->db=thd->db)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), - MYF(0)); /* purecov: tested */ - return TRUE; /* purecov: tested */ - } + /* + This code never works and should be removed in 5.1. 
All tables + that are added to the list of tables should already have its + database field initialized properly (see st_lex::add_table_to_list). + */ + DBUG_ASSERT(0); + if (thd->copy_db_to(¤t_db, 0)) + return TRUE; + tables->db= current_db; } } return FALSE; } - /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -6027,19 +6006,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->db= table->db.str; ptr->db_length= table->db.length; } - else if (thd->db) - { - ptr->db= thd->db; - ptr->db_length= thd->db_length; - } - else - { - /* The following can't be "" as we may do 'casedn_str()' on it */ - ptr->db= empty_c_string; - ptr->db_length= 0; - } - if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute()) - ptr->db= thd->strdup(ptr->db); + else if (thd->copy_db_to(&ptr->db, &ptr->db_length)) + DBUG_RETURN(0); ptr->alias= alias_str; if (lower_case_table_names && table->table.length) @@ -7216,6 +7184,8 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables) my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); DBUG_RETURN(TRUE); } + if (check_db_used(thd, tables)) + DBUG_RETURN(TRUE); DBUG_RETURN(FALSE); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9ec8e8db1fb..fbceea84ce5 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2672,7 +2672,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST src_tables_list; DBUG_ENTER("mysql_create_like_table"); - src_db= table_ident->db.str ? table_ident->db.str : thd->db; + DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */ + src_db= table_ident->db.str; /* Validate the source table diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index f943b014118..db1d1a10b11 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -932,8 +932,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, save_db.str= thd->db; save_db.length= thd->db_length; - thd->db_length= strlen(db); - thd->db= (char *) db; + thd->reset_db((char*) db, strlen(db)); while ((trg_create_str= it++)) { trg_sql_mode= itm++; @@ -1035,8 +1034,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, lex_end(&lex); } - thd->db= save_db.str; - thd->db_length= save_db.length; + thd->reset_db(save_db.str, save_db.length); thd->lex= old_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; @@ -1049,8 +1047,7 @@ err_with_lex_cleanup: thd->lex= old_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; - thd->db= save_db.str; - thd->db_length= save_db.length; + thd->reset_db(save_db.str, save_db.length); DBUG_RETURN(1); } diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 6269c0a2eb3..95589a58b37 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -140,6 +140,7 @@ void udf_init() READ_RECORD read_record_info; TABLE *table; int error; + char db[]= "mysql"; /* A subject to casednstr, can't be constant */ DBUG_ENTER("ufd_init"); if (initialized) @@ -161,13 +162,12 @@ void udf_init() initialized = 1; new_thd->thread_stack= (char*) &new_thd; new_thd->store_globals(); - new_thd->db= my_strdup("mysql", MYF(0)); - new_thd->db_length=5; + new_thd->set_db(db, sizeof(db)-1); bzero((gptr) &tables,sizeof(tables)); tables.alias= tables.table_name= (char*) "func"; tables.lock_type = TL_READ; - tables.db=new_thd->db; + tables.db= db; if (simple_open_n_lock_tables(new_thd, &tables)) { diff --git a/sql/sql_view.cc 
b/sql/sql_view.cc index 0f836bd58ff..1561ade78af 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -452,15 +452,15 @@ bool mysql_create_view(THD *thd, */ for (sl= select_lex; sl; sl= sl->next_select()) { - char *db= view->db ? view->db : thd->db; + DBUG_ASSERT(view->db); /* Must be set in the parser */ List_iterator_fast<Item> it(sl->item_list); Item *item; - fill_effective_table_privileges(thd, &view->grant, db, + fill_effective_table_privileges(thd, &view->grant, view->db, view->table_name); while ((item= it++)) { Item_field *fld; - uint priv= (get_column_grant(thd, &view->grant, db, + uint priv= (get_column_grant(thd, &view->grant, view->db, view->table_name, item->name) & VIEW_ANY_ACL); if ((fld= item->filed_for_view_update())) @@ -641,8 +641,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, if (!parser->ok() || !is_equal(&view_type, parser->type())) { - my_error(ER_WRONG_OBJECT, MYF(0), - (view->db ? view->db : thd->db), view->table_name, "VIEW"); + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW"); DBUG_RETURN(-1); } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b2dbc517fa4..954024df500 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1237,12 +1237,18 @@ sp_name: } | ident { + THD *thd= YYTHD; + LEX_STRING db; if (check_routine_name($1)) { my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); YYABORT; } - $$= sp_name_current_db_new(YYTHD, $1); + if (thd->copy_db_to(&db.str, &db.length)) + YYABORT; + $$= new sp_name(db, $1); + if ($$) + $$->init_qname(YYTHD); } ; @@ -2405,14 +2411,26 @@ create2: | LIKE table_ident { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->name= (char *)$2)) YYABORT; + if ($2->db.str == NULL && + thd->copy_db_to(&($2->db.str), &($2->db.length))) + { + YYABORT; + } } | '(' LIKE table_ident ')' { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->name= (char *)$3)) YYABORT; + if ($3->db.str == NULL && + thd->copy_db_to(&($3->db.str), &($3->db.length))) + { + YYABORT; + } } ; @@ -3240,7 +3258,9 @@ alter: lex->key_list.empty(); lex->col_list.empty(); lex->select_lex.init_order(); - lex->select_lex.db=lex->name=0; + lex->select_lex.db= + ((TABLE_LIST*) lex->select_lex.table_list.first)->db; + lex->name=0; bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.db_type= DB_TYPE_DEFAULT; lex->create_info.default_table_charset= NULL; @@ -3258,8 +3278,11 @@ alter: opt_create_database_options { LEX *lex=Lex; + THD *thd= Lex->thd; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; + if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL)) + YYABORT; } | ALTER PROCEDURE sp_name { @@ -3421,14 +3444,20 @@ alter_list_item: | RENAME opt_to table_ident { LEX *lex=Lex; + THD *thd= lex->thd; lex->select_lex.db=$3->db.str; - lex->name= $3->table.str; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, NULL)) + { + YYABORT; + } if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name($3->db.str)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); YYABORT; } + lex->name= $3->table.str; lex->alter_info.flags|= ALTER_RENAME; } | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate @@ -4742,7 +4771,13 @@ simple_expr: #endif /* HAVE_DLOPEN */ { LEX *lex= Lex; - sp_name *name= sp_name_current_db_new(YYTHD, $1); + THD *thd= lex->thd; + LEX_STRING db; + if (thd->copy_db_to(&db.str, &db.length)) + YYABORT; + sp_name *name= new sp_name(db, $1); + if (name) + name->init_qname(thd); sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION); if ($4) @@ -8460,7 +8495,9 @@
grant_ident: '*' { LEX *lex= Lex; - lex->current_select->db= lex->thd->db; + THD *thd= lex->thd; + if (thd->copy_db_to(&lex->current_select->db, NULL)) + YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) diff --git a/sql/tztime.cc b/sql/tztime.cc index 079abfc9299..d12aef47b40 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1548,6 +1548,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) TABLE *table; Tz_names_entry *tmp_tzname; my_bool return_val= 1; + char db[]= "mysql"; int res; DBUG_ENTER("my_tz_init"); @@ -1604,13 +1605,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) leap seconds shared by all time zones. */ - thd->db= my_strdup("mysql",MYF(0)); - thd->db_length= 5; // Safety + thd->set_db(db, sizeof(db)-1); bzero((char*) &tables_buff, sizeof(TABLE_LIST)); tables_buff[0].alias= tables_buff[0].table_name= (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; - tables_buff[0].db= thd->db; + tables_buff[0].db= db; /* Fill TABLE_LIST for the rest of the time zone describing tables and link it to first one. From c36dd286769f0a7aa9ee253afbd133f9a114c0b2 Mon Sep 17 00:00:00 2001 From: "kent@mysql.com" <> Date: Mon, 26 Jun 2006 23:44:17 +0200 Subject: [PATCH 14/74] make_sharedlib_distribution.sh: For compatibility, don't use {..,..} in pattern matching make_binary_distribution.sh: Added .dylib and .sl as shared library extensions --- scripts/make_binary_distribution.sh | 18 ++++++++++++++---- scripts/make_sharedlib_distribution.sh | 6 ++++-- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index f372762c05d..396c4f83bac 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -154,11 +154,21 @@ if [ $BASE_SYSTEM = "netware" ] ; then fi for i in \ - libmysql/.libs/libmysqlclient.a libmysql/.libs/libmysqlclient.so* \ - libmysql/libmysqlclient.* libmysql_r/.libs/libmysqlclient_r.a \ - libmysql_r/.libs/libmysqlclient_r.so* libmysql_r/libmysqlclient_r.* \ + libmysql/.libs/libmysqlclient.a \ + libmysql/.libs/libmysqlclient.so* \ + libmysql/.libs/libmysqlclient.sl* \ + libmysql/.libs/libmysqlclient*.dylib \ + libmysql/libmysqlclient.* \ + libmysql_r/.libs/libmysqlclient_r.a \ + libmysql_r/.libs/libmysqlclient_r.so* \ + libmysql_r/.libs/libmysqlclient_r.sl* \ + libmysql_r/.libs/libmysqlclient_r*.dylib \ + libmysql_r/libmysqlclient_r.* \ + libmysqld/.libs/libmysqld.a \ + libmysqld/.libs/libmysqld.so* \ + libmysqld/.libs/libmysqld.sl* \ + libmysqld/.libs/libmysqld*.dylib \ mysys/libmysys.a strings/libmystrings.a dbug/libdbug.a \ - libmysqld/.libs/libmysqld.a libmysqld/.libs/libmysqld.so* \ libmysqld/libmysqld.a netware/libmysql.imp do if [ -f $i ] diff --git a/scripts/make_sharedlib_distribution.sh b/scripts/make_sharedlib_distribution.sh index fbc945e445a..c475d0e14a4 100644 --- a/scripts/make_sharedlib_distribution.sh +++ b/scripts/make_sharedlib_distribution.sh @@ -45,9 +45,11 @@ fi mkdir -p $BASE/lib for i in \ - libmysql/.libs/libmysqlclient.s{l,o}* \ + libmysql/.libs/libmysqlclient.so* \ + libmysql/.libs/libmysqlclient.sl* \ libmysql/.libs/libmysqlclient*.dylib \ - libmysql_r/.libs/libmysqlclient_r.s{l,o}* \ + libmysql_r/.libs/libmysqlclient_r.so* \ + libmysql_r/.libs/libmysqlclient_r.sl* \ libmysql_r/.libs/libmysqlclient_r*.dylib do if [ -f $i ] From e67bdaf604418ce51fc74db2b69f143a28ef9746 Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: 
Tue, 27 Jun 2006 11:26:41 +0200 Subject: [PATCH 15/74] Bug#11824 - internal /tmp/*.{MYD,MYI} files remain, causing subsequent queries to fail Very complex select statements can create temporary tables that are too big to be represented as a MyISAM table. This was not checked at table creation time, but only at open time. The result was an attempt to delete the "impossible" table. But if the server is built --with-raid, MyISAM tries to open the table before deleting the files. It needs to find out if the table uses the raid support and how many raid chunks there are. This is done with an open "for repair", which will almost always succeed. But in this case we have an "impossible" table. The open failed. Hence the files were not deleted. Also the error message was a bit unspecific. I turned an open error in this situation into the assumption of having no raid support on the table. Thus the code tries to delete only the normal data file. This may however leave existing raid chunks behind. I also added a check in mi_create() to prevent the creation of an "impossible" table. A more descriptive error message is given in this case. No test case. The required select statement is way too large for the test suite. I added a test script to the bug report. --- myisam/mi_create.c | 17 +++++++++++++++++ myisam/mi_delete_table.c | 24 ++++++++++++++++++------ 2 files changed, 35 insertions(+), 6 deletions(-) diff --git a/myisam/mi_create.c b/myisam/mi_create.c index 41c965c7c80..4183040500b 100644 --- a/myisam/mi_create.c +++ b/myisam/mi_create.c @@ -59,6 +59,8 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, my_off_t key_root[MI_MAX_POSSIBLE_KEY],key_del[MI_MAX_KEY_BLOCK_SIZE]; MI_CREATE_INFO tmp_create_info; DBUG_ENTER("mi_create"); + DBUG_PRINT("enter", ("keys: %u columns: %u uniques: %u flags: %u", + keys, columns, uniques, flags)); if (!ci) { @@ -447,6 +449,16 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, uniques * MI_UNIQUEDEF_SIZE + (key_segs + unique_key_parts)*HA_KEYSEG_SIZE+ columns*MI_COLUMNDEF_SIZE); + DBUG_PRINT("info", ("info_length: %u", info_length)); + /* There are only 16 bits for the total header length. */ + if (info_length > 65535) + { + my_printf_error(0, "MyISAM table '%s' has too many columns and/or " + "indexes and/or unique constraints.", + MYF(0), name + dirname_length(name)); + my_errno= HA_WRONG_CREATE_OPTION; + goto err; + } bmove(share.state.header.file_version,(byte*) myisam_file_magic,4); ci->old_options=options| (ci->old_options & HA_OPTION_TEMP_COMPRESS_RECORD ? @@ -594,6 +606,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, errpos=3; } + DBUG_PRINT("info", ("write state info and base info")); if (mi_state_info_write(file, &share.state, 2) || mi_base_info_write(file, &share.base)) goto err; @@ -607,6 +620,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, #endif /* Write key and keyseg definitions */ + DBUG_PRINT("info", ("write key and keyseg definitions")); for (i=0 ; i < share.base.keys - uniques; i++) { uint sp_segs=(keydefs[i].flag & HA_SPATIAL) ?
2*SPDIMS : 0; @@ -655,6 +669,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, } /* Save unique definition */ + DBUG_PRINT("info", ("write unique definitions")); for (i=0 ; i < share.state.header.uniques ; i++) { if (mi_uniquedef_write(file, &uniquedefs[i])) @@ -665,6 +680,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, goto err; } } + DBUG_PRINT("info", ("write field definitions")); for (i=0 ; i < share.base.fields ; i++) if (mi_recinfo_write(file, &recinfo[i])) goto err; @@ -679,6 +695,7 @@ int mi_create(const char *name,uint keys,MI_KEYDEF *keydefs, #endif /* Enlarge files */ + DBUG_PRINT("info", ("enlarge to keystart: %lu", (ulong) share.base.keystart)); if (my_chsize(file,(ulong) share.base.keystart,0,MYF(0))) goto err; diff --git a/myisam/mi_delete_table.c b/myisam/mi_delete_table.c index 6843881568d..2fba31cf8be 100644 --- a/myisam/mi_delete_table.c +++ b/myisam/mi_delete_table.c @@ -34,12 +34,24 @@ int mi_delete_table(const char *name) #ifdef USE_RAID { MI_INFO *info; - /* we use 'open_for_repair' to be able to delete a crashed table */ - if (!(info=mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR))) - DBUG_RETURN(my_errno); - raid_type = info->s->base.raid_type; - raid_chunks = info->s->base.raid_chunks; - mi_close(info); + /* + When built with RAID support, we need to determine if this table + makes use of the raid feature. If yes, we need to remove all raid + chunks. This is done with my_raid_delete(). Unfortunately it is + necessary to open the table just to check this. We use + 'open_for_repair' to be able to open even a crashed table. If even + this open fails, we assume no raid configuration for this table + and try to remove the normal data file only. This may however + leave the raid chunks behind. + */ + if (!(info= mi_open(name, O_RDONLY, HA_OPEN_FOR_REPAIR))) + raid_type= 0; + else + { + raid_type= info->s->base.raid_type; + raid_chunks= info->s->base.raid_chunks; + mi_close(info); + } } #ifdef EXTRA_DEBUG check_table_is_closed(name,"delete"); From 3cf181bb648bddb68a03c24f2ff892f9d21604ff Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jun 2006 14:56:24 +0400 Subject: [PATCH 16/74] Fix compilation failures on Windows caused by the patch for Bug#17199. Fix a minor issue with Bug#16206 (bdb.test failed if the tree is compiled without blackhole). 
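For illustration, here is a minimal self-contained sketch of what the signature change below amounts to; the function name is a stand-in and the myf flag handling of the real my_sys version is omitted. Taking const char * lets call sites pass ordinary C strings without casts; the old const byte * parameter forced a cast at every caller, which is presumably what clashed on Windows, where byte is not plain char.

#include <stdlib.h>
#include <string.h>

/* Sketch of the my_strdup_with_length() semantics with the new
   const char * parameter: copy exactly 'length' bytes and add a
   terminating NUL, so 'from' itself need not be NUL-terminated. */
char *strdup_with_length_sketch(const char *from, unsigned int length)
{
  char *ptr= (char *) malloc(length + 1);
  if (ptr != NULL)
  {
    memcpy(ptr, from, length);
    ptr[length]= '\0';
  }
  return ptr;
}

The real function allocates through my_malloc() and honours the passed myf flags, as the my_malloc.c hunk below shows.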
--- include/my_sys.h | 2 +- mysql-test/r/bdb.result | 4 ++-- mysql-test/t/bdb.test | 2 +- mysys/my_malloc.c | 2 +- mysys/safemalloc.c | 2 +- sql/ha_federated.cc | 3 +-- sql/log_event.cc | 4 ++-- sql/set_var.cc | 2 +- sql/sql_class.h | 2 +- sql/table.h | 3 ++- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/my_sys.h b/include/my_sys.h index 229389f1ac5..cbd7c79fa11 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -157,7 +157,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags); extern void my_no_flags_free(gptr ptr); extern gptr my_memdup(const byte *from,uint length,myf MyFlags); extern char *my_strdup(const char *from,myf MyFlags); -extern char *my_strdup_with_length(const byte *from, uint length, +extern char *my_strdup_with_length(const char *from, uint length, myf MyFlags); /* we do use FG (as a no-op) in below so that a typo on FG is caught */ #define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR)) diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index 588644a6c66..ee7cdceefda 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -1930,7 +1930,7 @@ alter table t1 add primary key(a); drop table t1; set autocommit=1; reset master; -create table bug16206 (a int) engine= blackhole; +create table bug16206 (a int); insert into bug16206 values(1); start transaction; insert into bug16206 values(2); @@ -1938,7 +1938,7 @@ commit; show binlog events; Log_name Pos Event_type Server_id End_log_pos Info f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4 -f n Query 1 n use `test`; create table bug16206 (a int) engine= blackhole +f n Query 1 n use `test`; create table bug16206 (a int) f n Query 1 n use `test`; insert into bug16206 values(1) f n Query 1 n use `test`; insert into bug16206 values(2) drop table bug16206; diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index d2e3ca5f36e..ec05eeb3c34 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -1028,7 +1028,7 @@ set autocommit=1; let $VERSION=`select version()`; reset master; -create table bug16206 (a int) engine= blackhole; +create table bug16206 (a int); insert into bug16206 values(1); start transaction; insert into bug16206 values(2); diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c index 3f601a42dc9..f33db2655c4 100644 --- a/mysys/my_malloc.c +++ b/mysys/my_malloc.c @@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags) } -char *my_strdup_with_length(const byte *from, uint length, myf my_flags) +char *my_strdup_with_length(const char *from, uint length, myf my_flags) { gptr ptr; if ((ptr=my_malloc(length+1,my_flags)) != 0) diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c index 6cdf98c5f5f..f6d6644859e 100644 --- a/mysys/safemalloc.c +++ b/mysys/safemalloc.c @@ -525,7 +525,7 @@ char *_my_strdup(const char *from, const char *filename, uint lineno, } /* _my_strdup */ -char *_my_strdup_with_length(const byte *from, uint length, +char *_my_strdup_with_length(const char *from, uint length, const char *filename, uint lineno, myf MyFlags) { diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index c6d5c77803b..02bcde43f11 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -632,8 +632,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, DBUG_PRINT("info", ("Length %d \n", table->s->connect_string.length)); DBUG_PRINT("info", ("String %.*s \n", table->s->connect_string.length, table->s->connect_string.str)); - share->scheme= my_strdup_with_length((const byte*)table->s-> - connect_string.str, + share->scheme= 
my_strdup_with_length(table->s->connect_string.str, table->s->connect_string.length, MYF(0)); diff --git a/sql/log_event.cc b/sql/log_event.cc index e93c2855199..b4707826205 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3123,7 +3123,7 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg, llstr(pos_arg, buff), flags)); #endif if (flags & DUP_NAME) - new_log_ident= my_strdup_with_length((const byte*) new_log_ident_arg, + new_log_ident= my_strdup_with_length(new_log_ident_arg, ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; } @@ -3147,7 +3147,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, (header_size+post_header_len)); ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); - new_log_ident= my_strdup_with_length((byte*) buf + ident_offset, + new_log_ident= my_strdup_with_length(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; diff --git a/sql/set_var.cc b/sql/set_var.cc index 003dd4a8ab3..aa58f7d41fc 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1133,7 +1133,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex, uint new_length= (var ? var->value->str_value.length() : 0); if (!old_value) old_value= (char*) ""; - if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0)))) + if (!(res= my_strdup_with_length(old_value, new_length, MYF(0)))) return 1; /* Replace the old value in such a way that the any thread using diff --git a/sql/sql_class.h b/sql/sql_class.h index 47150912c52..1ba104df2a4 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1300,7 +1300,7 @@ public: pthread_t real_id; uint tmp_table, global_read_lock; uint server_status,open_options,system_thread; - uint32 db_length; + uint db_length; uint select_number; //number of select (used for EXPLAIN) /* variables.transaction_isolation is reset to this after each commit */ enum_tx_isolation session_tx_isolation; diff --git a/sql/table.h b/sql/table.h index 106421d7a17..ebb4481ef3a 100644 --- a/sql/table.h +++ b/sql/table.h @@ -599,7 +599,8 @@ typedef struct st_table_list thr_lock_type lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ - uint32 db_length, table_name_length; + uint db_length; + uint32 table_name_length; bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ From 36fdaa7d1625ab77e1848c86ab08d4246336d22f Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jun 2006 15:39:43 +0400 Subject: [PATCH 17/74] Fix yet another place with an obsolete explicit cast to byte *. --- sql/set_var.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/set_var.h b/sql/set_var.h index 8e5a94b1e1b..b048428219d 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -935,7 +935,7 @@ public: uint name_length_arg, gptr data_arg) :name_length(name_length_arg), data(data_arg) { - name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME)); + name= my_strdup_with_length(name_arg, name_length, MYF(MY_WME)); links->push_back(this); } inline bool cmp(const char *name_cmp, uint length) From e29ea3f2efc660c30e1ec4102be11689045b7930 Mon Sep 17 00:00:00 2001 From: "konstantin@mysql.com" <> Date: Tue, 27 Jun 2006 17:34:14 +0400 Subject: [PATCH 18/74] Fix yet another place that used uint32 instead of uint. 
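The pattern behind these one-line fixes can be shown with a small standalone sketch (stand-in names, not the real slave.cc code): THD::db_length is now declared uint, and uint is not guaranteed to be the same type as uint32 on every platform, so an out-parameter declared uint32 * no longer matches &thd->db_length; the prototype has to follow the field.

#include <string.h>

typedef unsigned int uint;

/* Stand-in for slave.cc's rewrite_db() after the fix: the length
   out-parameter is a uint, matching the new THD::db_length. */
static const char *rewrite_db_sketch(const char *db, uint *new_len)
{
  /* No rewrite rules in this sketch: hand the name back unchanged. */
  *new_len= db ? (uint) strlen(db) : 0;
  return db;
}

int main(void)
{
  uint len; /* declaring this with a differing 32-bit typedef would break the call */
  const char *db= rewrite_db_sketch("test", &len);
  return (db != NULL && len == 4) ? 0 : 1;
}

The same reasoning is behind the uint32-to-uint changes to THD::db_length and st_table_list::db_length in the earlier hunks of this series.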
--- sql/slave.cc | 2 +- sql/slave.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/slave.cc b/sql/slave.cc index aed2a41a1e6..4da447c4bc3 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1177,7 +1177,7 @@ bool net_request_file(NET* net, const char* fname) } -const char *rewrite_db(const char* db, uint32 *new_len) +const char *rewrite_db(const char* db, uint *new_len) { if (replicate_rewrite_db.is_empty() || !db) return db; diff --git a/sql/slave.h b/sql/slave.h index ebbb1e64df5..7f08105c0b9 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -550,7 +550,7 @@ int add_table_rule(HASH* h, const char* table_spec); int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec); void init_table_rule_hash(HASH* h, bool* h_inited); void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited); -const char *rewrite_db(const char* db, uint32 *new_db_len); +const char *rewrite_db(const char* db, uint *new_db_len); const char *print_slave_db_safe(const char *db); int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); void skip_load_data_infile(NET* net); From 49cc2904d22c3c7b102d60303991ce2feed6fc28 Mon Sep 17 00:00:00 2001 From: "kroki@mysql.com" <> Date: Tue, 27 Jun 2006 19:33:59 +0400 Subject: [PATCH 19/74] Dec. 31st, 9999 is still a valid date, only starting with Jan 1st 10000 things become invalid (Bug #12356) --- mysql-test/r/func_sapdb.result | 6 ++++++ mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_sapdb.test | 2 ++ mysql-test/t/func_time.test | 6 ++++++ sql/item_timefunc.cc | 11 ++++++----- 5 files changed, 26 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/func_sapdb.result b/mysql-test/r/func_sapdb.result index ea40e1559fd..b18885e218a 100644 --- a/mysql-test/r/func_sapdb.result +++ b/mysql-test/r/func_sapdb.result @@ -71,6 +71,12 @@ makedate(1997,1) select makedate(1997,0); makedate(1997,0) NULL +select makedate(9999,365); +makedate(9999,365) +9999-12-31 +select makedate(9999,366); +makedate(9999,366) +NULL select addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002"); addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002") 1998-01-02 01:01:01.000001 diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index c90a4258036..fab0bf01f58 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -352,6 +352,12 @@ extract(SECOND FROM "1999-01-02 10:11:12") select extract(MONTH FROM "2001-02-00"); extract(MONTH FROM "2001-02-00") 2 +SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE) +9999-12-31 00:00:00 +SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE) +9999-12-31 00:00:00 SELECT "1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND; "1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND 1968-01-20 03:14:08 diff --git a/mysql-test/t/func_sapdb.test b/mysql-test/t/func_sapdb.test index 8fd793f067b..930ad37c60c 100644 --- a/mysql-test/t/func_sapdb.test +++ b/mysql-test/t/func_sapdb.test @@ -37,6 +37,8 @@ select weekofyear("1997-11-31 23:59:59.000001"); select makedate(1997,1); select makedate(1997,0); +select makedate(9999,365); +select makedate(9999,366); #Time functions diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index d69545712c8..b232fb14e1e 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ 
-139,6 +139,12 @@ select extract(MINUTE_SECOND FROM "10:11:12"); select extract(SECOND FROM "1999-01-02 10:11:12"); select extract(MONTH FROM "2001-02-00"); +# +# MySQL Bugs: #12356: DATE_SUB or DATE_ADD incorrectly returns null +# +SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); + # # Test big intervals (Bug #3498) # diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 8d3e768b74e..27876096bc5 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -27,6 +27,7 @@ /* TODO: Move month and days to language files */ +/* Day number for Dec 31st, 9999 */ #define MAX_DAY_NUMBER 3652424L static const char *month_names[]= @@ -401,7 +402,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (yearday > 0) { uint days= calc_daynr(l_time->year,1,1) + yearday - 1; - if (days <= 0 || days >= MAX_DAY_NUMBER) + if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); } @@ -447,7 +448,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, (weekday - 1); } - if (days <= 0 || days >= MAX_DAY_NUMBER) + if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); } @@ -1931,7 +1932,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) ltime->hour= (uint) (sec/3600); daynr= calc_daynr(ltime->year,ltime->month,1) + days; /* Day number from year 0 to 9999-12-31 */ - if ((ulonglong) daynr >= MAX_DAY_NUMBER) + if ((ulonglong) daynr > MAX_DAY_NUMBER) goto null_date; get_date_from_daynr((long) daynr, <ime->year, <ime->month, <ime->day); @@ -1941,7 +1942,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ - if ((ulong) period >= MAX_DAY_NUMBER) + if ((ulong) period > MAX_DAY_NUMBER) goto null_date; get_date_from_daynr((long) period,<ime->year,<ime->month,<ime->day); break; @@ -2412,7 +2413,7 @@ String *Item_func_makedate::val_str(String *str) days= calc_daynr(yearnr,1,1) + daynr - 1; /* Day number from year 0 to 9999-12-31 */ - if (days >= 0 && days < MAX_DAY_NUMBER) + if (days >= 0 && days <= MAX_DAY_NUMBER) { null_value=0; get_date_from_daynr(days,&l_time.year,&l_time.month,&l_time.day); From 46ee3ac19d12551aaa6b49694b4fd3077f0fa7a7 Mon Sep 17 00:00:00 2001 From: "joerg@mysql.com" <> Date: Tue, 27 Jun 2006 18:17:53 +0200 Subject: [PATCH 20/74] Move "mysqldumpslow" from the client RPM to the server RPM (bug#20216). 
--- support-files/mysql.spec.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index d11fc5ef440..5796e776b83 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -469,6 +469,7 @@ fi %attr(755, root, root) %{_bindir}/mysql_convert_table_format %attr(755, root, root) %{_bindir}/mysqld_multi %attr(755, root, root) %{_bindir}/mysqld_safe +%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysql_explain_log %attr(755, root, root) %{_bindir}/mysql_fix_extensions %attr(755, root, root) %{_bindir}/mysql_fix_privilege_tables @@ -506,7 +507,6 @@ fi %attr(755, root, root) %{_bindir}/mysqlbinlog %attr(755, root, root) %{_bindir}/mysqlcheck %attr(755, root, root) %{_bindir}/mysqldump -%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysqlimport %attr(755, root, root) %{_bindir}/mysqlshow @@ -578,6 +578,10 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jun 27 2006 Joerg Bruehe + +- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216) + * Sat May 20 2006 Kent Boortz - Always compile for PIC, position independent code. From ffd8ed17162333032e8d69454da5319c5d03b60b Mon Sep 17 00:00:00 2001 From: "svoj@may.pils.ru" <> Date: Tue, 27 Jun 2006 22:22:43 +0500 Subject: [PATCH 21/74] BUG#1662 - ALTER TABLE LIKE ignores DATA/INDEX DIRECTORY Produce a warning if DATA/INDEX DIRECTORY is specified in an ALTER TABLE statement. Ignoring of these options is documented in the symbolic links section of the manual. --- mysql-test/r/symlink.result | 6 ++++++ sql/sql_parse.cc | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result index caff53f8fd7..f6779689133 100644 --- a/mysql-test/r/symlink.result +++ b/mysql-test/r/symlink.result @@ -65,18 +65,24 @@ t9 CREATE TABLE `t9` ( ) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='TEST_DIR/var/tmp/' INDEX DIRECTORY='TEST_DIR/var/run/' drop database mysqltest; create table t1 (a int not null) engine=myisam; +Warnings: +Warning 0 DATA DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL default '0' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 alter table t1 add b int; +Warnings: +Warning 0 DATA DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL default '0', `b` int(11) default NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 +Warnings: +Warning 0 INDEX DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 504339684ce..fbe36bfdc4a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2678,6 +2678,12 @@ unsent_create_error: } } /* Don't yet allow changing of symlinks with ALTER TABLE */ + if (lex->create_info.data_file_name) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "DATA DIRECTORY option ignored"); + if (lex->create_info.index_file_name) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "INDEX DIRECTORY option ignored"); lex->create_info.data_file_name=lex->create_info.index_file_name=0; /* ALTER TABLE ends previous transaction */ if (end_active_trans(thd)) From 44653eed3bd6461cd1ee510a65cdf66e9a451a2e Mon Sep 17 00:00:00 2001 From: "joerg@mysql.com" <> Date: Tue, 27 Jun 2006 20:18:03 +0200 Subject: [PATCH 22/74] Revert all previous
attempts to call "mysql_upgrade" during RPM upgrade. This finishes bug#18516, as far as "generic RPMs" are concerned. --- support-files/mysql.spec.sh | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index a452811dc91..9f08d9330d4 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -485,25 +485,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir # Initiate databases if needed %{_bindir}/mysql_install_db --rpm --user=%{mysqld_user} -# Upgrade databases if needed -# This must be done as database user "root", who should be password-protected, -# but this password is not available here. -# So ensure the server is isolated as much as possible, and start it so that -# passwords are not checked. -# See the related change in the start script "/etc/init.d/mysql". -if type mktemp >/dev/null 2>&1 -then - mysql_tmp_sockdir=`mktemp -dt` -else - PID=$$ - mysql_tmp_sockdir=/tmp/mysql-$PID - ( umask 077 ; mkdir $mysql_tmp_sockdir ) -fi -chown %{mysqld_user}:%{mysqld_group} $mysql_tmp_sockdir -%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables --socket=$mysql_tmp_sockdir/upgrade.sock -%{_bindir}/mysql_upgrade --socket=$mysql_tmp_sockdir/upgrade.sock -%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables -rm -fr $mysql_tmp_sockdir +# Upgrade databases if needed would go here - but it cannot be automated yet # Change permissions again to fix any new files. chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir @@ -741,6 +723,12 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jun 27 2006 Joerg Bruehe + +- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade, + there are some more aspects which need to be solved before this is possible. + For now, just ensure the binary "mysql_upgrade" is delivered and installed. + * Thu Jun 22 2006 Joerg Bruehe - Close a gap of the previous version by explicitly using From d8499f2d8f2ad08145c32e274a8cdd02c5933e01 Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Wed, 28 Jun 2006 14:27:37 +0200 Subject: [PATCH 23/74] Bug#17877 - Corrupted spatial index CHECK TABLE could complain about a fully intact spatial index. A wrong comparison operator was used for table checking. The result was that it checked for non-matching spatial keys. This succeeded if at least two different keys were present, but failed if only the matching key was present. I fixed the key comparison. --- myisam/mi_check.c | 5 ++-- myisam/mi_key.c | 2 +- myisam/rt_index.c | 8 ++++--- myisam/rt_mbr.c | 6 ++++- mysql-test/r/gis-rtree.result | 40 +++++++++++++++++++++++++++++++ mysql-test/t/gis-rtree.test | 44 +++++++++++++++++++++++++++++++++++ 6 files changed, 98 insertions(+), 7 deletions(-) diff --git a/myisam/mi_check.c b/myisam/mi_check.c index 2395640d5bf..1e62e5e641d 100644 --- a/myisam/mi_check.c +++ b/myisam/mi_check.c @@ -1155,12 +1155,13 @@ int chk_data_link(MI_CHECK *param, MI_INFO *info,int extend) */ int search_result= (keyinfo->flag & HA_SPATIAL) ? 
rtree_find_first(info, key, info->lastkey, key_length, - SEARCH_SAME) : + MBR_EQUAL | MBR_DATA) : _mi_search(info,keyinfo,info->lastkey,key_length, SEARCH_SAME, info->s->state.key_root[key]); if (search_result) { - mi_check_print_error(param,"Record at: %10s Can't find key for index: %2d", + mi_check_print_error(param,"Record at: %10s " + "Can't find key for index: %2d", llstr(start_recpos,llbuff),key+1); if (error++ > MAXERR || !(param->testflag & T_VERBOSE)) goto err2; diff --git a/myisam/mi_key.c b/myisam/mi_key.c index cb85febd869..eaa854b1a37 100644 --- a/myisam/mi_key.c +++ b/myisam/mi_key.c @@ -54,7 +54,7 @@ uint _mi_make_key(register MI_INFO *info, uint keynr, uchar *key, TODO: nulls processing */ #ifdef HAVE_SPATIAL - return sp_make_key(info,keynr,key,record,filepos); + DBUG_RETURN(sp_make_key(info,keynr,key,record,filepos)); #else DBUG_ASSERT(0); /* mi_open should check that this never happens*/ #endif diff --git a/myisam/rt_index.c b/myisam/rt_index.c index 97554dca4e6..1806476dc39 100644 --- a/myisam/rt_index.c +++ b/myisam/rt_index.c @@ -183,9 +183,11 @@ int rtree_find_first(MI_INFO *info, uint keynr, uchar *key, uint key_length, return -1; } - /* Save searched key */ - memcpy(info->first_mbr_key, key, keyinfo->keylength - - info->s->base.rec_reflength); + /* + Save searched key, include data pointer. + The data pointer is required if the search_flag contains MBR_DATA. + */ + memcpy(info->first_mbr_key, key, keyinfo->keylength); info->last_rkey_length = key_length; info->rtree_recursion_depth = -1; diff --git a/myisam/rt_mbr.c b/myisam/rt_mbr.c index c43daec2f7c..897862c1c9a 100644 --- a/myisam/rt_mbr.c +++ b/myisam/rt_mbr.c @@ -52,10 +52,14 @@ if (EQUAL_CMP(amin, amax, bmin, bmax)) \ return 1; \ } \ - else /* if (nextflag & MBR_DISJOINT) */ \ + else if (nextflag & MBR_DISJOINT) \ { \ if (DISJOINT_CMP(amin, amax, bmin, bmax)) \ return 1; \ + }\ + else /* if unknown comparison operator */ \ + { \ + DBUG_ASSERT(0); \ } #define RT_CMP_KORR(type, korr_func, len, nextflag) \ diff --git a/mysql-test/r/gis-rtree.result b/mysql-test/r/gis-rtree.result index f479fc41ffb..3fcae8843c0 100644 --- a/mysql-test/r/gis-rtree.result +++ b/mysql-test/r/gis-rtree.result @@ -817,3 +817,43 @@ check table t1 extended; Table Op Msg_type Msg_text test.t1 check status OK drop table t1; +CREATE TABLE t1 ( +c1 geometry NOT NULL default '', +SPATIAL KEY i1 (c1(32)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 (c1) VALUES ( +PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +CHECK TABLE t1 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +CREATE TABLE t1 ( +c1 geometry NOT NULL default '', +SPATIAL KEY i1 (c1(32)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 (c1) VALUES ( +PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +INSERT INTO t1 (c1) VALUES ( +PolygonFromText('POLYGON((-65.7402776999 -96.6686111000, + -65.7372222000 -96.5516666000, + -65.8502777000 -96.5461111000, + -65.8527777000 -96.6627777000, + -65.7402776999 -96.6686111000))')); +INSERT INTO t1 (c1) VALUES ( +PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +CHECK 
TABLE t1 EXTENDED; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; diff --git a/mysql-test/t/gis-rtree.test b/mysql-test/t/gis-rtree.test index 682f67c61c4..eba53a8a9c5 100644 --- a/mysql-test/t/gis-rtree.test +++ b/mysql-test/t/gis-rtree.test @@ -188,4 +188,48 @@ check table t1 extended; drop table t1; +# +# Bug#17877 - Corrupted spatial index +# +CREATE TABLE t1 ( + c1 geometry NOT NULL default '', + SPATIAL KEY i1 (c1(32)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +# This showed a missing key. +CHECK TABLE t1 EXTENDED; +DROP TABLE t1; +# +CREATE TABLE t1 ( + c1 geometry NOT NULL default '', + SPATIAL KEY i1 (c1(32)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1; +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-65.7402776999 -96.6686111000, + -65.7372222000 -96.5516666000, + -65.8502777000 -96.5461111000, + -65.8527777000 -96.6627777000, + -65.7402776999 -96.6686111000))')); +# This is the same as the first insert to get a non-unique key. +INSERT INTO t1 (c1) VALUES ( + PolygonFromText('POLYGON((-18.6086111000 -66.9327777000, + -18.6055555000 -66.8158332999, + -18.7186111000 -66.8102777000, + -18.7211111000 -66.9269443999, + -18.6086111000 -66.9327777000))')); +# This showed (and still shows) OK. +CHECK TABLE t1 EXTENDED; +DROP TABLE t1; + # End of 4.1 tests From 9dd5bc3843a44072cc7f211b6ad8cf3102e81cc1 Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Wed, 28 Jun 2006 16:07:39 +0200 Subject: [PATCH 24/74] Bug#19835 - Binary copy of corrupted tables crash the server when issuing a query A corrupt table with dynamic record format can crash the server when trying to select from it. I fixed the crash that resulted from the particular type of corruption that has been reported for this bug. No test case. To test it, one needs a table with a very special corruption. The bug report contains a file with such a table. --- myisam/mi_dynrec.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/myisam/mi_dynrec.c b/myisam/mi_dynrec.c index 43783ca2d36..1b691c955f1 100644 --- a/myisam/mi_dynrec.c +++ b/myisam/mi_dynrec.c @@ -1116,6 +1116,9 @@ int _mi_read_dynamic_record(MI_INFO *info, my_off_t filepos, byte *buf) info->rec_cache.pos_in_file <= block_info.next_filepos && flush_io_cache(&info->rec_cache)) goto err; + /* A corrupted table can have wrong pointers. (Bug# 19835) */ + if (block_info.next_filepos == HA_OFFSET_ERROR) + goto panic; info->rec_cache.seek_not_done=1; if ((b_type=_mi_get_block_info(&block_info,file, block_info.next_filepos)) From 99ad23ec7cde9db5f02ca7ac375b0050289d711f Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Wed, 28 Jun 2006 18:55:30 +0200 Subject: [PATCH 25/74] Bug#14400 - Query joins wrong rows from table which is subject of "concurrent insert" It was possible that fetching a record by an exact key value (including the record pointer) could return a record with a different key value. This happened only if a concurrent insert added a record with the searched key value after the fetching statement locked the table for read. 
The search succeeded on the key value, but the record was rejected as it was past the file length that was remembered at the start of the fetching statement. In other words, it was rejected as being a concurrently inserted record. The action to recover from this problem was to fetch the record that is pointed at by the next key of the index. This was repeated until a record below the file length was found. I now avoid this loop if an exact match was searched for. If this match is beyond the file length, it is now treated as "key not found". There cannot be another key with the same record pointer. --- myisam/mi_rkey.c | 16 ++++++++++++++-- sql/sql_class.cc | 6 +++--- 2 files changed, 17 insertions(+), 5 deletions(-) diff --git a/myisam/mi_rkey.c b/myisam/mi_rkey.c index 70122288d6c..41c2e173b70 100644 --- a/myisam/mi_rkey.c +++ b/myisam/mi_rkey.c @@ -66,6 +66,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, if (fast_mi_readinfo(info)) goto err; + if (share->concurrent_insert) rw_rdlock(&share->key_root_lock[inx]); @@ -77,14 +78,24 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, if (!_mi_search(info,keyinfo, key_buff, use_key_length, myisam_read_vec[search_flag], info->s->state.key_root[inx])) { - while (info->lastpos >= info->state->data_file_length) + /* + If we are searching for an exact key (including the data pointer) + and this was added by an concurrent insert, + then the result is "key not found". + */ + if ((search_flag == HA_READ_KEY_EXACT) && + (info->lastpos >= info->state->data_file_length)) + { + my_errno= HA_ERR_KEY_NOT_FOUND; + info->lastpos= HA_OFFSET_ERROR; + } + else while (info->lastpos >= info->state->data_file_length) { /* Skip rows that are inserted by other threads since we got a lock Note that this can only happen if we are not searching after an exact key, because the keys are sorted according to position */ - if (_mi_search_next(info, keyinfo, info->lastkey, info->lastkey_length, myisam_readnext_vec[search_flag], @@ -92,6 +103,7 @@ int mi_rkey(MI_INFO *info, byte *buf, int inx, const byte *key, uint key_len, break; } } + if (share->concurrent_insert) rw_unlock(&share->key_root_lock[inx]); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 66d23ada163..f8cf8a7a58e 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -477,7 +477,7 @@ bool select_send::send_data(List<Item> &items) { List_iterator_fast<Item> li(items); String *packet= &thd->packet; - DBUG_ENTER("send_data"); + DBUG_ENTER("select_send::send_data"); #ifdef HAVE_INNOBASE_DB /* We may be passing the control from mysqld to the client: release the @@ -611,7 +611,7 @@ select_export::prepare(List<Item> &list) bool select_export::send_data(List<Item> &items) { - DBUG_ENTER("send_data"); + DBUG_ENTER("select_export::send_data"); char buff[MAX_FIELD_WIDTH],null_buff[2],space[MAX_FIELD_WIDTH]; bool space_inited=0; String tmp(buff,sizeof(buff)),*res; @@ -828,7 +828,7 @@ bool select_dump::send_data(List<Item> &items) String tmp(buff,sizeof(buff)),*res; tmp.length(0); Item *item; - DBUG_ENTER("send_data"); + DBUG_ENTER("select_dump::send_data"); if (thd->offset_limit) { // using limit offset,count From f4a095a399a50fa4af919055f93e392d68d9ad6d Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Thu, 29 Jun 2006 16:20:18 +0200 Subject: [PATCH 26/74] ndb - autotest Fix testNodeRestart -n DuringLCP and others (add stopTest() at end of test :-)) --- ndb/test/ndbapi/testNodeRestart.cpp | 1 + 1 file changed, 1 insertion(+) diff --git
a/ndb/test/ndbapi/testNodeRestart.cpp b/ndb/test/ndbapi/testNodeRestart.cpp index 68f101442c5..767ca23b324 100644 --- a/ndb/test/ndbapi/testNodeRestart.cpp +++ b/ndb/test/ndbapi/testNodeRestart.cpp @@ -294,6 +294,7 @@ int runRestarts(NDBT_Context* ctx, NDBT_Step* step){ } i++; } + ctx->stopTest(); return result; } From ab3992771bd7cfd7792e6f4dbc4c2cb4be4522fd Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Fri, 30 Jun 2006 09:41:41 +0200 Subject: [PATCH 27/74] ndb - bug#20774 crash if system restart with more than 4096 fragments solution: continueb enable expand check loop --- ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 1 + ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 54 +++++++++++++++++------ 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 13ae5aa1bbf..7cca121d909 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -232,6 +232,7 @@ #define ZSCAN_MARKERS 18 #define ZOPERATION_EVENT_REP 19 #define ZPREP_DROP_TABLE 20 +#define ZENABLE_EXPAND_CHECK 21 /* ------------------------------------------------------------------------- */ /* NODE STATE DURING SYSTEM RESTART, VARIABLES CNODES_SR_STATE */ diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 3540fc79dff..42e38b41b4b 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -434,6 +434,33 @@ void Dblqh::execCONTINUEB(Signal* signal) checkDropTab(signal); return; break; + case ZENABLE_EXPAND_CHECK: + { + jam(); + fragptr.i = signal->theData[1]; + if (fragptr.i != RNIL) + { + jam(); + ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); + signal->theData[0] = fragptr.p->tabRef; + signal->theData[1] = fragptr.p->fragId; + sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); + + signal->theData[0] = ZENABLE_EXPAND_CHECK; + signal->theData[1] = fragptr.p->nextFrag; + sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); + return; + } + else + { + jam(); + StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); + conf->startingNodeId = getOwnNodeId(); + sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, + StartRecConf::SignalLength, JBB); + return; + } + } default: ndbrequire(false); break; @@ -15503,20 +15530,21 @@ void Dblqh::srFourthComp(Signal* signal) } else if ((cstartType == NodeState::ST_NODE_RESTART) || (cstartType == NodeState::ST_SYSTEM_RESTART)) { jam(); - StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); - conf->startingNodeId = getOwnNodeId(); - sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, - StartRecConf::SignalLength, JBB); - if(cstartType == NodeState::ST_SYSTEM_RESTART){ - fragptr.i = c_redo_log_complete_frags; - while(fragptr.i != RNIL){ - ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); - signal->theData[0] = fragptr.p->tabRef; - signal->theData[1] = fragptr.p->fragId; - sendSignal(DBACC_REF, GSN_EXPANDCHECK2, signal, 2, JBB); - fragptr.i = fragptr.p->nextFrag; - } + if(cstartType == NodeState::ST_SYSTEM_RESTART) + { + jam(); + signal->theData[0] = ZENABLE_EXPAND_CHECK; + signal->theData[1] = c_redo_log_complete_frags; + sendSignal(DBLQH_REF, GSN_CONTINUEB, signal, 2, JBB); + } + else + { + jam(); + StartRecConf * conf = (StartRecConf*)signal->getDataPtrSend(); + conf->startingNodeId = getOwnNodeId(); + sendSignal(cmasterDihBlockref, GSN_START_RECCONF, signal, + StartRecConf::SignalLength, JBB); } } else { 
ndbrequire(false); From 9f542ef0f4f79d0814cbe4668c6cca3c0f4ad649 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Fri, 30 Jun 2006 16:25:07 +0200 Subject: [PATCH 28/74] adapted ndb handler code for tables without primary key and with unique index - added missing retrieval of hidden primary key --- sql/ha_ndbcluster.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 11fdd33fad9..e442d5991fa 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1364,6 +1364,12 @@ int ha_ndbcluster::unique_index_read(const byte *key, m_value[i].ptr= NULL; } } + if (table->primary_key == MAX_KEY) + { + DBUG_PRINT("info", ("Getting hidden key")); + if (get_ndb_value(op, NULL, i, NULL)) + ERR_RETURN(op->getNdbError()); + } if (execute_no_commit_ie(this,trans) != 0) { From 59d69f6d38370dc3a1830c178bc912cab6e1caaf Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Mon, 3 Jul 2006 17:09:32 +0200 Subject: [PATCH 29/74] Fix for Bug #18413 Data usage for varsize columns are not correctly reported to mysqld --- sql/ha_ndbcluster.cc | 9 ++++--- .../ndb/include/kernel/AttributeHeader.hpp | 3 ++- storage/ndb/include/ndbapi/NdbDictionary.hpp | 3 ++- storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 1 + .../src/kernel/blocks/dbtup/DbtupPageMap.cpp | 1 + .../src/kernel/blocks/dbtup/DbtupRoutines.cpp | 21 ++++++++++------ .../src/kernel/blocks/dbtup/DbtupVarAlloc.cpp | 1 + storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp | 24 +++++++++++++------ storage/ndb/tools/desc.cpp | 3 ++- 9 files changed, 46 insertions(+), 20 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bbeea2ca1ba..76a8b66ffcc 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -7420,7 +7420,7 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, do { - Uint64 rows, commits, mem; + Uint64 rows, commits, fixed_mem, var_mem; Uint32 size; Uint32 count= 0; Uint64 sum_rows= 0; @@ -7458,7 +7458,10 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows); pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits); pOp->getValue(NdbDictionary::Column::ROW_SIZE, (char*)&size); - pOp->getValue(NdbDictionary::Column::FRAGMENT_MEMORY, (char*)&mem); + pOp->getValue(NdbDictionary::Column::FRAGMENT_FIXED_MEMORY, + (char*)&fixed_mem); + pOp->getValue(NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY, + (char*)&var_mem); if (pTrans->execute(NdbTransaction::NoCommit, NdbTransaction::AbortOnError, @@ -7474,7 +7477,7 @@ ndb_get_table_statistics(Ndb* ndb, const NDBTAB *ndbtab, sum_commits+= commits; if (sum_row_size < size) sum_row_size= size; - sum_mem+= mem; + sum_mem+= fixed_mem + var_mem; count++; } diff --git a/storage/ndb/include/kernel/AttributeHeader.hpp b/storage/ndb/include/kernel/AttributeHeader.hpp index b17bb456bf0..b78b5912bec 100644 --- a/storage/ndb/include/kernel/AttributeHeader.hpp +++ b/storage/ndb/include/kernel/AttributeHeader.hpp @@ -39,12 +39,13 @@ public: STATIC_CONST( RANGE_NO = 0xFFFB ); // Read range no (when batched ranges) STATIC_CONST( ROW_SIZE = 0xFFFA ); - STATIC_CONST( FRAGMENT_MEMORY= 0xFFF9 ); + STATIC_CONST( FRAGMENT_FIXED_MEMORY= 0xFFF9 ); STATIC_CONST( RECORDS_IN_RANGE = 0xFFF8 ); STATIC_CONST( DISK_REF = 0xFFF7 ); STATIC_CONST( ROWID = 0xFFF6 ); STATIC_CONST( ROW_GCI = 0xFFF5 ); + STATIC_CONST( FRAGMENT_VARSIZED_MEMORY = 0xFFF4 ); // NOTE: in 5.1 ctors and init take size in bytes diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp
b/storage/ndb/include/ndbapi/NdbDictionary.hpp index ea4a2a9ca29..35b0d927bda 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -525,7 +525,8 @@ public: const char* getDefaultValue() const; static const Column * FRAGMENT; - static const Column * FRAGMENT_MEMORY; + static const Column * FRAGMENT_FIXED_MEMORY; + static const Column * FRAGMENT_VARSIZED_MEMORY; static const Column * ROW_COUNT; static const Column * COMMIT_COUNT; static const Column * ROW_SIZE; diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 9bc916c8c22..3cf62fe08ec 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -604,6 +604,7 @@ struct Fragrecord { Uint32 currentPageRange; Uint32 rootPageRange; Uint32 noOfPages; + Uint32 noOfVarPages; Uint32 noOfPagesToGrow; DLList::Head emptyPrimPage; // allocated pages (not init) diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index 90fdd8c69d7..82bac432545 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -351,6 +351,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr) regFragPtr->rootPageRange = RNIL; regFragPtr->currentPageRange = RNIL; regFragPtr->noOfPages = 0; + regFragPtr->noOfVarPages = 0; regFragPtr->noOfPagesToGrow = 2; regFragPtr->nextStartRange = 0; }//initFragRange() diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp index 940ccf54ba7..677eff53559 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupRoutines.cpp @@ -1135,13 +1135,20 @@ Dbtup::read_pseudo(Uint32 attrId, case AttributeHeader::FRAGMENT: * outBuffer = fragptr.p->fragmentId; return 1; - case AttributeHeader::FRAGMENT_MEMORY: - { - Uint64 tmp= fragptr.p->noOfPages; - tmp*= 32768; - memcpy(outBuffer,&tmp,8); - } - return 2; + case AttributeHeader::FRAGMENT_FIXED_MEMORY: + { + Uint64 tmp= fragptr.p->noOfPages; + tmp*= 32768; + memcpy(outBuffer,&tmp,8); + } + return 2; + case AttributeHeader::FRAGMENT_VARSIZED_MEMORY: + { + Uint64 tmp= fragptr.p->noOfVarPages; + tmp*= 32768; + memcpy(outBuffer,&tmp,8); + } + return 2; case AttributeHeader::ROW_SIZE: * outBuffer = tabptr.p->m_offsets[MM].m_fix_header_size << 2; return 1; diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp index 52ab66b5c0e..5f6dd68956a 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupVarAlloc.cpp @@ -302,6 +302,7 @@ Dbtup::get_empty_var_page(Fragrecord* fragPtr) Uint32 cnt; allocConsPages(10, cnt, ptr.i); + fragPtr->noOfVarPages+= cnt; if (unlikely(cnt == 0)) { return RNIL; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 1e33a843a42..b9c03f0b209 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -328,9 +328,14 @@ NdbColumnImpl::create_pseudo(const char * name){ col->m_impl.m_attrId = AttributeHeader::FRAGMENT; col->m_impl.m_attrSize = 4; col->m_impl.m_arraySize = 1; - } else if(!strcmp(name, "NDB$FRAGMENT_MEMORY")){ + } else if(!strcmp(name, "NDB$FRAGMENT_FIXED_MEMORY")){ col->setType(NdbDictionary::Column::Bigunsigned); - 
col->m_impl.m_attrId = AttributeHeader::FRAGMENT_MEMORY; + col->m_impl.m_attrId = AttributeHeader::FRAGMENT_FIXED_MEMORY; + col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; + } else if(!strcmp(name, "NDB$FRAGMENT_VARSIZED_MEMORY")){ + col->setType(NdbDictionary::Column::Bigunsigned); + col->m_impl.m_attrId = AttributeHeader::FRAGMENT_VARSIZED_MEMORY; + col->m_impl.m_attrSize = 8; + col->m_impl.m_arraySize = 1; } else if(!strcmp(name, "NDB$ROW_COUNT")){ @@ -1316,7 +1321,8 @@ NdbDictionaryImpl::~NdbDictionaryImpl() m_globalHash->lock(); if(--f_dictionary_count == 0){ delete NdbDictionary::Column::FRAGMENT; - delete NdbDictionary::Column::FRAGMENT_MEMORY; + delete NdbDictionary::Column::FRAGMENT_FIXED_MEMORY; + delete NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY; delete NdbDictionary::Column::ROW_COUNT; delete NdbDictionary::Column::COMMIT_COUNT; delete NdbDictionary::Column::ROW_SIZE; @@ -1326,7 +1332,8 @@ NdbDictionaryImpl::~NdbDictionaryImpl() delete NdbDictionary::Column::ROWID; delete NdbDictionary::Column::ROW_GCI; NdbDictionary::Column::FRAGMENT= 0; - NdbDictionary::Column::FRAGMENT_MEMORY= 0; + NdbDictionary::Column::FRAGMENT_FIXED_MEMORY= 0; + NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY= 0; NdbDictionary::Column::ROW_COUNT= 0; NdbDictionary::Column::COMMIT_COUNT= 0; NdbDictionary::Column::ROW_SIZE= 0; @@ -1483,8 +1490,10 @@ NdbDictionaryImpl::setTransporter(class Ndb* ndb, if(f_dictionary_count++ == 0){ NdbDictionary::Column::FRAGMENT= NdbColumnImpl::create_pseudo("NDB$FRAGMENT"); - NdbDictionary::Column::FRAGMENT_MEMORY= - NdbColumnImpl::create_pseudo("NDB$FRAGMENT_MEMORY"); + NdbDictionary::Column::FRAGMENT_FIXED_MEMORY= + NdbColumnImpl::create_pseudo("NDB$FRAGMENT_FIXED_MEMORY"); + NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY= + NdbColumnImpl::create_pseudo("NDB$FRAGMENT_VARSIZED_MEMORY"); NdbDictionary::Column::ROW_COUNT= NdbColumnImpl::create_pseudo("NDB$ROW_COUNT"); NdbDictionary::Column::COMMIT_COUNT= @@ -5041,7 +5050,8 @@ template class Vector; template class Vector; const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT = 0; -const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_MEMORY = 0; +const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_FIXED_MEMORY = 0; +const NdbDictionary::Column * NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY = 0; const NdbDictionary::Column * NdbDictionary::Column::ROW_COUNT = 0; const NdbDictionary::Column * NdbDictionary::Column::COMMIT_COUNT = 0; const NdbDictionary::Column * NdbDictionary::Column::ROW_SIZE = 0; diff --git a/storage/ndb/tools/desc.cpp b/storage/ndb/tools/desc.cpp index 7c5ce68c950..49f188d12c0 100644 --- a/storage/ndb/tools/desc.cpp +++ b/storage/ndb/tools/desc.cpp @@ -293,7 +293,8 @@ void print_part_info(Ndb* pNdb, NDBT_Table* pTab) { "Partition", 0, NdbDictionary::Column::FRAGMENT }, { "Row count", 0, NdbDictionary::Column::ROW_COUNT }, { "Commit count", 0, NdbDictionary::Column::COMMIT_COUNT }, - { "Frag memory", 0, NdbDictionary::Column::FRAGMENT_MEMORY }, + { "Frag fixed memory", 0, NdbDictionary::Column::FRAGMENT_FIXED_MEMORY }, + { "Frag varsized memory", 0, NdbDictionary::Column::FRAGMENT_VARSIZED_MEMORY }, { 0, 0, 0 } }; From 98874725e073dfed67e4e372ad33d64c60db4901 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Tue, 4 Jul 2006 11:43:06 +0200 Subject: [PATCH 30/74] Bug #20784 Uninitialized memory in update on table with PK not on first column - partial backport of code from 5.1, do not compare_record for engines that do not read all columns during
update --- sql/ha_ndbcluster.cc | 3 ++- sql/handler.h | 1 + sql/sql_update.cc | 25 ++++++++++++++++++++++--- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index e4ff39797ca..280b23fc73f 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4575,7 +4575,8 @@ ha_ndbcluster::ha_ndbcluster(TABLE *table_arg): HA_NO_PREFIX_CHAR_KEYS | HA_NEED_READ_RANGE_BUFFER | HA_CAN_GEOMETRY | - HA_CAN_BIT_FIELD), + HA_CAN_BIT_FIELD | + HA_PARTIAL_COLUMN_READ), m_share(0), m_use_write(FALSE), m_ignore_dup_key(FALSE), diff --git a/sql/handler.h b/sql/handler.h index 31aac075a5e..aada647e071 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -57,6 +57,7 @@ see mi_rsame/heap_rsame/myrg_rsame */ #define HA_READ_RND_SAME (1 << 0) +#define HA_PARTIAL_COLUMN_READ (1 << 1) /* read may not return all columns */ #define HA_TABLE_SCAN_ON_INDEX (1 << 2) /* No separate data/index file */ #define HA_REC_NOT_IN_SEQ (1 << 3) /* ha_info don't return recnumber; It returns a position to ha_r_rnd */ diff --git a/sql/sql_update.cc b/sql/sql_update.cc index c2b7624c9e7..5237b3a1c05 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -120,6 +120,7 @@ int mysql_update(THD *thd, bool using_limit= limit != HA_POS_ERROR; bool safe_update= thd->options & OPTION_SAFE_UPDATES; bool used_key_is_modified, transactional_table; + bool can_compare_record; int res; int error; uint used_index= MAX_KEY; @@ -433,6 +434,13 @@ int mysql_update(THD *thd, (MODE_STRICT_TRANS_TABLES | MODE_STRICT_ALL_TABLES))); + /* + We can use compare_record() to optimize away updates if + the table handler is returning all columns + */ + can_compare_record= !(table->file->table_flags() & + HA_PARTIAL_COLUMN_READ); + while (!(error=info.read_record(&info)) && !thd->killed) { if (!(select && select->skip_record())) @@ -445,7 +453,7 @@ int mysql_update(THD *thd, found++; - if (compare_record(table, query_id)) + if (!can_compare_record || compare_record(table, query_id)) { if ((res= table_list->view_check_option(thd, ignore)) != VIEW_CHECK_OK) @@ -1248,8 +1256,15 @@ bool multi_update::send_data(List ¬_used_values) uint offset= cur_table->shared; table->file->position(table->record[0]); + /* + We can use compare_record() to optimize away updates if + the table handler is returning all columns + */ if (table == table_to_update) { + bool can_compare_record; + can_compare_record= !(table->file->table_flags() & + HA_PARTIAL_COLUMN_READ); table->status|= STATUS_UPDATED; store_record(table,record[1]); if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset], @@ -1259,7 +1274,7 @@ bool multi_update::send_data(List ¬_used_values) DBUG_RETURN(1); found++; - if (compare_record(table, thd->query_id)) + if (!can_compare_record || compare_record(table, thd->query_id)) { int error; if ((error= cur_table->view_check_option(thd, ignore)) != @@ -1376,6 +1391,7 @@ int multi_update::do_updates(bool from_send_error) for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local) { byte *ref_pos; + bool can_compare_record; table = cur_table->table; if (table == table_to_update) @@ -1402,6 +1418,9 @@ int multi_update::do_updates(bool from_send_error) if ((local_error = tmp_table->file->ha_rnd_init(1))) goto err; + can_compare_record= !(table->file->table_flags() & + HA_PARTIAL_COLUMN_READ); + ref_pos= (byte*) tmp_table->field[0]->ptr; for (;;) { @@ -1431,7 +1450,7 @@ int multi_update::do_updates(bool from_send_error) TRG_ACTION_BEFORE, TRUE)) goto err2; - if (compare_record(table, 
thd->query_id)) + if (!can_compare_record || compare_record(table, thd->query_id)) { if ((local_error=table->file->update_row(table->record[1], table->record[0]))) From b2f58e94b145bdc644dda3c1bc6413d14f3bc3b0 Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Tue, 4 Jul 2006 12:34:23 +0200 Subject: [PATCH 31/74] After merge fix --- mysql-test/r/lock_multi.result | 16 ------ mysql-test/t/lock_multi.test | 98 ++++++++++++---------------------- 2 files changed, 34 insertions(+), 80 deletions(-) diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result index d7ba9174408..8ff02d898a3 100644 --- a/mysql-test/r/lock_multi.result +++ b/mysql-test/r/lock_multi.result @@ -95,19 +95,3 @@ lock tables t1 write; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; // unlock tables; drop table t1; -use mysql; -LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE; -FLUSH TABLES; -use mysql; - SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1; -OPTIMIZE TABLES columns_priv, db, host, user; -Table Op Msg_type Msg_text -mysql.columns_priv optimize status OK -mysql.db optimize status OK -mysql.host optimize status OK -mysql.user optimize status OK -UNLOCK TABLES; -Select_priv -N -use test; -use test; diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index 37b8fbda376..9c9e68f931f 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -127,70 +127,6 @@ connection locker; unlock tables; drop table t1; -# -# Bug#16986 - Deadlock condition with MyISAM tables -# -connection locker; -use mysql; -LOCK TABLES columns_priv WRITE, db WRITE, host WRITE, user WRITE; -FLUSH TABLES; ---sleep 1 -# -connection reader; -use mysql; -#NOTE: This must be a multi-table select, otherwise the deadlock will not occur -send SELECT user.Select_priv FROM user, db WHERE user.user = db.user LIMIT 1; ---sleep 1 -# -connection locker; -# Make test case independent from earlier grants. ---replace_result "Table is already up to date" "OK" -OPTIMIZE TABLES columns_priv, db, host, user; -UNLOCK TABLES; -# -connection reader; -reap; -use test; -# -connection locker; -use test; -# -connection default; - -# -# Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock -# -connect (con1,localhost,root,,); -connect (con2,localhost,root,,); -# -connection con1; -CREATE DATABASE mysqltest_1; -FLUSH TABLES WITH READ LOCK; -# -# With bug in place: acquire LOCK_mysql_create_table and -# wait in wait_if_global_read_lock(). -connection con2; -send DROP DATABASE mysqltest_1; ---sleep 1 -# -# With bug in place: try to acquire LOCK_mysql_create_table... -# When fixed: Reject dropping db because of the read lock. -connection con1; ---error ER_CANT_UPDATE_WITH_READLOCK -DROP DATABASE mysqltest_1; -UNLOCK TABLES; -# -connection con2; -reap; -# -connection default; -disconnect con1; -disconnect con2; -# This must have been dropped by connection 2 already, -# which waited until the global read lock was released. 
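// Aside on PATCH 30 above: a minimal sketch of its guard, assuming
// stand-in names (must_write_update, PARTIAL_COLUMN_READ) rather than
// the server API. Only trust a before/after row-image comparison when
// the engine read every column; with partial reads the "before" image
// holds uninitialized bytes (the Bug #20784 symptom), so the update
// must always be written.
#include <cstddef>
#include <cstring>

constexpr unsigned PARTIAL_COLUMN_READ = 1u << 1; // mirrors HA_PARTIAL_COLUMN_READ

bool must_write_update(unsigned table_flags,
                       const unsigned char* before,
                       const unsigned char* after,
                       std::size_t reclen)
{
  const bool can_compare = !(table_flags & PARTIAL_COLUMN_READ);
  return !can_compare || std::memcmp(before, after, reclen) != 0;
}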
---error ER_DB_DROP_EXISTS -DROP DATABASE mysqltest_1; - # # Bug#16986 - Deadlock condition with MyISAM tables # @@ -270,6 +206,40 @@ UNLOCK TABLES; connection default; DROP TABLE t1; +# +# Bug#19815 - CREATE/RENAME/DROP DATABASE can deadlock on a global read lock +# +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); +# +connection con1; +CREATE DATABASE mysqltest_1; +FLUSH TABLES WITH READ LOCK; +# +# With bug in place: acquire LOCK_mysql_create_table and +# wait in wait_if_global_read_lock(). +connection con2; +send DROP DATABASE mysqltest_1; +--sleep 1 +# +# With bug in place: try to acquire LOCK_mysql_create_table... +# When fixed: Reject dropping db because of the read lock. +connection con1; +--error ER_CANT_UPDATE_WITH_READLOCK +DROP DATABASE mysqltest_1; +UNLOCK TABLES; +# +connection con2; +reap; +# +connection default; +disconnect con1; +disconnect con2; +# This must have been dropped by connection 2 already, +# which waited until the global read lock was released. +--error ER_DB_DROP_EXISTS +DROP DATABASE mysqltest_1; + # # Bug #17264: MySQL Server freeze # From 59958be558e9ba9822e1f6da5ea797733d798ea6 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Tue, 4 Jul 2006 12:57:23 +0200 Subject: [PATCH 32/74] disabled ndb_autodiscover3 --- mysql-test/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 6f26847f8d7..5e6ab1dd728 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -18,6 +18,7 @@ #im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog +ndb_autodiscover3 : BUG#20843 2006-07-04 tomas ndb_autodiscover3 fails randomly #ndb_binlog_discover : BUG#19395 2006-04-28 tomas/knielsen mysqld does not always detect cluster shutdown #ndb_cache2 : BUG#18597 2006-03-28 brian simultaneous drop table and ndb statistics update triggers node failure #ndb_cache_multi2 : BUG#18597 2006-04-10 kent simultaneous drop table and ndb statistics update triggers node failure From 2527b0f13a276eb3f94ef045e13361b2078624ba Mon Sep 17 00:00:00 2001 From: "andrey@lmy004." <> Date: Tue, 4 Jul 2006 17:30:39 +0200 Subject: [PATCH 33/74] fix test. This should remove the problem behind the fix for bug#20624: events_logs_tests.test fails randomly --- .bzrignore | 1 + mysql-test/r/events_logs_tests.result | 2 +- mysql-test/t/events_logs_tests.test | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.bzrignore b/.bzrignore index e07aa0cf86a..77db3c2985a 100644 --- a/.bzrignore +++ b/.bzrignore @@ -1778,3 +1778,4 @@ vio/viotest-sslconnect.cpp vio/viotest.cpp zlib/*.ds?
zlib/*.vcproj +server-tools/instance-manager/net_serv.cc diff --git a/mysql-test/r/events_logs_tests.result b/mysql-test/r/events_logs_tests.result index 9202d63fd2c..950090399d5 100644 --- a/mysql-test/r/events_logs_tests.result +++ b/mysql-test/r/events_logs_tests.result @@ -49,8 +49,8 @@ USER_HOST SLEEPVAL events_test SELECT SLEEP(2) SET SESSION long_query_time=300; "Make it quite long" TRUNCATE mysql.slow_log; -SET SESSION long_query_time=1; CREATE TABLE slow_event_test (slo_val tinyint, val tinyint); +SET SESSION long_query_time=1; "This won't go to the slow log" CREATE EVENT long_event ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO slow_event_test SELECT @@long_query_time, SLEEP(3); SELECT * FROM slow_event_test; diff --git a/mysql-test/t/events_logs_tests.test b/mysql-test/t/events_logs_tests.test index 5f4ec852cd3..5c252b1174b 100644 --- a/mysql-test/t/events_logs_tests.test +++ b/mysql-test/t/events_logs_tests.test @@ -69,8 +69,8 @@ SELECT user_host, query_time, db, sql_text FROM mysql.slow_log; SET SESSION long_query_time=300; --echo "Make it quite long" TRUNCATE mysql.slow_log; -SET SESSION long_query_time=1; CREATE TABLE slow_event_test (slo_val tinyint, val tinyint); +SET SESSION long_query_time=1; --echo "This won't go to the slow log" CREATE EVENT long_event ON SCHEDULE EVERY 1 MINUTE DO INSERT INTO slow_event_test SELECT @@long_query_time, SLEEP(3); SELECT * FROM slow_event_test; From 6512316c3aab352a739835c547ba580874cef8ad Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Tue, 4 Jul 2006 17:37:44 +0200 Subject: [PATCH 34/74] Bug #20742 Assertion in drop of ndb binlog events after node restart - make sure TE_ACTIVE event comes when node restarts so that ref counting is correct for drop of event --- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 3 +- storage/ndb/src/ndbapi/ClusterMgr.cpp | 3 ++ .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 40 +++++++++++++++++++ .../ndb/src/ndbapi/NdbEventOperationImpl.hpp | 1 + storage/ndb/src/ndbapi/Ndbif.cpp | 15 +++++++ storage/ndb/src/ndbapi/TransporterFacade.cpp | 3 ++ 6 files changed, 64 insertions(+), 1 deletion(-) diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 42666a9e5d9..bb42c8874c5 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -2667,7 +2667,8 @@ Suma::reportAllSubscribers(Signal *signal, { SubTableData * data = (SubTableData*)signal->getDataPtrSend(); - if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE) + if (table_event == NdbDictionary::Event::_TE_SUBSCRIBE && + !c_startup.m_restart_server_node_id) { data->gci = m_last_complete_gci + 1; data->tableId = subPtr.p->m_tableId; diff --git a/storage/ndb/src/ndbapi/ClusterMgr.cpp b/storage/ndb/src/ndbapi/ClusterMgr.cpp index 63fdb73c49f..49815ae6c13 100644 --- a/storage/ndb/src/ndbapi/ClusterMgr.cpp +++ b/storage/ndb/src/ndbapi/ClusterMgr.cpp @@ -396,6 +396,8 @@ ClusterMgr::execNF_COMPLETEREP(const Uint32 * theData){ void ClusterMgr::reportConnected(NodeId nodeId){ + DBUG_ENTER("ClusterMgr::reportConnected"); + DBUG_PRINT("info", ("nodeId: %u", nodeId)); /** * Ensure that we are sending heartbeat every 100 ms * until we have got the first reply from NDB providing @@ -421,6 +423,7 @@ ClusterMgr::reportConnected(NodeId nodeId){ theNode.nfCompleteRep = true; theFacade.ReportNodeAlive(nodeId); + DBUG_VOID_RETURN; } void diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index 
6f096046440..f766c769b24 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -1521,6 +1521,46 @@ NdbEventBuffer::complete_outof_order_gcis() ndbout_c("complete_outof_order_gcis: m_latestGCI: %lld", m_latestGCI); } +void +NdbEventBuffer::report_node_connected(Uint32 node_id) +{ + NdbEventOperation* op= m_ndb->getEventOperation(0); + if (op == 0) + return; + + DBUG_ENTER("NdbEventBuffer::report_node_connected"); + SubTableData data; + LinearSectionPtr ptr[3]; + bzero(&data, sizeof(data)); + bzero(ptr, sizeof(ptr)); + + data.tableId = ~0; + data.operation = NdbDictionary::Event::_TE_ACTIVE; + data.req_nodeid = (Uint8)node_id; + data.ndbd_nodeid = (Uint8)node_id; + data.logType = SubTableData::LOG; + data.gci = m_latestGCI + 1; + /** + * Insert this event for each operation + */ + { + // no need to lock()/unlock(), receive thread calls this + NdbEventOperationImpl* impl = &op->m_impl; + do if (!impl->m_node_bit_mask.isclear()) + { + data.senderData = impl->m_oid; + insertDataL(impl, &data, ptr); + } while((impl = impl->m_next)); + for (impl = m_dropped_ev_op; impl; impl = impl->m_next) + if (!impl->m_node_bit_mask.isclear()) + { + data.senderData = impl->m_oid; + insertDataL(impl, &data, ptr); + } + } + DBUG_VOID_RETURN; +} + void NdbEventBuffer::report_node_failure(Uint32 node_id) { diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index c14ca83128f..adbef2fd125 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -422,6 +422,7 @@ public: void execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep); void complete_outof_order_gcis(); + void report_node_connected(Uint32 node_id); void report_node_failure(Uint32 node_id); void completeClusterFailed(); diff --git a/storage/ndb/src/ndbapi/Ndbif.cpp b/storage/ndb/src/ndbapi/Ndbif.cpp index ecaf6a3f435..0527744afe1 100644 --- a/storage/ndb/src/ndbapi/Ndbif.cpp +++ b/storage/ndb/src/ndbapi/Ndbif.cpp @@ -177,6 +177,7 @@ Ndb::executeMessage(void* NdbObject, void Ndb::connected(Uint32 ref) { +// cluster connect, a_node == own reference theMyRef= ref; Uint32 tmpTheNode= refToNode(ref); Uint64 tBlockNo= refToBlock(ref); @@ -209,16 +210,30 @@ void Ndb::connected(Uint32 ref) theNode= tmpTheNode; // flag that Ndb object is initialized } +void Ndb::report_node_connected(Uint32 nodeId) +{ + if (theEventBuffer) + { + // node connected + // eventOperations in the ndb object should be notified + theEventBuffer->report_node_connected(nodeId); + } +} + void Ndb::statusMessage(void* NdbObject, Uint32 a_node, bool alive, bool nfComplete) { DBUG_ENTER("Ndb::statusMessage"); + DBUG_PRINT("info", ("a_node: %u alive: %u nfComplete: %u", + a_node, alive, nfComplete)); Ndb* tNdb = (Ndb*)NdbObject; if (alive) { if (nfComplete) { + // cluster connect, a_node == own reference tNdb->connected(a_node); DBUG_VOID_RETURN; }//if + tNdb->report_node_connected(a_node); } else { if (nfComplete) { tNdb->report_node_failure_completed(a_node); diff --git a/storage/ndb/src/ndbapi/TransporterFacade.cpp b/storage/ndb/src/ndbapi/TransporterFacade.cpp index 15127953051..2f421271e91 100644 --- a/storage/ndb/src/ndbapi/TransporterFacade.cpp +++ b/storage/ndb/src/ndbapi/TransporterFacade.cpp @@ -794,6 +794,8 @@ TransporterFacade::connected() void TransporterFacade::ReportNodeDead(NodeId tNodeId) { + DBUG_ENTER("TransporterFacade::ReportNodeDead"); + DBUG_PRINT("enter",("nodeid= %d", tNodeId)); 
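// Aside on PATCH 34 above (illustrative sketch only; EventRecord and
// EventOp are hypothetical types, not the NDB API): on node reconnect,
// fabricate one TE_ACTIVE record per event operation that still has
// subscribers, stamped with a GCI after everything already buffered, so
// a later drop sees balanced subscribe/active reference counts.
#include <cstdint>
#include <vector>

struct EventRecord
{
  std::uint32_t table_id;   // ~0u marks a synthetic, non-table event
  std::uint32_t node_id;
  std::uint64_t gci;
  bool active;              // true == TE_ACTIVE
};

struct EventOp
{
  bool has_subscribers;     // mirrors !m_node_bit_mask.isclear()
  std::vector<EventRecord> buffer;
};

void report_node_connected(std::vector<EventOp*>& ops,
                           std::uint32_t node, std::uint64_t latest_gci)
{
  const EventRecord rec{~0u, node, latest_gci + 1, true};
  for (EventOp* op : ops)
    if (op->has_subscribers)
      op->buffer.push_back(rec);   // one synthetic event per live operation
}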
/** * When a node fails we must report this to each Ndb object. * The function that is used for communicating node failures is called. @@ -810,6 +812,7 @@ TransporterFacade::ReportNodeDead(NodeId tNodeId) (*RegPC) (obj, tNodeId, false, false); } } + DBUG_VOID_RETURN; } void From 980852eed3545d8392ef8cf454747cdaa7f6b6c0 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Tue, 4 Jul 2006 17:58:20 +0200 Subject: [PATCH 35/74] Bug #20742 Assertion in drop of ndb binlog events after node restart --- storage/ndb/include/ndbapi/Ndb.hpp | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/ndb/include/ndbapi/Ndb.hpp b/storage/ndb/include/ndbapi/Ndb.hpp index dcd03cdc467..07f11f6e78a 100644 --- a/storage/ndb/include/ndbapi/Ndb.hpp +++ b/storage/ndb/include/ndbapi/Ndb.hpp @@ -1553,6 +1553,7 @@ private: const char* aCatalogName, const char* aSchemaName); void connected(Uint32 block_reference); + void report_node_connected(Uint32 nodeId); NdbTransaction* startTransactionLocal(Uint32 aPrio, Uint32 aFragmentId); From d3b52bd475de1f34161818cd029390b435afc16e Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Tue, 4 Jul 2006 18:29:21 +0200 Subject: [PATCH 36/74] Bug #20077 Cluster to only support partition by key, needs to error on others --- mysql-test/t/ndb_blob_partition.test | 4 ++++ mysql-test/t/ndb_partition_error.test | 3 +++ mysql-test/t/ndb_partition_list.test | 4 ++++ mysql-test/t/ndb_partition_range.test | 4 ++++ sql/ha_ndbcluster.cc | 2 -- 5 files changed, 15 insertions(+), 2 deletions(-) diff --git a/mysql-test/t/ndb_blob_partition.test b/mysql-test/t/ndb_blob_partition.test index a3948cc9491..6173c9d9851 100644 --- a/mysql-test/t/ndb_blob_partition.test +++ b/mysql-test/t/ndb_blob_partition.test @@ -1,6 +1,10 @@ --source include/have_ndb.inc -- source include/not_embedded.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/mysql-test/t/ndb_partition_error.test b/mysql-test/t/ndb_partition_error.test index 06581f1270f..9db2a6a6f6d 100644 --- a/mysql-test/t/ndb_partition_error.test +++ b/mysql-test/t/ndb_partition_error.test @@ -10,6 +10,9 @@ drop table if exists t1; --enable_warnings +--disable_query_log +set new=on; +--enable_query_log # # Partition by range, generate node group error # diff --git a/mysql-test/t/ndb_partition_list.test b/mysql-test/t/ndb_partition_list.test index 2ad37b8768c..ccfcdbc84f4 100644 --- a/mysql-test/t/ndb_partition_list.test +++ b/mysql-test/t/ndb_partition_list.test @@ -5,6 +5,10 @@ # #-- source include/have_partition.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/mysql-test/t/ndb_partition_range.test b/mysql-test/t/ndb_partition_range.test index 981467d4055..7952ba502d2 100644 --- a/mysql-test/t/ndb_partition_range.test +++ b/mysql-test/t/ndb_partition_range.test @@ -6,6 +6,10 @@ # #-- source include/have_partition.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index bbeea2ca1ba..a3ea70aafd9 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -9866,7 +9866,6 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, } else { -#ifdef NOT_YET if (!current_thd->variables.new_mode) { push_warning_printf(current_thd, MYSQL_ERROR::WARN_LEVEL_ERROR, @@ -9877,7 +9876,6 @@ uint 
ha_ndbcluster::set_up_partition_info(partition_info *part_info, " use --new option to enable"); return HA_ERR_UNSUPPORTED; } -#endif /* Create a shadow field for those tables that have user defined partitioning. This field stores the value of the partition From 5ebc51cc701f9b7aa97744a22539593ca716732f Mon Sep 17 00:00:00 2001 From: "knielsen@rt.int.sifira.dk" <> Date: Tue, 4 Jul 2006 22:37:32 +0200 Subject: [PATCH 37/74] Fix hardcoded /tmp path (which causes problems when running multiple tests on a single machine) to use $MYSQLTEST_VARDIR/tmp instead. --- mysql-test/r/log_state.result | 2 +- mysql-test/t/log_state.test | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/log_state.result b/mysql-test/r/log_state.result index 43735243787..0547c5a5bbf 100644 --- a/mysql-test/r/log_state.result +++ b/mysql-test/r/log_state.result @@ -102,7 +102,7 @@ show variables like 'general_log_file'; Variable_name Value general_log_file # set global general_log= OFF; -set global general_log_file='/tmp/log.master'; +set global general_log_file='MYSQLTEST_VARDIR/tmp/log.master'; set global general_log= ON; create table t1(f1 int); drop table t1; diff --git a/mysql-test/t/log_state.test b/mysql-test/t/log_state.test index 41fbd068dce..6fc0f3421a7 100644 --- a/mysql-test/t/log_state.test +++ b/mysql-test/t/log_state.test @@ -80,7 +80,8 @@ set global general_log_file=''; --replace_column 2 # show variables like 'general_log_file'; set global general_log= OFF; -set global general_log_file='/tmp/log.master'; +--replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR +eval set global general_log_file='$MYSQLTEST_VARDIR/tmp/log.master'; set global general_log= ON; create table t1(f1 int); drop table t1; From 8c45fc2925e8f4ce43063234cb5e38acd9ae4cd6 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 01:50:16 +0200 Subject: [PATCH 38/74] Bug #20077 Cluster to only support partition by key, needs to error on others --- mysql-test/t/ndb_dd_backuprestore.test | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mysql-test/t/ndb_dd_backuprestore.test b/mysql-test/t/ndb_dd_backuprestore.test index be6d73e27b4..0dc6c2ae206 100644 --- a/mysql-test/t/ndb_dd_backuprestore.test +++ b/mysql-test/t/ndb_dd_backuprestore.test @@ -6,6 +6,10 @@ -- source include/have_ndb.inc +--disable_query_log +set new=on; +--enable_query_log + --disable_warnings DROP TABLE IF EXISTS test.t1; DROP TABLE IF EXISTS test.t2; From 35938c56f33b6786a3a717f01b86c6d32aac93a7 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 01:52:38 +0200 Subject: [PATCH 39/74] Bug #20077 Cluster to only support partition by key, needs to error on others --- sql/ha_ndbcluster.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a3ea70aafd9..33c79e1e4fd 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -9874,7 +9874,7 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, ndbcluster_hton_name, "LIST, RANGE and HASH partition disabled by default," " use --new option to enable"); - return HA_ERR_UNSUPPORTED; + DBUG_RETURN(HA_ERR_UNSUPPORTED); } /* Create a shadow field for those tables that have user defined From 1b7da4b8ed6a6effd7e4dc6ff64786d1c5fa01f3 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 14:20:14 +0200 Subject: [PATCH 40/74] extra logging and asserts to try to track down the spurious completeCluster failure assertion; corrected
error in invalidation of dict cache which may be causing the valgrind warning at times --- sql/ha_ndbcluster_binlog.cc | 5 ++ storage/ndb/src/ndbapi/DictCache.cpp | 13 +++-- .../ndb/src/ndbapi/NdbEventOperationImpl.cpp | 53 +++++++++++++++++-- .../ndb/src/ndbapi/NdbEventOperationImpl.hpp | 1 + 4 files changed, 62 insertions(+), 10 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index b88002b8529..0c0b7ae7a19 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3466,6 +3466,11 @@ restart: ndb_latest_applied_binlog_epoch= 0; ndb_latest_received_binlog_epoch= 0; } + if (ndb_extra_logging) + { + sql_print_information("NDB Binlog: starting log at epoch %u", + (unsigned)schema_gci); + } } } { diff --git a/storage/ndb/src/ndbapi/DictCache.cpp b/storage/ndb/src/ndbapi/DictCache.cpp index 8a0bf2f8e8b..c06bb6fc62a 100644 --- a/storage/ndb/src/ndbapi/DictCache.cpp +++ b/storage/ndb/src/ndbapi/DictCache.cpp @@ -312,12 +312,15 @@ GlobalDictCache::invalidate_all() if (vers->size()) { TableVersion * ver = & vers->back(); - ver->m_impl->m_status = NdbDictionary::Object::Invalid; - ver->m_status = DROPPED; - if (ver->m_refCount == 0) + if (ver->m_status != RETREIVING) { - delete ver->m_impl; - vers->erase(vers->size() - 1); + ver->m_impl->m_status = NdbDictionary::Object::Invalid; + ver->m_status = DROPPED; + if (ver->m_refCount == 0) + { + delete ver->m_impl; + vers->erase(vers->size() - 1); + } } } curr = m_tableHash.getNext(curr); diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index f766c769b24..5cf974b6467 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -942,6 +942,7 @@ NdbEventBuffer::NdbEventBuffer(Ndb *ndb) : { #ifdef VM_TRACE m_latest_command= "NdbEventBuffer::NdbEventBuffer"; + m_flush_gci = 0; #endif if ((p_cond = NdbCondition_Create()) == NULL) { @@ -1109,6 +1110,8 @@ NdbEventBuffer::flushIncompleteEvents(Uint64 gci) /** * Find min complete gci */ + // called by user thread, so we need to lock the data + lock(); Uint32 i; Uint32 sz= m_active_gci.size(); Gci_container* array = (Gci_container*)m_active_gci.getBase(); @@ -1127,6 +1130,10 @@ NdbEventBuffer::flushIncompleteEvents(Uint64 gci) bzero(tmp, sizeof(Gci_container)); } } +#ifdef VM_TRACE + m_flush_gci = gci; +#endif + unlock(); return 0; } @@ -1301,7 +1308,11 @@ operator<<(NdbOut& out, const Gci_container_pod& gci) static Gci_container* -find_bucket_chained(Vector * active, Uint64 gci) +find_bucket_chained(Vector * active, Uint64 gci +#ifdef VM_TRACE + ,Uint64 flush_gci +#endif + ) { Uint32 pos = (gci & ACTIVE_GCI_MASK); Gci_container *bucket= ((Gci_container*)active->getBase()) + pos; @@ -1322,6 +1333,13 @@ find_bucket_chained(Vector * active, Uint64 gci) bzero(bucket, sizeof(Gci_container)); bucket->m_gci = gci; bucket->m_gcp_complete_rep_count = ~(Uint32)0; +#ifdef VM_TRACE + if (gci < flush_gci) + { + ndbout_c("received old gci %llu < flush gci %llu", gci, flush_gci); + assert(false); + } +#endif return bucket; } move_pos += ACTIVE_GCI_DIRECTORY_SIZE; @@ -1336,7 +1354,16 @@ find_bucket_chained(Vector * active, Uint64 gci) bucket += ACTIVE_GCI_DIRECTORY_SIZE; if(bucket->m_gci == gci) + { +#ifdef VM_TRACE + if (gci < flush_gci) + { + ndbout_c("received old gci %llu < flush gci %llu", gci, flush_gci); + assert(false); + } +#endif return bucket; + } } while(pos < size); @@ -1346,14 +1373,22 @@ find_bucket_chained(Vector * active, Uint64 gci) 
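// Aside on the VM_TRACE guard in PATCH 40 (illustrative sketch, not the
// event-buffer source): once flushIncompleteEvents(gci) has discarded
// buckets below a GCI, data arriving for an older GCI indicates a
// protocol error, so debug builds remember the flush point and assert
// on stale arrivals. GciBuckets is a hypothetical stand-in.
#include <cassert>
#include <cstdint>

struct GciBuckets
{
  std::uint64_t flush_gci = 0;            // highest GCI flushed so far

  void flush_incomplete(std::uint64_t gci)
  {
    flush_gci = gci;                      // ... drop buckets below gci ...
  }

  void on_event_data(std::uint64_t gci)
  {
    assert(gci >= flush_gci && "received old gci < flush gci");
    // ... find_bucket(gci) and buffer the event ...
  }
};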
inline Gci_container* -find_bucket(Vector * active, Uint64 gci) +find_bucket(Vector * active, Uint64 gci +#ifdef VM_TRACE + ,Uint64 flush_gci +#endif + ) { Uint32 pos = (gci & ACTIVE_GCI_MASK); Gci_container *bucket= ((Gci_container*)active->getBase()) + pos; if(likely(gci == bucket->m_gci)) return bucket; - return find_bucket_chained(active,gci); + return find_bucket_chained(active,gci +#ifdef VM_TRACE + , flush_gci +#endif + ); } static @@ -1386,7 +1421,11 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) const Uint64 gci= rep->gci; const Uint32 cnt= rep->gcp_complete_rep_count; - Gci_container *bucket = find_bucket(&m_active_gci, gci); + Gci_container *bucket = find_bucket(&m_active_gci, gci +#ifdef VM_TRACE + , m_flush_gci +#endif + ); if (unlikely(bucket == 0)) { @@ -1752,7 +1791,11 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, if ( likely((Uint32)op->mi_type & (1 << (Uint32)sdata->operation)) ) { - Gci_container* bucket= find_bucket(&m_active_gci, gci); + Gci_container* bucket= find_bucket(&m_active_gci, gci +#ifdef VM_TRACE + , m_flush_gci +#endif + ); DBUG_PRINT_EVENT("info", ("data insertion in eventId %d", op->m_eventId)); DBUG_PRINT_EVENT("info", ("gci=%d tab=%d op=%d node=%d", diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp index adbef2fd125..561e79a137e 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.hpp @@ -463,6 +463,7 @@ public: #ifdef VM_TRACE const char *m_latest_command; + Uint64 m_flush_gci; #endif Ndb *m_ndb; From d951bc61ffdfd070bcbcd871b81c414c886c6d8a Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Wed, 5 Jul 2006 15:04:47 +0200 Subject: [PATCH 41/74] Bug #19645 Data Node hangs in phase 100: distribute API_FAIL_REQ so all nodes have the same view of what API's are connected --- storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp | 1 + .../ndb/src/kernel/blocks/qmgr/QmgrInit.cpp | 1 + .../ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 25 +++++++++++++++++++ 3 files changed, 27 insertions(+) diff --git a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp index 70c0fdfc988..de080237668 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp +++ b/storage/ndb/src/kernel/blocks/qmgr/Qmgr.hpp @@ -248,6 +248,7 @@ private: void execAPI_FAILCONF(Signal* signal); void execREAD_NODESREQ(Signal* signal); void execSET_VAR_REQ(Signal* signal); + void execAPI_FAILREQ(Signal* signal); void execREAD_NODESREF(Signal* signal); void execREAD_NODESCONF(Signal* signal); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp index 6ee24561b0a..8ec5e681045 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrInit.cpp @@ -81,6 +81,7 @@ Qmgr::Qmgr(Block_context& ctx) addRecSignal(GSN_API_REGREQ, &Qmgr::execAPI_REGREQ); addRecSignal(GSN_API_VERSION_REQ, &Qmgr::execAPI_VERSION_REQ); addRecSignal(GSN_DISCONNECT_REP, &Qmgr::execDISCONNECT_REP); + addRecSignal(GSN_API_FAILREQ, &Qmgr::execAPI_FAILREQ); addRecSignal(GSN_API_FAILCONF, &Qmgr::execAPI_FAILCONF); addRecSignal(GSN_READ_NODESREQ, &Qmgr::execREAD_NODESREQ); addRecSignal(GSN_SET_VAR_REQ, &Qmgr::execSET_VAR_REQ); diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 1eac369ec65..e3cb45bb1e7 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ 
b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2338,6 +2338,8 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) ndbrequire(failedNodePtr.p->failState == NORMAL); failedNodePtr.p->failState = WAITING_FOR_FAILCONF1; + NodeReceiverGroup rg(QMGR, c_clusterNodes); + sendSignal(rg, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); @@ -2361,6 +2363,29 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) CloseComReqConf::SignalLength, JBA); }//Qmgr::sendApiFailReq() +void Qmgr::execAPI_FAILREQ(Signal* signal) +{ + jamEntry(); + NodeRecPtr failedNodePtr; + failedNodePtr.i = signal->theData[0]; + // signal->theData[1] == QMGR_REF + ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec); + + ndbout_c("Qmgr::execAPI_FAILREQ node %d", failedNodePtr.i); + + ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB); + + // ignore if api not active + if (failedNodePtr.p->phase != ZAPI_ACTIVE) + return; + + signal->theData[0] = NDB_LE_Disconnected; + signal->theData[1] = failedNodePtr.i; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 2, JBB); + + node_failed(signal, failedNodePtr.i); +} + void Qmgr::execAPI_FAILCONF(Signal* signal) { NodeRecPtr failedNodePtr; From 7c650824dc9e289f44e9a969435b1cbd39eeccc9 Mon Sep 17 00:00:00 2001 From: "mskold@mysql.com" <> Date: Wed, 5 Jul 2006 15:32:17 +0200 Subject: [PATCH 42/74] Bug #19645 Data Node hangs in phase 100: post review fix, removed test printout --- storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index e3cb45bb1e7..7b461f93719 100644 --- a/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/storage/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2370,8 +2370,6 @@ void Qmgr::execAPI_FAILREQ(Signal* signal) failedNodePtr.i = signal->theData[0]; // signal->theData[1] == QMGR_REF ptrCheckGuard(failedNodePtr, MAX_NODES, nodeRec); - - ndbout_c("Qmgr::execAPI_FAILREQ node %d", failedNodePtr.i); ndbrequire(getNodeInfo(failedNodePtr.i).getType() != NodeInfo::DB); From b21acbe089b72b2b1c482352f752c098e8298478 Mon Sep 17 00:00:00 2001 From: "ingo@mysql.com" <> Date: Wed, 5 Jul 2006 16:23:18 +0200 Subject: [PATCH 43/74] After merge fix --- mysql-test/r/gis.result | 19 ++++++++++--------- mysql-test/r/key.result | 4 ++-- mysql-test/t/gis.test | 12 +++++++----- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 4140e13da12..7a0f689df36 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -671,15 +671,6 @@ POINT(10 10) select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))); (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))) POINT(10 10) -create table t1 (g GEOMETRY); -select * from t1; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def test t1 t1 g g 255 4294967295 0 Y 144 0 63 -g -select asbinary(g) from t1; -Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def asbinary(g) 252 8192 0 Y 128 0 63 -asbinary(g) create table t1 (s1 geometry not null,s2 char(100)); create trigger t1_bu before update on t1 for each row set new.s1 = null; insert into t1 values (null,null); @@ -703,3 +694,13 @@ alter 
table t1 add primary key pti(pt); ERROR 42000: BLOB/TEXT column 'pt' used in key specification without a key length alter table t1 add primary key pti(pt(20)); drop table t1; +create table t1 (g GEOMETRY); +select * from t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 t1 g g 255 4294967295 0 Y 144 0 63 +g +select asbinary(g) from t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def asbinary(g) 252 8192 0 Y 128 0 63 +asbinary(g) +drop table t1; diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result index 6c05a3dde8b..a6f05143b3e 100644 --- a/mysql-test/r/key.result +++ b/mysql-test/r/key.result @@ -336,8 +336,8 @@ UNIQUE i1idx (i1), UNIQUE i2idx (i2)); desc t1; Field Type Null Key Default Extra -i1 int(11) UNI 0 -i2 int(11) UNI 0 +i1 int(11) NO UNI +i2 int(11) NO UNI drop table t1; create table t1 ( c1 int, diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index b25faddc1e8..4c6ff9b2fe7 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -377,11 +377,6 @@ select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))); # End of 4.1 tests ---enable_metadata -create table t1 (g GEOMETRY); -select * from t1; -select asbinary(g) from t1; ---disable_metadata # # Bug #12281 (Geometry: crash in trigger) # @@ -414,3 +409,10 @@ create table t1(pt GEOMETRY); alter table t1 add primary key pti(pt); alter table t1 add primary key pti(pt(20)); drop table t1; + +--enable_metadata +create table t1 (g GEOMETRY); +select * from t1; +select asbinary(g) from t1; +--disable_metadata +drop table t1; From 04ae97157693041e546a7baa1b8618b93b03dae5 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 16:26:08 +0200 Subject: [PATCH 44/74] Bug #18863 NDB node fails to restart, cluster stuck in state trying to restart it. 
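// A note on PATCH 41 above, before the next patch body: a sketch of its
// broadcast idea under stated assumptions (Node and send are
// hypothetical stand-ins for the node table and the
// sendSignal/NodeReceiverGroup pair -- not the NDB kernel API). Every
// QMGR instance must learn of an API node failure, not only the blocks
// local to one node; otherwise nodes disagree about which API nodes are
// connected and restart handling can hang in phase 100.
#include <cstdint>
#include <vector>

struct Node { std::uint32_t id; };

void broadcast_api_failure(const std::vector<Node>& cluster_nodes,
                           std::uint32_t failed_api,
                           void (*send)(std::uint32_t to, std::uint32_t failed))
{
  for (const Node& n : cluster_nodes)
    send(n.id, failed_api);   // every receiver runs the same node_failed() path
}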
- remove some event code to get rid of some bugs --- ndb/include/Makefile.am | 1 - ndb/include/kernel/GlobalSignalNumbers.h | 8 +- ndb/include/ndbapi/Ndb.hpp | 47 - ndb/include/ndbapi/NdbApi.hpp | 1 - ndb/include/ndbapi/NdbDictionary.hpp | 186 -- .../debugger/signaldata/SignalDataPrint.cpp | 6 - .../debugger/signaldata/SignalNames.cpp | 19 - ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 2211 ---------------- ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 261 -- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 9 - ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 3 - ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 1 - ndb/src/kernel/blocks/suma/Suma.cpp | 2245 +---------------- ndb/src/kernel/blocks/suma/Suma.hpp | 186 -- ndb/src/kernel/blocks/suma/SumaInit.cpp | 52 - ndb/src/ndbapi/Makefile.am | 2 - ndb/src/ndbapi/Ndb.cpp | 46 - ndb/src/ndbapi/NdbDictionary.cpp | 148 -- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 735 ------ ndb/src/ndbapi/NdbDictionaryImpl.hpp | 92 - ndb/src/ndbapi/Ndberr.cpp | 8 - ndb/src/ndbapi/Ndbif.cpp | 18 - ndb/src/ndbapi/Ndbinit.cpp | 16 - ndb/test/include/HugoTransactions.hpp | 3 - ndb/test/ndbapi/Makefile.am | 6 +- ndb/test/src/HugoTransactions.cpp | 279 -- 26 files changed, 9 insertions(+), 6580 deletions(-) diff --git a/ndb/include/Makefile.am b/ndb/include/Makefile.am index 240101c2004..842f4daabee 100644 --- a/ndb/include/Makefile.am +++ b/ndb/include/Makefile.am @@ -15,7 +15,6 @@ ndbapi/NdbApi.hpp \ ndbapi/NdbTransaction.hpp \ ndbapi/NdbDictionary.hpp \ ndbapi/NdbError.hpp \ -ndbapi/NdbEventOperation.hpp \ ndbapi/NdbIndexOperation.hpp \ ndbapi/NdbOperation.hpp \ ndbapi/ndb_cluster_connection.hpp \ diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index fcb0a87020f..5a8de2b1e3e 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -735,9 +735,11 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_SUB_CREATE_REQ 576 #define GSN_SUB_CREATE_REF 577 #define GSN_SUB_CREATE_CONF 578 +/* #define GSN_SUB_START_REQ 579 #define GSN_SUB_START_REF 580 #define GSN_SUB_START_CONF 581 +*/ #define GSN_SUB_SYNC_REQ 582 #define GSN_SUB_SYNC_REF 583 #define GSN_SUB_SYNC_CONF 584 @@ -903,10 +905,11 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; /** * SUMA restart protocol */ +/* #define GSN_SUMA_START_ME 691 #define GSN_SUMA_HANDOVER_REQ 692 #define GSN_SUMA_HANDOVER_CONF 693 - +*/ /* not used 694 */ /* not used 695 */ /* not used 696 */ @@ -923,6 +926,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; /* * EVENT Signals */ +/* #define GSN_SUB_GCP_COMPLETE_ACC 699 #define GSN_CREATE_EVNT_REQ 700 @@ -932,7 +936,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_DROP_EVNT_REQ 703 #define GSN_DROP_EVNT_CONF 704 #define GSN_DROP_EVNT_REF 705 - +*/ #define GSN_TUX_BOUND_INFO 710 #define GSN_ACC_LOCKREQ 711 diff --git a/ndb/include/ndbapi/Ndb.hpp b/ndb/include/ndbapi/Ndb.hpp index f128a45f5bf..e7c1e85c02a 100644 --- a/ndb/include/ndbapi/Ndb.hpp +++ b/ndb/include/ndbapi/Ndb.hpp @@ -38,9 +38,6 @@ In addition, the NDB API defines a structure NdbError, which contains the specification for an error. - It is also possible to receive "events" triggered when data in the database in changed. - This is done through the NdbEventOperation class. - There are also some auxiliary classes, which are listed in the class hierarchy. 
The main structure of an application program is as follows: @@ -968,7 +965,6 @@ class NdbObjectIdMap; class NdbOperation; -class NdbEventOperationImpl; class NdbScanOperation; class NdbIndexScanOperation; class NdbIndexOperation; @@ -981,13 +977,11 @@ class NdbSubroutine; class NdbCall; class Table; class BaseString; -class NdbEventOperation; class NdbBlob; class NdbReceiver; class Ndb_local_table_info; template struct Ndb_free_list_t; -typedef void (* NdbEventCallback)(NdbEventOperation*, Ndb*, void*); #if defined NDB_OSE /** @@ -1049,7 +1043,6 @@ class Ndb #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL friend class NdbReceiver; friend class NdbOperation; - friend class NdbEventOperationImpl; friend class NdbTransaction; friend class Table; friend class NdbApiSignal; @@ -1193,46 +1186,6 @@ public: class NdbDictionary::Dictionary* getDictionary() const; - /** @} *********************************************************************/ - - /** - * @name Event subscriptions - * @{ - */ - - /** - * Create a subcription to an event defined in the database - * - * @param eventName - * unique identifier of the event - * @param bufferLength - * circular buffer size for storing event data - * - * @return Object representing an event, NULL on failure - */ - NdbEventOperation* createEventOperation(const char* eventName, - const int bufferLength); - /** - * Drop a subscription to an event - * - * @param eventOp - * Event operation - * - * @return 0 on success - */ - int dropEventOperation(NdbEventOperation* eventOp); - - /** - * Wait for an event to occur. Will return as soon as an event - * is detected on any of the created events. - * - * @param aMillisecondNumber - * maximum time to wait - * - * @return the number of events that has occured, -1 on failure - */ - int pollEvents(int aMillisecondNumber); - /** @} *********************************************************************/ /** diff --git a/ndb/include/ndbapi/NdbApi.hpp b/ndb/include/ndbapi/NdbApi.hpp index aed4d5efbd7..c8400ed78ce 100644 --- a/ndb/include/ndbapi/NdbApi.hpp +++ b/ndb/include/ndbapi/NdbApi.hpp @@ -29,7 +29,6 @@ #include "NdbScanFilter.hpp" #include "NdbRecAttr.hpp" #include "NdbDictionary.hpp" -#include "NdbEventOperation.hpp" #include "NdbPool.hpp" #include "NdbBlob.hpp" #endif diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index e67a0253096..db84c3715a5 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -937,165 +937,6 @@ public: Index(NdbIndexImpl&); }; - /** - * @brief Represents an Event in NDB Cluster - * - */ - class Event : public Object { - public: - /** - * Specifies the type of database operations an Event listens to - */ -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - /** TableEvent must match 1 << TriggerEvent */ -#endif - enum TableEvent { - TE_INSERT=1, ///< Insert event on table - TE_DELETE=2, ///< Delete event on table - TE_UPDATE=4, ///< Update event on table - TE_ALL=7 ///< Any/all event on table (not relevant when - ///< events are received) - }; - /** - * Specifies the durability of an event - * (future version may supply other types) - */ - enum EventDurability { - ED_UNDEFINED -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 0 -#endif -#if 0 // not supported - ,ED_SESSION = 1, - // Only this API can use it - // and it's deleted after api has disconnected or ndb has restarted - - ED_TEMPORARY = 2 - // All API's can use it, - // But's its removed when ndb is restarted -#endif - ,ED_PERMANENT ///< All API's can use it. 
- ///< It's still defined after a cluster system restart -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - = 3 -#endif - }; - - /** - * Constructor - * @param name Name of event - */ - Event(const char *name); - /** - * Constructor - * @param name Name of event - * @param table Reference retrieved from NdbDictionary - */ - Event(const char *name, const NdbDictionary::Table& table); - virtual ~Event(); - /** - * Set unique identifier for the event - */ - void setName(const char *name); - /** - * Get unique identifier for the event - */ - const char *getName() const; - /** - * Define table on which events should be detected - * - * @note calling this method will default to detection - * of events on all columns. Calling subsequent - * addEventColumn calls will override this. - * - * @param table reference retrieved from NdbDictionary - */ - void setTable(const NdbDictionary::Table& table); - /** - * Set table for which events should be detected - * - * @note preferred way is using setTable(const NdbDictionary::Table&) - * or constructor with table object parameter - */ - void setTable(const char *tableName); - /** - * Get table name for events - * - * @return table name - */ - const char* getTableName() const; - /** - * Add type of event that should be detected - */ - void addTableEvent(const TableEvent te); - /** - * Set durability of the event - */ - void setDurability(EventDurability); - /** - * Get durability of the event - */ - EventDurability getDurability() const; -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void addColumn(const Column &c); -#endif - /** - * Add a column on which events should be detected - * - * @param attrId Column id - * - * @note errors will mot be detected until createEvent() is called - */ - void addEventColumn(unsigned attrId); - /** - * Add a column on which events should be detected - * - * @param columnName Column name - * - * @note errors will not be detected until createEvent() is called - */ - void addEventColumn(const char * columnName); - /** - * Add several columns on which events should be detected - * - * @param n Number of columns - * @param columnNames Column names - * - * @note errors will mot be detected until - * NdbDictionary::Dictionary::createEvent() is called - */ - void addEventColumns(int n, const char ** columnNames); - - /** - * Get no of columns defined in an Event - * - * @return Number of columns, -1 on error - */ - int getNoOfEventColumns() const; - - /** - * Get object status - */ - virtual Object::Status getObjectStatus() const; - - /** - * Get object version - */ - virtual int getObjectVersion() const; - -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - void print(); -#endif - - private: -#ifndef DOXYGEN_SHOULD_SKIP_INTERNAL - friend class NdbEventImpl; - friend class NdbEventOperationImpl; -#endif - class NdbEventImpl & m_impl; - Event(NdbEventImpl&); - }; - /** * @class Dictionary * @brief Dictionary for defining and retreiving meta data @@ -1214,33 +1055,6 @@ public: int listIndexes(List & list, const char * tableName); int listIndexes(List & list, const char * tableName) const; - /** @} *******************************************************************/ - /** - * @name Events - * @{ - */ - - /** - * Create event given defined Event instance - * @param event Event to create - * @return 0 if successful otherwise -1. - */ - int createEvent(const Event &event); - - /** - * Drop event with given name - * @param eventName Name of event to drop. - * @return 0 if successful otherwise -1. 
- */ - int dropEvent(const char * eventName); - - /** - * Get event with given name. - * @param eventName Name of event to get. - * @return an Event if successful, otherwise NULL. - */ - const Event * getEvent(const char * eventName); - /** @} *******************************************************************/ /** diff --git a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp index 34cae9f618f..572d8f6e3ca 100644 --- a/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp +++ b/ndb/src/common/debugger/signaldata/SignalDataPrint.cpp @@ -156,12 +156,6 @@ SignalDataPrintFunctions[] = { { GSN_SUB_REMOVE_REQ, printSUB_REMOVE_REQ }, { GSN_SUB_REMOVE_REF, printSUB_REMOVE_REF }, { GSN_SUB_REMOVE_CONF, printSUB_REMOVE_CONF }, - { GSN_SUB_START_REQ, printSUB_START_REQ }, - { GSN_SUB_START_REF, printSUB_START_REF }, - { GSN_SUB_START_CONF, printSUB_START_CONF }, - { GSN_SUB_STOP_REQ, printSUB_STOP_REQ }, - { GSN_SUB_STOP_REF, printSUB_STOP_REF }, - { GSN_SUB_STOP_CONF, printSUB_STOP_CONF }, { GSN_SUB_SYNC_REQ, printSUB_SYNC_REQ }, { GSN_SUB_SYNC_REF, printSUB_SYNC_REF }, { GSN_SUB_SYNC_CONF, printSUB_SYNC_CONF }, diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp index 984d28819c0..1896a000dd7 100644 --- a/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -502,18 +502,6 @@ const GsnName SignalNames [] = { //,{ GSN_TCINDEXNEXTCONF, "TCINDEXNEXTCONF" } //,{ GSN_TCINDEXNEXREF, "TCINDEXNEXREF" } - ,{ GSN_CREATE_EVNT_REQ, "CREATE_EVNT_REQ" } - ,{ GSN_CREATE_EVNT_CONF, "CREATE_EVNT_CONF" } - ,{ GSN_CREATE_EVNT_REF, "CREATE_EVNT_REF" } - - ,{ GSN_SUMA_START_ME, "SUMA_START_ME" } - ,{ GSN_SUMA_HANDOVER_REQ, "SUMA_HANDOVER_REQ"} - ,{ GSN_SUMA_HANDOVER_CONF, "SUMA_HANDOVER_CONF"} - - ,{ GSN_DROP_EVNT_REQ, "DROP_EVNT_REQ" } - ,{ GSN_DROP_EVNT_CONF, "DROP_EVNT_CONF" } - ,{ GSN_DROP_EVNT_REF, "DROP_EVNT_REF" } - ,{ GSN_BACKUP_TRIG_REQ, "BACKUP_TRIG_REQ" } ,{ GSN_BACKUP_REQ, "BACKUP_REQ" } ,{ GSN_BACKUP_DATA, "BACKUP_DATA" } @@ -581,12 +569,6 @@ const GsnName SignalNames [] = { ,{ GSN_SUB_REMOVE_REQ, "SUB_REMOVE_REQ" } ,{ GSN_SUB_REMOVE_REF, "SUB_REMOVE_REF" } ,{ GSN_SUB_REMOVE_CONF, "SUB_REMOVE_CONF" } - ,{ GSN_SUB_START_REQ, "SUB_START_REQ" } - ,{ GSN_SUB_START_REF, "SUB_START_REF" } - ,{ GSN_SUB_START_CONF, "SUB_START_CONF" } - ,{ GSN_SUB_STOP_REQ, "SUB_STOP_REQ" } - ,{ GSN_SUB_STOP_REF, "SUB_STOP_REF" } - ,{ GSN_SUB_STOP_CONF, "SUB_STOP_CONF" } ,{ GSN_SUB_SYNC_REQ, "SUB_SYNC_REQ" } ,{ GSN_SUB_SYNC_REF, "SUB_SYNC_REF" } ,{ GSN_SUB_SYNC_CONF, "SUB_SYNC_CONF" } @@ -596,7 +578,6 @@ const GsnName SignalNames [] = { ,{ GSN_SUB_SYNC_CONTINUE_REF, "SUB_SYNC_CONTINUE_REF" } ,{ GSN_SUB_SYNC_CONTINUE_CONF, "SUB_SYNC_CONTINUE_CONF" } ,{ GSN_SUB_GCP_COMPLETE_REP, "SUB_GCP_COMPLETE_REP" } - ,{ GSN_SUB_GCP_COMPLETE_ACC, "SUB_GCP_COMPLETE_ACC" } ,{ GSN_CREATE_SUBID_REQ, "CREATE_SUBID_REQ" } ,{ GSN_CREATE_SUBID_REF, "CREATE_SUBID_REF" } diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 1f7fd8e6fa5..55c5aafe4d1 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -1205,10 +1205,6 @@ Dbdict::Dbdict(const class Configuration & conf): c_opDropIndex(c_opRecordPool), c_opAlterIndex(c_opRecordPool), c_opBuildIndex(c_opRecordPool), - c_opCreateEvent(c_opRecordPool), - c_opSubEvent(c_opRecordPool), - c_opDropEvent(c_opRecordPool), - 
c_opSignalUtil(c_opRecordPool), c_opCreateTrigger(c_opRecordPool), c_opDropTrigger(c_opRecordPool), c_opAlterTrigger(c_opRecordPool), @@ -1266,44 +1262,6 @@ Dbdict::Dbdict(const class Configuration & conf): addRecSignal(GSN_BUILDINDXCONF, &Dbdict::execBUILDINDXCONF); addRecSignal(GSN_BUILDINDXREF, &Dbdict::execBUILDINDXREF); - // Util signals - addRecSignal(GSN_UTIL_PREPARE_CONF, &Dbdict::execUTIL_PREPARE_CONF); - addRecSignal(GSN_UTIL_PREPARE_REF, &Dbdict::execUTIL_PREPARE_REF); - - addRecSignal(GSN_UTIL_EXECUTE_CONF, &Dbdict::execUTIL_EXECUTE_CONF); - addRecSignal(GSN_UTIL_EXECUTE_REF, &Dbdict::execUTIL_EXECUTE_REF); - - addRecSignal(GSN_UTIL_RELEASE_CONF, &Dbdict::execUTIL_RELEASE_CONF); - addRecSignal(GSN_UTIL_RELEASE_REF, &Dbdict::execUTIL_RELEASE_REF); - - // Event signals - addRecSignal(GSN_CREATE_EVNT_REQ, &Dbdict::execCREATE_EVNT_REQ); - addRecSignal(GSN_CREATE_EVNT_CONF, &Dbdict::execCREATE_EVNT_CONF); - addRecSignal(GSN_CREATE_EVNT_REF, &Dbdict::execCREATE_EVNT_REF); - - addRecSignal(GSN_CREATE_SUBID_CONF, &Dbdict::execCREATE_SUBID_CONF); - addRecSignal(GSN_CREATE_SUBID_REF, &Dbdict::execCREATE_SUBID_REF); - - addRecSignal(GSN_SUB_CREATE_CONF, &Dbdict::execSUB_CREATE_CONF); - addRecSignal(GSN_SUB_CREATE_REF, &Dbdict::execSUB_CREATE_REF); - - addRecSignal(GSN_SUB_START_REQ, &Dbdict::execSUB_START_REQ); - addRecSignal(GSN_SUB_START_CONF, &Dbdict::execSUB_START_CONF); - addRecSignal(GSN_SUB_START_REF, &Dbdict::execSUB_START_REF); - - addRecSignal(GSN_SUB_STOP_REQ, &Dbdict::execSUB_STOP_REQ); - addRecSignal(GSN_SUB_STOP_CONF, &Dbdict::execSUB_STOP_CONF); - addRecSignal(GSN_SUB_STOP_REF, &Dbdict::execSUB_STOP_REF); - - addRecSignal(GSN_SUB_SYNC_CONF, &Dbdict::execSUB_SYNC_CONF); - addRecSignal(GSN_SUB_SYNC_REF, &Dbdict::execSUB_SYNC_REF); - - addRecSignal(GSN_DROP_EVNT_REQ, &Dbdict::execDROP_EVNT_REQ); - - addRecSignal(GSN_SUB_REMOVE_REQ, &Dbdict::execSUB_REMOVE_REQ); - addRecSignal(GSN_SUB_REMOVE_CONF, &Dbdict::execSUB_REMOVE_CONF); - addRecSignal(GSN_SUB_REMOVE_REF, &Dbdict::execSUB_REMOVE_REF); - // Trigger signals addRecSignal(GSN_CREATE_TRIG_REQ, &Dbdict::execCREATE_TRIG_REQ); addRecSignal(GSN_CREATE_TRIG_CONF, &Dbdict::execCREATE_TRIG_CONF); @@ -1762,10 +1720,6 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal) c_opCreateTable.setSize(8); c_opDropTable.setSize(8); c_opCreateIndex.setSize(8); - c_opCreateEvent.setSize(8); - c_opSubEvent.setSize(8); - c_opDropEvent.setSize(8); - c_opSignalUtil.setSize(8); c_opDropIndex.setSize(8); c_opAlterIndex.setSize(8); c_opBuildIndex.setSize(8); @@ -7406,2171 +7360,6 @@ Dbdict::dropIndex_sendReply(Signal* signal, OpDropIndexPtr opPtr, sendSignal(rep->getUserRef(), gsn, signal, length, JBB); } -/***************************************************** - * - * Util signalling - * - *****************************************************/ - -int -Dbdict::sendSignalUtilReq(Callback *pcallback, - BlockReference ref, - GlobalSignalNumber gsn, - Signal* signal, - Uint32 length, - JobBufferLevel jbuf, - LinearSectionPtr ptr[3], - Uint32 noOfSections) -{ - jam(); - EVENT_TRACE; - OpSignalUtilPtr utilRecPtr; - - // Seize a Util Send record - if (!c_opSignalUtil.seize(utilRecPtr)) { - // Failed to allocate util record - return -1; - } - utilRecPtr.p->m_callback = *pcallback; - - // should work for all util signal classes - UtilPrepareReq *req = (UtilPrepareReq*)signal->getDataPtrSend(); - utilRecPtr.p->m_userData = req->getSenderData(); - req->setSenderData(utilRecPtr.i); - - if (ptr) { - jam(); - sendSignal(ref, gsn, signal, length, jbuf, ptr, 
noOfSections); - } else { - jam(); - sendSignal(ref, gsn, signal, length, jbuf); - } - - return 0; -} - -int -Dbdict::recvSignalUtilReq(Signal* signal, Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - UtilPrepareConf * const req = (UtilPrepareConf*)signal->getDataPtr(); - OpSignalUtilPtr utilRecPtr; - utilRecPtr.i = req->getSenderData(); - if ((utilRecPtr.p = c_opSignalUtil.getPtr(utilRecPtr.i)) == NULL) { - jam(); - return -1; - } - - req->setSenderData(utilRecPtr.p->m_userData); - Callback c = utilRecPtr.p->m_callback; - c_opSignalUtil.release(utilRecPtr); - - execute(signal, c, returnCode); - return 0; -} - -void Dbdict::execUTIL_PREPARE_CONF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(recvSignalUtilReq(signal, 0) == 0); -} - -void -Dbdict::execUTIL_PREPARE_REF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(recvSignalUtilReq(signal, 1) == 0); -} - -void Dbdict::execUTIL_EXECUTE_CONF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(recvSignalUtilReq(signal, 0) == 0); -} - -void Dbdict::execUTIL_EXECUTE_REF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - -#ifdef EVENT_DEBUG - UtilExecuteRef * ref = (UtilExecuteRef *)signal->getDataPtrSend(); - - ndbout_c("execUTIL_EXECUTE_REF"); - ndbout_c("senderData %u",ref->getSenderData()); - ndbout_c("errorCode %u",ref->getErrorCode()); - ndbout_c("TCErrorCode %u",ref->getTCErrorCode()); -#endif - - ndbrequire(recvSignalUtilReq(signal, 1) == 0); -} -void Dbdict::execUTIL_RELEASE_CONF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(false); - ndbrequire(recvSignalUtilReq(signal, 0) == 0); -} -void Dbdict::execUTIL_RELEASE_REF(Signal *signal) -{ - jamEntry(); - EVENT_TRACE; - ndbrequire(false); - ndbrequire(recvSignalUtilReq(signal, 1) == 0); -} - -/** - * MODULE: Create event - * - * Create event in DICT. 
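/* Editor's aside: the system table referred to below is NDB$EVENTS_0,
 * one row per event. The columns, in the order used throughout this
 * module (sizes come from sysTab_NDBEVENTS_0_szs):
 *
 *   NAME            event name, the primary key
 *   EVENT_TYPE      bitmask of TableEvent values
 *   TABLE_NAME      name of the watched table
 *   ATTRIBUTE_MASK  bitmask of watched columns
 *   SUBID, SUBKEY   SUMA subscription id and key
 */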
- * - * - * Request type in CREATE_EVNT signals: - * - * Signalflow see Dbdict.txt - * - */ - -/***************************************************************** - * - * Systable stuff - * - */ - -const Uint32 Dbdict::sysTab_NDBEVENTS_0_szs[EVENT_SYSTEM_TABLE_LENGTH] = { - sizeof(((sysTab_NDBEVENTS_0*)0)->NAME), - sizeof(((sysTab_NDBEVENTS_0*)0)->EVENT_TYPE), - sizeof(((sysTab_NDBEVENTS_0*)0)->TABLE_NAME), - sizeof(((sysTab_NDBEVENTS_0*)0)->ATTRIBUTE_MASK), - sizeof(((sysTab_NDBEVENTS_0*)0)->SUBID), - sizeof(((sysTab_NDBEVENTS_0*)0)->SUBKEY) -}; - -void -Dbdict::prepareTransactionEventSysTable (Callback *pcallback, - Signal* signal, - Uint32 senderData, - UtilPrepareReq::OperationTypeValue prepReq) -{ - // find table id for event system table - TableRecord keyRecord; - strcpy(keyRecord.tableName, EVENT_SYSTEM_TABLE_NAME); - - TableRecordPtr tablePtr; - c_tableRecordHash.find(tablePtr, keyRecord); - - ndbrequire(tablePtr.i != RNIL); // system table must exist - - Uint32 tableId = tablePtr.p->tableId; /* System table */ - Uint32 noAttr = tablePtr.p->noOfAttributes; - ndbrequire(noAttr == EVENT_SYSTEM_TABLE_LENGTH); - - switch (prepReq) { - case UtilPrepareReq::Update: - case UtilPrepareReq::Insert: - case UtilPrepareReq::Write: - case UtilPrepareReq::Read: - jam(); - break; - case UtilPrepareReq::Delete: - jam(); - noAttr = 1; // only involves Primary key which should be the first - break; - } - prepareUtilTransaction(pcallback, signal, senderData, tableId, NULL, - prepReq, noAttr, NULL, NULL); -} - -void -Dbdict::prepareUtilTransaction(Callback *pcallback, - Signal* signal, - Uint32 senderData, - Uint32 tableId, - const char* tableName, - UtilPrepareReq::OperationTypeValue prepReq, - Uint32 noAttr, - Uint32 attrIds[], - const char *attrNames[]) -{ - jam(); - EVENT_TRACE; - - UtilPrepareReq * utilPrepareReq = - (UtilPrepareReq *)signal->getDataPtrSend(); - - utilPrepareReq->setSenderRef(reference()); - utilPrepareReq->setSenderData(senderData); - - const Uint32 pageSizeInWords = 128; - Uint32 propPage[pageSizeInWords]; - LinearWriter w(&propPage[0],128); - w.first(); - w.add(UtilPrepareReq::NoOfOperations, 1); - w.add(UtilPrepareReq::OperationType, prepReq); - if (tableName) { - jam(); - w.add(UtilPrepareReq::TableName, tableName); - } else { - jam(); - w.add(UtilPrepareReq::TableId, tableId); - } - for(Uint32 i = 0; i < noAttr; i++) - if (tableName) { - jam(); - w.add(UtilPrepareReq::AttributeName, attrNames[i]); - } else { - if (attrIds) { - jam(); - w.add(UtilPrepareReq::AttributeId, attrIds[i]); - } else { - jam(); - w.add(UtilPrepareReq::AttributeId, i); - } - } -#ifdef EVENT_DEBUG - // Debugging - SimplePropertiesLinearReader reader(propPage, w.getWordsUsed()); - printf("Dict::prepareInsertTransactions: Sent SimpleProperties:\n"); - reader.printAll(ndbout); -#endif - - struct LinearSectionPtr sectionsPtr[UtilPrepareReq::NoOfSections]; - sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].p = propPage; - sectionsPtr[UtilPrepareReq::PROPERTIES_SECTION].sz = w.getWordsUsed(); - - sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_PREPARE_REQ, signal, - UtilPrepareReq::SignalLength, JBB, - sectionsPtr, UtilPrepareReq::NoOfSections); -} - -/***************************************************************** - * - * CREATE_EVNT_REQ has three types RT_CREATE, RT_GET (from user) - * and RT_DICT_AFTER_GET send from master DICT to slaves - * - * This function just dscpaches these to - * - * createEvent_RT_USER_CREATE - * createEvent_RT_USER_GET - * createEvent_RT_DICT_AFTER_GET - * - * repectively - * 
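/* Editor's aside: the resulting signal flow, reconstructed from the
 * handlers below (the authoritative description is in Dbdict.txt):
 *
 *   RT_USER_CREATE: API -> master DICT
 *     DICT -> SUMA    CREATE_SUBID_REQ           (allocate subscription id/key)
 *     DICT -> DBUTIL  UTIL_PREPARE/UTIL_EXECUTE  (insert row into NDB$EVENTS_0)
 *     DICT -> API     CREATE_EVNT_CONF/REF
 *
 *   RT_USER_GET: API -> master DICT
 *     DICT -> DBUTIL  UTIL_PREPARE/UTIL_EXECUTE  (read row from NDB$EVENTS_0)
 *     DICT -> all DICTs   CREATE_EVNT_REQ (RT_DICT_AFTER_GET)
 *     each DICT -> its SUMA   SUB_CREATE_REQ, then SUB_SYNC_REQ
 *     DICT -> API     CREATE_EVNT_CONF/REF       (once every participant replies)
 */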
- */ - -void -Dbdict::execCREATE_EVNT_REQ(Signal* signal) -{ - jamEntry(); - -#if 0 - { - SafeCounterHandle handle; - { - SafeCounter tmp(c_counterMgr, handle); - tmp.init(CMVMI, GSN_DUMP_STATE_ORD, /* senderData */ 13); - tmp.clearWaitingFor(); - tmp.setWaitingFor(3); - ndbrequire(!tmp.done()); - ndbout_c("Allocted"); - } - ndbrequire(!handle.done()); - { - SafeCounter tmp(c_counterMgr, handle); - tmp.clearWaitingFor(3); - ndbrequire(tmp.done()); - ndbout_c("Deallocted"); - } - ndbrequire(handle.done()); - } - { - NodeBitmask nodes; - nodes.clear(); - - nodes.set(2); - nodes.set(3); - nodes.set(4); - nodes.set(5); - - { - Uint32 i = 0; - while((i = nodes.find(i)) != NodeBitmask::NotFound){ - ndbout_c("1 Node id = %u", i); - i++; - } - } - - NodeReceiverGroup rg(DBDICT, nodes); - RequestTracker rt2; - ndbrequire(rt2.done()); - ndbrequire(!rt2.hasRef()); - ndbrequire(!rt2.hasConf()); - rt2.init(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13); - - RequestTracker rt3; - rt3.init(c_counterMgr, rg, GSN_CREATE_EVNT_REF, 13); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportRef(c_counterMgr, 2); - rt3.reportConf(c_counterMgr, 2); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportConf(c_counterMgr, 3); - rt3.reportConf(c_counterMgr, 3); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportConf(c_counterMgr, 4); - rt3.reportConf(c_counterMgr, 4); - - ndbrequire(!rt2.done()); - ndbrequire(!rt3.done()); - - rt2.reportConf(c_counterMgr, 5); - rt3.reportConf(c_counterMgr, 5); - - ndbrequire(rt2.done()); - ndbrequire(rt3.done()); - } -#endif - - if (! assembleFragments(signal)) { - jam(); - return; - } - - CreateEvntReq *req = (CreateEvntReq*)signal->getDataPtr(); - const CreateEvntReq::RequestType requestType = req->getRequestType(); - const Uint32 requestFlag = req->getRequestFlag(); - - OpCreateEventPtr evntRecPtr; - // Seize a Create Event record - if (!c_opCreateEvent.seize(evntRecPtr)) { - // Failed to allocate event record - jam(); - releaseSections(signal); - - CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend(); - ret->senderRef = reference(); - ret->setErrorCode(CreateEvntRef::SeizeError); - ret->setErrorLine(__LINE__); - ret->setErrorNode(reference()); - sendSignal(signal->senderBlockRef(), GSN_CREATE_EVNT_REF, signal, - CreateEvntRef::SignalLength, JBB); - return; - } - -#ifdef EVENT_DEBUG - ndbout_c("DBDICT::execCREATE_EVNT_REQ from %u evntRecId = (%d)", refToNode(signal->getSendersBlockRef()), evntRecPtr.i); -#endif - - ndbrequire(req->getUserRef() == signal->getSendersBlockRef()); - - evntRecPtr.p->init(req,this); - - if (requestFlag & (Uint32)CreateEvntReq::RT_DICT_AFTER_GET) { - jam(); - EVENT_TRACE; - createEvent_RT_DICT_AFTER_GET(signal, evntRecPtr); - return; - } - if (requestType == CreateEvntReq::RT_USER_GET) { - jam(); - EVENT_TRACE; - createEvent_RT_USER_GET(signal, evntRecPtr); - return; - } - if (requestType == CreateEvntReq::RT_USER_CREATE) { - jam(); - EVENT_TRACE; - createEvent_RT_USER_CREATE(signal, evntRecPtr); - return; - } - -#ifdef EVENT_DEBUG - ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ other" << endl; -#endif - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); -} - -/******************************************************************** - * - * Event creation - * - 
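/* Editor's aside: RT_USER_CREATE below arrives as a long signal; its
 * EVENT_NAME_SECTION holds two SimpleProperties string values that are
 * read in order, first the event name and then the table name. Each is
 * copied into a fixed MAX_TAB_NAME_SIZE buffer and zero-padded before
 * the CREATE_SUBID_REQ is sent on to SUMA. */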
*****************************************************************/ - -void -Dbdict::createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr){ - jam(); - evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef()); - -#ifdef EVENT_DEBUG - ndbout << "Dbdict.cpp: Dbdict::execCREATE_EVNT_REQ RT_USER" << endl; - char buf[128] = {0}; - AttributeMask mask = evntRecPtr.p->m_request.getAttrListBitmask(); - mask.getText(buf); - ndbout_c("mask = %s", buf); -#endif - - // Interpret the long signal - - SegmentedSectionPtr ssPtr; - // save name and event properties - signal->getSection(ssPtr, CreateEvntReq::EVENT_NAME_SECTION); - - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); -#ifdef EVENT_DEBUG - r0.printAll(ndbout); -#endif - // event name - if ((!r0.first()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - return; - } - r0.getString(evntRecPtr.p->m_eventRec.NAME); - { - int len = strlen(evntRecPtr.p->m_eventRec.NAME); - memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len); -#ifdef EVENT_DEBUG - printf("CreateEvntReq::RT_USER_CREATE; EventName %s, len %u\n", - evntRecPtr.p->m_eventRec.NAME, len); - for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++) - printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]); - printf("\n"); -#endif - } - // table name - if ((!r0.next()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - return; - } - r0.getString(evntRecPtr.p->m_eventRec.TABLE_NAME); - { - int len = strlen(evntRecPtr.p->m_eventRec.TABLE_NAME); - memset(evntRecPtr.p->m_eventRec.TABLE_NAME+len, 0, MAX_TAB_NAME_SIZE-len); - } - -#ifdef EVENT_DEBUG - ndbout_c("event name: %s",evntRecPtr.p->m_eventRec.NAME); - ndbout_c("table name: %s",evntRecPtr.p->m_eventRec.TABLE_NAME); -#endif - - releaseSections(signal); - - // Send request to SUMA - - CreateSubscriptionIdReq * sumaIdReq = - (CreateSubscriptionIdReq *)signal->getDataPtrSend(); - - // make sure we save the original sender for later - sumaIdReq->senderData = evntRecPtr.i; -#ifdef EVENT_DEBUG - ndbout << "sumaIdReq->senderData = " << sumaIdReq->senderData << endl; -#endif - sendSignal(SUMA_REF, GSN_CREATE_SUBID_REQ, signal, - CreateSubscriptionIdReq::SignalLength, JBB); - // we should now return in either execCREATE_SUBID_CONF - // or execCREATE_SUBID_REF -} - -void Dbdict::execCREATE_SUBID_REF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - CreateSubscriptionIdRef * const ref = - (CreateSubscriptionIdRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->senderData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::execCREATE_SUBID_CONF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - - CreateSubscriptionIdConf const * sumaIdConf = - (CreateSubscriptionIdConf *)signal->getDataPtr(); - - Uint32 evntRecId = 
sumaIdConf->senderData; - OpCreateEvent *evntRec; - - ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL); - - evntRec->m_request.setEventId(sumaIdConf->subscriptionId); - evntRec->m_request.setEventKey(sumaIdConf->subscriptionKey); - - releaseSections(signal); - - Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecId, - UtilPrepareReq::Insert); -} - -void -Dbdict::createEventComplete_RT_USER_CREATE(Signal* signal, - OpCreateEventPtr evntRecPtr){ - jam(); - createEvent_sendReply(signal, evntRecPtr); -} - -/********************************************************************* - * - * UTIL_PREPARE, UTIL_EXECUTE - * - * insert or read systable NDB$EVENTS_0 - */ - -void interpretUtilPrepareErrorCode(UtilPrepareRef::ErrorCode errorCode, - bool& temporary, Uint32& line) -{ - switch (errorCode) { - case UtilPrepareRef::NO_ERROR: - jam(); - line = __LINE__; - EVENT_TRACE; - break; - case UtilPrepareRef::PREPARE_SEIZE_ERROR: - jam(); - temporary = true; - line = __LINE__; - EVENT_TRACE; - break; - case UtilPrepareRef::PREPARE_PAGES_SEIZE_ERROR: - jam(); - line = __LINE__; - EVENT_TRACE; - break; - case UtilPrepareRef::PREPARED_OPERATION_SEIZE_ERROR: - jam(); - line = __LINE__; - EVENT_TRACE; - break; - case UtilPrepareRef::DICT_TAB_INFO_ERROR: - jam(); - line = __LINE__; - EVENT_TRACE; - break; - case UtilPrepareRef::MISSING_PROPERTIES_SECTION: - jam(); - line = __LINE__; - EVENT_TRACE; - break; - default: - jam(); - line = __LINE__; - EVENT_TRACE; - break; - } -} - -void -Dbdict::createEventUTIL_PREPARE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode == 0) { - UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - jam(); - evntRecPtr.i = req->getSenderData(); - const Uint32 prepareId = req->getPrepareId(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - Callback c = { safe_cast(&Dbdict::createEventUTIL_EXECUTE), 0 }; - - switch (evntRecPtr.p->m_requestType) { - case CreateEvntReq::RT_USER_GET: -#ifdef EVENT_DEBUG - printf("get type = %d\n", CreateEvntReq::RT_USER_GET); -#endif - jam(); - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Read); - break; - case CreateEvntReq::RT_USER_CREATE: -#ifdef EVENT_DEBUG - printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE); -#endif - { - evntRecPtr.p->m_eventRec.EVENT_TYPE = evntRecPtr.p->m_request.getEventType(); - AttributeMask m = evntRecPtr.p->m_request.getAttrListBitmask(); - memcpy(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK, &m, - sizeof(evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK)); - evntRecPtr.p->m_eventRec.SUBID = evntRecPtr.p->m_request.getEventId(); - evntRecPtr.p->m_eventRec.SUBKEY = evntRecPtr.p->m_request.getEventKey(); - } - jam(); - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Insert); - break; - default: -#ifdef EVENT_DEBUG - printf("type = %d\n", evntRecPtr.p->m_requestType); - printf("bet type = %d\n", CreateEvntReq::RT_USER_GET); - printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE); -#endif - ndbrequire(false); - } - } else { // returnCode != 0 - UtilPrepareRef* const ref = (UtilPrepareRef*)signal->getDataPtr(); - - const UtilPrepareRef::ErrorCode errorCode = - (UtilPrepareRef::ErrorCode)ref->getErrorCode(); - - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = ref->getSenderData(); - 
ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - bool temporary = false; - interpretUtilPrepareErrorCode(errorCode, - temporary, evntRecPtr.p->m_errorLine); - if (temporary) { - evntRecPtr.p->m_errorCode = - CreateEvntRef::makeTemporary(CreateEvntRef::Undefined); - } - - if (evntRecPtr.p->m_errorCode == 0) { - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - } - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - } -} - -void Dbdict::executeTransEventSysTable(Callback *pcallback, Signal *signal, - const Uint32 ptrI, - sysTab_NDBEVENTS_0& m_eventRec, - const Uint32 prepareId, - UtilPrepareReq::OperationTypeValue prepReq) -{ - jam(); - const Uint32 noAttr = EVENT_SYSTEM_TABLE_LENGTH; - Uint32 total_len = 0; - - Uint32* attrHdr = signal->theData + 25; - Uint32* attrPtr = attrHdr; - - Uint32 id=0; - // attribute 0 event name: Primary Key - { - AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4); - total_len += sysTab_NDBEVENTS_0_szs[id]; - attrPtr++; id++; - } - - switch (prepReq) { - case UtilPrepareReq::Read: - jam(); - EVENT_TRACE; - // no more - while ( id < noAttr ) - AttributeHeader::init(attrPtr++, id++, 0); - ndbrequire(id == (Uint32) noAttr); - break; - case UtilPrepareReq::Insert: - jam(); - EVENT_TRACE; - while ( id < noAttr ) { - AttributeHeader::init(attrPtr, id, sysTab_NDBEVENTS_0_szs[id]/4); - total_len += sysTab_NDBEVENTS_0_szs[id]; - attrPtr++; id++; - } - ndbrequire(id == (Uint32) noAttr); - break; - case UtilPrepareReq::Delete: - ndbrequire(id == 1); - break; - default: - ndbrequire(false); - } - - LinearSectionPtr headerPtr; - LinearSectionPtr dataPtr; - - headerPtr.p = attrHdr; - headerPtr.sz = noAttr; - - dataPtr.p = (Uint32*)&m_eventRec; - dataPtr.sz = total_len/4; - - ndbrequire((total_len == sysTab_NDBEVENTS_0_szs[0]) || - (total_len == sizeof(sysTab_NDBEVENTS_0))); - -#if 0 - printf("Header size %u\n", headerPtr.sz); - for(int i = 0; i < (int)headerPtr.sz; i++) - printf("H'%.8x ", attrHdr[i]); - printf("\n"); - - printf("Data size %u\n", dataPtr.sz); - for(int i = 0; i < (int)dataPtr.sz; i++) - printf("H'%.8x ", dataPage[i]); - printf("\n"); -#endif - - executeTransaction(pcallback, signal, - ptrI, - prepareId, - id, - headerPtr, - dataPtr); -} - -void Dbdict::executeTransaction(Callback *pcallback, - Signal* signal, - Uint32 senderData, - Uint32 prepareId, - Uint32 noAttr, - LinearSectionPtr headerPtr, - LinearSectionPtr dataPtr) -{ - jam(); - EVENT_TRACE; - - UtilExecuteReq * utilExecuteReq = - (UtilExecuteReq *)signal->getDataPtrSend(); - - utilExecuteReq->setSenderRef(reference()); - utilExecuteReq->setSenderData(senderData); - utilExecuteReq->setPrepareId(prepareId); - utilExecuteReq->setReleaseFlag(); // must be done after setting prepareId - -#if 0 - printf("Header size %u\n", headerPtr.sz); - for(int i = 0; i < (int)headerPtr.sz; i++) - printf("H'%.8x ", headerBuffer[i]); - printf("\n"); - - printf("Data size %u\n", dataPtr.sz); - for(int i = 0; i < (int)dataPtr.sz; i++) - printf("H'%.8x ", dataBuffer[i]); - printf("\n"); -#endif - - struct LinearSectionPtr sectionsPtr[UtilExecuteReq::NoOfSections]; - sectionsPtr[UtilExecuteReq::HEADER_SECTION].p = headerPtr.p; - sectionsPtr[UtilExecuteReq::HEADER_SECTION].sz = noAttr; - sectionsPtr[UtilExecuteReq::DATA_SECTION].p = dataPtr.p; - sectionsPtr[UtilExecuteReq::DATA_SECTION].sz = dataPtr.sz; - - sendSignalUtilReq(pcallback, DBUTIL_REF, GSN_UTIL_EXECUTE_REQ, signal, - UtilExecuteReq::SignalLength, JBB, - sectionsPtr, 
UtilExecuteReq::NoOfSections); -} - -void Dbdict::parseReadEventSys(Signal* signal, sysTab_NDBEVENTS_0& m_eventRec) -{ - SegmentedSectionPtr headerPtr, dataPtr; - jam(); - signal->getSection(headerPtr, UtilExecuteReq::HEADER_SECTION); - SectionReader headerReader(headerPtr, getSectionSegmentPool()); - - signal->getSection(dataPtr, UtilExecuteReq::DATA_SECTION); - SectionReader dataReader(dataPtr, getSectionSegmentPool()); - - AttributeHeader header; - Uint32 *dst = (Uint32*)&m_eventRec; - - for (int i = 0; i < EVENT_SYSTEM_TABLE_LENGTH; i++) { - headerReader.getWord((Uint32 *)&header); - int sz = header.getDataSize(); - for (int i=0; i < sz; i++) - dataReader.getWord(dst++); - } - - ndbrequire( ((char*)dst-(char*)&m_eventRec) == sizeof(m_eventRec) ); - - releaseSections(signal); -} - -void Dbdict::createEventUTIL_EXECUTE(Signal *signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode == 0) { - // Entry into system table all set - UtilExecuteConf* const conf = (UtilExecuteConf*)signal->getDataPtr(); - jam(); - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = conf->getSenderData(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - OpCreateEvent *evntRec = evntRecPtr.p; - - switch (evntRec->m_requestType) { - case CreateEvntReq::RT_USER_GET: { -#ifdef EVENT_DEBUG - printf("get type = %d\n", CreateEvntReq::RT_USER_GET); -#endif - parseReadEventSys(signal, evntRecPtr.p->m_eventRec); - - evntRec->m_request.setEventType(evntRecPtr.p->m_eventRec.EVENT_TYPE); - evntRec->m_request.setAttrListBitmask(*(AttributeMask*)evntRecPtr.p->m_eventRec.ATTRIBUTE_MASK); - evntRec->m_request.setEventId(evntRecPtr.p->m_eventRec.SUBID); - evntRec->m_request.setEventKey(evntRecPtr.p->m_eventRec.SUBKEY); - -#ifdef EVENT_DEBUG - printf("EventName: %s\n", evntRec->m_eventRec.NAME); - printf("TableName: %s\n", evntRec->m_eventRec.TABLE_NAME); -#endif - - // find table id for event table - TableRecord keyRecord; - strcpy(keyRecord.tableName, evntRecPtr.p->m_eventRec.TABLE_NAME); - - TableRecordPtr tablePtr; - c_tableRecordHash.find(tablePtr, keyRecord); - - if (tablePtr.i == RNIL) { - jam(); - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - return; - } - - evntRec->m_request.setTableId(tablePtr.p->tableId); - - createEventComplete_RT_USER_GET(signal, evntRecPtr); - return; - } - case CreateEvntReq::RT_USER_CREATE: { -#ifdef EVENT_DEBUG - printf("create type = %d\n", CreateEvntReq::RT_USER_CREATE); -#endif - jam(); - createEventComplete_RT_USER_CREATE(signal, evntRecPtr); - return; - } - break; - default: - ndbrequire(false); - } - } else { // returnCode != 0 - UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - jam(); - evntRecPtr.p->m_errorNode = reference(); - evntRecPtr.p->m_errorLine = __LINE__; - - switch (ref->getErrorCode()) { - case UtilExecuteRef::TCError: - switch (ref->getTCErrorCode()) { - case ZNOT_FOUND: - jam(); - evntRecPtr.p->m_errorCode = CreateEvntRef::EventNotFound; - break; - case ZALREADYEXIST: - jam(); - evntRecPtr.p->m_errorCode = CreateEvntRef::EventNameExists; - break; - default: - jam(); - evntRecPtr.p->m_errorCode = CreateEvntRef::UndefinedTCError; - break; - } - break; - default: - jam(); - evntRecPtr.p->m_errorCode = 
CreateEvntRef::Undefined; - break; - } - - createEvent_sendReply(signal, evntRecPtr); - } -} - -/*********************************************************************** - * - * NdbEventOperation, reading systable, creating event in suma - * - */ - -void -Dbdict::createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){ - jam(); - EVENT_TRACE; -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REQ::RT_USER_GET evntRecPtr.i = (%d), ref = %u", evntRecPtr.i, evntRecPtr.p->m_request.getUserRef()); -#endif - - SegmentedSectionPtr ssPtr; - - signal->getSection(ssPtr, 0); - - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); -#ifdef EVENT_DEBUG - r0.printAll(ndbout); -#endif - if ((!r0.first()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); - return; - } - - r0.getString(evntRecPtr.p->m_eventRec.NAME); - int len = strlen(evntRecPtr.p->m_eventRec.NAME); - memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len); - - releaseSections(signal); - - Callback c = { safe_cast(&Dbdict::createEventUTIL_PREPARE), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecPtr.i, - UtilPrepareReq::Read); - /* - * Will read systable and fill an OpCreateEventPtr - * and return below - */ -} - -void -Dbdict::createEventComplete_RT_USER_GET(Signal* signal, - OpCreateEventPtr evntRecPtr){ - jam(); - - // Send to oneself and the other DICT's - CreateEvntReq * req = (CreateEvntReq *)signal->getDataPtrSend(); - - *req = evntRecPtr.p->m_request; - req->senderRef = reference(); - req->senderData = evntRecPtr.i; - - req->addRequestFlag(CreateEvntReq::RT_DICT_AFTER_GET); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) sending GSN_CREATE_EVNT_REQ::RT_DICT_AFTER_GET to DBDICT participants evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = evntRecPtr.p->m_reqTracker; - p.init(c_counterMgr, rg, GSN_CREATE_EVNT_REF, evntRecPtr.i); - - sendSignal(rg, GSN_CREATE_EVNT_REQ, signal, CreateEvntReq::SignalLength, JBB); -} - -void -Dbdict::createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI, - Uint32 returnCode){ - OpCreateEventPtr evntRecPtr; - c_opCreateEvent.getPtr(evntRecPtr, eventRecPtrI); - createEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::execCREATE_EVNT_REF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - CreateEvntRef * const ref = (CreateEvntRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->getUserData(); - - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_REF evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - if (ref->errorCode == CreateEvntRef::NF_FakeErrorREF){ - jam(); - evntRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(ref->senderRef)); - } else { - jam(); - evntRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(ref->senderRef)); - } - createEvent_sendReply(signal, evntRecPtr); - - return; -} - -void Dbdict::execCREATE_EVNT_CONF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - CreateEvntConf * const conf = (CreateEvntConf *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = conf->getUserData(); - - ndbrequire((evntRecPtr.p 
= c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_CREATE_EVNT_CONF evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - evntRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(conf->senderRef)); - - // we will only have a valid tablename if it the master DICT sending this - // but that's ok - LinearSectionPtr ptr[1]; - ptr[0].p = (Uint32 *)evntRecPtr.p->m_eventRec.TABLE_NAME; - ptr[0].sz = - (strlen(evntRecPtr.p->m_eventRec.TABLE_NAME)+4)/4; // to make sure we have a null - - createEvent_sendReply(signal, evntRecPtr, ptr, 1); - - return; -} - -/************************************************ - * - * Participant stuff - * - */ - -void -Dbdict::createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr){ - jam(); - evntRecPtr.p->m_request.setUserRef(signal->senderBlockRef()); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Participant) got CREATE_EVNT_REQ::RT_DICT_AFTER_GET evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - // the signal comes from the DICT block that got the first user request! - // This code runs on all DICT nodes, including oneself - - // Seize a Create Event record, the Coordinator will now have two seized - // but that's ok, it's like a recursion - - SubCreateReq * sumaReq = (SubCreateReq *)signal->getDataPtrSend(); - - sumaReq->subscriberRef = reference(); // reference to DICT - sumaReq->subscriberData = evntRecPtr.i; - sumaReq->subscriptionId = evntRecPtr.p->m_request.getEventId(); - sumaReq->subscriptionKey = evntRecPtr.p->m_request.getEventKey(); - sumaReq->subscriptionType = SubCreateReq::TableEvent; - sumaReq->tableId = evntRecPtr.p->m_request.getTableId(); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("sending GSN_SUB_CREATE_REQ"); -#endif - - sendSignal(SUMA_REF, GSN_SUB_CREATE_REQ, signal, - SubCreateReq::SignalLength+1 /*to get table Id*/, JBB); -} - -void Dbdict::execSUB_CREATE_REF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - SubCreateRef * const ref = (SubCreateRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->subscriberData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Participant) got SUB_CREATE_REF evntRecPtr.i = (%d)", evntRecPtr.i); -#endif - - if (ref->err == GrepError::SUBSCRIPTION_ID_NOT_UNIQUE) { - jam(); -#ifdef EVENT_PH2_DEBUG - ndbout_c("SUBSCRIPTION_ID_NOT_UNIQUE"); -#endif - createEvent_sendReply(signal, evntRecPtr); - return; - } - -#ifdef EVENT_PH2_DEBUG - ndbout_c("Other error"); -#endif - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::execSUB_CREATE_CONF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - - SubCreateConf * const sumaConf = (SubCreateConf *)signal->getDataPtr(); - - const Uint32 subscriptionId = sumaConf->subscriptionId; - const Uint32 subscriptionKey = sumaConf->subscriptionKey; - const Uint32 evntRecId = sumaConf->subscriberData; - - OpCreateEvent *evntRec; - ndbrequire((evntRec = c_opCreateEvent.getPtr(evntRecId)) != NULL); - -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT(Participant) got SUB_CREATE_CONF evntRecPtr.i = (%d)", evntRecId); -#endif - - SubSyncReq *sumaSync = (SubSyncReq *)signal->getDataPtrSend(); - - sumaSync->subscriptionId = subscriptionId; - sumaSync->subscriptionKey = subscriptionKey; - sumaSync->part = (Uint32) SubscriptionData::MetaData; - sumaSync->subscriberData = 
evntRecId; - - sendSignal(SUMA_REF, GSN_SUB_SYNC_REQ, signal, - SubSyncReq::SignalLength, JBB); -} - -void Dbdict::execSUB_SYNC_REF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - SubSyncRef * const ref = (SubSyncRef *)signal->getDataPtr(); - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = ref->subscriberData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - createEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::execSUB_SYNC_CONF(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - - SubSyncConf * const sumaSyncConf = (SubSyncConf *)signal->getDataPtr(); - - // Uint32 subscriptionId = sumaSyncConf->subscriptionId; - // Uint32 subscriptionKey = sumaSyncConf->subscriptionKey; - OpCreateEventPtr evntRecPtr; - - evntRecPtr.i = sumaSyncConf->subscriberData; - ndbrequire((evntRecPtr.p = c_opCreateEvent.getPtr(evntRecPtr.i)) != NULL); - - ndbrequire(sumaSyncConf->part == (Uint32)SubscriptionData::MetaData); - - createEvent_sendReply(signal, evntRecPtr); -} - -/**************************************************** - * - * common create reply method - * - *******************************************************/ - -void Dbdict::createEvent_sendReply(Signal* signal, - OpCreateEventPtr evntRecPtr, - LinearSectionPtr *ptr, int noLSP) -{ - jam(); - EVENT_TRACE; - - // check if we're ready to sent reply - // if we are the master dict we might be waiting for conf/ref - - if (!evntRecPtr.p->m_reqTracker.done()) { - jam(); - return; // there's more to come - } - - if (evntRecPtr.p->m_reqTracker.hasRef()) { - ptr = NULL; // we don't want to return anything if there's an error - if (!evntRecPtr.p->hasError()) { - evntRecPtr.p->m_errorCode = CreateEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - jam(); - } else - jam(); - } - - // reference to API if master DICT - // else reference to master DICT - Uint32 senderRef = evntRecPtr.p->m_request.getUserRef(); - Uint32 signalLength; - Uint32 gsn; - - if (evntRecPtr.p->hasError()) { - jam(); - EVENT_TRACE; - CreateEvntRef * ret = (CreateEvntRef *)signal->getDataPtrSend(); - - ret->setEventId(evntRecPtr.p->m_request.getEventId()); - ret->setEventKey(evntRecPtr.p->m_request.getEventKey()); - ret->setUserData(evntRecPtr.p->m_request.getUserData()); - ret->senderRef = reference(); - ret->setTableId(evntRecPtr.p->m_request.getTableId()); - ret->setEventType(evntRecPtr.p->m_request.getEventType()); - ret->setRequestType(evntRecPtr.p->m_request.getRequestType()); - - ret->setErrorCode(evntRecPtr.p->m_errorCode); - ret->setErrorLine(evntRecPtr.p->m_errorLine); - ret->setErrorNode(evntRecPtr.p->m_errorNode); - - signalLength = CreateEvntRef::SignalLength; -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT sending GSN_CREATE_EVNT_REF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef); - ndbout_c("errorCode = %u", evntRecPtr.p->m_errorCode); - ndbout_c("errorLine = %u", evntRecPtr.p->m_errorLine); -#endif - gsn = GSN_CREATE_EVNT_REF; - - } else { - jam(); - EVENT_TRACE; - CreateEvntConf * evntConf = (CreateEvntConf *)signal->getDataPtrSend(); - - evntConf->setEventId(evntRecPtr.p->m_request.getEventId()); - evntConf->setEventKey(evntRecPtr.p->m_request.getEventKey()); - evntConf->setUserData(evntRecPtr.p->m_request.getUserData()); - evntConf->senderRef = reference(); - 
evntConf->setTableId(evntRecPtr.p->m_request.getTableId()); - evntConf->setAttrListBitmask(evntRecPtr.p->m_request.getAttrListBitmask()); - evntConf->setEventType(evntRecPtr.p->m_request.getEventType()); - evntConf->setRequestType(evntRecPtr.p->m_request.getRequestType()); - - signalLength = CreateEvntConf::SignalLength; -#ifdef EVENT_PH2_DEBUG - ndbout_c("DBDICT sending GSN_CREATE_EVNT_CONF to evntRecPtr.i = (%d) node = %u ref = %u", evntRecPtr.i, refToNode(senderRef), senderRef); -#endif - gsn = GSN_CREATE_EVNT_CONF; - } - - if (ptr) { - jam(); - sendSignal(senderRef, gsn, signal, signalLength, JBB, ptr, noLSP); - } else { - jam(); - sendSignal(senderRef, gsn, signal, signalLength, JBB); - } - - c_opCreateEvent.release(evntRecPtr); -} - -/*************************************************************/ - -/******************************************************************** - * - * Start event - * - *******************************************************************/ - -void Dbdict::execSUB_START_REQ(Signal* signal) -{ - jamEntry(); - - Uint32 origSenderRef = signal->senderBlockRef(); - - OpSubEventPtr subbPtr; - if (!c_opSubEvent.seize(subbPtr)) { - SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); - { // fix - Uint32 subcriberRef = ((SubStartReq*)signal->getDataPtr())->subscriberRef; - ref->subscriberRef = subcriberRef; - } - jam(); - // ret->setErrorCode(SubStartRef::SeizeError); - // ret->setErrorLine(__LINE__); - // ret->setErrorNode(reference()); - ref->senderRef = reference(); - ref->setTemporary(SubStartRef::Busy); - - sendSignal(origSenderRef, GSN_SUB_START_REF, signal, - SubStartRef::SignalLength2, JBB); - return; - } - - { - const SubStartReq* req = (SubStartReq*) signal->getDataPtr(); - subbPtr.p->m_senderRef = req->senderRef; - subbPtr.p->m_senderData = req->senderData; - subbPtr.p->m_errorCode = 0; - } - - if (refToBlock(origSenderRef) != DBDICT) { - /* - * Coordinator - */ - jam(); - - subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = subbPtr.p->m_reqTracker; - p.init(c_counterMgr, rg, GSN_SUB_START_REF, subbPtr.i); - - SubStartReq* req = (SubStartReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Coordinator) sending GSN_SUB_START_REQ to DBDICT participants subbPtr.i = (%d)", subbPtr.i); -#endif - - sendSignal(rg, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB); - return; - } - /* - * Participant - */ - ndbrequire(refToBlock(origSenderRef) == DBDICT); - - { - SubStartReq* req = (SubStartReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Participant) sending GSN_SUB_START_REQ to SUMA subbPtr.i = (%d)", subbPtr.i); -#endif - sendSignal(SUMA_REF, GSN_SUB_START_REQ, signal, SubStartReq::SignalLength2, JBB); - } -} - -void Dbdict::execSUB_START_REF(Signal* signal) -{ - jamEntry(); - - const SubStartRef* ref = (SubStartRef*) signal->getDataPtr(); - Uint32 senderRef = ref->senderRef; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ref->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Participant) got GSN_SUB_START_REF = (%d)", subbPtr.i); -#endif - - if (ref->isTemporary()){ - jam(); - SubStartReq* req = (SubStartReq*)signal->getDataPtrSend(); - { // fix - Uint32 subscriberRef = 
ref->subscriberRef; - req->subscriberRef = subscriberRef; - } - req->senderRef = reference(); - req->senderData = subbPtr.i; - sendSignal(SUMA_REF, GSN_SUB_START_REQ, - signal, SubStartReq::SignalLength2, JBB); - } else { - jam(); - - SubStartRef* ref = (SubStartRef*) signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF, - signal, SubStartRef::SignalLength2, JBB); - c_opSubEvent.release(subbPtr); - } - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_REF = (%d)", subbPtr.i); -#endif - if (ref->errorCode == SubStartRef::NF_FakeErrorREF){ - jam(); - subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef)); - } else { - jam(); - subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef)); - } - completeSubStartReq(signal,subbPtr.i,0); -} - -void Dbdict::execSUB_START_CONF(Signal* signal) -{ - jamEntry(); - - const SubStartConf* conf = (SubStartConf*) signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, conf->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - SubStartConf* conf = (SubStartConf*) signal->getDataPtrSend(); - -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Participant) got GSN_SUB_START_CONF = (%d)", subbPtr.i); -#endif - - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF, - signal, SubStartConf::SignalLength2, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); -#ifdef EVENT_PH3_DEBUG - ndbout_c("DBDICT(Coordinator) got GSN_SUB_START_CONF = (%d)", subbPtr.i); -#endif - subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); - completeSubStartReq(signal,subbPtr.i,0); -} - -/* - * Coordinator - */ -void Dbdict::completeSubStartReq(Signal* signal, - Uint32 ptrI, - Uint32 returnCode){ - jam(); - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ptrI); - - if (!subbPtr.p->m_reqTracker.done()){ - jam(); - return; - } - - if (subbPtr.p->m_reqTracker.hasRef()) { - jam(); -#ifdef EVENT_DEBUG - ndbout_c("SUB_START_REF"); -#endif - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_REF, - signal, SubStartRef::SignalLength, JBB); - if (subbPtr.p->m_reqTracker.hasConf()) { - // stopStartedNodes(signal); - } - c_opSubEvent.release(subbPtr); - return; - } -#ifdef EVENT_DEBUG - ndbout_c("SUB_START_CONF"); -#endif - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_START_CONF, - signal, SubStartConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); -} - -/******************************************************************** - * - * Stop event - * - *******************************************************************/ - -void Dbdict::execSUB_STOP_REQ(Signal* signal) -{ - jamEntry(); - - Uint32 origSenderRef = signal->senderBlockRef(); - - OpSubEventPtr subbPtr; - if (!c_opSubEvent.seize(subbPtr)) { - SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend(); - jam(); - // ret->setErrorCode(SubStartRef::SeizeError); - // ret->setErrorLine(__LINE__); - // ret->setErrorNode(reference()); - ref->senderRef = reference(); - ref->setTemporary(SubStopRef::Busy); - - sendSignal(origSenderRef, GSN_SUB_STOP_REF, signal, - SubStopRef::SignalLength, JBB); - return; - } - - { - const SubStopReq* req = 
(SubStopReq*) signal->getDataPtr(); - subbPtr.p->m_senderRef = req->senderRef; - subbPtr.p->m_senderData = req->senderData; - subbPtr.p->m_errorCode = 0; - } - - if (refToBlock(origSenderRef) != DBDICT) { - /* - * Coordinator - */ - jam(); -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_REQ 1"); -#endif - subbPtr.p->m_senderRef = origSenderRef; // not sure if API sets correctly - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = subbPtr.p->m_reqTracker; - p.init(c_counterMgr, rg, GSN_SUB_STOP_REF, subbPtr.i); - - SubStopReq* req = (SubStopReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - - sendSignal(rg, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); - return; - } - /* - * Participant - */ -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_REQ 2"); -#endif - ndbrequire(refToBlock(origSenderRef) == DBDICT); - { - SubStopReq* req = (SubStopReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = subbPtr.i; - - sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); - } -} - -void Dbdict::execSUB_STOP_REF(Signal* signal) -{ - jamEntry(); - const SubStopRef* ref = (SubStopRef*) signal->getDataPtr(); - Uint32 senderRef = ref->senderRef; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ref->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - if (ref->isTemporary()){ - jam(); - SubStopReq* req = (SubStopReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = subbPtr.i; - sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, - signal, SubStopReq::SignalLength, JBB); - } else { - jam(); - SubStopRef* ref = (SubStopRef*) signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF, - signal, SubStopRef::SignalLength, JBB); - c_opSubEvent.release(subbPtr); - } - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - if (ref->errorCode == SubStopRef::NF_FakeErrorREF){ - jam(); - subbPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef)); - } else { - jam(); - subbPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef)); - } - completeSubStopReq(signal,subbPtr.i,0); -} - -void Dbdict::execSUB_STOP_CONF(Signal* signal) -{ - jamEntry(); - - const SubStopConf* conf = (SubStopConf*) signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, conf->senderData); - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - SubStopConf* conf = (SubStopConf*) signal->getDataPtrSend(); - - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF, - signal, SubStopConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - subbPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); - completeSubStopReq(signal,subbPtr.i,0); -} - -/* - * Coordinator - */ -void Dbdict::completeSubStopReq(Signal* signal, - Uint32 ptrI, - Uint32 returnCode){ - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ptrI); - - if (!subbPtr.p->m_reqTracker.done()){ - jam(); - return; - } - - if (subbPtr.p->m_reqTracker.hasRef()) { - jam(); -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_REF"); -#endif - SubStopRef* ref = (SubStopRef*)signal->getDataPtrSend(); - - ref->senderRef = 
reference(); - ref->senderData = subbPtr.p->m_senderData; - /* - ref->subscriptionId = subbPtr.p->m_senderData; - ref->subscriptionKey = subbPtr.p->m_senderData; - ref->part = subbPtr.p->m_part; // SubscriptionData::Part - ref->subscriberData = subbPtr.p->m_subscriberData; - ref->subscriberRef = subbPtr.p->m_subscriberRef; - */ - ref->errorCode = subbPtr.p->m_errorCode; - - - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_REF, - signal, SubStopRef::SignalLength, JBB); - if (subbPtr.p->m_reqTracker.hasConf()) { - // stopStartedNodes(signal); - } - c_opSubEvent.release(subbPtr); - return; - } -#ifdef EVENT_DEBUG - ndbout_c("SUB_STOP_CONF"); -#endif - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_STOP_CONF, - signal, SubStopConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); -} - -/*************************************************************** - * MODULE: Drop event. - * - * Drop event. - * - * TODO - */ - -void -Dbdict::execDROP_EVNT_REQ(Signal* signal) -{ - jamEntry(); - EVENT_TRACE; - - DropEvntReq *req = (DropEvntReq*)signal->getDataPtr(); - const Uint32 senderRef = signal->senderBlockRef(); - OpDropEventPtr evntRecPtr; - - // Seize a Create Event record - if (!c_opDropEvent.seize(evntRecPtr)) { - // Failed to allocate event record - jam(); - releaseSections(signal); - - DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend(); - ret->setErrorCode(DropEvntRef::SeizeError); - ret->setErrorLine(__LINE__); - ret->setErrorNode(reference()); - sendSignal(senderRef, GSN_DROP_EVNT_REF, signal, - DropEvntRef::SignalLength, JBB); - return; - } - -#ifdef EVENT_DEBUG - ndbout_c("DBDICT::execDROP_EVNT_REQ evntRecId = (%d)", evntRecPtr.i); -#endif - - OpDropEvent* evntRec = evntRecPtr.p; - evntRec->init(req); - - SegmentedSectionPtr ssPtr; - - signal->getSection(ssPtr, 0); - - SimplePropertiesSectionReader r0(ssPtr, getSectionSegmentPool()); -#ifdef EVENT_DEBUG - r0.printAll(ndbout); -#endif - // event name - if ((!r0.first()) || - (r0.getValueType() != SimpleProperties::StringValue) || - (r0.getValueLen() <= 0)) { - jam(); - releaseSections(signal); - - evntRecPtr.p->m_errorCode = DropEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorNode = reference(); - - dropEvent_sendReply(signal, evntRecPtr); - return; - } - r0.getString(evntRecPtr.p->m_eventRec.NAME); - { - int len = strlen(evntRecPtr.p->m_eventRec.NAME); - memset(evntRecPtr.p->m_eventRec.NAME+len, 0, MAX_TAB_NAME_SIZE-len); -#ifdef EVENT_DEBUG - printf("DropEvntReq; EventName %s, len %u\n", - evntRecPtr.p->m_eventRec.NAME, len); - for(int i = 0; i < MAX_TAB_NAME_SIZE/4; i++) - printf("H'%.8x ", ((Uint32*)evntRecPtr.p->m_eventRec.NAME)[i]); - printf("\n"); -#endif - } - - releaseSections(signal); - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_READ), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecPtr.i, - UtilPrepareReq::Read); -} - -void -Dbdict::dropEventUTIL_PREPARE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilPrepareRef(signal, callbackData, returnCode); - return; - } - - UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr(); - OpDropEventPtr evntRecPtr; - evntRecPtr.i = req->getSenderData(); - const Uint32 prepareId = req->getPrepareId(); - - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_READ), 0 }; - - executeTransEventSysTable(&c, signal, - evntRecPtr.i, 
evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Read); -} - -void -Dbdict::dropEventUTIL_EXECUTE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilExecuteRef(signal, callbackData, returnCode); - return; - } - - OpDropEventPtr evntRecPtr; - UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr(); - jam(); - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - parseReadEventSys(signal, evntRecPtr.p->m_eventRec); - - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - RequestTracker & p = evntRecPtr.p->m_reqTracker; - p.init(c_counterMgr, rg, GSN_SUB_REMOVE_REF, - evntRecPtr.i); - - SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = evntRecPtr.i; - req->subscriptionId = evntRecPtr.p->m_eventRec.SUBID; - req->subscriptionKey = evntRecPtr.p->m_eventRec.SUBKEY; - - sendSignal(rg, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB); -} - -/* - * Participant - */ - -void -Dbdict::execSUB_REMOVE_REQ(Signal* signal) -{ - jamEntry(); - - Uint32 origSenderRef = signal->senderBlockRef(); - - OpSubEventPtr subbPtr; - if (!c_opSubEvent.seize(subbPtr)) { - SubRemoveRef * ref = (SubRemoveRef *)signal->getDataPtrSend(); - jam(); - ref->senderRef = reference(); - ref->setTemporary(SubRemoveRef::Busy); - - sendSignal(origSenderRef, GSN_SUB_REMOVE_REF, signal, - SubRemoveRef::SignalLength, JBB); - return; - } - - { - const SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtr(); - subbPtr.p->m_senderRef = req->senderRef; - subbPtr.p->m_senderData = req->senderData; - subbPtr.p->m_errorCode = 0; - } - - SubRemoveReq* req = (SubRemoveReq*) signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = subbPtr.i; - - sendSignal(SUMA_REF, GSN_SUB_REMOVE_REQ, signal, SubRemoveReq::SignalLength, JBB); -} - -/* - * Coordintor/Participant - */ - -void -Dbdict::execSUB_REMOVE_REF(Signal* signal) -{ - jamEntry(); - const SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtr(); - Uint32 senderRef = ref->senderRef; - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, ref->senderData); - if (ref->errorCode == (Uint32) GrepError::SUBSCRIPTION_ID_NOT_FOUND) { - // conf this since this may occur if a nodefailiure has occured - // earlier so that the systable was not cleared - SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF, - signal, SubRemoveConf::SignalLength, JBB); - } else { - SubRemoveRef* ref = (SubRemoveRef*) signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_REF, - signal, SubRemoveRef::SignalLength, JBB); - } - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - OpDropEventPtr eventRecPtr; - c_opDropEvent.getPtr(eventRecPtr, ref->senderData); - if (ref->errorCode == SubRemoveRef::NF_FakeErrorREF){ - jam(); - eventRecPtr.p->m_reqTracker.ignoreRef(c_counterMgr, refToNode(senderRef)); - } else { - jam(); - eventRecPtr.p->m_reqTracker.reportRef(c_counterMgr, refToNode(senderRef)); - } - completeSubRemoveReq(signal,eventRecPtr.i,0); -} - -void 
-Dbdict::execSUB_REMOVE_CONF(Signal* signal) -{ - jamEntry(); - const SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtr(); - Uint32 senderRef = conf->senderRef; - - if (refToBlock(senderRef) == SUMA) { - /* - * Participant - */ - jam(); - OpSubEventPtr subbPtr; - c_opSubEvent.getPtr(subbPtr, conf->senderData); - SubRemoveConf* conf = (SubRemoveConf*) signal->getDataPtrSend(); - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - sendSignal(subbPtr.p->m_senderRef, GSN_SUB_REMOVE_CONF, - signal, SubRemoveConf::SignalLength, JBB); - c_opSubEvent.release(subbPtr); - return; - } - /* - * Coordinator - */ - ndbrequire(refToBlock(senderRef) == DBDICT); - OpDropEventPtr eventRecPtr; - c_opDropEvent.getPtr(eventRecPtr, conf->senderData); - eventRecPtr.p->m_reqTracker.reportConf(c_counterMgr, refToNode(senderRef)); - completeSubRemoveReq(signal,eventRecPtr.i,0); -} - -void -Dbdict::completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 xxx) -{ - OpDropEventPtr evntRecPtr; - c_opDropEvent.getPtr(evntRecPtr, ptrI); - - if (!evntRecPtr.p->m_reqTracker.done()){ - jam(); - return; - } - - if (evntRecPtr.p->m_reqTracker.hasRef()) { - jam(); - evntRecPtr.p->m_errorNode = reference(); - evntRecPtr.p->m_errorLine = __LINE__; - evntRecPtr.p->m_errorCode = DropEvntRef::Undefined; - dropEvent_sendReply(signal, evntRecPtr); - return; - } - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_PREPARE_DELETE), 0 }; - - prepareTransactionEventSysTable(&c, signal, evntRecPtr.i, - UtilPrepareReq::Delete); -} - -void -Dbdict::dropEventUTIL_PREPARE_DELETE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilPrepareRef(signal, callbackData, returnCode); - return; - } - - UtilPrepareConf* const req = (UtilPrepareConf*)signal->getDataPtr(); - OpDropEventPtr evntRecPtr; - jam(); - evntRecPtr.i = req->getSenderData(); - const Uint32 prepareId = req->getPrepareId(); - - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); -#ifdef EVENT_DEBUG - printf("DropEvntUTIL_PREPARE; evntRecPtr.i len %u\n",evntRecPtr.i); -#endif - - Callback c = { safe_cast(&Dbdict::dropEventUTIL_EXECUTE_DELETE), 0 }; - - executeTransEventSysTable(&c, signal, - evntRecPtr.i, evntRecPtr.p->m_eventRec, - prepareId, UtilPrepareReq::Delete); -} - -void -Dbdict::dropEventUTIL_EXECUTE_DELETE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - if (returnCode != 0) { - EVENT_TRACE; - dropEventUtilExecuteRef(signal, callbackData, returnCode); - return; - } - - OpDropEventPtr evntRecPtr; - UtilExecuteConf * const ref = (UtilExecuteConf *)signal->getDataPtr(); - jam(); - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - dropEvent_sendReply(signal, evntRecPtr); -} - -void -Dbdict::dropEventUtilPrepareRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - UtilPrepareRef * const ref = (UtilPrepareRef *)signal->getDataPtr(); - OpDropEventPtr evntRecPtr; - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - bool temporary = false; - interpretUtilPrepareErrorCode((UtilPrepareRef::ErrorCode)ref->getErrorCode(), - temporary, evntRecPtr.p->m_errorLine); - if (temporary) { - evntRecPtr.p->m_errorCode = (DropEvntRef::ErrorCode) - ((Uint32) DropEvntRef::Undefined | (Uint32) DropEvntRef::Temporary); - } - - if 
(evntRecPtr.p->m_errorCode == 0) { - evntRecPtr.p->m_errorCode = DropEvntRef::Undefined; - evntRecPtr.p->m_errorLine = __LINE__; - } - evntRecPtr.p->m_errorNode = reference(); - - dropEvent_sendReply(signal, evntRecPtr); -} - -void -Dbdict::dropEventUtilExecuteRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode) -{ - jam(); - EVENT_TRACE; - OpDropEventPtr evntRecPtr; - UtilExecuteRef * const ref = (UtilExecuteRef *)signal->getDataPtr(); - jam(); - evntRecPtr.i = ref->getSenderData(); - ndbrequire((evntRecPtr.p = c_opDropEvent.getPtr(evntRecPtr.i)) != NULL); - - evntRecPtr.p->m_errorNode = reference(); - evntRecPtr.p->m_errorLine = __LINE__; - - switch (ref->getErrorCode()) { - case UtilExecuteRef::TCError: - switch (ref->getTCErrorCode()) { - case ZNOT_FOUND: - jam(); - evntRecPtr.p->m_errorCode = DropEvntRef::EventNotFound; - break; - default: - jam(); - evntRecPtr.p->m_errorCode = DropEvntRef::UndefinedTCError; - break; - } - break; - default: - jam(); - evntRecPtr.p->m_errorCode = DropEvntRef::Undefined; - break; - } - dropEvent_sendReply(signal, evntRecPtr); -} - -void Dbdict::dropEvent_sendReply(Signal* signal, - OpDropEventPtr evntRecPtr) -{ - jam(); - EVENT_TRACE; - Uint32 senderRef = evntRecPtr.p->m_request.getUserRef(); - - if (evntRecPtr.p->hasError()) { - jam(); - DropEvntRef * ret = (DropEvntRef *)signal->getDataPtrSend(); - - ret->setUserData(evntRecPtr.p->m_request.getUserData()); - ret->setUserRef(evntRecPtr.p->m_request.getUserRef()); - - ret->setErrorCode(evntRecPtr.p->m_errorCode); - ret->setErrorLine(evntRecPtr.p->m_errorLine); - ret->setErrorNode(evntRecPtr.p->m_errorNode); - - sendSignal(senderRef, GSN_DROP_EVNT_REF, signal, - DropEvntRef::SignalLength, JBB); - } else { - jam(); - DropEvntConf * evntConf = (DropEvntConf *)signal->getDataPtrSend(); - - evntConf->setUserData(evntRecPtr.p->m_request.getUserData()); - evntConf->setUserRef(evntRecPtr.p->m_request.getUserRef()); - - sendSignal(senderRef, GSN_DROP_EVNT_CONF, signal, - DropEvntConf::SignalLength, JBB); - } - - c_opDropEvent.release(evntRecPtr); -} /** * MODULE: Alter index diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index e4788898cc8..02443c648f1 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -45,8 +45,6 @@ #include #include #include -#include -#include #include #include #include @@ -514,45 +512,6 @@ private: void execBACKUP_FRAGMENT_REQ(Signal*); - // Util signals used by Event code - void execUTIL_PREPARE_CONF(Signal* signal); - void execUTIL_PREPARE_REF (Signal* signal); - void execUTIL_EXECUTE_CONF(Signal* signal); - void execUTIL_EXECUTE_REF (Signal* signal); - void execUTIL_RELEASE_CONF(Signal* signal); - void execUTIL_RELEASE_REF (Signal* signal); - - - // Event signals from API - void execCREATE_EVNT_REQ (Signal* signal); - void execCREATE_EVNT_CONF(Signal* signal); - void execCREATE_EVNT_REF (Signal* signal); - - void execDROP_EVNT_REQ (Signal* signal); - - void execSUB_START_REQ (Signal* signal); - void execSUB_START_CONF (Signal* signal); - void execSUB_START_REF (Signal* signal); - - void execSUB_STOP_REQ (Signal* signal); - void execSUB_STOP_CONF (Signal* signal); - void execSUB_STOP_REF (Signal* signal); - - // Event signals from SUMA - - void execCREATE_SUBID_CONF(Signal* signal); - void execCREATE_SUBID_REF (Signal* signal); - - void execSUB_CREATE_CONF(Signal* signal); - void execSUB_CREATE_REF (Signal* signal); - - void execSUB_SYNC_CONF(Signal* signal); - void execSUB_SYNC_REF 
(Signal* signal); - - void execSUB_REMOVE_REQ(Signal* signal); - void execSUB_REMOVE_CONF(Signal* signal); - void execSUB_REMOVE_REF(Signal* signal); - // Trigger signals void execCREATE_TRIG_REQ(Signal* signal); void execCREATE_TRIG_CONF(Signal* signal); @@ -1348,119 +1307,6 @@ private: }; typedef Ptr OpBuildIndexPtr; - /** - * Operation record for Util Signals. - */ - struct OpSignalUtil : OpRecordCommon{ - Callback m_callback; - Uint32 m_userData; - }; - typedef Ptr OpSignalUtilPtr; - - /** - * Operation record for subscribe-start-stop - */ - struct OpSubEvent : OpRecordCommon { - Uint32 m_senderRef; - Uint32 m_senderData; - Uint32 m_errorCode; - RequestTracker m_reqTracker; - }; - typedef Ptr OpSubEventPtr; - - static const Uint32 sysTab_NDBEVENTS_0_szs[]; - - /** - * Operation record for create event. - */ - struct OpCreateEvent : OpRecordCommon { - // original request (event id will be added) - CreateEvntReq m_request; - //AttributeMask m_attrListBitmask; - // AttributeList m_attrList; - sysTab_NDBEVENTS_0 m_eventRec; - // char m_eventName[MAX_TAB_NAME_SIZE]; - // char m_tableName[MAX_TAB_NAME_SIZE]; - - // coordinator DICT - RequestTracker m_reqTracker; - // state info - CreateEvntReq::RequestType m_requestType; - Uint32 m_requestFlag; - // error info - CreateEvntRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // ctor - OpCreateEvent() { - memset(&m_request, 0, sizeof(m_request)); - m_requestType = CreateEvntReq::RT_UNDEFINED; - m_requestFlag = 0; - m_errorCode = CreateEvntRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void init(const CreateEvntReq* req, Dbdict* dp) { - m_request = *req; - m_errorCode = CreateEvntRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - m_requestType = req->getRequestType(); - m_requestFlag = req->getRequestFlag(); - } - bool hasError() { - return m_errorCode != CreateEvntRef::NoError; - } - void setError(const CreateEvntRef* ref) { - if (ref != 0 && ! hasError()) { - m_errorCode = ref->getErrorCode(); - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - - }; - typedef Ptr OpCreateEventPtr; - - /** - * Operation record for drop event. - */ - struct OpDropEvent : OpRecordCommon { - // original request - DropEvntReq m_request; - // char m_eventName[MAX_TAB_NAME_SIZE]; - sysTab_NDBEVENTS_0 m_eventRec; - RequestTracker m_reqTracker; - // error info - DropEvntRef::ErrorCode m_errorCode; - Uint32 m_errorLine; - Uint32 m_errorNode; - // ctor - OpDropEvent() { - memset(&m_request, 0, sizeof(m_request)); - m_errorCode = DropEvntRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - void init(const DropEvntReq* req) { - m_request = *req; - m_errorCode = DropEvntRef::NoError; - m_errorLine = 0; - m_errorNode = 0; - } - bool hasError() { - return m_errorCode != DropEvntRef::NoError; - } - void setError(const DropEvntRef* ref) { - if (ref != 0 && ! hasError()) { - m_errorCode = ref->getErrorCode(); - m_errorLine = ref->getErrorLine(); - m_errorNode = ref->getErrorNode(); - } - } - }; - typedef Ptr OpDropEventPtr; - /** * Operation record for create trigger. 
*/ @@ -1681,10 +1527,6 @@ public: STATIC_CONST( opDropIndexSize = sizeof(OpDropIndex) ); STATIC_CONST( opAlterIndexSize = sizeof(OpAlterIndex) ); STATIC_CONST( opBuildIndexSize = sizeof(OpBuildIndex) ); - STATIC_CONST( opCreateEventSize = sizeof(OpCreateEvent) ); - STATIC_CONST( opSubEventSize = sizeof(OpSubEvent) ); - STATIC_CONST( opDropEventSize = sizeof(OpDropEvent) ); - STATIC_CONST( opSignalUtilSize = sizeof(OpSignalUtil) ); STATIC_CONST( opCreateTriggerSize = sizeof(OpCreateTrigger) ); STATIC_CONST( opDropTriggerSize = sizeof(OpDropTrigger) ); STATIC_CONST( opAlterTriggerSize = sizeof(OpAlterTrigger) ); @@ -1695,10 +1537,6 @@ private: Uint32 u_opDropTable [PTR_ALIGN(opDropTableSize)]; Uint32 u_opCreateIndex [PTR_ALIGN(opCreateIndexSize)]; Uint32 u_opDropIndex [PTR_ALIGN(opDropIndexSize)]; - Uint32 u_opCreateEvent [PTR_ALIGN(opCreateEventSize)]; - Uint32 u_opSubEvent [PTR_ALIGN(opSubEventSize)]; - Uint32 u_opDropEvent [PTR_ALIGN(opDropEventSize)]; - Uint32 u_opSignalUtil [PTR_ALIGN(opSignalUtilSize)]; Uint32 u_opAlterIndex [PTR_ALIGN(opAlterIndexSize)]; Uint32 u_opBuildIndex [PTR_ALIGN(opBuildIndexSize)]; Uint32 u_opCreateTrigger[PTR_ALIGN(opCreateTriggerSize)]; @@ -1715,10 +1553,6 @@ private: KeyTable2 c_opDropIndex; KeyTable2 c_opAlterIndex; KeyTable2 c_opBuildIndex; - KeyTable2 c_opCreateEvent; - KeyTable2 c_opSubEvent; - KeyTable2 c_opDropEvent; - KeyTable2 c_opSignalUtil; KeyTable2 c_opCreateTrigger; KeyTable2 c_opDropTrigger; KeyTable2 c_opAlterTrigger; @@ -1918,101 +1752,6 @@ private: void buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr); void buildIndex_sendReply(Signal* signal, OpBuildIndexPtr opPtr, bool); - // Events - void - createEventUTIL_PREPARE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - createEventUTIL_EXECUTE(Signal *signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_PREPARE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_EXECUTE_READ(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_PREPARE_DELETE(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUTIL_EXECUTE_DELETE(Signal *signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUtilPrepareRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - void - dropEventUtilExecuteRef(Signal* signal, - Uint32 callbackData, - Uint32 returnCode); - int - sendSignalUtilReq(Callback *c, - BlockReference ref, - GlobalSignalNumber gsn, - Signal* signal, - Uint32 length, - JobBufferLevel jbuf, - LinearSectionPtr ptr[3], - Uint32 noOfSections); - int - recvSignalUtilReq(Signal* signal, Uint32 returnCode); - - void completeSubStartReq(Signal* signal, Uint32 ptrI, Uint32 returnCode); - void completeSubStopReq(Signal* signal, Uint32 ptrI, Uint32 returnCode); - void completeSubRemoveReq(Signal* signal, Uint32 ptrI, Uint32 returnCode); - - void dropEvent_sendReply(Signal* signal, - OpDropEventPtr evntRecPtr); - - void createEvent_RT_USER_CREATE(Signal* signal, OpCreateEventPtr evntRecPtr); - void createEventComplete_RT_USER_CREATE(Signal* signal, - OpCreateEventPtr evntRecPtr); - void createEvent_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr); - void createEventComplete_RT_USER_GET(Signal* signal, OpCreateEventPtr evntRecPtr); - - void createEvent_RT_DICT_AFTER_GET(Signal* signal, OpCreateEventPtr evntRecPtr); - - void createEvent_nodeFailCallback(Signal* signal, Uint32 eventRecPtrI, - Uint32 returnCode); - void 
createEvent_sendReply(Signal* signal, OpCreateEventPtr evntRecPtr, - LinearSectionPtr *ptr = NULL, int noLSP = 0); - - void prepareTransactionEventSysTable (Callback *c, - Signal* signal, - Uint32 senderData, - UtilPrepareReq::OperationTypeValue prepReq); - void prepareUtilTransaction(Callback *c, - Signal* signal, - Uint32 senderData, - Uint32 tableId, - const char *tableName, - UtilPrepareReq::OperationTypeValue prepReq, - Uint32 noAttr, - Uint32 attrIds[], - const char *attrNames[]); - - void executeTransEventSysTable(Callback *c, - Signal *signal, - const Uint32 ptrI, - sysTab_NDBEVENTS_0& m_eventRec, - const Uint32 prepareId, - UtilPrepareReq::OperationTypeValue prepReq); - void executeTransaction(Callback *c, - Signal* signal, - Uint32 senderData, - Uint32 prepareId, - Uint32 noAttr, - LinearSectionPtr headerPtr, - LinearSectionPtr dataPtr); - - void parseReadEventSys(Signal *signal, sysTab_NDBEVENTS_0& m_eventRec); - // create trigger void createTrigger_recvReply(Signal* signal, const CreateTrigConf* conf, const CreateTrigRef* ref); diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index dcb5d201d7f..a67dea40937 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1970,9 +1970,6 @@ void Dbdih::execINCL_NODECONF(Signal* signal) signal->theData[0] = reference(); signal->theData[1] = c_nodeStartSlave.nodeId; sendSignal(BACKUP_REF, GSN_INCL_NODEREQ, signal, 2, JBB); - - // Suma will not send response to this for now, later... - sendSignal(SUMA_REF, GSN_INCL_NODEREQ, signal, 2, JBB); return; }//if if (TstartNode_or_blockref == numberToRef(BACKUP, getOwnNodeId())){ @@ -7971,12 +7968,6 @@ void Dbdih::writingCopyGciLab(Signal* signal, FileRecordPtr filePtr) if (reason == CopyGCIReq::GLOBAL_CHECKPOINT) { jam(); cgcpParticipantState = GCP_PARTICIPANT_READY; - - SubGcpCompleteRep * const rep = (SubGcpCompleteRep*)signal->getDataPtr(); - rep->gci = coldgcp; - rep->senderData = 0; - sendSignal(SUMA_REF, GSN_SUB_GCP_COMPLETE_REP, signal, - SubGcpCompleteRep::SignalLength, JBB); } jam(); diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 176bab0d4bf..14efa8cd784 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -1461,9 +1461,6 @@ void Ndbcntr::execNODE_FAILREP(Signal* signal) sendSignal(BACKUP_REF, GSN_NODE_FAILREP, signal, NodeFailRep::SignalLength, JBB); - sendSignal(SUMA_REF, GSN_NODE_FAILREP, signal, - NodeFailRep::SignalLength, JBB); - if (c_stopRec.stopReq.senderRef) { jam(); diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 9a7256b4a55..3d9ade9b57c 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2340,7 +2340,6 @@ void Qmgr::sendApiFailReq(Signal* signal, Uint16 failedNodeNo) failedNodePtr.p->failState = WAITING_FOR_FAILCONF1; sendSignal(DBTC_REF, GSN_API_FAILREQ, signal, 2, JBA); sendSignal(DBDICT_REF, GSN_API_FAILREQ, signal, 2, JBA); - sendSignal(SUMA_REF, GSN_API_FAILREQ, signal, 2, JBA); /**------------------------------------------------------------------------- * THE OTHER NODE WAS AN API NODE. 
THE COMMUNICATION LINK IS ALREADY diff --git a/ndb/src/kernel/blocks/suma/Suma.cpp b/ndb/src/kernel/blocks/suma/Suma.cpp index 3644bc0a03f..449436331e4 100644 --- a/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/ndb/src/kernel/blocks/suma/Suma.cpp @@ -82,44 +82,6 @@ static const Uint32 SUMA_SEQUENCE = 0xBABEBABE; #define PRINT_ONLY 0 static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE; -void -Suma::getNodeGroupMembers(Signal* signal) { - jam(); - /** - * Ask DIH for nodeGroupMembers - */ - CheckNodeGroups * sd = (CheckNodeGroups*)signal->getDataPtrSend(); - sd->blockRef = reference(); - sd->requestType = - CheckNodeGroups::Direct | - CheckNodeGroups::GetNodeGroupMembers; - sd->nodeId = getOwnNodeId(); - EXECUTE_DIRECT(DBDIH, GSN_CHECKNODEGROUPSREQ, signal, - CheckNodeGroups::SignalLength); - jamEntry(); - - c_nodeGroup = sd->output; - c_noNodesInGroup = 0; - for (int i = 0; i < MAX_NDB_NODES; i++) { - if (sd->mask.get(i)) { - if (i == getOwnNodeId()) c_idInNodeGroup = c_noNodesInGroup; - c_nodesInGroup[c_noNodesInGroup] = i; - c_noNodesInGroup++; - } - } - - // ndbout_c("c_noNodesInGroup=%d", c_noNodesInGroup); - ndbrequire(c_noNodesInGroup > 0); // at least 1 node in the nodegroup - -#ifdef NODEFAIL_DEBUG - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - ndbout_c ("Suma: NodeGroup %u, me %u, me in group %u, member[%u] %u", - c_nodeGroup, getOwnNodeId(), c_idInNodeGroup, - i, c_nodesInGroup[i]); - } -#endif -} - void Suma::execREAD_CONFIG_REQ(Signal* signal) { @@ -188,11 +150,6 @@ Suma::execSTTOR(Signal* signal) { DBUG_PRINT("info",("startphase = %u, typeOfStart = %u", startphase, typeOfStart)); - if(startphase == 1){ - jam(); - c_restartLock = true; - } - if(startphase == 3){ jam(); g_TypeOfStart = typeOfStart; @@ -224,32 +181,7 @@ Suma::execSTTOR(Signal* signal) { DBUG_VOID_RETURN; } - if(startphase == 5) { - getNodeGroupMembers(signal); - if (g_TypeOfStart == NodeState::ST_NODE_RESTART) { - jam(); - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]); - if (ref != reference()) - sendSignal(ref, GSN_SUMA_START_ME, signal, - 1 /*SumaStartMe::SignalLength*/, JBB); - } - } - } - if(startphase == 7) { - c_restartLock = false; // may be set false earlier with HANDOVER_REQ - - if (g_TypeOfStart != NodeState::ST_NODE_RESTART) { - for( int i = 0; i < NO_OF_BUCKETS; i++) { - if (getResponsibleSumaNodeId(i) == refToNode(reference())) { - // I'm running this bucket - DBUG_PRINT("info",("bucket %u set to true", i)); - c_buckets[i].active = true; - } - } - } - if(g_TypeOfStart == NodeState::ST_INITIAL_START && c_masterNodeId == getOwnNodeId()) { jam(); @@ -364,282 +296,6 @@ SumaParticipant::execCONTINUEB(Signal* signal) * *****************************************************************************/ -void Suma::execAPI_FAILREQ(Signal* signal) -{ - jamEntry(); - DBUG_ENTER("Suma::execAPI_FAILREQ"); - Uint32 failedApiNode = signal->theData[0]; - //BlockReference retRef = signal->theData[1]; - - c_failedApiNodes.set(failedApiNode); - bool found = removeSubscribersOnNode(signal, failedApiNode); - - if(!found){ - jam(); - c_failedApiNodes.clear(failedApiNode); - } - DBUG_VOID_RETURN; -}//execAPI_FAILREQ() - -bool -SumaParticipant::removeSubscribersOnNode(Signal *signal, Uint32 nodeId) -{ - DBUG_ENTER("SumaParticipant::removeSubscribersOnNode"); - bool found = false; - - SubscriberPtr i_subbPtr; - c_dataSubscribers.first(i_subbPtr); - while(!i_subbPtr.isNull()){ - SubscriberPtr subbPtr = i_subbPtr; - c_dataSubscribers.next(i_subbPtr); - jam(); - if 
(refToNode(subbPtr.p->m_subscriberRef) == nodeId) { - jam(); - c_dataSubscribers.remove(subbPtr); - c_removeDataSubscribers.add(subbPtr); - found = true; - } - } - if(found){ - jam(); - sendSubStopReq(signal); - } - DBUG_RETURN(found); -} - -void -SumaParticipant::sendSubStopReq(Signal *signal, bool unlock){ - DBUG_ENTER("SumaParticipant::sendSubStopReq"); - static bool remove_lock = false; - jam(); - - SubscriberPtr subbPtr; - c_removeDataSubscribers.first(subbPtr); - if (subbPtr.isNull()){ - jam(); -#if 0 - signal->theData[0] = failedApiNode; - signal->theData[1] = reference(); - sendSignal(retRef, GSN_API_FAILCONF, signal, 2, JBB); -#endif - c_failedApiNodes.clear(); - - remove_lock = false; - DBUG_VOID_RETURN; - } - - if(remove_lock && !unlock) { - jam(); - DBUG_VOID_RETURN; - } - remove_lock = true; - - SubscriptionPtr subPtr; - c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); - - SubStopReq * const req = (SubStopReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = subbPtr.i; - req->subscriberRef = subbPtr.p->m_subscriberRef; - req->subscriberData = subbPtr.p->m_subscriberData; - req->subscriptionId = subPtr.p->m_subscriptionId; - req->subscriptionKey = subPtr.p->m_subscriptionKey; - req->part = SubscriptionData::TableData; - - sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); - DBUG_VOID_RETURN; -} - -void -SumaParticipant::execSUB_STOP_CONF(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execSUB_STOP_CONF"); - - SubStopConf * const conf = (SubStopConf*)signal->getDataPtr(); - - // Uint32 subscriberData = conf->subscriberData; - // Uint32 subscriberRef = conf->subscriberRef; - - Subscription key; - key.m_subscriptionId = conf->subscriptionId; - key.m_subscriptionKey = conf->subscriptionKey; - - SubscriptionPtr subPtr; - if(c_subscriptions.find(subPtr, key)) { - jam(); - if (subPtr.p->m_markRemove) { - jam(); - ndbrequire(false); - ndbrequire(subPtr.p->m_nSubscribers > 0); - subPtr.p->m_nSubscribers--; - if (subPtr.p->m_nSubscribers == 0){ - jam(); - completeSubRemoveReq(signal, subPtr); - } - } - } - - sendSubStopReq(signal,true); - DBUG_VOID_RETURN; -} - -void -SumaParticipant::execSUB_STOP_REF(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execSUB_STOP_REF"); - - SubStopRef * const ref = (SubStopRef*)signal->getDataPtr(); - - Uint32 subscriptionId = ref->subscriptionId; - Uint32 subscriptionKey = ref->subscriptionKey; - Uint32 part = ref->part; - Uint32 subscriberData = ref->subscriberData; - Uint32 subscriberRef = ref->subscriberRef; - // Uint32 err = ref->err; - - if(!ref->isTemporary()){ - ndbrequire(false); - } - - SubStopReq * const req = (SubStopReq*)signal->getDataPtrSend(); - req->subscriberRef = subscriberRef; - req->subscriberData = subscriberData; - req->subscriptionId = subscriptionId; - req->subscriptionKey = subscriptionKey; - req->part = part; - - sendSignal(SUMA_REF, GSN_SUB_STOP_REQ, signal, SubStopReq::SignalLength, JBB); - - DBUG_VOID_RETURN; -} - -void -Suma::execNODE_FAILREP(Signal* signal){ - jamEntry(); - DBUG_ENTER("Suma::execNODE_FAILREP"); - - NodeFailRep * const rep = (NodeFailRep*)signal->getDataPtr(); - - bool changed = false; - - NodePtr nodePtr; -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma: nodefailrep"); -#endif - c_nodeFailGCI = getFirstGCI(signal); - - for(c_nodes.first(nodePtr); nodePtr.i != RNIL; c_nodes.next(nodePtr)){ - if(NodeBitmask::get(rep->theNodes, nodePtr.p->nodeId)){ - if(nodePtr.p->alive){ - ndbassert(c_aliveNodes.get(nodePtr.p->nodeId)); - changed 
= true; - jam(); - } else { - ndbassert(!c_aliveNodes.get(nodePtr.p->nodeId)); - jam(); - } - - if (c_preparingNodes.get(nodePtr.p->nodeId)) { - jam(); - // we are currently preparing this node that died - // it's ok just to clear and go back to waiting for it to start up - Restart.resetNode(calcSumaBlockRef(nodePtr.p->nodeId)); - c_preparingNodes.clear(nodePtr.p->nodeId); - } else if (c_handoverToDo) { - jam(); - // TODO what if I'm a SUMA that is currently restarting and the SUMA - // responsible for restarting me is the one that died? - - // a node has failed whilst handover is going on - // let's check if we're in the process of handover with that node - c_handoverToDo = false; - for( int i = 0; i < NO_OF_BUCKETS; i++) { - if (c_buckets[i].handover) { - // I'm doing handover, but is it with the dead node? - if (getResponsibleSumaNodeId(i) == nodePtr.p->nodeId) { - // so it was the dead node, has handover started? - if (c_buckets[i].handover_started) { - jam(); - // we're not ok and will have lost data! - // set not active to indicate this - - // this will generate takeover behaviour - c_buckets[i].active = false; - c_buckets[i].handover_started = false; - } // else we're ok to revert back to state before - c_buckets[i].handover = false; - } else { - jam(); - // ok, we're doing handover with a different node - c_handoverToDo = true; - } - } - } - } - - c_failoverBuffer.nodeFailRep(); - - nodePtr.p->alive = 0; - c_aliveNodes.clear(nodePtr.p->nodeId); // this has to be done after the loop above - } - } - DBUG_VOID_RETURN; -} - -void -Suma::execINCL_NODEREQ(Signal* signal){ - jamEntry(); - - //const Uint32 senderRef = signal->theData[0]; - const Uint32 inclNode = signal->theData[1]; - - NodePtr node; - for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)){ - jam(); - const Uint32 nodeId = node.p->nodeId; - if(inclNode == nodeId){ - jam(); - - ndbrequire(node.p->alive == 0); - ndbrequire(!c_aliveNodes.get(nodeId)); - - for (Uint32 j = 0; j < c_noNodesInGroup; j++) { - jam(); - if (c_nodesInGroup[j] == nodeId) { - // the starting node is part of my node group - jam(); - c_preparingNodes.set(nodeId); // set as being prepared - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - jam(); - if (i == c_idInNodeGroup) { - jam(); - // I'm responsible for restarting this SUMA - // ALL dict's should have meta data info so it is ok to start - Restart.startNode(signal, calcSumaBlockRef(nodeId)); - break; - }//if - if (c_aliveNodes.get(c_nodesInGroup[i])) { - jam(); - break; // another Suma takes care of this - }//if - }//for - break; - }//if - }//for - - node.p->alive = 1; - c_aliveNodes.set(nodeId); - - break; - }//if - }//for - -#if 0 // if we include this DIH's got to be prepared, later if needed... 
- signal->theData[0] = reference(); - - sendSignal(senderRef, GSN_INCL_NODECONF, signal, 1, JBB); -#endif -} - void Suma::execSIGNAL_DROPPED_REP(Signal* signal){ jamEntry(); @@ -685,10 +341,6 @@ Suma::execDUMP_STATE_ORD(Signal* signal){ syncPtr.p->startScan(signal); } - if(tCase == 8002){ - syncPtr.p->startTrigger(signal); - } - if(tCase == 8003){ subPtr.p->m_subscriptionType = SubCreateReq::SingleTableScan; LocalDataBuffer<15> attrs(c_dataBufferPool, syncPtr.p->m_attributeList); @@ -1154,26 +806,6 @@ SumaParticipant::sendSubCreateRef(Signal* signal, const SubCreateReq& req, Uint3 return; } - - - - - - - - - - - -Uint32 -SumaParticipant::getFirstGCI(Signal* signal) { - if (c_lastCompleteGCI == RNIL) { - ndbout_c("WARNING: c_lastCompleteGCI == RNIL"); - return 0; - } - return c_lastCompleteGCI+3; -} - /********************************************************** * * Setting upp trigger for subscription @@ -1219,27 +851,6 @@ SumaParticipant::execSUB_SYNC_REQ(Signal* signal) { case SubscriptionData::MetaData: ok = true; jam(); - if (subPtr.p->m_subscriptionType == SubCreateReq::DatabaseSnapshot) { - TableList::DataBufferIterator it; - syncPtr.p->m_tableList.first(it); - if(it.isNull()) { - /** - * Get all tables from dict - */ - ListTablesReq * req = (ListTablesReq*)signal->getDataPtrSend(); - req->senderRef = reference(); - req->senderData = syncPtr.i; - req->requestData = 0; - /** - * @todo: accomodate scan of index tables? - */ - req->setTableType(DictTabInfo::UserTable); - - sendSignal(DBDICT_REF, GSN_LIST_TABLES_REQ, signal, - ListTablesReq::SignalLength, JBB); - break; - } - } syncPtr.p->startMeta(signal); break; @@ -1273,16 +884,6 @@ SumaParticipant::sendSubSyncRef(Signal* signal, Uint32 errCode){ * Dict interface */ -void -SumaParticipant::execLIST_TABLES_CONF(Signal* signal){ - jamEntry(); - CRASH_INSERTION(13005); - ListTablesConf* const conf = (ListTablesConf*)signal->getDataPtr(); - SyncRecord* tmp = c_syncPool.getPtr(conf->senderData); - tmp->runLIST_TABLES_CONF(signal); -} - - void SumaParticipant::execGET_TABINFOREF(Signal* signal){ jamEntry(); @@ -1491,112 +1092,11 @@ SumaParticipant::execDIGETPRIMCONF(Signal* signal){ tmp->runDIGETPRIMCONF(signal); } -void -SumaParticipant::execCREATE_TRIG_CONF(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execCREATE_TRIG_CONF"); - CRASH_INSERTION(13009); - - CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr(); - - const Uint32 senderData = conf->getConnectionPtr(); - SyncRecord* tmp = c_syncPool.getPtr(senderData); - tmp->runCREATE_TRIG_CONF(signal); - - /** - * dodido - * @todo: I (Johan) dont know what to do here. Jonas, what do you mean? 
- */ - DBUG_VOID_RETURN; -} - -void -SumaParticipant::execCREATE_TRIG_REF(Signal* signal){ - jamEntry(); - ndbrequire(false); -} - -void -SumaParticipant::execDROP_TRIG_CONF(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF"); - CRASH_INSERTION(13010); - - DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr(); - - const Uint32 senderData = conf->getConnectionPtr(); - SyncRecord* tmp = c_syncPool.getPtr(senderData); - tmp->runDROP_TRIG_CONF(signal); - DBUG_VOID_RETURN; -} - -void -SumaParticipant::execDROP_TRIG_REF(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execDROP_TRIG_CONF"); - DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr(); - - const Uint32 senderData = ref->getConnectionPtr(); - SyncRecord* tmp = c_syncPool.getPtr(senderData); - tmp->runDROP_TRIG_CONF(signal); - DBUG_VOID_RETURN; -} - /************************************************************************* * * */ -void -SumaParticipant::SyncRecord::runLIST_TABLES_CONF(Signal* signal){ - jam(); - - ListTablesConf * const conf = (ListTablesConf*)signal->getDataPtr(); - const Uint32 len = signal->length() - ListTablesConf::HeaderLength; - - SubscriptionPtr subPtr; - suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI); - - for (unsigned i = 0; i < len; i++) { - subPtr.p->m_maxTables++; - suma.addTableId(ListTablesConf::getTableId(conf->tableData[i]), subPtr, this); - } - - // for (unsigned i = 0; i < len; i++) - // conf->tableData[i] = ListTablesConf::getTableId(conf->tableData[i]); - // m_tableList.append(&conf->tableData[0], len); - -#if 0 - TableList::DataBufferIterator it; - int i = 0; - for(m_tableList.first(it);!it.isNull();m_tableList.next(it)) { - ndbout_c("%u listtableconf tableid %d", i++, *it.data); - } -#endif - - if(len == ListTablesConf::DataLength){ - jam(); - // we expect more LIST_TABLE_CONF - return; - } - -#if 0 - subPtr.p->m_currentTable = 0; - subPtr.p->m_maxTables = 0; - - TableList::DataBufferIterator it; - for(m_tableList.first(it); !it.isNull(); m_tableList.next(it)) { - subPtr.p->m_maxTables++; - suma.addTableId(*it.data, subPtr, NULL); -#ifdef NODEFAIL_DEBUG - ndbout_c(" listtableconf tableid %d",*it.data); -#endif - } -#endif - - startMeta(signal); -} - void SumaParticipant::SyncRecord::startMeta(Signal* signal){ jam(); @@ -1696,18 +1196,6 @@ SumaParticipant::SyncRecord::runGET_TABINFO_CONF(Signal* signal){ SegmentedSectionPtr ptr; signal->getSection(ptr, GetTabInfoConf::DICT_TAB_INFO); - SubMetaData * data = (SubMetaData*)signal->getDataPtrSend(); - /** - * sending lastCompleteGCI. Used by Lars in interval calculations - * incremenet by one, since last_CompleteGCI is the not the current gci. 
- */ - data->gci = suma.c_lastCompleteGCI + 1; - data->tableId = tableId; - data->senderData = subPtr.p->m_subscriberData; -#if PRINT_ONLY - ndbout_c("GSN_SUB_META_DATA Table %d", tableId); -#else - bool okToSend = m_doSendSyncData; /* @@ -1737,7 +1225,6 @@ SumaParticipant::SyncRecord::runGET_TABINFO_CONF(Signal* signal){ SubMetaData::SignalLength, JBB); } } -#endif TablePtr tabPtr; ndbrequire(suma.c_tables.find(tabPtr, tableId)); @@ -2112,513 +1599,6 @@ SumaParticipant::execSCAN_HBREP(Signal* signal){ #endif } -/********************************************************** - * - * Suma participant interface - * - * Creation of subscriber - * - */ - -void -SumaParticipant::execSUB_START_REQ(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execSUB_START_REQ"); - - CRASH_INSERTION(13013); - - if (c_restartLock) { - jam(); - // ndbout_c("c_restartLock"); - if (RtoI(signal->getSendersBlockRef(), false) == RNIL) { - jam(); - sendSubStartRef(signal, /** Error Code */ 0, true); - DBUG_VOID_RETURN; - } - // only allow other Suma's in the nodegroup to come through for restart purposes - } - - Subscription key; - - SubStartReq * const req = (SubStartReq*)signal->getDataPtr(); - - Uint32 senderRef = req->senderRef; - Uint32 senderData = req->senderData; - Uint32 subscriberData = req->subscriberData; - Uint32 subscriberRef = req->subscriberRef; - SubscriptionData::Part part = (SubscriptionData::Part)req->part; - key.m_subscriptionId = req->subscriptionId; - key.m_subscriptionKey = req->subscriptionKey; - - SubscriptionPtr subPtr; - if(!c_subscriptions.find(subPtr, key)){ - jam(); - sendSubStartRef(signal, /** Error Code */ 0); - DBUG_VOID_RETURN; - } - - Ptr syncPtr; - c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI); - if (syncPtr.p->m_locked) { - jam(); -#if 0 - ndbout_c("Locked"); -#endif - sendSubStartRef(signal, /** Error Code */ 0, true); - DBUG_VOID_RETURN; - } - syncPtr.p->m_locked = true; - - SubscriberPtr subbPtr; - if(!c_subscriberPool.seize(subbPtr)){ - jam(); - syncPtr.p->m_locked = false; - sendSubStartRef(signal, /** Error Code */ 0); - DBUG_VOID_RETURN; - } - - Uint32 type = subPtr.p->m_subscriptionType; - - subbPtr.p->m_senderRef = senderRef; - subbPtr.p->m_senderData = senderData; - - switch (type) { - case SubCreateReq::TableEvent: - jam(); - // we want the data to return to the API not DICT - subbPtr.p->m_subscriberRef = subscriberRef; - // ndbout_c("start ref = %u", signal->getSendersBlockRef()); - // ndbout_c("ref = %u", subbPtr.p->m_subscriberRef); - // we use the subscription id for now, should really be API choice - subbPtr.p->m_subscriberData = subscriberData; - -#if 0 - if (RtoI(signal->getSendersBlockRef(), false) == RNIL) { - jam(); - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]); - if (ref != reference()) { - jam(); - sendSubStartReq(subPtr, subbPtr, signal, ref); - } else - jam(); - } - } -#endif - break; - case SubCreateReq::DatabaseSnapshot: - case SubCreateReq::SelectiveTableSnapshot: - jam(); - ndbrequire(false); - //subbPtr.p->m_subscriberRef = GREP_REF; - subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData; - break; - case SubCreateReq::SingleTableScan: - jam(); - subbPtr.p->m_subscriberRef = subPtr.p->m_subscriberRef; - subbPtr.p->m_subscriberData = subPtr.p->m_subscriberData; - } - - subbPtr.p->m_subPtrI = subPtr.i; - subbPtr.p->m_firstGCI = RNIL; - if (type == SubCreateReq::TableEvent) - subbPtr.p->m_lastGCI = 0; - else - subbPtr.p->m_lastGCI = RNIL; // disable usage of m_lastGCI - bool ok = 
false; - - switch(part){ - case SubscriptionData::MetaData: - ok = true; - jam(); - c_metaSubscribers.add(subbPtr); - sendSubStartComplete(signal, subbPtr, 0, part); - break; - case SubscriptionData::TableData: - ok = true; - jam(); - c_prepDataSubscribers.add(subbPtr); - syncPtr.p->startTrigger(signal); - break; - } - ndbrequire(ok); - DBUG_VOID_RETURN; -} - -void -SumaParticipant::sendSubStartComplete(Signal* signal, - SubscriberPtr subbPtr, - Uint32 firstGCI, - SubscriptionData::Part part){ - jam(); - - SubscriptionPtr subPtr; - c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); - - Ptr syncPtr; - c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI); - syncPtr.p->m_locked = false; - - SubStartConf * const conf = (SubStartConf*)signal->getDataPtrSend(); - - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - conf->subscriptionId = subPtr.p->m_subscriptionId; - conf->subscriptionKey = subPtr.p->m_subscriptionKey; - conf->firstGCI = firstGCI; - conf->part = (Uint32) part; - - conf->subscriberData = subPtr.p->m_subscriberData; - sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_START_CONF, signal, - SubStartConf::SignalLength, JBB); -} - -#if 0 -void -SumaParticipant::sendSubStartRef(SubscriptionPtr subPtr, - Signal* signal, Uint32 errCode, - bool temporary){ - jam(); - SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); - xxx ref->senderRef = reference(); - xxx ref->senderData = subPtr.p->m_senderData; - ref->subscriptionId = subPtr.p->m_subscriptionId; - ref->subscriptionKey = subPtr.p->m_subscriptionKey; - ref->part = (Uint32) subPtr.p->m_subscriptionType; - ref->subscriberData = subPtr.p->m_subscriberData; - ref->err = errCode; - if (temporary) { - jam(); - ref->setTemporary(); - } - releaseSections(signal); - sendSignal(subPtr.p->m_subscriberRef, GSN_SUB_START_REF, signal, - SubStartRef::SignalLength, JBB); -} -#endif -void -SumaParticipant::sendSubStartRef(Signal* signal, Uint32 errCode, - bool temporary){ - jam(); - SubStartRef * ref = (SubStartRef *)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->err = errCode; - if (temporary) { - jam(); - ref->setTemporary(); - } - releaseSections(signal); - sendSignal(signal->getSendersBlockRef(), GSN_SUB_START_REF, signal, - SubStartRef::SignalLength, JBB); -} - -/********************************************************** - * - * Trigger admin interface - * - */ - -void -SumaParticipant::SyncRecord::startTrigger(Signal* signal){ - jam(); - m_currentTable = 0; - m_latestTriggerId = RNIL; - nextTrigger(signal); -} - -void -SumaParticipant::SyncRecord::nextTrigger(Signal* signal){ - jam(); - - TableList::DataBufferIterator it; - - if(!m_tableList.position(it, m_currentTable)){ - completeTrigger(signal); - return; - } - - SubscriptionPtr subPtr; - suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI); - ndbrequire(subPtr.p->m_syncPtrI == ptrI); - const Uint32 RT_BREAK = 48; - Uint32 latestTriggerId = 0; - for(Uint32 i = 0; im_schemaVersion << 18) | - (j << 16) | tabPtr.p->m_tableId; - if(tabPtr.p->m_hasTriggerDefined[j] == 0) { - ndbrequire(tabPtr.p->m_triggerIds[j] == ILLEGAL_TRIGGER_ID); -#if 0 - ndbout_c("DEFINING trigger on table %u[%u]", tabPtr.p->m_tableId, j); -#endif - CreateTrigReq * const req = (CreateTrigReq*)signal->getDataPtrSend(); - req->setUserRef(SUMA_REF); - req->setConnectionPtr(ptrI); - req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE); - req->setTriggerActionTime(TriggerActionTime::TA_DETACHED); - req->setMonitorReplicas(true); - req->setMonitorAllAttributes(false); - 
req->setReceiverRef(SUMA_REF); - req->setTriggerId(latestTriggerId); - req->setTriggerEvent((TriggerEvent::Value)j); - req->setTableId(tabPtr.p->m_tableId); - req->setAttributeMask(attrMask); - suma.sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB); - - } else { - /** - * Faking that a trigger has been created in order to - * simulate the proper behaviour. - * Perhaps this should be a dummy signal instead of - * (ab)using CREATE_TRIG_CONF. - */ - CreateTrigConf * conf = (CreateTrigConf*)signal->getDataPtrSend(); - conf->setConnectionPtr(ptrI); - conf->setTableId(tabPtr.p->m_tableId); - conf->setTriggerId(latestTriggerId); - suma.sendSignal(SUMA_REF,GSN_CREATE_TRIG_CONF, - signal, CreateTrigConf::SignalLength, JBB); - - } - - } - m_currentTable++; - } - m_latestTriggerId = latestTriggerId; -} - -void -SumaParticipant::SyncRecord::createAttributeMask(AttributeMask& mask, - Table * table){ - jam(); - mask.clear(); - DataBuffer<15>::DataBufferIterator it; - LocalDataBuffer<15> attrBuf(suma.c_dataBufferPool, table->m_attributes); - for(attrBuf.first(it); !it.curr.isNull(); attrBuf.next(it)){ - mask.set(* it.data); - } -} - -void -SumaParticipant::SyncRecord::runCREATE_TRIG_CONF(Signal* signal){ - jam(); - - CreateTrigConf * const conf = (CreateTrigConf*)signal->getDataPtr(); - const Uint32 triggerId = conf->getTriggerId(); - Uint32 type = (triggerId >> 16) & 0x3; - Uint32 tableId = conf->getTableId(); - - TablePtr tabPtr; - ndbrequire(suma.c_tables.find(tabPtr, tableId)); - - ndbrequire(type < 3); - tabPtr.p->m_triggerIds[type] = triggerId; - tabPtr.p->m_hasTriggerDefined[type]++; - - if(triggerId == m_latestTriggerId){ - jam(); - nextTrigger(signal); - } -} - -void -SumaParticipant::SyncRecord::completeTrigger(Signal* signal){ - jam(); - SubscriptionPtr subPtr; - CRASH_INSERTION(13013); -#ifdef EVENT_PH3_DEBUG - ndbout_c("SumaParticipant: trigger completed"); -#endif - Uint32 gci; - suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI); - ndbrequire(subPtr.p->m_syncPtrI == ptrI); - - SubscriberPtr subbPtr; - { - bool found = false; - - for(suma.c_prepDataSubscribers.first(subbPtr); - !subbPtr.isNull(); suma.c_prepDataSubscribers.next(subbPtr)) { - jam(); - if(subbPtr.p->m_subPtrI == subPtr.i) { - jam(); - found = true; - break; - } - } - ndbrequire(found); - gci = suma.getFirstGCI(signal); - subbPtr.p->m_firstGCI = gci; - suma.c_prepDataSubscribers.remove(subbPtr); - suma.c_dataSubscribers.add(subbPtr); - } - suma.sendSubStartComplete(signal, subbPtr, gci, SubscriptionData::TableData); -} - -void -SumaParticipant::SyncRecord::startDropTrigger(Signal* signal){ - jam(); - m_currentTable = 0; - m_latestTriggerId = RNIL; - nextDropTrigger(signal); -} - -void -SumaParticipant::SyncRecord::nextDropTrigger(Signal* signal){ - jam(); - - TableList::DataBufferIterator it; - - if(!m_tableList.position(it, m_currentTable)){ - completeDropTrigger(signal); - return; - } - - SubscriptionPtr subPtr; - suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI); - ndbrequire(subPtr.p->m_syncPtrI == ptrI); - - const Uint32 RT_BREAK = 48; - Uint32 latestTriggerId = 0; - for(Uint32 i = 0; im_triggerIds[j] != ILLEGAL_TRIGGER_ID); - i++; - latestTriggerId = tabPtr.p->m_triggerIds[j]; - if(tabPtr.p->m_hasTriggerDefined[j] == 1) { - jam(); - - DropTrigReq * const req = (DropTrigReq*)signal->getDataPtrSend(); - req->setConnectionPtr(ptrI); - req->setUserRef(SUMA_REF); // Sending to myself - req->setRequestType(DropTrigReq::RT_USER); - 
req->setTriggerType(TriggerType::SUBSCRIPTION_BEFORE); - req->setTriggerActionTime(TriggerActionTime::TA_DETACHED); - req->setIndexId(RNIL); - - req->setTableId(tabPtr.p->m_tableId); - req->setTriggerId(latestTriggerId); - req->setTriggerEvent((TriggerEvent::Value)j); - -#if 0 - ndbout_c("DROPPING trigger %u = %u %u %u on table %u[%u]", - latestTriggerId,TriggerType::SUBSCRIPTION_BEFORE, - TriggerActionTime::TA_DETACHED, j, tabPtr.p->m_tableId, j); -#endif - suma.sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - } else { - jam(); - ndbrequire(tabPtr.p->m_hasTriggerDefined[j] > 1); - /** - * Faking that a trigger has been dropped in order to - * simulate the proper behaviour. - * Perhaps this should be a dummy signal instead of - * (ab)using DROP_TRIG_CONF. - */ - DropTrigConf * conf = (DropTrigConf*)signal->getDataPtrSend(); - conf->setConnectionPtr(ptrI); - conf->setTableId(tabPtr.p->m_tableId); - conf->setTriggerId(latestTriggerId); - suma.sendSignal(SUMA_REF,GSN_DROP_TRIG_CONF, - signal, DropTrigConf::SignalLength, JBB); - } - } - m_currentTable++; - } - m_latestTriggerId = latestTriggerId; -} - -void -SumaParticipant::SyncRecord::runDROP_TRIG_REF(Signal* signal){ - jam(); - DropTrigRef * const ref = (DropTrigRef*)signal->getDataPtr(); - if (ref->getErrorCode() != DropTrigRef::TriggerNotFound){ - ndbrequire(false); - } - const Uint32 triggerId = ref->getTriggerId(); - Uint32 tableId = ref->getTableId(); - runDropTrig(signal, triggerId, tableId); -} - -void -SumaParticipant::SyncRecord::runDROP_TRIG_CONF(Signal* signal){ - jam(); - - DropTrigConf * const conf = (DropTrigConf*)signal->getDataPtr(); - const Uint32 triggerId = conf->getTriggerId(); - Uint32 tableId = conf->getTableId(); - runDropTrig(signal, triggerId, tableId); -} - -void -SumaParticipant::SyncRecord::runDropTrig(Signal* signal, - Uint32 triggerId, - Uint32 tableId){ - Uint32 type = (triggerId >> 16) & 0x3; - - TablePtr tabPtr; - ndbrequire(suma.c_tables.find(tabPtr, tableId)); - - ndbrequire(type < 3); - ndbrequire(tabPtr.p->m_triggerIds[type] == triggerId); - tabPtr.p->m_hasTriggerDefined[type]--; - if (tabPtr.p->m_hasTriggerDefined[type] == 0) { - jam(); - tabPtr.p->m_triggerIds[type] = ILLEGAL_TRIGGER_ID; - } - if(triggerId == m_latestTriggerId){ - jam(); - nextDropTrigger(signal); - } -} - -void -SumaParticipant::SyncRecord::completeDropTrigger(Signal* signal){ - jam(); - SubscriptionPtr subPtr; - CRASH_INSERTION(13014); -#if 0 - ndbout_c("trigger completed"); -#endif - - suma.c_subscriptions.getPtr(subPtr, m_subscriptionPtrI); - ndbrequire(subPtr.p->m_syncPtrI == ptrI); - - bool found = false; - SubscriberPtr subbPtr; - for(suma.c_prepDataSubscribers.first(subbPtr); - !subbPtr.isNull(); suma.c_prepDataSubscribers.next(subbPtr)) { - jam(); - if(subbPtr.p->m_subPtrI == subPtr.i) { - jam(); - found = true; - break; - } - } - ndbrequire(found); - suma.sendSubStopComplete(signal, subbPtr); -} - /********************************************************** * Scan data interface * @@ -2712,710 +1692,6 @@ SumaParticipant::execTRANSID_AI(Signal* signal){ f_bufferLock = 0; } -/********************************************************** - * - * Trigger data interface - * - */ - -void -SumaParticipant::execTRIG_ATTRINFO(Signal* signal){ - jamEntry(); - - CRASH_INSERTION(13016); - TrigAttrInfo* const trg = (TrigAttrInfo*)signal->getDataPtr(); - const Uint32 trigId = trg->getTriggerId(); - - const Uint32 dataLen = signal->length() - TrigAttrInfo::StaticLength; - - if(trg->getAttrInfoType() == 
TrigAttrInfo::BEFORE_VALUES){ - jam(); - - ndbrequire(b_bufferLock == trigId); - - memcpy(b_buffer + b_trigBufferSize, trg->getData(), 4 * dataLen); - b_trigBufferSize += dataLen; - // printf("before values %u %u %u\n",trigId, dataLen, b_trigBufferSize); - } else { - jam(); - - if(f_bufferLock == 0){ - f_bufferLock = trigId; - f_trigBufferSize = 0; - b_bufferLock = trigId; - b_trigBufferSize = 0; - } else { - ndbrequire(f_bufferLock == trigId); - } - - memcpy(f_buffer + f_trigBufferSize, trg->getData(), 4 * dataLen); - f_trigBufferSize += dataLen; - } -} - -#ifdef NODEFAIL_DEBUG2 -static int theCounts[64] = {0}; -#endif - -Uint32 -Suma::getStoreBucket(Uint32 v) -{ - // id will contain id to responsible suma or - // RNIL if we don't have nodegroup info yet - - const Uint32 N = NO_OF_BUCKETS; - const Uint32 D = v % N; // Distibution key - return D; -} - -Uint32 -Suma::getResponsibleSumaNodeId(Uint32 D) -{ - // id will contain id to responsible suma or - // RNIL if we don't have nodegroup info yet - - Uint32 id; - - if (c_restartLock) { - jam(); - // ndbout_c("c_restartLock"); - id = RNIL; - } else { - jam(); - id = RNIL; - const Uint32 n = c_noNodesInGroup; // Number nodes in node group - const Uint32 C1 = D / n; - const Uint32 C2 = D - C1*n; // = D % n; - const Uint32 C = C2 + C1 % n; - for (Uint32 i = 0; i < n; i++) { - jam(); - id = c_nodesInGroup[(C + i) % n]; - if (c_aliveNodes.get(id) && - !c_preparingNodes.get(id)) { - jam(); - break; - }//if - } -#ifdef NODEFAIL_DEBUG2 - theCounts[id]++; - ndbout_c("Suma:responsible n=%u, D=%u, id = %u, count=%u", - n,D, id, theCounts[id]); -#endif - } - return id; -} - -Uint32 -SumaParticipant::decideWhoToSend(Uint32 nBucket, Uint32 gci){ - bool replicaFlag = true; - Uint32 nId = RNIL; - - // bucket active/not active set by GCP_COMPLETE - if (c_buckets[nBucket].active) { - if (c_buckets[nBucket].handover && c_buckets[nBucket].handoverGCI <= gci) { - jam(); - replicaFlag = true; // let the other node send this - nId = RNIL; - // mark this as started, if we get a node failiure now we have some lost stuff - c_buckets[nBucket].handover_started = true; - } else { - jam(); - replicaFlag = false; - nId = refToNode(reference()); - } - } else { - nId = getResponsibleSumaNodeId(nBucket); - replicaFlag = !(nId == refToNode(reference())); - - if (!replicaFlag) { - if (!c_buckets[nBucket].handover) { - jam(); - // appearently a node has failed and we are taking over sending - // from that bucket. Now we need to go back to latest completed - // GCI. 
Handling will depend on Subscriber and Subscription - - // TODO, for now we make an easy takeover - if (gci < c_nodeFailGCI) - c_lastInconsistentGCI = gci; - - // we now have responsability for this bucket and we're actively - // sending from that - c_buckets[nBucket].active = true; -#ifdef HANDOVER_DEBUG - ndbout_c("Takeover Bucket %u", nBucket); -#endif - } else if (c_buckets[nBucket].handoverGCI > gci) { - jam(); - replicaFlag = true; // handover going on, but don't start sending yet - nId = RNIL; - } else { - jam(); -#ifdef HANDOVER_DEBUG - ndbout_c("Possible error: Will send from GCI = %u", gci); -#endif - } - } - } - -#ifdef NODEFAIL_DEBUG2 - ndbout_c("Suma:bucket %u, responsible id = %u, replicaFlag = %u", - nBucket, nId, (Uint32)replicaFlag); -#endif - return replicaFlag; -} - -void -SumaParticipant::execFIRE_TRIG_ORD(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execFIRE_TRIG_ORD"); - CRASH_INSERTION(13016); - FireTrigOrd* const trg = (FireTrigOrd*)signal->getDataPtr(); - const Uint32 trigId = trg->getTriggerId(); - const Uint32 hashValue = trg->getHashValue(); - const Uint32 gci = trg->getGCI(); - const Uint32 event = trg->getTriggerEvent(); - const Uint32 triggerId = trg->getTriggerId(); - Uint32 tableId = triggerId & 0xFFFF; - - ndbrequire(f_bufferLock == trigId); - -#ifdef EVENT_DEBUG2 - ndbout_c("SumaParticipant::execFIRE_TRIG_ORD"); -#endif - - Uint32 sz = trg->getNoOfPrimaryKeyWords()+trg->getNoOfAfterValueWords(); - ndbrequire(sz == f_trigBufferSize); - - /** - * Reformat as "all headers" + "all data" - */ - Uint32 dataLen = 0; - Uint32 noOfAttrs = 0; - Uint32 * src = f_buffer; - Uint32 * headers = signal->theData + 25; - Uint32 * dst = signal->theData + 25 + MAX_ATTRIBUTES_IN_TABLE; - - LinearSectionPtr ptr[3]; - int nptr; - - ptr[0].p = headers; - ptr[1].p = dst; - - while(sz > 0){ - jam(); - Uint32 tmp = * src ++; - * headers ++ = tmp; - Uint32 len = AttributeHeader::getDataSize(tmp); - memcpy(dst, src, 4 * len); - dst += len; - src += len; - - noOfAttrs++; - dataLen += len; - sz -= (1 + len); - } - ndbrequire(sz == 0); - - ptr[0].sz = noOfAttrs; - ptr[1].sz = dataLen; - - if (b_trigBufferSize > 0) { - jam(); - ptr[2].p = b_buffer; - ptr[2].sz = b_trigBufferSize; - nptr = 3; - } else { - jam(); - nptr = 2; - } - - // right now only for tableEvent - bool replicaFlag = decideWhoToSend(getStoreBucket(hashValue), gci); - - /** - * Signal to subscriber(s) - */ - SubTableData * data = (SubTableData*)signal->getDataPtrSend();//trg; - data->gci = gci; - data->tableId = tableId; - data->operation = event; - data->noOfAttributes = noOfAttrs; - data->dataSize = dataLen; - - SubscriberPtr subbPtr; - for(c_dataSubscribers.first(subbPtr); !subbPtr.isNull(); - c_dataSubscribers.next(subbPtr)){ - if (subbPtr.p->m_firstGCI > gci) { -#ifdef EVENT_DEBUG - ndbout_c("m_firstGCI = %u, gci = %u", subbPtr.p->m_firstGCI, gci); -#endif - jam(); - // we're either restarting or it's a newly created subscriber - // and waiting for the right gci - continue; - } - - jam(); - - const Uint32 ref = subbPtr.p->m_subscriberRef; - // ndbout_c("ref = %u", ref); - const Uint32 subdata = subbPtr.p->m_subscriberData; - data->senderData = subdata; - /* - * get subscription ptr for this subscriber - */ - SubscriptionPtr subPtr; - c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); - - if(!subPtr.p->m_tables[tableId]) { - jam(); - continue; - //continue in for-loop if the table is not part of - //the subscription. Otherwise, send data to subscriber. 
- } - - if (subPtr.p->m_subscriptionType == SubCreateReq::TableEvent) { - if (replicaFlag) { - jam(); - c_failoverBuffer.subTableData(gci,NULL,0); - continue; - } - jam(); - Uint32 tmp = data->logType; - if (c_lastInconsistentGCI == data->gci) { - data->setGCINotConsistent(); - } - -#ifdef HANDOVER_DEBUG - { - static int aLongGCIName = 0; - if (data->gci != aLongGCIName) { - aLongGCIName = data->gci; - ndbout_c("sent from GCI = %u", aLongGCIName); - } - } -#endif - DBUG_PRINT("info",("GSN_SUB_TABLE_DATA to node %d", refToNode(ref))); - sendSignal(ref, GSN_SUB_TABLE_DATA, signal, - SubTableData::SignalLength, JBB, ptr, nptr); - data->logType = tmp; - } else { - ndbassert(refToNode(ref) == 0 || refToNode(ref) == getOwnNodeId()); - jam(); -#if PRINT_ONLY - ndbout_c("GSN_SUB_TABLE_DATA to %s: op: %d #attr: %d len: %d", - getBlockName(refToBlock(ref)), - noOfAttrs, dataLen); - -#else -#ifdef HANDOVER_DEBUG - { - static int aLongGCIName2 = 0; - if (data->gci != aLongGCIName2) { - aLongGCIName2 = data->gci; - ndbout_c("(EXECUTE_DIRECT) sent from GCI = %u to %u", aLongGCIName2, ref); - } - } -#endif - EXECUTE_DIRECT(refToBlock(ref), GSN_SUB_TABLE_DATA, signal, - SubTableData::SignalLength); - jamEntry(); -#endif - } - } - - /** - * Reset f_bufferLock - */ - f_bufferLock = 0; - b_bufferLock = 0; - - DBUG_VOID_RETURN; -} - -void -SumaParticipant::execSUB_GCP_COMPLETE_REP(Signal* signal){ - jamEntry(); - - SubGcpCompleteRep * rep = (SubGcpCompleteRep*)signal->getDataPtrSend(); - - Uint32 gci = rep->gci; - c_lastCompleteGCI = gci; - - /** - * Signal to subscriber(s) - */ - - SubscriberPtr subbPtr; - SubscriptionPtr subPtr; - c_dataSubscribers.first(subbPtr); - for(; !subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){ - - if (subbPtr.p->m_firstGCI > gci) { - jam(); - // we don't send SUB_GCP_COMPLETE_REP for incomplete GCI's - continue; - } - - const Uint32 ref = subbPtr.p->m_subscriberRef; - rep->senderRef = ref; - rep->senderData = subbPtr.p->m_subscriberData; - - c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); -#if PRINT_ONLY - ndbout_c("GSN_SUB_GCP_COMPLETE_REP to %s:", - getBlockName(refToBlock(ref))); -#else - - CRASH_INSERTION(13018); - - if (subPtr.p->m_subscriptionType == SubCreateReq::TableEvent) - { - jam(); - sendSignal(ref, GSN_SUB_GCP_COMPLETE_REP, signal, - SubGcpCompleteRep::SignalLength, JBB); - } - else - { - jam(); - ndbassert(refToNode(ref) == 0 || refToNode(ref) == getOwnNodeId()); - EXECUTE_DIRECT(refToBlock(ref), GSN_SUB_GCP_COMPLETE_REP, signal, - SubGcpCompleteRep::SignalLength); - jamEntry(); - } -#endif - } - - if (c_handoverToDo) { - jam(); - c_handoverToDo = false; - for( int i = 0; i < NO_OF_BUCKETS; i++) { - if (c_buckets[i].handover) { - if (c_buckets[i].handoverGCI > gci) { - jam(); - c_handoverToDo = true; // still waiting for the right GCI - break; /* since all handover should happen at the same time - * we can break here - */ - } else { - c_buckets[i].handover = false; -#ifdef HANDOVER_DEBUG - ndbout_c("Handover Bucket %u", i); -#endif - if (getResponsibleSumaNodeId(i) == refToNode(reference())) { - // my bucket to be handed over to me - ndbrequire(!c_buckets[i].active); - jam(); - c_buckets[i].active = true; - } else { - // someone else's bucket to handover to - ndbrequire(c_buckets[i].active); - jam(); - c_buckets[i].active = false; - } - } - } - } - } -} - -/*********************************************************** - * - * Embryo to syncronize the Suma's so as to know if a subscriber - * has received a GCP_COMPLETE from all suma's or not - * - */ - -void 
-SumaParticipant::runSUB_GCP_COMPLETE_ACC(Signal* signal){ - jam(); - - SubGcpCompleteAcc * const acc = (SubGcpCompleteAcc*)signal->getDataPtr(); - - Uint32 gci = acc->rep.gci; - -#ifdef EVENT_DEBUG - ndbout_c("SumaParticipant::runSUB_GCP_COMPLETE_ACC gci = %u", gci); -#endif - - c_failoverBuffer.subGcpCompleteRep(gci); -} - -void -Suma::execSUB_GCP_COMPLETE_ACC(Signal* signal){ - jamEntry(); - - if (RtoI(signal->getSendersBlockRef(), false) != RNIL) { - jam(); - // Ack from other SUMA - runSUB_GCP_COMPLETE_ACC(signal); - return; - } - - jam(); - // Ack from User and not an acc from other SUMA, redistribute in nodegroup - - SubGcpCompleteAcc * const acc = (SubGcpCompleteAcc*)signal->getDataPtr(); - Uint32 gci = acc->rep.gci; - Uint32 senderRef = acc->rep.senderRef; - Uint32 subscriberData = acc->rep.subscriberData; - -#ifdef EVENT_DEBUG - ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC gci = %u", gci); -#endif - bool moreToCome = false; - - SubscriberPtr subbPtr; - for(c_dataSubscribers.first(subbPtr); - !subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){ -#ifdef EVENT_DEBUG - ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC %u == %u && %u == %u", - subbPtr.p->m_subscriberRef, - senderRef, - subbPtr.p->m_subscriberData, - subscriberData); -#endif - if (subbPtr.p->m_subscriberRef == senderRef && - subbPtr.p->m_subscriberData == subscriberData) { - jam(); -#ifdef EVENT_DEBUG - ndbout_c("Suma::execSUB_GCP_COMPLETE_ACC gci = FOUND SUBSCRIBER"); -#endif - subbPtr.p->m_lastGCI = gci; - } else if (subbPtr.p->m_lastGCI < gci) { - jam(); - if (subbPtr.p->m_firstGCI <= gci) - moreToCome = true; - } else - jam(); - } - - if (!moreToCome) { - // tell the other SUMA's that I'm done with this GCI - jam(); - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - Uint32 id = c_nodesInGroup[i]; - Uint32 ref = calcSumaBlockRef(id); - if ((ref != reference()) && c_aliveNodes.get(id)) { - jam(); - sendSignal(ref, GSN_SUB_GCP_COMPLETE_ACC, signal, - SubGcpCompleteAcc::SignalLength, JBB); - } else - jam(); - } - } -} - -static Uint32 tmpFailoverBuffer[512]; -//SumaParticipant::FailoverBuffer::FailoverBuffer(DataBuffer<15>::DataBufferPool & p) -// : m_dataList(p), -SumaParticipant::FailoverBuffer::FailoverBuffer() - : - c_gcis(tmpFailoverBuffer), c_sz(512), c_first(0), c_next(0), c_full(false) -{ -} - -bool SumaParticipant::FailoverBuffer::subTableData(Uint32 gci, Uint32 *src, int sz) -{ - bool ok = true; - - if (c_full) { - ok = false; -#ifdef EVENT_DEBUG - ndbout_c("Suma::FailoverBuffer::SubTableData buffer full gci=%u"); -#endif - } else { - c_gcis[c_next] = gci; - c_next++; - if (c_next == c_sz) c_next = 0; - if (c_next == c_first) - c_full = true; - // ndbout_c("%u %u %u",c_first,c_next,c_sz); - } - return ok; -} -bool SumaParticipant::FailoverBuffer::subGcpCompleteRep(Uint32 gci) -{ - bool ok = true; - - // ndbout_c("Empty"); - while (true) { - if (c_first == c_next && !c_full) - break; - if (c_gcis[c_first] > gci) - break; - c_full = false; - c_first++; - if (c_first == c_sz) c_first = 0; - // ndbout_c("%u %u %u : ",c_first,c_next,c_sz); - } - - return ok; -} -bool SumaParticipant::FailoverBuffer::nodeFailRep() -{ - bool ok = true; - while (true) { - if (c_first == c_next && !c_full) - break; - -#ifdef EVENT_DEBUG - ndbout_c("Suma::FailoverBuffer::NodeFailRep resending gci=%u", c_gcis[c_first]); -#endif - c_full = false; - c_first++; - if (c_first == c_sz) c_first = 0; - } - return ok; -} - -/********************************************************** - * Suma participant interface - * - * Stopping and removing of subscriber 
- * - */ - -void -SumaParticipant::execSUB_STOP_REQ(Signal* signal){ - jamEntry(); - DBUG_ENTER("SumaParticipant::execSUB_STOP_REQ"); - - CRASH_INSERTION(13019); - - SubStopReq * const req = (SubStopReq*)signal->getDataPtr(); - Uint32 senderRef = signal->getSendersBlockRef(); - Uint32 senderData = req->senderData; - Uint32 subscriberRef = req->subscriberRef; - Uint32 subscriberData = req->subscriberData; - SubscriptionPtr subPtr; - Subscription key; - key.m_subscriptionId = req->subscriptionId; - key.m_subscriptionKey = req->subscriptionKey; - Uint32 part = req->part; - - if (key.m_subscriptionKey == 0 && - key.m_subscriptionId == 0 && - subscriberData == 0) { - SubStopConf* conf = (SubStopConf*)signal->getDataPtrSend(); - - conf->senderRef = reference(); - conf->senderData = senderData; - conf->subscriptionId = key.m_subscriptionId; - conf->subscriptionKey = key.m_subscriptionKey; - conf->subscriberData = subscriberData; - - sendSignal(senderRef, GSN_SUB_STOP_CONF, signal, - SubStopConf::SignalLength, JBB); - - removeSubscribersOnNode(signal, refToNode(subscriberRef)); - DBUG_VOID_RETURN; - } - - if(!c_subscriptions.find(subPtr, key)){ - jam(); - sendSubStopRef(signal, GrepError::SUBSCRIPTION_ID_NOT_FOUND); - return; - } - - ndbrequire(part == SubscriptionData::TableData); - - SubscriberPtr subbPtr; - if (senderRef == reference()){ - jam(); - c_subscriberPool.getPtr(subbPtr, senderData); - ndbrequire(subbPtr.p->m_subPtrI == subPtr.i && - subbPtr.p->m_subscriberRef == subscriberRef && - subbPtr.p->m_subscriberData == subscriberData); - c_removeDataSubscribers.remove(subbPtr); - } else { - bool found = false; - jam(); - c_dataSubscribers.first(subbPtr); - for (;!subbPtr.isNull(); c_dataSubscribers.next(subbPtr)){ - jam(); - if (subbPtr.p->m_subPtrI == subPtr.i && - refToNode(subbPtr.p->m_subscriberRef) == refToNode(subscriberRef) && - subbPtr.p->m_subscriberData == subscriberData){ - // ndbout_c("STOP_REQ: before c_dataSubscribers.release"); - jam(); - c_dataSubscribers.remove(subbPtr); - found = true; - break; - } - } - /** - * If we didn't find anyone, send ref - */ - if (!found) { - jam(); - sendSubStopRef(signal, GrepError::SUBSCRIBER_NOT_FOUND); - DBUG_VOID_RETURN; - } - } - - subbPtr.p->m_senderRef = senderRef; // store ref to requestor - subbPtr.p->m_senderData = senderData; // store ref to requestor - c_prepDataSubscribers.add(subbPtr); - - Ptr syncPtr; - c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI); - if (syncPtr.p->m_locked) { - jam(); - sendSubStopRef(signal, /** Error Code */ 0, true); - DBUG_VOID_RETURN; - } - syncPtr.p->m_locked = true; - - syncPtr.p->startDropTrigger(signal); - DBUG_VOID_RETURN; -} - -void -SumaParticipant::sendSubStopComplete(Signal* signal, SubscriberPtr subbPtr){ - jam(); - - CRASH_INSERTION(13020); - - SubscriptionPtr subPtr; - c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); - - Ptr syncPtr; - c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI); - syncPtr.p->m_locked = false; - - SubStopConf * const conf = (SubStopConf*)signal->getDataPtrSend(); - - conf->senderRef = reference(); - conf->senderData = subbPtr.p->m_senderData; - conf->subscriptionId = subPtr.p->m_subscriptionId; - conf->subscriptionKey = subPtr.p->m_subscriptionKey; - conf->subscriberData = subbPtr.p->m_subscriberData; - Uint32 senderRef = subbPtr.p->m_senderRef; - - c_prepDataSubscribers.release(subbPtr); - sendSignal(senderRef, GSN_SUB_STOP_CONF, signal, - SubStopConf::SignalLength, JBB); -} - -void -SumaParticipant::sendSubStopRef(Signal* signal, Uint32 errCode, - bool temporary){ 
- jam(); - SubStopRef * ref = (SubStopRef *)signal->getDataPtrSend(); - ref->senderRef = reference(); - ref->errorCode = errCode; - if (temporary) { - ref->setTemporary(); - } - sendSignal(signal->getSendersBlockRef(), - GSN_SUB_STOP_REF, - signal, - SubStopRef::SignalLength, - JBB); - return; -} - /************************************************************** * * Removing subscription @@ -3446,36 +1722,6 @@ SumaParticipant::execSUB_REMOVE_REQ(Signal* signal) { { jam(); SubscriberPtr i_subbPtr; - for(c_prepDataSubscribers.first(i_subbPtr); - !i_subbPtr.isNull(); c_prepDataSubscribers.next(i_subbPtr)){ - jam(); - if( i_subbPtr.p->m_subPtrI == subPtr.i ) { - jam(); - sendSubRemoveRef(signal, req, /* ErrorCode */ 0, true); - return; - // c_prepDataSubscribers.release(subbPtr); - } - } - c_dataSubscribers.first(i_subbPtr); - while(!i_subbPtr.isNull()){ - jam(); - SubscriberPtr subbPtr = i_subbPtr; - c_dataSubscribers.next(i_subbPtr); - if( subbPtr.p->m_subPtrI == subPtr.i ) { - jam(); - sendSubRemoveRef(signal, req, /* ErrorCode */ 0, true); - return; - /* Unfinished/untested code. If remove should be possible - * even if subscribers are left these have to be stopped - * first. See m_markRemove, m_nSubscribers. We need also to - * block remove for this subscription so that multiple - * removes is not possible... - */ - c_dataSubscribers.remove(subbPtr); - c_removeDataSubscribers.add(subbPtr); - count++; - } - } c_metaSubscribers.first(i_subbPtr); while(!i_subbPtr.isNull()){ jam(); @@ -3491,15 +1737,7 @@ SumaParticipant::execSUB_REMOVE_REQ(Signal* signal) { subPtr.p->m_senderRef = senderRef; subPtr.p->m_senderData = req.senderData; - if (count > 0){ - jam(); - ndbrequire(false); // code not finalized - subPtr.p->m_markRemove = true; - subPtr.p->m_nSubscribers = count; - sendSubStopReq(signal); - } else { - completeSubRemoveReq(signal, subPtr); - } + completeSubRemoveReq(signal, subPtr); } void @@ -3596,486 +1834,5 @@ SumaParticipant::SyncRecord::release(){ attrBuf.release(); } - -/************************************************************** - * - * Restarting remote node functions, master functionality - * (slave does nothing special) - * - triggered on INCL_NODEREQ calling startNode - * - included node will issue START_ME when it's ready to start - * the subscribers - * - */ - -Suma::Restart::Restart(Suma& s) : suma(s) { - for (int i = 0; i < MAX_REPLICAS; i++) { - c_okToStart[i] = false; - c_waitingToStart[i] = false; - } -} - -void -Suma::Restart::resetNode(Uint32 sumaRef) -{ - jam(); - int I = suma.RtoI(sumaRef); - c_okToStart[I] = false; - c_waitingToStart[I] = false; -} - -void -Suma::Restart::startNode(Signal* signal, Uint32 sumaRef) -{ - jam(); - resetNode(sumaRef); - - // right now we can only handle restarting one node - // at a time in a node group - - createSubscription(signal, sumaRef); -} - -void -Suma::Restart::createSubscription(Signal* signal, Uint32 sumaRef) { - jam(); - suma.c_subscriptions.first(c_subPtr); - nextSubscription(signal, sumaRef); -} - -void -Suma::Restart::nextSubscription(Signal* signal, Uint32 sumaRef) { - jam(); - if (c_subPtr.isNull()) { - jam(); - completeSubscription(signal, sumaRef); - return; - } - SubscriptionPtr subPtr; - subPtr.i = c_subPtr.curr.i; - subPtr.p = suma.c_subscriptions.getPtr(subPtr.i); - - suma.c_subscriptions.next(c_subPtr); - - SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend(); - - req->subscriberRef = suma.reference(); - req->subscriberData = subPtr.i; - req->subscriptionId = subPtr.p->m_subscriptionId; - 
req->subscriptionKey = subPtr.p->m_subscriptionKey; - req->subscriptionType = subPtr.p->m_subscriptionType | - SubCreateReq::RestartFlag; - - switch (subPtr.p->m_subscriptionType) { - case SubCreateReq::TableEvent: - case SubCreateReq::SelectiveTableSnapshot: - case SubCreateReq::DatabaseSnapshot: { - jam(); - - Ptr syncPtr; - suma.c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI); - syncPtr.p->m_tableList.first(syncPtr.p->m_tableList_it); - - ndbrequire(!syncPtr.p->m_tableList_it.isNull()); - - req->tableId = *syncPtr.p->m_tableList_it.data; - -#if 0 - for (int i = 0; i < MAX_TABLES; i++) - if (subPtr.p->m_tables[i]) { - req->tableId = i; - break; - } -#endif - - suma.sendSignal(sumaRef, GSN_SUB_CREATE_REQ, signal, - SubCreateReq::SignalLength+1 /*to get table Id*/, JBB); - return; - } - case SubCreateReq::SingleTableScan : - // TODO - jam(); - return; - } - ndbrequire(false); -} - -void -Suma::execSUB_CREATE_CONF(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUB_CREATE_CONF"); -#endif - - const Uint32 senderRef = signal->senderBlockRef(); - - SubCreateConf * const conf = (SubCreateConf *)signal->getDataPtr(); - - Subscription key; - const Uint32 subscriberData = conf->subscriberData; - key.m_subscriptionId = conf->subscriptionId; - key.m_subscriptionKey = conf->subscriptionKey; - - SubscriptionPtr subPtr; - ndbrequire(c_subscriptions.find(subPtr, key)); - - switch(subPtr.p->m_subscriptionType) { - case SubCreateReq::TableEvent: - case SubCreateReq::SelectiveTableSnapshot: - case SubCreateReq::DatabaseSnapshot: - { - Ptr syncPtr; - c_syncPool.getPtr(syncPtr, subPtr.p->m_syncPtrI); - - syncPtr.p->m_tableList.next(syncPtr.p->m_tableList_it); - if (syncPtr.p->m_tableList_it.isNull()) { - jam(); - SubSyncReq *req = (SubSyncReq *)signal->getDataPtrSend(); - - req->subscriptionId = key.m_subscriptionId; - req->subscriptionKey = key.m_subscriptionKey; - req->subscriberData = subscriberData; - req->part = (Uint32) SubscriptionData::MetaData; - - sendSignal(senderRef, GSN_SUB_SYNC_REQ, signal, - SubSyncReq::SignalLength, JBB); - } else { - jam(); - SubCreateReq * req = (SubCreateReq *)signal->getDataPtrSend(); - - req->subscriberRef = reference(); - req->subscriberData = subPtr.i; - req->subscriptionId = subPtr.p->m_subscriptionId; - req->subscriptionKey = subPtr.p->m_subscriptionKey; - req->subscriptionType = subPtr.p->m_subscriptionType | - SubCreateReq::RestartFlag | - SubCreateReq::AddTableFlag; - - req->tableId = *syncPtr.p->m_tableList_it.data; - - sendSignal(senderRef, GSN_SUB_CREATE_REQ, signal, - SubCreateReq::SignalLength+1 /*to get table Id*/, JBB); - } - } - return; - case SubCreateReq::SingleTableScan: - ndbrequire(false); - } - ndbrequire(false); -} - -void -Suma::execSUB_CREATE_REF(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUB_CREATE_REF"); -#endif - //ndbrequire(false); -} - -void -Suma::execSUB_SYNC_CONF(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUB_SYNC_CONF"); -#endif - Uint32 sumaRef = signal->getSendersBlockRef(); - - SubSyncConf *conf = (SubSyncConf *)signal->getDataPtr(); - Subscription key; - - key.m_subscriptionId = conf->subscriptionId; - key.m_subscriptionKey = conf->subscriptionKey; - // SubscriptionData::Part part = (SubscriptionData::Part)conf->part; - // const Uint32 subscriberData = conf->subscriberData; - - SubscriptionPtr subPtr; - c_subscriptions.find(subPtr, key); - - switch(subPtr.p->m_subscriptionType) { - case SubCreateReq::TableEvent: - case 
SubCreateReq::SelectiveTableSnapshot: - case SubCreateReq::DatabaseSnapshot: - jam(); - Restart.nextSubscription(signal, sumaRef); - return; - case SubCreateReq::SingleTableScan: - ndbrequire(false); - return; - } - ndbrequire(false); -} - -void -Suma::execSUB_SYNC_REF(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUB_SYNC_REF"); -#endif - //ndbrequire(false); -} - -void -Suma::execSUMA_START_ME(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUMA_START_ME"); -#endif - - Restart.runSUMA_START_ME(signal, signal->getSendersBlockRef()); -} - -void -Suma::Restart::runSUMA_START_ME(Signal* signal, Uint32 sumaRef) { - int I = suma.RtoI(sumaRef); - - // restarting Suma is ready for SUB_START_REQ - if (c_waitingToStart[I]) { - // we've waited with startSubscriber since restarting suma was not ready - c_waitingToStart[I] = false; - startSubscriber(signal, sumaRef); - } else { - // do startSubscriber as soon as its time - c_okToStart[I] = true; - } -} - -void -Suma::Restart::completeSubscription(Signal* signal, Uint32 sumaRef) { - jam(); - int I = suma.RtoI(sumaRef); - - if (c_okToStart[I]) {// otherwise will start when START_ME comes - c_okToStart[I] = false; - startSubscriber(signal, sumaRef); - } else { - c_waitingToStart[I] = true; - } -} - -void -Suma::Restart::startSubscriber(Signal* signal, Uint32 sumaRef) { - jam(); - suma.c_dataSubscribers.first(c_subbPtr); - nextSubscriber(signal, sumaRef); -} - -void -Suma::Restart::sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr, - Signal* signal, Uint32 sumaRef) -{ - jam(); - SubStartReq * req = (SubStartReq *)signal->getDataPtrSend(); - - req->senderRef = suma.reference(); - req->senderData = subbPtr.p->m_senderData; - req->subscriptionId = subPtr.p->m_subscriptionId; - req->subscriptionKey = subPtr.p->m_subscriptionKey; - req->part = SubscriptionData::TableData; - req->subscriberData = subbPtr.p->m_subscriberData; - req->subscriberRef = subbPtr.p->m_subscriberRef; - - // restarting suma will not respond to this until startphase 5 - // since it is not until then data copying has been completed -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::Restart::sendSubStartReq sending GSN_SUB_START_REQ id=%u key=%u", - req->subscriptionId, req->subscriptionKey); -#endif - suma.sendSignal(sumaRef, GSN_SUB_START_REQ, - signal, SubStartReq::SignalLength2, JBB); -} - -void -Suma::execSUB_START_CONF(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUB_START_CONF"); -#endif - Uint32 sumaRef = signal->getSendersBlockRef(); - Restart.nextSubscriber(signal, sumaRef); -} - -void -Suma::execSUB_START_REF(Signal* signal) { - jamEntry(); -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUB_START_REF"); -#endif - //ndbrequire(false); -} - -void -Suma::Restart::nextSubscriber(Signal* signal, Uint32 sumaRef) { - jam(); - if (c_subbPtr.isNull()) { - jam(); - completeSubscriber(signal, sumaRef); - return; - } - - SubscriberPtr subbPtr = c_subbPtr; - suma.c_dataSubscribers.next(c_subbPtr); - - /* - * get subscription ptr for this subscriber - */ - - SubscriptionPtr subPtr; - suma.c_subscriptions.getPtr(subPtr, subbPtr.p->m_subPtrI); - switch (subPtr.p->m_subscriptionType) { - case SubCreateReq::TableEvent: - case SubCreateReq::SelectiveTableSnapshot: - case SubCreateReq::DatabaseSnapshot: - { - jam(); - sendSubStartReq(subPtr, subbPtr, signal, sumaRef); -#if 0 - SubStartReq * req = (SubStartReq *)signal->getDataPtrSend(); - - req->senderRef = reference(); - req->senderData = 
subbPtr.p->m_senderData; - req->subscriptionId = subPtr.p->m_subscriptionId; - req->subscriptionKey = subPtr.p->m_subscriptionKey; - req->part = SubscriptionData::TableData; - req->subscriberData = subbPtr.p->m_subscriberData; - req->subscriberRef = subbPtr.p->m_subscriberRef; - - // restarting suma will not respond to this until startphase 5 - // since it is not until then data copying has been completed -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::nextSubscriber sending GSN_SUB_START_REQ id=%u key=%u", - req->subscriptionId, req->subscriptionKey); -#endif - suma.sendSignal(sumaRef, GSN_SUB_START_REQ, - signal, SubStartReq::SignalLength2, JBB); -#endif - } - return; - case SubCreateReq::SingleTableScan: - ndbrequire(false); - return; - } - ndbrequire(false); -} - -void -Suma::Restart::completeSubscriber(Signal* signal, Uint32 sumaRef) { - completeRestartingNode(signal, sumaRef); -} - -void -Suma::Restart::completeRestartingNode(Signal* signal, Uint32 sumaRef) { - jam(); - SumaHandoverReq * req = (SumaHandoverReq *)signal->getDataPtrSend(); - - req->gci = suma.getFirstGCI(signal); - - suma.sendSignal(sumaRef, GSN_SUMA_HANDOVER_REQ, signal, - SumaHandoverReq::SignalLength, JBB); -} - -// only run on restarting suma - -void -Suma::execSUMA_HANDOVER_REQ(Signal* signal) -{ - jamEntry(); - // Uint32 sumaRef = signal->getSendersBlockRef(); - SumaHandoverReq const * req = (SumaHandoverReq *)signal->getDataPtr(); - - Uint32 gci = req->gci; - Uint32 new_gci = getFirstGCI(signal); - - if (new_gci > gci) { - gci = new_gci; - } - - { // all recreated subscribers at restarting SUMA start at same GCI - SubscriberPtr subbPtr; - for(c_dataSubscribers.first(subbPtr); - !subbPtr.isNull(); - c_dataSubscribers.next(subbPtr)){ - subbPtr.p->m_firstGCI = gci; - } - } - -#ifdef NODEFAIL_DEBUG - ndbout_c("Suma::execSUMA_HANDOVER_REQ, gci = %u", gci); -#endif - - c_handoverToDo = false; - c_restartLock = false; - { -#ifdef HANDOVER_DEBUG - int c = 0; -#endif - for( int i = 0; i < NO_OF_BUCKETS; i++) { - jam(); - if (getResponsibleSumaNodeId(i) == refToNode(reference())) { -#ifdef HANDOVER_DEBUG - c++; -#endif - jam(); - c_buckets[i].active = false; - c_buckets[i].handoverGCI = gci; - c_buckets[i].handover = true; - c_buckets[i].handover_started = false; - c_handoverToDo = true; - } - } -#ifdef HANDOVER_DEBUG - ndbout_c("prepared handover of bucket %u buckets", c); -#endif - } - - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - jam(); - Uint32 ref = calcSumaBlockRef(c_nodesInGroup[i]); - if (ref != reference()) { - jam(); - sendSignal(ref, GSN_SUMA_HANDOVER_CONF, signal, - SumaHandoverConf::SignalLength, JBB); - }//if - } -} - -// only run on all but restarting suma -void -Suma::execSUMA_HANDOVER_CONF(Signal* signal) { - jamEntry(); - Uint32 sumaRef = signal->getSendersBlockRef(); - SumaHandoverConf const * conf = (SumaHandoverConf *)signal->getDataPtr(); - - Uint32 gci = conf->gci; - -#ifdef HANDOVER_DEBUG - ndbout_c("Suma::execSUMA_HANDOVER_CONF, gci = %u", gci); -#endif - - /* TODO, if we are restarting several SUMA's (>2 in a nodegroup) - * we have to collect all these conf's before proceding - */ - - // restarting node is now prepared and ready - c_preparingNodes.clear(refToNode(sumaRef)); /* !! 
important to do before - * below since it affects - * getResponsibleSumaNodeId() - */ - - c_handoverToDo = false; - // mark all active buckets really belonging to restarting SUMA - for( int i = 0; i < NO_OF_BUCKETS; i++) { - if (c_buckets[i].active) { - // I'm running this bucket - if (getResponsibleSumaNodeId(i) == refToNode(sumaRef)) { - // but it should really be the restarted node - c_buckets[i].handoverGCI = gci; - c_buckets[i].handover = true; - c_buckets[i].handover_started = false; - c_handoverToDo = true; - } - } - } -} - template void append(DataBuffer<11>&,SegmentedSectionPtr,SectionSegmentPool&); diff --git a/ndb/src/kernel/blocks/suma/Suma.hpp b/ndb/src/kernel/blocks/suma/Suma.hpp index 3508c5b0e0f..5cf1c4d543f 100644 --- a/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/ndb/src/kernel/blocks/suma/Suma.hpp @@ -76,14 +76,6 @@ protected: void execSUB_SYNC_CONTINUE_REF(Signal* signal); void execSUB_SYNC_CONTINUE_CONF(Signal* signal); - /** - * Trigger logging - */ - void execTRIG_ATTRINFO(Signal* signal); - void execFIRE_TRIG_ORD(Signal* signal); - void execSUB_GCP_COMPLETE_REP(Signal* signal); - void runSUB_GCP_COMPLETE_ACC(Signal* signal); - /** * DIH signals */ @@ -92,14 +84,6 @@ protected: void execDIGETPRIMREF(Signal* signal); void execDIGETPRIMCONF(Signal* signal); - /** - * Trigger administration - */ - void execCREATE_TRIG_REF(Signal* signal); - void execCREATE_TRIG_CONF(Signal* signal); - void execDROP_TRIG_REF(Signal* signal); - void execDROP_TRIG_CONF(Signal* signal); - /** * continueb */ @@ -189,22 +173,6 @@ public: void nextMeta(Signal*); void completeMeta(Signal*); - /** - * Create triggers - */ - Uint32 m_latestTriggerId; - void startTrigger(Signal* signal); - void nextTrigger(Signal* signal); - void completeTrigger(Signal* signal); - void createAttributeMask(AttributeMask&, Table*); - - /** - * Drop triggers - */ - void startDropTrigger(Signal* signal); - void nextDropTrigger(Signal* signal); - void completeDropTrigger(Signal* signal); - /** * Sync data */ @@ -229,18 +197,12 @@ public: suma.progError(line, cause, extra); } - void runLIST_TABLES_CONF(Signal* signal); void runGET_TABINFO_CONF(Signal* signal); void runGET_TABINFOREF(Signal* signal); void runDI_FCOUNTCONF(Signal* signal); void runDIGETPRIMCONF(Signal* signal); - void runCREATE_TRIG_CONF(Signal* signal); - void runDROP_TRIG_CONF(Signal* signal); - void runDROP_TRIG_REF(Signal* signal); - void runDropTrig(Signal* signal, Uint32 triggerId, Uint32 tableId); - Uint32 ptrI; union { Uint32 nextPool; Uint32 nextList; }; }; @@ -294,24 +256,11 @@ public: Uint32 m_subscriberRef; Uint32 m_subscriberData; Uint32 m_subPtrI; //reference to subscription - Uint32 m_firstGCI; // first GCI to send - Uint32 m_lastGCI; // last acnowledged GCI Uint32 nextList; union { Uint32 nextPool; Uint32 prevList; }; }; typedef Ptr SubscriberPtr; - struct Bucket { - bool active; - bool handover; - bool handover_started; - Uint32 handoverGCI; - }; -#define NO_OF_BUCKETS 24 - struct Bucket c_buckets[NO_OF_BUCKETS]; - bool c_handoverToDo; - Uint32 c_lastCompleteGCI; - /** * */ @@ -335,26 +284,9 @@ public: ArrayPool c_syncPool; DataBuffer<15>::DataBufferPool c_dataBufferPool; - /** - * for restarting Suma not to start sending data too early - */ - bool c_restartLock; - - /** - * for flagging that a GCI containg inconsistent data - * typically due to node failiure - */ - - Uint32 c_lastInconsistentGCI; - Uint32 c_nodeFailGCI; - - NodeBitmask c_failedApiNodes; - /** * Functions */ - bool removeSubscribersOnNode(Signal *signal, Uint32 nodeId); - 
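The FailoverBuffer removed here (declaration above, implementation earlier in Suma.cpp) is a fixed-capacity ring of GCI numbers: subTableData() records the GCI of each buffered change and reports overflow when the ring is full, while subGcpCompleteRep() discards every entry up to and including the completed GCI. A minimal standalone sketch of that wrap-around logic, keeping the removed code's 512-slot capacity and field roles but nothing else:

typedef unsigned Uint32;            // stand-in for the ndb typedef

struct GciRing {                    // illustrative stand-in for FailoverBuffer
  static const int SZ = 512;        // capacity, as in the removed code
  Uint32 gcis[SZ];
  int first;                        // oldest entry (pop side)
  int next;                         // free slot (push side)
  bool full;

  GciRing() : first(0), next(0), full(false) {}

  // Corresponds to subTableData(): remember the GCI of a buffered change.
  bool push(Uint32 gci) {
    if (full)
      return false;                 // overflow; caller must treat as error
    gcis[next] = gci;
    if (++next == SZ) next = 0;     // wrap around
    if (next == first) full = true;
    return true;
  }

  // Corresponds to subGcpCompleteRep(): drop all entries with GCI <= gci.
  void completeUpTo(Uint32 gci) {
    while (!(first == next && !full) && gcis[first] <= gci) {
      full = false;
      if (++first == SZ) first = 0;
    }
  }
};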
bool parseTable(Signal* signal, class GetTabInfoConf* conf, Uint32 tableId, SyncRecord* syncPtr_p); bool checkTableTriggers(SegmentedSectionPtr ptr); @@ -365,52 +297,11 @@ public: void sendSubIdRef(Signal* signal, Uint32 errorCode); void sendSubCreateConf(Signal* signal, Uint32 sender, SubscriptionPtr subPtr); void sendSubCreateRef(Signal* signal, const SubCreateReq& req, Uint32 errorCode); - void sendSubStartRef(SubscriptionPtr subPtr, Signal* signal, - Uint32 errorCode, bool temporary = false); - void sendSubStartRef(Signal* signal, - Uint32 errorCode, bool temporary = false); - void sendSubStopRef(Signal* signal, - Uint32 errorCode, bool temporary = false); void sendSubSyncRef(Signal* signal, Uint32 errorCode); void sendSubRemoveRef(Signal* signal, const SubRemoveReq& ref, Uint32 errorCode, bool temporary = false); - void sendSubStartComplete(Signal*, SubscriberPtr, Uint32, - SubscriptionData::Part); - void sendSubStopComplete(Signal*, SubscriberPtr); - void sendSubStopReq(Signal* signal, bool unlock= false); - void completeSubRemoveReq(Signal* signal, SubscriptionPtr subPtr); - Uint32 getFirstGCI(Signal* signal); - Uint32 decideWhoToSend(Uint32 nBucket, Uint32 gci); - - virtual Uint32 getStoreBucket(Uint32 v) = 0; - virtual Uint32 getResponsibleSumaNodeId(Uint32 D) = 0; - virtual Uint32 RtoI(Uint32 sumaRef, bool dieOnNotFound = true) = 0; - - struct FailoverBuffer { - // FailoverBuffer(DataBuffer<15>::DataBufferPool & p); - FailoverBuffer(); - - bool subTableData(Uint32 gci, Uint32 *src, int sz); - bool subGcpCompleteRep(Uint32 gci); - bool nodeFailRep(); - - // typedef DataBuffer<15> GCIDataBuffer; - // GCIDataBuffer m_GCIDataBuffer; - // GCIDataBuffer::DataBufferIterator m_GCIDataBuffer_it; - - Uint32 *c_gcis; - int c_sz; - - // Uint32 *c_buf; - // int c_buf_sz; - - int c_first; - int c_next; - bool c_full; - } c_failoverBuffer; - /** * Table admin */ @@ -441,8 +332,6 @@ private: * Framework signals */ - void getNodeGroupMembers(Signal* signal); - void execREAD_CONFIG_REQ(Signal* signal); void execSTTOR(Signal* signal); @@ -454,35 +343,13 @@ private: void execINCL_NODEREQ(Signal* signal); void execCONTINUEB(Signal* signal); void execSIGNAL_DROPPED_REP(Signal* signal); - void execAPI_FAILREQ(Signal* signal) ; - - void execSUB_GCP_COMPLETE_ACC(Signal* signal); /** * Controller interface */ - void execSUB_CREATE_REF(Signal* signal); - void execSUB_CREATE_CONF(Signal* signal); - - void execSUB_DROP_REF(Signal* signal); - void execSUB_DROP_CONF(Signal* signal); - - void execSUB_START_REF(Signal* signal); - void execSUB_START_CONF(Signal* signal); - - void execSUB_STOP_REF(Signal* signal); - void execSUB_STOP_CONF(Signal* signal); - - void execSUB_SYNC_REF(Signal* signal); - void execSUB_SYNC_CONF(Signal* signal); - void execSUB_ABORT_SYNC_REF(Signal* signal); void execSUB_ABORT_SYNC_CONF(Signal* signal); - void execSUMA_START_ME(Signal* signal); - void execSUMA_HANDOVER_REQ(Signal* signal); - void execSUMA_HANDOVER_CONF(Signal* signal); - /** * Subscription generation interface */ @@ -494,49 +361,6 @@ private: void execUTIL_SEQUENCE_REF(Signal* signal); void execCREATE_SUBID_REQ(Signal* signal); - Uint32 getStoreBucket(Uint32 v); - Uint32 getResponsibleSumaNodeId(Uint32 D); - - /** - * for Suma that is restarting another - */ - - struct Restart { - Restart(Suma& s); - - Suma & suma; - - bool c_okToStart[MAX_REPLICAS]; - bool c_waitingToStart[MAX_REPLICAS]; - - DLHashTable::Iterator c_subPtr; // TODO [MAX_REPLICAS] - SubscriberPtr c_subbPtr; // TODO [MAX_REPLICAS] - - void progError(int 
line, int cause, const char * extra) { - suma.progError(line, cause, extra); - } - - void resetNode(Uint32 sumaRef); - void runSUMA_START_ME(Signal*, Uint32 sumaRef); - void startNode(Signal*, Uint32 sumaRef); - - void createSubscription(Signal* signal, Uint32 sumaRef); - void nextSubscription(Signal* signal, Uint32 sumaRef); - void completeSubscription(Signal* signal, Uint32 sumaRef); - - void startSync(Signal* signal, Uint32 sumaRef); - void nextSync(Signal* signal, Uint32 sumaRef); - void completeSync(Signal* signal, Uint32 sumaRef); - - void sendSubStartReq(SubscriptionPtr subPtr, SubscriberPtr subbPtr, - Signal* signal, Uint32 sumaRef); - void startSubscriber(Signal* signal, Uint32 sumaRef); - void nextSubscriber(Signal* signal, Uint32 sumaRef); - void completeSubscriber(Signal* signal, Uint32 sumaRef); - - void completeRestartingNode(Signal* signal, Uint32 sumaRef); - } Restart; - private: friend class Restart; struct SubCoordinator { @@ -590,14 +414,4 @@ private: DLList c_runningSubscriptions; }; -inline Uint32 -Suma::RtoI(Uint32 sumaRef, bool dieOnNotFound) { - for (Uint32 i = 0; i < c_noNodesInGroup; i++) { - if (sumaRef == calcSumaBlockRef(c_nodesInGroup[i])) - return i; - } - ndbrequire(!dieOnNotFound); - return RNIL; -} - #endif diff --git a/ndb/src/kernel/blocks/suma/SumaInit.cpp b/ndb/src/kernel/blocks/suma/SumaInit.cpp index ad8493ff908..ae7425da4bf 100644 --- a/ndb/src/kernel/blocks/suma/SumaInit.cpp +++ b/ndb/src/kernel/blocks/suma/SumaInit.cpp @@ -35,19 +35,11 @@ SumaParticipant::SumaParticipant(const Configuration & conf) : */ addRecSignal(GSN_SUB_CREATE_REQ, &SumaParticipant::execSUB_CREATE_REQ); addRecSignal(GSN_SUB_REMOVE_REQ, &SumaParticipant::execSUB_REMOVE_REQ); - addRecSignal(GSN_SUB_START_REQ, &SumaParticipant::execSUB_START_REQ); - addRecSignal(GSN_SUB_STOP_REQ, &SumaParticipant::execSUB_STOP_REQ); addRecSignal(GSN_SUB_SYNC_REQ, &SumaParticipant::execSUB_SYNC_REQ); - addRecSignal(GSN_SUB_STOP_CONF, &SumaParticipant::execSUB_STOP_CONF); - addRecSignal(GSN_SUB_STOP_REF, &SumaParticipant::execSUB_STOP_REF); - /** * Dict interface */ - //addRecSignal(GSN_LIST_TABLES_REF, &SumaParticipant::execLIST_TABLES_REF); - addRecSignal(GSN_LIST_TABLES_CONF, &SumaParticipant::execLIST_TABLES_CONF); - //addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFO_REF); addRecSignal(GSN_GET_TABINFO_CONF, &SumaParticipant::execGET_TABINFO_CONF); addRecSignal(GSN_GET_TABINFOREF, &SumaParticipant::execGET_TABINFOREF); #if 0 @@ -76,32 +68,6 @@ SumaParticipant::SumaParticipant(const Configuration & conf) : addRecSignal(GSN_SUB_SYNC_CONTINUE_CONF, &SumaParticipant::execSUB_SYNC_CONTINUE_CONF); - /** - * Trigger stuff - */ - addRecSignal(GSN_TRIG_ATTRINFO, &SumaParticipant::execTRIG_ATTRINFO); - addRecSignal(GSN_FIRE_TRIG_ORD, &SumaParticipant::execFIRE_TRIG_ORD); - - addRecSignal(GSN_CREATE_TRIG_REF, &Suma::execCREATE_TRIG_REF); - addRecSignal(GSN_CREATE_TRIG_CONF, &Suma::execCREATE_TRIG_CONF); - addRecSignal(GSN_DROP_TRIG_REF, &Suma::execDROP_TRIG_REF); - addRecSignal(GSN_DROP_TRIG_CONF, &Suma::execDROP_TRIG_CONF); - - addRecSignal(GSN_SUB_GCP_COMPLETE_REP, - &SumaParticipant::execSUB_GCP_COMPLETE_REP); - - for( int i = 0; i < NO_OF_BUCKETS; i++) { - c_buckets[i].active = false; - c_buckets[i].handover = false; - c_buckets[i].handover_started = false; - c_buckets[i].handoverGCI = 0; - } - c_handoverToDo = false; - c_lastInconsistentGCI = RNIL; - c_lastCompleteGCI = RNIL; - c_nodeFailGCI = 0; - - c_failedApiNodes.clear(); } SumaParticipant::~SumaParticipant() @@ -110,7 +76,6 @@ 
SumaParticipant::~SumaParticipant() Suma::Suma(const Configuration & conf) : SumaParticipant(conf), - Restart(*this), c_nodes(c_nodePool), c_runningSubscriptions(c_subCoordinatorPool) { @@ -120,29 +85,12 @@ Suma::Suma(const Configuration & conf) : addRecSignal(GSN_NDB_STTOR, &Suma::execNDB_STTOR); addRecSignal(GSN_DUMP_STATE_ORD, &Suma::execDUMP_STATE_ORD); addRecSignal(GSN_READ_NODESCONF, &Suma::execREAD_NODESCONF); - addRecSignal(GSN_API_FAILREQ, &Suma::execAPI_FAILREQ); - addRecSignal(GSN_NODE_FAILREP, &Suma::execNODE_FAILREP); - addRecSignal(GSN_INCL_NODEREQ, &Suma::execINCL_NODEREQ); addRecSignal(GSN_CONTINUEB, &Suma::execCONTINUEB); addRecSignal(GSN_SIGNAL_DROPPED_REP, &Suma::execSIGNAL_DROPPED_REP, true); addRecSignal(GSN_UTIL_SEQUENCE_CONF, &Suma::execUTIL_SEQUENCE_CONF); addRecSignal(GSN_UTIL_SEQUENCE_REF, &Suma::execUTIL_SEQUENCE_REF); addRecSignal(GSN_CREATE_SUBID_REQ, &Suma::execCREATE_SUBID_REQ); - - addRecSignal(GSN_SUB_CREATE_CONF, &Suma::execSUB_CREATE_CONF); - addRecSignal(GSN_SUB_CREATE_REF, &Suma::execSUB_CREATE_REF); - addRecSignal(GSN_SUB_SYNC_CONF, &Suma::execSUB_SYNC_CONF); - addRecSignal(GSN_SUB_SYNC_REF, &Suma::execSUB_SYNC_REF); - addRecSignal(GSN_SUB_START_CONF, &Suma::execSUB_START_CONF); - addRecSignal(GSN_SUB_START_REF, &Suma::execSUB_START_REF); - - addRecSignal(GSN_SUMA_START_ME, &Suma::execSUMA_START_ME); - addRecSignal(GSN_SUMA_HANDOVER_REQ, &Suma::execSUMA_HANDOVER_REQ); - addRecSignal(GSN_SUMA_HANDOVER_CONF, &Suma::execSUMA_HANDOVER_CONF); - - addRecSignal(GSN_SUB_GCP_COMPLETE_ACC, - &Suma::execSUB_GCP_COMPLETE_ACC); } Suma::~Suma() diff --git a/ndb/src/ndbapi/Makefile.am b/ndb/src/ndbapi/Makefile.am index 99b75ffbd53..522e78dd6e0 100644 --- a/ndb/src/ndbapi/Makefile.am +++ b/ndb/src/ndbapi/Makefile.am @@ -24,8 +24,6 @@ libndbapi_la_SOURCES = \ NdbOperationExec.cpp \ NdbScanOperation.cpp NdbScanFilter.cpp \ NdbIndexOperation.cpp \ - NdbEventOperation.cpp \ - NdbEventOperationImpl.cpp \ NdbApiSignal.cpp \ NdbRecAttr.cpp \ NdbUtil.cpp \ diff --git a/ndb/src/ndbapi/Ndb.cpp b/ndb/src/ndbapi/Ndb.cpp index 56d68503825..9d1c78a5972 100644 --- a/ndb/src/ndbapi/Ndb.cpp +++ b/ndb/src/ndbapi/Ndb.cpp @@ -28,7 +28,6 @@ Name: Ndb.cpp #include "NdbImpl.hpp" #include #include -#include #include #include #include @@ -1300,51 +1299,6 @@ Ndb::getSchemaFromInternalName(const char * internalName) return ret; } -NdbEventOperation* Ndb::createEventOperation(const char* eventName, - const int bufferLength) -{ - NdbEventOperation* tOp; - - tOp = new NdbEventOperation(this, eventName, bufferLength); - - if (tOp == 0) - { - theError.code= 4000; - return NULL; - } - - if (tOp->getState() != NdbEventOperation::EO_CREATED) { - theError.code= tOp->getNdbError().code; - delete tOp; - tOp = NULL; - } - - //now we have to look up this event in dict - - return tOp; -} - -int Ndb::dropEventOperation(NdbEventOperation* op) { - delete op; - return 0; -} - -NdbGlobalEventBufferHandle* Ndb::getGlobalEventBufferHandle() -{ - return theGlobalEventBufferHandle; -} - -//void Ndb::monitorEvent(NdbEventOperation *op, NdbEventCallback cb, void* rs) -//{ -//} - -int -Ndb::pollEvents(int aMillisecondNumber) -{ - return NdbEventOperation::wait(theGlobalEventBufferHandle, - aMillisecondNumber); -} - #ifdef VM_TRACE #include extern NdbMutex *ndb_print_state_mutex; diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index a0a3dd431b8..6c721b76ba0 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -610,132 +610,6 @@ 
NdbDictionary::Index::getObjectVersion() const { return m_impl.m_version; } -/***************************************************************** - * Event facade - */ -NdbDictionary::Event::Event(const char * name) - : m_impl(* new NdbEventImpl(* this)) -{ - setName(name); -} - -NdbDictionary::Event::Event(const char * name, const Table& table) - : m_impl(* new NdbEventImpl(* this)) -{ - setName(name); - setTable(table); -} - -NdbDictionary::Event::Event(NdbEventImpl & impl) - : m_impl(impl) -{ -} - -NdbDictionary::Event::~Event() -{ - NdbEventImpl * tmp = &m_impl; - if(this != tmp){ - delete tmp; - } -} - -void -NdbDictionary::Event::setName(const char * name) -{ - m_impl.setName(name); -} - -const char * -NdbDictionary::Event::getName() const -{ - return m_impl.getName(); -} - -void -NdbDictionary::Event::setTable(const Table& table) -{ - m_impl.setTable(table); -} - -void -NdbDictionary::Event::setTable(const char * table) -{ - m_impl.setTable(table); -} - -const char* -NdbDictionary::Event::getTableName() const -{ - return m_impl.getTableName(); -} - -void -NdbDictionary::Event::addTableEvent(const TableEvent t) -{ - m_impl.addTableEvent(t); -} - -void -NdbDictionary::Event::setDurability(EventDurability d) -{ - m_impl.setDurability(d); -} - -NdbDictionary::Event::EventDurability -NdbDictionary::Event::getDurability() const -{ - return m_impl.getDurability(); -} - -void -NdbDictionary::Event::addColumn(const Column & c){ - NdbColumnImpl* col = new NdbColumnImpl; - (* col) = NdbColumnImpl::getImpl(c); - m_impl.m_columns.push_back(col); -} - -void -NdbDictionary::Event::addEventColumn(unsigned attrId) -{ - m_impl.m_attrIds.push_back(attrId); -} - -void -NdbDictionary::Event::addEventColumn(const char * name) -{ - const Column c(name); - addColumn(c); -} - -void -NdbDictionary::Event::addEventColumns(int n, const char ** names) -{ - for (int i = 0; i < n; i++) - addEventColumn(names[i]); -} - -int NdbDictionary::Event::getNoOfEventColumns() const -{ - return m_impl.getNoOfEventColumns(); -} - -NdbDictionary::Object::Status -NdbDictionary::Event::getObjectStatus() const -{ - return m_impl.m_status; -} - -int -NdbDictionary::Event::getObjectVersion() const -{ - return m_impl.m_version; -} - -void NdbDictionary::Event::print() -{ - m_impl.print(); -} - /***************************************************************** * Dictionary facade */ @@ -885,28 +759,6 @@ NdbDictionary::Dictionary::getIndexTable(const char * indexName, return 0; } - -int -NdbDictionary::Dictionary::createEvent(const Event & ev) -{ - return m_impl.createEvent(NdbEventImpl::getImpl(ev)); -} - -int -NdbDictionary::Dictionary::dropEvent(const char * eventName) -{ - return m_impl.dropEvent(eventName); -} - -const NdbDictionary::Event * -NdbDictionary::Dictionary::getEvent(const char * eventName) -{ - NdbEventImpl * t = m_impl.getEvent(eventName); - if(t) - return t->m_facade; - return 0; -} - int NdbDictionary::Dictionary::listObjects(List& list, Object::Type type) { diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index ce348b616c9..b91df24d8d7 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -32,8 +32,6 @@ #include #include #include -#include -#include "NdbEventOperationImpl.hpp" #include #include "NdbBlobImpl.hpp" #include @@ -585,99 +583,6 @@ NdbIndexImpl::getIndexTable() const return m_table; } -/** - * NdbEventImpl - */ - -NdbEventImpl::NdbEventImpl() : - NdbDictionary::Event(* this), - m_facade(this) -{ - init(); -} - 
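The two removed constructors (the default one above, the facade-taking one below) wire NdbEventImpl into the same facade/impl pattern the rest of NdbDictionary uses: an impl created on its own acts as its own facade, while one created through NdbDictionary::Event points back at the user-visible object, and the facade destructor removed earlier deletes the impl only when the two are distinct. A reduced sketch of just that linkage, with illustrative names in place of NdbDictionary::Event and NdbEventImpl:

struct Impl;                        // hypothetical stand-ins for
struct Facade {                     // NdbDictionary::Event / NdbEventImpl
  Impl& m_impl;
  Facade(Impl& i) : m_impl(i) {}
  ~Facade();
};

struct Impl : Facade {
  Facade* m_facade;
  Impl() : Facade(*this), m_facade(this) {}         // standalone impl
  Impl(Facade& f) : Facade(*this), m_facade(&f) {}  // impl behind a facade
};

inline Facade::~Facade() {
  Impl* tmp = &m_impl;
  if (this != tmp)                  // a standalone impl is its own facade;
    delete tmp;                     // deleting it here would be self-delete
}

The recursion is safe: when a user-facing Facade deletes its Impl, the Impl's base-class destructor finds this == &m_impl and stops, which is exactly the check in the removed NdbDictionary::Event::~Event().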
-NdbEventImpl::NdbEventImpl(NdbDictionary::Event & f) : - NdbDictionary::Event(* this), - m_facade(&f) -{ - init(); -} - -void NdbEventImpl::init() -{ - m_eventId= RNIL; - m_eventKey= RNIL; - m_tableId= RNIL; - mi_type= 0; - m_dur= NdbDictionary::Event::ED_UNDEFINED; - m_tableImpl= NULL; - m_bufferId= RNIL; - eventOp= NULL; -} - -NdbEventImpl::~NdbEventImpl() -{ - for (unsigned i = 0; i < m_columns.size(); i++) - delete m_columns[i]; -} - -void NdbEventImpl::setName(const char * name) -{ - m_externalName.assign(name); -} - -const char *NdbEventImpl::getName() const -{ - return m_externalName.c_str(); -} - -void -NdbEventImpl::setTable(const NdbDictionary::Table& table) -{ - m_tableImpl= &NdbTableImpl::getImpl(table); - m_tableName.assign(m_tableImpl->getName()); -} - -void -NdbEventImpl::setTable(const char * table) -{ - m_tableName.assign(table); -} - -const char * -NdbEventImpl::getTableName() const -{ - return m_tableName.c_str(); -} - -void -NdbEventImpl::addTableEvent(const NdbDictionary::Event::TableEvent t = NdbDictionary::Event::TE_ALL) -{ - switch (t) { - case NdbDictionary::Event::TE_INSERT : mi_type |= 1; break; - case NdbDictionary::Event::TE_DELETE : mi_type |= 2; break; - case NdbDictionary::Event::TE_UPDATE : mi_type |= 4; break; - default: mi_type = 4 | 2 | 1; // all types - } -} - -void -NdbEventImpl::setDurability(NdbDictionary::Event::EventDurability d) -{ - m_dur = d; -} - -NdbDictionary::Event::EventDurability -NdbEventImpl::getDurability() const -{ - return m_dur; -} - -int NdbEventImpl::getNoOfEventColumns() const -{ - return m_attrIds.size() + m_columns.size(); -} - /** * NdbDictionaryImpl */ @@ -901,36 +806,6 @@ NdbDictInterface::execSignal(void* dictImpl, case GSN_DROP_INDX_CONF: tmp->execDROP_INDX_CONF(signal, ptr); break; - case GSN_CREATE_EVNT_REF: - tmp->execCREATE_EVNT_REF(signal, ptr); - break; - case GSN_CREATE_EVNT_CONF: - tmp->execCREATE_EVNT_CONF(signal, ptr); - break; - case GSN_SUB_START_CONF: - tmp->execSUB_START_CONF(signal, ptr); - break; - case GSN_SUB_START_REF: - tmp->execSUB_START_REF(signal, ptr); - break; - case GSN_SUB_TABLE_DATA: - tmp->execSUB_TABLE_DATA(signal, ptr); - break; - case GSN_SUB_GCP_COMPLETE_REP: - tmp->execSUB_GCP_COMPLETE_REP(signal, ptr); - break; - case GSN_SUB_STOP_CONF: - tmp->execSUB_STOP_CONF(signal, ptr); - break; - case GSN_SUB_STOP_REF: - tmp->execSUB_STOP_REF(signal, ptr); - break; - case GSN_DROP_EVNT_REF: - tmp->execDROP_EVNT_REF(signal, ptr); - break; - case GSN_DROP_EVNT_CONF: - tmp->execDROP_EVNT_CONF(signal, ptr); - break; case GSN_LIST_TABLES_CONF: tmp->execLIST_TABLES_CONF(signal, ptr); break; @@ -2384,616 +2259,6 @@ NdbDictInterface::execDROP_INDX_REF(NdbApiSignal * signal, m_waiter.signal(NO_WAIT); } -/***************************************************************** - * Create event - */ - -int -NdbDictionaryImpl::createEvent(NdbEventImpl & evnt) -{ - int i; - NdbTableImpl* tab = getTable(evnt.getTableName()); - - if(tab == 0){ -#ifdef EVENT_DEBUG - ndbout_c("NdbDictionaryImpl::createEvent: table not found: %s", - evnt.getTableName()); -#endif - return -1; - } - - evnt.m_tableId = tab->m_tableId; - evnt.m_tableImpl = tab; -#ifdef EVENT_DEBUG - ndbout_c("Event on tableId=%d", evnt.m_tableId); -#endif - - NdbTableImpl &table = *evnt.m_tableImpl; - - - int attributeList_sz = evnt.m_attrIds.size(); - - for (i = 0; i < attributeList_sz; i++) { - NdbColumnImpl *col_impl = table.getColumn(evnt.m_attrIds[i]); - if (col_impl) { - evnt.m_facade->addColumn(*(col_impl->m_facade)); - } else { - ndbout_c("Attr id %u in 
table %s not found", evnt.m_attrIds[i], - evnt.getTableName()); - m_error.code= 4713; - return -1; - } - } - - evnt.m_attrIds.clear(); - - attributeList_sz = evnt.m_columns.size(); -#ifdef EVENT_DEBUG - ndbout_c("creating event %s", evnt.m_externalName.c_str()); - ndbout_c("no of columns %d", evnt.m_columns.size()); -#endif - int pk_count = 0; - evnt.m_attrListBitmask.clear(); - - for(i = 0; im_name.c_str()); - if(col == 0){ - m_error.code= 4247; - return -1; - } - // Copy column definition - *evnt.m_columns[i] = *col; - - if(col->m_pk){ - pk_count++; - } - - evnt.m_attrListBitmask.set(col->m_attrId); - } - - // Sort index attributes according to primary table (using insertion sort) - for(i = 1; i < attributeList_sz; i++) { - NdbColumnImpl* temp = evnt.m_columns[i]; - unsigned int j = i; - while((j > 0) && (evnt.m_columns[j - 1]->m_attrId > temp->m_attrId)) { - evnt.m_columns[j] = evnt.m_columns[j - 1]; - j--; - } - evnt.m_columns[j] = temp; - } - // Check for illegal duplicate attributes - for(i = 1; im_attrId == evnt.m_columns[i]->m_attrId) { - m_error.code= 4258; - return -1; - } - } - -#ifdef EVENT_DEBUG - char buf[128] = {0}; - evnt.m_attrListBitmask.getText(buf); - ndbout_c("createEvent: mask = %s", buf); -#endif - - // NdbDictInterface m_receiver; - return m_receiver.createEvent(m_ndb, evnt, 0 /* getFlag unset */); -} - -int -NdbDictInterface::createEvent(class Ndb & ndb, - NdbEventImpl & evnt, - int getFlag) -{ - NdbApiSignal tSignal(m_reference); - tSignal.theReceiversBlockNumber = DBDICT; - tSignal.theVerId_signalNumber = GSN_CREATE_EVNT_REQ; - if (getFlag) - tSignal.theLength = CreateEvntReq::SignalLengthGet; - else - tSignal.theLength = CreateEvntReq::SignalLengthCreate; - - CreateEvntReq * const req = CAST_PTR(CreateEvntReq, tSignal.getDataPtrSend()); - - req->setUserRef(m_reference); - req->setUserData(0); - - if (getFlag) { - // getting event from Dictionary - req->setRequestType(CreateEvntReq::RT_USER_GET); - } else { - // creating event in Dictionary - req->setRequestType(CreateEvntReq::RT_USER_CREATE); - req->setTableId(evnt.m_tableId); - req->setAttrListBitmask(evnt.m_attrListBitmask); - req->setEventType(evnt.mi_type); - } - - UtilBufferWriter w(m_buffer); - - const size_t len = strlen(evnt.m_externalName.c_str()) + 1; - if(len > MAX_TAB_NAME_SIZE) { - m_error.code= 4241; - return -1; - } - - w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str()); - - if (getFlag == 0) - { - const BaseString internal_tabname( - ndb.internalize_table_name(evnt.m_tableName.c_str())); - w.add(SimpleProperties::StringValue, - internal_tabname.c_str()); - } - - LinearSectionPtr ptr[1]; - ptr[0].p = (Uint32*)m_buffer.get_data(); - ptr[0].sz = (m_buffer.length()+3) >> 2; - - int ret = createEvent(&tSignal, ptr, 1); - - if (ret) { - return ret; - } - - char *dataPtr = (char *)m_buffer.get_data(); - unsigned int lenCreateEvntConf = *((unsigned int *)dataPtr); - dataPtr += sizeof(lenCreateEvntConf); - CreateEvntConf const * evntConf = (CreateEvntConf *)dataPtr; - dataPtr += lenCreateEvntConf; - - // NdbEventImpl *evntImpl = (NdbEventImpl *)evntConf->getUserData(); - - if (getFlag) { - evnt.m_tableId = evntConf->getTableId(); - evnt.m_attrListBitmask = evntConf->getAttrListBitmask(); - evnt.mi_type = evntConf->getEventType(); - evnt.setTable(dataPtr); - } else { - if (evnt.m_tableId != evntConf->getTableId() || - //evnt.m_attrListBitmask != evntConf->getAttrListBitmask() || - evnt.mi_type != evntConf->getEventType()) { - ndbout_c("ERROR*************"); - return 1; - } - } - - 
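Just above, the removed NdbDictionaryImpl::createEvent() orders the event's columns by attribute id with an insertion sort and then rejects duplicates, which after sorting must be adjacent (error 4258). The same two passes in a self-contained sketch, assuming a plain array of attribute ids rather than NdbColumnImpl pointers:

typedef unsigned Uint32;            // stand-in for the ndb typedef

// Sort ids ascending (insertion sort, as in the removed code), then
// return false if any attribute id occurs twice (error 4258 above).
static bool sortAndCheckUnique(Uint32* ids, int n) {
  for (int i = 1; i < n; i++) {     // insertion sort by attribute id
    Uint32 tmp = ids[i];
    int j = i;
    while (j > 0 && ids[j - 1] > tmp) {
      ids[j] = ids[j - 1];
      j--;
    }
    ids[j] = tmp;
  }
  for (int i = 1; i < n; i++)       // duplicates are now adjacent
    if (ids[i - 1] == ids[i])
      return false;                 // duplicate attribute id
  return true;
}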
evnt.m_eventId = evntConf->getEventId(); - evnt.m_eventKey = evntConf->getEventKey(); - - return ret; -} - -int -NdbDictInterface::createEvent(NdbApiSignal* signal, - LinearSectionPtr ptr[3], int noLSP) -{ - const int noErrCodes = 1; - int errCodes[noErrCodes] = {CreateEvntRef::Busy}; - return dictSignal(signal,ptr,noLSP, - 1 /*use masternode id*/, - 100, - WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/, - -1, - errCodes,noErrCodes, CreateEvntRef::Temporary); -} - -int -NdbDictionaryImpl::executeSubscribeEvent(NdbEventImpl & ev) -{ - // NdbDictInterface m_receiver; - return m_receiver.executeSubscribeEvent(m_ndb, ev); -} - -int -NdbDictInterface::executeSubscribeEvent(class Ndb & ndb, - NdbEventImpl & evnt) -{ - DBUG_ENTER("NdbDictInterface::executeSubscribeEvent"); - NdbApiSignal tSignal(m_reference); - // tSignal.theReceiversBlockNumber = SUMA; - tSignal.theReceiversBlockNumber = DBDICT; - tSignal.theVerId_signalNumber = GSN_SUB_START_REQ; - tSignal.theLength = SubStartReq::SignalLength2; - - SubStartReq * sumaStart = CAST_PTR(SubStartReq, tSignal.getDataPtrSend()); - - sumaStart->subscriptionId = evnt.m_eventId; - sumaStart->subscriptionKey = evnt.m_eventKey; - sumaStart->part = SubscriptionData::TableData; - sumaStart->subscriberData = evnt.m_bufferId & 0xFF; - sumaStart->subscriberRef = m_reference; - - DBUG_RETURN(executeSubscribeEvent(&tSignal, NULL)); -} - -int -NdbDictInterface::executeSubscribeEvent(NdbApiSignal* signal, - LinearSectionPtr ptr[3]) -{ - return dictSignal(signal,NULL,0, - 1 /*use masternode id*/, - 100, - WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/, - -1, - NULL,0); -} - -int -NdbDictionaryImpl::stopSubscribeEvent(NdbEventImpl & ev) -{ - // NdbDictInterface m_receiver; - return m_receiver.stopSubscribeEvent(m_ndb, ev); -} - -int -NdbDictInterface::stopSubscribeEvent(class Ndb & ndb, - NdbEventImpl & evnt) -{ - DBUG_ENTER("NdbDictInterface::stopSubscribeEvent"); - - NdbApiSignal tSignal(m_reference); - // tSignal.theReceiversBlockNumber = SUMA; - tSignal.theReceiversBlockNumber = DBDICT; - tSignal.theVerId_signalNumber = GSN_SUB_STOP_REQ; - tSignal.theLength = SubStopReq::SignalLength; - - SubStopReq * sumaStop = CAST_PTR(SubStopReq, tSignal.getDataPtrSend()); - - sumaStop->subscriptionId = evnt.m_eventId; - sumaStop->subscriptionKey = evnt.m_eventKey; - sumaStop->subscriberData = evnt.m_bufferId & 0xFF; - sumaStop->part = (Uint32) SubscriptionData::TableData; - sumaStop->subscriberRef = m_reference; - - DBUG_RETURN(stopSubscribeEvent(&tSignal, NULL)); -} - -int -NdbDictInterface::stopSubscribeEvent(NdbApiSignal* signal, - LinearSectionPtr ptr[3]) -{ - return dictSignal(signal,NULL,0, - 1 /*use masternode id*/, - 100, - WAIT_CREATE_INDX_REQ /*WAIT_SUB_STOP__REQ*/, - -1, - NULL,0); -} - -NdbEventImpl * -NdbDictionaryImpl::getEvent(const char * eventName) -{ - NdbEventImpl *ev = new NdbEventImpl(); - - if (ev == NULL) { - return NULL; - } - - ev->setName(eventName); - - int ret = m_receiver.createEvent(m_ndb, *ev, 1 /* getFlag set */); - - if (ret) { - delete ev; - return NULL; - } - - // We only have the table name with internal name - ev->setTable(m_ndb.externalizeTableName(ev->getTableName())); - ev->m_tableImpl = getTable(ev->getTableName()); - - // get the columns from the attrListBitmask - - NdbTableImpl &table = *ev->m_tableImpl; - AttributeMask & mask = ev->m_attrListBitmask; - int attributeList_sz = mask.count(); - int id = -1; - -#ifdef EVENT_DEBUG - ndbout_c("NdbDictionaryImpl::getEvent attributeList_sz = %d", - attributeList_sz); - char buf[128] = {0}; - 
mask.getText(buf); - ndbout_c("mask = %s", buf); -#endif - - for(int i = 0; i < attributeList_sz; i++) { - id++; while (!mask.get(id)) id++; - - const NdbColumnImpl* col = table.getColumn(id); - if(col == 0) { -#ifdef EVENT_DEBUG - ndbout_c("NdbDictionaryImpl::getEvent could not find column id %d", id); -#endif - m_error.code= 4247; - delete ev; - return NULL; - } - NdbColumnImpl* new_col = new NdbColumnImpl; - // Copy column definition - *new_col = *col; - - ev->m_columns.push_back(new_col); - } - - return ev; -} - -void -NdbDictInterface::execCREATE_EVNT_CONF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_CONF"); - - m_buffer.clear(); - unsigned int len = signal->getLength() << 2; - m_buffer.append((char *)&len, sizeof(len)); - m_buffer.append(signal->getDataPtr(), len); - - if (signal->m_noOfSections > 0) { - m_buffer.append((char *)ptr[0].p, strlen((char *)ptr[0].p)+1); - } - - const CreateEvntConf * const createEvntConf= - CAST_CONSTPTR(CreateEvntConf, signal->getDataPtr()); - - Uint32 subscriptionId = createEvntConf->getEventId(); - Uint32 subscriptionKey = createEvntConf->getEventKey(); - - DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d", - subscriptionId,subscriptionKey)); - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - -void -NdbDictInterface::execCREATE_EVNT_REF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execCREATE_EVNT_REF"); - - const CreateEvntRef* const ref= - CAST_CONSTPTR(CreateEvntRef, signal->getDataPtr()); - m_error.code= ref->getErrorCode(); - DBUG_PRINT("error",("error=%d,line=%d,node=%d",ref->getErrorCode(), - ref->getErrorLine(),ref->getErrorNode())); - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - -void -NdbDictInterface::execSUB_STOP_CONF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execSUB_STOP_CONF"); - const SubStopConf * const subStopConf= - CAST_CONSTPTR(SubStopConf, signal->getDataPtr()); - - Uint32 subscriptionId = subStopConf->subscriptionId; - Uint32 subscriptionKey = subStopConf->subscriptionKey; - Uint32 subscriberData = subStopConf->subscriberData; - - DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d", - subscriptionId,subscriptionKey,subscriberData)); - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - -void -NdbDictInterface::execSUB_STOP_REF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execSUB_STOP_REF"); - const SubStopRef * const subStopRef= - CAST_CONSTPTR(SubStopRef, signal->getDataPtr()); - - Uint32 subscriptionId = subStopRef->subscriptionId; - Uint32 subscriptionKey = subStopRef->subscriptionKey; - Uint32 subscriberData = subStopRef->subscriberData; - m_error.code= subStopRef->errorCode; - - DBUG_PRINT("error",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d,error=%d", - subscriptionId,subscriptionKey,subscriberData,m_error.code)); - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - -void -NdbDictInterface::execSUB_START_CONF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execSUB_START_CONF"); - const SubStartConf * const subStartConf= - CAST_CONSTPTR(SubStartConf, signal->getDataPtr()); - - Uint32 subscriptionId = subStartConf->subscriptionId; - Uint32 subscriptionKey = subStartConf->subscriptionKey; - SubscriptionData::Part part = - (SubscriptionData::Part)subStartConf->part; - Uint32 subscriberData = subStartConf->subscriberData; - - switch(part) { - case 
SubscriptionData::MetaData: { - DBUG_PRINT("error",("SubscriptionData::MetaData")); - m_error.code= 1; - break; - } - case SubscriptionData::TableData: { - DBUG_PRINT("info",("SubscriptionData::TableData")); - break; - } - default: { - DBUG_PRINT("error",("wrong data")); - m_error.code= 2; - break; - } - } - DBUG_PRINT("info",("subscriptionId=%d,subscriptionKey=%d,subscriberData=%d", - subscriptionId,subscriptionKey,subscriberData)); - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - -void -NdbDictInterface::execSUB_START_REF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execSUB_START_REF"); - const SubStartRef * const subStartRef= - CAST_CONSTPTR(SubStartRef, signal->getDataPtr()); - m_error.code= subStartRef->errorCode; - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} -void -NdbDictInterface::execSUB_GCP_COMPLETE_REP(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - const SubGcpCompleteRep * const rep= - CAST_CONSTPTR(SubGcpCompleteRep, signal->getDataPtr()); - - const Uint32 gci = rep->gci; - // const Uint32 senderRef = rep->senderRef; - const Uint32 subscriberData = rep->subscriberData; - - const Uint32 bufferId = subscriberData; - - const Uint32 ref = signal->theSendersBlockRef; - - NdbApiSignal tSignal(m_reference); - SubGcpCompleteAcc * acc= - CAST_PTR(SubGcpCompleteAcc, tSignal.getDataPtrSend()); - - acc->rep = *rep; - - tSignal.theReceiversBlockNumber = refToBlock(ref); - tSignal.theVerId_signalNumber = GSN_SUB_GCP_COMPLETE_ACC; - tSignal.theLength = SubGcpCompleteAcc::SignalLength; - - Uint32 aNodeId = refToNode(ref); - - // m_transporter->lock_mutex(); - int r; - r = m_transporter->sendSignal(&tSignal, aNodeId); - // m_transporter->unlock_mutex(); - - NdbGlobalEventBufferHandle::latestGCI(bufferId, gci); -} - -void -NdbDictInterface::execSUB_TABLE_DATA(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ -#ifdef EVENT_DEBUG - const char * FNAME = "NdbDictInterface::execSUB_TABLE_DATA"; -#endif - //TODO - const SubTableData * const sdata = CAST_CONSTPTR(SubTableData, signal->getDataPtr()); - - // const Uint32 gci = sdata->gci; - // const Uint32 operation = sdata->operation; - // const Uint32 tableId = sdata->tableId; - // const Uint32 noOfAttrs = sdata->noOfAttributes; - // const Uint32 dataLen = sdata->dataSize; - const Uint32 subscriberData = sdata->subscriberData; - // const Uint32 logType = sdata->logType; - - for (int i=signal->m_noOfSections;i < 3; i++) { - ptr[i].p = NULL; - ptr[i].sz = 0; - } -#ifdef EVENT_DEBUG - ndbout_c("%s: senderData %d, gci %d, operation %d, tableId %d, noOfAttrs %d, dataLen %d", - FNAME, subscriberData, gci, operation, tableId, noOfAttrs, dataLen); - ndbout_c("ptr[0] %u %u ptr[1] %u %u ptr[2] %u %u\n", - ptr[0].p,ptr[0].sz,ptr[1].p,ptr[1].sz,ptr[2].p,ptr[2].sz); -#endif - const Uint32 bufferId = subscriberData; - - NdbGlobalEventBufferHandle::insertDataL(bufferId, - sdata, ptr); -} - -/***************************************************************** - * Drop event - */ -int -NdbDictionaryImpl::dropEvent(const char * eventName) -{ - NdbEventImpl *ev= new NdbEventImpl(); - ev->setName(eventName); - int ret= m_receiver.dropEvent(*ev); - delete ev; - - // printf("__________________RET %u\n", ret); - return ret; -} - -int -NdbDictInterface::dropEvent(const NdbEventImpl &evnt) -{ - NdbApiSignal tSignal(m_reference); - tSignal.theReceiversBlockNumber = DBDICT; - tSignal.theVerId_signalNumber = GSN_DROP_EVNT_REQ; - tSignal.theLength = DropEvntReq::SignalLength; - - DropEvntReq * const req = 
CAST_PTR(DropEvntReq, tSignal.getDataPtrSend()); - - req->setUserRef(m_reference); - req->setUserData(0); - - UtilBufferWriter w(m_buffer); - - w.add(SimpleProperties::StringValue, evnt.m_externalName.c_str()); - - LinearSectionPtr ptr[1]; - ptr[0].p = (Uint32*)m_buffer.get_data(); - ptr[0].sz = (m_buffer.length()+3) >> 2; - - return dropEvent(&tSignal, ptr, 1); -} - -int -NdbDictInterface::dropEvent(NdbApiSignal* signal, - LinearSectionPtr ptr[3], int noLSP) -{ - //TODO - const int noErrCodes = 1; - int errCodes[noErrCodes] = {DropEvntRef::Busy}; - return dictSignal(signal,ptr,noLSP, - 1 /*use masternode id*/, - 100, - WAIT_CREATE_INDX_REQ /*WAIT_CREATE_EVNT_REQ*/, - -1, - errCodes,noErrCodes, DropEvntRef::Temporary); -} -void -NdbDictInterface::execDROP_EVNT_CONF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execDROP_EVNT_CONF"); - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - -void -NdbDictInterface::execDROP_EVNT_REF(NdbApiSignal * signal, - LinearSectionPtr ptr[3]) -{ - DBUG_ENTER("NdbDictInterface::execDROP_EVNT_REF"); - const DropEvntRef* const ref= - CAST_CONSTPTR(DropEvntRef, signal->getDataPtr()); - m_error.code= ref->getErrorCode(); - - DBUG_PRINT("info",("ErrorCode=%u Errorline=%u ErrorNode=%u", - ref->getErrorCode(), ref->getErrorLine(), ref->getErrorNode())); - - m_waiter.signal(NO_WAIT); - DBUG_VOID_RETURN; -} - /***************************************************************** * List objects or indexes */ diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index dfccf120228..6a86ee44bfb 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -208,55 +208,6 @@ public: NdbDictionary::Index * m_facade; }; -class NdbEventImpl : public NdbDictionary::Event, public NdbDictObjectImpl { -public: - NdbEventImpl(); - NdbEventImpl(NdbDictionary::Event &); - ~NdbEventImpl(); - - void init(); - void setName(const char * name); - const char * getName() const; - void setTable(const NdbDictionary::Table& table); - void setTable(const char * table); - const char * getTableName() const; - void addTableEvent(const NdbDictionary::Event::TableEvent t); - void setDurability(NdbDictionary::Event::EventDurability d); - NdbDictionary::Event::EventDurability getDurability() const; - void addEventColumn(const NdbColumnImpl &c); - int getNoOfEventColumns() const; - - void print() { - ndbout_c("NdbEventImpl: id=%d, key=%d", - m_eventId, - m_eventKey); - }; - - Uint32 m_eventId; - Uint32 m_eventKey; - Uint32 m_tableId; - AttributeMask m_attrListBitmask; - //BaseString m_internalName; - BaseString m_externalName; - Uint32 mi_type; - NdbDictionary::Event::EventDurability m_dur; - - - NdbTableImpl *m_tableImpl; - BaseString m_tableName; - Vector m_columns; - Vector m_attrIds; - - int m_bufferId; - - NdbEventOperation *eventOp; - - static NdbEventImpl & getImpl(NdbDictionary::Event & t); - static NdbEventImpl & getImpl(const NdbDictionary::Event & t); - NdbDictionary::Event * m_facade; -}; - - class NdbDictInterface { public: NdbDictInterface(NdbError& err) : m_error(err) { @@ -294,24 +245,12 @@ public: const NdbTableImpl &); int createIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3]); - int createEvent(class Ndb & ndb, NdbEventImpl &, int getFlag); - int createEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP); - int dropTable(const NdbTableImpl &); int dropTable(NdbApiSignal* signal, LinearSectionPtr ptr[3]); int dropIndex(const NdbIndexImpl &, const NdbTableImpl &); int 
dropIndex(NdbApiSignal* signal, LinearSectionPtr ptr[3]); - int dropEvent(const NdbEventImpl &); - int dropEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3], int noLSP); - - int executeSubscribeEvent(class Ndb & ndb, NdbEventImpl &); - int executeSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]); - - int stopSubscribeEvent(class Ndb & ndb, NdbEventImpl &); - int stopSubscribeEvent(NdbApiSignal* signal, LinearSectionPtr ptr[3]); - int listObjects(NdbDictionary::Dictionary::List& list, Uint32 requestData, bool fullyQualifiedNames); int listObjects(NdbApiSignal* signal); @@ -357,17 +296,6 @@ private: void execDROP_INDX_REF(NdbApiSignal *, LinearSectionPtr ptr[3]); void execDROP_INDX_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execCREATE_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execCREATE_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execSUB_START_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execSUB_START_REF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execSUB_TABLE_DATA(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execSUB_GCP_COMPLETE_REP(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execSUB_STOP_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execSUB_STOP_REF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execDROP_EVNT_REF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execDROP_EVNT_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); - void execDROP_TABLE_REF(NdbApiSignal *, LinearSectionPtr ptr[3]); void execDROP_TABLE_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); void execLIST_TABLES_CONF(NdbApiSignal *, LinearSectionPtr ptr[3]); @@ -402,12 +330,6 @@ public: NdbTableImpl * getIndexTable(NdbIndexImpl * index, NdbTableImpl * table); - int createEvent(NdbEventImpl &); - int dropEvent(const char * eventName); - - int executeSubscribeEvent(NdbEventImpl &); - int stopSubscribeEvent(NdbEventImpl &); - int listObjects(List& list, NdbDictionary::Object::Type type); int listIndexes(List& list, Uint32 indexId); @@ -418,8 +340,6 @@ public: const char * tableName); NdbIndexImpl * getIndex(const char * indexName, NdbTableImpl * table); - NdbEventImpl * getEvent(const char * eventName); - NdbEventImpl * getEventImpl(const char * internalName); const NdbError & getNdbError() const; NdbError m_error; @@ -440,18 +360,6 @@ private: Ndb_local_table_info * fetchGlobalTableImpl(const BaseString& internalName); }; -inline -NdbEventImpl & -NdbEventImpl::getImpl(const NdbDictionary::Event & t){ - return t.m_impl; -} - -inline -NdbEventImpl & -NdbEventImpl::getImpl(NdbDictionary::Event & t){ - return t.m_impl; -} - inline NdbColumnImpl & NdbColumnImpl::getImpl(NdbDictionary::Column & t){ diff --git a/ndb/src/ndbapi/Ndberr.cpp b/ndb/src/ndbapi/Ndberr.cpp index b05818de6f1..ad0b4eafcb4 100644 --- a/ndb/src/ndbapi/Ndberr.cpp +++ b/ndb/src/ndbapi/Ndberr.cpp @@ -21,7 +21,6 @@ #include #include #include -#include "NdbEventOperationImpl.hpp" static void update(const NdbError & _err){ @@ -73,10 +72,3 @@ NdbBlob::getNdbError() const { update(theError); return theError; } - -const -NdbError & -NdbEventOperationImpl::getNdbError() const { - update(m_error); - return m_error; -} diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp index 4af070638d4..6aaf44d0168 100644 --- a/ndb/src/ndbapi/Ndbif.cpp +++ b/ndb/src/ndbapi/Ndbif.cpp @@ -661,29 +661,11 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) case GSN_CREATE_INDX_REF: case GSN_DROP_INDX_CONF: case GSN_DROP_INDX_REF: - case GSN_CREATE_EVNT_CONF: - case 
GSN_CREATE_EVNT_REF: - case GSN_DROP_EVNT_CONF: - case GSN_DROP_EVNT_REF: case GSN_LIST_TABLES_CONF: NdbDictInterface::execSignal(&theDictionary->m_receiver, aSignal, ptr); break; - case GSN_SUB_META_DATA: - case GSN_SUB_REMOVE_CONF: - case GSN_SUB_REMOVE_REF: - break; // ignore these signals - case GSN_SUB_GCP_COMPLETE_REP: - case GSN_SUB_START_CONF: - case GSN_SUB_START_REF: - case GSN_SUB_TABLE_DATA: - case GSN_SUB_STOP_CONF: - case GSN_SUB_STOP_REF: - NdbDictInterface::execSignal(&theDictionary->m_receiver, - aSignal, ptr); - break; - case GSN_DIHNDBTAMPER: { tFirstDataPtr = int2void(tFirstData); diff --git a/ndb/src/ndbapi/Ndbinit.cpp b/ndb/src/ndbapi/Ndbinit.cpp index d5ad7066273..40cac675b21 100644 --- a/ndb/src/ndbapi/Ndbinit.cpp +++ b/ndb/src/ndbapi/Ndbinit.cpp @@ -34,10 +34,6 @@ #include "NdbUtil.hpp" #include -class NdbGlobalEventBufferHandle; -NdbGlobalEventBufferHandle *NdbGlobalEventBuffer_init(int); -void NdbGlobalEventBuffer_drop(NdbGlobalEventBufferHandle *); - Ndb::Ndb( Ndb_cluster_connection *ndb_cluster_connection, const char* aDataBase , const char* aSchema) : theImpl(NULL) @@ -107,16 +103,6 @@ void Ndb::setup(Ndb_cluster_connection *ndb_cluster_connection, if (theInitState == NotConstructed) theInitState = NotInitialised; - { - NdbGlobalEventBufferHandle *h= - NdbGlobalEventBuffer_init(NDB_MAX_ACTIVE_EVENTS); - if (h == NULL) { - ndbout_c("Failed NdbGlobalEventBuffer_init(%d)",NDB_MAX_ACTIVE_EVENTS); - exit(-1); - } - theGlobalEventBufferHandle = h; - } - DBUG_VOID_RETURN; } @@ -132,8 +118,6 @@ Ndb::~Ndb() DBUG_PRINT("enter",("Ndb::~Ndb this=0x%x",this)); doDisconnect(); - NdbGlobalEventBuffer_drop(theGlobalEventBufferHandle); - if (TransporterFacade::instance() != NULL && theNdbBlockNumber > 0){ TransporterFacade::instance()->close(theNdbBlockNumber, theFirstTransId); } diff --git a/ndb/test/include/HugoTransactions.hpp b/ndb/test/include/HugoTransactions.hpp index 5795bbc94c9..7a15a2f977d 100644 --- a/ndb/test/include/HugoTransactions.hpp +++ b/ndb/test/include/HugoTransactions.hpp @@ -28,9 +28,6 @@ public: HugoTransactions(const NdbDictionary::Table&, const NdbDictionary::Index* idx = 0); ~HugoTransactions(); - int createEvent(Ndb*); - int eventOperation(Ndb*, void* stats, - int records); int loadTable(Ndb*, int records, int batch = 512, diff --git a/ndb/test/ndbapi/Makefile.am b/ndb/test/ndbapi/Makefile.am index 7dfa239cb66..19d3c4902a8 100644 --- a/ndb/test/ndbapi/Makefile.am +++ b/ndb/test/ndbapi/Makefile.am @@ -31,13 +31,11 @@ testSystemRestart \ testTimeout \ testTransactions \ testDeadlock \ -test_event ndbapi_slow_select testReadPerf testLcp \ +ndbapi_slow_select testReadPerf testLcp \ testPartitioning \ testBitfield \ DbCreate DbAsyncGenerator \ -test_event_multi_table \ -testSRBank \ -test_event_merge +testSRBank #flexTimedAsynch #testBlobs diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 3260b921985..7616c93c9e3 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -767,285 +767,6 @@ HugoTransactions::fillTable(Ndb* pNdb, return NDBT_OK; } -int -HugoTransactions::createEvent(Ndb* pNdb){ - - char eventName[1024]; - sprintf(eventName,"%s_EVENT",tab.getName()); - - NdbDictionary::Dictionary *myDict = pNdb->getDictionary(); - - if (!myDict) { - g_err << "Dictionary not found " - << pNdb->getNdbError().code << " " - << pNdb->getNdbError().message << endl; - return NDBT_FAILED; - } - - NdbDictionary::Event myEvent(eventName); - myEvent.setTable(tab.getName()); - 
myEvent.addTableEvent(NdbDictionary::Event::TE_ALL); - // myEvent.addTableEvent(NdbDictionary::Event::TE_INSERT); - // myEvent.addTableEvent(NdbDictionary::Event::TE_UPDATE); - // myEvent.addTableEvent(NdbDictionary::Event::TE_DELETE); - - // const NdbDictionary::Table *_table = myDict->getTable(tab.getName()); - for(int a = 0; a < tab.getNoOfColumns(); a++){ - // myEvent.addEventColumn(_table->getColumn(a)->getName()); - myEvent.addEventColumn(a); - } - - int res = myDict->createEvent(myEvent); // Add event to database - - if (res == 0) - myEvent.print(); - else if (myDict->getNdbError().classification == - NdbError::SchemaObjectExists) - { - g_info << "Event creation failed event exists\n"; - res = myDict->dropEvent(eventName); - if (res) { - g_err << "Failed to drop event: " - << myDict->getNdbError().code << " : " - << myDict->getNdbError().message << endl; - return NDBT_FAILED; - } - // try again - res = myDict->createEvent(myEvent); // Add event to database - if (res) { - g_err << "Failed to create event (1): " - << myDict->getNdbError().code << " : " - << myDict->getNdbError().message << endl; - return NDBT_FAILED; - } - } - else - { - g_err << "Failed to create event (2): " - << myDict->getNdbError().code << " : " - << myDict->getNdbError().message << endl; - return NDBT_FAILED; - } - - return NDBT_OK; -} - -#include -#include "TestNdbEventOperation.hpp" -#include - -struct receivedEvent { - Uint32 pk; - Uint32 count; - Uint32 event; -}; - -int XXXXX = 0; - -int -HugoTransactions::eventOperation(Ndb* pNdb, void* pstats, - int records) { - int myXXXXX = XXXXX++; - Uint32 i; - const char function[] = "HugoTransactions::eventOperation: "; - struct receivedEvent* recInsertEvent; - NdbAutoObjArrayPtr - p00( recInsertEvent = new struct receivedEvent[3*records] ); - struct receivedEvent* recUpdateEvent = &recInsertEvent[records]; - struct receivedEvent* recDeleteEvent = &recInsertEvent[2*records]; - - EventOperationStats &stats = *(EventOperationStats*)pstats; - - stats.n_inserts = 0; - stats.n_deletes = 0; - stats.n_updates = 0; - stats.n_consecutive = 0; - stats.n_duplicates = 0; - stats.n_inconsistent_gcis = 0; - - for (i = 0; i < records; i++) { - recInsertEvent[i].pk = 0xFFFFFFFF; - recInsertEvent[i].count = 0; - recInsertEvent[i].event = 0xFFFFFFFF; - - recUpdateEvent[i].pk = 0xFFFFFFFF; - recUpdateEvent[i].count = 0; - recUpdateEvent[i].event = 0xFFFFFFFF; - - recDeleteEvent[i].pk = 0xFFFFFFFF; - recDeleteEvent[i].count = 0; - recDeleteEvent[i].event = 0xFFFFFFFF; - } - - NdbDictionary::Dictionary *myDict = pNdb->getDictionary(); - - if (!myDict) { - g_err << function << "Event Creation failedDictionary not found\n"; - return NDBT_FAILED; - } - - int r = 0; - NdbEventOperation *pOp; - - char eventName[1024]; - sprintf(eventName,"%s_EVENT",tab.getName()); - int noEventColumnName = tab.getNoOfColumns(); - - g_info << function << "create EventOperation\n"; - pOp = pNdb->createEventOperation(eventName, 100); - if ( pOp == NULL ) { - g_err << function << "Event operation creation failed\n"; - return NDBT_FAILED; - } - - g_info << function << "get values\n"; - NdbRecAttr* recAttr[1024]; - NdbRecAttr* recAttrPre[1024]; - - const NdbDictionary::Table *_table = myDict->getTable(tab.getName()); - - for (int a = 0; a < noEventColumnName; a++) { - recAttr[a] = pOp->getValue(_table->getColumn(a)->getName()); - recAttrPre[a] = pOp->getPreValue(_table->getColumn(a)->getName()); - } - - // set up the callbacks - g_info << function << "execute\n"; - if (pOp->execute()) { // This starts changes to 
"start flowing" - g_err << function << "operation execution failed: \n"; - g_err << pOp->getNdbError().code << " " - << pOp->getNdbError().message << endl; - return NDBT_FAILED; - } - - g_info << function << "ok\n"; - - int count = 0; - Uint32 last_inconsitant_gci = 0xEFFFFFF0; - - while (r < records){ - //printf("now waiting for event...\n"); - int res = pNdb->pollEvents(1000); // wait for event or 1000 ms - - if (res > 0) { - //printf("got data! %d\n", r); - int overrun; - while (pOp->next(&overrun) > 0) { - r++; - r += overrun; - count++; - - Uint32 gci = pOp->getGCI(); - Uint32 pk = recAttr[0]->u_32_value(); - - if (!pOp->isConsistent()) { - if (last_inconsitant_gci != gci) { - last_inconsitant_gci = gci; - stats.n_inconsistent_gcis++; - } - g_warning << "A node failure has occured and events might be missing\n"; - } - g_info << function << "GCI " << gci << ": " << count; - struct receivedEvent* recEvent; - switch (pOp->getEventType()) { - case NdbDictionary::Event::TE_INSERT: - stats.n_inserts++; - g_info << " INSERT: "; - recEvent = recInsertEvent; - break; - case NdbDictionary::Event::TE_DELETE: - stats.n_deletes++; - g_info << " DELETE: "; - recEvent = recDeleteEvent; - break; - case NdbDictionary::Event::TE_UPDATE: - stats.n_updates++; - g_info << " UPDATE: "; - recEvent = recUpdateEvent; - break; - case NdbDictionary::Event::TE_ALL: - abort(); - } - - if ((int)pk < records) { - recEvent[pk].pk = pk; - recEvent[pk].count++; - } - - g_info << "overrun " << overrun << " pk " << pk; - for (i = 1; i < noEventColumnName; i++) { - if (recAttr[i]->isNULL() >= 0) { // we have a value - g_info << " post[" << i << "]="; - if (recAttr[i]->isNULL() == 0) // we have a non-null value - g_info << recAttr[i]->u_32_value(); - else // we have a null value - g_info << "NULL"; - } - if (recAttrPre[i]->isNULL() >= 0) { // we have a value - g_info << " pre[" << i << "]="; - if (recAttrPre[i]->isNULL() == 0) // we have a non-null value - g_info << recAttrPre[i]->u_32_value(); - else // we have a null value - g_info << "NULL"; - } - } - g_info << endl; - } - } else - ;//printf("timed out\n"); - } - - // sleep ((XXXXX-myXXXXX)*2); - - g_info << myXXXXX << "dropping event operation" << endl; - - int res = pNdb->dropEventOperation(pOp); - if (res != 0) { - g_err << "operation execution failed\n"; - return NDBT_FAILED; - } - - g_info << myXXXXX << " ok" << endl; - - if (stats.n_inserts > 0) { - stats.n_consecutive++; - } - if (stats.n_deletes > 0) { - stats.n_consecutive++; - } - if (stats.n_updates > 0) { - stats.n_consecutive++; - } - for (i = 0; i < (Uint32)records/3; i++) { - if (recInsertEvent[i].pk != i) { - stats.n_consecutive ++; - ndbout << "missing insert pk " << i << endl; - } else if (recInsertEvent[i].count > 1) { - ndbout << "duplicates insert pk " << i - << " count " << recInsertEvent[i].count << endl; - stats.n_duplicates += recInsertEvent[i].count-1; - } - if (recUpdateEvent[i].pk != i) { - stats.n_consecutive ++; - ndbout << "missing update pk " << i << endl; - } else if (recUpdateEvent[i].count > 1) { - ndbout << "duplicates update pk " << i - << " count " << recUpdateEvent[i].count << endl; - stats.n_duplicates += recUpdateEvent[i].count-1; - } - if (recDeleteEvent[i].pk != i) { - stats.n_consecutive ++; - ndbout << "missing delete pk " << i << endl; - } else if (recDeleteEvent[i].count > 1) { - ndbout << "duplicates delete pk " << i - << " count " << recDeleteEvent[i].count << endl; - stats.n_duplicates += recDeleteEvent[i].count-1; - } - } - - return NDBT_OK; -} - int 
HugoTransactions::pkReadRecords(Ndb* pNdb, int records, From 7b876a71d0d12e6ff36d56280343809621ac9239 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 17:36:18 +0200 Subject: [PATCH 45/74] Bug #20843 tests fails randomly with assertion in completeClusterFailed --- sql/ha_ndbcluster_binlog.cc | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 0c0b7ae7a19..0f25f4dc38c 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3442,8 +3442,8 @@ restart: // wait for the first event thd->proc_info= "Waiting for first event from ndbcluster"; DBUG_PRINT("info", ("Waiting for the first event")); - int schema_res= 0; - Uint64 schema_gci= 0; + int schema_res= 0, res= 0; + Uint64 schema_gci= 0, gci= 0; while (schema_res == 0 && !abort_loop) { schema_res= s_ndb->pollEvents(100, &schema_gci); @@ -3452,7 +3452,14 @@ restart: DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci)); if (schema_res > 0) { - i_ndb->pollEvents(0); + while (res >= 0 && gci < schema_gci && !abort_loop) + { + res= i_ndb->pollEvents(100, &gci); + } + if (gci > schema_gci) + { + schema_gci= gci; + } i_ndb->flushIncompleteEvents(schema_gci); s_ndb->flushIncompleteEvents(schema_gci); if (schema_gci < ndb_latest_handled_binlog_epoch) From 38d63c303ad68c81c39418571d373fe7eeb11d33 Mon Sep 17 00:00:00 2001 From: "pekka@clam.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 17:36:19 +0200 Subject: [PATCH 46/74] ndb - ndb api : try to catch autoincr 'error 0' --- storage/ndb/src/ndbapi/Ndb.cpp | 42 ++++++++++++++++++++++------------ 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/storage/ndb/src/ndbapi/Ndb.cpp b/storage/ndb/src/ndbapi/Ndb.cpp index 5b0a9e9d330..5eddbc35665 100644 --- a/storage/ndb/src/ndbapi/Ndb.cpp +++ b/storage/ndb/src/ndbapi/Ndb.cpp @@ -1025,14 +1025,19 @@ int Ndb::initAutoIncrement() setDatabaseName("sys"); setDatabaseSchemaName("def"); - m_sys_tab_0 = getDictionary()->getTableGlobal("SYSTAB_0"); + m_sys_tab_0 = theDictionary->getTableGlobal("SYSTAB_0"); // Restore current name space setDatabaseName(currentDb.c_str()); setDatabaseSchemaName(currentSchema.c_str()); + if (m_sys_tab_0 == NULL) { + assert(theDictionary->m_error.code != 0); + theError.code = theDictionary->m_error.code; + return -1; + } - return (m_sys_tab_0 == NULL); + return 0; } int @@ -1043,19 +1048,19 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, Uint32 aTableId = table->m_id; DBUG_PRINT("enter", ("table=%u value=%llu op=%u", aTableId, opValue, op)); - NdbTransaction* tConnection; - NdbOperation* tOperation= 0; // Compiler warning if not initialized + NdbTransaction* tConnection = NULL; + NdbOperation* tOperation = NULL; Uint64 tValue; NdbRecAttr* tRecAttrResult; - CHECK_STATUS_MACRO_ZERO; + CHECK_STATUS_MACRO; - if (initAutoIncrement()) - goto error_return; + if (initAutoIncrement() == -1) + goto error_handler; tConnection = this->startTransaction(); if (tConnection == NULL) - goto error_return; + goto error_handler; tOperation = tConnection->getNdbOperation(m_sys_tab_0); if (tOperation == NULL) @@ -1065,7 +1070,7 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, { case 0: tOperation->interpretedUpdateTuple(); - tOperation->equal("SYSKEY_0", aTableId ); + tOperation->equal("SYSKEY_0", aTableId); tOperation->incValue("NEXTID", opValue); tRecAttrResult = tOperation->getValue("NEXTID"); @@ -1130,14 +1135,21 @@ Ndb::opTupleIdOnNdb(const NdbTableImpl* table, DBUG_RETURN(0); 
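The error-exit rewrite that follows tightens opTupleIdOnNdb's contract: instead of copying tConnection->theError.code unconditionally, the single error_handler now takes the first non-zero code from the transaction or the operation, asserts that a code was actually recorded, and only then closes the transaction and returns -1. A hypothetical caller showing what that guarantees; the wrapper signature is illustrative of this tree's auto-increment entry points, not quoted from it:

#include <stdio.h>
#include <assert.h>
#include <NdbApi.hpp>

static void fetch_next_id(Ndb* ndb, const NdbDictionary::Table* table)
{
  Uint64 next_id;
  if (ndb->getAutoIncrementValue(table, next_id, 32) == -1)
  {
    // After this patch a failure always carries a non-zero code, so
    // callers can no longer observe a failed call that reports "error 0".
    const NdbError& err = ndb->getNdbError();
    assert(err.code != 0);
    fprintf(stderr, "auto-increment fetch failed: %d %s\n",
            err.code, err.message);
  }
}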
- error_handler: - theError.code = tConnection->theError.code; - this->closeTransaction(tConnection); - error_return: +error_handler: DBUG_PRINT("error", ("ndb=%d con=%d op=%d", theError.code, - tConnection ? tConnection->theError.code : -1, - tOperation ? tOperation->theError.code : -1)); + tConnection != NULL ? tConnection->theError.code : -1, + tOperation != NULL ? tOperation->theError.code : -1)); + + if (theError.code == 0 && tConnection != NULL) + theError.code = tConnection->theError.code; + if (theError.code == 0 && tOperation != NULL) + theError.code = tOperation->theError.code; + DBUG_ASSERT(theError.code != 0); + + if (tConnection != NULL) + this->closeTransaction(tConnection); + DBUG_RETURN(-1); } From 26e39baca130f8758b1f1ab8e6ab55964842ec02 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 18:36:18 +0200 Subject: [PATCH 47/74] added warning on cluster reconnect and binlog usage, that data may be missing --- sql/ha_ndbcluster_binlog.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 0f25f4dc38c..8b04f263b26 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -3473,6 +3473,12 @@ restart: ndb_latest_applied_binlog_epoch= 0; ndb_latest_received_binlog_epoch= 0; } + else if (ndb_latest_applied_binlog_epoch > 0) + { + sql_print_warning("NDB Binlog: cluster has reconnected. " + "Changes to the database that occured while " + "disconnected will not be in the binlog"); + } if (ndb_extra_logging) { sql_print_information("NDB Binlog: starting log at epoch %u", From c9dc1184fd36d078abf9fe29e35c45dffe994ecc Mon Sep 17 00:00:00 2001 From: "joerg@mysql.com" <> Date: Wed, 5 Jul 2006 20:17:04 +0200 Subject: [PATCH 48/74] Extend the Perl script running the test suite to produce a "Logging:" line (like the shell script does). --- mysql-test/mysql-test-run.pl | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 3293487a0ac..122a2524d9e 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -534,6 +534,11 @@ sub command_line_setup () { "($opt_master_myport - $opt_master_myport + 10)"); } + # This is needed for test log evaluation in "gen-build-status-page" + # in all cases where the calling tool does not log the commands + # directly before it executes them, like "make test-force-pl" in RPM builds. 
+ print "Logging: $0 ", join(" ", @ARGV), "\n"; + # Read the command line # Note: Keep list, and the order, in sync with usage at end of this file From aacb705613927c11cc8703a273b324e19368fe21 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 20:20:39 +0200 Subject: [PATCH 49/74] Bug #20419 ndbd --nowait-nodes= fails - updated error message to more correctly reflect the issue --- ndb/include/mgmapi/ndbd_exit_codes.h | 1 + ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 16 +++++++--------- ndb/src/kernel/error/ndbd_exit_codes.c | 2 ++ 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/ndb/include/mgmapi/ndbd_exit_codes.h b/ndb/include/mgmapi/ndbd_exit_codes.h index 686641ebef5..1016234c513 100644 --- a/ndb/include/mgmapi/ndbd_exit_codes.h +++ b/ndb/include/mgmapi/ndbd_exit_codes.h @@ -71,6 +71,7 @@ typedef ndbd_exit_classification_enum ndbd_exit_classification; #define NDBD_EXIT_INDEX_NOTINRANGE 2304 #define NDBD_EXIT_ARBIT_SHUTDOWN 2305 #define NDBD_EXIT_POINTER_NOTINRANGE 2306 +#define NDBD_EXIT_PARTITIONED_SHUTDOWN 2307 #define NDBD_EXIT_SR_OTHERNODEFAILED 2308 #define NDBD_EXIT_NODE_NOT_DEAD 2309 #define NDBD_EXIT_SR_REDOLOG 2310 diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 3d9ade9b57c..0d59c087913 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -907,9 +907,9 @@ retry: char buf[255]; BaseString::snprintf(buf, sizeof(buf), - "Partitioned cluster! check StartPartialTimeout, " - " node %d thinks %d is president, " - " I think president is: %d", + "check StartPartialTimeout, " + "node %d thinks %d is president, " + "I think president is: %d", nodeId, president, cpresident); ndbout_c(buf); @@ -941,7 +941,7 @@ retry: CRASH_INSERTION(932); progError(__LINE__, - NDBD_EXIT_ARBIT_SHUTDOWN, + NDBD_EXIT_PARTITIONED_SHUTDOWN, buf); ndbrequire(false); @@ -2794,7 +2794,7 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, break; case FailRep::ZPARTITIONED_CLUSTER: { - code = NDBD_EXIT_ARBIT_SHUTDOWN; + code = NDBD_EXIT_PARTITIONED_SHUTDOWN; char buf1[100], buf2[100]; c_clusterNodes.getText(buf1); if (signal->getLength()== FailRep::SignalLength + FailRep::ExtraLength && @@ -2805,16 +2805,14 @@ void Qmgr::failReportLab(Signal* signal, Uint16 aFailedNode, part.assign(NdbNodeBitmask::Size, rep->partition); part.getText(buf2); BaseString::snprintf(extra, sizeof(extra), - "Partitioned cluster!" - " Our cluster: %s other cluster: %s", + "Our cluster: %s other cluster: %s", buf1, buf2); } else { jam(); BaseString::snprintf(extra, sizeof(extra), - "Partitioned cluster!" - " Our cluster: %s ", buf1); + "Our cluster: %s", buf1); } msg = extra; break; diff --git a/ndb/src/kernel/error/ndbd_exit_codes.c b/ndb/src/kernel/error/ndbd_exit_codes.c index 257af4c5b1b..07b276346a0 100644 --- a/ndb/src/kernel/error/ndbd_exit_codes.c +++ b/ndb/src/kernel/error/ndbd_exit_codes.c @@ -54,6 +54,8 @@ static const ErrStruct errArray[] = {NDBD_EXIT_ARBIT_SHUTDOWN, XAE, "Node lost connection to other nodes and " "can not form a unpartitioned cluster, please investigate if there are " "error(s) on other node(s)"}, + {NDBD_EXIT_PARTITIONED_SHUTDOWN, XAE, "Partitioned cluster detected. 
" + "Please check if cluster is already running"}, {NDBD_EXIT_POINTER_NOTINRANGE, XIE, "Pointer too large"}, {NDBD_EXIT_SR_OTHERNODEFAILED, XRE, "Another node failed during system " "restart, please investigate error(s) on other node(s)"}, From f413bc2fe8337ada8deab30e86601cac4c53bd93 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 20:24:12 +0200 Subject: [PATCH 50/74] ndbd: added missing jamEntry(); --- ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 0d59c087913..95698a9a37e 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -438,6 +438,7 @@ void Qmgr::execCONNECT_REP(Signal* signal) void Qmgr::execREAD_NODESCONF(Signal* signal) { + jamEntry(); check_readnodes_reply(signal, refToNode(signal->getSendersBlockRef()), GSN_READ_NODESCONF); @@ -446,6 +447,7 @@ Qmgr::execREAD_NODESCONF(Signal* signal) void Qmgr::execREAD_NODESREF(Signal* signal) { + jamEntry(); check_readnodes_reply(signal, refToNode(signal->getSendersBlockRef()), GSN_READ_NODESREF); From 06ac56e61c82eb15eba65b253d380022a38f4bfa Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 21:44:11 +0200 Subject: [PATCH 51/74] Bug #20843 tests fails randomly with assertion in completeClusterFailed - reenabled test as this now seems fixed --- mysql-test/t/disabled.def | 4 ---- 1 file changed, 4 deletions(-) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index 5e6ab1dd728..ebe61e1af4a 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -18,10 +18,6 @@ #im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog -ndb_autodiscover3 : BUD#20843 2006-07-04 tomas ndb_autodiscover3 fails randomly -#ndb_binlog_discover : BUG#19395 2006-04-28 tomas/knielsen mysqld does not always detect cluster shutdown -#ndb_cache2 : BUG#18597 2006-03-28 brian simultaneous drop table and ndb statistics update triggers node failure -#ndb_cache_multi2 : BUG#18597 2006-04-10 kent simultaneous drop table and ndb statistics update triggers node failure ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open From 25ea9964dc8303cd42cfa9d78b0fe115035c8061 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Wed, 5 Jul 2006 23:12:48 +0200 Subject: [PATCH 52/74] disabled ndb_autodiscover3 again --- mysql-test/t/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index ebe61e1af4a..5acf93d4843 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -18,6 +18,7 @@ #im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog +ndb_autodiscover3 : BUG#20872 2006-07-05 tomas ndb_autodiscover3 fails randomly ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed 
partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open From 58c27909e083d8581569817ed0c7abcf70b5bd71 Mon Sep 17 00:00:00 2001 From: "ngrishakin@mysql.com" <> Date: Wed, 5 Jul 2006 23:50:29 +0200 Subject: [PATCH 53/74] .del-ndb_dd_advance.result~165b282a93715547: Delete: mysql-test/r/ndb_dd_advance.result .del-ndb_dd_advance2.result~89be9fe55c2fda5a: Delete: mysql-test/r/ndb_dd_advance2.result .del-ndb_dd_advance2.test~e9e56bc3e8e8f740: Delete: mysql-test/t/ndb_dd_advance2.test .del-ndb_dd_advance.test~58e757b6c1d33996: Delete: mysql-test/t/ndb_dd_advance.test --- mysql-test/r/ndb_dd_advance.result | 1088 --------------------------- mysql-test/r/ndb_dd_advance2.result | 745 ------------------ mysql-test/t/ndb_dd_advance.test | 630 ---------------- mysql-test/t/ndb_dd_advance2.test | 723 ------------------ 4 files changed, 3186 deletions(-) delete mode 100644 mysql-test/r/ndb_dd_advance.result delete mode 100644 mysql-test/r/ndb_dd_advance2.result delete mode 100755 mysql-test/t/ndb_dd_advance.test delete mode 100755 mysql-test/t/ndb_dd_advance2.test diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result deleted file mode 100644 index 09fe75805d5..00000000000 --- a/mysql-test/r/ndb_dd_advance.result +++ /dev/null @@ -1,1088 +0,0 @@ -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; -**** Test Setup Section **** -CREATE LOGFILE GROUP log_group1 -ADD UNDOFILE './log_group1/undofile.dat' -INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE table_space1 -ADD DATAFILE './table_space1/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 -(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -CREATE TABLE test.t2 -(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) -ENGINE=NDB; - -**** Data load for first test **** -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); -INSERT INTO test.t2 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), 
-(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); - -*** Test 1 Section Begins *** -SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -COUNT(*) -1 -SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -pk2 b2 c2 pk1 b c -4 4 4 4 4 4 -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); -COUNT(*) -1 -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); -COUNT(*) -1 -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; -b c -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -9 9 -10 10 -11 11 -12 12 -13 13 -14 14 -15 15 -16 16 -17 17 -18 18 -19 19 -20 20 -21 21 -22 22 -23 23 -24 24 -25 25 -26 26 -27 27 -28 28 -29 29 -30 30 -31 31 -32 32 -33 33 -34 34 -35 35 -36 36 -37 37 -38 38 -39 39 -40 40 -41 41 -42 42 -43 43 -44 44 -45 45 -46 46 -47 47 -48 48 -49 49 -50 50 -51 51 -52 52 -53 53 -54 54 -55 55 -56 56 -57 57 -58 58 -59 59 -60 60 -61 61 -62 62 -63 63 -64 64 -65 65 -66 66 -67 67 -68 68 -69 69 -70 70 -71 71 -72 72 -73 73 -74 74 -75 75 - -*** Setup for test 2 **** -DELETE FROM test.t1; -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); - -**** Test Section 2 **** -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; -b c -1 1 -2 2 -3 3 -4 4 -5 5 -6 6 -7 7 -8 8 -9 9 -10 10 -11 11 -12 12 -13 13 -14 14 -15 15 -16 16 -17 17 -18 18 -19 19 -20 20 -21 21 -22 22 -23 23 -24 24 -25 25 -26 26 -27 27 -28 28 -29 29 -30 30 -31 31 -32 32 -33 33 -34 34 -35 35 -36 36 -37 37 -38 38 -39 39 -40 40 -41 41 -42 42 -43 43 -44 44 -45 45 -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; -COUNT(*) -45 -SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; -COUNT(*) -75 -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `pk2` int(11) NOT NULL, - `b2` int(11) NOT NULL, - `c2` int(11) NOT NULL, - PRIMARY KEY (`pk2`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `pk1` int(11) NOT NULL, - `b` int(11) NOT NULL, - `c` int(11) NOT NULL, - PRIMARY KEY (`pk1`) -) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `pk2` int(11) NOT NULL, - `b2` int(11) NOT NULL, - `c2` int(11) NOT NULL, - PRIMARY KEY (`pk2`) -) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ENGINE=NDBCLUSTER; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `pk1` int(11) NOT NULL, - `b` int(11) NOT NULL, - `c` int(11) NOT NULL, - PRIMARY KEY (`pk1`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 - -DROP TABLE test.t1; -DROP TABLE test.t2; -*** Setup for Test Section 3 *** -CREATE TABLE test.t1 ( -usr_id INT unsigned NOT NULL, -uniq_id INT unsigned NOT NULL AUTO_INCREMENT, -start_num INT unsigned 
NOT NULL DEFAULT 1, -increment INT unsigned NOT NULL DEFAULT 1, -PRIMARY KEY (uniq_id), -INDEX usr_uniq_idx (usr_id, uniq_id), -INDEX uniq_usr_idx (uniq_id, usr_id)) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -CREATE TABLE test.t2 ( -id INT unsigned NOT NULL DEFAULT 0, -usr2_id INT unsigned NOT NULL DEFAULT 0, -max INT unsigned NOT NULL DEFAULT 0, -c_amount INT unsigned NOT NULL DEFAULT 0, -d_max INT unsigned NOT NULL DEFAULT 0, -d_num INT unsigned NOT NULL DEFAULT 0, -orig_time INT unsigned NOT NULL DEFAULT 0, -c_time INT unsigned NOT NULL DEFAULT 0, -active ENUM ("no","yes") NOT NULL, -PRIMARY KEY (id,usr2_id), -INDEX id_idx (id), -INDEX usr2_idx (usr2_id)) -ENGINE=NDB; -INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); - -**** Test Section 3 **** -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; -usr_id uniq_id increment usr2_id c_amount max -3 4 84676 NULL NULL NULL -INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; -usr_id uniq_id increment usr2_id c_amount max -3 4 84676 3 6000 3000 - -DROP TABLE test.t1; -DROP TABLE test.t2; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; -DROP TABLESPACE table_space1 -ENGINE = NDB; -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLESPACE ts2 -ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) -ENGINE=NDB; -SHOW CREATE TABLE t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a` int(11) NOT NULL, - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SHOW CREATE TABLE t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `a` int(11) NOT NULL, - `b` int(11) DEFAULT NULL, - PRIMARY KEY (`a`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -INSERT INTO t1 VALUES (1,1); -INSERT INTO t1 VALUES (2,2); -SELECT * FROM t1 order by a; -a b -1 1 -2 2 -INSERT INTO t2(a,b) SELECT * FROM t1; -SELECT * FROM t2 order by a; -a b -1 1 -2 2 -TRUNCATE t1; -TRUNCATE t2; -INSERT INTO t2 VALUES (3,3); -INSERT INTO t2 VALUES (4,4); -INSERT INTO t1(a,b) SELECT * FROM t2; -SELECT * FROM t1 order by a; -a b -3 3 -4 4 -DROP TABLE t1, t2; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -ALTER TABLESPACE ts2 -DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts2 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; 
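The deleted result file walks through the full disk-data DDL cycle that ndb_dd_advance covered: create a logfile group with an undofile, create tablespaces with datafiles, attach tables to them, then drop everything in reverse order. Underneath the SQL layer these statements map onto NdbDictionary disk-data objects; a rough NDB API sketch of the CREATE side, assuming the 5.1 dictionary classes (sizes mirror the SQL in the test, error handling reduced to return codes):

#include <NdbApi.hpp>

static int create_disk_objects(Ndb* ndb)
{
  NdbDictionary::Dictionary* dict = ndb->getDictionary();

  NdbDictionary::LogfileGroup lg;            // CREATE LOGFILE GROUP lg
  lg.setName("lg");
  lg.setUndoBufferSize(1024 * 1024);         //   UNDO_BUFFER_SIZE = 1M
  if (dict->createLogfileGroup(lg) != 0)
    return -1;

  NdbDictionary::Undofile uf;                //   ADD UNDOFILE
  uf.setPath("./lg_group/undofile.dat");
  uf.setSize(16 * 1024 * 1024);              //   INITIAL_SIZE 16M
  uf.setLogfileGroup("lg");
  if (dict->createUndofile(uf) != 0)
    return -1;

  NdbDictionary::Tablespace ts;              // CREATE TABLESPACE ts1
  ts.setName("ts1");
  ts.setDefaultLogfileGroup("lg");           //   USE LOGFILE GROUP lg
  ts.setExtentSize(1024 * 1024);             //   SQL-layer default EXTENT_SIZE
  if (dict->createTablespace(ts) != 0)
    return -1;

  NdbDictionary::Datafile df;                //   ADD DATAFILE
  df.setPath("./table_space1/datafile.dat");
  df.setSize(12 * 1024 * 1024);              //   INITIAL_SIZE 12M
  df.setTablespace("ts1");
  return dict->createDatafile(df);
}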
-CREATE LOGFILE GROUP lg -ADD UNDOFILE './undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts -ADD DATAFILE './datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t ( -a smallint NOT NULL, -b int NOT NULL, -c bigint NOT NULL, -d char(10), -e TEXT, -f VARCHAR(255), -PRIMARY KEY(a) -) TABLESPACE ts STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); -SHOW CREATE TABLE test.t; -Table Create Table -t CREATE TABLE `t` ( - `a` smallint(6) NOT NULL, - `b` int(11) NOT NULL, - `c` bigint(20) NOT NULL, - `d` char(10) DEFAULT NULL, - `e` text, - `f` varchar(255) DEFAULT NULL, - PRIMARY KEY (`a`), - KEY `d` (`d`), - KEY `f` (`f`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SELECT * FROM test.t order by a; -a b c d e f -1 2 3 aaa1 bbb1 ccccc1 -2 3 4 aaa2 bbb2 ccccc2 -3 4 5 aaa3 bbb3 ccccc3 -4 5 6 aaa4 bbb4 ccccc4 -5 6 7 aaa5 bbb5 ccccc5 -6 7 8 aaa6 bbb6 ccccc6 -7 8 9 aaa7 bbb7 ccccc7 -8 9 10 aaa8 bbb8 ccccc8 -9 10 11 aaa9 bbb9 ccccc9 -10 11 12 aaa10 bbb10 ccccc10 -11 12 13 aaa11 bbb11 ccccc11 -12 13 14 aaa12 bbb12 ccccc12 -13 14 15 aaa13 bbb13 ccccc13 -14 15 16 aaa14 bbb14 ccccc14 -15 16 17 aaa15 bbb15 ccccc15 -16 17 18 aaa16 bbb16 ccccc16 -17 18 19 aaa17 bbb17 ccccc17 -18 19 20 aaa18 bbb18 ccccc18 -19 20 21 aaa19 bbb19 ccccc19 -20 21 22 aaa20 bbb20 ccccc20 -21 22 23 aaa21 bbb21 ccccc21 -22 23 24 aaa22 bbb22 ccccc22 -23 24 25 aaa23 bbb23 ccccc23 -24 25 26 aaa24 bbb24 ccccc24 -25 26 27 aaa25 bbb25 ccccc25 -26 27 28 aaa26 bbb26 ccccc26 -27 28 29 aaa27 bbb27 ccccc27 -28 29 30 aaa28 bbb28 ccccc28 -29 30 31 aaa29 bbb29 ccccc29 -30 31 32 aaa30 bbb30 ccccc30 -31 32 33 aaa31 bbb31 ccccc31 -32 33 34 aaa32 bbb32 ccccc32 -33 34 35 aaa33 bbb33 ccccc33 -34 35 36 aaa34 bbb34 ccccc34 -35 36 37 aaa35 bbb35 ccccc35 -36 37 38 aaa36 bbb36 ccccc36 -37 38 39 aaa37 bbb37 ccccc37 -38 39 40 aaa38 bbb38 ccccc38 -39 40 41 aaa39 bbb39 ccccc39 -40 41 42 aaa40 bbb40 ccccc40 -41 42 43 aaa41 bbb41 ccccc41 -42 43 44 aaa42 bbb42 ccccc42 -43 44 45 aaa43 bbb43 ccccc43 -44 45 46 aaa44 bbb44 ccccc44 -45 46 47 aaa45 bbb45 ccccc45 -46 47 48 aaa46 bbb46 ccccc46 -47 48 49 aaa47 bbb47 ccccc47 -48 49 50 aaa48 bbb48 ccccc48 -49 50 51 aaa49 bbb49 ccccc49 -50 51 52 aaa50 bbb50 ccccc50 -51 52 53 aaa51 bbb51 ccccc51 -52 53 54 aaa52 bbb52 ccccc52 -53 54 55 aaa53 bbb53 ccccc53 -54 55 56 aaa54 bbb54 ccccc54 -55 56 57 aaa55 bbb55 ccccc55 -56 57 58 aaa56 bbb56 ccccc56 -57 58 59 aaa57 bbb57 ccccc57 -58 59 60 aaa58 bbb58 ccccc58 -59 60 61 aaa59 bbb59 ccccc59 -60 61 62 aaa60 bbb60 ccccc60 -61 62 63 aaa61 bbb61 ccccc61 -62 63 64 aaa62 bbb62 ccccc62 -63 64 65 aaa63 bbb63 ccccc63 -64 65 66 aaa64 bbb64 ccccc64 -65 66 67 aaa65 bbb65 ccccc65 -66 67 68 aaa66 bbb66 ccccc66 -67 68 69 aaa67 bbb67 ccccc67 -68 69 70 aaa68 bbb68 ccccc68 -69 70 71 aaa69 bbb69 ccccc69 -70 71 72 aaa70 bbb70 ccccc70 -71 72 73 aaa71 bbb71 ccccc71 -72 73 74 aaa72 bbb72 ccccc72 -73 74 75 aaa73 bbb73 ccccc73 -74 75 76 aaa74 bbb74 ccccc74 -75 76 77 aaa75 bbb75 ccccc75 -76 77 78 aaa76 bbb76 ccccc76 -77 78 79 aaa77 bbb77 ccccc77 -78 79 80 aaa78 bbb78 ccccc78 -79 80 81 aaa79 bbb79 ccccc79 -80 81 82 aaa80 bbb80 ccccc80 -81 82 83 aaa81 bbb81 ccccc81 -82 83 84 aaa82 bbb82 ccccc82 -83 84 85 aaa83 bbb83 ccccc83 -84 85 86 aaa84 bbb84 ccccc84 -85 86 87 aaa85 bbb85 ccccc85 -86 87 88 aaa86 bbb86 ccccc86 -87 88 89 aaa87 bbb87 ccccc87 -88 89 90 aaa88 bbb88 ccccc88 -89 90 91 aaa89 bbb89 ccccc89 -90 91 92 aaa90 bbb90 ccccc90 -91 92 93 aaa91 bbb91 ccccc91 -92 93 94 aaa92 bbb92 ccccc92 -93 94 95 aaa93 bbb93 ccccc93 
-94 95 96 aaa94 bbb94 ccccc94 -95 96 97 aaa95 bbb95 ccccc95 -96 97 98 aaa96 bbb96 ccccc96 -97 98 99 aaa97 bbb97 ccccc97 -98 99 100 aaa98 bbb98 ccccc98 -99 100 101 aaa99 bbb99 ccccc99 -100 101 102 aaa100 bbb100 ccccc100 -DROP TABLE test.t; -USE test; -show tables; -Tables_in_test -t -SELECT * FROM test.t order by a; -a b c d e f -1 2 3 aaa1 bbb1 ccccc1 -2 3 4 aaa2 bbb2 ccccc2 -3 4 5 aaa3 bbb3 ccccc3 -4 5 6 aaa4 bbb4 ccccc4 -5 6 7 aaa5 bbb5 ccccc5 -6 7 8 aaa6 bbb6 ccccc6 -7 8 9 aaa7 bbb7 ccccc7 -8 9 10 aaa8 bbb8 ccccc8 -9 10 11 aaa9 bbb9 ccccc9 -10 11 12 aaa10 bbb10 ccccc10 -11 12 13 aaa11 bbb11 ccccc11 -12 13 14 aaa12 bbb12 ccccc12 -13 14 15 aaa13 bbb13 ccccc13 -14 15 16 aaa14 bbb14 ccccc14 -15 16 17 aaa15 bbb15 ccccc15 -16 17 18 aaa16 bbb16 ccccc16 -17 18 19 aaa17 bbb17 ccccc17 -18 19 20 aaa18 bbb18 ccccc18 -19 20 21 aaa19 bbb19 ccccc19 -20 21 22 aaa20 bbb20 ccccc20 -21 22 23 aaa21 bbb21 ccccc21 -22 23 24 aaa22 bbb22 ccccc22 -23 24 25 aaa23 bbb23 ccccc23 -24 25 26 aaa24 bbb24 ccccc24 -25 26 27 aaa25 bbb25 ccccc25 -26 27 28 aaa26 bbb26 ccccc26 -27 28 29 aaa27 bbb27 ccccc27 -28 29 30 aaa28 bbb28 ccccc28 -29 30 31 aaa29 bbb29 ccccc29 -30 31 32 aaa30 bbb30 ccccc30 -31 32 33 aaa31 bbb31 ccccc31 -32 33 34 aaa32 bbb32 ccccc32 -33 34 35 aaa33 bbb33 ccccc33 -34 35 36 aaa34 bbb34 ccccc34 -35 36 37 aaa35 bbb35 ccccc35 -36 37 38 aaa36 bbb36 ccccc36 -37 38 39 aaa37 bbb37 ccccc37 -38 39 40 aaa38 bbb38 ccccc38 -39 40 41 aaa39 bbb39 ccccc39 -40 41 42 aaa40 bbb40 ccccc40 -41 42 43 aaa41 bbb41 ccccc41 -42 43 44 aaa42 bbb42 ccccc42 -43 44 45 aaa43 bbb43 ccccc43 -44 45 46 aaa44 bbb44 ccccc44 -45 46 47 aaa45 bbb45 ccccc45 -46 47 48 aaa46 bbb46 ccccc46 -47 48 49 aaa47 bbb47 ccccc47 -48 49 50 aaa48 bbb48 ccccc48 -49 50 51 aaa49 bbb49 ccccc49 -50 51 52 aaa50 bbb50 ccccc50 -51 52 53 aaa51 bbb51 ccccc51 -52 53 54 aaa52 bbb52 ccccc52 -53 54 55 aaa53 bbb53 ccccc53 -54 55 56 aaa54 bbb54 ccccc54 -55 56 57 aaa55 bbb55 ccccc55 -56 57 58 aaa56 bbb56 ccccc56 -57 58 59 aaa57 bbb57 ccccc57 -58 59 60 aaa58 bbb58 ccccc58 -59 60 61 aaa59 bbb59 ccccc59 -60 61 62 aaa60 bbb60 ccccc60 -61 62 63 aaa61 bbb61 ccccc61 -62 63 64 aaa62 bbb62 ccccc62 -63 64 65 aaa63 bbb63 ccccc63 -64 65 66 aaa64 bbb64 ccccc64 -65 66 67 aaa65 bbb65 ccccc65 -66 67 68 aaa66 bbb66 ccccc66 -67 68 69 aaa67 bbb67 ccccc67 -68 69 70 aaa68 bbb68 ccccc68 -69 70 71 aaa69 bbb69 ccccc69 -70 71 72 aaa70 bbb70 ccccc70 -71 72 73 aaa71 bbb71 ccccc71 -72 73 74 aaa72 bbb72 ccccc72 -73 74 75 aaa73 bbb73 ccccc73 -74 75 76 aaa74 bbb74 ccccc74 -75 76 77 aaa75 bbb75 ccccc75 -76 77 78 aaa76 bbb76 ccccc76 -77 78 79 aaa77 bbb77 ccccc77 -78 79 80 aaa78 bbb78 ccccc78 -79 80 81 aaa79 bbb79 ccccc79 -80 81 82 aaa80 bbb80 ccccc80 -81 82 83 aaa81 bbb81 ccccc81 -82 83 84 aaa82 bbb82 ccccc82 -83 84 85 aaa83 bbb83 ccccc83 -84 85 86 aaa84 bbb84 ccccc84 -85 86 87 aaa85 bbb85 ccccc85 -86 87 88 aaa86 bbb86 ccccc86 -87 88 89 aaa87 bbb87 ccccc87 -88 89 90 aaa88 bbb88 ccccc88 -89 90 91 aaa89 bbb89 ccccc89 -90 91 92 aaa90 bbb90 ccccc90 -91 92 93 aaa91 bbb91 ccccc91 -92 93 94 aaa92 bbb92 ccccc92 -93 94 95 aaa93 bbb93 ccccc93 -94 95 96 aaa94 bbb94 ccccc94 -95 96 97 aaa95 bbb95 ccccc95 -96 97 98 aaa96 bbb96 ccccc96 -97 98 99 aaa97 bbb97 ccccc97 -98 99 100 aaa98 bbb98 ccccc98 -99 100 101 aaa99 bbb99 ccccc99 -100 101 102 aaa100 bbb100 ccccc100 -DROP TABLE test.t; -ALTER TABLESPACE ts -DROP DATAFILE './datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -DROP table IF EXISTS test.t1; -Warnings: -Note 1051 Unknown table 't1' -DROP table IF EXISTS test.t2; 
-Warnings: -Note 1051 Unknown table 't2' -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLESPACE ts2 -ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 ( -a1 smallint NOT NULL, -a2 int NOT NULL, -a3 bigint NOT NULL, -a4 char(10), -a5 decimal(5,1), -a6 time, -a7 date, -a8 datetime, -a9 VARCHAR(255), -a10 blob, -PRIMARY KEY(a1) -) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` smallint(6) NOT NULL, - `a2` int(11) NOT NULL, - `a3` bigint(20) NOT NULL, - `a4` char(10) DEFAULT NULL, - `a5` decimal(5,1) DEFAULT NULL, - `a6` time DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` datetime DEFAULT NULL, - `a9` varchar(255) DEFAULT NULL, - `a10` blob, - PRIMARY KEY (`a1`), - KEY `a2` (`a2`), - KEY `a3` (`a3`), - KEY `a8` (`a8`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -CREATE TABLE test.t2 ( -b1 smallint NOT NULL, -b2 int NOT NULL, -b3 bigint NOT NULL, -b4 char(10), -b5 decimal(5,1), -b6 time, -b7 date, -b8 datetime, -b9 VARCHAR(255), -b10 blob, -PRIMARY KEY(b1) -) ENGINE=NDB; -ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `b1` smallint(6) NOT NULL, - `b2` int(11) NOT NULL, - `b3` bigint(20) NOT NULL, - `b4` char(10) DEFAULT NULL, - `b5` decimal(5,1) DEFAULT NULL, - `b6` time DEFAULT NULL, - `b7` date DEFAULT NULL, - `b8` datetime DEFAULT NULL, - `b9` varchar(255) DEFAULT NULL, - `b10` blob, - PRIMARY KEY (`b1`), - KEY `b2` (`b2`), - KEY `b3` (`b3`), - KEY `b8` (`b8`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SELECT * FROM test.t1 order by a1; -a1 a2 a3 a4 a5 a6 a7 a8 a9 a10 -1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data -2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data -3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data -4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data -5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data -6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data -7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data -8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data -9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data -10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data -11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data -12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data -13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data -14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data -15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data -16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data -17 18 2000000017 aaa17 34.2 
04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data -18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data -19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data -20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data -SELECT * FROM test.t2 order by b1; -b1 b2 b3 b4 b5 b6 b7 b8 b9 b10 -3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data -4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data -5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data -6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data -7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data -8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data -9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data -10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data -11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data -12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data -13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data -14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data -15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data -16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data -17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data -18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data -19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data -20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data -21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data -22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data -SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; -COUNT(a1) a1 COUNT(a1)*a1 -1 1 1 -1 2 2 -1 3 3 -1 4 4 -1 5 5 -1 6 6 -1 7 7 -1 8 8 -1 9 9 -1 10 10 -1 11 11 -1 12 12 -1 13 13 -1 14 14 -1 15 15 -1 16 16 -1 17 17 -1 18 18 -1 19 19 -1 20 20 -SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; -COUNT(a2) (a2+1) COUNT(a2)*(a2+0) -1 3 2 -1 4 3 -1 5 4 -1 6 5 -1 7 6 -1 8 7 -1 9 8 -1 10 9 -1 11 10 -1 12 11 -1 13 12 -1 14 13 -1 15 14 -1 16 15 -1 17 16 -1 18 17 -1 19 18 -1 20 19 -1 21 20 -1 22 21 -DROP TABLE test.t1; -DROP TABLE test.t2; -create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); -select distinct a from test.t1 group by b,a having a > 2 order by a desc; -a -4 -3 -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; -a c -4 NULL -3 NULL -select distinct a from test.t1 group by b,a having a > 2 order by a asc; -a -3 -4 -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; -a c -3 
NULL -4 NULL -drop table test.t1; -create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); -select * from test.t1 where a >= '1' order by a; -a -1 -1 -1 -2 -2 -3 -3 -select distinct a from test.t1 order by a desc; -a -3 -2 -1 -select distinct a from test.t1 where a >= '1' order by a desc; -a -3 -2 -1 -select distinct a from test.t1 where a >= '1' order by a asc; -a -1 -2 -3 -drop table test.t1; -CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; -INSERT INTO test.t1 (email, infoID, dateentered) VALUES -('test1@testdomain.com', 1, '2002-07-30 22:56:38'), -('test1@testdomain.com', 1, '2002-07-27 22:58:16'), -('test2@testdomain.com', 1, '2002-06-19 15:22:19'), -('test2@testdomain.com', 2, '2002-06-18 14:23:47'), -('test3@testdomain.com', 1, '2002-05-19 22:17:32'); -INSERT INTO test.t2(infoID, shipcode) VALUES -(1, 'Z001'), -(2, 'R002'); -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; -email shipcode -test1@testdomain.com Z001 -test2@testdomain.com R002 -test2@testdomain.com Z001 -test3@testdomain.com Z001 -SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; -email -test1@testdomain.com -test2@testdomain.com -test3@testdomain.com -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; -email shipcode -test1@testdomain.com Z001 -test2@testdomain.com Z001 -test2@testdomain.com R002 -test3@testdomain.com Z001 -drop table test.t1,test.t2; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -ALTER TABLESPACE ts2 -DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts2 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -DROP TABLE IF EXISTS test.t; -Warnings: -Note 1051 Unknown table 't' -create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; -insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); -insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); -select * from test.t order by f1; -f1 f2 f3 -111111 aaaaaa 1 -222222 bbbbbb 2 -select f1,f2 from test.t order by f2; -f1 f2 -111111 aaaaaa -222222 bbbbbb -select f2 from test.t order by f2; -f2 -aaaaaa -bbbbbb -select f1,f2 from test.t order by f1; -f1 f2 -111111 aaaaaa -222222 bbbbbb -drop table test.t; -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts -ADD DATAFILE './table_space/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -Table 
Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` blob, - `a3` text, - PRIMARY KEY (`a1`) -) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` blob, - `a3` text, - PRIMARY KEY (`a1`) -) ENGINE=InnoDB DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -Warnings: -Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK' -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) DEFAULT NULL, - `a2` blob, - `a3` text -) ENGINE=MyISAM DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL, - `a6` bigint(20) DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` time DEFAULT NULL, - `a9` datetime DEFAULT NULL, - `a10` tinytext, - `a11` mediumtext, - `a12` longtext, - `a13` text, - `a14` blob, - PRIMARY KEY (`a1`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), -ADD INDEX (a7), ADD INDEX (a8); -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL, - `a6` bigint(20) DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` time DEFAULT NULL, - `a9` datetime DEFAULT NULL, - `a10` tinytext, - `a11` mediumtext, - `a12` longtext, - `a13` text, - `a14` blob, - PRIMARY KEY (`a1`), - KEY `a2` (`a2`), - KEY `a3` (`a3`), - KEY `a5` (`a5`), - KEY `a6` (`a6`), - KEY `a7` (`a7`), - KEY `a8` (`a8`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 
BLOB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL, - `a6` bigint(20) DEFAULT NULL, - `a7` date DEFAULT NULL, - `a8` time DEFAULT NULL, - `a9` datetime DEFAULT NULL, - `a10` tinytext, - `a11` mediumtext, - `a12` longtext, - `a13` text, - `a14` blob, - PRIMARY KEY (`a1`) -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -ALTER TABLE test.t1 DROP a14; -ALTER TABLE test.t1 DROP a13; -ALTER TABLE test.t1 DROP a12; -ALTER TABLE test.t1 DROP a11; -ALTER TABLE test.t1 DROP a10; -ALTER TABLE test.t1 DROP a9; -ALTER TABLE test.t1 DROP a8; -ALTER TABLE test.t1 DROP a7; -ALTER TABLE test.t1 DROP a6; -ALTER TABLE test.t1 DROP PRIMARY KEY; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` float DEFAULT NULL, - `a3` double DEFAULT NULL, - `a4` bit(1) DEFAULT NULL, - `a5` tinyint(4) DEFAULT NULL -) ENGINE=ndbcluster DEFAULT CHARSET=latin1 -DROP TABLE test.t1; -ALTER TABLESPACE ts -DROP DATAFILE './table_space/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result deleted file mode 100644 index 00490fbc32b..00000000000 --- a/mysql-test/r/ndb_dd_advance2.result +++ /dev/null @@ -1,745 +0,0 @@ -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; -DROP TABLE IF EXISTS test.t3; -***** -**** Copy data from table in one table space to table in different table space -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLESPACE ts2 -ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) -TABLESPACE ts2 STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` varchar(256) DEFAULT NULL, - `a3` blob, - PRIMARY KEY (`a1`) -) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `a1` int(11) NOT NULL, - `a2` varchar(256) DEFAULT NULL, - `a3` blob, - PRIMARY KEY (`a1`) -) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); -INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); -SELECT * FROM test.t1; -a1 a2 a3 -1 111111 aaaaaaaa -2 222222 bbbbbbbb -INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; -SELECT * FROM test.t2; -a1 a2 a3 -1 111111 aaaaaaaa -2 222222 bbbbbbbb -DROP TABLE test.t1, test.t2; -set @vc1 = repeat('a', 200); -set @vc2 = repeat('b', 500); -set @vc3 = repeat('c', 1000); -set @vc4 = repeat('d', 4000); -set @x0 = '01234567012345670123456701234567'; -set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); -set @b1 = 'b1'; -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@x0); -set @d1 = 'dd1'; -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); 
-set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @b2 = 'b2'; -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @d2 = 'dd2'; -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -select length(@x0),length(@b1),length(@d1) from dual; -length(@x0) length(@b1) length(@d1) -256 2256 3000 -select length(@x0),length(@b2),length(@d2) from dual; -length(@x0) length(@b2) length(@d2) -256 20000 30000 -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) -TABLESPACE ts2 STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -Table Create Table -t1 CREATE TABLE `t1` ( - `a1` int(11) NOT NULL, - `a2` varchar(5000) DEFAULT NULL, - `a3` blob, - PRIMARY KEY (`a1`) -) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -SHOW CREATE TABLE test.t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `a1` int(11) NOT NULL, - `a2` varchar(5000) DEFAULT NULL, - `a3` blob, - PRIMARY KEY (`a1`) -) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 -INSERT INTO test.t1 VALUES (1,@vc1,@d1); -INSERT INTO test.t1 VALUES (2,@vc2,@b1); -INSERT INTO test.t1 VALUES (3,@vc3,@d2); -INSERT INTO test.t1 VALUES (4,@vc4,@b2); -SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) -FROM test.t1 WHERE a1=1; -a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3) -1 200 aa 3000 dd1 -SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) -FROM test.t1 where a1=2; -a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) -2 500 bb 2256 b1b -INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; -SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) -FROM test.t2 WHERE a1=1; -a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3) -1 200 aa 3000 dd1 -SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) -FROM test.t2 where a1=2; -a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3) -2 500 bb 2256 b1b -DROP TABLE test.t1, test.t2; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -ALTER TABLESPACE ts2 -DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts2 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -**** Insert, Update, Delete from NDB table with BLOB fields -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -set @vc1 = repeat('a', 200); -set @vc2 = repeat('b', 500); -set @vc3 = repeat('c', 1000); -set @vc4 = repeat('d', 4000); -set @vc5 = repeat('d', 5000); -set @bb1 = repeat('1', 2000); -set @bb2 = repeat('2', 5000); -set @bb3 = repeat('3', 10000); -set @bb4 = repeat('4', 40000); -set @bb5 = repeat('5', 50000); -select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; -length(@vc1) length(@vc2) length(@vc3) 
length(@vc4) length(@vc5) -200 500 1000 4000 5000 -select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; -length(@bb1) length(@bb2) length(@bb3) length(@bb4) length(@bb5) -2000 5000 10000 40000 50000 -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -INSERT INTO test.t1 VALUES (1,@vc1,@bb1); -INSERT INTO test.t1 VALUES (2,@vc2,@bb2); -INSERT INTO test.t1 VALUES (3,@vc3,@bb3); -INSERT INTO test.t1 VALUES (4,@vc4,@bb4); -INSERT INTO test.t1 VALUES (5,@vc5,@bb5); -UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; -SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) -FROM test.t1 WHERE a1=1; -a1 length(a2) substr(a2,4998,2) length(a3) substr(a3,49997,3) -1 5000 dd 50000 555 -UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; -SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) -FROM test.t1 WHERE a1=2; -a1 length(a2) substr(a2,3998,2) length(a3) substr(a3,39997,3) -2 4000 dd 40000 444 -UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3; -SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3) -FROM test.t1 WHERE a1=3; -a1 length(a2) substr(a2,498,2) length(a3) substr(a3,3997,3) -3 500 bb 5000 222 -UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4; -SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3) -FROM test.t1 WHERE a1=4; -a1 length(a2) substr(a2,998,2) length(a3) substr(a3,9997,3) -4 1000 cc 10000 333 -UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5; -SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3) -FROM test.t1 WHERE a1=5; -a1 length(a2) substr(a2,198,2) length(a3) substr(a3,1997,3) -5 200 aa 2000 111 -DELETE FROM test.t1 where a1=5; -SELECT count(*) from test.t1; -count(*) -4 -DELETE FROM test.t1 where a1=4; -SELECT count(*) from test.t1; -count(*) -3 -DELETE FROM test.t1 where a1=3; -SELECT count(*) from test.t1; -count(*) -2 -DELETE FROM test.t1 where a1=2; -SELECT count(*) from test.t1; -count(*) -1 -DELETE FROM test.t1 where a1=1; -SELECT count(*) from test.t1; -count(*) -0 -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; -DROP TABLESPACE ts1 ENGINE NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -**** Create Stored procedures that use disk based tables -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB// -CREATE PROCEDURE test.sp1() -BEGIN -INSERT INTO test.t1 values (1,'111111','aaaaaaaa'); -END// -CALL test.sp1(); -SELECT * FROM test.t1; -a1 a2 a3 -1 111111 aaaaaaaa -CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB) -BEGIN -UPDATE test.t1 SET a2=vc, a3=blb where a1=n; -END// -CALL test.sp2(1,'222222','bbbbbbbb'); -SELECT * FROM test.t1; -a1 a2 a3 -1 222222 bbbbbbbb -DELETE FROM test.t1; -DROP PROCEDURE test.sp1; -DROP PROCEDURE test.sp2; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create function that operate on disk based tables -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE 
'./table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE FUNCTION test.fn1(n INT) RETURNS INT -BEGIN -DECLARE v INT; -SELECT a1 INTO v FROM test.t1 WHERE a1=n; -RETURN v; -END// -CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB -BEGIN -DECLARE vv BLOB; -UPDATE test.t1 SET a3=blb where a1=n; -SELECT a3 INTO vv FROM test.t1 WHERE a1=n; -RETURN vv; -END// -SELECT test.fn1(10) FROM DUAL; -test.fn1(10) -10 -SELECT test.fn2(50, 'new BLOB content') FROM DUAL; -test.fn2(50, 'new BLOB content') -new BLOB content -DELETE FROM test.t1; -DROP FUNCTION test.fn1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create triggers that operate on disk based tables -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW -BEGIN -if isnull(new.a2) then -set new.a2:= 'trg1 works on a2 field'; -end if; -if isnull(new.a3) then -set new.a3:= 'trg1 works on a3 field'; -end if; -end// -insert into test.t1 (a1) values (1)// -insert into test.t1 (a1,a2) values (2, 'ccccccc')// -select * from test.t1// -a1 a2 a3 -1 trg1 works on a2 field trg1 works on a3 field -2 ccccccc trg1 works on a3 field -DELETE FROM test.t1; -DROP TRIGGER test.trg1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create, update views that operate on disk based tables -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE VIEW test.v1 AS SELECT * FROM test.t1; -SELECT * FROM test.v1 order by a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -2 aaaaa2 bbbbb2 -3 aaaaa3 bbbbb3 -4 aaaaa4 bbbbb4 -5 aaaaa5 bbbbb5 -6 aaaaa6 bbbbb6 -7 aaaaa7 bbbbb7 -8 aaaaa8 bbbbb8 -9 aaaaa9 bbbbb9 -10 aaaaa10 bbbbb10 -CHECK TABLE test.v1, test.t1; -Table Op Msg_type Msg_text -test.v1 check status OK -test.t1 check note The storage engine for the table doesn't support check -UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5; -SELECT * FROM test.v1 order by a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -2 aaaaa2 bbbbb2 -3 aaaaa3 bbbbb3 -4 aaaaa4 bbbbb4 -5 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz bbbbb5 -6 aaaaa6 bbbbb6 -7 aaaaa7 bbbbb7 -8 aaaaa8 bbbbb8 -9 aaaaa9 bbbbb9 -10 aaaaa10 bbbbb10 -DROP VIEW test.v1; -DELETE FROM test.t1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create and use disk based table that use auto inc -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M 
-ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa10 bbbbb10 -2 aaaaa9 bbbbb9 -3 aaaaa8 bbbbb8 -4 aaaaa7 bbbbb7 -5 aaaaa6 bbbbb6 -6 aaaaa5 bbbbb5 -7 aaaaa4 bbbbb4 -8 aaaaa3 bbbbb3 -9 aaaaa2 bbbbb2 -10 aaaaa1 bbbbb1 -DELETE FROM test.t1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create test that use transaction (commit, rollback) -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -SET AUTOCOMMIT=0; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); -COMMIT; -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); -ROLLBACK; -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -DELETE FROM test.t1; -DROP TABLE test.t1; -SET AUTOCOMMIT=1; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -START TRANSACTION; -INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); -COMMIT; -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -START TRANSACTION; -INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); -ROLLBACK; -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -DELETE FROM test.t1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create test that uses locks -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -drop table if exists test.t1; -CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -LOCK TABLES test.t1 write; -INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); -INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -2 aaaaa2 bbbbb2 -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -2 aaaaa2 bbbbb2 -INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); -UNLOCK TABLES; -INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); -SELECT * FROM test.t1 ORDER BY a1; -a1 a2 a3 -1 aaaaa1 bbbbb1 -2 aaaaa2 bbbbb2 -3 aaaaa3 bbbbb3 -4 aaaaa3 bbbbb3 -DELETE FROM test.t1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create large disk base table, do random queries, check cache hits -***** -set @vc1 = repeat('a', 200); -SELECT @vc1 FROM DUAL; -@vc1 
-aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa -set @vc2 = repeat('b', 500); -set @vc3 = repeat('b', 998); -set @x0 = '01234567012345670123456701234567'; -set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); -set @b1 = 'b1'; -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@x0); -set @d1 = 'dd1'; -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @b2 = 'b2'; -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @d2 = 'dd2'; -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -select length(@x0),length(@b1),length(@d1) from dual; -length(@x0) length(@b1) length(@d1) -256 2256 3000 -select length(@x0),length(@b2),length(@d2) from dual; -length(@x0) length(@b2) length(@d2) -256 20000 30000 -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -INSERT INTO test.t1 values(1,@vc1,@d1); -INSERT INTO test.t1 values(2,@vc2,@d2); -explain SELECT * from test.t1 WHERE a1 = 1; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 -SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) -FROM test.t1 WHERE a1=1 ORDER BY a1; -a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) -1 200 3000 dd1 -SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) -FROM test.t1 where a1=2 ORDER BY a1; -a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) -2 500 30000 dd2 -UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; -UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; -SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) -FROM test.t1 where a1=1; -a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3) -1 500 30000 dd2 -SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) -FROM test.t1 where a1=2; -a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) -2 200 3000 dd1 -DELETE FROM test.t1; -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; -***** -***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE -***** -CREATE LOGFILE GROUP lg -ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; -CREATE TABLESPACE ts1 -ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg -INITIAL_SIZE 12M -ENGINE NDB; -CREATE TABLE test.t1 (a1 int NOT NULL 
PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) -TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -SELECT COUNT(*) from test.t1; -COUNT(*) -100 -SELECT SUM(a1) from test.t1; -SUM(a1) -5050 -SELECT MIN(a1) from test.t1; -MIN(a1) -1 -SELECT MAX(a1) from test.t1; -MAX(a1) -100 -SELECT a5 from test.t1 where a1=50; -a5 -root@localhost -SELECT * from test.t1 order by a1; -a1 a2 a3 a4 a5 -1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-20 root@localhost -2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-20 root@localhost -3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-20 root@localhost -4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-20 root@localhost -5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-20 root@localhost -6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-20 root@localhost -7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-20 root@localhost -8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-20 root@localhost -9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-20 root@localhost -10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-20 root@localhost -11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-20 root@localhost -12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-20 root@localhost -13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-20 root@localhost -14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-20 root@localhost -15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-20 root@localhost -16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-20 root@localhost -17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-20 root@localhost -18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-20 root@localhost -19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-20 root@localhost -20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-20 root@localhost -21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-20 root@localhost -22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-20 root@localhost -23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-20 root@localhost -24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-20 root@localhost -25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-20 root@localhost -26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-20 root@localhost -27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-20 root@localhost -28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-20 root@localhost -29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-20 root@localhost -30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-20 root@localhost -31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-20 root@localhost -32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-20 root@localhost -33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-20 root@localhost -34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-20 root@localhost -35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-20 root@localhost -36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-20 root@localhost -37 aaaaaaaaaaaaaaaa37 bbbbbbbbbbbbbbbbbb37 2006-06-20 root@localhost -38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-20 root@localhost -39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-20 root@localhost -40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-20 root@localhost -41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-20 root@localhost -42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-20 root@localhost -43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-20 root@localhost -44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-20 root@localhost -45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-20 root@localhost -46 aaaaaaaaaaaaaaaa46 
bbbbbbbbbbbbbbbbbb46 2006-06-20 root@localhost -47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-20 root@localhost -48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-20 root@localhost -49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-20 root@localhost -50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-20 root@localhost -51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-20 root@localhost -52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-20 root@localhost -53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-20 root@localhost -54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-20 root@localhost -55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-20 root@localhost -56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-20 root@localhost -57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-20 root@localhost -58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-20 root@localhost -59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-20 root@localhost -60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-20 root@localhost -61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-20 root@localhost -62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-20 root@localhost -63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-20 root@localhost -64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-20 root@localhost -65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-20 root@localhost -66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-20 root@localhost -67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-20 root@localhost -68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-20 root@localhost -69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-20 root@localhost -70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-20 root@localhost -71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-20 root@localhost -72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-20 root@localhost -73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-20 root@localhost -74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-20 root@localhost -75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-20 root@localhost -76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-20 root@localhost -77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-20 root@localhost -78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-20 root@localhost -79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-20 root@localhost -80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-20 root@localhost -81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-20 root@localhost -82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-20 root@localhost -83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-20 root@localhost -84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-20 root@localhost -85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-20 root@localhost -86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-20 root@localhost -87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-20 root@localhost -88 aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-20 root@localhost -89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-20 root@localhost -90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-20 root@localhost -91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-20 root@localhost -92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-20 root@localhost -93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-20 root@localhost -94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-20 root@localhost -95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-20 root@localhost -96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-20 root@localhost -97 
aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-20 root@localhost -98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-20 root@localhost -99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-20 root@localhost -100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-20 root@localhost -DROP TABLE test.t1; -ALTER TABLESPACE ts1 -DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; -DROP TABLESPACE ts1 ENGINE=NDB; -DROP LOGFILE GROUP lg -ENGINE=NDB; diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test deleted file mode 100755 index e882ec794c1..00000000000 --- a/mysql-test/t/ndb_dd_advance.test +++ /dev/null @@ -1,630 +0,0 @@ -############################################################## -# Author: JBM -# Date: 2006-01-12 -# Purpose: To test using ndb memory and disk tables together. -############################################################## - -############################################################## -# Author: Nikolay -# Date: 2006-05-12 -# Purpose: To test using ndb memory and disk tables together. -# -# Select from disk into memory table -# Select from memory table into disk table -# Create test that loads data, use mysql dump to dump data, drop table, -# create table and load from mysql dump. -# Use group by asc and desc; Use having; Use order by -# ALTER Tests (Meta data testing): -# ALTER from InnoDB to Cluster Disk Data -# ALTER from MyISAM to Cluster Disk Data -# ALTER from Cluster Disk Data to InnoDB -# ALTER from Cluster Disk Data to MyISAM -# ALTER DD Tables and add columns -# ALTER DD Tables and add Indexes -# ALTER DD Tables and drop columns -# -############################################################## - --- source include/have_ndb.inc --- source include/not_embedded.inc - ---disable_warnings -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; ---enable_warnings - -############ Test Setup Section ############# --- echo **** Test Setup Section **** - -CREATE LOGFILE GROUP log_group1 -ADD UNDOFILE './log_group1/undofile.dat' -INITIAL_SIZE 16M -UNDO_BUFFER_SIZE = 1M -ENGINE=NDB; - -CREATE TABLESPACE table_space1 -ADD DATAFILE './table_space1/datafile.dat' -USE LOGFILE GROUP log_group1 -INITIAL_SIZE 12M -ENGINE NDB; - - -CREATE TABLE test.t1 -(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; - -CREATE TABLE test.t2 -(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) -ENGINE=NDB; - ---echo -##################### Data load for first test #################### ---echo **** Data load for first test **** - -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); - - -INSERT INTO test.t2 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
-(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), -(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), -(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), -(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), -(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), -(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), -(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); - ---echo -##################### Test 1 Section Begins ############### ---echo *** Test 1 Section Begins *** -SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; ---echo -####################### Test 1 Section End ################ - -##################### Setup for test 2 #################### ---echo *** Setup for test 2 **** -DELETE FROM test.t1; -INSERT INTO test.t1 VALUES -(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), -(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), -(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), -(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), -(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), -(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), -(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), -(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), -(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); ---echo -############################# Test Section 2 ############### ---echo **** Test Section 2 **** -SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; -SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; -SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; -SHOW CREATE TABLE test.t2; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; -SHOW CREATE TABLE test.t2; -ALTER TABLE test.t1 ENGINE=NDBCLUSTER; -SHOW CREATE TABLE test.t1; ---echo -######################### End Test Section 2 ################# -DROP TABLE test.t1; -DROP TABLE test.t2; -##################### Setup for Test Section 3 ############### ---echo *** Setup for Test Section 3 *** -CREATE TABLE test.t1 ( - usr_id INT unsigned NOT NULL, - uniq_id INT unsigned NOT NULL AUTO_INCREMENT, - start_num INT unsigned NOT NULL DEFAULT 1, - increment INT unsigned NOT NULL DEFAULT 1, - PRIMARY KEY (uniq_id), - INDEX usr_uniq_idx (usr_id, uniq_id), - INDEX uniq_usr_idx (uniq_id, usr_id)) -TABLESPACE table_space1 STORAGE DISK -ENGINE=NDB; - - -CREATE TABLE test.t2 ( - id INT unsigned NOT NULL DEFAULT 0, - usr2_id INT unsigned NOT NULL DEFAULT 0, - max INT unsigned NOT NULL DEFAULT 0, - c_amount INT unsigned NOT NULL DEFAULT 0, - d_max INT unsigned NOT NULL DEFAULT 0, - d_num INT unsigned NOT NULL DEFAULT 0, - orig_time INT unsigned NOT NULL DEFAULT 0, - c_time INT unsigned NOT NULL DEFAULT 0, - active ENUM ("no","yes") NOT NULL, - PRIMARY KEY (id,usr2_id), - INDEX id_idx (id), - INDEX usr2_idx (usr2_id)) -ENGINE=NDB; - -INSERT INTO test.t1 VALUES 
(3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); - ---echo -###################### Test Section 3 ###################### ---echo **** Test Section 3 **** -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; - -INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); -INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); - -SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, -test.t2.usr2_id,test.t2.c_amount,test.t2.max -FROM test.t1 -LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id -WHERE test.t1.uniq_id = 4 -ORDER BY test.t2.c_amount; ---echo -####################### End Section 3 ######################### -DROP TABLE test.t1; -DROP TABLE test.t2; -ALTER TABLESPACE table_space1 -DROP DATAFILE './table_space1/datafile.dat' -ENGINE = NDB; - -DROP TABLESPACE table_space1 -ENGINE = NDB; - -DROP LOGFILE GROUP log_group1 -ENGINE =NDB; - -####################### Section 4 ######################### - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLESPACE ts2 - ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - -### Select from disk into memory table ### - - CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int) - ENGINE=NDB; - - SHOW CREATE TABLE t1; - SHOW CREATE TABLE t2; - - INSERT INTO t1 VALUES (1,1); - INSERT INTO t1 VALUES (2,2); - SELECT * FROM t1 order by a; - INSERT INTO t2(a,b) SELECT * FROM t1; - SELECT * FROM t2 order by a; - -### Select from memory table into disk table ### - - TRUNCATE t1; - TRUNCATE t2; - INSERT INTO t2 VALUES (3,3); - INSERT INTO t2 VALUES (4,4); - INSERT INTO t1(a,b) SELECT * FROM t2; - SELECT * FROM t1 order by a; - - DROP TABLE t1, t2; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - - DROP TABLESPACE ts1 ENGINE NDB; - - ALTER TABLESPACE ts2 - DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; - - DROP TABLESPACE ts2 ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create test that loads data, use mysql dump to dump data, drop table, -#### create table and load from mysql dump.
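For orientation, the block that follows drives this dump-and-reload cycle through the mysqltest harness via --exec. As a minimal standalone sketch of the same round trip, assuming a running server reachable with default client options (the /tmp path is illustrative, not taken from the test):

    # Dump schema and data for the whole `test` database
    mysqldump --skip-comments --databases test > /tmp/t_dump.sql
    # Drop the table, then replay the dump to recreate and repopulate it
    mysql -e 'DROP TABLE test.t'
    mysql test < /tmp/t_dump.sql
    # Verify the reloaded contents
    mysql -e 'SELECT * FROM test.t ORDER BY a'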
- -# DROP DATABASE IF EXISTS test; - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts - ADD DATAFILE './datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -# CREATE DATABASE test; - -CREATE TABLE test.t ( - a smallint NOT NULL, - b int NOT NULL, - c bigint NOT NULL, - d char(10), - e TEXT, - f VARCHAR(255), - PRIMARY KEY(a) -) TABLESPACE ts STORAGE DISK ENGINE=NDB; - - ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); - SHOW CREATE TABLE test.t; - -# insert records into tables - - let $1=100; - disable_query_log; - while ($1) - { - eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); - dec $1; - } - enable_query_log; - - SELECT * FROM test.t order by a; ---exec $MYSQL_DUMP --skip-comments --databases test > $MYSQLTEST_VARDIR/tmp/t_dump.sql -DROP TABLE test.t; ---exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/t_dump.sql -USE test; -show tables; - -SELECT * FROM test.t order by a; - - DROP TABLE test.t; -# DROP DATABASE test; - - ALTER TABLESPACE ts - DROP DATAFILE './datafile.dat' - ENGINE NDB; - - DROP TABLESPACE ts ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### BUG 18856 test case commented out ##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load ##### data back to the data file. - -# CREATE LOGFILE GROUP lg -# ADD UNDOFILE './undofile.dat' -# INITIAL_SIZE 16M -# UNDO_BUFFER_SIZE = 1M -# ENGINE=NDB; - -# CREATE TABLESPACE ts -# ADD DATAFILE './datafile.dat' -# USE LOGFILE GROUP lg -# INITIAL_SIZE 12M -# ENGINE NDB; - -#CREATE DATABASE test; - -#CREATE TABLE test.t ( -# a smallint NOT NULL, -# b int NOT NULL, -# c bigint NOT NULL, -# d char(10), -# e TEXT, -# f VARCHAR(255), -# PRIMARY KEY(a) -#) TABLESPACE ts STORAGE DISK ENGINE=NDB; - -# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); -# SHOW CREATE TABLE test.t; - -# insert records into tables - -# let $1=100; -# disable_query_log; -# while ($1) -# { -# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); -# dec $1; -# } -# enable_query_log; - -# SELECT * FROM test.t order by a; - -# SELECT * INTO OUTFILE 't_backup' FROM test.t; -# TRUNCATE test.t; - -#'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting #transaction. - -# SELECT count(*) FROM test.t; -# LOAD DATA INFILE 't_backup' INTO TABLE test.t; - -# SELECT * FROM test.t order by a; - -# DROP TABLE test.t; -# DROP DATABASE test; - -# ALTER TABLESPACE ts -# DROP DATAFILE './datafile.dat' -# ENGINE NDB; -# DROP TABLESPACE ts ENGINE NDB; -# DROP LOGFILE GROUP lg -# ENGINE=NDB; - -#### Use group by asc and desc; Use having; Use order by.
#### - -# DROP DATABASE IF EXISTS test; - DROP table IF EXISTS test.t1; - DROP table IF EXISTS test.t2; - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLESPACE ts2 - ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -# CREATE DATABASE test; - -CREATE TABLE test.t1 ( - a1 smallint NOT NULL, - a2 int NOT NULL, - a3 bigint NOT NULL, - a4 char(10), - a5 decimal(5,1), - a6 time, - a7 date, - a8 datetime, - a9 VARCHAR(255), - a10 blob, - PRIMARY KEY(a1) -) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - - ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); - SHOW CREATE TABLE test.t1; - -CREATE TABLE test.t2 ( - b1 smallint NOT NULL, - b2 int NOT NULL, - b3 bigint NOT NULL, - b4 char(10), - b5 decimal(5,1), - b6 time, - b7 date, - b8 datetime, - b9 VARCHAR(255), - b10 blob, - PRIMARY KEY(b1) -) ENGINE=NDB; - - ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); - SHOW CREATE TABLE test.t2; - -let $1=20; -disable_query_log; -while ($1) -{ - eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); - eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); - dec $1; -} -enable_query_log; - -SELECT * FROM test.t1 order by a1; -SELECT * FROM test.t2 order by b1; -SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; -SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; - -DROP TABLE test.t1; -DROP TABLE test.t2; - -create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - -insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); -select distinct a from test.t1 group by b,a having a > 2 order by a desc; -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; -select distinct a from test.t1 group by b,a having a > 2 order by a asc; -select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; -drop table test.t1; - -create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); -select * from test.t1 where a >= '1' order by a; -select distinct a from test.t1 order by a desc; -select distinct a from test.t1 where a >= '1' order by a desc; -select distinct a from test.t1 where a >= '1' order by a asc; -drop table test.t1; - -CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; - -INSERT INTO test.t1 (email, infoID, dateentered) VALUES - ('test1@testdomain.com', 1, '2002-07-30 22:56:38'), - ('test1@testdomain.com', 1, '2002-07-27 22:58:16'), - ('test2@testdomain.com', 1, '2002-06-19 15:22:19'), - ('test2@testdomain.com', 2, '2002-06-18 14:23:47'), - ('test3@testdomain.com', 1, '2002-05-19 22:17:32'); - -INSERT INTO test.t2(infoID, shipcode) VALUES - (1, 'Z001'), - (2, 'R002'); - -SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; -SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; -SELECT DISTINCTROW email, shipcode FROM test.t1, 
test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; -drop table test.t1,test.t2; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts1 ENGINE NDB; - ALTER TABLESPACE ts2 - DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts2 ENGINE NDB; - DROP LOGFILE GROUP lg - ENGINE=NDB; -#################################################################### - - -#### Customer posted order by test case - -DROP TABLE IF EXISTS test.t; -create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; -insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); -insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); -select * from test.t order by f1; -select f1,f2 from test.t order by f2; -select f2 from test.t order by f2; -select f1,f2 from test.t order by f1; -drop table test.t; - -################## ALTER Tests (Meta data testing) #################### - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts - ADD DATAFILE './table_space/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -#### Try to ALTER from InnoDB to Cluster Disk Data - -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER from MyISAM to Cluster Disk Data - -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER from Cluster Disk Data to InnoDB - -CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 ENGINE=InnoDB; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER from Cluster Disk Data to MyISAM - -CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; -SHOW CREATE TABLE test.t1; -ALTER TABLE test.t1 ENGINE=MyISAM; -SHOW CREATE TABLE test.t1; -DROP TABLE test.t1; - -#### Try to ALTER DD Tables and add columns - -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; - -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; - -SHOW CREATE TABLE test.t1; - -#### Try to ALTER DD Tables and add Indexes - -ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6), -ADD INDEX (a7), ADD INDEX (a8); - -SHOW CREATE TABLE test.t1; - -DROP TABLE test.t1; - -#### Try to ALTER DD Tables and drop columns - -CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB; - -ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB; - -SHOW CREATE TABLE test.t1; - -ALTER TABLE test.t1 DROP a14; -ALTER TABLE test.t1 DROP a13; -ALTER TABLE test.t1 DROP a12; -ALTER TABLE test.t1 DROP a11; -ALTER TABLE test.t1 DROP a10; -ALTER TABLE test.t1 DROP a9; -ALTER TABLE test.t1 DROP a8; -ALTER TABLE test.t1 DROP a7; -ALTER TABLE test.t1 DROP a6; -ALTER TABLE test.t1 DROP PRIMARY KEY; 
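The drops above are deliberately issued one ALTER at a time so that each drop is exercised on its own. For reference, a single statement reaches the same end state; a sketch only, not part of the original test (MySQL accepts multiple DROP clauses in one ALTER):

    ALTER TABLE test.t1
      DROP a14, DROP a13, DROP a12, DROP a11, DROP a10,
      DROP a9, DROP a8, DROP a7, DROP a6,
      DROP PRIMARY KEY;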
- -SHOW CREATE TABLE test.t1; - -DROP TABLE test.t1; - - ALTER TABLESPACE ts - DROP DATAFILE './table_space/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts ENGINE NDB; - DROP LOGFILE GROUP lg - ENGINE=NDB; - -####################### End section 4 ######################### -#End 5.1 test case - diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test deleted file mode 100755 index c10a009c224..00000000000 --- a/mysql-test/t/ndb_dd_advance2.test +++ /dev/null @@ -1,723 +0,0 @@ -############################################################## -# Author: Nikolay -# Date: 2006-04-01 -# Purpose: Specific Blob and Varchar testing using disk tables. -############################################################## -# Create Stored procedures that use disk based tables. -# Create function that operate on disk based tables. -# Create triggers that operate on disk based tables. -# Create views that operate on disk based tables. -# Try to create FK constraints on disk based tables. -# Create and use disk based table that use auto inc. -# Create test that use transaction (commit, rollback) -# Create large disk base table, do random queries, check cache hits, do same -# query 10 times check cache hits. -# Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), ... other built in # SQL functions -# Create test that uses locks. -# Create test using truncate. -############################################################## - --- source include/have_ndb.inc - ---disable_warnings -DROP TABLE IF EXISTS test.t1; -DROP TABLE IF EXISTS test.t2; -DROP TABLE IF EXISTS test.t3; ---enable_warnings - -#### Copy data from table in one table space to table in different table space. #### ---echo ***** ---echo **** Copy data from table in one table space to table in different table space ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLESPACE ts2 - ADD DATAFILE './table_space2/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) - TABLESPACE ts2 STORAGE DISK ENGINE=NDB; - - SHOW CREATE TABLE test.t1; - SHOW CREATE TABLE test.t2; - - INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); - INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); - SELECT * FROM test.t1; - INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; - SELECT * FROM test.t2; - - DROP TABLE test.t1, test.t2; - - # populate BLOB field with large data - -set @vc1 = repeat('a', 200); -set @vc2 = repeat('b', 500); -set @vc3 = repeat('c', 1000); -set @vc4 = repeat('d', 4000); - -# x0 size 256 -set @x0 = '01234567012345670123456701234567'; -set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); - -# b1 length 2000+256 -set @b1 = 'b1'; -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); -set @b1 = concat(@b1,@x0); -# d1 length 3000 -set @d1 = 'dd1'; -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); -set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); - -# b2 length 20000 -set @b2 = 'b2'; 
-set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); -# d2 length 30000 -set @d2 = 'dd2'; -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); -set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); - -select length(@x0),length(@b1),length(@d1) from dual; -select length(@x0),length(@b2),length(@d2) from dual; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) - TABLESPACE ts2 STORAGE DISK ENGINE=NDB; - - SHOW CREATE TABLE test.t1; - SHOW CREATE TABLE test.t2; - - INSERT INTO test.t1 VALUES (1,@vc1,@d1); - INSERT INTO test.t1 VALUES (2,@vc2,@b1); - INSERT INTO test.t1 VALUES (3,@vc3,@d2); - INSERT INTO test.t1 VALUES (4,@vc4,@b2); - - SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) - FROM test.t1 WHERE a1=1; - SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) - FROM test.t1 where a1=2; - - INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; - SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) - FROM test.t2 WHERE a1=1; - SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) - FROM test.t2 where a1=2; - - - DROP TABLE test.t1, test.t2; - - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts1 ENGINE NDB; - - ALTER TABLESPACE ts2 - DROP DATAFILE './table_space2/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts2 ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Insert, Update, Delete from NDB table with BLOB fields #### ---echo ***** ---echo **** Insert, Update, Delete from NDB table with BLOB fields ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -set @vc1 = repeat('a', 200); -set @vc2 = repeat('b', 500); -set @vc3 = repeat('c', 1000); -set @vc4 = repeat('d', 4000); -set @vc5 = repeat('d', 5000); - -set @bb1 = repeat('1', 2000); -set @bb2 = repeat('2', 5000); -set @bb3 = repeat('3', 10000); -set @bb4 = repeat('4', 40000); -set @bb5 = repeat('5', 50000); - -select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; -select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; -# CREATE TABLE test.t2 (a1 int NOT NULL, a2 VARCHAR(5000), a3 BLOB) -# TABLESPACE ts2 STORAGE DISK ENGINE=NDB; - - INSERT INTO test.t1 VALUES (1,@vc1,@bb1); - INSERT INTO test.t1 VALUES (2,@vc2,@bb2); - INSERT INTO test.t1 VALUES (3,@vc3,@bb3); - INSERT INTO test.t1 VALUES (4,@vc4,@bb4); - INSERT INTO test.t1 VALUES (5,@vc5,@bb5); - - UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; - SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) - FROM test.t1 WHERE a1=1; - - UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; - SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) - FROM test.t1 WHERE a1=2; - - UPDATE 
test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3; - SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3) - FROM test.t1 WHERE a1=3; - - UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4; - SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3) - FROM test.t1 WHERE a1=4; - - UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5; - SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3) - FROM test.t1 WHERE a1=5; - - DELETE FROM test.t1 where a1=5; - SELECT count(*) from test.t1; - DELETE FROM test.t1 where a1=4; - SELECT count(*) from test.t1; - DELETE FROM test.t1 where a1=3; - SELECT count(*) from test.t1; - DELETE FROM test.t1 where a1=2; - SELECT count(*) from test.t1; - DELETE FROM test.t1 where a1=1; - SELECT count(*) from test.t1; - - DROP TABLE test.t1; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE NDB; - DROP TABLESPACE ts1 ENGINE NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -##### Create Stored procedures that use disk based tables ##### ---echo ***** ---echo **** Create Stored procedures that use disk based tables ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - -delimiter //; - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB// - CREATE PROCEDURE test.sp1() - BEGIN - INSERT INTO test.t1 values (1,'111111','aaaaaaaa'); - END// -delimiter ;// - - CALL test.sp1(); - SELECT * FROM test.t1; - -delimiter //; - CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB) - BEGIN - UPDATE test.t1 SET a2=vc, a3=blb where a1=n; - END// -delimiter ;// - - CALL test.sp2(1,'222222','bbbbbbbb'); - SELECT * FROM test.t1; - - DELETE FROM test.t1; - DROP PROCEDURE test.sp1; - DROP PROCEDURE test.sp2; - DROP TABLE test.t1; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create function that operate on disk based tables #### ---echo ***** ---echo ***** Create function that operate on disk based tables ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - let $1=100; - disable_query_log; - while ($1) - { - eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1"); - dec $1; - } - enable_query_log; - - delimiter //; - CREATE FUNCTION test.fn1(n INT) RETURNS INT - BEGIN - DECLARE v INT; - SELECT a1 INTO v FROM test.t1 WHERE a1=n; - RETURN v; - END// - delimiter ;// - -delimiter //; - CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB - BEGIN - DECLARE vv BLOB; - UPDATE test.t1 SET a3=blb where a1=n; - SELECT a3 INTO vv FROM test.t1 WHERE a1=n; - RETURN vv; - END// - delimiter ;// - - SELECT test.fn1(10) FROM DUAL; - SELECT test.fn2(50, 'new BLOB content') FROM DUAL; - - DELETE FROM test.t1; - DROP FUNCTION test.fn1; - DROP TABLE test.t1; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create 
triggers that operate on disk based tables #### ---echo ***** ---echo ***** Create triggers that operate on disk based tables ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - - delimiter //; - CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW - BEGIN - if isnull(new.a2) then - set new.a2:= 'trg1 works on a2 field'; - end if; - if isnull(new.a3) then - set new.a3:= 'trg1 works on a3 field'; - end if; - end// - insert into test.t1 (a1) values (1)// - insert into test.t1 (a1,a2) values (2, 'ccccccc')// - select * from test.t1// - delimiter ;// - - DELETE FROM test.t1; - DROP TRIGGER test.trg1; - DROP TABLE test.t1; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create, update views that operate on disk based tables #### ---echo ***** ---echo ***** Create, update views that operate on disk based tables ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) - TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - let $1=10; - disable_query_log; - while ($1) - { - eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1"); - dec $1; - } - enable_query_log; - CREATE VIEW test.v1 AS SELECT * FROM test.t1; - SELECT * FROM test.v1 order by a1; - CHECK TABLE test.v1, test.t1; - - UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5; - SELECT * FROM test.v1 order by a1; - - DROP VIEW test.v1; - DELETE FROM test.t1; - DROP TABLE test.t1; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create and use disk based table that use auto inc #### ---echo ***** ---echo ***** Create and use disk based table that use auto inc ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE './table_space1/datafile.dat' - USE LOGFILE GROUP lg - INITIAL_SIZE 12M - ENGINE NDB; - - CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; - let $1=10; - disable_query_log; - while ($1) - { - eval insert into test.t1 values(NULL, "aaaaa$1", "bbbbb$1"); - dec $1; - } - enable_query_log; - SELECT * FROM test.t1 ORDER BY a1; - DELETE FROM test.t1; - DROP TABLE test.t1; - - ALTER TABLESPACE ts1 - DROP DATAFILE './table_space1/datafile.dat' - ENGINE=NDB; - DROP TABLESPACE ts1 ENGINE=NDB; - - DROP LOGFILE GROUP lg - ENGINE=NDB; - -#### Create test that use transaction (commit, rollback) #### ---echo ***** ---echo ***** Create test that use transaction (commit, rollback) ---echo ***** - - CREATE LOGFILE GROUP lg - ADD UNDOFILE './lg_group/undofile.dat' - INITIAL_SIZE 16M - UNDO_BUFFER_SIZE = 1M - ENGINE=NDB; - - CREATE TABLESPACE ts1 - ADD DATAFILE 
- ADD DATAFILE './table_space1/datafile.dat'
- USE LOGFILE GROUP lg
- INITIAL_SIZE 12M
- ENGINE NDB;
-
- SET AUTOCOMMIT=0;
- CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
-
- INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
- COMMIT;
- SELECT * FROM test.t1 ORDER BY a1;
- INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
- ROLLBACK;
- SELECT * FROM test.t1 ORDER BY a1;
-
- DELETE FROM test.t1;
- DROP TABLE test.t1;
- SET AUTOCOMMIT=1;
-
-# Now do the same thing with START TRANSACTION without using AUTOCOMMIT.
-
- CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
-
- START TRANSACTION;
- INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
- COMMIT;
- SELECT * FROM test.t1 ORDER BY a1;
-
- START TRANSACTION;
- INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
- ROLLBACK;
- SELECT * FROM test.t1 ORDER BY a1;
-
- DELETE FROM test.t1;
- DROP TABLE test.t1;
-
- ALTER TABLESPACE ts1
- DROP DATAFILE './table_space1/datafile.dat'
- ENGINE=NDB;
- DROP TABLESPACE ts1 ENGINE=NDB;
-
- DROP LOGFILE GROUP lg
- ENGINE=NDB;
-
-#### Create test that uses locks ####
---echo *****
---echo ***** Create test that uses locks
---echo *****
-
- connect (con1,localhost,root,,);
- connect (con2,localhost,root,,);
-
- CREATE LOGFILE GROUP lg
- ADD UNDOFILE './lg_group/undofile.dat'
- INITIAL_SIZE 16M
- UNDO_BUFFER_SIZE = 1M
- ENGINE=NDB;
-
- CREATE TABLESPACE ts1
- ADD DATAFILE './table_space1/datafile.dat'
- USE LOGFILE GROUP lg
- INITIAL_SIZE 12M
- ENGINE NDB;
-
-# connection con1;
---disable_warnings
- drop table if exists test.t1;
- CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
---enable_warnings
-
- LOCK TABLES test.t1 write;
- INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
- INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
- SELECT * FROM test.t1 ORDER BY a1;
-
- connection con2;
- SELECT * FROM test.t1 ORDER BY a1;
- INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
-
- connection con1;
- UNLOCK TABLES;
-
- connection con2;
- INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
- SELECT * FROM test.t1 ORDER BY a1;
- DELETE FROM test.t1;
- DROP TABLE test.t1;
-
- # connection default;
-
- ALTER TABLESPACE ts1
- DROP DATAFILE './table_space1/datafile.dat'
- ENGINE=NDB;
- DROP TABLESPACE ts1 ENGINE=NDB;
-
- DROP LOGFILE GROUP lg
- ENGINE=NDB;
-
-#### Create large disk base table, do random queries, check cache hits ####
---echo *****
---echo ***** Create large disk base table, do random queries, check cache hits
---echo *****
-
-set @vc1 = repeat('a', 200);
-SELECT @vc1 FROM DUAL;
-set @vc2 = repeat('b', 500);
-set @vc3 = repeat('b', 998);
-
-# x0 size 256
-set @x0 = '01234567012345670123456701234567';
-set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
-
-# b1 length 2000+256 (blob part aligned)
-set @b1 = 'b1';
-set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
-set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
-set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
-set @b1 = concat(@b1,@x0);
-# d1 length 3000
-set @d1 = 'dd1';
-set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
-set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
-set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
-
-# b2 length 20000
-set @b2 = 'b2';
-set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
-set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
-set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
-set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
-# d2 length 30000
-set @d2 = 'dd2';
-set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
-set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
-set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
-set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
-
-select length(@x0),length(@b1),length(@d1) from dual;
-select length(@x0),length(@b2),length(@d2) from dual;
-
- CREATE LOGFILE GROUP lg
- ADD UNDOFILE './lg_group/undofile.dat'
- INITIAL_SIZE 16M
- UNDO_BUFFER_SIZE = 1M
- ENGINE=NDB;
-
- CREATE TABLESPACE ts1
- ADD DATAFILE './table_space1/datafile.dat'
- USE LOGFILE GROUP lg
- INITIAL_SIZE 12M
- ENGINE NDB;
-
- CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB)
- TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
-
- INSERT INTO test.t1 values(1,@vc1,@d1);
- INSERT INTO test.t1 values(2,@vc2,@d2);
- explain SELECT * from test.t1 WHERE a1 = 1;
-
- SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
- FROM test.t1 WHERE a1=1 ORDER BY a1;
- SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
- FROM test.t1 where a1=2 ORDER BY a1;
-
- UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1;
- UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2;
-
- SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
- FROM test.t1 where a1=1;
- SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
- FROM test.t1 where a1=2;
-
- #SHOW VARIABLES LIKE 'have_query_cache';
- #SHOW STATUS LIKE 'Qcache%';
-
- DELETE FROM test.t1;
- DROP TABLE test.t1;
-
- ALTER TABLESPACE ts1
- DROP DATAFILE './table_space1/datafile.dat'
- ENGINE=NDB;
- DROP TABLESPACE ts1 ENGINE=NDB;
-
- DROP LOGFILE GROUP lg
- ENGINE=NDB;
-
-#### Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE ####
---echo *****
---echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE
---echo *****
-
- CREATE LOGFILE GROUP lg
- ADD UNDOFILE './lg_group/undofile.dat'
- INITIAL_SIZE 16M
- UNDO_BUFFER_SIZE = 1M
- ENGINE=NDB;
-
- CREATE TABLESPACE ts1
- ADD DATAFILE './table_space1/datafile.dat'
- USE LOGFILE GROUP lg
- INITIAL_SIZE 12M
- ENGINE NDB;
-
- CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250))
- TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
- let $1=100;
- disable_query_log;
- while ($1)
- {
- eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER());
- dec $1;
- }
- enable_query_log;
-
- SELECT COUNT(*) from test.t1;
- SELECT SUM(a1) from test.t1;
- SELECT MIN(a1) from test.t1;
- SELECT MAX(a1) from test.t1;
- SELECT a5 from test.t1 where a1=50;
-
-
- SELECT * from test.t1 order by a1;
-
- DROP TABLE test.t1;
-
- ALTER TABLESPACE ts1
- DROP DATAFILE './table_space1/datafile.dat'
- ENGINE=NDB;
- DROP TABLESPACE ts1 ENGINE=NDB;
-
- DROP LOGFILE GROUP lg
- ENGINE=NDB;
-
-
-#End 5.1 test case
-

From 55a9bab25e9f2876fc3d8962a4e569b01cfbb97c Mon Sep 17 00:00:00 2001
From: "tomas@poseidon.ndb.mysql.com" <>
Date: Thu, 6 Jul 2006 01:33:53 +0200
Subject: [PATCH 54/74] Bug #20843 tests fails randomly with assertion in
 completeClusterFailed
 - flush gci needs to be reset on disconnect as cluster may reconnect
 after --initial with a smaller gci

---
 mysql-test/t/disabled.def                        | 1 -
 storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp | 4 ++++
 2 files changed, 4 insertions(+), 1 deletion(-)

diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def
index 5acf93d4843..ebe61e1af4a 100644
--- a/mysql-test/t/disabled.def
+++ b/mysql-test/t/disabled.def
@@ -18,7 +18,6 @@
 #im_life_cycle : Bug#20368 2006-06-10 alik im_life_cycle test fails
 ndb_autodiscover : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
 ndb_autodiscover2 : BUG#18952 2006-02-16 jmiller Needs to be fixed w.r.t binlog
-ndb_autodiscover3 : BUG#20872 2006-07-05 tomas ndb_autodiscover3 fails randomly
 ndb_load : BUG#17233 2006-05-04 tomas failed load data from infile causes mysqld dbug_assert, binlog not flushed
 partition_03ndb : BUG#16385 2006-03-24 mikael Partitions: crash when updating a range partitioned NDB table
 ps_7ndb : BUG#18950 2006-02-16 jmiller create table like does not obtain LOCK_open
diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
index 5cf974b6467..06b0d7ea5b9 100644
--- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
+++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp
@@ -1658,6 +1658,10 @@ NdbEventBuffer::completeClusterFailed()
   data.logType = SubTableData::LOG;
   data.gci = m_latestGCI + 1;
 
+#ifdef VM_TRACE
+  m_flush_gci = 0;
+#endif
+
   /**
    * Insert this event for each operation
    */
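The fix above is a one-liner and the commit message compresses the reasoning, so here is the idea spelled out. The C++ sketch below is an illustrative reconstruction, not the actual NDB API source: the class `EventBuffer`, its standalone layout, and the helper `gciCompleted` are invented stand-ins; only the names `completeClusterFailed`, `m_latestGCI`, and `m_flush_gci` come from the hunk itself, where the reset is additionally guarded by `#ifdef VM_TRACE` (it defuses a debug-build assertion). A cluster restarted with --initial begins numbering global checkpoints (GCIs) from a small value again, so a flush watermark carried over from the previous incarnation might never be reached; clearing it on disconnect avoids the random assertion failures reported as Bug #20843.

    #include <cstdint>

    // Illustrative sketch only -- a simplified stand-in for NdbEventBuffer,
    // not the real implementation.
    class EventBuffer {
    public:
      void completeClusterFailed() {
        // Mirror of "data.gci = m_latestGCI + 1" in the hunk: the failure
        // event is stamped one GCI past the newest data seen.
        const std::uint64_t failure_gci = m_latestGCI + 1;
        static_cast<void>(failure_gci); // the real code queues it per operation

        // The actual fix: forget the pending flush watermark on disconnect,
        // since the reconnected cluster may restart at a smaller GCI.
        m_flush_gci = 0;
      }

      // Called as each GCI completes. Without the reset above, a watermark
      // inherited from the previous cluster incarnation could exceed every
      // GCI the restarted cluster will ever deliver.
      void gciCompleted(std::uint64_t gci) {
        if (m_flush_gci != 0 && gci >= m_flush_gci) {
          m_flush_gci = 0; // requested flush is now complete
        }
      }

    private:
      std::uint64_t m_latestGCI = 0; // newest global checkpoint index seen
      std::uint64_t m_flush_gci = 0; // flush watermark; 0 means none pending
    };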

From 9323799bb4fb799d756bf5cc461c609ad4b719cb Mon Sep 17 00:00:00 2001
From: "ngrishakin@mysql.com" <>
Date: Thu, 6 Jul 2006 04:37:03 +0200
Subject: [PATCH 55/74] ndb_dd_advance test cases

---
 mysql-test/r/ndb_dd_advance.result  | 1088 +++++++++++++++++++++
 mysql-test/r/ndb_dd_advance2.result |  745 ++++++++++++++++++
 mysql-test/t/ndb_dd_advance.test    |  630 ++++++++++++++++
 mysql-test/t/ndb_dd_advance2.test   |  723 ++++++++++++++++++
 4 files changed, 3186 insertions(+)
 create mode 100644 mysql-test/r/ndb_dd_advance.result
 create mode 100644 mysql-test/r/ndb_dd_advance2.result
 create mode 100755 mysql-test/t/ndb_dd_advance.test
 create mode 100755 mysql-test/t/ndb_dd_advance2.test

diff --git a/mysql-test/r/ndb_dd_advance.result b/mysql-test/r/ndb_dd_advance.result
new file mode 100644
index 00000000000..09fe75805d5
--- /dev/null
+++ b/mysql-test/r/ndb_dd_advance.result
@@ -0,0 +1,1088 @@
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+**** Test Setup Section ****
+CREATE LOGFILE GROUP log_group1
+ADD UNDOFILE './log_group1/undofile.dat'
+INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE table_space1
+ADD DATAFILE './table_space1/datafile.dat'
+USE LOGFILE GROUP log_group1
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1
+(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL)
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+CREATE TABLE test.t2
+(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL)
+ENGINE=NDB;
+
+**** Data load for first test ****
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+INSERT INTO test.t2 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45),
+(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50),
+(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55),
+(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60),
+(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65),
+(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70),
+(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75);
+
+*** Test 1 Section Begins ***
+SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4);
+COUNT(*)
+1
+SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4);
+pk2 b2 c2 pk1 b c
+4 4 4 4 4 4
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4);
+COUNT(*)
+1
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75);
+COUNT(*)
+1
+SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b;
+b c
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+16 16
+17 17
+18 18
+19 19
+20 20
+21 21
+22 22
+23 23
+24 24
+25 25
+26 26
+27 27
+28 28
+29 29
+30 30
+31 31
+32 32
+33 33
+34 34
+35 35
+36 36
+37 37
+38 38
+39 39
+40 40
+41 41
+42 42
+43 43
+44 44
+45 45
+46 46
+47 47
+48 48
+49 49
+50 50
+51 51
+52 52
+53 53
+54 54
+55 55
+56 56
+57 57
+58 58
+59 59
+60 60
+61 61
+62 62
+63 63
+64 64
+65 65
+66 66
+67 67
+68 68
+69 69
+70 70
+71 71
+72 72
+73 73
+74 74
+75 75
+
+*** Setup for test 2 ****
+DELETE FROM test.t1;
+INSERT INTO test.t1 VALUES
+(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),
+(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10),
+(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15),
+(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20),
+(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25),
+(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30),
+(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35),
+(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40),
+(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45);
+
+**** Test Section 2 ****
+SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b;
+b c
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+16 16
+17 17
+18 18
+19 19
+20 20
+21 21
+22 22
+23 23
+24 24
+25 25
+26 26
+27 27
+28 28
+29 29
+30 30
+31 31
+32 32
+33 33
+34 34
+35 35
+36 36
+37 37
+38 38
+39 39
+40 40
+41 41
+42 42
+43 43
+44 44
+45 45
+SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2;
+COUNT(*)
+45
+SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2;
+COUNT(*)
+75
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk2` int(11) NOT NULL,
+ `b2` int(11) NOT NULL,
+ `c2` int(11) NOT NULL,
+ PRIMARY KEY (`pk2`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk1` int(11) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` int(11) NOT NULL,
+ PRIMARY KEY (`pk1`)
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `pk2` int(11) NOT NULL,
+ `b2` int(11) NOT NULL,
+ `c2` int(11) NOT NULL,
+ PRIMARY KEY (`pk2`)
+) TABLESPACE table_space1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ENGINE=NDBCLUSTER;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `pk1` int(11) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` int(11) NOT NULL,
+ PRIMARY KEY (`pk1`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+*** Setup for Test Section 3 ***
+CREATE TABLE test.t1 (
+usr_id INT unsigned NOT NULL,
+uniq_id INT unsigned NOT NULL AUTO_INCREMENT,
+start_num INT unsigned NOT NULL DEFAULT 1,
+increment INT unsigned NOT NULL DEFAULT 1,
+PRIMARY KEY (uniq_id),
+INDEX usr_uniq_idx (usr_id, uniq_id),
+INDEX uniq_usr_idx (uniq_id, usr_id))
+TABLESPACE table_space1 STORAGE DISK
+ENGINE=NDB;
+CREATE TABLE test.t2 (
+id INT unsigned NOT NULL DEFAULT 0,
+usr2_id INT unsigned NOT NULL DEFAULT 0,
+max INT unsigned NOT NULL DEFAULT 0,
+c_amount INT unsigned NOT NULL DEFAULT 0,
+d_max INT unsigned NOT NULL DEFAULT 0,
+d_num INT unsigned NOT NULL DEFAULT 0,
+orig_time INT unsigned NOT NULL DEFAULT 0,
+c_time INT unsigned NOT NULL DEFAULT 0,
+active ENUM ("no","yes") NOT NULL,
+PRIMARY KEY (id,usr2_id),
+INDEX id_idx (id),
+INDEX usr2_idx (usr2_id))
+ENGINE=NDB;
+INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198);
+
+**** Test Section 3 ****
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+usr_id uniq_id increment usr2_id c_amount max
+3 4 84676 NULL NULL NULL
+INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes');
+INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes');
+SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment,
+test.t2.usr2_id,test.t2.c_amount,test.t2.max
+FROM test.t1
+LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id
+WHERE test.t1.uniq_id = 4
+ORDER BY test.t2.c_amount;
+usr_id uniq_id increment usr2_id c_amount max
+3 4 84676 3 6000 3000
+
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+ALTER TABLESPACE table_space1
+DROP DATAFILE './table_space1/datafile.dat'
+ENGINE = NDB;
+DROP TABLESPACE table_space1
+ENGINE = NDB;
+DROP LOGFILE GROUP log_group1
+ENGINE =NDB;
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLESPACE ts2
+ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int)
+ENGINE=NDB;
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a` int(11) NOT NULL,
+ `b` int(11) DEFAULT NULL,
+ PRIMARY KEY (`a`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+INSERT INTO t1 VALUES (1,1);
+INSERT INTO t1 VALUES (2,2);
+SELECT * FROM t1 order by a;
+a b
+1 1
+2 2
+INSERT INTO t2(a,b) SELECT * FROM t1;
+SELECT * FROM t2 order by a;
+a b
+1 1
+2 2
+TRUNCATE t1;
+TRUNCATE t2;
+INSERT INTO t2 VALUES (3,3);
+INSERT INTO t2 VALUES (4,4);
+INSERT INTO t1(a,b) SELECT * FROM t2;
+SELECT * FROM t1 order by a;
+a b
+3 3
+4 4
+DROP TABLE t1, t2;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts2 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts
+ADD DATAFILE './datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t (
+a smallint NOT NULL,
+b int NOT NULL,
+c bigint NOT NULL,
+d char(10),
+e TEXT,
+f VARCHAR(255),
+PRIMARY KEY(a)
+) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+SHOW CREATE TABLE test.t;
+Table Create Table
+t CREATE TABLE `t` (
+ `a` smallint(6) NOT NULL,
+ `b` int(11) NOT NULL,
+ `c` bigint(20) NOT NULL,
+ `d` char(10) DEFAULT NULL,
+ `e` text,
+ `f` varchar(255) DEFAULT NULL,
+ PRIMARY KEY (`a`),
+ KEY `d` (`d`),
+ KEY `f` (`f`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SELECT * FROM test.t order by a;
+a b c d e f
+1 2 3 aaa1 bbb1 ccccc1
+2 3 4 aaa2 bbb2 ccccc2
+3 4 5 aaa3 bbb3 ccccc3
+4 5 6 aaa4 bbb4 ccccc4
+5 6 7 aaa5 bbb5 ccccc5
+6 7 8 aaa6 bbb6 ccccc6
+7 8 9 aaa7 bbb7 ccccc7
+8 9 10 aaa8 bbb8 ccccc8
+9 10 11 aaa9 bbb9 ccccc9
+10 11 12 aaa10 bbb10 ccccc10
+11 12 13 aaa11 bbb11 ccccc11
+12 13 14 aaa12 bbb12 ccccc12
+13 14 15 aaa13 bbb13 ccccc13
+14 15 16 aaa14 bbb14 ccccc14
+15 16 17 aaa15 bbb15 ccccc15
+16 17 18 aaa16 bbb16 ccccc16
+17 18 19 aaa17 bbb17 ccccc17
+18 19 20 aaa18 bbb18 ccccc18
+19 20 21 aaa19 bbb19 ccccc19
+20 21 22 aaa20 bbb20 ccccc20
+21 22 23 aaa21 bbb21 ccccc21
+22 23 24 aaa22 bbb22 ccccc22
+23 24 25 aaa23 bbb23 ccccc23
+24 25 26 aaa24 bbb24 ccccc24
+25 26 27 aaa25 bbb25 ccccc25
+26 27 28 aaa26 bbb26 ccccc26
+27 28 29 aaa27 bbb27 ccccc27
+28 29 30 aaa28 bbb28 ccccc28
+29 30 31 aaa29 bbb29 ccccc29
+30 31 32 aaa30 bbb30 ccccc30
+31 32 33 aaa31 bbb31 ccccc31
+32 33 34 aaa32 bbb32 ccccc32
+33 34 35 aaa33 bbb33 ccccc33
+34 35 36 aaa34 bbb34 ccccc34
+35 36 37 aaa35 bbb35 ccccc35
+36 37 38 aaa36 bbb36 ccccc36
+37 38 39 aaa37 bbb37 ccccc37
+38 39 40 aaa38 bbb38 ccccc38
+39 40 41 aaa39 bbb39 ccccc39
+40 41 42 aaa40 bbb40 ccccc40
+41 42 43 aaa41 bbb41 ccccc41
+42 43 44 aaa42 bbb42 ccccc42
+43 44 45 aaa43 bbb43 ccccc43
+44 45 46 aaa44 bbb44 ccccc44
+45 46 47 aaa45 bbb45 ccccc45
+46 47 48 aaa46 bbb46 ccccc46
+47 48 49 aaa47 bbb47 ccccc47
+48 49 50 aaa48 bbb48 ccccc48
+49 50 51 aaa49 bbb49 ccccc49
+50 51 52 aaa50 bbb50 ccccc50
+51 52 53 aaa51 bbb51 ccccc51
+52 53 54 aaa52 bbb52 ccccc52
+53 54 55 aaa53 bbb53 ccccc53
+54 55 56 aaa54 bbb54 ccccc54
+55 56 57 aaa55 bbb55 ccccc55
+56 57 58 aaa56 bbb56 ccccc56
+57 58 59 aaa57 bbb57 ccccc57
+58 59 60 aaa58 bbb58 ccccc58
+59 60 61 aaa59 bbb59 ccccc59
+60 61 62 aaa60 bbb60 ccccc60
+61 62 63 aaa61 bbb61 ccccc61
+62 63 64 aaa62 bbb62 ccccc62
+63 64 65 aaa63 bbb63 ccccc63
+64 65 66 aaa64 bbb64 ccccc64
+65 66 67 aaa65 bbb65 ccccc65
+66 67 68 aaa66 bbb66 ccccc66
+67 68 69 aaa67 bbb67 ccccc67
+68 69 70 aaa68 bbb68 ccccc68
+69 70 71 aaa69 bbb69 ccccc69
+70 71 72 aaa70 bbb70 ccccc70
+71 72 73 aaa71 bbb71 ccccc71
+72 73 74 aaa72 bbb72 ccccc72
+73 74 75 aaa73 bbb73 ccccc73
+74 75 76 aaa74 bbb74 ccccc74
+75 76 77 aaa75 bbb75 ccccc75
+76 77 78 aaa76 bbb76 ccccc76
+77 78 79 aaa77 bbb77 ccccc77
+78 79 80 aaa78 bbb78 ccccc78
+79 80 81 aaa79 bbb79 ccccc79
+80 81 82 aaa80 bbb80 ccccc80
+81 82 83 aaa81 bbb81 ccccc81
+82 83 84 aaa82 bbb82 ccccc82
+83 84 85 aaa83 bbb83 ccccc83
+84 85 86 aaa84 bbb84 ccccc84
+85 86 87 aaa85 bbb85 ccccc85
+86 87 88 aaa86 bbb86 ccccc86
+87 88 89 aaa87 bbb87 ccccc87
+88 89 90 aaa88 bbb88 ccccc88
+89 90 91 aaa89 bbb89 ccccc89
+90 91 92 aaa90 bbb90 ccccc90
+91 92 93 aaa91 bbb91 ccccc91
+92 93 94 aaa92 bbb92 ccccc92
+93 94 95 aaa93 bbb93 ccccc93
+94 95 96 aaa94 bbb94 ccccc94
+95 96 97 aaa95 bbb95 ccccc95
+96 97 98 aaa96 bbb96 ccccc96
+97 98 99 aaa97 bbb97 ccccc97
+98 99 100 aaa98 bbb98 ccccc98
+99 100 101 aaa99 bbb99 ccccc99
+100 101 102 aaa100 bbb100 ccccc100
+DROP TABLE test.t;
+USE test;
+show tables;
+Tables_in_test
+t
+SELECT * FROM test.t order by a;
+a b c d e f
+1 2 3 aaa1 bbb1 ccccc1
+2 3 4 aaa2 bbb2 ccccc2
+3 4 5 aaa3 bbb3 ccccc3
+4 5 6 aaa4 bbb4 ccccc4
+5 6 7 aaa5 bbb5 ccccc5
+6 7 8 aaa6 bbb6 ccccc6
+7 8 9 aaa7 bbb7 ccccc7
+8 9 10 aaa8 bbb8 ccccc8
+9 10 11 aaa9 bbb9 ccccc9
+10 11 12 aaa10 bbb10 ccccc10
+11 12 13 aaa11 bbb11 ccccc11
+12 13 14 aaa12 bbb12 ccccc12
+13 14 15 aaa13 bbb13 ccccc13
+14 15 16 aaa14 bbb14 ccccc14
+15 16 17 aaa15 bbb15 ccccc15
+16 17 18 aaa16 bbb16 ccccc16
+17 18 19 aaa17 bbb17 ccccc17
+18 19 20 aaa18 bbb18 ccccc18
+19 20 21 aaa19 bbb19 ccccc19
+20 21 22 aaa20 bbb20 ccccc20
+21 22 23 aaa21 bbb21 ccccc21
+22 23 24 aaa22 bbb22 ccccc22
+23 24 25 aaa23 bbb23 ccccc23
+24 25 26 aaa24 bbb24 ccccc24
+25 26 27 aaa25 bbb25 ccccc25
+26 27 28 aaa26 bbb26 ccccc26
+27 28 29 aaa27 bbb27 ccccc27
+28 29 30 aaa28 bbb28 ccccc28
+29 30 31 aaa29 bbb29 ccccc29
+30 31 32 aaa30 bbb30 ccccc30
+31 32 33 aaa31 bbb31 ccccc31
+32 33 34 aaa32 bbb32 ccccc32
+33 34 35 aaa33 bbb33 ccccc33
+34 35 36 aaa34 bbb34 ccccc34
+35 36 37 aaa35 bbb35 ccccc35
+36 37 38 aaa36 bbb36 ccccc36
+37 38 39 aaa37 bbb37 ccccc37
+38 39 40 aaa38 bbb38 ccccc38
+39 40 41 aaa39 bbb39 ccccc39
+40 41 42 aaa40 bbb40 ccccc40
+41 42 43 aaa41 bbb41 ccccc41
+42 43 44 aaa42 bbb42 ccccc42
+43 44 45 aaa43 bbb43 ccccc43
+44 45 46 aaa44 bbb44 ccccc44
+45 46 47 aaa45 bbb45 ccccc45
+46 47 48 aaa46 bbb46 ccccc46
+47 48 49 aaa47 bbb47 ccccc47
+48 49 50 aaa48 bbb48 ccccc48
+49 50 51 aaa49 bbb49 ccccc49
+50 51 52 aaa50 bbb50 ccccc50
+51 52 53 aaa51 bbb51 ccccc51
+52 53 54 aaa52 bbb52 ccccc52
+53 54 55 aaa53 bbb53 ccccc53
+54 55 56 aaa54 bbb54 ccccc54
+55 56 57 aaa55 bbb55 ccccc55
+56 57 58 aaa56 bbb56 ccccc56
+57 58 59 aaa57 bbb57 ccccc57
+58 59 60 aaa58 bbb58 ccccc58
+59 60 61 aaa59 bbb59 ccccc59
+60 61 62 aaa60 bbb60 ccccc60
+61 62 63 aaa61 bbb61 ccccc61
+62 63 64 aaa62 bbb62 ccccc62
+63 64 65 aaa63 bbb63 ccccc63
+64 65 66 aaa64 bbb64 ccccc64
+65 66 67 aaa65 bbb65 ccccc65
+66 67 68 aaa66 bbb66 ccccc66
+67 68 69 aaa67 bbb67 ccccc67
+68 69 70 aaa68 bbb68 ccccc68
+69 70 71 aaa69 bbb69 ccccc69
+70 71 72 aaa70 bbb70 ccccc70
+71 72 73 aaa71 bbb71 ccccc71
+72 73 74 aaa72 bbb72 ccccc72
+73 74 75 aaa73 bbb73 ccccc73
+74 75 76 aaa74 bbb74 ccccc74
+75 76 77 aaa75 bbb75 ccccc75
+76 77 78 aaa76 bbb76 ccccc76
+77 78 79 aaa77 bbb77 ccccc77
+78 79 80 aaa78 bbb78 ccccc78
+79 80 81 aaa79 bbb79 ccccc79
+80 81 82 aaa80 bbb80 ccccc80
+81 82 83 aaa81 bbb81 ccccc81
+82 83 84 aaa82 bbb82 ccccc82
+83 84 85 aaa83 bbb83 ccccc83
+84 85 86 aaa84 bbb84 ccccc84
+85 86 87 aaa85 bbb85 ccccc85
+86 87 88 aaa86 bbb86 ccccc86
+87 88 89 aaa87 bbb87 ccccc87
+88 89 90 aaa88 bbb88 ccccc88
+89 90 91 aaa89 bbb89 ccccc89
+90 91 92 aaa90 bbb90 ccccc90
+91 92 93 aaa91 bbb91 ccccc91
+92 93 94 aaa92 bbb92 ccccc92
+93 94 95 aaa93 bbb93 ccccc93
+94 95 96 aaa94 bbb94 ccccc94
+95 96 97 aaa95 bbb95 ccccc95
+96 97 98 aaa96 bbb96 ccccc96
+97 98 99 aaa97 bbb97 ccccc97
+98 99 100 aaa98 bbb98 ccccc98
+99 100 101 aaa99 bbb99 ccccc99
+100 101 102 aaa100 bbb100 ccccc100
+DROP TABLE test.t;
+ALTER TABLESPACE ts
+DROP DATAFILE './datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+DROP table IF EXISTS test.t1;
+Warnings:
+Note 1051 Unknown table 't1'
+DROP table IF EXISTS test.t2;
+Warnings:
+Note 1051 Unknown table 't2'
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLESPACE ts2
+ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (
+a1 smallint NOT NULL,
+a2 int NOT NULL,
+a3 bigint NOT NULL,
+a4 char(10),
+a5 decimal(5,1),
+a6 time,
+a7 date,
+a8 datetime,
+a9 VARCHAR(255),
+a10 blob,
+PRIMARY KEY(a1)
+) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8);
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` smallint(6) NOT NULL,
+ `a2` int(11) NOT NULL,
+ `a3` bigint(20) NOT NULL,
+ `a4` char(10) DEFAULT NULL,
+ `a5` decimal(5,1) DEFAULT NULL,
+ `a6` time DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` datetime DEFAULT NULL,
+ `a9` varchar(255) DEFAULT NULL,
+ `a10` blob,
+ PRIMARY KEY (`a1`),
+ KEY `a2` (`a2`),
+ KEY `a3` (`a3`),
+ KEY `a8` (`a8`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+CREATE TABLE test.t2 (
+b1 smallint NOT NULL,
+b2 int NOT NULL,
+b3 bigint NOT NULL,
+b4 char(10),
+b5 decimal(5,1),
+b6 time,
+b7 date,
+b8 datetime,
+b9 VARCHAR(255),
+b10 blob,
+PRIMARY KEY(b1)
+) ENGINE=NDB;
+ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8);
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `b1` smallint(6) NOT NULL,
+ `b2` int(11) NOT NULL,
+ `b3` bigint(20) NOT NULL,
+ `b4` char(10) DEFAULT NULL,
+ `b5` decimal(5,1) DEFAULT NULL,
+ `b6` time DEFAULT NULL,
+ `b7` date DEFAULT NULL,
+ `b8` datetime DEFAULT NULL,
+ `b9` varchar(255) DEFAULT NULL,
+ `b10` blob,
+ PRIMARY KEY (`b1`),
+ KEY `b2` (`b2`),
+ KEY `b3` (`b3`),
+ KEY `b8` (`b8`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SELECT * FROM test.t1 order by a1;
+a1 a2 a3 a4 a5 a6 a7 a8 a9 a10
+1 2 2000000001 aaa1 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
+2 3 2000000002 aaa2 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data
+3 4 2000000003 aaa3 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data
+4 5 2000000004 aaa4 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data
+5 6 2000000005 aaa5 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data
+6 7 2000000006 aaa6 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data
+7 8 2000000007 aaa7 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data
+8 9 2000000008 aaa8 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data
+9 10 2000000009 aaa9 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data
+10 11 2000000010 aaa10 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data
+11 12 2000000011 aaa11 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data
+12 13 2000000012 aaa12 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data
+13 14 2000000013 aaa13 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data
+14 15 2000000014 aaa14 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data
+15 16 2000000015 aaa15 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data
+16 17 2000000016 aaa16 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data
+17 18 2000000017 aaa17 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data
+18 19 2000000018 aaa18 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data
+19 20 2000000019 aaa19 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data
+20 21 2000000020 aaa20 34.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data
+SELECT * FROM test.t2 order by b1;
+b1 b2 b3 b4 b5 b6 b7 b8 b9 b10
+3 4 3000000001 aaa1 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb1 binary data
+4 5 3000000002 aaa2 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb2 binary data
+5 6 3000000003 aaa3 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb3 binary data
+6 7 3000000004 aaa4 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb4 binary data
+7 8 3000000005 aaa5 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb5 binary data
+8 9 3000000006 aaa6 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb6 binary data
+9 10 3000000007 aaa7 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb7 binary data
+10 11 3000000008 aaa8 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb8 binary data
+11 12 3000000009 aaa9 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb9 binary data
+12 13 3000000010 aaa10 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb10 binary data
+13 14 3000000011 aaa11 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb11 binary data
+14 15 3000000012 aaa12 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb12 binary data
+15 16 3000000013 aaa13 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb13 binary data
+16 17 3000000014 aaa14 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb14 binary data
+17 18 3000000015 aaa15 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb15 binary data
+18 19 3000000016 aaa16 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb16 binary data
+19 20 3000000017 aaa17 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb17 binary data
+20 21 3000000018 aaa18 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb18 binary data
+21 22 3000000019 aaa19 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb19 binary data
+22 23 3000000020 aaa20 35.2 04:03:02 2006-01-01 1971-05-28 16:55:03 bbbbbbbbbbbbb20 binary data
+SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1;
+COUNT(a1) a1 COUNT(a1)*a1
+1 1 1
+1 2 2
+1 3 3
+1 4 4
+1 5 5
+1 6 6
+1 7 7
+1 8 8
+1 9 9
+1 10 10
+1 11 11
+1 12 12
+1 13 13
+1 14 14
+1 15 15
+1 16 16
+1 17 17
+1 18 18
+1 19 19
+1 20 20
+SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2;
+COUNT(a2) (a2+1) COUNT(a2)*(a2+0)
+1 3 2
+1 4 3
+1 5 4
+1 6 5
+1 7 6
+1 8 7
+1 9 8
+1 10 9
+1 11 10
+1 12 11
+1 13 12
+1 14 13
+1 15 14
+1 16 15
+1 17 16
+1 18 17
+1 19 18
+1 20 19
+1 21 20
+1 22 21
+DROP TABLE test.t1;
+DROP TABLE test.t2;
+create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4);
+select distinct a from test.t1 group by b,a having a > 2 order by a desc;
+a
+4
+3
+select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc;
+a c
+4 NULL
+3 NULL
+select distinct a from test.t1 group by b,a having a > 2 order by a asc;
+a
+3
+4
+select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc;
+a c
+3 NULL
+4 NULL
+drop table test.t1;
+create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3');
+select * from test.t1 where a >= '1' order by a;
+a
+1
+1
+1
+2
+2
+3
+3
+select distinct a from test.t1 order by a desc;
+a
+3
+2
+1
+select distinct a from test.t1 where a >= '1' order by a desc;
+a
+3
+2
+1
+select distinct a from test.t1 where a >= '1' order by a asc;
+a
+1
+2
+3
+drop table test.t1;
+CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB;
+INSERT INTO test.t1 (email, infoID, dateentered) VALUES
+('test1@testdomain.com', 1, '2002-07-30 22:56:38'),
+('test1@testdomain.com', 1, '2002-07-27 22:58:16'),
+('test2@testdomain.com', 1, '2002-06-19 15:22:19'),
+('test2@testdomain.com', 2, '2002-06-18 14:23:47'),
+('test3@testdomain.com', 1, '2002-05-19 22:17:32');
+INSERT INTO test.t2(infoID, shipcode) VALUES
+(1, 'Z001'),
+(2, 'R002');
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode;
+email shipcode
+test1@testdomain.com Z001
+test2@testdomain.com R002
+test2@testdomain.com Z001
+test3@testdomain.com Z001
+SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC;
+email
+test1@testdomain.com
+test2@testdomain.com
+test3@testdomain.com
+SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC;
+email shipcode
+test1@testdomain.com Z001
+test2@testdomain.com Z001
+test2@testdomain.com R002
+test3@testdomain.com Z001
+drop table test.t1,test.t2;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts2 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+DROP TABLE IF EXISTS test.t;
+Warnings:
+Note 1051 Unknown table 't'
+create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB;
+insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1);
+insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2);
+select * from test.t order by f1;
+f1 f2 f3
+111111 aaaaaa 1
+222222 bbbbbb 2
+select f1,f2 from test.t order by f2;
+f1 f2
+111111 aaaaaa
+222222 bbbbbb
+select f2 from test.t order by f2;
+f2
+aaaaaa
+bbbbbb
+select f1,f2 from test.t order by f1;
+f1 f2
+111111 aaaaaa
+222222 bbbbbb
+drop table test.t;
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts
+ADD DATAFILE './table_space/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` blob,
+ `a3` text,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ENGINE=InnoDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` blob,
+ `a3` text,
+ PRIMARY KEY (`a1`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+Warnings:
+Error 1465 Table storage engine 'ndbcluster' does not support the create option 'Binlog of table with BLOB attribute and no PK'
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) TABLESPACE ts STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) DEFAULT NULL,
+ `a2` blob,
+ `a3` text
+) ENGINE=MyISAM DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL,
+ `a6` bigint(20) DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` time DEFAULT NULL,
+ `a9` datetime DEFAULT NULL,
+ `a10` tinytext,
+ `a11` mediumtext,
+ `a12` longtext,
+ `a13` text,
+ `a14` blob,
+ PRIMARY KEY (`a1`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6),
+ADD INDEX (a7), ADD INDEX (a8);
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL,
+ `a6` bigint(20) DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` time DEFAULT NULL,
+ `a9` datetime DEFAULT NULL,
+ `a10` tinytext,
+ `a11` mediumtext,
+ `a12` longtext,
+ `a13` text,
+ `a14` blob,
+ PRIMARY KEY (`a1`),
+ KEY `a2` (`a2`),
+ KEY `a3` (`a3`),
+ KEY `a5` (`a5`),
+ KEY `a6` (`a6`),
+ KEY `a7` (`a7`),
+ KEY `a8` (`a8`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL,
+ `a6` bigint(20) DEFAULT NULL,
+ `a7` date DEFAULT NULL,
+ `a8` time DEFAULT NULL,
+ `a9` datetime DEFAULT NULL,
+ `a10` tinytext,
+ `a11` mediumtext,
+ `a12` longtext,
+ `a13` text,
+ `a14` blob,
+ PRIMARY KEY (`a1`)
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+ALTER TABLE test.t1 DROP a14;
+ALTER TABLE test.t1 DROP a13;
+ALTER TABLE test.t1 DROP a12;
+ALTER TABLE test.t1 DROP a11;
+ALTER TABLE test.t1 DROP a10;
+ALTER TABLE test.t1 DROP a9;
+ALTER TABLE test.t1 DROP a8;
+ALTER TABLE test.t1 DROP a7;
+ALTER TABLE test.t1 DROP a6;
+ALTER TABLE test.t1 DROP PRIMARY KEY;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` float DEFAULT NULL,
+ `a3` double DEFAULT NULL,
+ `a4` bit(1) DEFAULT NULL,
+ `a5` tinyint(4) DEFAULT NULL
+) ENGINE=ndbcluster DEFAULT CHARSET=latin1
+DROP TABLE test.t1;
+ALTER TABLESPACE ts
+DROP DATAFILE './table_space/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result
new file mode 100644
index 00000000000..00490fbc32b
--- /dev/null
+++ b/mysql-test/r/ndb_dd_advance2.result
@@ -0,0 +1,745 @@
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+DROP TABLE IF EXISTS test.t3;
+*****
+**** Copy data from table in one table space to table in different table space
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLESPACE ts2
+ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(256) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(256) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa');
+INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb');
+SELECT * FROM test.t1;
+a1 a2 a3
+1 111111 aaaaaaaa
+2 222222 bbbbbbbb
+INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+SELECT * FROM test.t2;
+a1 a2 a3
+1 111111 aaaaaaaa
+2 222222 bbbbbbbb
+DROP TABLE test.t1, test.t2;
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+select length(@x0),length(@b1),length(@d1) from dual;
+length(@x0) length(@b1) length(@d1)
+256 2256 3000
+select length(@x0),length(@b2),length(@d2) from dual;
+length(@x0) length(@b2) length(@d2)
+256 20000 30000
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+TABLESPACE ts2 STORAGE DISK ENGINE=NDB;
+SHOW CREATE TABLE test.t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(5000) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts1 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+SHOW CREATE TABLE test.t2;
+Table Create Table
+t2 CREATE TABLE `t2` (
+ `a1` int(11) NOT NULL,
+ `a2` varchar(5000) DEFAULT NULL,
+ `a3` blob,
+ PRIMARY KEY (`a1`)
+) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1
+INSERT INTO test.t1 VALUES (1,@vc1,@d1);
+INSERT INTO test.t1 VALUES (2,@vc2,@b1);
+INSERT INTO test.t1 VALUES (3,@vc3,@d2);
+INSERT INTO test.t1 VALUES (4,@vc4,@b2);
+SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t1 WHERE a1=1;
+a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3)
+1 200 aa 3000 dd1
+SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3)
+FROM test.t1 where a1=2;
+a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3)
+2 500 bb 2256 b1b
+INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1;
+SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t2 WHERE a1=1;
+a1 length(a2) substr(a2,180,2) length(a3) substr(a3,1+3*900,3)
+1 200 aa 3000 dd1
+SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3)
+FROM test.t2 where a1=2;
+a1 length(a2) substr(a2,480,2) length(a3) substr(a3,1+2*900,3)
+2 500 bb 2256 b1b
+DROP TABLE test.t1, test.t2;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+ALTER TABLESPACE ts2
+DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts2 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+**** Insert, Update, Delete from NDB table with BLOB fields
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+set @vc1 = repeat('a', 200);
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('c', 1000);
+set @vc4 = repeat('d', 4000);
+set @vc5 = repeat('d', 5000);
+set @bb1 = repeat('1', 2000);
+set @bb2 = repeat('2', 5000);
+set @bb3 = repeat('3', 10000);
+set @bb4 = repeat('4', 40000);
+set @bb5 = repeat('5', 50000);
+select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual;
+length(@vc1) length(@vc2) length(@vc3) length(@vc4) length(@vc5)
+200 500 1000 4000 5000
+select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual;
+length(@bb1) length(@bb2) length(@bb3) length(@bb4) length(@bb5)
+2000 5000 10000 40000 50000
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+INSERT INTO test.t1 VALUES (1,@vc1,@bb1);
+INSERT INTO test.t1 VALUES (2,@vc2,@bb2);
+INSERT INTO test.t1 VALUES (3,@vc3,@bb3);
+INSERT INTO test.t1 VALUES (4,@vc4,@bb4);
+INSERT INTO test.t1 VALUES (5,@vc5,@bb5);
+UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1;
+SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3)
+FROM test.t1 WHERE a1=1;
+a1 length(a2) substr(a2,4998,2) length(a3) substr(a3,49997,3)
+1 5000 dd 50000 555
+UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2;
+SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3)
+FROM test.t1 WHERE a1=2;
+a1 length(a2) substr(a2,3998,2) length(a3) substr(a3,39997,3)
+2 4000 dd 40000 444
+UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3;
+SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3)
+FROM test.t1 WHERE a1=3;
+a1 length(a2) substr(a2,498,2) length(a3) substr(a3,3997,3)
+3 500 bb 5000 222
+UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4;
+SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3)
+FROM test.t1 WHERE a1=4;
+a1 length(a2) substr(a2,998,2) length(a3) substr(a3,9997,3)
+4 1000 cc 10000 333
+UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5;
+SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3)
+FROM test.t1 WHERE a1=5;
+a1 length(a2) substr(a2,198,2) length(a3) substr(a3,1997,3)
+5 200 aa 2000 111
+DELETE FROM test.t1 where a1=5;
+SELECT count(*) from test.t1;
+count(*)
+4
+DELETE FROM test.t1 where a1=4;
+SELECT count(*) from test.t1;
+count(*)
+3
+DELETE FROM test.t1 where a1=3;
+SELECT count(*) from test.t1;
+count(*)
+2
+DELETE FROM test.t1 where a1=2;
+SELECT count(*) from test.t1;
+count(*)
+1
+DELETE FROM test.t1 where a1=1;
+SELECT count(*) from test.t1;
+count(*)
+0
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+DROP TABLESPACE ts1 ENGINE NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+**** Create Stored procedures that use disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB//
+CREATE PROCEDURE test.sp1()
+BEGIN
+INSERT INTO test.t1 values (1,'111111','aaaaaaaa');
+END//
+CALL test.sp1();
+SELECT * FROM test.t1;
+a1 a2 a3
+1 111111 aaaaaaaa
+CREATE PROCEDURE test.sp2(n INT, vc VARCHAR(256), blb BLOB)
+BEGIN
+UPDATE test.t1 SET a2=vc, a3=blb where a1=n;
+END//
+CALL test.sp2(1,'222222','bbbbbbbb');
+SELECT * FROM test.t1;
+a1 a2 a3
+1 222222 bbbbbbbb
+DELETE FROM test.t1;
+DROP PROCEDURE test.sp1;
+DROP PROCEDURE test.sp2;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create function that operate on disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE FUNCTION test.fn1(n INT) RETURNS INT
+BEGIN
+DECLARE v INT;
+SELECT a1 INTO v FROM test.t1 WHERE a1=n;
+RETURN v;
+END//
+CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB
+BEGIN
+DECLARE vv BLOB;
+UPDATE test.t1 SET a3=blb where a1=n;
+SELECT a3 INTO vv FROM test.t1 WHERE a1=n;
+RETURN vv;
+END//
+SELECT test.fn1(10) FROM DUAL;
+test.fn1(10)
+10
+SELECT test.fn2(50, 'new BLOB content') FROM DUAL;
+test.fn2(50, 'new BLOB content')
+new BLOB content
+DELETE FROM test.t1;
+DROP FUNCTION test.fn1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create triggers that operate on disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW
+BEGIN
+if isnull(new.a2) then
+set new.a2:= 'trg1 works on a2 field';
+end if;
+if isnull(new.a3) then
+set new.a3:= 'trg1 works on a3 field';
+end if;
+end//
+insert into test.t1 (a1) values (1)//
+insert into test.t1 (a1,a2) values (2, 'ccccccc')//
+select * from test.t1//
+a1 a2 a3
+1 trg1 works on a2 field trg1 works on a3 field
+2 ccccccc trg1 works on a3 field
+DELETE FROM test.t1;
+DROP TRIGGER test.trg1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create, update views that operate on disk based tables
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+CREATE VIEW test.v1 AS SELECT * FROM test.t1;
+SELECT * FROM test.v1 order by a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+3 aaaaa3 bbbbb3
+4 aaaaa4 bbbbb4
+5 aaaaa5 bbbbb5
+6 aaaaa6 bbbbb6
+7 aaaaa7 bbbbb7
+8 aaaaa8 bbbbb8
+9 aaaaa9 bbbbb9
+10 aaaaa10 bbbbb10
+CHECK TABLE test.v1, test.t1;
+Table Op Msg_type Msg_text
+test.v1 check status OK
+test.t1 check note The storage engine for the table doesn't support check
+UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5;
+SELECT * FROM test.v1 order by a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+3 aaaaa3 bbbbb3
+4 aaaaa4 bbbbb4
+5 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz bbbbb5
+6 aaaaa6 bbbbb6
+7 aaaaa7 bbbbb7
+8 aaaaa8 bbbbb8
+9 aaaaa9 bbbbb9
+10 aaaaa10 bbbbb10
+DROP VIEW test.v1;
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create and use disk based table that use auto inc
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa10 bbbbb10
+2 aaaaa9 bbbbb9
+3 aaaaa8 bbbbb8
+4 aaaaa7 bbbbb7
+5 aaaaa6 bbbbb6
+6 aaaaa5 bbbbb5
+7 aaaaa4 bbbbb4
+8 aaaaa3 bbbbb3
+9 aaaaa2 bbbbb2
+10 aaaaa1 bbbbb1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create test that use transaction (commit, rollback)
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+SET AUTOCOMMIT=0;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+COMMIT;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ROLLBACK;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+SET AUTOCOMMIT=1;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+START TRANSACTION;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+COMMIT;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+START TRANSACTION;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+ROLLBACK;
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create test that uses locks
+*****
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+drop table if exists test.t1;
+CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+LOCK TABLES test.t1 write;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1");
+INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2");
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+UNLOCK TABLES;
+INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3");
+SELECT * FROM test.t1 ORDER BY a1;
+a1 a2 a3
+1 aaaaa1 bbbbb1
+2 aaaaa2 bbbbb2
+3 aaaaa3 bbbbb3
+4 aaaaa3 bbbbb3
+DELETE FROM test.t1;
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
+*****
+***** Create large disk base table, do random queries, check cache hits
+*****
+set @vc1 = repeat('a', 200);
+SELECT @vc1 FROM DUAL;
+@vc1
+aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
+set @vc2 = repeat('b', 500);
+set @vc3 = repeat('b', 998);
+set @x0 = '01234567012345670123456701234567';
+set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0);
+set @b1 = 'b1';
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1);
+set @b1 = concat(@b1,@x0);
+set @d1 = 'dd1';
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1);
+set @b2 = 'b2';
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2);
+set @d2 = 'dd2';
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2);
+select length(@x0),length(@b1),length(@d1) from dual;
+length(@x0) length(@b1) length(@d1)
+256 2256 3000
+select length(@x0),length(@b2),length(@d2) from dual;
+length(@x0) length(@b2) length(@d2)
+256 20000 30000
+CREATE LOGFILE GROUP lg
+ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+UNDO_BUFFER_SIZE = 1M
+ENGINE=NDB;
+CREATE TABLESPACE ts1
+ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+INITIAL_SIZE 12M
+ENGINE NDB;
+CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB)
+TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+INSERT INTO test.t1 values(1,@vc1,@d1);
+INSERT INTO test.t1 values(2,@vc2,@d2);
+explain SELECT * from test.t1 WHERE a1 = 1;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1
+SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t1 WHERE a1=1 ORDER BY a1;
+a1 length(a2) substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3)
+1 200 3000 dd1
+SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
+FROM test.t1 where a1=2 ORDER BY a1;
+a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3)
+2 500 30000 dd2
+UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1;
+UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2;
+SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3)
+FROM test.t1 where a1=1;
+a1 length(a2) substr(a2,1+2*9000,2) length(a3) substr(a3,1+3*9000,3)
+1 500 30000 dd2
+SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3)
+FROM test.t1 where a1=2;
substr(a2,1+2*900,2) length(a3) substr(a3,1+3*900,3) +2 200 3000 dd1 +DELETE FROM test.t1; +DROP TABLE test.t1; +ALTER TABLESPACE ts1 +DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; +DROP TABLESPACE ts1 ENGINE=NDB; +DROP LOGFILE GROUP lg +ENGINE=NDB; +***** +***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +***** +CREATE LOGFILE GROUP lg +ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; +CREATE TABLESPACE ts1 +ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg +INITIAL_SIZE 12M +ENGINE NDB; +CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) +TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +SELECT COUNT(*) from test.t1; +COUNT(*) +100 +SELECT SUM(a1) from test.t1; +SUM(a1) +5050 +SELECT MIN(a1) from test.t1; +MIN(a1) +1 +SELECT MAX(a1) from test.t1; +MAX(a1) +100 +SELECT a5 from test.t1 where a1=50; +a5 +root@localhost +SELECT * from test.t1 order by a1; +a1 a2 a3 a4 a5 +1 aaaaaaaaaaaaaaaa1 bbbbbbbbbbbbbbbbbb1 2006-06-20 root@localhost +2 aaaaaaaaaaaaaaaa2 bbbbbbbbbbbbbbbbbb2 2006-06-20 root@localhost +3 aaaaaaaaaaaaaaaa3 bbbbbbbbbbbbbbbbbb3 2006-06-20 root@localhost +4 aaaaaaaaaaaaaaaa4 bbbbbbbbbbbbbbbbbb4 2006-06-20 root@localhost +5 aaaaaaaaaaaaaaaa5 bbbbbbbbbbbbbbbbbb5 2006-06-20 root@localhost +6 aaaaaaaaaaaaaaaa6 bbbbbbbbbbbbbbbbbb6 2006-06-20 root@localhost +7 aaaaaaaaaaaaaaaa7 bbbbbbbbbbbbbbbbbb7 2006-06-20 root@localhost +8 aaaaaaaaaaaaaaaa8 bbbbbbbbbbbbbbbbbb8 2006-06-20 root@localhost +9 aaaaaaaaaaaaaaaa9 bbbbbbbbbbbbbbbbbb9 2006-06-20 root@localhost +10 aaaaaaaaaaaaaaaa10 bbbbbbbbbbbbbbbbbb10 2006-06-20 root@localhost +11 aaaaaaaaaaaaaaaa11 bbbbbbbbbbbbbbbbbb11 2006-06-20 root@localhost +12 aaaaaaaaaaaaaaaa12 bbbbbbbbbbbbbbbbbb12 2006-06-20 root@localhost +13 aaaaaaaaaaaaaaaa13 bbbbbbbbbbbbbbbbbb13 2006-06-20 root@localhost +14 aaaaaaaaaaaaaaaa14 bbbbbbbbbbbbbbbbbb14 2006-06-20 root@localhost +15 aaaaaaaaaaaaaaaa15 bbbbbbbbbbbbbbbbbb15 2006-06-20 root@localhost +16 aaaaaaaaaaaaaaaa16 bbbbbbbbbbbbbbbbbb16 2006-06-20 root@localhost +17 aaaaaaaaaaaaaaaa17 bbbbbbbbbbbbbbbbbb17 2006-06-20 root@localhost +18 aaaaaaaaaaaaaaaa18 bbbbbbbbbbbbbbbbbb18 2006-06-20 root@localhost +19 aaaaaaaaaaaaaaaa19 bbbbbbbbbbbbbbbbbb19 2006-06-20 root@localhost +20 aaaaaaaaaaaaaaaa20 bbbbbbbbbbbbbbbbbb20 2006-06-20 root@localhost +21 aaaaaaaaaaaaaaaa21 bbbbbbbbbbbbbbbbbb21 2006-06-20 root@localhost +22 aaaaaaaaaaaaaaaa22 bbbbbbbbbbbbbbbbbb22 2006-06-20 root@localhost +23 aaaaaaaaaaaaaaaa23 bbbbbbbbbbbbbbbbbb23 2006-06-20 root@localhost +24 aaaaaaaaaaaaaaaa24 bbbbbbbbbbbbbbbbbb24 2006-06-20 root@localhost +25 aaaaaaaaaaaaaaaa25 bbbbbbbbbbbbbbbbbb25 2006-06-20 root@localhost +26 aaaaaaaaaaaaaaaa26 bbbbbbbbbbbbbbbbbb26 2006-06-20 root@localhost +27 aaaaaaaaaaaaaaaa27 bbbbbbbbbbbbbbbbbb27 2006-06-20 root@localhost +28 aaaaaaaaaaaaaaaa28 bbbbbbbbbbbbbbbbbb28 2006-06-20 root@localhost +29 aaaaaaaaaaaaaaaa29 bbbbbbbbbbbbbbbbbb29 2006-06-20 root@localhost +30 aaaaaaaaaaaaaaaa30 bbbbbbbbbbbbbbbbbb30 2006-06-20 root@localhost +31 aaaaaaaaaaaaaaaa31 bbbbbbbbbbbbbbbbbb31 2006-06-20 root@localhost +32 aaaaaaaaaaaaaaaa32 bbbbbbbbbbbbbbbbbb32 2006-06-20 root@localhost +33 aaaaaaaaaaaaaaaa33 bbbbbbbbbbbbbbbbbb33 2006-06-20 root@localhost +34 aaaaaaaaaaaaaaaa34 bbbbbbbbbbbbbbbbbb34 2006-06-20 root@localhost +35 aaaaaaaaaaaaaaaa35 bbbbbbbbbbbbbbbbbb35 2006-06-20 root@localhost +36 aaaaaaaaaaaaaaaa36 bbbbbbbbbbbbbbbbbb36 2006-06-20 root@localhost +37 aaaaaaaaaaaaaaaa37 
bbbbbbbbbbbbbbbbbb37 2006-06-20 root@localhost +38 aaaaaaaaaaaaaaaa38 bbbbbbbbbbbbbbbbbb38 2006-06-20 root@localhost +39 aaaaaaaaaaaaaaaa39 bbbbbbbbbbbbbbbbbb39 2006-06-20 root@localhost +40 aaaaaaaaaaaaaaaa40 bbbbbbbbbbbbbbbbbb40 2006-06-20 root@localhost +41 aaaaaaaaaaaaaaaa41 bbbbbbbbbbbbbbbbbb41 2006-06-20 root@localhost +42 aaaaaaaaaaaaaaaa42 bbbbbbbbbbbbbbbbbb42 2006-06-20 root@localhost +43 aaaaaaaaaaaaaaaa43 bbbbbbbbbbbbbbbbbb43 2006-06-20 root@localhost +44 aaaaaaaaaaaaaaaa44 bbbbbbbbbbbbbbbbbb44 2006-06-20 root@localhost +45 aaaaaaaaaaaaaaaa45 bbbbbbbbbbbbbbbbbb45 2006-06-20 root@localhost +46 aaaaaaaaaaaaaaaa46 bbbbbbbbbbbbbbbbbb46 2006-06-20 root@localhost +47 aaaaaaaaaaaaaaaa47 bbbbbbbbbbbbbbbbbb47 2006-06-20 root@localhost +48 aaaaaaaaaaaaaaaa48 bbbbbbbbbbbbbbbbbb48 2006-06-20 root@localhost +49 aaaaaaaaaaaaaaaa49 bbbbbbbbbbbbbbbbbb49 2006-06-20 root@localhost +50 aaaaaaaaaaaaaaaa50 bbbbbbbbbbbbbbbbbb50 2006-06-20 root@localhost +51 aaaaaaaaaaaaaaaa51 bbbbbbbbbbbbbbbbbb51 2006-06-20 root@localhost +52 aaaaaaaaaaaaaaaa52 bbbbbbbbbbbbbbbbbb52 2006-06-20 root@localhost +53 aaaaaaaaaaaaaaaa53 bbbbbbbbbbbbbbbbbb53 2006-06-20 root@localhost +54 aaaaaaaaaaaaaaaa54 bbbbbbbbbbbbbbbbbb54 2006-06-20 root@localhost +55 aaaaaaaaaaaaaaaa55 bbbbbbbbbbbbbbbbbb55 2006-06-20 root@localhost +56 aaaaaaaaaaaaaaaa56 bbbbbbbbbbbbbbbbbb56 2006-06-20 root@localhost +57 aaaaaaaaaaaaaaaa57 bbbbbbbbbbbbbbbbbb57 2006-06-20 root@localhost +58 aaaaaaaaaaaaaaaa58 bbbbbbbbbbbbbbbbbb58 2006-06-20 root@localhost +59 aaaaaaaaaaaaaaaa59 bbbbbbbbbbbbbbbbbb59 2006-06-20 root@localhost +60 aaaaaaaaaaaaaaaa60 bbbbbbbbbbbbbbbbbb60 2006-06-20 root@localhost +61 aaaaaaaaaaaaaaaa61 bbbbbbbbbbbbbbbbbb61 2006-06-20 root@localhost +62 aaaaaaaaaaaaaaaa62 bbbbbbbbbbbbbbbbbb62 2006-06-20 root@localhost +63 aaaaaaaaaaaaaaaa63 bbbbbbbbbbbbbbbbbb63 2006-06-20 root@localhost +64 aaaaaaaaaaaaaaaa64 bbbbbbbbbbbbbbbbbb64 2006-06-20 root@localhost +65 aaaaaaaaaaaaaaaa65 bbbbbbbbbbbbbbbbbb65 2006-06-20 root@localhost +66 aaaaaaaaaaaaaaaa66 bbbbbbbbbbbbbbbbbb66 2006-06-20 root@localhost +67 aaaaaaaaaaaaaaaa67 bbbbbbbbbbbbbbbbbb67 2006-06-20 root@localhost +68 aaaaaaaaaaaaaaaa68 bbbbbbbbbbbbbbbbbb68 2006-06-20 root@localhost +69 aaaaaaaaaaaaaaaa69 bbbbbbbbbbbbbbbbbb69 2006-06-20 root@localhost +70 aaaaaaaaaaaaaaaa70 bbbbbbbbbbbbbbbbbb70 2006-06-20 root@localhost +71 aaaaaaaaaaaaaaaa71 bbbbbbbbbbbbbbbbbb71 2006-06-20 root@localhost +72 aaaaaaaaaaaaaaaa72 bbbbbbbbbbbbbbbbbb72 2006-06-20 root@localhost +73 aaaaaaaaaaaaaaaa73 bbbbbbbbbbbbbbbbbb73 2006-06-20 root@localhost +74 aaaaaaaaaaaaaaaa74 bbbbbbbbbbbbbbbbbb74 2006-06-20 root@localhost +75 aaaaaaaaaaaaaaaa75 bbbbbbbbbbbbbbbbbb75 2006-06-20 root@localhost +76 aaaaaaaaaaaaaaaa76 bbbbbbbbbbbbbbbbbb76 2006-06-20 root@localhost +77 aaaaaaaaaaaaaaaa77 bbbbbbbbbbbbbbbbbb77 2006-06-20 root@localhost +78 aaaaaaaaaaaaaaaa78 bbbbbbbbbbbbbbbbbb78 2006-06-20 root@localhost +79 aaaaaaaaaaaaaaaa79 bbbbbbbbbbbbbbbbbb79 2006-06-20 root@localhost +80 aaaaaaaaaaaaaaaa80 bbbbbbbbbbbbbbbbbb80 2006-06-20 root@localhost +81 aaaaaaaaaaaaaaaa81 bbbbbbbbbbbbbbbbbb81 2006-06-20 root@localhost +82 aaaaaaaaaaaaaaaa82 bbbbbbbbbbbbbbbbbb82 2006-06-20 root@localhost +83 aaaaaaaaaaaaaaaa83 bbbbbbbbbbbbbbbbbb83 2006-06-20 root@localhost +84 aaaaaaaaaaaaaaaa84 bbbbbbbbbbbbbbbbbb84 2006-06-20 root@localhost +85 aaaaaaaaaaaaaaaa85 bbbbbbbbbbbbbbbbbb85 2006-06-20 root@localhost +86 aaaaaaaaaaaaaaaa86 bbbbbbbbbbbbbbbbbb86 2006-06-20 root@localhost +87 aaaaaaaaaaaaaaaa87 bbbbbbbbbbbbbbbbbb87 2006-06-20 root@localhost +88 
aaaaaaaaaaaaaaaa88 bbbbbbbbbbbbbbbbbb88 2006-06-20 root@localhost
+89 aaaaaaaaaaaaaaaa89 bbbbbbbbbbbbbbbbbb89 2006-06-20 root@localhost
+90 aaaaaaaaaaaaaaaa90 bbbbbbbbbbbbbbbbbb90 2006-06-20 root@localhost
+91 aaaaaaaaaaaaaaaa91 bbbbbbbbbbbbbbbbbb91 2006-06-20 root@localhost
+92 aaaaaaaaaaaaaaaa92 bbbbbbbbbbbbbbbbbb92 2006-06-20 root@localhost
+93 aaaaaaaaaaaaaaaa93 bbbbbbbbbbbbbbbbbb93 2006-06-20 root@localhost
+94 aaaaaaaaaaaaaaaa94 bbbbbbbbbbbbbbbbbb94 2006-06-20 root@localhost
+95 aaaaaaaaaaaaaaaa95 bbbbbbbbbbbbbbbbbb95 2006-06-20 root@localhost
+96 aaaaaaaaaaaaaaaa96 bbbbbbbbbbbbbbbbbb96 2006-06-20 root@localhost
+97 aaaaaaaaaaaaaaaa97 bbbbbbbbbbbbbbbbbb97 2006-06-20 root@localhost
+98 aaaaaaaaaaaaaaaa98 bbbbbbbbbbbbbbbbbb98 2006-06-20 root@localhost
+99 aaaaaaaaaaaaaaaa99 bbbbbbbbbbbbbbbbbb99 2006-06-20 root@localhost
+100 aaaaaaaaaaaaaaaa100 bbbbbbbbbbbbbbbbbb100 2006-06-20 root@localhost
+DROP TABLE test.t1;
+ALTER TABLESPACE ts1
+DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE=NDB;
+DROP TABLESPACE ts1 ENGINE=NDB;
+DROP LOGFILE GROUP lg
+ENGINE=NDB;
diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test
new file mode 100755
index 00000000000..e882ec794c1
--- /dev/null
+++ b/mysql-test/t/ndb_dd_advance.test
@@ -0,0 +1,630 @@
+##############################################################
+# Author: JBM
+# Date: 2006-01-12
+# Purpose: To test using ndb memory and disk tables together.
+##############################################################
+
+##############################################################
+# Author: Nikolay
+# Date: 2006-05-12
+# Purpose: To test using ndb memory and disk tables together.
+#
+# Select from disk into memory table
+# Select from memory into disk table
+# Create test that loads data, use mysql dump to dump data, drop table,
+# create table and load from mysql dump.
+# Use group by asc and dec; Use having; Use order by +# ALTER Tests (Meta data testing): +# ALTER from InnoDB to Cluster Disk Data +# ALTER from MyISAM to Cluster Disk Data +# ALTER from Cluster Disk Data to InnoDB +# ALTER from Cluster Disk Data to MyISAM +# ALTER DD Tables and add columns +# ALTER DD Tables and add Indexes +# ALTER DD Tables and drop columns +# +############################################################## + +-- source include/have_ndb.inc +-- source include/not_embedded.inc + +--disable_warnings +DROP TABLE IF EXISTS test.t1; +DROP TABLE IF EXISTS test.t2; +--enable_warnings + +############ Test Setup Section ############# +-- echo **** Test Setup Section **** + +CREATE LOGFILE GROUP log_group1 +ADD UNDOFILE './log_group1/undofile.dat' +INITIAL_SIZE 16M +UNDO_BUFFER_SIZE = 1M +ENGINE=NDB; + +CREATE TABLESPACE table_space1 +ADD DATAFILE './table_space1/datafile.dat' +USE LOGFILE GROUP log_group1 +INITIAL_SIZE 12M +ENGINE NDB; + + +CREATE TABLE test.t1 +(pk1 INT NOT NULL PRIMARY KEY, b INT NOT NULL, c INT NOT NULL) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; + +CREATE TABLE test.t2 +(pk2 INT NOT NULL PRIMARY KEY, b2 INT NOT NULL, c2 INT NOT NULL) +ENGINE=NDB; + +--echo +##################### Data load for first test #################### +--echo **** Data load for first test **** + +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + + +INSERT INTO test.t2 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45), +(46,46,46),(47,47,47),(48,48,48),(49,49,49),(50,50,50), +(51,51,51),(52,52,52),(53,53,53),(54,54,54),(55,55,55), +(56,56,56),(57,57,57),(58,58,58),(59,59,59),(60,60,60), +(61,61,61),(62,62,62),(63,63,63),(64,64,64),(65,65,65), +(66,66,66),(67,67,67),(68,68,68),(69,69,69),(70,70,70), +(71,71,71),(72,72,72),(73,73,73),(74,74,74),(75,75,75); + +--echo +##################### Test 1 Section Begins ############### +--echo *** Test 1 Section Begins *** +SELECT COUNT(*) FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +SELECT * FROM test.t2 LEFT JOIN test.t1 ON pk2=pk1 WHERE b2 IN (4); +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 WHERE b IN (4); +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2 WHERE pk1 IN (75); +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +--echo +####################### Test 
1 Section End ################ + +##################### Setup for test 2 #################### +--echo *** Setup for test 2 **** +DELETE FROM test.t1; +INSERT INTO test.t1 VALUES +(1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5), +(6,6,6),(7,7,7),(8,8,8),(9,9,9),(10,10,10), +(11,11,11),(12,12,12),(13,13,13),(14,14,14),(15,15,15), +(16,16,16),(17,17,17),(18,18,18),(19,19,19),(20,20,20), +(21,21,21),(22,22,22),(23,23,23),(24,24,24),(25,25,25), +(26,26,26),(27,27,27),(28,28,28),(29,29,29),(30,30,30), +(31,31,31),(32,32,32),(33,33,33),(34,34,34),(35,35,35), +(36,36,36),(37,37,37),(38,38,38),(39,39,39),(40,40,40), +(41,41,41),(42,42,42),(43,43,43),(44,44,44),(45,45,45); +--echo +############################# Test Section 2 ############### +--echo **** Test Section 2 **** +SELECT b, c FROM test.t1 LEFT JOIN test.t2 ON pk1=pk2 ORDER BY b; +SELECT COUNT(*) FROM test.t1 LEFT JOIN test.t2 ON b=b2; +SELECT COUNT(*) FROM test.t1 RIGHT JOIN test.t2 ON b=b2; +SHOW CREATE TABLE test.t2; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t2 TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; +SHOW CREATE TABLE test.t2; +ALTER TABLE test.t1 ENGINE=NDBCLUSTER; +SHOW CREATE TABLE test.t1; +--echo +######################### End Test Section 2 ################# +DROP TABLE test.t1; +DROP TABLE test.t2; +##################### Setup for Test Section 3 ############### +--echo *** Setup for Test Section 3 *** +CREATE TABLE test.t1 ( + usr_id INT unsigned NOT NULL, + uniq_id INT unsigned NOT NULL AUTO_INCREMENT, + start_num INT unsigned NOT NULL DEFAULT 1, + increment INT unsigned NOT NULL DEFAULT 1, + PRIMARY KEY (uniq_id), + INDEX usr_uniq_idx (usr_id, uniq_id), + INDEX uniq_usr_idx (uniq_id, usr_id)) +TABLESPACE table_space1 STORAGE DISK +ENGINE=NDB; + + +CREATE TABLE test.t2 ( + id INT unsigned NOT NULL DEFAULT 0, + usr2_id INT unsigned NOT NULL DEFAULT 0, + max INT unsigned NOT NULL DEFAULT 0, + c_amount INT unsigned NOT NULL DEFAULT 0, + d_max INT unsigned NOT NULL DEFAULT 0, + d_num INT unsigned NOT NULL DEFAULT 0, + orig_time INT unsigned NOT NULL DEFAULT 0, + c_time INT unsigned NOT NULL DEFAULT 0, + active ENUM ("no","yes") NOT NULL, + PRIMARY KEY (id,usr2_id), + INDEX id_idx (id), + INDEX usr2_idx (usr2_id)) +ENGINE=NDB; + +INSERT INTO test.t1 VALUES (3,NULL,0,50),(3,NULL,0,200),(3,NULL,0,25),(3,NULL,0,84676),(3,NULL,0,235),(3,NULL,0,10),(3,NULL,0,3098),(3,NULL,0,2947),(3,NULL,0,8987),(3,NULL,0,8347654),(3,NULL,0,20398),(3,NULL,0,8976),(3,NULL,0,500),(3,NULL,0,198); + +--echo +###################### Test Section 3 ###################### +--echo **** Test Section 3 **** +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; + +INSERT INTO test.t2 VALUES (2,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (4,3,3000,6000,0,0,746584,837484,'yes'); +INSERT INTO test.t2 VALUES (7,3,1000,2000,0,0,746294,937484,'yes'); + +SELECT test.t1.usr_id,test.t1.uniq_id,test.t1.increment, +test.t2.usr2_id,test.t2.c_amount,test.t2.max +FROM test.t1 +LEFT JOIN test.t2 ON test.t2.id = test.t1.uniq_id +WHERE test.t1.uniq_id = 4 +ORDER BY test.t2.c_amount; +--echo +####################### End Section 3 ######################### +DROP TABLE test.t1; +DROP TABLE test.t2; +ALTER TABLESPACE table_space1 +DROP DATAFILE './table_space1/datafile.dat' +ENGINE = NDB; + +DROP TABLESPACE table_space1 +ENGINE = NDB; + +DROP LOGFILE GROUP log_group1 +ENGINE =NDB; + 
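+# A commented-out sketch (deliberately not run, so the recorded .result
+# file is unaffected): one way to verify that the datafile and undofile
+# really are deregistered after the drops above would be to query
+# INFORMATION_SCHEMA.FILES, assuming this 5.1 tree already exposes NDB
+# disk data files there:
+#
+# SELECT FILE_NAME, FILE_TYPE, TABLESPACE_NAME, LOGFILE_GROUP_NAME
+# FROM INFORMATION_SCHEMA.FILES WHERE ENGINE = 'ndbcluster';
+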
+####################### Section 4 #########################
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './lg_group/undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts1
+ ADD DATAFILE './table_space1/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+ CREATE TABLESPACE ts2
+ ADD DATAFILE './table_space2/datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+
+### Select from disk into memory table ###
+
+ CREATE TABLE t1 (a int NOT NULL PRIMARY KEY, b int)
+ TABLESPACE ts1 STORAGE DISK ENGINE=NDB;
+ CREATE TABLE t2 (a int NOT NULL PRIMARY KEY, b int)
+ ENGINE=NDB;
+
+ SHOW CREATE TABLE t1;
+ SHOW CREATE TABLE t2;
+
+ INSERT INTO t1 VALUES (1,1);
+ INSERT INTO t1 VALUES (2,2);
+ SELECT * FROM t1 order by a;
+ INSERT INTO t2(a,b) SELECT * FROM t1;
+ SELECT * FROM t2 order by a;
+
+### Select from memory into disk table ###
+
+ TRUNCATE t1;
+ TRUNCATE t2;
+ INSERT INTO t2 VALUES (3,3);
+ INSERT INTO t2 VALUES (4,4);
+ INSERT INTO t1(a,b) SELECT * FROM t2;
+ SELECT * FROM t1 order by a;
+
+ DROP TABLE t1, t2;
+
+ ALTER TABLESPACE ts1
+ DROP DATAFILE './table_space1/datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts1 ENGINE NDB;
+
+ ALTER TABLESPACE ts2
+ DROP DATAFILE './table_space2/datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts2 ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### Create test that loads data, use mysql dump to dump data, drop table,
+#### create table and load from mysql dump.
+
+# DROP DATABASE IF EXISTS test;
+
+ CREATE LOGFILE GROUP lg
+ ADD UNDOFILE './undofile.dat'
+ INITIAL_SIZE 16M
+ UNDO_BUFFER_SIZE = 1M
+ ENGINE=NDB;
+
+ CREATE TABLESPACE ts
+ ADD DATAFILE './datafile.dat'
+ USE LOGFILE GROUP lg
+ INITIAL_SIZE 12M
+ ENGINE NDB;
+
+# CREATE DATABASE test;
+
+CREATE TABLE test.t (
+ a smallint NOT NULL,
+ b int NOT NULL,
+ c bigint NOT NULL,
+ d char(10),
+ e TEXT,
+ f VARCHAR(255),
+ PRIMARY KEY(a)
+) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f);
+ SHOW CREATE TABLE test.t;
+
+# insert records into tables
+
+ let $1=100;
+ disable_query_log;
+ while ($1)
+ {
+ eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1");
+ dec $1;
+ }
+ enable_query_log;
+
+ SELECT * FROM test.t order by a;
+--exec $MYSQL_DUMP --skip-comments --databases test > $MYSQLTEST_VARDIR/tmp/t_dump.sql
+DROP TABLE test.t;
+--exec $MYSQL test < $MYSQLTEST_VARDIR/tmp/t_dump.sql
+USE test;
+show tables;
+
+SELECT * FROM test.t order by a;
+
+ DROP TABLE test.t;
+# DROP DATABASE test;
+
+ ALTER TABLESPACE ts
+ DROP DATAFILE './datafile.dat'
+ ENGINE NDB;
+
+ DROP TABLESPACE ts ENGINE NDB;
+
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+#### BUG 18856 test case commented out
+##### Use "SELECT * INTO OUTFILE" to dump data and "LOAD DATA INFILE" to load
+##### data back to the data file.
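+##### The commented-out block below is the test as written; it is disabled
+##### because the TRUNCATE step failed with error 1205 (Lock wait timeout
+##### exceeded; try restarting transaction), as noted inline.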
+ +# CREATE LOGFILE GROUP lg +# ADD UNDOFILE './undofile.dat' +# INITIAL_SIZE 16M +# UNDO_BUFFER_SIZE = 1M +# ENGINE=NDB; + +# CREATE TABLESPACE ts +# ADD DATAFILE './datafile.dat' +# USE LOGFILE GROUP lg +# INITIAL_SIZE 12M +# ENGINE NDB; + +#CREATE DATABASE test; + +#CREATE TABLE test.t ( +# a smallint NOT NULL, +# b int NOT NULL, +# c bigint NOT NULL, +# d char(10), +# e TEXT, +# f VARCHAR(255), +# PRIMARY KEY(a) +#) TABLESPACE ts STORAGE DISK ENGINE=NDB; + +# ALTER TABLE test.t ADD INDEX (d), ADD INDEX (f); +# SHOW CREATE TABLE test.t; + +# insert records into tables + +# let $1=100; +# disable_query_log; +# while ($1) +# { +# eval insert into test.t values($1, $1+1, $1+2, "aaa$1", "bbb$1", "ccccc$1"); +# dec $1; +# } +# enable_query_log; + +# SELECT * FROM test.t order by a; + +# SELECT * INTO OUTFILE 't_backup' FROM test.t; +# TRUNCATE test.t; + +#'TRUNCATE test.t' failed: 1205: Lock wait timeout exceeded; try restarting #transaction. TABLESPACE ts STORAGE DISK ENGINE=NDB; + +# SELECT count(*) FROM test.t; +# LOAD DATA INFILE 't_backup' INTO TABLE test.t; + +# SELECT * FROM test.t order by a; + +# DROP TABLE test.t; +# DROP DATABASE test; + +# ALTER TABLESPACE ts +# DROP DATAFILE './datafile.dat' +# ENGINE NDB; +# DROP TABLESPACE ts ENGINE NDB; +# DROP LOGFILE GROUP lg +# ENGINE=NDB; + +#### Use group by asc and dec; Use having; Use order by. #### + +# DROP DATABASE IF EXISTS test; + DROP table IF EXISTS test.t1; + DROP table IF EXISTS test.t2; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLESPACE ts2 + ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +# CREATE DATABASE test; + +CREATE TABLE test.t1 ( + a1 smallint NOT NULL, + a2 int NOT NULL, + a3 bigint NOT NULL, + a4 char(10), + a5 decimal(5,1), + a6 time, + a7 date, + a8 datetime, + a9 VARCHAR(255), + a10 blob, + PRIMARY KEY(a1) +) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a8); + SHOW CREATE TABLE test.t1; + +CREATE TABLE test.t2 ( + b1 smallint NOT NULL, + b2 int NOT NULL, + b3 bigint NOT NULL, + b4 char(10), + b5 decimal(5,1), + b6 time, + b7 date, + b8 datetime, + b9 VARCHAR(255), + b10 blob, + PRIMARY KEY(b1) +) ENGINE=NDB; + + ALTER TABLE test.t2 ADD INDEX (b2), ADD INDEX (b3), ADD INDEX (b8); + SHOW CREATE TABLE test.t2; + +let $1=20; +disable_query_log; +while ($1) +{ + eval insert into test.t1 values($1, $1+1, $1+2000000000, "aaa$1", 34.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + eval insert into test.t2 values($1+2, $1+3, $1+3000000000, "aaa$1", 35.2, '4:3:2', '2006-1-1', '1971-5-28 16:55:03', "bbbbbbbbbbbbb$1", "binary data"); + dec $1; +} +enable_query_log; + +SELECT * FROM test.t1 order by a1; +SELECT * FROM test.t2 order by b1; +SELECT COUNT(a1), a1, COUNT(a1)*a1 FROM test.t1 GROUP BY a1; +SELECT COUNT(a2), (a2+1), COUNT(a2)*(a2+0) FROM test.t1 GROUP BY a2; + +DROP TABLE test.t1; +DROP TABLE test.t2; + +create table test.t1 (a int not null,b char(5), c text) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + +insert into test.t1 (a) values (1),(2),(3),(4),(1),(2),(3),(4); +select distinct a from test.t1 group by b,a having a > 2 order by a desc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a desc; +select distinct a from test.t1 
group by b,a having a > 2 order by a asc; +select distinct a,c from test.t1 group by b,c,a having a > 2 order by a asc; +drop table test.t1; + +create table test.t1 (a char(1), key(a)) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +insert into test.t1 values('1'),('1'),('1'),('2'),('2'),('3'),('3'); +select * from test.t1 where a >= '1' order by a; +select distinct a from test.t1 order by a desc; +select distinct a from test.t1 where a >= '1' order by a desc; +select distinct a from test.t1 where a >= '1' order by a asc; +drop table test.t1; + +CREATE TABLE test.t1 (email varchar(50), infoID BIGINT, dateentered DATETIME) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +CREATE TABLE test.t2 (infoID BIGINT, shipcode varchar(10)) ENGINE=NDB; + +INSERT INTO test.t1 (email, infoID, dateentered) VALUES + ('test1@testdomain.com', 1, '2002-07-30 22:56:38'), + ('test1@testdomain.com', 1, '2002-07-27 22:58:16'), + ('test2@testdomain.com', 1, '2002-06-19 15:22:19'), + ('test2@testdomain.com', 2, '2002-06-18 14:23:47'), + ('test3@testdomain.com', 1, '2002-05-19 22:17:32'); + +INSERT INTO test.t2(infoID, shipcode) VALUES + (1, 'Z001'), + (2, 'R002'); + +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE t1.infoID=t2.infoID order by email, shipcode; +SELECT DISTINCTROW email FROM test.t1 ORDER BY dateentered DESC; +SELECT DISTINCTROW email, shipcode FROM test.t1, test.t2 WHERE test.t1.infoID=test.t2.infoID ORDER BY dateentered DESC; +drop table test.t1,test.t2; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE NDB; + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts2 ENGINE NDB; + DROP LOGFILE GROUP lg + ENGINE=NDB; +#################################################################### + + +#### Customer posted order by test case + +DROP TABLE IF EXISTS test.t; +create table test.t (f1 varchar(50) primary key, f2 text,f3 int) engine=NDB; +insert into test.t (f1,f2,f3)VALUES("111111","aaaaaa",1); +insert into test.t (f1,f2,f3)VALUES("222222","bbbbbb",2); +select * from test.t order by f1; +select f1,f2 from test.t order by f2; +select f2 from test.t order by f2; +select f1,f2 from test.t order by f1; +drop table test.t; + +################## ALTER Tests (Meta data testing) #################### + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts + ADD DATAFILE './table_space/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +#### Try to ALTER from InnoDB to Cluster Disk Data + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from MyISAM to Cluster Disk Data + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) ENGINE=MyISAM; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from Cluster Disk Data to InnoDB + +CREATE TABLE test.t1 (a1 INT PRIMARY KEY, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE test.t1 ENGINE=InnoDB; +SHOW CREATE TABLE test.t1; +DROP TABLE test.t1; + +#### Try to ALTER from Cluster Disk Data to MyISAM + +CREATE TABLE test.t1 (a1 INT, a2 BLOB, a3 TEXT) TABLESPACE ts STORAGE DISK ENGINE=NDB; +SHOW CREATE TABLE test.t1; +ALTER TABLE 
test.t1 ENGINE=MyISAM;
+SHOW CREATE TABLE test.t1;
+DROP TABLE test.t1;
+
+#### Try to ALTER DD Tables and add columns
+
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+
+SHOW CREATE TABLE test.t1;
+
+#### Try to ALTER DD Tables and add Indexes
+
+ALTER TABLE test.t1 ADD INDEX (a2), ADD INDEX (a3), ADD INDEX (a5), ADD INDEX (a6),
+ADD INDEX (a7), ADD INDEX (a8);
+
+SHOW CREATE TABLE test.t1;
+
+DROP TABLE test.t1;
+
+#### Try to ALTER DD Tables and drop columns
+
+CREATE TABLE test.t1 (a1 INT PRIMARY KEY) TABLESPACE ts STORAGE DISK ENGINE=NDB;
+
+ALTER TABLE test.t1 ADD a2 FLOAT, ADD a3 DOUBLE, ADD a4 BIT, ADD a5 TINYINT, ADD a6 BIGINT, ADD a7 DATE, ADD a8 TIME, ADD a9 DATETIME, ADD a10 TINYTEXT, ADD a11 MEDIUMTEXT, ADD a12 LONGTEXT, ADD a13 TEXT, ADD a14 BLOB;
+
+SHOW CREATE TABLE test.t1;
+
+ALTER TABLE test.t1 DROP a14;
+ALTER TABLE test.t1 DROP a13;
+ALTER TABLE test.t1 DROP a12;
+ALTER TABLE test.t1 DROP a11;
+ALTER TABLE test.t1 DROP a10;
+ALTER TABLE test.t1 DROP a9;
+ALTER TABLE test.t1 DROP a8;
+ALTER TABLE test.t1 DROP a7;
+ALTER TABLE test.t1 DROP a6;
+ALTER TABLE test.t1 DROP PRIMARY KEY;
+
+SHOW CREATE TABLE test.t1;
+
+DROP TABLE test.t1;
+
+ ALTER TABLESPACE ts
+ DROP DATAFILE './table_space/datafile.dat'
+ ENGINE NDB;
+ DROP TABLESPACE ts ENGINE NDB;
+ DROP LOGFILE GROUP lg
+ ENGINE=NDB;
+
+####################### End section 4 #########################
+#End 5.1 test case
+
diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test
new file mode 100755
index 00000000000..c10a009c224
--- /dev/null
+++ b/mysql-test/t/ndb_dd_advance2.test
@@ -0,0 +1,723 @@
+##############################################################
+# Author: Nikolay
+# Date: 2006-04-01
+# Purpose: Specific Blob and Varchar testing using disk tables.
+##############################################################
+# Create Stored procedures that use disk based tables.
+# Create functions that operate on disk based tables.
+# Create triggers that operate on disk based tables.
+# Create views that operate on disk based tables.
+# Try to create FK constraints on disk based tables.
+# Create and use a disk based table that uses auto inc.
+# Create test that uses transactions (commit, rollback).
+# Create large disk based table, do random queries, check cache hits, do same
+# query 10 times, check cache hits.
+# Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), ... other built in
+# SQL functions.
+# Create test that uses locks.
+# Create test using truncate.
+##############################################################
+
+-- source include/have_ndb.inc
+
+--disable_warnings
+DROP TABLE IF EXISTS test.t1;
+DROP TABLE IF EXISTS test.t2;
+DROP TABLE IF EXISTS test.t3;
+--enable_warnings
+
+#### Copy data from table in one table space to table in different table space.
#### +--echo ***** +--echo **** Copy data from table in one table space to table in different table space +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLESPACE ts2 + ADD DATAFILE './table_space2/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + SHOW CREATE TABLE test.t1; + SHOW CREATE TABLE test.t2; + + INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); + INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); + SELECT * FROM test.t1; + INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; + SELECT * FROM test.t2; + + DROP TABLE test.t1, test.t2; + + # populate BLOB field with large data + +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); + +# x0 size 256 +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); + +# b1 length 2000+256 +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +# d1 length 3000 +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); + +# b2 length 20000 +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +# d2 length 30000 +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); + +select length(@x0),length(@b1),length(@d1) from dual; +select length(@x0),length(@b2),length(@d2) from dual; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + CREATE TABLE test.t2 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + SHOW CREATE TABLE test.t1; + SHOW CREATE TABLE test.t2; + + INSERT INTO test.t1 VALUES (1,@vc1,@d1); + INSERT INTO test.t1 VALUES (2,@vc2,@b1); + INSERT INTO test.t1 VALUES (3,@vc3,@d2); + INSERT INTO test.t1 VALUES (4,@vc4,@b2); + + SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 WHERE a1=1; + SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) + FROM test.t1 where a1=2; + + INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; + SELECT a1,length(a2),substr(a2,180,2),length(a3),substr(a3,1+3*900,3) + FROM test.t2 WHERE a1=1; + SELECT a1,length(a2),substr(a2,480,2),length(a3),substr(a3,1+2*900,3) + FROM test.t2 where a1=2; + + + DROP TABLE test.t1, test.t2; + + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE 
NDB; + + ALTER TABLESPACE ts2 + DROP DATAFILE './table_space2/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts2 ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Insert, Update, Delete from NDB table with BLOB fields #### +--echo ***** +--echo **** Insert, Update, Delete from NDB table with BLOB fields +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +set @vc1 = repeat('a', 200); +set @vc2 = repeat('b', 500); +set @vc3 = repeat('c', 1000); +set @vc4 = repeat('d', 4000); +set @vc5 = repeat('d', 5000); + +set @bb1 = repeat('1', 2000); +set @bb2 = repeat('2', 5000); +set @bb3 = repeat('3', 10000); +set @bb4 = repeat('4', 40000); +set @bb5 = repeat('5', 50000); + +select length(@vc1),length(@vc2),length(@vc3),length(@vc4),length(@vc5) from dual; +select length(@bb1),length(@bb2),length(@bb3),length(@bb4),length(@bb5) from dual; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(5000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +# CREATE TABLE test.t2 (a1 int NOT NULL, a2 VARCHAR(5000), a3 BLOB) +# TABLESPACE ts2 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 VALUES (1,@vc1,@bb1); + INSERT INTO test.t1 VALUES (2,@vc2,@bb2); + INSERT INTO test.t1 VALUES (3,@vc3,@bb3); + INSERT INTO test.t1 VALUES (4,@vc4,@bb4); + INSERT INTO test.t1 VALUES (5,@vc5,@bb5); + + UPDATE test.t1 SET a2=@vc5, a3=@bb5 WHERE a1=1; + SELECT a1,length(a2),substr(a2,4998,2),length(a3),substr(a3,49997,3) + FROM test.t1 WHERE a1=1; + + UPDATE test.t1 SET a2=@vc4, a3=@bb4 WHERE a1=2; + SELECT a1,length(a2),substr(a2,3998,2),length(a3),substr(a3,39997,3) + FROM test.t1 WHERE a1=2; + + UPDATE test.t1 SET a2=@vc2, a3=@bb2 WHERE a1=3; + SELECT a1,length(a2),substr(a2,498,2),length(a3),substr(a3,3997,3) + FROM test.t1 WHERE a1=3; + + UPDATE test.t1 SET a2=@vc3, a3=@bb3 WHERE a1=4; + SELECT a1,length(a2),substr(a2,998,2),length(a3),substr(a3,9997,3) + FROM test.t1 WHERE a1=4; + + UPDATE test.t1 SET a2=@vc1, a3=@bb1 WHERE a1=5; + SELECT a1,length(a2),substr(a2,198,2),length(a3),substr(a3,1997,3) + FROM test.t1 WHERE a1=5; + + DELETE FROM test.t1 where a1=5; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=4; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=3; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=2; + SELECT count(*) from test.t1; + DELETE FROM test.t1 where a1=1; + SELECT count(*) from test.t1; + + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE NDB; + DROP TABLESPACE ts1 ENGINE NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +##### Create Stored procedures that use disk based tables ##### +--echo ***** +--echo **** Create Stored procedures that use disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +delimiter //; + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB// + CREATE PROCEDURE test.sp1() + BEGIN + INSERT INTO test.t1 values (1,'111111','aaaaaaaa'); + END// +delimiter ;// + + CALL test.sp1(); + SELECT * FROM test.t1; + +delimiter //; + CREATE PROCEDURE test.sp2(n INT, vc 
VARCHAR(256), blb BLOB) + BEGIN + UPDATE test.t1 SET a2=vc, a3=blb where a1=n; + END// +delimiter ;// + + CALL test.sp2(1,'222222','bbbbbbbb'); + SELECT * FROM test.t1; + + DELETE FROM test.t1; + DROP PROCEDURE test.sp1; + DROP PROCEDURE test.sp2; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create function that operate on disk based tables #### +--echo ***** +--echo ***** Create function that operate on disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=100; + disable_query_log; + while ($1) + { + eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1"); + dec $1; + } + enable_query_log; + + delimiter //; + CREATE FUNCTION test.fn1(n INT) RETURNS INT + BEGIN + DECLARE v INT; + SELECT a1 INTO v FROM test.t1 WHERE a1=n; + RETURN v; + END// + delimiter ;// + +delimiter //; + CREATE FUNCTION test.fn2(n INT, blb BLOB) RETURNS BLOB + BEGIN + DECLARE vv BLOB; + UPDATE test.t1 SET a3=blb where a1=n; + SELECT a3 INTO vv FROM test.t1 WHERE a1=n; + RETURN vv; + END// + delimiter ;// + + SELECT test.fn1(10) FROM DUAL; + SELECT test.fn2(50, 'new BLOB content') FROM DUAL; + + DELETE FROM test.t1; + DROP FUNCTION test.fn1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create triggers that operate on disk based tables #### +--echo ***** +--echo ***** Create triggers that operate on disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + delimiter //; + CREATE TRIGGER test.trg1 BEFORE INSERT ON test.t1 FOR EACH ROW + BEGIN + if isnull(new.a2) then + set new.a2:= 'trg1 works on a2 field'; + end if; + if isnull(new.a3) then + set new.a3:= 'trg1 works on a3 field'; + end if; + end// + insert into test.t1 (a1) values (1)// + insert into test.t1 (a1,a2) values (2, 'ccccccc')// + select * from test.t1// + delimiter ;// + + DELETE FROM test.t1; + DROP TRIGGER test.trg1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create, update views that operate on disk based tables #### +--echo ***** +--echo ***** Create, update views that operate on disk based tables +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=10; + disable_query_log; + while 
($1) + { + eval insert into test.t1 values($1, "aaaaa$1", "bbbbb$1"); + dec $1; + } + enable_query_log; + CREATE VIEW test.v1 AS SELECT * FROM test.t1; + SELECT * FROM test.v1 order by a1; + CHECK TABLE test.v1, test.t1; + + UPDATE test.v1 SET a2='zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz' WHERE a1=5; + SELECT * FROM test.v1 order by a1; + + DROP VIEW test.v1; + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create and use disk based table that use auto inc #### +--echo ***** +--echo ***** Create and use disk based table that use auto inc +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=10; + disable_query_log; + while ($1) + { + eval insert into test.t1 values(NULL, "aaaaa$1", "bbbbb$1"); + dec $1; + } + enable_query_log; + SELECT * FROM test.t1 ORDER BY a1; + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that use transaction (commit, rollback) #### +--echo ***** +--echo ***** Create test that use transaction (commit, rollback) +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + SET AUTOCOMMIT=0; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); + COMMIT; + SELECT * FROM test.t1 ORDER BY a1; + INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); + ROLLBACK; + SELECT * FROM test.t1 ORDER BY a1; + + DELETE FROM test.t1; + DROP TABLE test.t1; + SET AUTOCOMMIT=1; + +# Now do the same thing with START TRANSACTION without using AUTOCOMMIT. 
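+# (With AUTOCOMMIT=1 restored, each standalone statement commits by
+# itself; only the insert wrapped in the explicit START TRANSACTION /
+# ROLLBACK pair below should be discarded. A quick commented-out sanity
+# check, kept out of the recorded result: SELECT @@autocommit;)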
+ + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + START TRANSACTION; + INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); + COMMIT; + SELECT * FROM test.t1 ORDER BY a1; + + START TRANSACTION; + INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); + ROLLBACK; + SELECT * FROM test.t1 ORDER BY a1; + + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that uses locks #### +--echo ***** +--echo ***** Create test that uses locks +--echo ***** + + connect (con1,localhost,root,,); + connect (con2,localhost,root,,); + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + +# connection con1; +--disable_warnings + drop table if exists test.t1; + CREATE TABLE test.t1 (a1 INT NOT NULL AUTO_INCREMENT PRIMARY KEY, a2 VARCHAR(256), a3 BLOB) TABLESPACE ts1 STORAGE DISK ENGINE=NDB; +--enable_warnings + + LOCK TABLES test.t1 write; + INSERT INTO test.t1 VALUES(NULL, "aaaaa1", "bbbbb1"); + INSERT INTO test.t1 VALUES(NULL, "aaaaa2", "bbbbb2"); + SELECT * FROM test.t1 ORDER BY a1; + + connection con2; + SELECT * FROM test.t1 ORDER BY a1; + INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); + + connection con1; + UNLOCK TABLES; + + connection con2; + INSERT INTO test.t1 VALUES(NULL, "aaaaa3", "bbbbb3"); + SELECT * FROM test.t1 ORDER BY a1; + DELETE FROM test.t1; + DROP TABLE test.t1; + + #connection defualt; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create large disk base table, do random queries, check cache hits #### +--echo ***** +--echo ***** Create large disk base table, do random queries, check cache hits +--echo ***** + +set @vc1 = repeat('a', 200); +SELECT @vc1 FROM DUAL; +set @vc2 = repeat('b', 500); +set @vc3 = repeat('b', 998); + +# x0 size 256 +set @x0 = '01234567012345670123456701234567'; +set @x0 = concat(@x0,@x0,@x0,@x0,@x0,@x0,@x0,@x0); + +# b1 length 2000+256 (blob part aligned) +set @b1 = 'b1'; +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1,@b1); +set @b1 = concat(@b1,@x0); +# d1 length 3000 +set @d1 = 'dd1'; +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); +set @d1 = concat(@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1,@d1); + +# b2 length 20000 +set @b2 = 'b2'; +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +set @b2 = concat(@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2,@b2); +# d2 length 30000 +set @d2 = 'dd2'; +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); +set @d2 = concat(@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2,@d2); + +select length(@x0),length(@b1),length(@d1) from dual; +select length(@x0),length(@b2),length(@d2) from dual; + + CREATE LOGFILE GROUP lg + ADD UNDOFILE 
'./lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(1000), a3 BLOB) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + + INSERT INTO test.t1 values(1,@vc1,@d1); + INSERT INTO test.t1 values(2,@vc2,@d2); + explain SELECT * from test.t1 WHERE a1 = 1; + + SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 WHERE a1=1 ORDER BY a1; + SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) + FROM test.t1 where a1=2 ORDER BY a1; + + UPDATE test.t1 set a2=@vc2,a3=@d2 where a1=1; + UPDATE test.t1 set a2=@vc1,a3=@d1 where a1=2; + + SELECT a1,length(a2),substr(a2,1+2*9000,2),length(a3),substr(a3,1+3*9000,3) + FROM test.t1 where a1=1; + SELECT a1,length(a2),substr(a2,1+2*900,2),length(a3),substr(a3,1+3*900,3) + FROM test.t1 where a1=2; + + #SHOW VARIABLES LIKE 'have_query_cache'; + #SHOW STATUS LIKE 'Qcache%'; + + DELETE FROM test.t1; + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + +#### Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE #### +--echo ***** +--echo ***** Create test that uses COUNT(), SUM(), MAX(), MIN(), NOW(), USER(), TRUNCATE +--echo ***** + + CREATE LOGFILE GROUP lg + ADD UNDOFILE './lg_group/undofile.dat' + INITIAL_SIZE 16M + UNDO_BUFFER_SIZE = 1M + ENGINE=NDB; + + CREATE TABLESPACE ts1 + ADD DATAFILE './table_space1/datafile.dat' + USE LOGFILE GROUP lg + INITIAL_SIZE 12M + ENGINE NDB; + + CREATE TABLE test.t1 (a1 int NOT NULL PRIMARY KEY, a2 VARCHAR(256), a3 BLOB, a4 DATE, a5 CHAR(250)) + TABLESPACE ts1 STORAGE DISK ENGINE=NDB; + let $1=100; + disable_query_log; + while ($1) + { + eval insert into test.t1 values($1, "aaaaaaaaaaaaaaaa$1", "bbbbbbbbbbbbbbbbbb$1", '2006-06-20' , USER()); + dec $1; + } + enable_query_log; + + SELECT COUNT(*) from test.t1; + SELECT SUM(a1) from test.t1; + SELECT MIN(a1) from test.t1; + SELECT MAX(a1) from test.t1; + SELECT a5 from test.t1 where a1=50; + + + SELECT * from test.t1 order by a1; + + DROP TABLE test.t1; + + ALTER TABLESPACE ts1 + DROP DATAFILE './table_space1/datafile.dat' + ENGINE=NDB; + DROP TABLESPACE ts1 ENGINE=NDB; + + DROP LOGFILE GROUP lg + ENGINE=NDB; + + +#End 5.1 test case + From f5ec695c18ffb70f42cfda7518fcf3fd18692cb2 Mon Sep 17 00:00:00 2001 From: "jonas@perch.ndb.mysql.com" <> Date: Thu, 6 Jul 2006 09:38:18 +0200 Subject: [PATCH 56/74] ndb - revert bug fix for bug#20442 --- ndb/src/ndbapi/NdbScanOperation.cpp | 60 ----------------------------- 1 file changed, 60 deletions(-) diff --git a/ndb/src/ndbapi/NdbScanOperation.cpp b/ndb/src/ndbapi/NdbScanOperation.cpp index 6b587be688f..f14b5409ce8 100644 --- a/ndb/src/ndbapi/NdbScanOperation.cpp +++ b/ndb/src/ndbapi/NdbScanOperation.cpp @@ -1503,66 +1503,6 @@ NdbScanOperation::close_impl(TransporterFacade* tp, bool forceSend){ return -1; } - bool holdLock = false; - if (theSCAN_TABREQ) - { - ScanTabReq * req = CAST_PTR(ScanTabReq, theSCAN_TABREQ->getDataPtrSend()); - holdLock = ScanTabReq::getHoldLockFlag(req->requestInfo); - } - - /** - * When using locks, force close of scan directly - */ - if (holdLock && theError.code == 0 && - (m_sent_receivers_count + m_conf_receivers_count + m_api_receivers_count)) - { - TransporterFacade * tp = 
TransporterFacade::instance(); - NdbApiSignal tSignal(theNdb->theMyRef); - tSignal.setSignal(GSN_SCAN_NEXTREQ); - - Uint32* theData = tSignal.getDataPtrSend(); - Uint64 transId = theNdbCon->theTransactionId; - theData[0] = theNdbCon->theTCConPtr; - theData[1] = 1; - theData[2] = transId; - theData[3] = (Uint32) (transId >> 32); - - tSignal.setLength(4); - int ret = tp->sendSignal(&tSignal, nodeId); - if (ret) - { - setErrorCode(4008); - return -1; - } - checkForceSend(forceSend); - - /** - * If no receiver is outstanding... - * set it to 1 as execCLOSE_SCAN_REP resets it - */ - m_sent_receivers_count = m_sent_receivers_count ? m_sent_receivers_count : 1; - - while(theError.code == 0 && (m_sent_receivers_count + m_conf_receivers_count)) - { - theNdb->theImpl->theWaiter.m_node = nodeId; - theNdb->theImpl->theWaiter.m_state = WAIT_SCAN; - int return_code = theNdb->receiveResponse(WAITFOR_SCAN_TIMEOUT); - switch(return_code){ - case 0: - break; - case -1: - setErrorCode(4008); - case -2: - m_api_receivers_count = 0; - m_conf_receivers_count = 0; - m_sent_receivers_count = 0; - theNdbCon->theReleaseOnClose = true; - return -1; - } - } - return 0; - } - /** * Wait for outstanding */ From 42d549a9dd958bcc3bac16d7f53b50db2d7ebb9f Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Thu, 6 Jul 2006 10:20:49 +0200 Subject: [PATCH 57/74] corrected results --- mysql-test/r/ndb_dd_advance2.result | 5 +++-- mysql-test/t/ndb_dd_advance2.test | 5 +++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result index 00490fbc32b..545a356bbb6 100644 --- a/mysql-test/r/ndb_dd_advance2.result +++ b/mysql-test/r/ndb_dd_advance2.result @@ -41,12 +41,12 @@ t2 CREATE TABLE `t2` ( ) TABLESPACE ts2 STORAGE DISK ENGINE=ndbcluster DEFAULT CHARSET=latin1 INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); -SELECT * FROM test.t1; +SELECT * FROM test.t1 ORDER BY a1; a1 a2 a3 1 111111 aaaaaaaa 2 222222 bbbbbbbb INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; -SELECT * FROM test.t2; +SELECT * FROM test.t2 ORDER BY a1; a1 a2 a3 1 111111 aaaaaaaa 2 222222 bbbbbbbb @@ -299,6 +299,7 @@ test.fn2(50, 'new BLOB content') new BLOB content DELETE FROM test.t1; DROP FUNCTION test.fn1; +DROP FUNCTION test.fn2; DROP TABLE test.t1; ALTER TABLESPACE ts1 DROP DATAFILE './table_space1/datafile.dat' diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test index c10a009c224..cb33d788684 100755 --- a/mysql-test/t/ndb_dd_advance2.test +++ b/mysql-test/t/ndb_dd_advance2.test @@ -58,9 +58,9 @@ DROP TABLE IF EXISTS test.t3; INSERT INTO test.t1 VALUES (1,'111111','aaaaaaaa'); INSERT INTO test.t1 VALUES (2,'222222','bbbbbbbb'); - SELECT * FROM test.t1; + SELECT * FROM test.t1 ORDER BY a1; INSERT INTO test.t2(a1,a2,a3) SELECT * FROM test.t1; - SELECT * FROM test.t2; + SELECT * FROM test.t2 ORDER BY a1; DROP TABLE test.t1, test.t2; @@ -332,6 +332,7 @@ delimiter //; DELETE FROM test.t1; DROP FUNCTION test.fn1; + DROP FUNCTION test.fn2; DROP TABLE test.t1; ALTER TABLESPACE ts1 From ec8771af696df16cf6e22abc914dcd56c683e7ae Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Thu, 6 Jul 2006 15:18:00 +0200 Subject: [PATCH 58/74] backport of ndb DictCache fix - don't invalidate tables that are in state RETRIEVING --- ndb/src/ndbapi/DictCache.cpp | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/ndb/src/ndbapi/DictCache.cpp 
b/ndb/src/ndbapi/DictCache.cpp index 9b6449e8ec5..66ce6266fb9 100644 --- a/ndb/src/ndbapi/DictCache.cpp +++ b/ndb/src/ndbapi/DictCache.cpp @@ -278,12 +278,15 @@ GlobalDictCache::invalidate_all() if (vers->size()) { TableVersion * ver = & vers->back(); - ver->m_impl->m_status = NdbDictionary::Object::Invalid; - ver->m_status = DROPPED; - if (ver->m_refCount == 0) + if (ver->m_status != RETREIVING) { - delete ver->m_impl; - vers->erase(vers->size() - 1); + ver->m_impl->m_status = NdbDictionary::Object::Invalid; + ver->m_status = DROPPED; + if (ver->m_refCount == 0) + { + delete ver->m_impl; + vers->erase(vers->size() - 1); + } } } curr = m_tableHash.getNext(curr); From 44ed4b8ecb604cf5007658bffb59e338a47e8591 Mon Sep 17 00:00:00 2001 From: "ingo@chilla.local" <> Date: Thu, 6 Jul 2006 15:38:47 +0200 Subject: [PATCH 59/74] After merge fixes. --- mysql-test/r/archive.result | 2 ++ sql/time.cc | 5 +++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/archive.result b/mysql-test/r/archive.result index cacf4aaf304..1dfec8ff713 100644 --- a/mysql-test/r/archive.result +++ b/mysql-test/r/archive.result @@ -13812,6 +13812,8 @@ select * from t1 where i between 2 and 4 and v in ('def','3r4f','lmn'); i v 4 3r4f alter table t1 data directory="$MYSQLTEST_VARDIR/tmp"; +Warnings: +Warning 0 DATA DIRECTORY option ignored select * from t1; i v 1 def diff --git a/sql/time.cc b/sql/time.cc index ae776a32aab..0461f7723c6 100644 --- a/sql/time.cc +++ b/sql/time.cc @@ -749,6 +749,7 @@ void make_truncated_value_warning(THD *thd, const char *str_val, ER_TRUNCATED_WRONG_VALUE, warn_buff); } +/* Daynumber from year 0 to 9999-12-31 */ #define MAX_DAY_NUMBER 3652424L bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) @@ -804,7 +805,7 @@ bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) ltime->hour= (uint) (sec/3600); daynr= calc_daynr(ltime->year,ltime->month,1) + days; /* Day number from year 0 to 9999-12-31 */ - if ((ulonglong) daynr >= MAX_DAY_NUMBER) + if ((ulonglong) daynr > MAX_DAY_NUMBER) goto invalid_date; get_date_from_daynr((long) daynr, <ime->year, <ime->month, <ime->day); @@ -815,7 +816,7 @@ bool date_add_interval(TIME *ltime, interval_type int_type, INTERVAL interval) period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ - if ((ulong) period >= MAX_DAY_NUMBER) + if ((ulong) period > MAX_DAY_NUMBER) goto invalid_date; get_date_from_daynr((long) period,<ime->year,<ime->month,<ime->day); break; From deb1884cef699e22580937f2beff84ac0ea9bc2a Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Thu, 6 Jul 2006 15:51:13 +0200 Subject: [PATCH 60/74] corrected result file - missing order by --- mysql-test/r/ndb_dd_advance2.result | 2 +- mysql-test/t/ndb_dd_advance2.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/ndb_dd_advance2.result b/mysql-test/r/ndb_dd_advance2.result index 545a356bbb6..c7fcda650e6 100644 --- a/mysql-test/r/ndb_dd_advance2.result +++ b/mysql-test/r/ndb_dd_advance2.result @@ -333,7 +333,7 @@ end if; end// insert into test.t1 (a1) values (1)// insert into test.t1 (a1,a2) values (2, 'ccccccc')// -select * from test.t1// +select * from test.t1 order by a1// a1 a2 a3 1 trg1 works on a2 field trg1 works on a3 field 2 ccccccc trg1 works on a3 field diff --git a/mysql-test/t/ndb_dd_advance2.test b/mysql-test/t/ndb_dd_advance2.test index cb33d788684..7b7a15ef01a 100755 --- 
a/mysql-test/t/ndb_dd_advance2.test +++ b/mysql-test/t/ndb_dd_advance2.test @@ -375,7 +375,7 @@ delimiter //; end// insert into test.t1 (a1) values (1)// insert into test.t1 (a1,a2) values (2, 'ccccccc')// - select * from test.t1// + select * from test.t1 order by a1// delimiter ;// DELETE FROM test.t1; From 41997194389725e3e665966e936bb3474fb332d2 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Thu, 6 Jul 2006 18:50:44 +0200 Subject: [PATCH 61/74] Bug #20820 auto inc table not handled correctly when restored from cluster backup --- mysql-test/include/ndb_default_cluster.inc | 2 +- mysql-test/r/ndb_default_cluster.require | 2 +- mysql-test/r/ndb_restore.result | 24 +++++++++---- mysql-test/t/ndb_restore.test | 23 +++++++++---- ndb/tools/restore/consumer_restore.cpp | 40 ++++++++++++++++------ 5 files changed, 65 insertions(+), 26 deletions(-) diff --git a/mysql-test/include/ndb_default_cluster.inc b/mysql-test/include/ndb_default_cluster.inc index 2f900b6a0b4..de7eda3c596 100644 --- a/mysql-test/include/ndb_default_cluster.inc +++ b/mysql-test/include/ndb_default_cluster.inc @@ -1,4 +1,4 @@ -- require r/ndb_default_cluster.require disable_query_log; -show status like "Ndb_connected_host"; +show status like "Ndb_config_from_host"; enable_query_log; diff --git a/mysql-test/r/ndb_default_cluster.require b/mysql-test/r/ndb_default_cluster.require index aa4988cdca3..3616ae0f343 100644 --- a/mysql-test/r/ndb_default_cluster.require +++ b/mysql-test/r/ndb_default_cluster.require @@ -1,2 +1,2 @@ Variable_name Value -Ndb_connected_host localhost +Ndb_config_from_host localhost diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index c78a4137468..e5bf4315e5c 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -1,6 +1,6 @@ use test; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; CREATE TABLE `t1` ( `capgoaledatta` smallint(5) unsigned NOT NULL auto_increment, `goaledatta` char(2) NOT NULL default '', @@ -116,6 +116,8 @@ CREATE TABLE `t9` ( PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`) ) ENGINE=myisam DEFAULT CHARSET=latin1; INSERT INTO `t9` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); +create table t10 (a int auto_increment key); +insert into t10 values (1),(2),(3); create table t1_c engine=ndbcluster as select * from t1; create table t2_c engine=ndbcluster as select * from t2; create table t3_c engine=ndbcluster as select * from t3; @@ -125,10 +127,12 @@ create table t6_c engine=ndbcluster as select * from t6; create table t7_c engine=ndbcluster as select * from t7; create table t8_c engine=ndbcluster as select * from t8; create table t9_c engine=ndbcluster as select * from t9; 
-drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +create table t10_c engine=ndbcluster as select * from t10; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; show tables; Tables_in_test t1 +t10 t2 t3 t4 @@ -137,14 +141,15 @@ t6 t7 t8 t9 -t8_c +t3_c t9_c t1_c +t8_c t7_c t6_c t5_c t4_c -t3_c +t10_c t2_c select count(*) from t1; count(*) @@ -245,6 +250,11 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +select * from t10_c order by a; +a +1 +2 +3 +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; 520093696,1 diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index 049b07d5a8b..39c7ab67efb 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -4,8 +4,8 @@ --disable_warnings use test; -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --enable_warnings CREATE TABLE `t1` ( @@ -132,6 +132,13 @@ CREATE TABLE `t9` ( ) ENGINE=myisam DEFAULT CHARSET=latin1; INSERT INTO `t9` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); +# Bug #20820 +# auto inc table not handled correctly when restored from cluster backup +# - before fix ndb_restore would not set auto inc value correct, +# seen by select below +create table t10 (a int auto_increment key); +insert into t10 values (1),(2),(3); + create table t1_c engine=ndbcluster as select * from t1; create table t2_c engine=ndbcluster as select * from t2; create table t3_c engine=ndbcluster as select * from t3; @@ -141,10 +148,11 @@ create table t6_c engine=ndbcluster as select * from t6; create table t7_c engine=ndbcluster as select * from t7; create table t8_c engine=ndbcluster as select * from t8; create table t9_c engine=ndbcluster as select * from t9; +create table t10_c engine=ndbcluster as select * from t10; --exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT @@ -205,9 +213,12 @@ select count(*) from (select * from t9 union select * from t9_c) a; +# Bug #20820 cont'd +select * from t10_c order by a; + --disable_warnings -drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; -drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table if exists 
t1,t2,t3,t4,t5,t6,t7,t8,t9, t10; +drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; --enable_warnings # @@ -216,4 +227,4 @@ drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; --exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults -d sys -D , SYSTAB_0 | grep 520093696 -# End of 4.1 tests +# End of 5.0 tests (4.1 test intermixed to save test time) diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp index bff63c28716..dc1399e73b2 100644 --- a/ndb/tools/restore/consumer_restore.cpp +++ b/ndb/tools/restore/consumer_restore.cpp @@ -145,17 +145,38 @@ BackupRestore::finalize_table(const TableS & table){ bool ret= true; if (!m_restore && !m_restore_meta) return ret; - if (table.have_auto_inc()) + if (!table.have_auto_inc()) + return ret; + + Uint64 max_val= table.get_max_auto_val(); + do { - Uint64 max_val= table.get_max_auto_val(); - Uint64 auto_val; + Uint64 auto_val = ~(Uint64)0; int r= m_ndb->readAutoIncrementValue(get_table(table.m_dictTable), auto_val); - if (r == -1 && m_ndb->getNdbError().code != 626) + if (r == -1 && m_ndb->getNdbError().status == NdbError::TemporaryError) + { + NdbSleep_MilliSleep(50); + continue; // retry + } + else if (r == -1 && m_ndb->getNdbError().code != 626) + { ret= false; - else if (r == -1 || max_val+1 > auto_val) - ret= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), max_val+1, false) != -1; - } - return ret; + } + else if ((r == -1 && m_ndb->getNdbError().code == 626) || + max_val+1 > auto_val || auto_val == ~(Uint64)0) + { + r= m_ndb->setAutoIncrementValue(get_table(table.m_dictTable), + max_val+1, false); + if (r == -1 && + m_ndb->getNdbError().status == NdbError::TemporaryError) + { + NdbSleep_MilliSleep(50); + continue; // retry + } + ret = (r == 0); + } + return (ret); + } while (1); } bool @@ -217,9 +238,6 @@ BackupRestore::table(const TableS & table){ err << "Unable to find table: " << split[2].c_str() << endl; return false; } - if(m_restore_meta){ - m_ndb->setAutoIncrementValue(tab, ~(Uint64)0, false); - } const NdbDictionary::Table* null = 0; m_new_tables.fill(table.m_dictTable->getTableId(), null); m_new_tables[table.m_dictTable->getTableId()] = tab; From 8f613e9263eecc2a8ccd5bfce468e2427fdcb927 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Thu, 6 Jul 2006 20:04:48 +0200 Subject: [PATCH 62/74] Bug #20820 auto inc table not handled correctly when restored from cluster backup --- mysql-test/r/ndb_restore.result | 24 ++++++++++++++---------- mysql-test/t/ndb_dd_backuprestore.test | 2 ++ mysql-test/t/ndb_restore.test | 11 ++++++++--- mysql-test/t/rpl_ndb_dd_advance.test | 2 ++ mysql-test/t/rpl_ndb_sync.test | 2 ++ 5 files changed, 28 insertions(+), 13 deletions(-) diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index acc898949a6..7a84ddd6ef0 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -116,7 +116,7 @@ CREATE TABLE `t9_c` ( PRIMARY KEY (`kattjame`,`hunderaaarbagefa`,`hassetistart`,`hassetino`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO `t9_c` VALUES ('3g4jh8gar2t','joe','q3.net','elredun.com','q3.net','436643316120','436643316939','91341234568968','695595699','1.1.1.1','2.2.6.2','3','86989','34','x','x','2012-03-12 18:35:04','2012-12-05 12:35:04',3123123,9569,6565,1),('4tt45345235','pap','q3plus.qt','q3plus.qt','q3.net','436643316120','436643316939','8956234534568968','5254595969','1.1.1.1','8.6.2.2','4','86989','34','x','x','2012-03-12 12:55:34','2012-12-05 
11:20:04',3223433,3369,9565,2),('4545435545','john','q3.net','q3.net','acne.li','436643316120','436643316939','45345234568968','995696699','1.1.1.1','2.9.9.2','2','86998','34','x','x','2012-03-12 11:35:03','2012-12-05 08:50:04',8823123,169,3565,3); -CREATE TABLE `t10_c` (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; +CREATE TABLE t10_c (a INT AUTO_INCREMENT KEY) ENGINE=ndbcluster DEFAULT CHARSET=latin1; INSERT INTO t10_c VALUES (1),(2),(3); create table t1 engine=myisam as select * from t1_c; create table t2 engine=myisam as select * from t2_c; @@ -127,7 +127,7 @@ create table t6 engine=myisam as select * from t6_c; create table t7 engine=myisam as select * from t7_c; create table t8 engine=myisam as select * from t8_c; create table t9 engine=myisam as select * from t9_c; -create table t10 engine=ndbcluster as select * from t10_c; +create table t10 engine=myisam as select * from t10_c; CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM test.backup_info; LOAD DATA INFILE '../tmp.dat' INTO TABLE test.backup_info FIELDS TERMINATED BY ','; @@ -235,20 +235,30 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 +select * from t10_c order by a; +a +1 +2 +3 ALTER TABLE t1_c PARTITION BY RANGE (`capgoaledatta`) (PARTITION p0 VALUES LESS THAN MAXVALUE); +ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) ALTER TABLE t2_c PARTITION BY LIST(`capgotod`) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); +ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) ALTER TABLE t3_c PARTITION BY HASH (`CapGoaledatta`); +ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) ALTER TABLE t5_c PARTITION BY HASH (`capfa`) PARTITIONS 4; +ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) ALTER TABLE t6_c PARTITION BY LINEAR HASH (`relatta`) PARTITIONS 4; +ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) ALTER TABLE t7_c PARTITION BY LINEAR KEY (`dardtestard`); CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; @@ -258,7 +268,7 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id DROP TABLE test.backup_info; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; select count(*) from t1; count(*) 5 @@ -358,7 +368,7 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; select count(*) from t1; count(*) 5 @@ -458,11 +468,6 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 -select * from t10_c order by a; -a -1 -2 -3 drop table t1_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM test.backup_info; @@ -471,7 +476,6 @@ SELECT @the_backup_id:=backup_id FROM test.backup_info; @the_backup_id:=backup_id DROP TABLE test.backup_info; -Create table test/def/t2_c failed: Translate frm error drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9,t10; drop table if exists t2_c; 520093696, diff --git a/mysql-test/t/ndb_dd_backuprestore.test b/mysql-test/t/ndb_dd_backuprestore.test index 0dc6c2ae206..48db8ec3e0b 100644 --- a/mysql-test/t/ndb_dd_backuprestore.test +++ b/mysql-test/t/ndb_dd_backuprestore.test @@ -5,6 +5,8 @@ ######################################## -- source include/have_ndb.inc +-- source 
include/ndb_default_cluster.inc +-- source include/not_embedded.inc --disable_query_log set new=on; diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index 42efedb898a..c465a3d7b92 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -152,7 +152,7 @@ create table t10 engine=myisam as select * from t10_c; --source include/ndb_backup.inc -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c, t10_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT @@ -219,21 +219,26 @@ select * from t10_c order by a; # # Try Partitioned tables as well # +--error 1005 ALTER TABLE t1_c PARTITION BY RANGE (`capgoaledatta`) (PARTITION p0 VALUES LESS THAN MAXVALUE); +--error 1005 ALTER TABLE t2_c PARTITION BY LIST(`capgotod`) (PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); +--error 1005 ALTER TABLE t3_c PARTITION BY HASH (`CapGoaledatta`); +--error 1005 ALTER TABLE t5_c PARTITION BY HASH (`capfa`) PARTITIONS 4; +--error 1005 ALTER TABLE t6_c PARTITION BY LINEAR HASH (`relatta`) PARTITIONS 4; @@ -242,7 +247,7 @@ ALTER TABLE t7_c PARTITION BY LINEAR KEY (`dardtestard`); --source include/ndb_backup.inc -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT @@ -300,7 +305,7 @@ select count(*) from (select * from t9 union select * from t9_c) a; -drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c,t10_c; --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT --exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b $the_backup_id -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-$the_backup_id >> $NDB_TOOLS_OUTPUT diff --git a/mysql-test/t/rpl_ndb_dd_advance.test b/mysql-test/t/rpl_ndb_dd_advance.test index 80ff533ec5b..30d5deb47ad 100644 --- a/mysql-test/t/rpl_ndb_dd_advance.test +++ b/mysql-test/t/rpl_ndb_dd_advance.test @@ -7,6 +7,8 @@ #### Include Section #### --source include/have_ndb.inc --source include/have_binlog_format_row.inc +--source include/ndb_default_cluster.inc +--source include/not_embedded.inc #--source include/have_ndb_extra.inc --source include/master-slave.inc diff --git a/mysql-test/t/rpl_ndb_sync.test b/mysql-test/t/rpl_ndb_sync.test index 95f56609ed7..20d4f5707f8 100644 --- a/mysql-test/t/rpl_ndb_sync.test +++ b/mysql-test/t/rpl_ndb_sync.test @@ -1,4 +1,6 @@ --source include/have_ndb.inc +--source include/ndb_default_cluster.inc +--source include/not_embedded.inc --source include/have_binlog_format_row.inc --source include/master-slave.inc From 8ef49706505723af6b0d77fe4f10d4826a27cc20 Mon Sep 17 00:00:00 2001 From: "ingo@chilla.local" <> Date: Thu, 6 Jul 2006 20:12:33 +0200 Subject: [PATCH 63/74] After merge fix. 
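The auto-increment repair in the Bug #20820 patch above reduces to a read-check-set loop that retries on temporary NDB errors. Below is a minimal standalone sketch of that pattern using the same NDB API calls the diff uses; the helper name set_restored_autoinc is hypothetical, and error 626 is the "tuple did not exist" code checked in the patch, i.e. no auto-increment entry stored yet.

    #include <NdbApi.hpp>
    #include <NdbSleep.h>

    // Sketch of the finalize_table() retry pattern: read the current
    // auto-increment value, bump it past the backup's max if needed,
    // and retry both operations on temporary errors.
    static bool set_restored_autoinc(Ndb* ndb,
                                     const NdbDictionary::Table* tab,
                                     Uint64 max_val)
    {
      do
      {
        Uint64 auto_val = ~(Uint64)0;
        int r = ndb->readAutoIncrementValue(tab, auto_val);
        if (r == -1 && ndb->getNdbError().status == NdbError::TemporaryError)
        {
          NdbSleep_MilliSleep(50);
          continue;                 // transient failure: retry the read
        }
        if (r == -1 && ndb->getNdbError().code != 626)
          return false;             // permanent failure: give up
        if (r != -1 && auto_val != ~(Uint64)0 && max_val + 1 <= auto_val)
          return true;              // counter already past the backup's max
        r = ndb->setAutoIncrementValue(tab, max_val + 1, false);
        if (r == -1 && ndb->getNdbError().status == NdbError::TemporaryError)
        {
          NdbSleep_MilliSleep(50);
          continue;                 // transient failure: retry the write
        }
        return r != -1;
      } while (1);
    }

The loop cannot spin forever on a permanent error: every branch either retries after a temporary error or returns.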
--- sql/table.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/table.cc b/sql/table.cc index a96ca0da881..8bee8bf1598 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1456,7 +1456,7 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, Create a new field for the key part that matches the index */ field= key_part->field=field->new_field(&outparam->mem_root, - outparam); + outparam, 0); field->field_length= key_part->length; } } From 5ea51287d69e54698246836d9286374187262e29 Mon Sep 17 00:00:00 2001 From: "knielsen@ymer.(none)" <> Date: Thu, 6 Jul 2006 23:49:09 +0200 Subject: [PATCH 64/74] BUG#19951: Race conditions in test wait_timeout. Fix random failures in test 'wait_timeout' that depend on exact timing. 1. Force a reconnect initially if necessary, as otherwise slow startup might have caused a connection timeout before the test can even start. 2. Explicitly disconnect the first connection to remove confusion about which connection aborts from timeout, causing test failure. --- mysql-test/r/wait_timeout.result | 4 ++++ mysql-test/t/wait_timeout.test | 12 +++++++++--- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/wait_timeout.result b/mysql-test/r/wait_timeout.result index 683986abf5d..b865a17454d 100644 --- a/mysql-test/r/wait_timeout.result +++ b/mysql-test/r/wait_timeout.result @@ -1,3 +1,7 @@ +select 0; +0 +0 +flush status; select 1; 1 1 diff --git a/mysql-test/t/wait_timeout.test b/mysql-test/t/wait_timeout.test index 8387c08c902..195d1a5d3f2 100644 --- a/mysql-test/t/wait_timeout.test +++ b/mysql-test/t/wait_timeout.test @@ -9,16 +9,20 @@ # Connect with another connection and reset counters --disable_query_log connect (wait_con,localhost,root,,test,,); -flush status; # Reset counters connection wait_con; set session wait_timeout=100; let $retries=300; -let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`; set @aborted_clients= 0; --enable_query_log # Disable reconnect and do the query connection default; +# If slow host (Valgrind...), we may have already timed out here. +# So force a reconnect if necessary, using a dummy query. And issue a +# 'flush status' to reset the 'aborted_clients' counter. +--enable_reconnect +select 0; +flush status; --disable_reconnect select 1; @@ -46,6 +50,9 @@ connection default; select 2; --enable_reconnect select 3; +# Disconnect so that we will not be confused by a future abort from this +# connection. +disconnect default # # Do the same test as above on a TCP connection @@ -56,7 +63,6 @@ select 3; connection wait_con; flush status; # Reset counters let $retries=300; -let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`; set @aborted_clients= 0; --enable_query_log From 5e3861cb95b08851aa9a580978e1227fa66d2a8e Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Fri, 7 Jul 2006 00:18:01 +0200 Subject: [PATCH 65/74] removed alter table partition error test since it cannot be made predictable... 
the name of the created file will change --- mysql-test/r/ndb_restore.result | 19 ------------------- mysql-test/t/ndb_restore.test | 24 ------------------------ 2 files changed, 43 deletions(-) diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index 7a84ddd6ef0..b946d97bea1 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -240,25 +240,6 @@ a 1 2 3 -ALTER TABLE t1_c -PARTITION BY RANGE (`capgoaledatta`) -(PARTITION p0 VALUES LESS THAN MAXVALUE); -ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) -ALTER TABLE t2_c -PARTITION BY LIST(`capgotod`) -(PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); -ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) -ALTER TABLE t3_c -PARTITION BY HASH (`CapGoaledatta`); -ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) -ALTER TABLE t5_c -PARTITION BY HASH (`capfa`) -PARTITIONS 4; -ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) -ALTER TABLE t6_c -PARTITION BY LINEAR HASH (`relatta`) -PARTITIONS 4; -ERROR HY000: Can't create table 'test.#sql-2a5b_3' (errno: 138) ALTER TABLE t7_c PARTITION BY LINEAR KEY (`dardtestard`); CREATE TEMPORARY TABLE IF NOT EXISTS test.backup_info (id INT, backup_id INT) ENGINE = HEAP; diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index c465a3d7b92..9030dfbe304 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -219,30 +219,6 @@ select * from t10_c order by a; # # Try Partitioned tables as well # ---error 1005 -ALTER TABLE t1_c -PARTITION BY RANGE (`capgoaledatta`) -(PARTITION p0 VALUES LESS THAN MAXVALUE); - ---error 1005 -ALTER TABLE t2_c -PARTITION BY LIST(`capgotod`) -(PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); - ---error 1005 -ALTER TABLE t3_c -PARTITION BY HASH (`CapGoaledatta`); - ---error 1005 -ALTER TABLE t5_c -PARTITION BY HASH (`capfa`) -PARTITIONS 4; - ---error 1005 -ALTER TABLE t6_c -PARTITION BY LINEAR HASH (`relatta`) -PARTITIONS 4; - ALTER TABLE t7_c PARTITION BY LINEAR KEY (`dardtestard`); From 343a5244dc0d5d62023eabc26b060d9c52efb762 Mon Sep 17 00:00:00 2001 From: "konstantin@bodhi.netgear" <> Date: Fri, 7 Jul 2006 03:07:45 +0400 Subject: [PATCH 66/74] After merge fixes.
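One of the fixes below reinstates a free_items() helper in sql_parse.cc. The idiom is worth spelling out: each Item releases itself, so the loop has to capture the next pointer before the call. A self-contained sketch with a stand-in struct (this Item is not the server's Item class):

    // Stand-in node type; only the list shape and self-delete are mirrored.
    struct Item
    {
      Item* next;
      void delete_self() { delete this; }
    };

    // Same shape as the free_items() in the diff below: read 'next' first,
    // because 'item' dangles once delete_self() has run.
    static void free_items(Item* item)
    {
      Item* next;
      for (; item; item= next)
      {
        next= item->next;
        item->delete_self();
      }
    }

    int main()
    {
      Item* head= 0;
      for (int i= 0; i < 3; i++)
      {
        Item* it= new Item;
        it->next= head;
        head= it;
      }
      free_items(head);   // frees all three nodes, touching none after free
      return 0;
    }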
--- sql/event_timed.cc | 23 ++++-------- sql/events.cc | 12 +++---- sql/sp_head.cc | 36 ++++++++++++------- sql/sql_parse.cc | 89 ++++++++++++++++------------------------------ 4 files changed, 66 insertions(+), 94 deletions(-) diff --git a/sql/event_timed.cc b/sql/event_timed.cc index 4ec875f32a3..98369e0e055 100644 --- a/sql/event_timed.cc +++ b/sql/event_timed.cc @@ -143,24 +143,13 @@ Event_timed::init_name(THD *thd, sp_name *spn) MEM_ROOT *root= thd->mem_root; /* We have to copy strings to get them into the right memroot */ - if (spn) - { - dbname.length= spn->m_db.length; - if (spn->m_db.length == 0) - dbname.str= NULL; - else - dbname.str= strmake_root(root, spn->m_db.str, spn->m_db.length); - name.length= spn->m_name.length; - name.str= strmake_root(root, spn->m_name.str, spn->m_name.length); + dbname.length= spn->m_db.length; + dbname.str= strmake_root(root, spn->m_db.str, spn->m_db.length); + name.length= spn->m_name.length; + name.str= strmake_root(root, spn->m_name.str, spn->m_name.length); - if (spn->m_qname.length == 0) - spn->init_qname(thd); - } - else if (thd->db) - { - dbname.length= thd->db_length; - dbname.str= strmake_root(root, thd->db, dbname.length); - } + if (spn->m_qname.length == 0) + spn->init_qname(thd); DBUG_PRINT("dbname", ("len=%d db=%s",dbname.length, dbname.str)); DBUG_PRINT("name", ("len=%d name=%s",name.length, name.str)); diff --git a/sql/events.cc b/sql/events.cc index d67c42326e3..4a2c7338d7c 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -598,8 +598,9 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, int ret= 0; CHARSET_INFO *scs= system_charset_info; TABLE *table; - char olddb[128]; - bool dbchanged= false; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; + bool dbchanged; DBUG_ENTER("db_create_event"); DBUG_PRINT("enter", ("name: %.*s", et->name.length, et->name.str)); @@ -626,8 +627,7 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, } DBUG_PRINT("info", ("non-existant, go forward")); - if ((ret= sp_use_new_db(thd, et->dbname.str,olddb, sizeof(olddb),0, - &dbchanged))) + if ((ret= sp_use_new_db(thd, et->dbname, &old_db, 0, &dbchanged))) { my_error(ER_BAD_DB_ERROR, MYF(0)); goto err; @@ -691,14 +691,14 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, *rows_affected= 1; ok: if (dbchanged) - (void) mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); if (table) close_thread_tables(thd); DBUG_RETURN(EVEX_OK); err: if (dbchanged) - (void) mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); if (table) close_thread_tables(thd); DBUG_RETURN(EVEX_GENERAL_ERROR); diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 1ebd645d9f9..81f5d502ec9 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -495,20 +495,32 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) /* During parsing, we must use thd->mem_root */ MEM_ROOT *root= thd->mem_root; - DBUG_ASSERT(name); - /* Must be initialized in the parser */ - DBUG_ASSERT(name->m_db.str && name->m_db.length); + if (name) + { + /* Must be initialized in the parser */ + DBUG_ASSERT(name->m_db.str && name->m_db.length); - /* We have to copy strings to get them into the right memroot */ - m_db.length= name->m_db.length; - m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); - m_name.length= name->m_name.length; - m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + /* We have to copy strings to get them into the right memroot */ + 
m_db.length= name->m_db.length; + m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); + m_name.length= name->m_name.length; + m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); - if (name->m_qname.length == 0) - name->init_qname(thd); - m_qname.length= name->m_qname.length; - m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); + if (name->m_qname.length == 0) + name->init_qname(thd); + m_qname.length= name->m_qname.length; + m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); + } + else + { + /* + FIXME: the only use case when name is NULL is events, and it should + be rewritten soon. Remove the else part and replace 'if' with + an assert when this is done. + */ + LEX_STRING str_reset= { NULL, 0 }; + m_db= m_name= m_qname= str_reset; + } if (m_param_begin && m_param_end) { diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7201648dbfc..26e6a66b9b4 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -66,7 +66,6 @@ static void time_out_user_resource_limits(THD *thd, USER_CONN *uc); static int check_for_max_user_connections(THD *thd, USER_CONN *uc); #endif static void decrease_user_connections(USER_CONN *uc); -static bool check_db_used(THD *thd,TABLE_LIST *tables); static bool check_multi_update_lock(THD *thd); static void remove_escape(char *name); static bool execute_sqlcom_select(THD *thd, TABLE_LIST *all_tables); @@ -1362,7 +1361,21 @@ end: } - /* This works because items are allocated with sql_alloc() */ +/* This works because items are allocated with sql_alloc() */ + +void free_items(Item *item) +{ + Item *next; + DBUG_ENTER("free_items"); + for (; item ; item=next) + { + next=item->next; + item->delete_self(); + } + DBUG_VOID_RETURN; +} + +/* This works because items are allocated with sql_alloc() */ void cleanup_items(Item *item) { @@ -2719,8 +2732,7 @@ mysql_execute_command(THD *thd) case SQLCOM_BACKUP_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL, all_tables, 0) || + if (check_table_access(thd, SELECT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; @@ -2732,8 +2744,7 @@ mysql_execute_command(THD *thd) case SQLCOM_RESTORE_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, INSERT_ACL, all_tables, 0) || + if (check_table_access(thd, INSERT_ACL, all_tables, 0) || check_global_access(thd, FILE_ACL)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; @@ -2745,8 +2756,7 @@ mysql_execute_command(THD *thd) case SQLCOM_ASSIGN_TO_KEYCACHE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_access(thd, INDEX_ACL, first_table->db, + if (check_access(thd, INDEX_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) goto error; @@ -2756,8 +2766,7 @@ mysql_execute_command(THD *thd) case SQLCOM_PRELOAD_KEYS: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_access(thd, INDEX_ACL, first_table->db, + if (check_access(thd, INDEX_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) goto error; @@ -3131,8 +3140,6 @@ end_with_restore_list: { DBUG_ASSERT(first_table == all_tables && first_table != 0); TABLE_LIST *table; - 
if (check_db_used(thd, all_tables)) - goto error; for (table= first_table; table; table= table->next_local->next_local) { if (check_access(thd, ALTER_ACL | DROP_ACL, table->db, @@ -3189,8 +3196,7 @@ end_with_restore_list: if (lex->only_view) first_table->skip_temporary= 1; - if (check_db_used(thd, all_tables) || - check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db, + if (check_access(thd, SELECT_ACL | EXTRA_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) goto error; @@ -3203,8 +3209,7 @@ end_with_restore_list: case SQLCOM_CHECKSUM: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | EXTRA_ACL, all_tables, 0)) goto error; /* purecov: inspected */ res = mysql_checksum_table(thd, first_table, &lex->check_opt); break; @@ -3212,8 +3217,7 @@ end_with_restore_list: case SQLCOM_REPAIR: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_repair_table(thd, first_table, &lex->check_opt); @@ -3234,8 +3238,7 @@ end_with_restore_list: case SQLCOM_CHECK: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | EXTRA_ACL , all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res = mysql_check_table(thd, first_table, &lex->check_opt); @@ -3246,8 +3249,7 @@ end_with_restore_list: case SQLCOM_ANALYZE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_analyze_table(thd, first_table, &lex->check_opt); @@ -3269,8 +3271,7 @@ end_with_restore_list: case SQLCOM_OPTIMIZE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL | INSERT_ACL, all_tables, 0)) goto error; /* purecov: inspected */ thd->enable_slow_log= opt_log_slow_admin_statements; res= (specialflag & (SPECIAL_SAFE_MODE | SPECIAL_NO_NEW_FUNC)) ? 
@@ -3690,7 +3691,7 @@ end_with_restore_list: break; case SQLCOM_LOCK_TABLES: unlock_locked_tables(thd); - if (check_db_used(thd, all_tables) || end_active_trans(thd)) + if (end_active_trans(thd)) goto error; if (check_table_access(thd, LOCK_TABLES_ACL | SELECT_ACL, all_tables, 0)) goto error; @@ -4167,7 +4168,7 @@ end_with_restore_list: case SQLCOM_FLUSH: { bool write_to_binlog; - if (check_global_access(thd,RELOAD_ACL) || check_db_used(thd, all_tables)) + if (check_global_access(thd,RELOAD_ACL)) goto error; /* reload_acl_and_cache() will tell us if we are allowed to write to the @@ -4216,15 +4217,12 @@ end_with_restore_list: #endif case SQLCOM_HA_OPEN: DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables) || - check_table_access(thd, SELECT_ACL, all_tables, 0)) + if (check_table_access(thd, SELECT_ACL, all_tables, 0)) goto error; res= mysql_ha_open(thd, first_table, 0); break; case SQLCOM_HA_CLOSE: DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (check_db_used(thd, all_tables)) - goto error; res= mysql_ha_close(thd, first_table); break; case SQLCOM_HA_READ: @@ -4234,8 +4232,6 @@ end_with_restore_list: if a user has no permissions to read a table, he won't be able to open it (with SQLCOM_HA_OPEN) in the first place. */ - if (check_db_used(thd, all_tables)) - goto error; unit->set_limit(select_lex); res= mysql_ha_read(thd, first_table, lex->ha_read_mode, lex->ident.str, lex->insert_list, lex->ha_rkey_mode, select_lex->where, @@ -5734,27 +5730,6 @@ bool check_merge_table_access(THD *thd, char *db, } -static bool check_db_used(THD *thd,TABLE_LIST *tables) -{ - char *current_db= NULL; - for (; tables; tables= tables->next_global) - { - if (tables->db == NULL) - { - /* - This code never works and should be removed in 5.1. All tables - that are added to the list of tables should already have its - database field initialized properly (see st_lex::add_table_to_list). - */ - DBUG_ASSERT(0); - if (thd->copy_db_to(&current_db, 0)) - return TRUE; - tables->db= current_db; - } - } - return FALSE; -} - /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -7450,8 +7425,7 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) /* sql_yacc guarantees that tables and aux_tables are not zero */ DBUG_ASSERT(aux_tables != 0); - if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || - check_table_access(thd, SELECT_ACL, tables, 0)) + if (check_table_access(thd, SELECT_ACL, tables, 0)) DBUG_RETURN(TRUE); /* @@ -7551,8 +7525,7 @@ bool update_precheck(THD *thd, TABLE_LIST *tables) my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); DBUG_RETURN(TRUE); } - DBUG_RETURN(check_db_used(thd, tables) || - check_one_table_access(thd, UPDATE_ACL, tables)); + DBUG_RETURN(check_one_table_access(thd, UPDATE_ACL, tables)); } @@ -7614,8 +7587,6 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables) my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); DBUG_RETURN(TRUE); } - if (check_db_used(thd, tables)) - DBUG_RETURN(TRUE); DBUG_RETURN(FALSE); } From 35b6ad79687591db74b870cec9aad99969201fca Mon Sep 17 00:00:00 2001 From: "konstantin/kostja@bodhi.netgear" <> Date: Fri, 7 Jul 2006 13:49:43 +0400 Subject: [PATCH 67/74] Another post-merge fix.
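The one-line change below initializes db_create_event()'s dbchanged flag at declaration. The hazard it closes: an error path can reach the cleanup label before sp_use_new_db() ever assigns the flag, and the cleanup then branches on an indeterminate bool. A reduced standalone illustration; the function here is a stand-in, not the server code:

    #include <cstdio>

    static int create_event(bool fail_before_db_switch)
    {
      bool dbchanged= false;     // the fix: initialize before any goto
      if (fail_before_db_switch)
        goto err;                // jumps over the assignment below
      dbchanged= true;           // set only once the database was switched
    err:
      if (dbchanged)             // without the initializer: undefined read
        std::printf("restoring previous database\n");
      return fail_before_db_switch ? -1 : 0;
    }

    int main()
    {
      create_event(true);        // error path must not "restore"
      create_event(false);       // success path restores
      return 0;
    }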
--- sql/events.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/events.cc b/sql/events.cc index 4a2c7338d7c..210cc2c4735 100644 --- a/sql/events.cc +++ b/sql/events.cc @@ -600,7 +600,7 @@ db_create_event(THD *thd, Event_timed *et, my_bool create_if_not, TABLE *table; char old_db_buf[NAME_LEN+1]; LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; - bool dbchanged; + bool dbchanged= FALSE; DBUG_ENTER("db_create_event"); DBUG_PRINT("enter", ("name: %.*s", et->name.length, et->name.str)); From 377e0862cac098b65291fccdb335a5748eb0cb55 Mon Sep 17 00:00:00 2001 From: "konstantin/kostja@bodhi.local" <> Date: Fri, 7 Jul 2006 16:14:07 +0400 Subject: [PATCH 68/74] Fixes for Windows compilation failures. --- server-tools/instance-manager/parse.h | 2 +- sql/log_event.cc | 2 +- sql/rpl_filter.cc | 2 +- sql/rpl_filter.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server-tools/instance-manager/parse.h b/server-tools/instance-manager/parse.h index ae29c7eb64a..fd970f54d29 100644 --- a/server-tools/instance-manager/parse.h +++ b/server-tools/instance-manager/parse.h @@ -69,7 +69,7 @@ private: inline char *Named_value::alloc_str(const LEX_STRING *str) { - return my_strndup((const byte *) str->str, str->length, MYF(0)); + return my_strndup(str->str, str->length, MYF(0)); } inline char *Named_value::alloc_str(const char *str) diff --git a/sql/log_event.cc b/sql/log_event.cc index 7a4f22a4064..823fad1e8e2 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -5839,7 +5839,7 @@ int Table_map_log_event::exec_event(st_relay_log_info *rli) if (memory == NULL) DBUG_RETURN(HA_ERR_OUT_OF_MEM); - uint32 dummy_len; + uint dummy_len; bzero(table_list, sizeof(*table_list)); table_list->db = db_mem; table_list->alias= table_list->table_name = tname_mem; diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index 143cd027b5f..c01b5189887 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -513,7 +513,7 @@ Rpl_filter::get_wild_ignore_table(String* str) const char* -Rpl_filter::get_rewrite_db(const char* db, uint32 *new_len) +Rpl_filter::get_rewrite_db(const char* db, uint *new_len) { if (rewrite_db.is_empty() || !db) return db; diff --git a/sql/rpl_filter.h b/sql/rpl_filter.h index 58d2b97c9c6..718fd401c56 100644 --- a/sql/rpl_filter.h +++ b/sql/rpl_filter.h @@ -70,7 +70,7 @@ public: void get_wild_do_table(String* str); void get_wild_ignore_table(String* str); - const char* get_rewrite_db(const char* db, uint32 *new_len); + const char* get_rewrite_db(const char* db, uint *new_len); I_List* get_do_db(); I_List* get_ignore_db(); From 438d8cea7af89c9b003cb08a0464f7a017d6faa1 Mon Sep 17 00:00:00 2001 From: "knielsen@ymer.(none)" <> Date: Fri, 7 Jul 2006 17:20:49 +0200 Subject: [PATCH 69/74] BUG#20902: Test failure in ndb_alter_table3 with statement-based binlogging Disable test cases ndb_alter_table3, ndb_autodiscover3, ndb_dd_advance, and ndb_multi in statement-based binlogging mode until bug can be fixed. --- mysql-test/t/ndb_alter_table3.test | 3 +++ mysql-test/t/ndb_autodiscover3.test | 3 +++ mysql-test/t/ndb_dd_advance.test | 3 +++ mysql-test/t/ndb_multi.test | 3 +++ 4 files changed, 12 insertions(+) diff --git a/mysql-test/t/ndb_alter_table3.test b/mysql-test/t/ndb_alter_table3.test index a5fe613adcf..b0295834563 100644 --- a/mysql-test/t/ndb_alter_table3.test +++ b/mysql-test/t/ndb_alter_table3.test @@ -2,6 +2,9 @@ -- source include/have_multi_ndb.inc -- source include/not_embedded.inc +# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. 
+-- source include/have_binlog_format_row.inc + --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/mysql-test/t/ndb_autodiscover3.test b/mysql-test/t/ndb_autodiscover3.test index ed75c89cdd1..5c8a95f302b 100644 --- a/mysql-test/t/ndb_autodiscover3.test +++ b/mysql-test/t/ndb_autodiscover3.test @@ -2,6 +2,9 @@ -- source include/have_multi_ndb.inc -- source include/not_embedded.inc +# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. +-- source include/have_binlog_format_row.inc + --disable_warnings drop table if exists t1, t2; diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test index e882ec794c1..51d01c139f0 100755 --- a/mysql-test/t/ndb_dd_advance.test +++ b/mysql-test/t/ndb_dd_advance.test @@ -28,6 +28,9 @@ -- source include/have_ndb.inc -- source include/not_embedded.inc +# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. +-- source include/have_binlog_format_row.inc + --disable_warnings DROP TABLE IF EXISTS test.t1; DROP TABLE IF EXISTS test.t2; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index d2dc0561955..5b04612bbc5 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -3,6 +3,9 @@ -- source include/not_embedded.inc -- source include/have_binlog_format_statement.inc +# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. +-- source include/have_binlog_format_row.inc + --disable_warnings connection server2; From 5ab4800fc265a7d3526dd00ef6201af2ca7ffec9 Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Fri, 7 Jul 2006 17:50:45 +0200 Subject: [PATCH 70/74] Bug #20897 race condition between drop table and suma drop trigger - keep accounting for outstanding drop trigger requests - also lock table object in suma while doing that (should be impossible right now though since dict serializes all requests) --- storage/ndb/src/kernel/blocks/suma/Suma.cpp | 53 ++++++++++++++------- storage/ndb/src/kernel/blocks/suma/Suma.hpp | 3 +- storage/ndb/src/ndbapi/ndberror.c | 2 + 3 files changed, 41 insertions(+), 17 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index bb42c8874c5..2b746fdbdd8 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -1445,12 +1445,13 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr) tabPtr.p->m_error = 0; tabPtr.p->m_schemaVersion = RNIL; tabPtr.p->m_state = Table::DEFINING; - tabPtr.p->m_hasTriggerDefined[0] = 0; - tabPtr.p->m_hasTriggerDefined[1] = 0; - tabPtr.p->m_hasTriggerDefined[2] = 0; - tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID; + tabPtr.p->m_drop_subbPtr.p = 0; + for (int j= 0; j < 3; j++) + { + tabPtr.p->m_hasTriggerDefined[j] = 0; + tabPtr.p->m_hasOutstandingTriggerReq[j] = 0; + tabPtr.p->m_triggerIds[j] = ILLEGAL_TRIGGER_ID; + } c_tables.add(tabPtr); @@ -2491,6 +2492,13 @@ Suma::execSUB_STOP_REQ(Signal* signal){ DBUG_VOID_RETURN; } + if (tabPtr.p->m_drop_subbPtr.p != 0) { + jam(); + DBUG_PRINT("error", ("table locked")); + sendSubStopRef(signal, 1420); + DBUG_VOID_RETURN; + } + DBUG_PRINT("info",("subscription: %u tableId: %u[i=%u] id: %u key: %u", subPtr.i, subPtr.p->m_tableId, tabPtr.i, subPtr.p->m_subscriptionId,subPtr.p->m_subscriptionKey)); @@ -2543,7 +2551,7 @@ Suma::execSUB_STOP_REQ(Signal* signal){
subPtr.p->m_senderRef = senderRef; // store ref to requestor subPtr.p->m_senderData = senderData; // store ref to requestor - tabPtr.p->m_drop_subbPtr= subbPtr; + tabPtr.p->m_drop_subbPtr = subbPtr; if (subPtr.p->m_state == Subscription::DEFINED) { @@ -2560,6 +2568,7 @@ Suma::execSUB_STOP_REQ(Signal* signal){ tabPtr.p->m_tableId, tabPtr.p->n_subscribers)); tabPtr.p->checkRelease(*this); sendSubStopComplete(signal, tabPtr.p->m_drop_subbPtr); + tabPtr.p->m_drop_subbPtr.p = 0; } else { @@ -2894,6 +2903,9 @@ Suma::Table::dropTrigger(Signal* signal,Suma& suma) jam(); DBUG_ENTER("Suma::dropTrigger"); + m_hasOutstandingTriggerReq[0] = + m_hasOutstandingTriggerReq[1] = + m_hasOutstandingTriggerReq[2] = 1; for(Uint32 j = 0; j<3; j++){ jam(); suma.suma_ndbrequire(m_triggerIds[j] != ILLEGAL_TRIGGER_ID); @@ -2972,14 +2984,18 @@ Suma::Table::runDropTrigger(Signal* signal, suma.suma_ndbrequire(type < 3); suma.suma_ndbrequire(m_triggerIds[type] == triggerId); + suma.suma_ndbrequire(m_hasTriggerDefined[type] > 0); + suma.suma_ndbrequire(m_hasOutstandingTriggerReq[type] == 1); m_hasTriggerDefined[type]--; + m_hasOutstandingTriggerReq[type] = 0; if (m_hasTriggerDefined[type] == 0) { jam(); m_triggerIds[type] = ILLEGAL_TRIGGER_ID; } - if( m_hasTriggerDefined[0] != m_hasTriggerDefined[1] || - m_hasTriggerDefined[0] != m_hasTriggerDefined[2]) + if( m_hasOutstandingTriggerReq[0] || + m_hasOutstandingTriggerReq[1] || + m_hasOutstandingTriggerReq[2]) { // more to come jam(); @@ -2997,6 +3013,7 @@ Suma::Table::runDropTrigger(Signal* signal, checkRelease(suma); suma.sendSubStopComplete(signal, m_drop_subbPtr); + m_drop_subbPtr.p = 0; } void Suma::suma_ndbrequire(bool v) { ndbrequire(v); } @@ -3551,13 +3568,17 @@ Suma::execDROP_TAB_CONF(Signal *signal) DBUG_PRINT("info",("drop table id: %d[i=%u]", tableId, tabPtr.i)); tabPtr.p->m_state = Table::DROPPED; - tabPtr.p->m_hasTriggerDefined[0] = 0; - tabPtr.p->m_hasTriggerDefined[1] = 0; - tabPtr.p->m_hasTriggerDefined[2] = 0; - tabPtr.p->m_triggerIds[0] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[1] = ILLEGAL_TRIGGER_ID; - tabPtr.p->m_triggerIds[2] = ILLEGAL_TRIGGER_ID; - + for (int j= 0; j < 3; j++) + { + if (!tabPtr.p->m_hasOutstandingTriggerReq[j]) + { + tabPtr.p->m_hasTriggerDefined[j] = 0; + tabPtr.p->m_hasOutstandingTriggerReq[j] = 0; + tabPtr.p->m_triggerIds[j] = ILLEGAL_TRIGGER_ID; + } + else + tabPtr.p->m_hasTriggerDefined[j] = 1; + } if (senderRef == 0) { DBUG_VOID_RETURN; diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp index 51f5fa4a8c8..4408d6aff8d 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp @@ -301,7 +301,8 @@ public: union { Uint32 m_tableId; Uint32 key; }; Uint32 m_schemaVersion; - Uint32 m_hasTriggerDefined[3]; // Insert/Update/Delete + Uint8 m_hasTriggerDefined[3]; // Insert/Update/Delete + Uint8 m_hasOutstandingTriggerReq[3]; // Insert/Update/Delete Uint32 m_triggerIds[3]; // Insert/Update/Delete Uint32 m_error; diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index 486d78538f0..d0d26c19cfa 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -484,6 +484,8 @@ ErrorBundle ErrorCodes[] = { { 1418, DMEC, SE, "Subscription dropped, no new subscribers allowed" }, { 1419, DMEC, SE, "Subscription already dropped" }, + { 1420, DMEC, TR, "Subscriber manager busy with adding/removing a table" }, + { 4004, DMEC, AE, "Attribute name not found in the Table" }, { 4100, DMEC, AE, 
"Status Error in NDB" }, From 4f7df01b4f50ab47fddf8cb64dfc9839f9becd2c Mon Sep 17 00:00:00 2001 From: "tomas@poseidon.ndb.mysql.com" <> Date: Sat, 8 Jul 2006 03:26:13 +0200 Subject: [PATCH 71/74] Bug #20843 tests fails randomly with assertion in completeClusterFailed Bug #20902 Alter table invalid schema version with statement-based binlogging (latter caused by the above) --- mysql-test/t/ndb_alter_table3.test | 3 --- mysql-test/t/ndb_autodiscover3.test | 4 ---- mysql-test/t/ndb_dd_advance.test | 3 --- mysql-test/t/ndb_multi.test | 4 ---- sql/ha_ndbcluster.cc | 4 ++-- sql/ha_ndbcluster_binlog.cc | 33 +++++++++++++++++------------ 6 files changed, 22 insertions(+), 29 deletions(-) diff --git a/mysql-test/t/ndb_alter_table3.test b/mysql-test/t/ndb_alter_table3.test index b0295834563..a5fe613adcf 100644 --- a/mysql-test/t/ndb_alter_table3.test +++ b/mysql-test/t/ndb_alter_table3.test @@ -2,9 +2,6 @@ -- source include/have_multi_ndb.inc -- source include/not_embedded.inc -# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. --- source include/have_binlog_format_row.inc - --disable_warnings DROP TABLE IF EXISTS t1; --enable_warnings diff --git a/mysql-test/t/ndb_autodiscover3.test b/mysql-test/t/ndb_autodiscover3.test index 5c8a95f302b..5f6d457d140 100644 --- a/mysql-test/t/ndb_autodiscover3.test +++ b/mysql-test/t/ndb_autodiscover3.test @@ -2,10 +2,6 @@ -- source include/have_multi_ndb.inc -- source include/not_embedded.inc -# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. --- source include/have_binlog_format_row.inc - - --disable_warnings drop table if exists t1, t2; --enable_warnings diff --git a/mysql-test/t/ndb_dd_advance.test b/mysql-test/t/ndb_dd_advance.test index 51d01c139f0..e882ec794c1 100755 --- a/mysql-test/t/ndb_dd_advance.test +++ b/mysql-test/t/ndb_dd_advance.test @@ -28,9 +28,6 @@ -- source include/have_ndb.inc -- source include/not_embedded.inc -# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. --- source include/have_binlog_format_row.inc - --disable_warnings DROP TABLE IF EXISTS test.t1; DROP TABLE IF EXISTS test.t2; diff --git a/mysql-test/t/ndb_multi.test b/mysql-test/t/ndb_multi.test index 5b04612bbc5..36018e6c679 100644 --- a/mysql-test/t/ndb_multi.test +++ b/mysql-test/t/ndb_multi.test @@ -3,10 +3,6 @@ -- source include/not_embedded.inc -- source include/have_binlog_format_statement.inc -# BUG#20902: Test fails in statement-based binlogging mode. Remove when fixed. --- source include/have_binlog_format_row.inc - - --disable_warnings connection server2; drop table if exists t1, t2, t3, t4; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 9a85d0888b2..8b17dae9d7e 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4781,7 +4781,7 @@ int ha_ndbcluster::create(const char *name, expect it to be there. */ if (!ndbcluster_create_event(ndb, m_table, event_name.c_ptr(), share, - share && do_event_op /* push warning */)) + share && do_event_op ? 2 : 1/* push warning */)) { if (ndb_extra_logging) sql_print_information("NDB Binlog: CREATE TABLE Event: %s", @@ -5175,7 +5175,7 @@ int ha_ndbcluster::rename_table(const char *from, const char *to) const NDBTAB *ndbtab= ndbtab_g2.get_table(); if (!ndbcluster_create_event(ndb, ndbtab, event_name.c_ptr(), share, - share && ndb_binlog_running /* push warning */)) + share && ndb_binlog_running ? 
2 : 1/* push warning */)) { if (ndb_extra_logging) sql_print_information("NDB Binlog: RENAME Event: %s", diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index 8b04f263b26..8e9f0077dd0 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -1220,7 +1220,7 @@ int ndbcluster_log_schema_op(THD *thd, NDB_SHARE *share, type_str= "create table"; break; case SOT_ALTER_TABLE: - type_str= "create table"; + type_str= "alter table"; break; case SOT_DROP_DB: type_str= "drop db"; @@ -2500,7 +2500,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, /* failed, print a warning */ - if (push_warning) + if (push_warning > 1) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, @@ -2528,7 +2528,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, if (dict->getNdbError().code == NDB_INVALID_SCHEMA_OBJECT && dict->dropEvent(my_event.getName())) { - if (push_warning) + if (push_warning > 1) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, @@ -2547,7 +2547,7 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, */ if (dict->createEvent(my_event)) { - if (push_warning) + if (push_warning > 1) push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, ER_GET_ERRMSG, ER(ER_GET_ERRMSG), dict->getNdbError().code, @@ -3442,24 +3442,31 @@ restart: // wait for the first event thd->proc_info= "Waiting for first event from ndbcluster"; DBUG_PRINT("info", ("Waiting for the first event")); - int schema_res= 0, res= 0; - Uint64 schema_gci= 0, gci= 0; - while (schema_res == 0 && !abort_loop) + int schema_res, res; + Uint64 schema_gci; + do { + if (abort_loop) + goto err; schema_res= s_ndb->pollEvents(100, &schema_gci); - } - // now check that we have epochs consistant with what we had before the restart - DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci)); - if (schema_res > 0) + } while (ndb_latest_received_binlog_epoch == schema_gci); + if (ndb_binlog_running) { - while (res >= 0 && gci < schema_gci && !abort_loop) + Uint64 gci= i_ndb->getLatestGCI(); + while (gci < schema_gci || gci == ndb_latest_received_binlog_epoch) { - res= i_ndb->pollEvents(100, &gci); + if (abort_loop) + goto err; + res= i_ndb->pollEvents(10, &gci); } if (gci > schema_gci) { schema_gci= gci; } + } + // now check that we have epochs consistant with what we had before the restart + DBUG_PRINT("info", ("schema_res: %d schema_gci: %d", schema_res, schema_gci)); + { i_ndb->flushIncompleteEvents(schema_gci); s_ndb->flushIncompleteEvents(schema_gci); if (schema_gci < ndb_latest_handled_binlog_epoch) From 7c83e6d201239d72aaa551bb990a55ab7c6aed1c Mon Sep 17 00:00:00 2001 From: "pekka@orca.ndb.mysql.com" <> Date: Mon, 10 Jul 2006 13:44:15 +0200 Subject: [PATCH 72/74] ndb - bug#18781 : 5.0 : add NODE_START_REP from 5.1 (re-commit, try to by-pass merge jam) --- ndb/include/kernel/GlobalSignalNumbers.h | 1 + ndb/src/common/debugger/signaldata/SignalNames.cpp | 2 ++ ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp | 7 +++++++ ndb/src/kernel/vm/SimulatedBlock.cpp | 6 ++++++ ndb/src/kernel/vm/SimulatedBlock.hpp | 1 + 5 files changed, 17 insertions(+) diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index d60f7a2c582..4c28d4c3dd2 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -587,6 +587,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; 
#define GSN_BLOCK_COMMIT_ORD 485 #define GSN_UNBLOCK_COMMIT_ORD 486 +#define GSN_NODE_START_REP 502 #define GSN_NODE_STATE_REP 487 #define GSN_CHANGE_NODE_STATE_REQ 488 #define GSN_CHANGE_NODE_STATE_CONF 489 diff --git a/ndb/src/common/debugger/signaldata/SignalNames.cpp b/ndb/src/common/debugger/signaldata/SignalNames.cpp index 5162679017a..719397dd10d 100644 --- a/ndb/src/common/debugger/signaldata/SignalNames.cpp +++ b/ndb/src/common/debugger/signaldata/SignalNames.cpp @@ -399,6 +399,8 @@ const GsnName SignalNames [] = { ,{ GSN_TUP_COM_UNBLOCK, "TUP_COM_UNBLOCK" } ,{ GSN_DUMP_STATE_ORD, "DUMP_STATE_ORD" } + ,{ GSN_NODE_START_REP, "NODE_START_REP" } + ,{ GSN_START_INFOREQ, "START_INFOREQ" } ,{ GSN_START_INFOREF, "START_INFOREF" } ,{ GSN_START_INFOCONF, "START_INFOCONF" } diff --git a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index 176bab0d4bf..2a3207aac61 100644 --- a/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -591,6 +591,13 @@ Ndbcntr::execCNTR_START_REP(Signal* signal){ Uint32 nodeId = signal->theData[0]; c_startedNodes.set(nodeId); c_start.m_starting.clear(nodeId); + + /** + * Inform all interested blocks that node has started + */ + for(Uint32 i = 0; i < ALL_BLOCKS_SZ; i++) + sendSignal(ALL_BLOCKS[i].Ref, GSN_NODE_START_REP, signal, 1, JBB); diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp --- a/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/ndb/src/kernel/vm/SimulatedBlock.cpp + addRecSignal(GSN_NODE_START_REP, &SimulatedBlock::execNODE_START_REP, true); +void +SimulatedBlock::execNODE_START_REP(Signal* signal) +{ +} + diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/ndb/src/kernel/vm/SimulatedBlock.hpp --- a/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/ndb/src/kernel/vm/SimulatedBlock.hpp void execSIGNAL_DROPPED_REP(Signal* signal); void execCONTINUE_FRAGMENTED(Signal* signal); + void execNODE_START_REP(Signal* signal); Uint32 c_fragmentIdCounter; ArrayPool<FragmentInfo> c_fragmentInfoPool; From 9f3b47e53cfd6be1f9c4aa1c1557093a2323e267 Mon Sep 17 00:00:00 2001 From: "pekka@orca.ndb.mysql.com" <> Date: Mon, 10 Jul 2006 13:59:13 +0200 Subject: [PATCH 73/74] ndb - bug#18781: close a tiny window (re-commit, try to by-pass merge jam) --- ndb/src/kernel/blocks/dbdict/DictLock.txt | 12 ++++--- ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 3 ++ ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 40 +++++++++++++---------- ndb/src/kernel/vm/SimulatedBlock.cpp | 9 +++++ ndb/src/kernel/vm/SimulatedBlock.hpp | 1 + ndb/test/run-test/daily-basic-tests.txt | 4 +++ 6 files changed, 47 insertions(+), 22 deletions(-) diff --git a/ndb/src/kernel/blocks/dbdict/DictLock.txt b/ndb/src/kernel/blocks/dbdict/DictLock.txt index 17f24119e9d..72e23ed15a5 100644 --- a/ndb/src/kernel/blocks/dbdict/DictLock.txt +++ b/ndb/src/kernel/blocks/dbdict/DictLock.txt @@ -85,10 +85,14 @@ DIH/s START_MECONF DIH/s -* sp7 - release DICT lock +* (copy data, omitted) -DIH/s - DICT_UNLOCK_ORD - DICT/m +* SL_STARTED - release DICT lock + +CNTR/s + NODE_START_REP + DIH/s + DICT_UNLOCK_ORD + DICT/m # vim: set et sw=4: diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index f4a33df9805..5c2cfac5eb1 100644 --- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -1599,6 +1599,9 @@ private: */ void startInfoReply(Signal *, Uint32 nodeId); + // DIH specifics for execNODE_START_REP (sendDictUnlockOrd) + void exec_node_start_rep(Signal* signal); + /* * Lock master DICT. Only current use is by starting node * during NR. A pool of slave records is convenient anyway.
diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 352053bef10..50d5c6b660f 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1356,24 +1356,6 @@ void Dbdih::execNDB_STTOR(Signal* signal) } ndbrequire(false); break; - case ZNDB_SPH7: - jam(); - switch (typestart) { - case NodeState::ST_INITIAL_START: - case NodeState::ST_SYSTEM_RESTART: - jam(); - ndbsttorry10Lab(signal, __LINE__); - return; - case NodeState::ST_NODE_RESTART: - case NodeState::ST_INITIAL_NODE_RESTART: - jam(); - sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart); - c_dictLockSlavePtrI_nodeRestart = RNIL; - ndbsttorry10Lab(signal, __LINE__); - return; - } - ndbrequire(false); - break; default: jam(); ndbsttorry10Lab(signal, __LINE__); @@ -1381,6 +1363,27 @@ void Dbdih::execNDB_STTOR(Signal* signal) }//switch }//Dbdih::execNDB_STTOR() +void +Dbdih::exec_node_start_rep(Signal* signal) +{ + /* + * Send DICT_UNLOCK_ORD when this node is SL_STARTED. + * + * Sending it before (sp 7) conflicts with code which assumes + * SL_STARTING means we are in copy phase of NR. + * + * NodeState::starting.restartType is not supposed to be used + * when SL_STARTED. Also it seems NODE_START_REP can arrive twice. + * + * For these reasons there are no consistency checks and + * we rely on c_dictLockSlavePtrI_nodeRestart alone. + */ + if (c_dictLockSlavePtrI_nodeRestart != RNIL) { + sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart); + c_dictLockSlavePtrI_nodeRestart = RNIL; + } +} + void Dbdih::createMutexes(Signal * signal, Uint32 count){ Callback c = { safe_cast(&Dbdih::createMutex_done), count }; @@ -1605,6 +1608,7 @@ void Dbdih::nodeRestartPh2Lab(Signal* signal) void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret) { ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); + ndbrequire(data != RNIL); c_dictLockSlavePtrI_nodeRestart = data; nodeRestartPh2Lab2(signal); diff --git a/ndb/src/kernel/vm/SimulatedBlock.cpp b/ndb/src/kernel/vm/SimulatedBlock.cpp index bbf13528c5c..b4787209d55 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.cpp +++ b/ndb/src/kernel/vm/SimulatedBlock.cpp @@ -916,6 +916,15 @@ SimulatedBlock::execCONTINUE_FRAGMENTED(Signal * signal){ void SimulatedBlock::execNODE_START_REP(Signal* signal) +{ + // common stuff for all blocks + + // block specific stuff by virtual method override (default empty) + exec_node_start_rep(signal); +} + +void +SimulatedBlock::exec_node_start_rep(Signal* signal) { } diff --git a/ndb/src/kernel/vm/SimulatedBlock.hpp b/ndb/src/kernel/vm/SimulatedBlock.hpp index f7ca4ecbf38..4a3620a00ab 100644 --- a/ndb/src/kernel/vm/SimulatedBlock.hpp +++ b/ndb/src/kernel/vm/SimulatedBlock.hpp @@ -424,6 +424,7 @@ private: void execSIGNAL_DROPPED_REP(Signal* signal); void execCONTINUE_FRAGMENTED(Signal* signal); void execNODE_START_REP(Signal* signal); + virtual void exec_node_start_rep(Signal* signal); Uint32 c_fragmentIdCounter; ArrayPool<FragmentInfo> c_fragmentInfoPool; diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 6077c9fb536..094c1edede6 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -500,6 +500,10 @@ max-time: 1500 cmd: testDict args: -n TemporaryTables T1 T6 T7 T8 +max-time: 1500 +cmd: testDict +args: -n Restart_NR2 T1 + # # TEST NDBAPI # From 4163c91d8e4a101c19fbadb423c116d1aba78533 Mon Sep 17 00:00:00 2001 From: "pekka@orca.ndb.mysql.com" <> Date: Mon, 10 Jul 2006
15:43:47 +0200 Subject: [PATCH 74/74] ndb - debug stuff in LQH --- storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 702cde0fddc..695580d556c 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -498,6 +498,7 @@ void Dblqh::execSTTOR(Signal* signal) csignalKey = signal->theData[6]; #if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR char *name; + FILE *out = 0; #endif switch (tstartPhase) { case ZSTART_PHASE1: @@ -509,8 +510,14 @@ void Dblqh::execSTTOR(Signal* signal) sendsttorryLab(signal); #if defined VM_TRACE || defined ERROR_INSERT || defined NDBD_TRACENR - name = NdbConfig_SignalLogFileName(getOwnNodeId()); - tracenrout = new NdbOut(* new FileOutputStream(fopen(name, "w+"))); +#ifdef VM_TRACE + out = globalSignalLoggers.getOutputStream(); +#endif + if (out == 0) { + name = NdbConfig_SignalLogFileName(getOwnNodeId()); + out = fopen(name, "a"); + } + tracenrout = new NdbOut(* new FileOutputStream(out)); #endif #ifdef ERROR_INSERT
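Patch 73 above also shows the shape of the SimulatedBlock change: the base class keeps the single NODE_START_REP entry point and each block overrides a virtual hook for its specific work (DIH uses it to release the DICT lock at SL_STARTED). A minimal standalone model of that dispatch; the class names mirror the kernel blocks, but this is an illustration, not kernel code:

    #include <cstdio>

    struct Signal {};

    // Base block: owns the signal entry point, delegates the specific part.
    class SimulatedBlock
    {
    public:
      virtual ~SimulatedBlock() {}
      void execNODE_START_REP(Signal* signal)
      {
        // common stuff for all blocks would go here
        exec_node_start_rep(signal);   // block-specific, default empty
      }
    protected:
      virtual void exec_node_start_rep(Signal*) {}
    };

    // DIH's override releases the DICT lock once the starting node is
    // fully started, matching the DictLock.txt protocol above.
    class Dbdih : public SimulatedBlock
    {
    protected:
      virtual void exec_node_start_rep(Signal*)
      {
        std::printf("DIH: sendDictUnlockOrd\n");
      }
    };

    int main()
    {
      Signal sig;
      Dbdih dih;
      dih.execNODE_START_REP(&sig);    // triggers the DIH-specific action
      return 0;
    }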