diff --git a/config/ac-macros/compiler_flag.m4 b/config/ac-macros/compiler_flag.m4
index a236f61a198..88097c7a62e 100644
--- a/config/ac-macros/compiler_flag.m4
+++ b/config/ac-macros/compiler_flag.m4
@@ -38,3 +38,25 @@ AC_DEFUN([AC_SYS_OS_COMPILER_FLAG],
   fi
 ])
 
+AC_DEFUN([AC_CHECK_NOEXECSTACK],
+[
+  AC_CACHE_CHECK(whether --noexecstack is desirable for .S files,
+                 mysql_cv_as_noexecstack, [dnl
+  cat > conftest.c <<EOF
+void foo (void) { }
+EOF
+  if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS
+                     -S -o conftest.s conftest.c 1>&AS_MESSAGE_LOG_FD]) \
+     && grep .note.GNU-stack conftest.s >/dev/null \
+     && AC_TRY_COMMAND([${CC-cc} $CCASFLAGS $CPPFLAGS -Wa,--noexecstack
+                       -c -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD])
+  then
+    mysql_cv_as_noexecstack=yes
+  else
+    mysql_cv_as_noexecstack=no
+  fi
+  rm -f conftest*])
+  if test $mysql_cv_as_noexecstack = yes; then
+    CCASFLAGS="$CCASFLAGS -Wa,--noexecstack"
+  fi
+])
diff --git a/configure.in b/configure.in
index fd2cf3baf97..fe111b8a671 100644
--- a/configure.in
+++ b/configure.in
@@ -515,6 +515,10 @@ AM_PROG_CC_STDC
 
 # We need an assembler, too
 AM_PROG_AS
+CCASFLAGS="$CCASFLAGS $ASFLAGS"
+
+# Check if we need noexec stack for assembler
+AC_CHECK_NOEXECSTACK
 
 if test "$am_cv_prog_cc_stdc" = "no"
 then
diff --git a/include/sql_common.h b/include/sql_common.h
index c07a4a831bb..9fc8d4f457b 100644
--- a/include/sql_common.h
+++ b/include/sql_common.h
@@ -22,6 +22,7 @@ extern const char *not_error_sqlstate;
 extern "C" {
 #endif
 
+extern CHARSET_INFO *default_client_charset_info;
 MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields,
                            my_bool default_value, uint server_capabilities);
 void free_rows(MYSQL_DATA *cur);
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index a2fdae994b1..a2c570c2fbc 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -37,6 +37,8 @@ extern "C" int check_user(THD *thd, enum enum_server_command command,
                        const char *passwd, uint passwd_len, const char *db,
                        bool check_count);
+void thd_init_client_charset(THD *thd, uint cs_number);
+
 C_MODE_START
+#include 
@@ -600,11 +602,14 @@ err:
   return NULL;
 }
 
+
 #ifdef NO_EMBEDDED_ACCESS_CHECKS
 int check_embedded_connection(MYSQL *mysql)
 {
   int result;
   THD *thd= (THD*)mysql->thd;
+  thd_init_client_charset(thd, mysql->charset->number);
+  thd->update_charset();
   Security_context *sctx= thd->security_ctx;
   sctx->host_or_ip= sctx->host= (char*) my_localhost;
   strmake(sctx->priv_host, (char*) my_localhost, MAX_HOSTNAME-1);
@@ -623,6 +628,8 @@ int check_embedded_connection(MYSQL *mysql)
   char scramble_buff[SCRAMBLE_LENGTH];
   int passwd_len;
 
+  thd_init_client_charset(thd, mysql->charset->number);
+  thd->update_charset();
   if (mysql->options.client_ip)
   {
     sctx->host= my_strdup(mysql->options.client_ip, MYF(0));
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index cad1bd4c47b..5df61783451 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -90,49 +90,7 @@ static void end_server(MYSQL *mysql)
 }
 
-static int mysql_init_charset(MYSQL *mysql)
-{
-  char charset_name_buff[16], *charset_name;
-
-  if ((charset_name=mysql->options.charset_name))
-  {
-    const char *save=charsets_dir;
-    if (mysql->options.charset_dir)
-      charsets_dir=mysql->options.charset_dir;
-    mysql->charset=get_charset_by_name(mysql->options.charset_name,
-                                       MYF(MY_WME));
-    charsets_dir=save;
-  }
-  else if (mysql->server_language)
-  {
-    charset_name=charset_name_buff;
-    sprintf(charset_name,"%d",mysql->server_language); /* In case of errors */
-    mysql->charset=get_charset((uint8) mysql->server_language, MYF(MY_WME));
-  }
-  else
-    mysql->charset=default_charset_info;
-
-  if (!mysql->charset)
-  {
-
mysql->net.last_errno=CR_CANT_READ_CHARSET; - strmov(mysql->net.sqlstate, "HY0000"); - if (mysql->options.charset_dir) - sprintf(mysql->net.last_error,ER(mysql->net.last_errno), - charset_name ? charset_name : "unknown", - mysql->options.charset_dir); - else - { - char cs_dir_name[FN_REFLEN]; - get_charsets_dir(cs_dir_name); - sprintf(mysql->net.last_error,ER(mysql->net.last_errno), - charset_name ? charset_name : "unknown", - cs_dir_name); - } - return mysql->net.last_errno; - } - return 0; -} - +int mysql_init_character_set(MYSQL *mysql); MYSQL * STDCALL mysql_real_connect(MYSQL *mysql,const char *host, const char *user, @@ -222,10 +180,10 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, init_embedded_mysql(mysql, client_flag, db_name); - if (check_embedded_connection(mysql)) + if (mysql_init_character_set(mysql)) goto error; - if (mysql_init_charset(mysql)) + if (check_embedded_connection(mysql)) goto error; mysql->server_status= SERVER_STATUS_AUTOCOMMIT; diff --git a/mysql-test/r/ctype_ucs2_def.result b/mysql-test/r/ctype_ucs2_def.result index 897dbac251c..2f9dc4ae616 100644 --- a/mysql-test/r/ctype_ucs2_def.result +++ b/mysql-test/r/ctype_ucs2_def.result @@ -1,3 +1,6 @@ +show variables like 'collation_server'; +Variable_name Value +collation_server ucs2_unicode_ci show variables like "%character_set_ser%"; Variable_name Value character_set_server ucs2 diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index f11da4ee62f..9194439efd7 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1558,6 +1558,8 @@ id 3 4 5 +DROP TABLE federated.t1; +DROP TABLE federated.t1; DROP TABLE IF EXISTS federated.bug_17377_table; CREATE TABLE federated.bug_17377_table ( `fld_cid` bigint(20) NOT NULL auto_increment, @@ -1601,6 +1603,92 @@ fld_cid fld_name fld_parentid fld_delt 5 Torkel 0 0 DROP TABLE federated.t1; DROP TABLE federated.bug_17377_table; +create table federated.t1 (i1 int, i2 int, i3 int); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); +create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 2 2 +3 7 12 +4 5 2 +9 10 15 +select * from federated.t2; +id c1 c2 +9 abc def +5 opq lmn +2 test t t test +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 15 2 +3 7 12 +4 5 2 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +5 opq lmn +9 abc ppc +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +2 15 2 +3 7 12 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +9 abc ppc +drop table federated.t1, federated.t2; +drop table federated.t1, federated.t2; +create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); +create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED 
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 2 2 +3 7 12 +4 5 2 +9 10 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t t test +5 opq lmn +9 abc def +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 15 2 +3 7 12 +4 5 2 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +5 opq lmn +9 abc ppc +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +2 15 2 +3 7 12 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +9 abc ppc +drop table federated.t1, federated.t2; +drop table federated.t1, federated.t2; DROP TABLE IF EXISTS federated.t1; DROP DATABASE IF EXISTS federated; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 13e2d56d83e..7a0f689df36 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -694,3 +694,13 @@ alter table t1 add primary key pti(pt); ERROR 42000: BLOB/TEXT column 'pt' used in key specification without a key length alter table t1 add primary key pti(pt(20)); drop table t1; +create table t1 (g GEOMETRY); +select * from t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 t1 g g 255 4294967295 0 Y 144 0 63 +g +select asbinary(g) from t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def asbinary(g) 252 8192 0 Y 128 0 63 +asbinary(g) +drop table t1; diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 63af90aa0f1..64969fcdf44 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -369,11 +369,11 @@ show keys from v4; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment select * from information_schema.views where TABLE_NAME like "v%"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v0 select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER -NULL test v1 select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER -NULL test v2 select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER -NULL test v3 select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER -NULL test v4 select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v0 /* ALGORITHM=UNDEFINED */ select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE 
NO root@localhost DEFINER +NULL test v1 /* ALGORITHM=UNDEFINED */ select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER +NULL test v3 /* ALGORITHM=UNDEFINED */ select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v4 /* ALGORITHM=UNDEFINED */ select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER drop view v0, v1, v2, v3, v4; create table t1 (a int); grant select,update,insert on t1 to mysqltest_1@localhost; @@ -464,9 +464,9 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION; create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION; select * from information_schema.views; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER -NULL test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER -NULL test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER +NULL test v1 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER +NULL test v3 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER grant select (a) on test.t1 to joe@localhost with grant option; select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES; GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE @@ -1121,7 +1121,7 @@ select * from information_schema.views where table_name='v1' or table_name='v2'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE NULL test v1 NONE YES root@localhost DEFINER -NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER drop view v1, v2; drop table t1; drop user mysqltest_1@localhost; diff --git a/mysql-test/r/sp-security.result b/mysql-test/r/sp-security.result index 04f2f58ba37..a53b4c4d246 100644 --- a/mysql-test/r/sp-security.result +++ b/mysql-test/r/sp-security.result @@ -420,3 +420,34 @@ ERROR HY000: There is no 'mysqltest_1'@'localhost' registered ---> connection: root DROP USER mysqltest_2@localhost; DROP DATABASE mysqltest; +GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO +user19857@localhost; +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; +Host User Password +localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C + +---> connection: mysqltest_2_con +use test; +CREATE PROCEDURE sp19857() DETERMINISTIC +BEGIN +DECLARE a INT; +SET a=1; +SELECT a; +END // +SHOW CREATE PROCEDURE test.sp19857; +Procedure sql_mode Create Procedure +sp19857 CREATE DEFINER=`user19857`@`localhost` PROCEDURE `sp19857`() + 
DETERMINISTIC +BEGIN +DECLARE a INT; +SET a=1; +SELECT a; +END +DROP PROCEDURE IF EXISTS test.sp19857; + +---> connection: root +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; +Host User Password +localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C +DROP USER user19857@localhost; diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index cd51ff9485f..a0e516d2397 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -421,6 +421,28 @@ set tmp_table_size=100; set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; +select @@session.insert_id; +@@session.insert_id +1 +set @save_insert_id=@@session.insert_id; +set session insert_id=20; +select @@session.insert_id; +@@session.insert_id +20 +set session last_insert_id=100; +select @@session.insert_id; +@@session.insert_id +20 +select @@session.last_insert_id; +@@session.last_insert_id +100 +select @@session.insert_id; +@@session.insert_id +20 +set @@session.insert_id=@save_insert_id; +select @@session.insert_id; +@@session.insert_id +1 create table t1 (a int not null auto_increment, primary key(a)); create table t2 (a int not null auto_increment, primary key(a)); insert into t1 values(null),(null),(null); diff --git a/mysql-test/t/ctype_ucs2_def-master.opt b/mysql-test/t/ctype_ucs2_def-master.opt index 1f884ff1d67..a0b5b061860 100644 --- a/mysql-test/t/ctype_ucs2_def-master.opt +++ b/mysql-test/t/ctype_ucs2_def-master.opt @@ -1 +1 @@ ---default-character-set=ucs2 --default-collation=ucs2_unicode_ci +--default-collation=ucs2_unicode_ci --default-character-set=ucs2 diff --git a/mysql-test/t/ctype_ucs2_def.test b/mysql-test/t/ctype_ucs2_def.test index fb174d551cf..00f636d79dc 100644 --- a/mysql-test/t/ctype_ucs2_def.test +++ b/mysql-test/t/ctype_ucs2_def.test @@ -1,3 +1,8 @@ +# +# MySQL Bug#15276: MySQL ignores collation-server +# +show variables like 'collation_server'; + # # Bug#18004 Connecting crashes server when default charset is UCS2 # diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index 80b31c610a2..2eacc2ba990 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1254,6 +1254,10 @@ SELECT LAST_INSERT_ID(); INSERT INTO federated.t1 VALUES (); SELECT LAST_INSERT_ID(); SELECT * FROM federated.t1; +DROP TABLE federated.t1; + +connection slave; +DROP TABLE federated.t1; # # Bug#17377 Federated Engine returns wrong Data, always the rows @@ -1309,5 +1313,54 @@ DROP TABLE federated.t1; connection slave; DROP TABLE federated.bug_17377_table; +# +# BUG 19773 Crash when using multi-table updates, deletes +# with federated tables +# +connection slave; +create table federated.t1 (i1 int, i2 int, i3 int); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +select * from federated.t2; +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by 
id; +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +drop table federated.t1, federated.t2; +connection slave; +drop table federated.t1, federated.t2; + +# Test multi updates and deletes with keys +connection slave; +create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); + +connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT +eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +drop table federated.t1, federated.t2; + +connection slave; +drop table federated.t1, federated.t2; source include/federated_cleanup.inc; diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index bb3f621d194..4c6ff9b2fe7 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -409,3 +409,10 @@ create table t1(pt GEOMETRY); alter table t1 add primary key pti(pt); alter table t1 add primary key pti(pt(20)); drop table t1; + +--enable_metadata +create table t1 (g GEOMETRY); +select * from t1; +select asbinary(g) from t1; +--disable_metadata +drop table t1; diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index b6a552adbde..7e4fedb297d 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -610,7 +610,7 @@ drop database db1; # BUG#15328 Segmentation fault occured if my.cnf is invalid for escape sequence # ---exec $MYSQL_MY_PRINT_DEFAULTS --defaults-extra-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump +--exec $MYSQL_MY_PRINT_DEFAULTS --config-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump # diff --git a/mysql-test/t/sp-security.test b/mysql-test/t/sp-security.test index a8c3c0a22eb..d323b180216 100644 --- a/mysql-test/t/sp-security.test +++ b/mysql-test/t/sp-security.test @@ -744,4 +744,50 @@ DROP USER mysqltest_2@localhost; DROP DATABASE mysqltest; +# +# Bug#19857 - When a user with CREATE ROUTINE priv creates a routine, +# it results in NULL p/w +# + +# Can't test with embedded server that doesn't support grants + +GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO +user19857@localhost; +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; + +--connect (mysqltest_2_con,localhost,user19857,meow,test) +--echo +--echo ---> connection: mysqltest_2_con +--connection mysqltest_2_con + +use test; + +DELIMITER //; + CREATE PROCEDURE sp19857() DETERMINISTIC + BEGIN + DECLARE a INT; + SET a=1; + SELECT a; + END // +DELIMITER ;// + +SHOW 
CREATE PROCEDURE test.sp19857; + +--disconnect mysqltest_2_con +--connect (mysqltest_2_con,localhost,user19857,meow,test) +--connection mysqltest_2_con + +DROP PROCEDURE IF EXISTS test.sp19857; + +--echo +--echo ---> connection: root +--connection con1root + +--disconnect mysqltest_2_con + +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; + +DROP USER user19857@localhost; + # End of 5.0 bugs. diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index be1731e7493..68efcafd1e0 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -301,6 +301,22 @@ set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; +# +# Bugs: #20392: INSERT_ID session variable has weird value +# +select @@session.insert_id; +set @save_insert_id=@@session.insert_id; +set session insert_id=20; +select @@session.insert_id; + +set session last_insert_id=100; +select @@session.insert_id; +select @@session.last_insert_id; +select @@session.insert_id; + +set @@session.insert_id=@save_insert_id; +select @@session.insert_id; + # # key buffer # diff --git a/mysys/my_init.c b/mysys/my_init.c index 9b8d4db172f..8346fab95da 100644 --- a/mysys/my_init.c +++ b/mysys/my_init.c @@ -245,6 +245,22 @@ void setEnvString(char *ret, const char *name, const char *value) DBUG_VOID_RETURN ; } +/* + my_paramter_handler + Invalid paramter handler we will use instead of the one "baked" into the CRT + for MSC v8. This one just prints out what invalid parameter was encountered. + By providing this routine, routines like lseek will return -1 when we expect them + to instead of crash. +*/ +void my_parameter_handler(const wchar_t * expression, const wchar_t * function, + const wchar_t * file, unsigned int line, + uintptr_t pReserved) +{ + DBUG_PRINT("my",("Expression: %s function: %s file: %s, line: %d", + expression, function, file, line)); +} + + static void my_win_init(void) { HKEY hSoftMysql ; @@ -262,12 +278,18 @@ static void my_win_init(void) setlocale(LC_CTYPE, ""); /* To get right sortorder */ -#if defined(_MSC_VER) && (_MSC_VER < 1300) +#if defined(_MSC_VER) +#if _MSC_VER < 1300 /* Clear the OS system variable TZ and avoid the 100% CPU usage Only for old versions of Visual C++ */ _putenv( "TZ=" ); +#endif +#if _MSC_VER >= 1400 + /* this is required to make crt functions return -1 appropriately */ + _set_invalid_parameter_handler(my_parameter_handler); +#endif #endif _tzset(); diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index d60f7a2c582..ca82806f4b1 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -607,8 +607,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_WAIT_GCP_REF 500 #define GSN_WAIT_GCP_CONF 501 -/* 502 not used */ - /** * Trigger and index signals */ @@ -678,6 +676,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_BACKUP_FRAGMENT_REF 546 #define GSN_BACKUP_FRAGMENT_CONF 547 +#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575 + #define GSN_STOP_BACKUP_REQ 548 #define GSN_STOP_BACKUP_REF 549 #define GSN_STOP_BACKUP_CONF 550 @@ -727,7 +727,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_SUB_STOP_REQ 572 #define GSN_SUB_STOP_REF 573 #define GSN_SUB_STOP_CONF 574 -/* 575 unused */ +/* 575 used */ #define GSN_SUB_CREATE_REQ 576 #define GSN_SUB_CREATE_REF 577 #define GSN_SUB_CREATE_CONF 578 diff --git a/ndb/include/kernel/signaldata/BackupContinueB.hpp b/ndb/include/kernel/signaldata/BackupContinueB.hpp 
index d3d3f79f310..fe3f48444ec 100644 --- a/ndb/include/kernel/signaldata/BackupContinueB.hpp +++ b/ndb/include/kernel/signaldata/BackupContinueB.hpp @@ -31,7 +31,8 @@ private: BUFFER_UNDERFLOW = 1, BUFFER_FULL_SCAN = 2, BUFFER_FULL_FRAG_COMPLETE = 3, - BUFFER_FULL_META = 4 + BUFFER_FULL_META = 4, + BACKUP_FRAGMENT_INFO = 5 }; }; diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp index 298440ad377..07ab5bc543b 100644 --- a/ndb/include/kernel/signaldata/BackupImpl.hpp +++ b/ndb/include/kernel/signaldata/BackupImpl.hpp @@ -258,15 +258,31 @@ class BackupFragmentConf { friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 6 ); + STATIC_CONST( SignalLength = 8 ); private: Uint32 backupId; Uint32 backupPtr; Uint32 tableId; Uint32 fragmentNo; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint32 noOfRecordsLow; + Uint32 noOfBytesLow; + Uint32 noOfRecordsHigh; + Uint32 noOfBytesHigh; +}; + +class BackupFragmentCompleteRep { +public: + STATIC_CONST( SignalLength = 8 ); + + Uint32 backupId; + Uint32 backupPtr; + Uint32 tableId; + Uint32 fragmentNo; + Uint32 noOfTableRowsLow; + Uint32 noOfFragmentRowsLow; + Uint32 noOfTableRowsHigh; + Uint32 noOfFragmentRowsHigh; }; class StopBackupReq { diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp index e1b8c6203a1..9e34ea3a211 100644 --- a/ndb/include/kernel/signaldata/BackupSignalData.hpp +++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp @@ -201,17 +201,19 @@ class BackupCompleteRep { friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size ); + STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size ); private: Uint32 senderData; Uint32 backupId; Uint32 startGCP; Uint32 stopGCP; - Uint32 noOfBytes; - Uint32 noOfRecords; + Uint32 noOfBytesLow; + Uint32 noOfRecordsLow; Uint32 noOfLogBytes; Uint32 noOfLogRecords; NdbNodeBitmask nodes; + Uint32 noOfBytesHigh; + Uint32 noOfRecordsHigh; }; /** diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index bc4817f0cf3..0a7f6aa3fb3 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -117,9 +117,16 @@ public: CustomTriggerId = 25, FrmLen = 26, FrmData = 27, + FragmentCount = 128, // No of fragments in table (!fragment replicas) FragmentDataLen = 129, FragmentData = 130, // CREATE_FRAGMENTATION reply + + MaxRowsLow = 139, + MaxRowsHigh = 140, + MinRowsLow = 143, + MinRowsHigh = 144, + TableEnd = 999, AttributeName = 1000, // String, Mandatory @@ -263,6 +270,10 @@ public: Uint32 FragmentCount; Uint32 FragmentDataLen; Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2]; + Uint32 MaxRowsLow; + Uint32 MaxRowsHigh; + Uint32 MinRowsLow; + Uint32 MinRowsHigh; void init(); }; diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp index 13dfafcc653..72c1537854c 100644 --- a/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -104,7 +104,7 @@ class LqhFragReq { friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 25 ); + STATIC_CONST( SignalLength = 23 ); enum RequestInfo { CreateInRunning = 0x8000000, @@ -116,27 +116,32 @@ private: Uint32 senderRef; Uint32 fragmentId; Uint32 requestInfo; - Uint32 tableId; - 
Uint32 localKeyLength; Uint32 maxLoadFactor; Uint32 minLoadFactor; Uint32 kValue; - Uint32 lh3DistrBits; - Uint32 lh3PageBits; - Uint32 noOfAttributes; - Uint32 noOfNullAttributes; - Uint32 noOfPagesToPreAllocate; Uint32 schemaVersion; - Uint32 keyLength; Uint32 nextLCP; - Uint32 noOfKeyAttr; - Uint32 noOfNewAttr; // noOfCharsets in upper half - Uint32 checksumIndicator; - Uint32 noOfAttributeGroups; - Uint32 GCPIndicator; + Uint16 noOfNewAttr; + Uint16 noOfCharsets; Uint32 startGci; Uint32 tableType; // DictTabInfo::TableType Uint32 primaryTableId; // table of index or RNIL + Uint16 tableId; + Uint16 localKeyLength; + Uint16 lh3DistrBits; + Uint16 lh3PageBits; + Uint16 noOfAttributes; + Uint16 noOfNullAttributes; + Uint16 noOfPagesToPreAllocate; + Uint16 keyLength; + Uint16 noOfKeyAttr; + Uint8 checksumIndicator; + Uint8 GCPIndicator; + Uint32 noOfAttributeGroups; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; }; class LqhFragConf { diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp index 5fb9d7bcf42..c9f2ad5382f 100644 --- a/ndb/include/kernel/signaldata/TupFrag.hpp +++ b/ndb/include/kernel/signaldata/TupFrag.hpp @@ -30,7 +30,7 @@ class TupFragReq { friend class Dblqh; friend class Dbtup; public: - STATIC_CONST( SignalLength = 14 ); + STATIC_CONST( SignalLength = 17 ); private: Uint32 userPtr; Uint32 userRef; @@ -38,7 +38,18 @@ private: Uint32 tableId; Uint32 noOfAttr; Uint32 fragId; - Uint32 todo[8]; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + Uint32 noOfNullAttr; + Uint32 schemaVersion; + Uint32 noOfKeyAttr; + Uint16 noOfNewAttr; + Uint16 noOfCharsets; + Uint32 checksumIndicator; + Uint32 noOfAttributeGroups; + Uint32 globalCheckpointIdIndicator; }; class TupFragConf { diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 1413931035d..e67a0253096 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -722,6 +722,20 @@ public: */ void setObjectType(Object::Type type); + /** + * Set/Get Maximum number of rows in table (only used to calculate + * number of partitions). + */ + void setMaxRows(Uint64 maxRows); + Uint64 getMaxRows() const; + + /** + * Set/Get Minimum number of rows in table (only used to calculate + * number of partitions). 
+ */ + void setMinRows(Uint64 minRows); + Uint64 getMinRows() const; + /** @} *******************************************************************/ #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp index e9b0188d93b..855db0834bc 100644 --- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp +++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp @@ -100,8 +100,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ BackupFragmentConf* sig = (BackupFragmentConf*)data; fprintf(out, " backupPtr: %d backupId: %d\n", sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n", - sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes); + fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n", + sig->tableId, sig->fragmentNo, + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp index 4b0a0e07b66..27fed22ac72 100644 --- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp @@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ bool printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ BackupCompleteRep* sig = (BackupCompleteRep*)data; - fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n", + fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n", sig->senderData, sig->backupId, - sig->noOfRecords, - sig->noOfBytes); + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 43c129347c0..a1d8d82474d 100644 --- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -48,6 +48,10 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, FragmentCount, FragmentCount), DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES), DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen), + DTIMAP(Table, MaxRowsLow, MaxRowsLow), + DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), + DTIMAP(Table, MinRowsLow, MinRowsLow), + DTIMAP(Table, MinRowsHigh, MinRowsHigh), DTIBREAK(AttributeName) }; @@ -124,6 +128,10 @@ DictTabInfo::Table::init(){ FragmentCount = 0; FragmentDataLen = 0; memset(FragmentData, 0, sizeof(FragmentData)); + MaxRowsLow = 0; + MaxRowsHigh = 0; + MinRowsLow = 0; + MinRowsHigh = 0; } void diff --git a/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/ndb/src/common/debugger/signaldata/LqhFrag.cpp index 6d727959a67..3175582c3a2 100644 --- a/ndb/src/common/debugger/signaldata/LqhFrag.cpp +++ b/ndb/src/common/debugger/signaldata/LqhFrag.cpp @@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n", sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength); - fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n", - sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP); + fprintf(output, " 
maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n", + sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh); + fprintf(output, " schemaVersion: %d nextLCP: %d\n", + sig->schemaVersion, sig->nextLCP); return true; } diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index f9089355475..43c1de5e2b3 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -266,6 +266,65 @@ Backup::execCONTINUEB(Signal* signal) const Uint32 Tdata2 = signal->theData[2]; switch(Tdata0) { + case BackupContinueB::BACKUP_FRAGMENT_INFO: + { + const Uint32 ptr_I = Tdata1; + Uint32 tabPtr_I = Tdata2; + Uint32 fragPtr_I = signal->theData[3]; + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptr_I); + TablePtr tabPtr; + ptr.p->tables.getPtr(tabPtr, tabPtr_I); + FragmentPtr fragPtr; + tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I); + + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); + + const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2; + Uint32 * dst; + if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz)) + { + sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4); + return; + } + + BackupFormat::CtlFile::FragmentInfo * fragInfo = + (BackupFormat::CtlFile::FragmentInfo*)dst; + fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO); + fragInfo->SectionLength = htonl(sz); + fragInfo->TableId = htonl(fragPtr.p->tableId); + fragInfo->FragmentNo = htonl(fragPtr_I); + fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF); + fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32); + fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF); + fragInfo->FilePosHigh = htonl(0 >> 32); + + filePtr.p->operation.dataBuffer.updateWritePtr(sz); + + fragPtr_I++; + if (fragPtr_I == tabPtr.p->fragments.getSize()) + { + signal->theData[0] = tabPtr.p->tableId; + signal->theData[1] = 0; // unlock + EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + + fragPtr_I = 0; + ptr.p->tables.next(tabPtr); + if ((tabPtr_I = tabPtr.i) == RNIL) + { + closeFiles(signal, ptr); + return; + } + } + signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; + signal->theData[1] = ptr_I; + signal->theData[2] = tabPtr_I; + signal->theData[3] = fragPtr_I; + sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); + return; + } case BackupContinueB::START_FILE_THREAD: case BackupContinueB::BUFFER_UNDERFLOW: { @@ -455,7 +514,7 @@ Backup::findTable(const BackupRecordPtr & ptr, return false; } -static Uint32 xps(Uint32 x, Uint64 ms) +static Uint32 xps(Uint64 x, Uint64 ms) { float fx = x; float fs = ms; @@ -469,9 +528,9 @@ static Uint32 xps(Uint32 x, Uint64 ms) } struct Number { - Number(Uint32 r) { val = r;} - Number & operator=(Uint32 r) { val = r; return * this; } - Uint32 val; + Number(Uint64 r) { val = r;} + Number & operator=(Uint64 r) { val = r; return * this; } + Uint64 val; }; NdbOut & @@ -545,8 +604,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal) startTime = NdbTick_CurrentMillisecond() - startTime; ndbout_c("Backup %d has completed", rep->backupId); - const Uint32 bytes = rep->noOfBytes; - const Uint32 records = rep->noOfRecords; + const Uint64 bytes = + rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32); + const Uint64 records = + rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32); Number rps = xps(records, startTime); Number bps = xps(bytes, startTime); @@ -1905,8 +1966,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) const Uint32 tableId = 
conf->tableId; const Uint32 fragmentNo = conf->fragmentNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - const Uint32 noOfBytes = conf->noOfBytes; - const Uint32 noOfRecords = conf->noOfRecords; + const Uint64 noOfBytes = + conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32); + const Uint64 noOfRecords = + conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); @@ -1918,9 +1981,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); + tabPtr.p->noOfRecords += noOfRecords; + FragmentPtr fragPtr; tabPtr.p->fragments.getPtr(fragPtr, fragmentNo); + fragPtr.p->noOfRecords = noOfRecords; + ndbrequire(fragPtr.p->scanned == 0); ndbrequire(fragPtr.p->scanning == 1); ndbrequire(fragPtr.p->node == nodeId); @@ -1944,6 +2011,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) } else { + NodeBitmask nodes = ptr.p->nodes; + nodes.clear(getOwnNodeId()); + if (!nodes.isclear()) + { + BackupFragmentCompleteRep *rep = + (BackupFragmentCompleteRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->backupPtr = ptr.i; + rep->tableId = tableId; + rep->fragmentNo = fragmentNo; + rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF); + rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32); + rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF); + rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32); + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal, + BackupFragmentCompleteRep::SignalLength, JBB); + } nextFragment(signal, ptr); } } @@ -2006,6 +2091,29 @@ err: execABORT_BACKUP_ORD(signal); } +void +Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal) +{ + jamEntry(); + BackupFragmentCompleteRep * rep = + (BackupFragmentCompleteRep*)signal->getDataPtr(); + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, rep->backupPtr); + + TablePtr tabPtr; + ndbrequire(findTable(ptr, tabPtr, rep->tableId)); + + tabPtr.p->noOfRecords = + rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32); + + FragmentPtr fragPtr; + tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo); + + fragPtr.p->noOfRecords = + rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32); +} + /***************************************************************************** * * Master functionallity - Drop triggers @@ -2206,8 +2314,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) rep->senderData = ptr.p->clientData; rep->startGCP = ptr.p->startGCP; rep->stopGCP = ptr.p->stopGCP; - rep->noOfBytes = ptr.p->noOfBytes; - rep->noOfRecords = ptr.p->noOfRecords; + rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); + rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); + rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32); + rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32); rep->noOfLogBytes = ptr.p->noOfLogBytes; rep->noOfLogRecords = ptr.p->noOfLogRecords; rep->nodes = ptr.p->nodes; @@ -2220,12 +2330,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) signal->theData[2] = ptr.p->backupId; signal->theData[3] = ptr.p->startGCP; signal->theData[4] = ptr.p->stopGCP; - signal->theData[5] = ptr.p->noOfBytes; - signal->theData[6] = ptr.p->noOfRecords; + signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); + signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); signal->theData[7] = 
ptr.p->noOfLogBytes; signal->theData[8] = ptr.p->noOfLogRecords; ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32); + signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB); } else { @@ -2988,6 +3100,7 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len) /** * Initialize table object */ + tabPtr.p->noOfRecords = 0; tabPtr.p->schemaVersion = tmpTab.TableVersion; tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes; tabPtr.p->noOfNull = 0; @@ -3695,8 +3808,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) conf->backupPtr = ptr.i; conf->tableId = filePtr.p->tableId; conf->fragmentNo = filePtr.p->fragmentNo; - conf->noOfRecords = op.noOfRecords; - conf->noOfBytes = op.noOfBytes; + conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF); + conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32); + conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF); + conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32); sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, BackupFragmentConf::SignalLength, JBB); @@ -4123,20 +4238,18 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) gcp->StartGCP = htonl(startGCP); gcp->StopGCP = htonl(stopGCP - 1); filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz); - } - { - TablePtr tabPtr; - for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)) { - signal->theData[0] = tabPtr.p->tableId; - signal->theData[1] = 0; // unlock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + + signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; + signal->theData[1] = ptr.i; + signal->theData[2] = tabPtr.i; + signal->theData[3] = 0; + sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); } } - - closeFiles(signal, ptr); } void diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp index c455e32fa67..e37923da749 100644 --- a/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/ndb/src/kernel/blocks/backup/Backup.hpp @@ -68,6 +68,7 @@ protected: void execBACKUP_DATA(Signal* signal); void execSTART_BACKUP_REQ(Signal* signal); void execBACKUP_FRAGMENT_REQ(Signal* signal); + void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal); void execSTOP_BACKUP_REQ(Signal* signal); void execBACKUP_STATUS_REQ(Signal* signal); void execABORT_BACKUP_ORD(Signal* signal); @@ -183,10 +184,12 @@ public: typedef Ptr AttributePtr; struct Fragment { + Uint64 noOfRecords; Uint32 tableId; - Uint32 node; - Uint16 scanned; // 0 = not scanned x = scanned by node x - Uint16 scanning; // 0 = not scanning x = scanning on node x + Uint8 node; + Uint8 scanned; // 0 = not scanned x = scanned by node x + Uint8 scanning; // 0 = not scanning x = scanning on node x + Uint8 unused1; Uint32 nextPool; }; typedef Ptr FragmentPtr; @@ -194,6 +197,8 @@ public: struct Table { Table(ArrayPool &, ArrayPool &); + Uint64 noOfRecords; + Uint32 tableId; Uint32 schemaVersion; Uint32 tableType; @@ -269,8 +274,8 @@ public: Uint32 tablePtr; // Ptr.i to current table FsBuffer dataBuffer; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint64 noOfRecords; + Uint64 noOfBytes; Uint32 maxRecordSize; private: diff --git a/ndb/src/kernel/blocks/backup/BackupFormat.hpp 
b/ndb/src/kernel/blocks/backup/BackupFormat.hpp index 65dd2ad9053..b8ffff3a294 100644 --- a/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ b/ndb/src/kernel/blocks/backup/BackupFormat.hpp @@ -32,7 +32,8 @@ struct BackupFormat { FRAGMENT_FOOTER = 3, TABLE_LIST = 4, TABLE_DESCRIPTION = 5, - GCP_ENTRY = 6 + GCP_ENTRY = 6, + FRAGMENT_INFO = 7 }; struct FileHeader { @@ -126,6 +127,20 @@ struct BackupFormat { Uint32 StartGCP; Uint32 StopGCP; }; + + /** + * Fragment Info + */ + struct FragmentInfo { + Uint32 SectionType; + Uint32 SectionLength; + Uint32 TableId; + Uint32 FragmentNo; + Uint32 NoOfRecordsLow; + Uint32 NoOfRecordsHigh; + Uint32 FilePosLow; + Uint32 FilePosHigh; + }; }; /** diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp index 4c734d58c8e..96c11468939 100644 --- a/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -97,6 +97,9 @@ Backup::Backup(const Configuration & conf) : addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ); addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF); + + addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP, + &Backup::execBACKUP_FRAGMENT_COMPLETE_REP); addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ); addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF); diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 3cdba251492..133b4d75d8e 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -286,6 +286,10 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w, w.add(DictTabInfo::TableKValue, tablePtr.p->kValue); w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType); w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType); + w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow); + w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh); + w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow); + w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh); if(!signal) { @@ -1535,6 +1539,10 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->minLoadFactor = 70; tablePtr.p->noOfPrimkey = 1; tablePtr.p->tupKeyLength = 1; + tablePtr.p->maxRowsLow = 0; + tablePtr.p->maxRowsHigh = 0; + tablePtr.p->minRowsLow = 0; + tablePtr.p->minRowsHigh = 0; tablePtr.p->storedTable = true; tablePtr.p->tableType = DictTabInfo::UserTable; tablePtr.p->primaryTableId = RNIL; @@ -4501,6 +4509,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { Uint32 lhPageBits = 0; ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount); + Uint64 maxRows = tabPtr.p->maxRowsLow + + (((Uint64)tabPtr.p->maxRowsHigh) << 32); + Uint64 minRows = tabPtr.p->minRowsLow + + (((Uint64)tabPtr.p->minRowsHigh) << 32); + maxRows = (maxRows + fragCount - 1) / fragCount; + minRows = (minRows + fragCount - 1) / fragCount; + { LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend(); req->senderData = senderData; @@ -4516,7 +4531,10 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->lh3PageBits = 0; //lhPageBits; req->noOfAttributes = tabPtr.p->noOfAttributes; req->noOfNullAttributes = tabPtr.p->noOfNullBits; - req->noOfPagesToPreAllocate = 0; + req->maxRowsLow = maxRows & 0xFFFFFFFF; + req->maxRowsHigh = maxRows >> 32; + req->minRowsLow = minRows & 0xFFFFFFFF; + req->minRowsHigh = minRows >> 32; req->schemaVersion = tabPtr.p->tableVersion; Uint32 keyLen = 
tabPtr.p->tupKeyLength; req->keyLength = keyLen; // wl-2066 no more "long keys" @@ -4524,8 +4542,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->noOfKeyAttr = tabPtr.p->noOfPrimkey; req->noOfNewAttr = 0; - // noOfCharsets passed to TUP in upper half - req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16); + req->noOfCharsets = tabPtr.p->noOfCharsets; req->checksumIndicator = 1; req->noOfAttributeGroups = 1; req->GCPIndicator = 0; @@ -5091,6 +5108,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType; tablePtr.p->kValue = tableDesc.TableKValue; tablePtr.p->fragmentCount = tableDesc.FragmentCount; + tablePtr.p->maxRowsLow = tableDesc.MaxRowsLow; + tablePtr.p->maxRowsHigh = tableDesc.MaxRowsHigh; + tablePtr.p->minRowsLow = tableDesc.MinRowsLow; + tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh; + + Uint64 maxRows = + (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow; + Uint64 minRows = + (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow; tablePtr.p->frmLen = tableDesc.FrmLen; memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen); diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 9c0bf65b69c..0fa984a4c61 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -134,6 +134,10 @@ public: * on disk. Index trigger ids are volatile. */ struct TableRecord : public MetaData::Table { + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; /**************************************************** * Support variables for table handling ****************************************************/ @@ -1780,6 +1784,8 @@ private: * * XXX only table ops check BlockState */ + struct DictLockType; + friend struct DictLockType; struct DictLockType { DictLockReq::LockType lockType; @@ -1787,6 +1793,9 @@ private: const char* text; }; + struct DictLockRecord; + friend struct DictLockRecord; + struct DictLockRecord { DictLockReq req; const DictLockType* lt; diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 1ed383853ba..f8e6292f7f2 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -443,7 +443,6 @@ public: UintR dictConnectptr; UintR fragmentPtr; UintR nextAddfragrec; - UintR noOfAllocPages; UintR schemaVer; UintR tup1Connectptr; UintR tup2Connectptr; @@ -465,12 +464,17 @@ public: Uint16 totalAttrReceived; Uint16 fragCopyCreation; Uint16 noOfKeyAttr; - Uint32 noOfNewAttr; // noOfCharsets in upper half + Uint16 noOfNewAttr; + Uint16 noOfCharsets; Uint16 noOfAttributeGroups; Uint16 lh3DistrBits; Uint16 tableType; Uint16 primaryTableId; - };// Size 108 bytes + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + };// Size 124 bytes typedef Ptr AddFragRecordPtr; /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 56e93e6ee01..ecb67d04050 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -939,12 +939,16 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) Uint8 tlh = req->lh3PageBits; Uint32 tnoOfAttr = req->noOfAttributes; Uint32 tnoOfNull = req->noOfNullAttributes; - Uint32 noOfAlloc = req->noOfPagesToPreAllocate; + Uint32 maxRowsLow = req->maxRowsLow; + Uint32 maxRowsHigh = 
req->maxRowsHigh; + Uint32 minRowsLow = req->minRowsLow; + Uint32 minRowsHigh = req->minRowsHigh; Uint32 tschemaVersion = req->schemaVersion; Uint32 ttupKeyLength = req->keyLength; Uint32 nextLcp = req->nextLCP; Uint32 noOfKeyAttr = req->noOfKeyAttr; Uint32 noOfNewAttr = req->noOfNewAttr; + Uint32 noOfCharsets = req->noOfCharsets; Uint32 checksumIndicator = req->checksumIndicator; Uint32 noOfAttributeGroups = req->noOfAttributeGroups; Uint32 gcpIndicator = req->GCPIndicator; @@ -1042,7 +1046,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) addfragptr.p->m_senderAttrPtr = RNIL; addfragptr.p->noOfAttr = tnoOfAttr; addfragptr.p->noOfNull = tnoOfNull; - addfragptr.p->noOfAllocPages = noOfAlloc; + addfragptr.p->maxRowsLow = maxRowsLow; + addfragptr.p->maxRowsHigh = maxRowsHigh; + addfragptr.p->minRowsLow = minRowsLow; + addfragptr.p->minRowsHigh = minRowsHigh; addfragptr.p->tabId = tabptr.i; addfragptr.p->totalAttrReceived = 0; addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */ @@ -1052,6 +1059,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) addfragptr.p->addfragErrorCode = 0; addfragptr.p->noOfKeyAttr = noOfKeyAttr; addfragptr.p->noOfNewAttr = noOfNewAttr; + addfragptr.p->noOfCharsets = noOfCharsets; addfragptr.p->checksumIndicator = checksumIndicator; addfragptr.p->noOfAttributeGroups = noOfAttributeGroups; addfragptr.p->GCPIndicator = gcpIndicator; @@ -1221,47 +1229,56 @@ Dblqh::sendAddFragReq(Signal* signal) ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP || addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) { + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); if (DictTabInfo::isTable(addfragptr.p->tableType) || DictTabInfo::isHashIndex(addfragptr.p->tableType)) { jam(); - signal->theData[0] = addfragptr.i; - signal->theData[1] = cownref; - signal->theData[2] = 0; /* ADD TABLE */ - signal->theData[3] = addfragptr.p->tabId; - signal->theData[4] = addfragptr.p->noOfAttr; - signal->theData[5] = + tupFragReq->userPtr = addfragptr.i; + tupFragReq->userRef = cownref; + tupFragReq->reqInfo = 0; /* ADD TABLE */ + tupFragReq->tableId = addfragptr.p->tabId; + tupFragReq->noOfAttr = addfragptr.p->noOfAttr; + tupFragReq->fragId = addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ? 
addfragptr.p->fragid1 : addfragptr.p->fragid2; - signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1; - signal->theData[7] = addfragptr.p->noOfNull; - signal->theData[8] = addfragptr.p->schemaVer; - signal->theData[9] = addfragptr.p->noOfKeyAttr; - signal->theData[10] = addfragptr.p->noOfNewAttr; - signal->theData[11] = addfragptr.p->checksumIndicator; - signal->theData[12] = addfragptr.p->noOfAttributeGroups; - signal->theData[13] = addfragptr.p->GCPIndicator; + tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; + tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; + tupFragReq->minRowsLow = addfragptr.p->minRowsLow; + tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; + tupFragReq->noOfNullAttr = addfragptr.p->noOfNull; + tupFragReq->schemaVersion = addfragptr.p->schemaVer; + tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr; + tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr; + tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; + tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; + tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups; + tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, TupFragReq::SignalLength, JBB); return; } if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { jam(); - signal->theData[0] = addfragptr.i; - signal->theData[1] = cownref; - signal->theData[2] = 0; /* ADD TABLE */ - signal->theData[3] = addfragptr.p->tabId; - signal->theData[4] = 1; /* ordered index: one array attr */ - signal->theData[5] = + tupFragReq->userPtr = addfragptr.i; + tupFragReq->userRef = cownref; + tupFragReq->reqInfo = 0; /* ADD TABLE */ + tupFragReq->tableId = addfragptr.p->tabId; + tupFragReq->noOfAttr = 1; /* ordered index: one array attr */ + tupFragReq->fragId = addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ? 
addfragptr.p->fragid1 : addfragptr.p->fragid2; - signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1; - signal->theData[7] = 0; /* ordered index: no nullable */ - signal->theData[8] = addfragptr.p->schemaVer; - signal->theData[9] = 1; /* ordered index: one key */ - signal->theData[10] = addfragptr.p->noOfNewAttr; - signal->theData[11] = addfragptr.p->checksumIndicator; - signal->theData[12] = addfragptr.p->noOfAttributeGroups; - signal->theData[13] = addfragptr.p->GCPIndicator; + tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; + tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; + tupFragReq->minRowsLow = addfragptr.p->minRowsLow; + tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; + tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */ + tupFragReq->schemaVersion = addfragptr.p->schemaVer; + tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */ + tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr; + tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; + tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; + tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups; + tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, TupFragReq::SignalLength, JBB); return; @@ -1580,28 +1597,35 @@ void Dblqh::abortAddFragOps(Signal* signal) { fragptr.i = addfragptr.p->fragmentPtr; ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); - signal->theData[0] = (Uint32)-1; if (addfragptr.p->tup1Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tup1Connectptr; + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); + tupFragReq->userPtr = (Uint32)-1; + tupFragReq->userRef = addfragptr.p->tup1Connectptr; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB); addfragptr.p->tup1Connectptr = RNIL; } if (addfragptr.p->tup2Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tup2Connectptr; + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); + tupFragReq->userPtr = (Uint32)-1; + tupFragReq->userRef = addfragptr.p->tup2Connectptr; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB); addfragptr.p->tup2Connectptr = RNIL; } if (addfragptr.p->tux1Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tux1Connectptr; + TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend(); + tuxFragReq->userPtr = (Uint32)-1; + tuxFragReq->userRef = addfragptr.p->tux1Connectptr; sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB); addfragptr.p->tux1Connectptr = RNIL; } if (addfragptr.p->tux2Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tux2Connectptr; + TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend(); + tuxFragReq->userPtr = (Uint32)-1; + tuxFragReq->userRef = addfragptr.p->tux2Connectptr; sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB); addfragptr.p->tux2Connectptr = RNIL; } diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index cf3c6056d65..41194fba82c 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -496,7 +496,8 @@ struct DiskBufferSegmentInfo { typedef Ptr DiskBufferSegmentInfoPtr; struct Fragoperrec { - bool definingFragment; + Uint64 minRows; + Uint64 maxRows; Uint32 nextFragoprec; Uint32 lqhPtrFrag; Uint32 fragidFrag; @@ -509,6 +510,7 @@ struct Fragoperrec { Uint32 charsetIndex; BlockReference lqhBlockrefFrag; bool inUse; + 
bool definingFragment; }; typedef Ptr FragoperrecPtr; @@ -560,6 +562,7 @@ struct Fragrecord { Uint32 currentPageRange; Uint32 rootPageRange; Uint32 noOfPages; + Uint32 noOfPagesToGrow; Uint32 emptyPrimPage; Uint32 firstusedOprec; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index bacba2a880c..12cd61a17a6 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -41,7 +41,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) { ljamEntry(); - if (signal->theData[0] == (Uint32)-1) { + TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr(); + if (tupFragReq->userPtr == (Uint32)-1) { ljam(); abortAddFragOp(signal); return; @@ -51,30 +52,34 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) FragrecordPtr regFragPtr; TablerecPtr regTabPtr; - Uint32 userptr = signal->theData[0]; - Uint32 userblockref = signal->theData[1]; - Uint32 reqinfo = signal->theData[2]; - regTabPtr.i = signal->theData[3]; - Uint32 noOfAttributes = signal->theData[4]; - Uint32 fragId = signal->theData[5]; - Uint32 noOfNullAttr = signal->theData[7]; - /* Uint32 schemaVersion = signal->theData[8];*/ - Uint32 noOfKeyAttr = signal->theData[9]; + Uint32 userptr = tupFragReq->userPtr; + Uint32 userblockref = tupFragReq->userRef; + Uint32 reqinfo = tupFragReq->reqInfo; + regTabPtr.i = tupFragReq->tableId; + Uint32 noOfAttributes = tupFragReq->noOfAttr; + Uint32 fragId = tupFragReq->fragId; + Uint32 noOfNullAttr = tupFragReq->noOfNullAttr; + /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/ + Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr; - Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF); - /* DICT sends number of character sets in upper half */ - Uint32 noOfCharsets = (signal->theData[10] >> 16); + Uint32 noOfNewAttr = tupFragReq->noOfNewAttr; + Uint32 noOfCharsets = tupFragReq->noOfCharsets; - Uint32 checksumIndicator = signal->theData[11]; - Uint32 noOfAttributeGroups = signal->theData[12]; - Uint32 globalCheckpointIdIndicator = signal->theData[13]; + Uint32 checksumIndicator = tupFragReq->checksumIndicator; + Uint32 noOfAttributeGroups = tupFragReq->noOfAttributeGroups; + Uint32 globalCheckpointIdIndicator = tupFragReq->globalCheckpointIdIndicator; + + Uint64 maxRows = + (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow; + Uint64 minRows = + (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow; #ifndef VM_TRACE // config mismatch - do not crash if release compiled if (regTabPtr.i >= cnoOfTablerec) { ljam(); - signal->theData[0] = userptr; - signal->theData[1] = 800; + tupFragReq->userPtr = userptr; + tupFragReq->userRef = 800; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); return; } @@ -83,8 +88,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); if (cfirstfreeFragopr == RNIL) { ljam(); - signal->theData[0] = userptr; - signal->theData[1] = ZNOFREE_FRAGOP_ERROR; + tupFragReq->userPtr = userptr; + tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); return; }//if @@ -100,6 +105,9 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) fragOperPtr.p->noOfNewAttrCount = noOfNewAttr; fragOperPtr.p->charsetIndex = 0; fragOperPtr.p->currNullBit = 0; + // remove in 5.1, 2 fragments per fragment in 5.0 + fragOperPtr.p->minRows = (minRows + 1)/2; + fragOperPtr.p->maxRows = (maxRows + 1)/2; ndbrequire(reqinfo == ZADDFRAG); @@ -141,16 +149,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regFragPtr.p->fragmentId = 
fragId; regFragPtr.p->checkpointVersion = RNIL; - Uint32 noAllocatedPages = 2; - noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); - - if (noAllocatedPages == 0) { - ljam(); - terrorCode = ZNO_PAGES_ALLOCATED_ERROR; - fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); - return; - }//if - if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId || ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) { ljam(); @@ -407,6 +405,27 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) CLEAR_ERROR_INSERT_VALUE; return; } + + if (lastAttr) + { + ljam(); + Uint32 noRowsPerPage = ZWORDS_ON_PAGE/regTabPtr.p->tupheadsize; + Uint32 noAllocatedPages = + (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage; + if (fragOperPtr.p->minRows == 0) + noAllocatedPages = 2; + else if (noAllocatedPages == 0) + noAllocatedPages = 2; + noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); + + if (noAllocatedPages == 0) { + ljam(); + terrorCode = ZNO_PAGES_ALLOCATED_ERROR; + addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); + return; + }//if + } + /* **************************************************************** */ /* ************** TUP_ADD_ATTCONF ****************** */ /* **************************************************************** */ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index 1f674876642..acdb73704cb 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -332,6 +332,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr) regFragPtr->rootPageRange = RNIL; regFragPtr->currentPageRange = RNIL; regFragPtr->noOfPages = 0; + regFragPtr->noOfPagesToGrow = 2; regFragPtr->nextStartRange = 0; }//initFragRange() @@ -393,9 +394,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocReq void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr) { - Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5% - noAllocPages += regFragPtr->noOfPages >> 4; // 6.25% + Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5% + noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25% noAllocPages += 2; + regFragPtr->noOfPagesToGrow += noAllocPages; /* -----------------------------------------------------------------*/ // We will grow by 18.75% plus two more additional pages to grow // a little bit quicker in the beginning. 
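/*
  A minimal standalone sketch (not part of the patch) of the arithmetic the
  hunks above introduce: 64-bit MIN_ROWS/MAX_ROWS values travel through the
  TUPFRAGREQ signal as two 32-bit words, the initial TUP page allocation is
  derived from the minimum row count once the last attribute is added, and
  later growth adds roughly 18.75% of a running growth counter plus two pages.
  The constants used in main() (words per page, tuple head size) are made-up
  assumptions for the example, not values taken from the NDB kernel, and the
  5.0-specific per-fragment halving is left out.
*/
#include <cstdint>
#include <cassert>

/* Split a 64-bit row count into the high/low words carried in the signal. */
static inline void split_rows(uint64_t rows, uint32_t &hi, uint32_t &lo)
{
  hi = static_cast<uint32_t>(rows >> 32);
  lo = static_cast<uint32_t>(rows & 0xFFFFFFFF);
}

/* Recombine on the receiving side, as execTUPFRAGREQ does. */
static inline uint64_t join_rows(uint32_t hi, uint32_t lo)
{
  return (static_cast<uint64_t>(hi) << 32) + lo;
}

/* Initial allocation: ceil(minRows / rowsPerPage), with a floor of two pages. */
static inline uint32_t initial_pages(uint64_t min_rows,
                                     uint32_t words_on_page,
                                     uint32_t tuple_head_words)
{
  uint32_t rows_per_page = words_on_page / tuple_head_words;
  uint32_t pages =
      static_cast<uint32_t>((min_rows + rows_per_page - 1) / rows_per_page);
  return (min_rows == 0 || pages == 0) ? 2 : pages;
}

/* Growth step: 1/8 + 1/16 (~18.75%) of the growth counter so far, plus 2. */
static inline uint32_t grow_pages(uint32_t &pages_to_grow)
{
  uint32_t alloc = (pages_to_grow >> 3) + (pages_to_grow >> 4) + 2;
  pages_to_grow += alloc;
  return alloc;
}

int main()
{
  uint32_t hi, lo;
  split_rows(5000000000ULL, hi, lo);            /* > 4G rows needs both words */
  assert(join_rows(hi, lo) == 5000000000ULL);

  /* e.g. 8192-word pages and 32-word tuples give 256 rows per page */
  assert(initial_pages(1000, 8192, 32) == 4);   /* ceil(1000 / 256) */
  assert(initial_pages(0, 8192, 32) == 2);      /* unspecified MIN_ROWS: floor */

  uint32_t to_grow = 2;
  assert(grow_pages(to_grow) == 2 && to_grow == 4);
  return 0;
}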
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index b9466ed1173..69c0286a1de 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2380,14 +2380,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) event.Event = BackupEvent::BackupCompleted; event.Completed.BackupId = rep->backupId; - event.Completed.NoOfBytes = rep->noOfBytes; + event.Completed.NoOfBytes = rep->noOfBytesLow; event.Completed.NoOfLogBytes = rep->noOfLogBytes; - event.Completed.NoOfRecords = rep->noOfRecords; + event.Completed.NoOfRecords = rep->noOfRecordsLow; event.Completed.NoOfLogRecords = rep->noOfLogRecords; event.Completed.stopGCP = rep->stopGCP; event.Completed.startGCP = rep->startGCP; event.Nodes = rep->nodes; + if (signal->header.theLength >= BackupCompleteRep::SignalLength) + { + event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32; + event.Completed.NoOfRecords += ((Uint64)rep->noOfRecordsHigh) << 32; + } + backupId = rep->backupId; return 0; } diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 7811cf0e5d1..187f225470a 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -323,9 +323,9 @@ public: Uint32 ErrorCode; } FailedToStart ; struct { + Uint64 NoOfBytes; + Uint64 NoOfRecords; Uint32 BackupId; - Uint32 NoOfBytes; - Uint32 NoOfRecords; Uint32 NoOfLogBytes; Uint32 NoOfLogRecords; Uint32 startGCP; diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index a342a5d5926..a0a3dd431b8 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -385,6 +385,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const { return m_impl.m_noOfKeys; } +void +NdbDictionary::Table::setMaxRows(Uint64 maxRows) +{ + m_impl.m_max_rows = maxRows; +} + +Uint64 +NdbDictionary::Table::getMaxRows() const +{ + return m_impl.m_max_rows; +} + +void +NdbDictionary::Table::setMinRows(Uint64 minRows) +{ + m_impl.m_min_rows = minRows; +} + +Uint64 +NdbDictionary::Table::getMinRows() const +{ + return m_impl.m_min_rows; +} + const char* NdbDictionary::Table::getPrimaryKey(int no) const { int count = 0; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index bd50440b3c0..ce348b616c9 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -319,6 +319,8 @@ NdbTableImpl::init(){ m_noOfDistributionKeys= 0; m_noOfBlobs= 0; m_replicaCount= 0; + m_min_rows = 0; + m_max_rows = 0; } bool @@ -416,6 +418,9 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_version = org.m_version; m_status = org.m_status; + + m_max_rows = org.m_max_rows; + m_min_rows = org.m_min_rows; } void NdbTableImpl::setName(const char * name) @@ -1302,6 +1307,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, fragmentTypeMapping, (Uint32)NdbDictionary::Object::FragUndefined); + Uint64 max_rows = ((Uint64)tableDesc.MaxRowsHigh) << 32; + max_rows += tableDesc.MaxRowsLow; + impl->m_max_rows = max_rows; + Uint64 min_rows = ((Uint64)tableDesc.MinRowsHigh) << 32; + min_rows += tableDesc.MinRowsLow; + impl->m_min_rows = min_rows; impl->m_logging = tableDesc.TableLoggedFlag; impl->m_kvalue = tableDesc.TableKValue; impl->m_minLoadFactor = tableDesc.MinLoadFactor; @@ -1630,7 +1641,16 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpTab.MaxLoadFactor = impl.m_maxLoadFactor; tmpTab.TableType = DictTabInfo::UserTable; tmpTab.NoOfAttributes = sz; + tmpTab.MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32); + tmpTab.MaxRowsLow = 
(Uint32)(impl.m_max_rows & 0xFFFFFFFF); + tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32); + tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF); + Uint64 maxRows = + (((Uint64)tmpTab.MaxRowsHigh) << 32) + tmpTab.MaxRowsLow; + Uint64 minRows = + (((Uint64)tmpTab.MinRowsHigh) << 32) + tmpTab.MinRowsLow; + tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType, fragmentTypeMapping, DictTabInfo::AllNodesSmallTable); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index bc9894497f8..dfccf120228 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -130,6 +130,9 @@ public: Uint32 m_hashpointerValue; Vector m_fragments; + Uint64 m_max_rows; + Uint64 m_min_rows; + bool m_logging; int m_kvalue; int m_minLoadFactor; diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp index 6ac06f8a6f8..a808a48b558 100644 --- a/ndb/tools/restore/Restore.cpp +++ b/ndb/tools/restore/Restore.cpp @@ -80,7 +80,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) { RestoreMetaData::~RestoreMetaData(){ for(Uint32 i= 0; i < allTables.size(); i++) - delete allTables[i]; + { + TableS *table = allTables[i]; + for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++) + delete table->m_fragmentInfo[j]; + delete table; + } allTables.clear(); } @@ -111,6 +116,9 @@ RestoreMetaData::loadContent() } if(!readGCPEntry()) return 0; + + if(!readFragmentInfo()) + return 0; return 1; } @@ -192,6 +200,52 @@ RestoreMetaData::readGCPEntry() { return true; } +bool +RestoreMetaData::readFragmentInfo() +{ + BackupFormat::CtlFile::FragmentInfo fragInfo; + TableS * table = 0; + Uint32 tableId = RNIL; + + while (buffer_read(&fragInfo, 4, 2) == 2) + { + fragInfo.SectionType = ntohl(fragInfo.SectionType); + fragInfo.SectionLength = ntohl(fragInfo.SectionLength); + + if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO) + { + err << "readFragmentInfo invalid section type: " << + fragInfo.SectionType << endl; + return false; + } + + if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1) + { + err << "readFragmentInfo invalid section length: " << + fragInfo.SectionLength << endl; + return false; + } + + fragInfo.TableId = ntohl(fragInfo.TableId); + if (fragInfo.TableId != tableId) + { + tableId = fragInfo.TableId; + table = getTable(tableId); + } + + FragmentInfo * tmp = new FragmentInfo; + tmp->fragmentNo = ntohl(fragInfo.FragmentNo); + tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) + + (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32); + tmp->filePosLow = ntohl(fragInfo.FilePosLow); + tmp->filePosHigh = ntohl(fragInfo.FilePosHigh); + + table->m_fragmentInfo.push_back(tmp); + table->m_noOfRecords += tmp->noOfRecords; + } + return true; +} + TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) : m_dictTable(tableImpl) { @@ -199,6 +253,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) m_noOfNullable = m_nullBitmaskSize = 0; m_auto_val_id= ~(Uint32)0; m_max_auto_val= 0; + m_noOfRecords= 0; backupVersion = version; for (int i = 0; i < tableImpl->getNoOfColumns(); i++) @@ -937,4 +992,5 @@ operator<<(NdbOut& ndbout, const TableS & table){ template class Vector; template class Vector; template class Vector; +template class Vector; diff --git a/ndb/tools/restore/Restore.hpp b/ndb/tools/restore/Restore.hpp index 85793baf9df..cf8feb7125c 100644 --- a/ndb/tools/restore/Restore.hpp +++ b/ndb/tools/restore/Restore.hpp @@ -114,6 +114,14 @@ public: AttributeData * getData(int i) 
const; }; // class TupleS +struct FragmentInfo +{ + Uint32 fragmentNo; + Uint64 noOfRecords; + Uint32 filePosLow; + Uint32 filePosHigh; +}; + class TableS { friend class TupleS; @@ -136,6 +144,9 @@ class TableS { int pos; + Uint64 m_noOfRecords; + Vector m_fragmentInfo; + void createAttr(NdbDictionary::Column *column); public: @@ -146,6 +157,9 @@ public: Uint32 getTableId() const { return m_dictTable->getTableId(); } + Uint32 getNoOfRecords() const { + return m_noOfRecords; + } /* void setMysqlTableName(char * tableName) { strpcpy(mysqlTableName, tableName); @@ -274,6 +288,7 @@ class RestoreMetaData : public BackupFile { bool readMetaTableDesc(); bool readGCPEntry(); + bool readFragmentInfo(); Uint32 readMetaTableList(); Uint32 m_startGCP; diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp index d62ca3f610a..bff63c28716 100644 --- a/ndb/tools/restore/consumer_restore.cpp +++ b/ndb/tools/restore/consumer_restore.cpp @@ -193,6 +193,16 @@ BackupRestore::table(const TableS & table){ copy.setName(split[2].c_str()); + /* + update min and max rows to reflect the table, this to + ensure that memory is allocated properly in the ndb kernel + */ + copy.setMinRows(table.getNoOfRecords()); + if (table.getNoOfRecords() > copy.getMaxRows()) + { + copy.setMaxRows(table.getNoOfRecords()); + } + if (dict->createTable(copy) == -1) { err << "Create table " << table.getTableName() << " failed: " diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 0f68b484f41..a339ebc5b8f 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -32,7 +32,7 @@ bin_SCRIPTS = @server_scripts@ \ mysqldumpslow \ mysql_explain_log \ mysql_tableinfo \ - mysql_upgrade \ + mysql_upgrade_shell \ mysqld_multi \ mysql_create_system_tables @@ -60,7 +60,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \ mysql_explain_log.sh \ mysqld_multi.sh \ mysql_tableinfo.sh \ - mysql_upgrade.sh \ + mysql_upgrade_shell.sh \ mysqld_safe.sh \ mysql_create_system_tables.sh @@ -89,7 +89,7 @@ CLEANFILES = @server_scripts@ \ mysqldumpslow \ mysql_explain_log \ mysql_tableinfo \ - mysql_upgrade \ + mysql_upgrade_shell \ mysqld_multi \ make_win_src_distribution \ mysql_create_system_tables diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index d96391f8aef..c344cf3e93a 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -134,7 +134,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \ client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \ client/mysqldump$BS client/mysqlimport$BS \ client/mysqltest$BS client/mysqlcheck$BS \ - client/mysqlbinlog$BS \ + client/mysqlbinlog$BS client/mysql_upgrade$BS \ tests/mysql_client_test$BS \ libmysqld/examples/mysql_client_test_embedded$BS \ libmysqld/examples/mysqltest_embedded$BS \ diff --git a/scripts/mysql_upgrade.sh b/scripts/mysql_upgrade_shell.sh similarity index 100% rename from scripts/mysql_upgrade.sh rename to scripts/mysql_upgrade_shell.sh diff --git a/sql-common/client.c b/sql-common/client.c index 26ebc9cc6b0..56a5862c90e 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -130,6 +130,8 @@ static void mysql_close_free(MYSQL *mysql); static int wait_for_data(my_socket fd, uint timeout); #endif +CHARSET_INFO *default_client_charset_info = &my_charset_latin1; + /**************************************************************************** A modified version of connect(). 
my_connect() allows you to specify @@ -1431,7 +1433,7 @@ mysql_init(MYSQL *mysql) bzero((char*) (mysql), sizeof(*(mysql))); mysql->options.connect_timeout= CONNECT_TIMEOUT; mysql->last_used_con= mysql->next_slave= mysql->master = mysql; - mysql->charset=default_charset_info; + mysql->charset=default_client_charset_info; strmov(mysql->net.sqlstate, not_error_sqlstate); /* By default, we are a replication pivot. The caller must reset it @@ -1660,7 +1662,51 @@ static MYSQL_METHODS client_methods= #endif }; -MYSQL * +C_MODE_START +int mysql_init_character_set(MYSQL *mysql) +{ + NET *net= &mysql->net; + /* Set character set */ + if (!mysql->options.charset_name && + !(mysql->options.charset_name= + my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + return 1; + + { + const char *save= charsets_dir; + if (mysql->options.charset_dir) + charsets_dir=mysql->options.charset_dir; + mysql->charset=get_charset_by_csname(mysql->options.charset_name, + MY_CS_PRIMARY, MYF(MY_WME)); + charsets_dir= save; + } + + if (!mysql->charset) + { + net->last_errno=CR_CANT_READ_CHARSET; + strmov(net->sqlstate, unknown_sqlstate); + if (mysql->options.charset_dir) + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + mysql->options.charset_dir); + else + { + char cs_dir_name[FN_REFLEN]; + get_charsets_dir(cs_dir_name); + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + cs_dir_name); + } + return 1; + } + return 0; +} +C_MODE_END + + +MYSQL * STDCALL CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, uint port, const char *unix_socket,ulong client_flag) @@ -1997,42 +2043,8 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, goto error; } - /* Set character set */ - if (!mysql->options.charset_name && - !(mysql->options.charset_name= - my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + if (mysql_init_character_set(mysql)) goto error; - - { - const char *save= charsets_dir; - if (mysql->options.charset_dir) - charsets_dir=mysql->options.charset_dir; - mysql->charset=get_charset_by_csname(mysql->options.charset_name, - MY_CS_PRIMARY, MYF(MY_WME)); - charsets_dir= save; - } - - if (!mysql->charset) - { - net->last_errno=CR_CANT_READ_CHARSET; - strmov(net->sqlstate, unknown_sqlstate); - if (mysql->options.charset_dir) - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - mysql->options.charset_dir); - else - { - char cs_dir_name[FN_REFLEN]; - get_charsets_dir(cs_dir_name); - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - cs_dir_name); - } - goto error; - } - /* Save connection information */ if (!my_multi_malloc(MYF(0), diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index 02bcde43f11..11f676d9cf6 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -32,13 +32,14 @@ so to read, that data has to be parsed into fields, to write, fields have to be stored in this format to write to this data file. - With MySQL Federated storage engine, there will be no local files for each - table's data (such as .MYD). A foreign database will store the data that would - normally be in this file. This will necessitate the use of MySQL client API - to read, delete, update, insert this data. The data will have to be retrieve - via an SQL call "SELECT * FROM users". 
Then, to read this data, it will have - to be retrieved via mysql_fetch_row one row at a time, then converted from - the column in this select into the format that the handler expects. + With MySQL Federated storage engine, there will be no local files + for each table's data (such as .MYD). A foreign database will store + the data that would normally be in this file. This will necessitate + the use of MySQL client API to read, delete, update, insert this + data. The data will have to be retrieve via an SQL call "SELECT * + FROM users". Then, to read this data, it will have to be retrieved + via mysql_fetch_row one row at a time, then converted from the + column in this select into the format that the handler expects. The create table will simply create the .frm file, and within the "CREATE TABLE" SQL, there SHALL be any of the following : @@ -395,8 +396,8 @@ handlerton federated_hton= { static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, my_bool not_used __attribute__ ((unused))) { - *length= share->table_name_length; - return (byte*) share->table_name; + *length= share->connect_string_length; + return (byte*) share->scheme; } /* @@ -416,7 +417,7 @@ bool federated_db_init() DBUG_ENTER("federated_db_init"); if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST)) goto error; - if (hash_init(&federated_open_tables, system_charset_info, 32, 0, 0, + if (hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0, (hash_get_key) federated_get_key, 0, 0)) { VOID(pthread_mutex_destroy(&federated_mutex)); @@ -513,6 +514,7 @@ static int check_foreign_data_source(FEDERATED_SHARE *share, } else { + int escaped_table_name_length= 0; /* Since we do not support transactions at this version, we can let the client API silently reconnect. For future versions, we will need more @@ -531,17 +533,16 @@ static int check_foreign_data_source(FEDERATED_SHARE *share, query.append(FEDERATED_STAR); query.append(FEDERATED_FROM); query.append(FEDERATED_BTICK); - escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name, + escaped_table_name_length= + escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name, sizeof(escaped_table_name), share->table_name, share->table_name_length); - query.append(escaped_table_name); + query.append(escaped_table_name, escaped_table_name_length); query.append(FEDERATED_BTICK); query.append(FEDERATED_WHERE); query.append(FEDERATED_FALSE); - DBUG_PRINT("info", ("check_foreign_data_source query %s", - query.c_ptr_quick())); if (mysql_real_query(mysql, query.ptr(), query.length())) { error_code= table_create_flag ? 
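/*
  A small standalone sketch (not part of the patch) of the fix made in
  check_foreign_data_source() above: the probe query now appends exactly the
  number of bytes reported back by escape_string_for_mysql() instead of
  treating the escape buffer as a NUL-terminated C string. demo_escape() below
  is a simplified stand-in escaper, not the MySQL function; only its relevant
  property is mirrored, namely that it returns the escaped length.
*/
#include <cstring>
#include <string>
#include <cassert>

/* Stand-in escaper: writes into a fixed buffer and returns the number of
   bytes produced. No trailing '\0' is guaranteed, which is why the caller
   must use the returned length rather than strlen() on the buffer. */
static size_t demo_escape(char *to, size_t to_size,
                          const char *from, size_t from_len)
{
  size_t out = 0;
  for (size_t i = 0; i < from_len && out + 2 < to_size; i++)
  {
    if (from[i] == '`' || from[i] == '\\')   /* escape a couple of metachars */
      to[out++] = '\\';
    to[out++] = from[i];
  }
  return out;
}

int main()
{
  char buf[64];
  memset(buf, 'X', sizeof(buf));   /* dirty buffer: no reliable terminator */

  const char *table = "bad`name";
  size_t len = demo_escape(buf, sizeof(buf), table, strlen(table));

  std::string query("SELECT * FROM `");
  query.append(buf, len);          /* append exactly 'len' bytes, as the fix does */
  query.append("` WHERE 1=0");

  assert(query == "SELECT * FROM `bad\\`name` WHERE 1=0");
  return 0;
}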
@@ -636,8 +637,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, table->s->connect_string.length, MYF(0)); - // Add a null for later termination of table name - share->scheme[table->s->connect_string.length]= 0; + share->connect_string_length= table->s->connect_string.length; DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme)); /* @@ -703,7 +703,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, share->table_name++; share->table_name_length= strlen(share->table_name); - + /* make sure there's not an extra / */ if ((strchr(share->table_name, '/'))) goto error; @@ -739,8 +739,7 @@ error: ha_federated::ha_federated(TABLE *table_arg) :handler(&federated_hton, table_arg), - mysql(0), stored_result(0), - ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0) + mysql(0), stored_result(0) {} @@ -751,6 +750,7 @@ ha_federated::ha_federated(TABLE *table_arg) convert_row_to_internal_format() record Byte pointer to record row MySQL result set row from fetchrow() + result Result set to use DESCRIPTION This method simply iterates through a row returned via fetchrow with @@ -763,14 +763,15 @@ ha_federated::ha_federated(TABLE *table_arg) 0 After fields have had field values stored from record */ -uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row) +uint ha_federated::convert_row_to_internal_format(byte *record, + MYSQL_ROW row, + MYSQL_RES *result) { ulong *lengths; Field **field; DBUG_ENTER("ha_federated::convert_row_to_internal_format"); - lengths= mysql_fetch_lengths(stored_result); - memset(record, 0, table->s->null_bytes); + lengths= mysql_fetch_lengths(result); for (field= table->field; *field; field++) { @@ -1298,12 +1299,11 @@ next_loop: static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) { - char *select_query, *tmp_table_name; + char *select_query; char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - uint tmp_table_name_length; Field **field; String query(query_buffer, sizeof(query_buffer), &my_charset_bin); - FEDERATED_SHARE *share; + FEDERATED_SHARE *share= NULL, tmp_share; /* In order to use this string, we must first zero it's length, or it will contain garbage @@ -1311,12 +1311,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.length(0); pthread_mutex_lock(&federated_mutex); - tmp_table_name= (char *)table->s->table_name; - tmp_table_name_length= (uint) strlen(tmp_table_name); + if (parse_url(&tmp_share, table, 0)) + goto error; + + /* TODO: change tmp_share.scheme to LEX_STRING object */ if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables, - (byte*) table_name, - strlen(table_name)))) + (byte*) tmp_share.scheme, + tmp_share. 
+ connect_string_length))) { query.set_charset(system_charset_info); query.append(FEDERATED_SELECT); @@ -1334,24 +1337,20 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) if (!(share= (FEDERATED_SHARE *) my_multi_malloc(MYF(MY_WME), &share, sizeof(*share), - &tmp_table_name, tmp_table_name_length+ 1, &select_query, query.length()+table->s->connect_string.length+1, NullS))) - { - pthread_mutex_unlock(&federated_mutex); - return NULL; - } - - if (parse_url(share, table, 0)) goto error; + memcpy(share, &tmp_share, sizeof(tmp_share)); + + share->table_name_length= strlen(share->table_name); + /* TODO: share->table_name to LEX_STRING object */ query.append(share->table_name, share->table_name_length); query.append(FEDERATED_BTICK); share->select_query= select_query; strmov(share->select_query, query.ptr()); share->use_count= 0; - share->table_name_length= strlen(share->table_name); DBUG_PRINT("info", ("share->select_query %s", share->select_query)); @@ -1367,11 +1366,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) error: pthread_mutex_unlock(&federated_mutex); - if (share->scheme) - { - my_free((gptr) share->scheme, MYF(0)); - share->scheme= 0; - } + my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR)); return NULL; } @@ -1391,13 +1387,7 @@ static int free_share(FEDERATED_SHARE *share) { hash_delete(&federated_open_tables, (byte*) share); my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR)); - share->scheme= 0; - if (share->socket) - { - my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); - share->socket= 0; - } - + my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->mutex)); my_free((gptr) share, MYF(0)); @@ -1459,22 +1449,29 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked) /* Connect to foreign database mysql_real_connect() */ mysql= mysql_init(0); - if (!mysql_real_connect(mysql, - share->hostname, - share->username, - share->password, - share->database, - share->port, - share->socket, 0)) + if (!mysql || !mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + share->socket, 0)) { + free_share(share); DBUG_RETURN(stash_remote_error()); } /* Since we do not support transactions at this version, we can let the client - API silently reconnect. For future versions, we will need more logic to deal - with transactions + API silently reconnect. For future versions, we will need more logic to + deal with transactions */ mysql->reconnect= 1; + + ref_length= (table->s->primary_key != MAX_KEY ? 
+ table->key_info[table->s->primary_key].key_length : + table->s->reclength); + DBUG_PRINT("info", ("ref_length: %u", ref_length)); + DBUG_RETURN(0); } @@ -1498,13 +1495,12 @@ int ha_federated::close(void) /* free the result set */ if (stored_result) { - DBUG_PRINT("info", - ("mysql_free_result result at address %lx", stored_result)); mysql_free_result(stored_result); stored_result= 0; } /* Disconnect from mysql */ - mysql_close(mysql); + if (mysql) // QQ is this really needed + mysql_close(mysql); retval= free_share(share); DBUG_RETURN(retval); @@ -1694,15 +1690,13 @@ int ha_federated::write_row(byte *buf) /* add the values */ insert_string.append(values_string); - DBUG_PRINT("info", ("insert query %s", insert_string.c_ptr_quick())); - if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length())) { DBUG_RETURN(stash_remote_error()); } /* - If the table we've just written a record to contains an auto_increment field, - then store the last_insert_id() value from the foreign server + If the table we've just written a record to contains an auto_increment + field, then store the last_insert_id() value from the foreign server */ if (table->next_number_field) update_auto_increment(); @@ -1771,7 +1765,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) query.append(FEDERATED_EXTENDED); if (check_opt->sql_flags & TT_USEFRM) query.append(FEDERATED_USE_FRM); - + if (mysql_real_query(mysql, query.ptr(), query.length())) { DBUG_RETURN(stash_remote_error()); @@ -1923,7 +1917,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) /* This will delete a row. 'buf' will contain a copy of the row to be =deleted. The server will call this right after the current row has been called (from - either a previous rnd_nexT() or index call). + either a previous rnd_next() or index call). If you keep a pointer to the last row or can access a primary key it will make doing the deletion quite a bit easier. Keep in mind that the server does no guarentee consecutive deletions. @@ -1983,6 +1977,7 @@ int ha_federated::delete_row(const byte *buf) DBUG_RETURN(stash_remote_error()); } deleted+= mysql->affected_rows; + records-= mysql->affected_rows; DBUG_PRINT("info", ("rows deleted %d rows deleted for all time %d", int(mysql->affected_rows), deleted)); @@ -1999,12 +1994,15 @@ int ha_federated::delete_row(const byte *buf) */ int ha_federated::index_read(byte *buf, const byte *key, - uint key_len, enum ha_rkey_function find_flag) + uint key_len, ha_rkey_function find_flag) { - int retval; DBUG_ENTER("ha_federated::index_read"); - retval= index_read_idx(buf, active_index, key, key_len, find_flag); - DBUG_RETURN(retval); + + if (stored_result) + mysql_free_result(stored_result); + DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key, + key_len, find_flag, + &stored_result)); } @@ -2013,26 +2011,60 @@ int ha_federated::index_read(byte *buf, const byte *key, row if any. This is only used to read whole keys. This method is called via index_read in the case of a WHERE clause using - a regular non-primary key index, OR is called DIRECTLY when the WHERE clause + a primary key index OR is called DIRECTLY when the WHERE clause uses a PRIMARY KEY index. + + NOTES + This uses an internal result set that is deleted before function + returns. 
We need to be able to be calable from ha_rnd_pos() */ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, uint key_len, enum ha_rkey_function find_flag) +{ + int retval; + MYSQL_RES *mysql_result; + DBUG_ENTER("ha_federated::index_read_idx"); + + if ((retval= index_read_idx_with_result_set(buf, index, key, + key_len, find_flag, + &mysql_result))) + DBUG_RETURN(retval); + mysql_free_result(mysql_result); + DBUG_RETURN(retval); +} + + +/* + Create result set for rows matching query and return first row + + RESULT + 0 ok In this case *result will contain the result set + table->status == 0 + # error In this case *result will contain 0 + table->status == STATUS_NOT_FOUND +*/ + +int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result) { int retval; char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; char index_value[STRING_BUFFER_USUAL_SIZE]; char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - String index_string(index_value, + String index_string(index_value, sizeof(index_value), &my_charset_bin); String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin); key_range range; - DBUG_ENTER("ha_federated::index_read_idx"); + DBUG_ENTER("ha_federated::index_read_idx_with_result_set"); + *result= 0; // In case of errors index_string.length(0); sql_query.length(0); statistic_increment(table->in_use->status_var.ha_read_key_count, @@ -2049,20 +2081,6 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, NULL, 0); sql_query.append(index_string); - DBUG_PRINT("info", - ("current key %d key value %s index_string value %s length %d", - index, (char*) key, index_string.c_ptr_quick(), - index_string.length())); - - DBUG_PRINT("info", - ("current position %d sql_query %s", current_position, - sql_query.c_ptr_quick())); - - if (stored_result) - { - mysql_free_result(stored_result); - stored_result= 0; - } if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) { my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", @@ -2070,53 +2088,44 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; goto error; } - stored_result= mysql_store_result(mysql); - - if (!stored_result) + if (!(*result= mysql_store_result(mysql))) { retval= HA_ERR_END_OF_FILE; goto error; } - /* - This basically says that the record in table->record[0] is legal, - and that it is ok to use this record, for whatever reason, such - as with a join (without it, joins will not work) - */ - table->status= 0; + if (!(retval= read_next(buf, *result))) + DBUG_RETURN(retval); - retval= rnd_next(buf); + mysql_free_result(*result); + *result= 0; + table->status= STATUS_NOT_FOUND; DBUG_RETURN(retval); error: - if (stored_result) - { - mysql_free_result(stored_result); - stored_result= 0; - } table->status= STATUS_NOT_FOUND; my_error(retval, MYF(0), error_buffer); DBUG_RETURN(retval); } + /* Initialized at each key walk (called multiple times unlike rnd_init()) */ + int ha_federated::index_init(uint keynr) { DBUG_ENTER("ha_federated::index_init"); - DBUG_PRINT("info", - ("table: '%s' key: %d", table->s->table_name, keynr)); + DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); active_index= keynr; DBUG_RETURN(0); } -/* - int read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted); +/* + Read first range */ + int ha_federated::read_range_first(const key_range 
*start_key, - const key_range *end_key, - bool eq_range, bool sorted) + const key_range *end_key, + bool eq_range, bool sorted) { char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; int retval; @@ -2125,8 +2134,7 @@ int ha_federated::read_range_first(const key_range *start_key, &my_charset_bin); DBUG_ENTER("ha_federated::read_range_first"); - if (start_key == NULL && end_key == NULL) - DBUG_RETURN(0); + DBUG_ASSERT(!(start_key == NULL && end_key == NULL)); sql_query.length(0); sql_query.append(share->select_query); @@ -2134,6 +2142,11 @@ int ha_federated::read_range_first(const key_range *start_key, &table->key_info[active_index], start_key, end_key, 0); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) { retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; @@ -2141,38 +2154,21 @@ int ha_federated::read_range_first(const key_range *start_key, } sql_query.length(0); - if (stored_result) - { - DBUG_PRINT("info", - ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - stored_result= mysql_store_result(mysql); - - if (!stored_result) + if (!(stored_result= mysql_store_result(mysql))) { retval= HA_ERR_END_OF_FILE; goto error; } - - /* This was successful, please let it be known! */ - table->status= 0; - retval= rnd_next(table->record[0]); + retval= read_next(table->record[0], stored_result); DBUG_RETURN(retval); error: - table->status= STATUS_NOT_FOUND; - if (stored_result) - { - DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - DBUG_RETURN(retval); + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(retval); } + int ha_federated::read_range_next() { int retval; @@ -2185,13 +2181,13 @@ int ha_federated::read_range_next() /* Used to read forward through the index. */ int ha_federated::index_next(byte *buf) { - int retval; DBUG_ENTER("ha_federated::index_next"); statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status); - retval= rnd_next(buf); - DBUG_RETURN(retval); + DBUG_RETURN(read_next(buf, stored_result)); } + + /* rnd_init() is called when the system wants the storage engine to do a table scan. 
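/*
  The federated rework above funnels key reads, range reads and positioned
  reads through one helper that either hands a stored result set back through
  an out-parameter (which the caller frees later) or cleans up and signals
  "not found". Below is a minimal standalone sketch (not handler code) of that
  ownership pattern using the ordinary libmysqlclient C API; the function name
  fetch_first_row() and the query text in the usage note are assumptions made
  for the example only.
*/
#include <mysql.h>
#include <cstring>

/* Run 'query'; on success *result owns a stored result set whose first row
   has already been fetched into *row. On any failure *result is NULL and the
   caller maps the non-zero return to "not found". */
static int fetch_first_row(MYSQL *conn, const char *query,
                           MYSQL_RES **result, MYSQL_ROW *row)
{
  *result = NULL;
  *row = NULL;
  if (mysql_real_query(conn, query, (unsigned long) strlen(query)))
    return 1;                              /* query failed on the remote side */
  if (!(*result = mysql_store_result(conn)))
    return 1;                              /* no result set was produced */
  if (!(*row = mysql_fetch_row(*result)))
  {
    mysql_free_result(*result);            /* empty set: clean up before return */
    *result = NULL;
    return 1;
  }
  return 0;                                /* caller keeps *result, frees later */
}

/*
  Typical use (connection setup and error handling elided):

    MYSQL_RES *res;  MYSQL_ROW row;
    if (fetch_first_row(conn, "SELECT * FROM t WHERE id = 1", &res, &row) == 0)
    {
      ... use row and mysql_fetch_lengths(res) ...
      mysql_free_result(res);
    }
*/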
@@ -2245,11 +2241,8 @@ int ha_federated::rnd_init(bool scan) if (scan) { - DBUG_PRINT("info", ("share->select_query %s", share->select_query)); if (stored_result) { - DBUG_PRINT("info", - ("mysql_free_result address %lx", stored_result)); mysql_free_result(stored_result); stored_result= 0; } @@ -2266,27 +2259,25 @@ int ha_federated::rnd_init(bool scan) DBUG_RETURN(0); error: - DBUG_RETURN(stash_remote_error()); + DBUG_RETURN(stash_remote_error()); } + int ha_federated::rnd_end() { - int retval; DBUG_ENTER("ha_federated::rnd_end"); - - if (stored_result) - { - DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - retval= index_end(); - DBUG_RETURN(retval); + DBUG_RETURN(index_end()); } + int ha_federated::index_end(void) { DBUG_ENTER("ha_federated::index_end"); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } active_index= MAX_KEY; DBUG_RETURN(0); } @@ -2303,8 +2294,6 @@ int ha_federated::index_end(void) int ha_federated::rnd_next(byte *buf) { - int retval; - MYSQL_ROW row; DBUG_ENTER("ha_federated::rnd_next"); if (stored_result == 0) @@ -2312,32 +2301,60 @@ int ha_federated::rnd_next(byte *buf) /* Return value of rnd_init is not always checked (see records.cc), so we can get here _even_ if there is _no_ pre-fetched result-set! - TODO: fix it. - */ + TODO: fix it. We can delete this in 5.1 when rnd_init() is checked. + */ DBUG_RETURN(1); } - + DBUG_RETURN(read_next(buf, stored_result)); +} + + +/* + ha_federated::read_next + + reads from a result set and converts to mysql internal + format + + SYNOPSIS + field_in_record_is_null() + buf byte pointer to record + result mysql result set + + DESCRIPTION + This method is a wrapper method that reads one record from a result + set and converts it to the internal table format + + RETURN VALUE + 1 error + 0 no error +*/ + +int ha_federated::read_next(byte *buf, MYSQL_RES *result) +{ + int retval; + my_ulonglong num_rows; + MYSQL_ROW row; + DBUG_ENTER("ha_federated::read_next"); + + table->status= STATUS_NOT_FOUND; // For easier return + /* Fetch a row, insert it back in a row format. */ - current_position= stored_result->data_cursor; - DBUG_PRINT("info", ("current position %d", current_position)); - if (!(row= mysql_fetch_row(stored_result))) + if (!(row= mysql_fetch_row(result))) DBUG_RETURN(HA_ERR_END_OF_FILE); - retval= convert_row_to_internal_format(buf, row); + if (!(retval= convert_row_to_internal_format(buf, row, result))) + table->status= 0; + DBUG_RETURN(retval); } /* - 'position()' is called after each call to rnd_next() if the data needs to be - ordered. You can do something like the following to store the position: - my_store_ptr(ref, ref_length, current_position); + store reference to current row so that we can later find it for + a re-read, update or delete. - The server uses ref to store data. ref_length in the above case is the size - needed to store current_position. ref is just a byte array that the server - will maintain. If you are using offsets to mark rows, then current_position - should be the offset. If it is a primary key like in BDB, then it needs to - be a primary key. + In case of federated, a reference is either a primary key or + the whole record. Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. 
*/ @@ -2345,32 +2362,44 @@ int ha_federated::rnd_next(byte *buf) void ha_federated::position(const byte *record) { DBUG_ENTER("ha_federated::position"); - /* my_store_ptr Add seek storage */ - *(MYSQL_ROW_OFFSET *) ref= current_position; // ref is always aligned + if (table->s->primary_key != MAX_KEY) + key_copy(ref, (byte *)record, table->key_info + table->s->primary_key, + ref_length); + else + memcpy(ref, record, ref_length); DBUG_VOID_RETURN; } /* This is like rnd_next, but you are given a position to use to determine the - row. The position will be of the type that you stored in ref. You can use - ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved - when position() was called. + row. The position will be of the type that you stored in ref. - This method is required for an ORDER BY. + This method is required for an ORDER BY Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. */ + int ha_federated::rnd_pos(byte *buf, byte *pos) { + int result; DBUG_ENTER("ha_federated::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_count, &LOCK_status); - memcpy_fixed(¤t_position, pos, sizeof(MYSQL_ROW_OFFSET)); - stored_result->current_row= 0; - stored_result->data_cursor= current_position; - DBUG_RETURN(rnd_next(buf)); + if (table->s->primary_key != MAX_KEY) + { + /* We have a primary key, so use index_read_idx to find row */ + result= index_read_idx(buf, table->s->primary_key, pos, + ref_length, HA_READ_KEY_EXACT); + } + else + { + /* otherwise, get the old record ref as obtained in ::position */ + memcpy(buf, pos, ref_length); + result= 0; + } + table->status= result ? STATUS_NOT_FOUND : 0; + DBUG_RETURN(result); } @@ -2475,18 +2504,22 @@ void ha_federated::info(uint flag) delete_length = ? 
*/ if (row[4] != NULL) - records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error); - if (row[5] != NULL) - mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error); + records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error); + + mean_rec_length= table->s->reclength; + data_file_length= records * mean_rec_length; + if (row[12] != NULL) - update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); + update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); if (row[13] != NULL) - check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); - } - if (flag & HA_STATUS_CONST) - { - block_size= 4096; + check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); } + + /* + size of IO operations (This is based on a good guess, no high science + involved) + */ + block_size= 4096; } if (result) @@ -2497,6 +2530,7 @@ void ha_federated::info(uint flag) error: if (result) mysql_free_result(result); + my_sprintf(error_buffer, (error_buffer, ": %d : %s", mysql_errno(mysql), mysql_error(mysql))); my_error(error_code, MYF(0), error_buffer); @@ -2577,6 +2611,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { + DBUG_ENTER("ha_federated::store_lock"); if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { /* @@ -2606,7 +2641,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, *to++= &lock; - return to; + DBUG_RETURN(to); } /* diff --git a/sql/ha_federated.h b/sql/ha_federated.h index cafd1fe59a5..85474d142a3 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -78,7 +78,7 @@ #define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES) #define FEDERATED_UPDATE "UPDATE " #define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE) -#define FEDERATED_SET "SET " +#define FEDERATED_SET " SET " #define FEDERATED_SET_LEN sizeof(FEDERATED_SET) #define FEDERATED_AND " AND " #define FEDERATED_AND_LEN sizeof(FEDERATED_AND) @@ -130,6 +130,7 @@ typedef struct st_federated_share { remote host info, parse_url supplies */ char *scheme; + char *connect_string; char *hostname; char *username; char *password; @@ -139,7 +140,7 @@ typedef struct st_federated_share { char *socket; char *sport; ushort port; - uint table_name_length, use_count; + uint table_name_length, connect_string_length, use_count; pthread_mutex_t mutex; THR_LOCK lock; } FEDERATED_SHARE; @@ -153,7 +154,6 @@ class ha_federated: public handler FEDERATED_SHARE *share; /* Shared lock info */ MYSQL *mysql; /* MySQL connection */ MYSQL_RES *stored_result; - uint ref_length; uint fetch_num; // stores the fetch num MYSQL_ROW_OFFSET current_position; // Current position used by ::position() int remote_error_number; @@ -164,8 +164,9 @@ private: return 0 on success return errorcode otherwise */ - uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row); - bool create_where_from_key(String *to, KEY *key_info, + uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row, + MYSQL_RES *result); + bool create_where_from_key(String *to, KEY *key_info, const key_range *start_key, const key_range *end_key, bool records_in_range); @@ -298,6 +299,13 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); //required virtual bool get_error_message(int error, String *buf); + + int read_next(byte *buf, MYSQL_RES *result); + int index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result); }; bool federated_db_init(void); diff --git a/sql/ha_ndbcluster.cc 
b/sql/ha_ndbcluster.cc index 46ab5b88624..a955bc44721 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4116,7 +4116,11 @@ static int create_ndb_column(NDBCOL &col, static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { - if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */ + ha_rows max_rows= form->s->max_rows; + ha_rows min_rows= form->s->min_rows; + if (max_rows < min_rows) + max_rows= min_rows; + if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */ return; /** * get the number of fragments right @@ -4134,7 +4138,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) acc_row_size+= 4 + /*safety margin*/ 4; #endif ulonglong acc_fragment_size= 512*1024*1024; - ulonglong max_rows= form->s->max_rows; #if MYSQL_VERSION_ID >= 50100 no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; #else @@ -4158,6 +4161,8 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) ftype= NDBTAB::FragAllSmall; tab.setFragmentType(ftype); } + tab.setMaxRows(max_rows); + tab.setMinRows(min_rows); } int ha_ndbcluster::create(const char *name, diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index c45fb88a48a..1f64fdba609 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -32,6 +32,7 @@ public: Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} Item_geometry_func(List &list) :Item_str_func(list) {} void fix_length_and_dec(); + enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } }; class Item_func_geometry_from_text: public Item_geometry_func @@ -67,6 +68,7 @@ public: Item_func_as_wkb(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "aswkb"; } String *val_str(String *); + enum_field_types field_type() const { return MYSQL_TYPE_BLOB; } }; class Item_func_geometry_type: public Item_str_func diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b73cd350012..429bdee17d6 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -314,6 +314,7 @@ static char *default_character_set_name; static char *character_set_filesystem_name; static char *my_bind_addr_str; static char *default_collation_name; +static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME; static char mysql_data_home_buff[2]; static struct passwd *user_info; static I_List thread_cache; @@ -6382,7 +6383,7 @@ static void mysql_init_variables(void) /* Variables in libraries */ charsets_dir= 0; default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME; - default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME; + default_collation_name= compiled_default_collation_name; sys_charset_system.value= (char*) system_charset_info->csname; character_set_filesystem_name= (char*) "binary"; @@ -6544,7 +6545,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), strmake(mysql_home,argument,sizeof(mysql_home)-1); break; case 'C': - default_collation_name= 0; + if (default_collation_name == compiled_default_collation_name) + default_collation_name= 0; break; case 'l': opt_log=1; diff --git a/sql/set_var.cc b/sql/set_var.cc index aa58f7d41fc..1cb3878ac70 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2578,7 +2578,7 @@ bool sys_var_insert_id::update(THD *thd, set_var *var) byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - return (byte*) &thd->current_insert_id; + return (byte*) &thd->next_insert_id; } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 8b235d26d37..124d3566b19 
100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -5601,25 +5601,30 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, TABLE_LIST tables[1]; List user_list; bool result; + ACL_USER *au; + char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; DBUG_ENTER("sp_grant_privileges"); if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) DBUG_RETURN(TRUE); combo->user.str= sctx->user; - + VOID(pthread_mutex_lock(&acl_cache->lock)); - if (!find_acl_user(combo->host.str=(char*)sctx->host_or_ip, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE)) - { - VOID(pthread_mutex_unlock(&acl_cache->lock)); - DBUG_RETURN(TRUE); - } + + if ((au= find_acl_user(combo->host.str=(char*)sctx->host_or_ip,combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,FALSE))) + goto found_acl; + if((au= find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE))) + goto found_acl; + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(TRUE); + + found_acl: VOID(pthread_mutex_unlock(&acl_cache->lock)); bzero((char*)tables, sizeof(TABLE_LIST)); @@ -5627,13 +5632,37 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, tables->db= (char*)sp_db; tables->table_name= tables->alias= (char*)sp_name; - + combo->host.length= strlen(combo->host.str); combo->user.length= strlen(combo->user.str); combo->host.str= thd->strmake(combo->host.str,combo->host.length); combo->user.str= thd->strmake(combo->user.str,combo->user.length); - combo->password.str= (char*)""; - combo->password.length= 0; + + + if(au && au->salt_len) + { + if (au->salt_len == SCRAMBLE_LENGTH) + { + make_password_from_salt(passwd_buff, au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH; + } + else if (au->salt_len == SCRAMBLE_LENGTH_323) + { + make_password_from_salt_323(passwd_buff, (ulong *) au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; + } + else + { + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); + return -1; + } + combo->password.str= passwd_buff; + } + else + { + combo->password.str= (char*)""; + combo->password.length= 0; + } if (user_list.push_back(combo)) DBUG_RETURN(TRUE); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 29201e2a2f9..cd57c280950 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -771,6 +771,37 @@ static void reset_mqh(LEX_USER *lu, bool get_them= 0) #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } +void thd_init_client_charset(THD *thd, uint cs_number) +{ + /* + Use server character set and collation if + - opt_character_set_client_handshake is not set + - client has not specified a character set + - client character set is the same as the servers + - client character set doesn't exists in server + */ + if (!opt_character_set_client_handshake || + !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !my_strcasecmp(&my_charset_latin1, + global_system_variables.character_set_client->name, + thd->variables.character_set_client->name)) + { + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + 
thd->variables.character_set_results= + global_system_variables.character_set_results; + } + else + { + thd->variables.character_set_results= + thd->variables.collation_connection= + thd->variables.character_set_client; + } +} + + /* Perform handshake, authorize client and update thd ACL variables. SYNOPSIS @@ -906,33 +937,7 @@ static int check_connection(THD *thd) thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - /* - Use server character set and collation if - - opt_character_set_client_handshake is not set - - client has not specified a character set - - client character set is the same as the servers - - client character set doesn't exists in server - */ - if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= - get_charset((uint) net->read_pos[8], MYF(0))) || - !my_strcasecmp(&my_charset_latin1, - global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) - { - thd->variables.character_set_client= - global_system_variables.character_set_client; - thd->variables.collation_connection= - global_system_variables.collation_connection; - thd->variables.character_set_results= - global_system_variables.character_set_results; - } - else - { - thd->variables.character_set_results= - thd->variables.collation_connection= - thd->variables.character_set_client; - } + thd_init_client_charset(thd, (uint) net->read_pos[8]); thd->update_charset(); end= (char*) net->read_pos+32; } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index ca6a8ddfb6b..60d50c415d5 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -41,6 +41,8 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), static int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet); +static void +append_algorithm(TABLE_LIST *table, String *buff); static int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); static bool schema_table_store_record(THD *thd, TABLE *table); @@ -1098,6 +1100,28 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) void view_store_options(THD *thd, TABLE_LIST *table, String *buff) +{ + append_algorithm(table, buff); + append_definer(thd, buff, &table->definer.user, &table->definer.host); + if (table->view_suid) + buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); + else + buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); +} + + +/* + Append DEFINER clause to the given buffer. 
+ + SYNOPSIS + append_definer() + thd [in] thread handle + buffer [inout] buffer to hold DEFINER clause + definer_user [in] user name part of definer + definer_host [in] host name part of definer +*/ + +static void append_algorithm(TABLE_LIST *table, String *buff) { buff->append(STRING_WITH_LEN("ALGORITHM=")); switch ((int8)table->algorithm) { @@ -1113,11 +1137,6 @@ view_store_options(THD *thd, TABLE_LIST *table, String *buff) default: DBUG_ASSERT(0); // never should happen } - append_definer(thd, buff, &table->definer.user, &table->definer.host); - if (table->view_suid) - buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); - else - buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); } @@ -3105,7 +3124,16 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables, table->field[1]->store(tables->view_db.str, tables->view_db.length, cs); table->field[2]->store(tables->view_name.str, tables->view_name.length, cs); if (grant & SHOW_VIEW_ACL) - table->field[3]->store(tables->query.str, tables->query.length, cs); + { + char buff[2048]; + String qwe_str(buff, sizeof(buff), cs); + qwe_str.length(0); + qwe_str.append(STRING_WITH_LEN("/* ")); + append_algorithm(tables, &qwe_str); + qwe_str.append(STRING_WITH_LEN("*/ ")); + qwe_str.append(tables->query.str, tables->query.length); + table->field[3]->store(qwe_str.ptr(), qwe_str.length(), cs); + } if (tables->with_check != VIEW_CHECK_NONE) { diff --git a/strings/Makefile.am b/strings/Makefile.am index c43cf0f290a..7ee115c09e5 100644 --- a/strings/Makefile.am +++ b/strings/Makefile.am @@ -66,12 +66,6 @@ conf_to_src_LDFLAGS= @NOINST_LDFLAGS@ #strtoull.o: @CHARSET_OBJS@ -if ASSEMBLER -# On Linux gcc can compile the assembly files -%.o : %.s - $(AS) $(ASFLAGS) -o $@ $< -endif - FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@ str_test: str_test.c $(pkglib_LIBRARIES) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 92d0708200b..fd92c2bd25e 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -485,17 +485,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir # Initiate databases if needed %{_bindir}/mysql_install_db --rpm --user=%{mysqld_user} -# Upgrade databases if needed -# This must be done as database user "root", who should be password-protected, -# but this password is not available here. -# So ensure the server is isolated as much as possible, and start it so that -# passwords are not checked. -# See the related change in the start script "/etc/init.d/mysql". -chmod 700 $mysql_datadir -%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables -%{_bindir}/mysql_upgrade -%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables -chmod 755 $mysql_datadir +# Upgrade databases if needed would go here - but it cannot be automated yet # Change permissions again to fix any new files. 
chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir @@ -598,6 +588,7 @@ fi %attr(755, root, root) %{_bindir}/mysqlbug %attr(755, root, root) %{_bindir}/mysqld_multi %attr(755, root, root) %{_bindir}/mysqld_safe +%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysqlhotcopy %attr(755, root, root) %{_bindir}/mysqltest %attr(755, root, root) %{_bindir}/perror @@ -628,7 +619,6 @@ fi %attr(755, root, root) %{_bindir}/mysqlbinlog %attr(755, root, root) %{_bindir}/mysqlcheck %attr(755, root, root) %{_bindir}/mysqldump -%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysqlimport %attr(755, root, root) %{_bindir}/mysqlshow @@ -733,6 +723,20 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jun 27 2006 Joerg Bruehe + +- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216) + +- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade, + there are some more aspects which need to be solved before this is possible. + For now, just ensure the binary "mysql_upgrade" is delivered and installed. + +* Thu Jun 22 2006 Joerg Bruehe + +- Close a gap of the previous version by explicitly using + a newly created temporary directory for the socket to be used + in the "mysql_upgrade" operation, overriding any local setting. + * Tue Jun 20 2006 Joerg Bruehe - To run "mysql_upgrade", we need a running server;