From 443de04577b6dd94941ea449d45b09f083de1164 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 30 May 2006 14:49:05 +0200 Subject: [PATCH 01/74] Bug#17371: Unable to dump a schema with invalid views 'show create' works even on views that are short of a base-table (this throw a warning though, like you would expect). Unfortunately, this is not what mysqldump uses; it creates stand-in tables and hence requests 'show fields' on the view which fails with missing base-tables. The --force option prevents the dump from stopping at this point; furthermore this patch dumps a comment showing create for the offending view for better diagnostics. This solution was confirmed by submitter as solving their/clients' problem. Problem might become non-issue once mysqldump no longer creates stand-in tables. client/mysqldump.c: Dump a comment showing create for a view if we can't show fields for it for better diagnostics. mysql-test/r/mysqldump.result: add test for #17371 - be defensive. if we can't do a full dump on a view (incl. 'show fields' for a stand-in table), at least create a comment with the 'show create' info when --force is given. mysql-test/t/mysqldump.test: add test for #17371 - be defensive. if we can't do a full dump on a view (incl. 'show fields' for a stand-in table), at least create a comment with the 'show create' info when --force is given. --- client/mysqldump.c | 21 ++++++++++++++++++++- mysql-test/r/mysqldump.result | 9 +++++++++ mysql-test/t/mysqldump.test | 13 +++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index ee6d7b9d12b..31882515a34 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1494,9 +1494,15 @@ static uint get_table_structure(char *table, char *db, char *table_type, field= mysql_fetch_field_direct(result, 0); if (strcmp(field->name, "View") == 0) { + char *scv_buff = NULL; + if (verbose) fprintf(stderr, "-- It's a view, create dummy table for view\n"); + /* save "show create" statement for later */ + if ((row= mysql_fetch_row(result)) && (scv_buff=row[1])) + scv_buff= my_strdup(scv_buff, MYF(0)); + mysql_free_result(result); /* @@ -1514,9 +1520,22 @@ static uint get_table_structure(char *table, char *db, char *table_type, "SHOW FIELDS FROM %s", result_table); if (mysql_query_with_error_report(sock, 0, query_buff)) { + /* + View references invalid or privileged table/col/fun (err 1356), + so we cannot create a stand-in table. Be defensive and dump + a comment with the view's 'show create' statement. (Bug #17371) + */ + + if (mysql_errno(sock) == ER_VIEW_INVALID) + fprintf(sql_file, "\n-- failed on view %s: %s\n\n", result_table, scv_buff ? 
scv_buff : ""); + + my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR)); + safe_exit(EX_MYSQLERR); - DBUG_RETURN(0); + DBUG_RETURN(0); } + else + my_free(scv_buff, MYF(MY_ALLOW_ZERO_PTR)); if ((result= mysql_store_result(sock))) { diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 467e0818646..94c9c4f9007 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -2731,3 +2731,12 @@ p CREATE DEFINER=`root`@`localhost` PROCEDURE `p`() select 42 drop function f; drop procedure p; +create table t1 ( id serial ); +create view v1 as select * from t1; +drop table t1; +mysqldump { + +-- failed on view `v1`: CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1` + +} mysqldump +drop view v1; diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 4076fd258e9..9cd8671bb99 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -1143,3 +1143,16 @@ show create procedure p; drop function f; drop procedure p; +# +# Bug #17371 Unable to dump a schema with invalid views +# +# +create table t1 ( id serial ); +create view v1 as select * from t1; +drop table t1; +# mysqldump gets 1356 from server, but gives us 2 +--echo mysqldump { +--error 2 +--exec $MYSQL_DUMP --force -N --compact --skip-comments test +--echo } mysqldump +drop view v1; From 32a7fafe093d9d3a84eca63a6e4aad02c508539f Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 31 May 2006 00:07:58 -0700 Subject: [PATCH 02/74] Bug#12096 "Add line for non-executable stack in .s files" Fix so that configure will use "--noexecstack" for assembler if gcc supports option and compiled C doesn't need executable stack. config/ac-macros/compiler_flag.m4: Bug#12096 Add macro to check if "--noexecstack" should be used when compiling assembler configure.in: Bug#12096 Add macro to check if "--noexecstack" should be used when compiling assembler strings/Makefile.am: Bug#12096 Automake knows how to handle assembler --- config/ac-macros/compiler_flag.m4 | 22 ++++++++++++++++++++++ configure.in | 4 ++++ strings/Makefile.am | 6 ------ 3 files changed, 26 insertions(+), 6 deletions(-) diff --git a/config/ac-macros/compiler_flag.m4 b/config/ac-macros/compiler_flag.m4 index a236f61a198..9dda6da72fa 100644 --- a/config/ac-macros/compiler_flag.m4 +++ b/config/ac-macros/compiler_flag.m4 @@ -38,3 +38,25 @@ AC_DEFUN([AC_SYS_OS_COMPILER_FLAG], fi ]) +AC_DEFUN([AC_CHECK_NOEXECSTACK], +[ + AC_CACHE_CHECK(whether --noexecstack is desirable for .S files, + mysql_cv_as_noexecstack, [dnl + cat > conftest.c <&AS_MESSAGE_LOG_FD]) \ + && grep -q .note.GNU-stack conftest.s \ + && AC_TRY_COMMAND([${CC-cc} $CCASFLAGS $CPPFLAGS -Wa,--noexecstack + -c -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD]) + then + mysql_cv_as_noexecstack=yes + else + mysql_cv_as_noexecstack=no + fi + rm -f conftest*]) + if test $mysql_cv_as_noexecstack = yes; then + CCASFLAGS="$CCASFLAGS -Wa,--noexecstack" + fi +]) diff --git a/configure.in b/configure.in index ac1c122c2ea..1b44081a4a0 100644 --- a/configure.in +++ b/configure.in @@ -515,6 +515,10 @@ AM_PROG_CC_STDC # We need an assembler, too AM_PROG_AS +CCASFLAGS="$CCASFLAGS $ASFLAGS" + +# Check if we need noexec stack for assembler +AC_CHECK_NOEXECSTACK if test "$am_cv_prog_cc_stdc" = "no" then diff --git a/strings/Makefile.am b/strings/Makefile.am index c43cf0f290a..7ee115c09e5 100644 --- a/strings/Makefile.am +++ b/strings/Makefile.am @@ -66,12 +66,6 @@ conf_to_src_LDFLAGS= @NOINST_LDFLAGS@ #strtoull.o: 
@CHARSET_OBJS@ -if ASSEMBLER -# On Linux gcc can compile the assembly files -%.o : %.s - $(AS) $(ASFLAGS) -o $@ $< -endif - FLAGS=$(DEFS) $(INCLUDES) $(CPPFLAGS) $(CFLAGS) @NOINST_LDFLAGS@ str_test: str_test.c $(pkglib_LIBRARIES) From c69ba2559bd1b866895186d7e41d0a1e17e25714 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 31 May 2006 13:36:28 +0200 Subject: [PATCH 03/74] Bug#18462: mysqldump does not dump view structures correctly (The above problem only occurs with -T -- create a separate file for each table / view.) This ChangeSet results in correct output of view- information while omitting the information for the view's stand-in table. The rationale is that with -T, the user is likely interested in transferring part of a database, not the db in its entirety (that would be difficult as replay order is obscure, the files being named for the table/view they contain rather than getting a sequence number). client/mysqldump.c: Added missing fclose(). Before, a view's stand-in table would get dumped in get_table_structure(), and the file would remain open. get_view_structure() would re-open the same file and write to it, resulting in garbage. The way we handle it now, the table-struct gets closed, then the opening of the view-struct (same name) overwrites it. (The SQL for the view drop-if-exists the table, anyway.) If this were not desired and we wanted SQL for the views that contains the create for the stand-in table, we'd hand a mode to open_sql_file_for_table(), which would feature O_APPEND in get_view_structure(), but not in get_table_structure(). mysql-test/r/mysqldump.result: prove mysqldump -T (each item gets its own file) dumps views correctly mysql-test/t/mysqldump.test: prove mysqldump -T (each item gets its own file) dumps views correctly --- client/mysqldump.c | 3 +++ mysql-test/r/mysqldump.result | 19 +++++++++++++++++++ mysql-test/t/mysqldump.test | 21 +++++++++++++++++++++ 3 files changed, 43 insertions(+) diff --git a/client/mysqldump.c b/client/mysqldump.c index ee6d7b9d12b..bc41ebb486f 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1554,6 +1554,9 @@ static uint get_table_structure(char *table, char *db, char *table_type, } mysql_free_result(result); + if (path) + my_fclose(sql_file, MYF(MY_WME)); + was_views= 1; DBUG_RETURN(0); } diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 467e0818646..2d5be53ea6f 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -2717,6 +2717,25 @@ end AFTER # root@localhost drop trigger tr1; drop trigger tr2; drop table t1, t2; +create table t (qty int, price int); +insert into t values(3, 50); +insert into t values(5, 51); +create view v1 as select qty, price, qty*price as value from t; +create view v2 as select qty from v1; +mysqldump { +/*!50001 CREATE ALGORITHM=UNDEFINED */ +/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */ +/*!50001 VIEW `v1` AS select `t`.`qty` AS `qty`,`t`.`price` AS `price`,(`t`.`qty` * `t`.`price`) AS `value` from `t` */; + +} mysqldump { +/*!50001 CREATE ALGORITHM=UNDEFINED */ +/*!50013 DEFINER=`root`@`localhost` SQL SECURITY DEFINER */ +/*!50001 VIEW `v2` AS select `v1`.`qty` AS `qty` from `v1` */; + +} mysqldump +drop view v1; +drop view v2; +drop table t; /*!50003 CREATE FUNCTION `f`() RETURNS bigint(20) return 42 */| /*!50003 CREATE PROCEDURE `p`() diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 4076fd258e9..f88b0f7cd2b 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -1127,6 
+1127,27 @@ drop trigger tr2; drop table t1, t2; +# +# Bug#18462 mysqldump does not dump view structures correctly +# +# +create table t (qty int, price int); +insert into t values(3, 50); +insert into t values(5, 51); +create view v1 as select qty, price, qty*price as value from t; +create view v2 as select qty from v1; +--echo mysqldump { +--exec $MYSQL_DUMP --compact -F --tab . test +--exec cat v1.sql +--echo } mysqldump { +--exec cat v2.sql +--echo } mysqldump +--rm v.sql t.sql t.txt +drop view v1; +drop view v2; +drop table t; + + # # Bug#14857 Reading dump files with single statement stored routines fails. # fixed by patch for bug#16878 From 69146b9d0a88455ce3d029bafd161a43064367b1 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 5 Jun 2006 13:37:06 -0500 Subject: [PATCH 04/74] Bug #18275 invalid file descriptor causes crash on windows mysys/my_init.c: Added invalid parameter handler so that routines such as lseek would return -1 instead of ASSERT. --- mysys/my_init.c | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/mysys/my_init.c b/mysys/my_init.c index 9b8d4db172f..8346fab95da 100644 --- a/mysys/my_init.c +++ b/mysys/my_init.c @@ -245,6 +245,22 @@ void setEnvString(char *ret, const char *name, const char *value) DBUG_VOID_RETURN ; } +/* + my_paramter_handler + Invalid paramter handler we will use instead of the one "baked" into the CRT + for MSC v8. This one just prints out what invalid parameter was encountered. + By providing this routine, routines like lseek will return -1 when we expect them + to instead of crash. +*/ +void my_parameter_handler(const wchar_t * expression, const wchar_t * function, + const wchar_t * file, unsigned int line, + uintptr_t pReserved) +{ + DBUG_PRINT("my",("Expression: %s function: %s file: %s, line: %d", + expression, function, file, line)); +} + + static void my_win_init(void) { HKEY hSoftMysql ; @@ -262,12 +278,18 @@ static void my_win_init(void) setlocale(LC_CTYPE, ""); /* To get right sortorder */ -#if defined(_MSC_VER) && (_MSC_VER < 1300) +#if defined(_MSC_VER) +#if _MSC_VER < 1300 /* Clear the OS system variable TZ and avoid the 100% CPU usage Only for old versions of Visual C++ */ _putenv( "TZ=" ); +#endif +#if _MSC_VER >= 1400 + /* this is required to make crt functions return -1 appropriately */ + _set_invalid_parameter_handler(my_parameter_handler); +#endif #endif _tzset(); From 40f44b48b0f44f9eb7f13dc7c2e28e56ad805634 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 8 Jun 2006 16:16:07 +0200 Subject: [PATCH 05/74] ndb - bug#18781 lock DICT during node restart ndb/src/kernel/main.cpp: signal log from start (#if 0-ed) ndb/test/ndbapi/testDict.cpp: test NF/NR + dict ops ndb/src/kernel/vm/DLFifoList.hpp: add hasPrev ndb/src/kernel/vm/pc.hpp: ERROR_INSERTED_CLEAR(x) test and clear if set ndb/src/common/debugger/SignalLoggerManager.cpp: block no fix ndb/src/kernel/blocks/qmgr/QmgrMain.cpp: spelling ndb/include/kernel/GlobalSignalNumbers.h: locking of master DICT against schema ops, used by slave DIH under NR ndb/include/kernel/signaldata/AlterTable.hpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/include/kernel/signaldata/CreateTable.hpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/include/kernel/signaldata/DictLock.hpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/include/kernel/signaldata/DropTable.hpp: locking of master DICT against schema ops, used by slave DIH under NR 
ndb/src/common/debugger/signaldata/SignalNames.cpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/kernel/blocks/ERROR_codes.txt: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/kernel/blocks/dbdict/Dbdict.cpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/kernel/blocks/dbdict/Dbdict.hpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/kernel/blocks/dbdih/Dbdih.hpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/kernel/blocks/dbdih/DbdihInit.cpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: locking of master DICT against schema ops, used by slave DIH under NR ndb/src/ndbapi/ndberror.c: locking of master DICT against schema ops, used by slave DIH under NR --- ndb/include/kernel/GlobalSignalNumbers.h | 10 +- ndb/include/kernel/signaldata/AlterTable.hpp | 1 + ndb/include/kernel/signaldata/CreateTable.hpp | 1 + ndb/include/kernel/signaldata/DictLock.hpp | 76 +++++ ndb/include/kernel/signaldata/DropTable.hpp | 1 + .../common/debugger/SignalLoggerManager.cpp | 2 +- .../debugger/signaldata/SignalNames.cpp | 6 + ndb/src/kernel/blocks/ERROR_codes.txt | 4 +- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 245 +++++++++++++- ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 97 +++++- ndb/src/kernel/blocks/dbdih/Dbdih.hpp | 28 ++ ndb/src/kernel/blocks/dbdih/DbdihInit.cpp | 6 + ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 133 +++++++- ndb/src/kernel/blocks/qmgr/QmgrMain.cpp | 4 +- ndb/src/kernel/main.cpp | 4 + ndb/src/kernel/vm/DLFifoList.hpp | 14 + ndb/src/kernel/vm/pc.hpp | 2 + ndb/src/ndbapi/ndberror.c | 1 + ndb/test/ndbapi/testDict.cpp | 304 ++++++++++++++++++ 19 files changed, 924 insertions(+), 15 deletions(-) create mode 100644 ndb/include/kernel/signaldata/DictLock.hpp diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index 98b6ce7d949..d60f7a2c582 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -507,16 +507,12 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_TEST_ORD 407 #define GSN_TESTSIG 408 #define GSN_TIME_SIGNAL 409 -/* 410 unused */ -/* 411 unused */ -/* 412 unused */ #define GSN_TUP_ABORTREQ 414 #define GSN_TUP_ADD_ATTCONF 415 #define GSN_TUP_ADD_ATTRREF 416 #define GSN_TUP_ADD_ATTRREQ 417 #define GSN_TUP_ATTRINFO 418 #define GSN_TUP_COMMITREQ 419 -/* 420 unused */ #define GSN_TUP_LCPCONF 421 #define GSN_TUP_LCPREF 422 #define GSN_TUP_LCPREQ 423 @@ -938,4 +934,10 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_ACC_LOCKREQ 711 #define GSN_READ_PSUEDO_REQ 712 +/* DICT LOCK signals */ +#define GSN_DICT_LOCK_REQ 410 +#define GSN_DICT_LOCK_CONF 411 +#define GSN_DICT_LOCK_REF 412 +#define GSN_DICT_UNLOCK_ORD 420 + #endif diff --git a/ndb/include/kernel/signaldata/AlterTable.hpp b/ndb/include/kernel/signaldata/AlterTable.hpp index 16c9eb204c9..f5006c27fdb 100644 --- a/ndb/include/kernel/signaldata/AlterTable.hpp +++ b/ndb/include/kernel/signaldata/AlterTable.hpp @@ -114,6 +114,7 @@ public: InvalidTableVersion = 241, DropInProgress = 283, Busy = 701, + BusyWithNR = 711, NotMaster = 702, InvalidFormat = 703, AttributeNameTooLong = 704, diff --git a/ndb/include/kernel/signaldata/CreateTable.hpp b/ndb/include/kernel/signaldata/CreateTable.hpp index 481b323fdb0..7d3189cc126 100644 --- a/ndb/include/kernel/signaldata/CreateTable.hpp +++ 
b/ndb/include/kernel/signaldata/CreateTable.hpp @@ -77,6 +77,7 @@ public: enum ErrorCode { NoError = 0, Busy = 701, + BusyWithNR = 711, NotMaster = 702, InvalidFormat = 703, AttributeNameTooLong = 704, diff --git a/ndb/include/kernel/signaldata/DictLock.hpp b/ndb/include/kernel/signaldata/DictLock.hpp new file mode 100644 index 00000000000..c8f919f65a8 --- /dev/null +++ b/ndb/include/kernel/signaldata/DictLock.hpp @@ -0,0 +1,76 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#ifndef DICT_LOCK_HPP +#define DICT_LOCK_HPP + +#include "SignalData.hpp" + +// see comments in Dbdict.hpp + +class DictLockReq { + friend class Dbdict; + friend class Dbdih; +public: + STATIC_CONST( SignalLength = 3 ); + enum LockType { + NoLock = 0, + NodeRestartLock = 1 + }; +private: + Uint32 userPtr; + Uint32 lockType; + Uint32 userRef; +}; + +class DictLockConf { + friend class Dbdict; + friend class Dbdih; +public: + STATIC_CONST( SignalLength = 3 ); +private: + Uint32 userPtr; + Uint32 lockType; + Uint32 lockPtr; +}; + +class DictLockRef { + friend class Dbdict; + friend class Dbdih; +public: + STATIC_CONST( SignalLength = 3 ); + enum ErrorCode { + NotMaster = 1, + InvalidLockType = 2, + TooManyRequests = 3 + }; +private: + Uint32 userPtr; + Uint32 lockType; + Uint32 errorCode; +}; + +class DictUnlockOrd { + friend class Dbdict; + friend class Dbdih; +public: + STATIC_CONST( SignalLength = 2 ); +private: + Uint32 lockPtr; + Uint32 lockType; +}; + +#endif diff --git a/ndb/include/kernel/signaldata/DropTable.hpp b/ndb/include/kernel/signaldata/DropTable.hpp index cae6aff8754..e762446d2b8 100644 --- a/ndb/include/kernel/signaldata/DropTable.hpp +++ b/ndb/include/kernel/signaldata/DropTable.hpp @@ -53,6 +53,7 @@ public: enum ErrorCode { Busy = 701, + BusyWithNR = 711, NotMaster = 702, NoSuchTable = 709, InvalidTableVersion = 241, diff --git a/ndb/src/common/debugger/SignalLoggerManager.cpp b/ndb/src/common/debugger/SignalLoggerManager.cpp index d8710d2058f..67e13dc805a 100644 --- a/ndb/src/common/debugger/SignalLoggerManager.cpp +++ b/ndb/src/common/debugger/SignalLoggerManager.cpp @@ -139,7 +139,7 @@ SignalLoggerManager::log(LogMode logMode, const char * params) } else { for (int i = 0; i < count; ++i){ BlockNumber number = getBlockNo(blocks[i]); - cnt += log(SLM_ON, number-MIN_BLOCK_NO, logMode); + cnt += log(SLM_ON, number, logMode); } } for(int i = 0; ireq.userRef), lockPtr.p->lt->text); +} + +void +Dbdict::execDICT_LOCK_REQ(Signal* signal) +{ + jamEntry(); + const DictLockReq* req = (const DictLockReq*)&signal->theData[0]; + + if (getOwnNodeId() != c_masterNodeId) { + jam(); + sendDictLockRef(signal, *req, DictLockRef::NotMaster); + return; + } + + const DictLockType* lt = getDictLockType(req->lockType); + if (lt == NULL) { + jam(); + sendDictLockRef(signal, *req, DictLockRef::InvalidLockType); + return; + } + + DictLockPtr 
lockPtr; + if (! c_dictLockQueue.seize(lockPtr)) { + jam(); + sendDictLockRef(signal, *req, DictLockRef::TooManyRequests); + return; + } + + lockPtr.p->req = *req; + lockPtr.p->locked = false; + lockPtr.p->lt = lt; + + checkDictLockQueue(signal); + + if (! lockPtr.p->locked) + sendDictLockInfoEvent(lockPtr, "lock request by node"); +} + +void +Dbdict::checkDictLockQueue(Signal* signal) +{ + DictLockPtr lockPtr; + + do { + if (! c_dictLockQueue.first(lockPtr)) { + jam(); + setDictLockPoll(signal, false); + return; + } + + if (lockPtr.p->locked) { + jam(); + ndbrequire(c_blockState == lockPtr.p->lt->blockState); + break; + } + + if (c_opRecordPool.getNoOfFree() != c_opRecordPool.getSize()) { + jam(); + break; + } + + ndbrequire(c_blockState == BS_IDLE); + lockPtr.p->locked = true; + c_blockState = lockPtr.p->lt->blockState; + sendDictLockConf(signal, lockPtr); + + sendDictLockInfoEvent(lockPtr, "locked by node"); + } while (0); + + // poll while first request is open + // this routine is called again when it is removed for any reason + + bool on = ! lockPtr.p->locked; + setDictLockPoll(signal, on); +} + +void +Dbdict::execDICT_UNLOCK_ORD(Signal* signal) +{ + jamEntry(); + const DictUnlockOrd* ord = (const DictUnlockOrd*)&signal->theData[0]; + + DictLockPtr lockPtr; + c_dictLockQueue.getPtr(lockPtr, ord->lockPtr); + ndbrequire(lockPtr.p->lt->lockType == ord->lockType); + + if (lockPtr.p->locked) { + jam(); + ndbrequire(c_blockState == lockPtr.p->lt->blockState); + ndbrequire(c_opRecordPool.getNoOfFree() == c_opRecordPool.getSize()); + ndbrequire(! c_dictLockQueue.hasPrev(lockPtr)); + + c_blockState = BS_IDLE; + sendDictLockInfoEvent(lockPtr, "unlocked by node"); + } else { + sendDictLockInfoEvent(lockPtr, "lock request removed by node"); + } + + c_dictLockQueue.release(lockPtr); + + checkDictLockQueue(signal); +} + +void +Dbdict::sendDictLockConf(Signal* signal, DictLockPtr lockPtr) +{ + DictLockConf* conf = (DictLockConf*)&signal->theData[0]; + const DictLockReq& req = lockPtr.p->req; + + conf->userPtr = req.userPtr; + conf->lockType = req.lockType; + conf->lockPtr = lockPtr.i; + + sendSignal(req.userRef, GSN_DICT_LOCK_CONF, signal, + DictLockConf::SignalLength, JBB); +} + +void +Dbdict::sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode) +{ + DictLockRef* ref = (DictLockRef*)&signal->theData[0]; + + ref->userPtr = req.userPtr; + ref->lockType = req.lockType; + ref->errorCode = errorCode; + + sendSignal(req.userRef, GSN_DICT_LOCK_REF, signal, + DictLockRef::SignalLength, JBB); +} + +// control polling + +void +Dbdict::setDictLockPoll(Signal* signal, bool on) +{ + if (on) { + jam(); + signal->theData[0] = ZDICT_LOCK_POLL; + sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); + } + + if (c_dictLockPoll != on) { + jam(); +#ifdef VM_TRACE + infoEvent("DICT: lock polling %s", on ? "On" : "Off"); +#endif + c_dictLockPoll = on; + } +} + +// NF handling + +void +Dbdict::removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes) +{ + DictLockPtr loopPtr; + c_dictLockQueue.first(loopPtr); + + while (loopPtr.i != RNIL) { + jam(); + DictLockPtr lockPtr = loopPtr; + c_dictLockQueue.next(loopPtr); + + Uint32 nodeId = refToNode(lockPtr.p->req.userRef); + + if (NodeBitmask::get(theFailedNodes, nodeId)) { + if (lockPtr.p->locked) { + jam(); + ndbrequire(c_blockState == lockPtr.p->lt->blockState); + ndbrequire(c_opRecordPool.getNoOfFree() == c_opRecordPool.getSize()); + ndbrequire(! 
c_dictLockQueue.hasPrev(lockPtr)); + + c_blockState = BS_IDLE; + + sendDictLockInfoEvent(lockPtr, "remove lock by failed node"); + } else { + sendDictLockInfoEvent(lockPtr, "remove lock request by failed node"); + } + + c_dictLockQueue.release(lockPtr); + } + } + + checkDictLockQueue(signal); +} + + /* **************************************************************** */ /* ---------------------------------------------------------------- */ /* MODULE: STORE/RESTORE SCHEMA FILE---------------------- */ diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 6b78fb86534..fbad67d8822 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -50,6 +51,7 @@ #include #include #include +#include #include "SchemaFile.hpp" #include #include @@ -63,6 +65,7 @@ /*--------------------------------------------------------------*/ #define ZPACK_TABLE_INTO_PAGES 0 #define ZSEND_GET_TAB_RESPONSE 3 +#define ZDICT_LOCK_POLL 4 /*--------------------------------------------------------------*/ @@ -587,6 +590,9 @@ private: void execALTER_TAB_CONF(Signal* signal); bool check_ndb_versions() const; + void execDICT_LOCK_REQ(Signal* signal); + void execDICT_UNLOCK_ORD(Signal* signal); + /* * 2.4 COMMON STORED VARIABLES */ @@ -817,12 +823,43 @@ private: // State variables /* ----------------------------------------------------------------------- */ +#ifndef ndb_dbdict_log_block_state enum BlockState { BS_IDLE = 0, BS_CREATE_TAB = 1, BS_BUSY = 2, - BS_NODE_FAILURE = 3 + BS_NODE_FAILURE = 3, + BS_NODE_RESTART = 4 }; +#else // quick hack to log changes + enum { + BS_IDLE = 0, + BS_CREATE_TAB = 1, + BS_BUSY = 2, + BS_NODE_FAILURE = 3, + BS_NODE_RESTART = 4 + }; + struct BlockState; + friend struct BlockState; + struct BlockState { + BlockState() : + m_value(BS_IDLE) { + } + BlockState(int value) : + m_value(value) { + } + operator int() const { + return m_value; + } + BlockState& operator=(const BlockState& bs) { + Dbdict* dict = (Dbdict*)globalData.getBlock(DBDICT); + dict->infoEvent("DICT: bs %d->%d", m_value, bs.m_value); + m_value = bs.m_value; + return *this; + } + int m_value; + }; +#endif BlockState c_blockState; struct PackTable { @@ -1722,6 +1759,64 @@ private: // Unique key for operation XXX move to some system table Uint32 c_opRecordSequence; + /* + * Master DICT can be locked in 2 mutually exclusive ways: + * + * 1) for schema ops, via operation records + * 2) against schema ops, via a lock queue + * + * Current use of 2) is by a starting node, to prevent schema ops + * until started. The ops are refused (BlockState != BS_IDLE), + * not queued. + * + * Master failure is not handled, in node start case the starting + * node will crash too anyway. Use lock table in future.. + * + * The lock queue is "serial" but other behaviour is possible + * by checking lock types e.g. to allow parallel node starts. + * + * Checking release of last op record is not convenient with + * current structure (5.0). Instead we poll via continueB. 
+ * + * XXX only table ops check BlockState + */ + + struct DictLockType { + DictLockReq::LockType lockType; + BlockState blockState; + const char* text; + }; + + struct DictLockRecord { + DictLockReq req; + const DictLockType* lt; + bool locked; + union { + Uint32 nextPool; + Uint32 nextList; + }; + Uint32 prevList; + }; + + typedef Ptr DictLockPtr; + ArrayPool c_dictLockPool; + DLFifoList c_dictLockQueue; + bool c_dictLockPoll; + + static const DictLockType* getDictLockType(Uint32 lockType); + void sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text); + + void checkDictLockQueue(Signal* signal); + void sendDictLockConf(Signal* signal, DictLockPtr lockPtr); + void sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode); + + // control polling i.e. continueB loop + void setDictLockPoll(Signal* signal, bool on); + + // NF handling + void removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes); + + // Statement blocks /* ------------------------------------------------------------ */ diff --git a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp index 78acf1ffd19..f4a33df9805 100644 --- a/ndb/src/kernel/blocks/dbdih/Dbdih.hpp +++ b/ndb/src/kernel/blocks/dbdih/Dbdih.hpp @@ -718,6 +718,9 @@ private: void checkPrepDropTabComplete(Signal *, TabRecordPtr tabPtr); void checkWaitDropTabFailedLqh(Signal *, Uint32 nodeId, Uint32 tableId); + void execDICT_LOCK_CONF(Signal* signal); + void execDICT_LOCK_REF(Signal* signal); + // Statement blocks //------------------------------------ // Methods that send signals @@ -935,6 +938,7 @@ private: void initialStartCompletedLab(Signal *); void allNodesLcpCompletedLab(Signal *); void nodeRestartPh2Lab(Signal *); + void nodeRestartPh2Lab2(Signal *); void initGciFilesLab(Signal *); void dictStartConfLab(Signal *); void nodeDictStartConfLab(Signal *); @@ -1594,6 +1598,30 @@ private: * Reply from nodeId */ void startInfoReply(Signal *, Uint32 nodeId); + + /* + * Lock master DICT. Only current use is by starting node + * during NR. A pool of slave records is convenient anyway. 
+ */ + struct DictLockSlaveRecord { + Uint32 lockPtr; + Uint32 lockType; + bool locked; + Callback callback; + Uint32 nextPool; + }; + + typedef Ptr DictLockSlavePtr; + ArrayPool c_dictLockSlavePool; + + // slave + void sendDictLockReq(Signal* signal, Uint32 lockType, Callback c); + void recvDictLockConf(Signal* signal); + void sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI); + + // NR + Uint32 c_dictLockSlavePtrI_nodeRestart; // userPtr for NR + void recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret); }; #if (DIH_CDATA_SIZE < _SYSFILE_SIZE32) diff --git a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp index cd987048577..2b878034258 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihInit.cpp @@ -66,6 +66,9 @@ void Dbdih::initData() waitGCPProxyPool.setSize(ZPROXY_FILE_SIZE); waitGCPMasterPool.setSize(ZPROXY_MASTER_FILE_SIZE); + c_dictLockSlavePool.setSize(1); // assert single usage + c_dictLockSlavePtrI_nodeRestart = RNIL; + cgcpOrderBlocked = 0; c_lcpState.ctcCounter = 0; cwaitLcpSr = false; @@ -264,6 +267,9 @@ Dbdih::Dbdih(const class Configuration & config): addRecSignal(GSN_CREATE_FRAGMENTATION_REQ, &Dbdih::execCREATE_FRAGMENTATION_REQ); + addRecSignal(GSN_DICT_LOCK_CONF, &Dbdih::execDICT_LOCK_CONF); + addRecSignal(GSN_DICT_LOCK_REF, &Dbdih::execDICT_LOCK_REF); + apiConnectRecord = 0; connectRecord = 0; fileRecord = 0; diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 3ebad7f0cd2..c37461a1f65 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -67,6 +67,7 @@ #include #include #include +#include #include #include @@ -544,7 +545,7 @@ void Dbdih::execCONTINUEB(Signal* signal) break; case DihContinueB::ZSTART_PERMREQ_AGAIN: jam(); - nodeRestartPh2Lab(signal); + nodeRestartPh2Lab2(signal); return; break; case DihContinueB::SwitchReplica: @@ -1284,6 +1285,7 @@ void Dbdih::execNDB_STTOR(Signal* signal) case NodeState::ST_INITIAL_NODE_RESTART: case NodeState::ST_NODE_RESTART: jam(); + /*********************************************************************** * When starting nodes while system is operational we must be controlled * by the master since only one node restart is allowed at a time. @@ -1294,7 +1296,7 @@ void Dbdih::execNDB_STTOR(Signal* signal) req->startingRef = reference(); req->startingVersion = 0; // Obsolete sendSignal(cmasterdihref, GSN_START_MEREQ, signal, - StartMeReq::SignalLength, JBB); + StartMeReq::SignalLength, JBB); return; } ndbrequire(false); @@ -1354,6 +1356,24 @@ void Dbdih::execNDB_STTOR(Signal* signal) } ndbrequire(false); break; + case ZNDB_SPH7: + jam(); + switch (typestart) { + case NodeState::ST_INITIAL_START: + case NodeState::ST_SYSTEM_RESTART: + jam(); + ndbsttorry10Lab(signal, __LINE__); + return; + case NodeState::ST_NODE_RESTART: + case NodeState::ST_INITIAL_NODE_RESTART: + jam(); + sendDictUnlockOrd(signal, c_dictLockSlavePtrI_nodeRestart); + c_dictLockSlavePtrI_nodeRestart = RNIL; + ndbsttorry10Lab(signal, __LINE__); + return; + } + ndbrequire(false); + break; default: jam(); ndbsttorry10Lab(signal, __LINE__); @@ -1563,6 +1583,31 @@ void Dbdih::execREAD_NODESCONF(Signal* signal) /* START NODE LOGIC FOR NODE RESTART */ /*---------------------------------------------------------------------------*/ void Dbdih::nodeRestartPh2Lab(Signal* signal) +{ + /* + * Lock master DICT to avoid metadata operations during INR/NR. 
+ * Done just before START_PERMREQ. + * + * It would be more elegant to do this just before START_MEREQ. + * The problem is, on INR we end up in massive invalidateNodeLCP + * which is not fully protected against metadata ops. + */ + ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); + + Uint32 lockType = DictLockReq::NodeRestartLock; + Callback c = { safe_cast(&Dbdih::recvDictLockConf_nodeRestart), 0 }; + sendDictLockReq(signal, lockType, c); +} + +void Dbdih::recvDictLockConf_nodeRestart(Signal* signal, Uint32 data, Uint32 ret) +{ + ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); + c_dictLockSlavePtrI_nodeRestart = data; + + nodeRestartPh2Lab2(signal); +} + +void Dbdih::nodeRestartPh2Lab2(Signal* signal) { /*------------------------------------------------------------------------*/ // REQUEST FOR PERMISSION FROM MASTER TO START A NODE IN AN ALREADY @@ -1574,7 +1619,7 @@ void Dbdih::nodeRestartPh2Lab(Signal* signal) req->nodeId = cownNodeId; req->startType = cstarttype; sendSignal(cmasterdihref, GSN_START_PERMREQ, signal, 3, JBB); -}//Dbdih::nodeRestartPh2Lab() +} void Dbdih::execSTART_PERMCONF(Signal* signal) { @@ -1696,12 +1741,12 @@ void Dbdih::execSTART_PERMREQ(Signal* signal) const BlockReference retRef = req->blockRef; const Uint32 nodeId = req->nodeId; const Uint32 typeStart = req->startType; - CRASH_INSERTION(7122); ndbrequire(isMaster()); ndbrequire(refToNode(retRef) == nodeId); if ((c_nodeStartMaster.activeState) || - (c_nodeStartMaster.wait != ZFALSE)) { + (c_nodeStartMaster.wait != ZFALSE) || + ERROR_INSERTED_CLEAR(7174)) { jam(); signal->theData[0] = nodeId; signal->theData[1] = StartPermRef::ZNODE_ALREADY_STARTING_ERROR; @@ -10448,6 +10493,10 @@ void Dbdih::crashSystemAtGcpStop(Signal* signal) c_copyGCIMaster.m_copyReason, c_copyGCIMaster.m_waiting); break; + case GCP_READY: // shut up lint + case GCP_PREPARE_SENT: + case GCP_COMMIT_SENT: + break; } ndbout_c("c_copyGCISlave: sender{Data, Ref} %d %x reason: %d nextWord: %d", @@ -14639,3 +14688,77 @@ Dbdih::NodeRecord::NodeRecord(){ copyCompleted = false; allowNodeStart = true; } + +// DICT lock slave + +void +Dbdih::sendDictLockReq(Signal* signal, Uint32 lockType, Callback c) +{ + DictLockReq* req = (DictLockReq*)&signal->theData[0]; + DictLockSlavePtr lockPtr; + + c_dictLockSlavePool.seize(lockPtr); + ndbrequire(lockPtr.i != RNIL); + + req->userPtr = lockPtr.i; + req->lockType = lockType; + req->userRef = reference(); + + lockPtr.p->lockPtr = RNIL; + lockPtr.p->lockType = lockType; + lockPtr.p->locked = false; + lockPtr.p->callback = c; + + BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId); + sendSignal(dictMasterRef, GSN_DICT_LOCK_REQ, signal, + DictLockReq::SignalLength, JBB); +} + +void +Dbdih::execDICT_LOCK_CONF(Signal* signal) +{ + jamEntry(); + recvDictLockConf(signal); +} + +void +Dbdih::execDICT_LOCK_REF(Signal* signal) +{ + jamEntry(); + ndbrequire(false); +} + +void +Dbdih::recvDictLockConf(Signal* signal) +{ + const DictLockConf* conf = (const DictLockConf*)&signal->theData[0]; + + DictLockSlavePtr lockPtr; + c_dictLockSlavePool.getPtr(lockPtr, conf->userPtr); + + lockPtr.p->lockPtr = conf->lockPtr; + ndbrequire(lockPtr.p->lockType == conf->lockType); + ndbrequire(lockPtr.p->locked == false); + lockPtr.p->locked = true; + + lockPtr.p->callback.m_callbackData = lockPtr.i; + execute(signal, lockPtr.p->callback, 0); +} + +void +Dbdih::sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI) +{ + DictUnlockOrd* ord = (DictUnlockOrd*)&signal->theData[0]; + + DictLockSlavePtr lockPtr; + 
c_dictLockSlavePool.getPtr(lockPtr, lockSlavePtrI); + + ord->lockPtr = lockPtr.p->lockPtr; + ord->lockType = lockPtr.p->lockType; + + c_dictLockSlavePool.release(lockPtr); + + BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId); + sendSignal(dictMasterRef, GSN_DICT_UNLOCK_ORD, signal, + DictUnlockOrd::SignalLength, JBB); +} diff --git a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp index 9a7256b4a55..b9bf522f7c8 100644 --- a/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp +++ b/ndb/src/kernel/blocks/qmgr/QmgrMain.cpp @@ -2477,7 +2477,7 @@ void Qmgr::execDISCONNECT_REP(Signal* signal) { jam(); CRASH_INSERTION(932); - BaseString::snprintf(buf, 100, "Node %u disconected", nodeId); + BaseString::snprintf(buf, 100, "Node %u disconnected", nodeId); progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf); ndbrequire(false); } @@ -2500,7 +2500,7 @@ void Qmgr::execDISCONNECT_REP(Signal* signal) ndbrequire(false); case ZAPI_INACTIVE: { - BaseString::snprintf(buf, 100, "Node %u disconected", nodeId); + BaseString::snprintf(buf, 100, "Node %u disconnected", nodeId); progError(__LINE__, NDBD_EXIT_SR_OTHERNODEFAILED, buf); ndbrequire(false); } diff --git a/ndb/src/kernel/main.cpp b/ndb/src/kernel/main.cpp index 7c1763485ce..649ae7cae3f 100644 --- a/ndb/src/kernel/main.cpp +++ b/ndb/src/kernel/main.cpp @@ -420,6 +420,10 @@ int main(int argc, char** argv) FILE * signalLog = fopen(buf, "a"); globalSignalLoggers.setOwnNodeId(globalData.ownId); globalSignalLoggers.setOutputStream(signalLog); +#if 0 // to log startup + globalSignalLoggers.log(SignalLoggerManager::LogInOut, "BLOCK=DBDICT,DBDIH"); + globalData.testOn = 1; +#endif #endif catchsigs(false); diff --git a/ndb/src/kernel/vm/DLFifoList.hpp b/ndb/src/kernel/vm/DLFifoList.hpp index b139ade831d..963ab007b65 100644 --- a/ndb/src/kernel/vm/DLFifoList.hpp +++ b/ndb/src/kernel/vm/DLFifoList.hpp @@ -115,6 +115,13 @@ public: */ bool hasNext(const Ptr &) const; + /** + * Check if prev exists i.e. this is not first + * + * NOTE ptr must be both p & i + */ + bool hasPrev(const Ptr &) const; + Uint32 noOfElements() const { Uint32 c = 0; Uint32 i = head.firstItem; @@ -357,4 +364,11 @@ DLFifoList::hasNext(const Ptr & p) const { return p.p->nextList != RNIL; } +template +inline +bool +DLFifoList::hasPrev(const Ptr & p) const { + return p.p->prevList != RNIL; +} + #endif diff --git a/ndb/src/kernel/vm/pc.hpp b/ndb/src/kernel/vm/pc.hpp index 6aeda59224f..95839c48e4e 100644 --- a/ndb/src/kernel/vm/pc.hpp +++ b/ndb/src/kernel/vm/pc.hpp @@ -125,11 +125,13 @@ #ifdef ERROR_INSERT #define ERROR_INSERT_VARIABLE UintR cerrorInsert #define ERROR_INSERTED(x) (cerrorInsert == (x)) +#define ERROR_INSERTED_CLEAR(x) (cerrorInsert == (x) ? 
(cerrorInsert = 0, true) : false) #define SET_ERROR_INSERT_VALUE(x) cerrorInsert = x #define CLEAR_ERROR_INSERT_VALUE cerrorInsert = 0 #else #define ERROR_INSERT_VARIABLE typedef void * cerrorInsert // Will generate compiler error if used #define ERROR_INSERTED(x) false +#define ERROR_INSERTED_CLEAR(x) false #define SET_ERROR_INSERT_VALUE(x) #define CLEAR_ERROR_INSERT_VALUE #endif diff --git a/ndb/src/ndbapi/ndberror.c b/ndb/src/ndbapi/ndberror.c index 5ca8ad7be60..657c57021bb 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -325,6 +325,7 @@ ErrorBundle ErrorCodes[] = { * SchemaError */ { 701, SE, "System busy with other schema operation" }, + { 711, SE, "System busy with node restart, schema operations not allowed" }, { 703, SE, "Invalid table format" }, { 704, SE, "Attribute name too long" }, { 705, SE, "Table name too long" }, diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index 710a47bf3dc..2cfb78f143e 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -1551,6 +1551,282 @@ end: return result; } +// NFNR + +// Restarter controls dict ops : 1-run 2-pause 3-stop +// synced by polling... + +static bool +send_dict_ops_cmd(NDBT_Context* ctx, Uint32 cmd) +{ + ctx->setProperty("DictOps_CMD", cmd); + while (1) { + if (ctx->isTestStopped()) + return false; + if (ctx->getProperty("DictOps_ACK") == cmd) + break; + NdbSleep_MilliSleep(100); + } + return true; +} + +static bool +recv_dict_ops_run(NDBT_Context* ctx) +{ + while (1) { + if (ctx->isTestStopped()) + return false; + Uint32 cmd = ctx->getProperty("DictOps_CMD"); + ctx->setProperty("DictOps_ACK", cmd); + if (cmd == 1) + break; + if (cmd == 3) + return false; + NdbSleep_MilliSleep(100); + } + return true; +} + +int +runRestarts(NDBT_Context* ctx, NDBT_Step* step) +{ + static int err_master[] = { // non-crashing + 0, + 7174 // send one fake START_PERMREF + }; + static int err_node[] = { + 0, + 7121, // crash on START_PERMCONF + 7130 // crash on START_MECONF + }; + const uint err_master_cnt = sizeof(err_master)/sizeof(err_master[0]); + const uint err_node_cnt = sizeof(err_node)/sizeof(err_node[0]); + + myRandom48Init(NdbTick_CurrentMillisecond()); + NdbRestarter restarter; + int result = NDBT_OK; + const int loops = ctx->getNumLoops(); + + for (int l = 0; l < loops && result == NDBT_OK; l++) { + g_info << "1: === loop " << l << " ===" << endl; + + // assuming 2-way replicated + + int numnodes = restarter.getNumDbNodes(); + CHECK(numnodes >= 1); + if (numnodes == 1) + break; + + int masterNodeId = restarter.getMasterNodeId(); + CHECK(masterNodeId != -1); + + // for more complex cases need more restarter support methods + + int nodeIdList[2] = { 0, 0 }; + int nodeIdCnt = 0; + + if (numnodes >= 2) { + int rand = myRandom48(numnodes); + int nodeId = restarter.getRandomNotMasterNodeId(rand); + CHECK(nodeId != -1); + nodeIdList[nodeIdCnt++] = nodeId; + } + + if (numnodes >= 4) { + int rand = myRandom48(numnodes); + int nodeId = restarter.getRandomNodeOtherNodeGroup(nodeIdList[0], rand); + CHECK(nodeId != -1); + if (nodeId != masterNodeId) + nodeIdList[nodeIdCnt++] = nodeId; + } + + g_info << "1: master=" << masterNodeId << " nodes=" << nodeIdList[0] << "," << nodeIdList[1] << endl; + + const unsigned maxsleep = 2000; //ms + + bool NF_ops = ctx->getProperty("Restart_NF_ops"); + uint NF_type = ctx->getProperty("Restart_NF_type"); + bool NR_ops = ctx->getProperty("Restart_NR_ops"); + bool NR_error = ctx->getProperty("Restart_NR_error"); + + g_info << "1: " << (NF_ops ? 
"run" : "pause") << " dict ops" << endl; + if (! send_dict_ops_cmd(ctx, NF_ops ? 1 : 2)) + break; + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + { + int i = 0; + while (i < nodeIdCnt) { + int nodeId = nodeIdList[i++]; + + bool nostart = true; + bool abort = NF_type == 0 ? myRandom48(2) : (NF_type == 2); + bool initial = myRandom48(2); + + char flags[40]; + strcpy(flags, "flags: nostart"); + if (abort) + strcat(flags, ",abort"); + if (initial) + strcat(flags, ",initial"); + + g_info << "1: restart " << nodeId << " " << flags << endl; + CHECK(restarter.restartOneDbNode(nodeId, initial, nostart, abort) == 0); + } + } + + g_info << "1: wait for nostart" << endl; + CHECK(restarter.waitNodesNoStart(nodeIdList, nodeIdCnt) == 0); + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "1: " << (NR_ops ? "run" : "pause") << " dict ops" << endl; + if (! send_dict_ops_cmd(ctx, NR_ops ? 1 : 2)) + break; + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "1: start nodes" << endl; + CHECK(restarter.startNodes(nodeIdList, nodeIdCnt) == 0); + + if (NR_error) { + { + int rand = myRandom48(err_master_cnt); + int err = err_master[rand]; + if (err != 0) { + g_info << "1: insert master error " << err << endl; + CHECK(restarter.insertErrorInNode(masterNodeId, err) == 0); + } + } + + // limitation: cannot have 2 node restarts and crash_insert + // one node may die for real (NF during startup) + + int i = 0; + while (i < nodeIdCnt && nodeIdCnt == 1) { + int nodeId = nodeIdList[i++]; + + int rand = myRandom48(err_node_cnt); + int err = err_node[rand]; + if (err != 0) { + g_info << "1: insert node " << nodeId << " error " << err << endl; + CHECK(restarter.insertErrorInNode(nodeId, err) == 0); + } + } + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "1: wait cluster started" << endl; + CHECK(restarter.waitClusterStarted() == 0); + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "1: restart done" << endl; + } + + g_info << "1: stop dict ops" << endl; + send_dict_ops_cmd(ctx, 3); + + return result; +} + +int +runDictOps(NDBT_Context* ctx, NDBT_Step* step) +{ + myRandom48Init(NdbTick_CurrentMillisecond()); + int result = NDBT_OK; + + for (int l = 0; result == NDBT_OK; l++) { + if (! 
recv_dict_ops_run(ctx)) + break; + + g_info << "2: === loop " << l << " ===" << endl; + + Ndb* pNdb = GETNDB(step); + NdbDictionary::Dictionary* pDic = pNdb->getDictionary(); + const NdbDictionary::Table* pTab = ctx->getTab(); + const char* tabName = pTab->getName(); + + const unsigned long maxsleep = 100; //ms + + g_info << "2: create table" << endl; + { + uint count = 0; + try_create: + count++; + if (pDic->createTable(*pTab) != 0) { + const NdbError err = pDic->getNdbError(); + if (count == 1) + g_err << "2: " << tabName << ": create failed: " << err << endl; + if (err.code != 711) { + result = NDBT_FAILED; + break; + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + goto try_create; + } + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "2: verify create" << endl; + const NdbDictionary::Table* pTab2 = pDic->getTable(tabName); + if (pTab2 == NULL) { + const NdbError err = pDic->getNdbError(); + g_err << "2: " << tabName << ": verify create: " << err << endl; + result = NDBT_FAILED; + break; + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + // replace by the Retrieved table + pTab = pTab2; + + int records = myRandom48(ctx->getNumRecords()); + g_info << "2: load " << records << " records" << endl; + HugoTransactions hugoTrans(*pTab); + if (hugoTrans.loadTable(pNdb, records) != 0) { + // XXX get error code from hugo + g_err << "2: " << tabName << ": load failed" << endl; + result = NDBT_FAILED; + break; + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "2: drop" << endl; + { + uint count = 0; + try_drop: + count++; + if (pDic->dropTable(tabName) != 0) { + const NdbError err = pDic->getNdbError(); + if (count == 1) + g_err << "2: " << tabName << ": drop failed: " << err << endl; + if (err.code != 711) { + result = NDBT_FAILED; + break; + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + goto try_drop; + } + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + + g_info << "2: verify drop" << endl; + const NdbDictionary::Table* pTab3 = pDic->getTable(tabName); + if (pTab3 != NULL) { + g_err << "2: " << tabName << ": verify drop: table exists" << endl; + result = NDBT_FAILED; + break; + } + if (pDic->getNdbError().code != 709) { + const NdbError err = pDic->getNdbError(); + g_err << "2: " << tabName << ": verify drop: " << err << endl; + result = NDBT_FAILED; + break; + } + NdbSleep_MilliSleep(myRandom48(maxsleep)); + } + + return result; +} + NDBT_TESTSUITE(testDict); TESTCASE("CreateAndDrop", "Try to create and drop the table loop number of times\n"){ @@ -1655,6 +1931,34 @@ TESTCASE("FailAddFragment", "Fail add fragment or attribute in ACC or TUP or TUX\n"){ INITIALIZER(runFailAddFragment); } +TESTCASE("Restart_NF1", + "DICT ops during node graceful shutdown (not master)"){ + TC_PROPERTY("Restart_NF_ops", 1); + TC_PROPERTY("Restart_NF_type", 1); + STEP(runRestarts); + STEP(runDictOps); +} +TESTCASE("Restart_NF2", + "DICT ops during node shutdown abort (not master)"){ + TC_PROPERTY("Restart_NF_ops", 1); + TC_PROPERTY("Restart_NF_type", 2); + STEP(runRestarts); + STEP(runDictOps); +} +TESTCASE("Restart_NR1", + "DICT ops during node startup (not master)"){ + TC_PROPERTY("Restart_NR_ops", 1); + STEP(runRestarts); + STEP(runDictOps); +} +TESTCASE("Restart_NR2", + "DICT ops during node startup with crash inserts (not master)"){ + TC_PROPERTY("Restart_NR_ops", 1); + TC_PROPERTY("Restart_NR_error", 1); + STEP(runRestarts); + STEP(runDictOps); +} + NDBT_TESTSUITE_END(testDict); int main(int argc, const char** argv){ From e2495206b276d6aaeb506ed135016112532d71e3 Mon Sep 17 
00:00:00 2001 From: unknown Date: Thu, 8 Jun 2006 13:25:28 -0400 Subject: [PATCH 06/74] Bug#19904: UDF: not initialized *is_null per row The is_null value was initialized once and thereafter only set to indicate NULL, and never unset to indicate not-NULL. Now set is_null to false, in addition to only setting it to true when the value in question is null. mysql-test/r/udf.result: Add result. mysql-test/t/udf.test: Add test. sql/sql_udf.h: Initialize is_null to false before trying to use it, so that historical NULLs don't affect our operation. --- mysql-test/r/udf.result | 18 ++++++++++++++++++ mysql-test/t/udf.test | 12 ++++++++++++ sql/sql_udf.h | 2 ++ 3 files changed, 32 insertions(+) diff --git a/mysql-test/r/udf.result b/mysql-test/r/udf.result index be52fd7f87c..484c42c41bf 100644 --- a/mysql-test/r/udf.result +++ b/mysql-test/r/udf.result @@ -76,6 +76,24 @@ call XXX2(); metaphon(testval) HL drop procedure xxx2; +CREATE TABLE bug19904(n INT, v varchar(10)); +INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four'); +SELECT myfunc_double(n) AS f FROM bug19904; +f +49.00 +50.00 +NULL +51.00 +52.00 +SELECT metaphon(v) AS f FROM bug19904; +f +ON +TW +NULL +0R +FR +DROP TABLE bug19904; +End of 5.0 tests. DROP FUNCTION metaphon; DROP FUNCTION myfunc_double; DROP FUNCTION myfunc_nonexist; diff --git a/mysql-test/t/udf.test b/mysql-test/t/udf.test index e2556692612..f3be08c8537 100644 --- a/mysql-test/t/udf.test +++ b/mysql-test/t/udf.test @@ -99,6 +99,17 @@ delimiter ;// call XXX2(); drop procedure xxx2; +# +# Bug#19904: UDF: not initialized *is_null per row +# + +CREATE TABLE bug19904(n INT, v varchar(10)); +INSERT INTO bug19904 VALUES (1,'one'),(2,'two'),(NULL,NULL),(3,'three'),(4,'four'); +SELECT myfunc_double(n) AS f FROM bug19904; +SELECT metaphon(v) AS f FROM bug19904; +DROP TABLE bug19904; + +--echo End of 5.0 tests. 
# # Drop the example functions from udf_example @@ -114,3 +125,4 @@ DROP FUNCTION lookup; DROP FUNCTION reverse_lookup; DROP FUNCTION avgcost; + diff --git a/sql/sql_udf.h b/sql/sql_udf.h index d588572a762..d0729deecaa 100644 --- a/sql/sql_udf.h +++ b/sql/sql_udf.h @@ -70,6 +70,7 @@ class udf_handler :public Sql_alloc void cleanup(); double val(my_bool *null_value) { + is_null= 0; if (get_arguments()) { *null_value=1; @@ -88,6 +89,7 @@ class udf_handler :public Sql_alloc } longlong val_int(my_bool *null_value) { + is_null= 0; if (get_arguments()) { *null_value=1; From a182963cc04b7ff0ca27abe33a075ac06e4e0c2e Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 11 Jun 2006 20:46:47 +0200 Subject: [PATCH 07/74] ndb - bug#18781 (5.0) handle rolling upgrade, minor fixes, logging, docs ndb/src/kernel/blocks/dbdict/DictLock.txt: NR signals ndb/src/kernel/blocks/dbdict/Dbdict.cpp: call removeStaleDictLocks at right place, comment why it works more checks, better logging ndb/src/kernel/blocks/dbdict/Dbdict.hpp: call removeStaleDictLocks at right place, comment why it works more checks, better logging ndb/include/kernel/signaldata/DictLock.hpp: 2 more REFs ndb/include/ndb_version.h.in: DICT LOCK appeared in 5.0.23 ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: DICT LOCK rolling upgrade from version < 5.0.23 ndb/src/kernel/blocks/ERROR_codes.txt: more DICT LOCK related testing ndb/test/ndbapi/testDict.cpp: more DICT LOCK related testing --- ndb/include/kernel/signaldata/DictLock.hpp | 4 +- ndb/include/ndb_version.h.in | 2 + ndb/src/kernel/blocks/ERROR_codes.txt | 6 +- ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 101 +++++++++++++++++---- ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 5 +- ndb/src/kernel/blocks/dbdict/DictLock.txt | 94 +++++++++++++++++++ ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 46 +++++++++- ndb/test/ndbapi/testDict.cpp | 67 +++++++++----- 8 files changed, 278 insertions(+), 47 deletions(-) create mode 100644 ndb/src/kernel/blocks/dbdict/DictLock.txt diff --git a/ndb/include/kernel/signaldata/DictLock.hpp b/ndb/include/kernel/signaldata/DictLock.hpp index c8f919f65a8..3e29d762962 100644 --- a/ndb/include/kernel/signaldata/DictLock.hpp +++ b/ndb/include/kernel/signaldata/DictLock.hpp @@ -55,7 +55,9 @@ public: enum ErrorCode { NotMaster = 1, InvalidLockType = 2, - TooManyRequests = 3 + BadUserRef = 3, + TooLate = 4, + TooManyRequests = 5 }; private: Uint32 userPtr; diff --git a/ndb/include/ndb_version.h.in b/ndb/include/ndb_version.h.in index 38b72306d03..7e878803f46 100644 --- a/ndb/include/ndb_version.h.in +++ b/ndb/include/ndb_version.h.in @@ -60,5 +60,7 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; #define NDBD_INCL_NODECONF_VERSION_4 MAKE_VERSION(4,1,17) #define NDBD_INCL_NODECONF_VERSION_5 MAKE_VERSION(5,0,18) +#define NDBD_DICT_LOCK_VERSION_5 MAKE_VERSION(5,0,23) + #endif diff --git a/ndb/src/kernel/blocks/ERROR_codes.txt b/ndb/src/kernel/blocks/ERROR_codes.txt index a63c1bef915..ddb99cb6b56 100644 --- a/ndb/src/kernel/blocks/ERROR_codes.txt +++ b/ndb/src/kernel/blocks/ERROR_codes.txt @@ -5,7 +5,7 @@ Next DBACC 3002 Next DBTUP 4013 Next DBLQH 5043 Next DBDICT 6007 -Next DBDIH 7175 +Next DBDIH 7177 Next DBTC 8037 Next CMVMI 9000 Next BACKUP 10022 @@ -312,7 +312,9 @@ Test Crashes in handling node restarts 7170: Crash when receiving START_PERMREF (InitialStartRequired) -7174: Send one fake START_PERMREF (ZNODE_ALREADY_STARTING_ERROR) +7174: Crash starting node before sending DICT_LOCK_REQ +7175: Master sends one fake START_PERMREF (ZNODE_ALREADY_STARTING_ERROR) +7176: Slave NR 
pretends master does not support DICT lock (rolling upgrade) DICT: 6000 Crash during NR when receiving DICTSTARTREQ diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 73007bd9aad..3cdba251492 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -205,7 +205,7 @@ void Dbdict::execCONTINUEB(Signal* signal) case ZDICT_LOCK_POLL: jam(); - checkDictLockQueue(signal); + checkDictLockQueue(signal, true); break; default : @@ -2836,7 +2836,6 @@ void Dbdict::execNODE_FAILREP(Signal* signal) case BS_NODE_RESTART: jam(); ok = true; - removeStaleDictLocks(signal, theFailedNodes); break; } ndbrequire(ok); @@ -2860,6 +2859,15 @@ void Dbdict::execNODE_FAILREP(Signal* signal) }//if }//for + /* + * NODE_FAILREP guarantees that no "in flight" signal from + * a dead node is accepted, and also that the job buffer contains + * no such (un-executed) signals. Therefore no DICT_UNLOCK_ORD + * from a dead node (leading to master crash) is possible after + * this clean-up removes the lock record. + */ + removeStaleDictLocks(signal, theFailedNodes); + }//execNODE_FAILREP() @@ -12210,7 +12218,7 @@ Dbdict::getIndexAttrMask(TableRecordPtr indexPtr, AttributeMask& mask) const Dbdict::DictLockType* Dbdict::getDictLockType(Uint32 lockType) { - static DictLockType lt[] = { + static const DictLockType lt[] = { { DictLockReq::NodeRestartLock, BS_NODE_RESTART, "NodeRestart" } }; for (int i = 0; i < sizeof(lt)/sizeof(lt[0]); i++) { @@ -12220,12 +12228,40 @@ Dbdict::getDictLockType(Uint32 lockType) return NULL; } +void +Dbdict::sendDictLockInfoEvent(Uint32 pollCount) +{ + DictLockPtr loopPtr; + c_dictLockQueue.first(loopPtr); + unsigned count = 0; + + char queue_buf[100]; + char *p = &queue_buf[0]; + const char *const q = &queue_buf[sizeof(queue_buf)]; + *p = 0; + + while (loopPtr.i != RNIL) { + jam(); + my_snprintf(p, q-p, "%s%u%s", + ++count == 1 ? "" : " ", + (unsigned)refToNode(loopPtr.p->req.userRef), + loopPtr.p->locked ? "L" : ""); + p += strlen(p); + c_dictLockQueue.next(loopPtr); + } + + infoEvent("DICT: lock bs: %d ops: %d poll: %d cnt: %d queue: %s", + (int)c_blockState, + c_opRecordPool.getSize() - c_opRecordPool.getNoOfFree(), + c_dictLockPoll, (int)pollCount, queue_buf); +} + void Dbdict::sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text) { infoEvent("DICT: %s %u for %s", text, - (unsigned int)refToNode(lockPtr.p->req.userRef), lockPtr.p->lt->text); + (unsigned)refToNode(lockPtr.p->req.userRef), lockPtr.p->lt->text); } void @@ -12234,6 +12270,8 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal) jamEntry(); const DictLockReq* req = (const DictLockReq*)&signal->theData[0]; + // make sure bad request crashes slave, not master (us) + if (getOwnNodeId() != c_masterNodeId) { jam(); sendDictLockRef(signal, *req, DictLockRef::NotMaster); @@ -12247,6 +12285,19 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal) return; } + if (req->userRef != signal->getSendersBlockRef() || + getNodeInfo(refToNode(req->userRef)).m_type != NodeInfo::DB) { + jam(); + sendDictLockRef(signal, *req, DictLockRef::BadUserRef); + return; + } + + if (c_aliveNodes.get(refToNode(req->userRef))) { + jam(); + sendDictLockRef(signal, *req, DictLockRef::TooLate); + return; + } + DictLockPtr lockPtr; if (! c_dictLockQueue.seize(lockPtr)) { jam(); @@ -12258,21 +12309,23 @@ Dbdict::execDICT_LOCK_REQ(Signal* signal) lockPtr.p->locked = false; lockPtr.p->lt = lt; - checkDictLockQueue(signal); + checkDictLockQueue(signal, false); if (! 
lockPtr.p->locked) sendDictLockInfoEvent(lockPtr, "lock request by node"); } void -Dbdict::checkDictLockQueue(Signal* signal) +Dbdict::checkDictLockQueue(Signal* signal, bool poll) { + Uint32 pollCount = ! poll ? 0 : signal->theData[1]; + DictLockPtr lockPtr; do { if (! c_dictLockQueue.first(lockPtr)) { jam(); - setDictLockPoll(signal, false); + setDictLockPoll(signal, false, pollCount); return; } @@ -12299,7 +12352,7 @@ Dbdict::checkDictLockQueue(Signal* signal) // this routine is called again when it is removed for any reason bool on = ! lockPtr.p->locked; - setDictLockPoll(signal, on); + setDictLockPoll(signal, on, pollCount); } void @@ -12326,7 +12379,7 @@ Dbdict::execDICT_UNLOCK_ORD(Signal* signal) c_dictLockQueue.release(lockPtr); - checkDictLockQueue(signal); + checkDictLockQueue(signal, false); } void @@ -12359,21 +12412,32 @@ Dbdict::sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode) // control polling void -Dbdict::setDictLockPoll(Signal* signal, bool on) +Dbdict::setDictLockPoll(Signal* signal, bool on, Uint32 pollCount) { if (on) { jam(); signal->theData[0] = ZDICT_LOCK_POLL; - sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 1); + signal->theData[1] = pollCount + 1; + sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 100, 2); } - if (c_dictLockPoll != on) { + bool change = (c_dictLockPoll != on); + + if (change) { jam(); -#ifdef VM_TRACE - infoEvent("DICT: lock polling %s", on ? "On" : "Off"); -#endif c_dictLockPoll = on; } + + // avoid too many messages if master is stuck busy (BS_NODE_FAILURE) + bool periodic = + pollCount < 8 || + pollCount < 64 && pollCount % 8 == 0 || + pollCount < 512 && pollCount % 64 == 0 || + pollCount < 4096 && pollCount % 512 == 0 || + pollCount % 4096 == 0; // about every 6 minutes + + if (change || periodic) + sendDictLockInfoEvent(pollCount); } // NF handling @@ -12384,6 +12448,11 @@ Dbdict::removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes) DictLockPtr loopPtr; c_dictLockQueue.first(loopPtr); + if (getOwnNodeId() != c_masterNodeId) { + ndbrequire(loopPtr.i == RNIL); + return; + } + while (loopPtr.i != RNIL) { jam(); DictLockPtr lockPtr = loopPtr; @@ -12409,7 +12478,7 @@ Dbdict::removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes) } } - checkDictLockQueue(signal); + checkDictLockQueue(signal, false); } diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index fbad67d8822..9c0bf65b69c 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -1804,14 +1804,15 @@ private: bool c_dictLockPoll; static const DictLockType* getDictLockType(Uint32 lockType); + void sendDictLockInfoEvent(Uint32 pollCount); void sendDictLockInfoEvent(DictLockPtr lockPtr, const char* text); - void checkDictLockQueue(Signal* signal); + void checkDictLockQueue(Signal* signal, bool poll); void sendDictLockConf(Signal* signal, DictLockPtr lockPtr); void sendDictLockRef(Signal* signal, DictLockReq req, Uint32 errorCode); // control polling i.e. 
continueB loop - void setDictLockPoll(Signal* signal, bool on); + void setDictLockPoll(Signal* signal, bool on, Uint32 pollCount); // NF handling void removeStaleDictLocks(Signal* signal, const Uint32* theFailedNodes); diff --git a/ndb/src/kernel/blocks/dbdict/DictLock.txt b/ndb/src/kernel/blocks/dbdict/DictLock.txt new file mode 100644 index 00000000000..17f24119e9d --- /dev/null +++ b/ndb/src/kernel/blocks/dbdict/DictLock.txt @@ -0,0 +1,94 @@ +Lock master DICT against schema operations + +Implementation +-------------- + +[ see comments in Dbdict.hpp ] + +Use case: Node startup INR / NR +------------------------------- + +Master DICT (like any block) keeps list of alive nodes (c_aliveNodes). +These are participants in schema ops. + +(1) c_aliveNodes is initialized when DICT starts + in sp3 in READ_NODESCONF from CNTR + +(2) when slave node fails (in any sp of the slave node) + it is removed from c_aliveNodes in NODE_FAILREP + +(3) when slave starts, it is added to c_aliveNodes + in sp4 of the starting node in INCL_NODEREQ + +Slave DIH locks master DICT in sp2 and releases the lock when started. +Based on the constraints: + +- the lock is taken when master DICT is known + DIH reads this in sp2 in READ_NODESCONF + +- the lock is taken before (3) + +- the lock is taken before copying starts and held until it is done + in sp4 DIH meta, DICT meta, tuple data + +- on INR in sp2 in START_PERMREQ the LCP info of the slave is erased + in all DIH in invalidateNodeLCP() - not safe under schema ops + +Signals: + +All but DICT_LOCK are standard v5.0 signals. +s=starting node, m=master, a=all participants, l=local block. + +* sp2 - DICT_LOCK and START_PERM + +DIH/s + DICT_LOCK_REQ + DICT/m + DICT_LOCK_CONF +DIH/s + START_PERMREQ + DIH/m + START_INFOREQ + DIH/a + invalidateNodeLCP() if INR + DIH/a + START_INFOCONF + DIH/m + START_PERMCONF +DIH/s + +* sp4 - START_ME (copy metadata, no changes) + +DIH/s + START_MEREQ + DIH/m + COPY_TABREQ + DIH/s + COPY_TABCONF + DIH/m + DICTSTARTREQ + DICT/s + GET_SCHEMA_INFOREQ + DICT/m + SCHEMA_INFO + DICT/s + DICTSTARTCONF + DIH/m + INCL_NODEREQ + DIH/a + INCL_NODEREQ + ANY/l + INCL_NODECONF + DIH/a + INCL_NODECONF + DIH/m + START_MECONF +DIH/s + +* sp7 - release DICT lock + +DIH/s + DICT_UNLOCK_ORD + DICT/m + +# vim: set et sw=4: diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index c37461a1f65..352053bef10 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -1594,6 +1594,9 @@ void Dbdih::nodeRestartPh2Lab(Signal* signal) */ ndbrequire(c_dictLockSlavePtrI_nodeRestart == RNIL); + // check that we are not yet taking part in schema ops + CRASH_INSERTION(7174); + Uint32 lockType = DictLockReq::NodeRestartLock; Callback c = { safe_cast(&Dbdih::recvDictLockConf_nodeRestart), 0 }; sendDictLockReq(signal, lockType, c); @@ -1746,7 +1749,7 @@ void Dbdih::execSTART_PERMREQ(Signal* signal) ndbrequire(refToNode(retRef) == nodeId); if ((c_nodeStartMaster.activeState) || (c_nodeStartMaster.wait != ZFALSE) || - ERROR_INSERTED_CLEAR(7174)) { + ERROR_INSERTED_CLEAR(7175)) { jam(); signal->theData[0] = nodeId; signal->theData[1] = StartPermRef::ZNODE_ALREADY_STARTING_ERROR; @@ -14709,6 +14712,34 @@ Dbdih::sendDictLockReq(Signal* signal, Uint32 lockType, Callback c) lockPtr.p->locked = false; lockPtr.p->callback = c; + // handle rolling upgrade + { + Uint32 masterVersion = getNodeInfo(cmasterNodeId).m_version; + + unsigned int get_major = getMajor(masterVersion); + unsigned int 
get_minor = getMinor(masterVersion); + unsigned int get_build = getBuild(masterVersion); + + ndbrequire(get_major == 4 || get_major == 5); + + if (masterVersion < NDBD_DICT_LOCK_VERSION_5 || + ERROR_INSERTED(7176)) { + jam(); + + infoEvent("DIH: detect upgrade: master node %u old version %u.%u.%u", + (unsigned int)cmasterNodeId, get_major, get_minor, get_build); + + DictLockConf* conf = (DictLockConf*)&signal->theData[0]; + conf->userPtr = lockPtr.i; + conf->lockType = lockType; + conf->lockPtr = ZNIL; + + sendSignal(reference(), GSN_DICT_LOCK_CONF, signal, + DictLockConf::SignalLength, JBB); + return; + } + } + BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId); sendSignal(dictMasterRef, GSN_DICT_LOCK_REQ, signal, DictLockReq::SignalLength, JBB); @@ -14758,6 +14789,19 @@ Dbdih::sendDictUnlockOrd(Signal* signal, Uint32 lockSlavePtrI) c_dictLockSlavePool.release(lockPtr); + // handle rolling upgrade + { + Uint32 masterVersion = getNodeInfo(cmasterNodeId).m_version; + + unsigned int get_major = getMajor(masterVersion); + ndbrequire(get_major == 4 || get_major == 5); + + if (masterVersion < NDBD_DICT_LOCK_VERSION_5 || + ERROR_INSERTED(7176)) { + return; + } + } + BlockReference dictMasterRef = calcDictBlockRef(cmasterNodeId); sendSignal(dictMasterRef, GSN_DICT_UNLOCK_ORD, signal, DictUnlockOrd::SignalLength, JBB); diff --git a/ndb/test/ndbapi/testDict.cpp b/ndb/test/ndbapi/testDict.cpp index 2cfb78f143e..397f41b3d4e 100644 --- a/ndb/test/ndbapi/testDict.cpp +++ b/ndb/test/ndbapi/testDict.cpp @@ -1590,17 +1590,18 @@ recv_dict_ops_run(NDBT_Context* ctx) int runRestarts(NDBT_Context* ctx, NDBT_Step* step) { - static int err_master[] = { // non-crashing - 0, - 7174 // send one fake START_PERMREF + static int errlst_master[] = { // non-crashing + 7175, // send one fake START_PERMREF + 0 }; - static int err_node[] = { - 0, - 7121, // crash on START_PERMCONF - 7130 // crash on START_MECONF + static int errlst_node[] = { + 7174, // crash before sending DICT_LOCK_REQ + 7176, // pretend master does not support DICT lock + 7121, // crash at receive START_PERMCONF + 0 }; - const uint err_master_cnt = sizeof(err_master)/sizeof(err_master[0]); - const uint err_node_cnt = sizeof(err_node)/sizeof(err_node[0]); + const uint errcnt_master = sizeof(errlst_master)/sizeof(errlst_master[0]); + const uint errcnt_node = sizeof(errlst_node)/sizeof(errlst_node[0]); myRandom48Init(NdbTick_CurrentMillisecond()); NdbRestarter restarter; @@ -1632,7 +1633,7 @@ runRestarts(NDBT_Context* ctx, NDBT_Step* step) nodeIdList[nodeIdCnt++] = nodeId; } - if (numnodes >= 4) { + if (numnodes >= 4 && myRandom48(2) == 0) { int rand = myRandom48(numnodes); int nodeId = restarter.getRandomNodeOtherNodeGroup(nodeIdList[0], rand); CHECK(nodeId != -1); @@ -1642,6 +1643,7 @@ runRestarts(NDBT_Context* ctx, NDBT_Step* step) g_info << "1: master=" << masterNodeId << " nodes=" << nodeIdList[0] << "," << nodeIdList[1] << endl; + const uint timeout = 60; //secs for node wait const unsigned maxsleep = 2000; //ms bool NF_ops = ctx->getProperty("Restart_NF_ops"); @@ -1655,9 +1657,8 @@ runRestarts(NDBT_Context* ctx, NDBT_Step* step) NdbSleep_MilliSleep(myRandom48(maxsleep)); { - int i = 0; - while (i < nodeIdCnt) { - int nodeId = nodeIdList[i++]; + for (int i = 0; i < nodeIdCnt; i++) { + int nodeId = nodeIdList[i]; bool nostart = true; bool abort = NF_type == 0 ? 
myRandom48(2) : (NF_type == 2); @@ -1676,9 +1677,31 @@ runRestarts(NDBT_Context* ctx, NDBT_Step* step) } g_info << "1: wait for nostart" << endl; - CHECK(restarter.waitNodesNoStart(nodeIdList, nodeIdCnt) == 0); + CHECK(restarter.waitNodesNoStart(nodeIdList, nodeIdCnt, timeout) == 0); NdbSleep_MilliSleep(myRandom48(maxsleep)); + int err_master = 0; + int err_node[2] = { 0, 0 }; + + if (NR_error) { + err_master = errlst_master[l % errcnt_master]; + + // limitation: cannot have 2 node restarts and crash_insert + // one node may die for real (NF during startup) + + for (int i = 0; i < nodeIdCnt && nodeIdCnt == 1; i++) { + err_node[i] = errlst_node[l % errcnt_node]; + + // 7176 - no DICT lock protection + + if (err_node[i] == 7176) { + g_info << "1: no dict ops due to error insert " + << err_node[i] << endl; + NR_ops = false; + } + } + } + g_info << "1: " << (NR_ops ? "run" : "pause") << " dict ops" << endl; if (! send_dict_ops_cmd(ctx, NR_ops ? 1 : 2)) break; @@ -1689,23 +1712,17 @@ runRestarts(NDBT_Context* ctx, NDBT_Step* step) if (NR_error) { { - int rand = myRandom48(err_master_cnt); - int err = err_master[rand]; + int err = err_master; if (err != 0) { g_info << "1: insert master error " << err << endl; CHECK(restarter.insertErrorInNode(masterNodeId, err) == 0); } } - // limitation: cannot have 2 node restarts and crash_insert - // one node may die for real (NF during startup) + for (int i = 0; i < nodeIdCnt; i++) { + int nodeId = nodeIdList[i]; - int i = 0; - while (i < nodeIdCnt && nodeIdCnt == 1) { - int nodeId = nodeIdList[i++]; - - int rand = myRandom48(err_node_cnt); - int err = err_node[rand]; + int err = err_node[i]; if (err != 0) { g_info << "1: insert node " << nodeId << " error " << err << endl; CHECK(restarter.insertErrorInNode(nodeId, err) == 0); @@ -1715,7 +1732,7 @@ runRestarts(NDBT_Context* ctx, NDBT_Step* step) NdbSleep_MilliSleep(myRandom48(maxsleep)); g_info << "1: wait cluster started" << endl; - CHECK(restarter.waitClusterStarted() == 0); + CHECK(restarter.waitClusterStarted(timeout) == 0); NdbSleep_MilliSleep(myRandom48(maxsleep)); g_info << "1: restart done" << endl; From 1c1570940feccf6ed3249c643f7968be1563f7b6 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 12 Jun 2006 08:54:45 -0400 Subject: [PATCH 08/74] Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode This is a modification of serg's and guilhem's suggestion in the bug report, in that it also causes the transaction log to be written to disc. mysql-test/r/bdb.result: Add result. mysql-test/t/bdb.test: Add test sql/log.cc: Create a log-committing event that itself won't be written to the log when we're in autocommit mode. sql/log_event.cc: Add a new subclass of Query_log_event that doesn't write itself to the log, for cases where we only want to flush out the transaction and not also write about this event. sql/log_event.h: Add a new subclass of Query_log_event that doesn't write itself to the log, for cases where we only want to flush out the transaction and not also write about this event. 
--- mysql-test/r/bdb.result | 34 ++++++++++++++++++++++++++++++++++ mysql-test/t/bdb.test | 35 +++++++++++++++++++++++++++++++++++ sql/log.cc | 22 +++++++++++++++++----- sql/log_event.cc | 27 +++++++++++++++++++++++++++ sql/log_event.h | 21 +++++++++++++++++++++ 5 files changed, 134 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index af6319afe99..588644a6c66 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -1928,4 +1928,38 @@ create table t1 (a int) engine=bdb; commit; alter table t1 add primary key(a); drop table t1; +set autocommit=1; +reset master; +create table bug16206 (a int) engine= blackhole; +insert into bug16206 values(1); +start transaction; +insert into bug16206 values(2); +commit; +show binlog events; +Log_name Pos Event_type Server_id End_log_pos Info +f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4 +f n Query 1 n use `test`; create table bug16206 (a int) engine= blackhole +f n Query 1 n use `test`; insert into bug16206 values(1) +f n Query 1 n use `test`; insert into bug16206 values(2) +drop table bug16206; +reset master; +create table bug16206 (a int) engine= bdb; +insert into bug16206 values(0); +insert into bug16206 values(1); +start transaction; +insert into bug16206 values(2); +commit; +insert into bug16206 values(3); +show binlog events; +Log_name Pos Event_type Server_id End_log_pos Info +f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4 +f n Query 1 n use `test`; create table bug16206 (a int) engine= bdb +f n Query 1 n use `test`; insert into bug16206 values(0) +f n Query 1 n use `test`; insert into bug16206 values(1) +f n Query 1 n use `test`; BEGIN +f n Query 1 n use `test`; insert into bug16206 values(2) +f n Query 1 n use `test`; COMMIT +f n Query 1 n use `test`; insert into bug16206 values(3) +drop table bug16206; +set autocommit=0; End of 5.0 tests diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index d017d91bfb1..d2e3ca5f36e 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -1019,4 +1019,39 @@ commit; alter table t1 add primary key(a); drop table t1; + +# +# Bug #16206: Superfluous COMMIT event in binlog when updating BDB in autocommit mode +# +set autocommit=1; + +let $VERSION=`select version()`; + +reset master; +create table bug16206 (a int) engine= blackhole; +insert into bug16206 values(1); +start transaction; +insert into bug16206 values(2); +commit; +--replace_result $VERSION VERSION +--replace_column 1 f 2 n 5 n +show binlog events; +drop table bug16206; + +reset master; +create table bug16206 (a int) engine= bdb; +insert into bug16206 values(0); +insert into bug16206 values(1); +start transaction; +insert into bug16206 values(2); +commit; +insert into bug16206 values(3); +--replace_result $VERSION VERSION +--replace_column 1 f 2 n 5 n +show binlog events; +drop table bug16206; + +set autocommit=0; + + --echo End of 5.0 tests diff --git a/sql/log.cc b/sql/log.cc index ba02c9ba082..cfb90d398e6 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -36,6 +36,8 @@ MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log; ulong sync_binlog_counter= 0; +static Muted_query_log_event invisible_commit; + static bool test_if_number(const char *str, long *res, bool allow_wildcards); static bool binlog_init(); @@ -94,7 +96,9 @@ static int binlog_end_trans(THD *thd, IO_CACHE *trans_log, Log_event *end_ev) { int error=0; DBUG_ENTER("binlog_end_trans"); - if (end_ev) + + /* NULL denotes ROLLBACK with nothing to replicate */ + if (end_ev != NULL) error= mysql_bin_log.write(thd, 
trans_log, end_ev); statistic_increment(binlog_cache_use, &LOCK_status); @@ -126,14 +130,19 @@ static int binlog_commit(THD *thd, bool all) DBUG_ASSERT(mysql_bin_log.is_open() && (all || !(thd->options & (OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)))); - if (!my_b_tell(trans_log)) + if (my_b_tell(trans_log) == 0) { // we're here because trans_log was flushed in MYSQL_LOG::log() DBUG_RETURN(0); } - Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE); - qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) - DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev)); + if (all) + { + Query_log_event qev(thd, STRING_WITH_LEN("COMMIT"), TRUE, FALSE); + qev.error_code= 0; // see comment in MYSQL_LOG::write(THD, IO_CACHE) + DBUG_RETURN(binlog_end_trans(thd, trans_log, &qev)); + } + else + DBUG_RETURN(binlog_end_trans(thd, trans_log, &invisible_commit)); } static int binlog_rollback(THD *thd, bool all) @@ -1813,6 +1822,9 @@ bool MYSQL_LOG::write(THD *thd, IO_CACHE *cache, Log_event *commit_event) DBUG_ENTER("MYSQL_LOG::write(THD *, IO_CACHE *, Log_event *)"); VOID(pthread_mutex_lock(&LOCK_log)); + /* NULL would represent nothing to replicate after ROLLBACK */ + DBUG_ASSERT(commit_event != NULL); + if (likely(is_open())) // Should always be true { uint length; diff --git a/sql/log_event.cc b/sql/log_event.cc index 266d6b064bd..cabf4631284 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1229,6 +1229,18 @@ bool Query_log_event::write(IO_CACHE* file) my_b_safe_write(file, (byte*) query, q_len)) ? 1 : 0; } +/* + Query_log_event::Query_log_event() + + The simplest constructor that could possibly work. This is used for + creating static objects that have a special meaning and are invisible + to the log. +*/ +Query_log_event::Query_log_event() + :Log_event(), data_buf(0) +{ +} + /* Query_log_event::Query_log_event() @@ -1875,6 +1887,21 @@ end: #endif +/************************************************************************** + Muted_query_log_event methods +**************************************************************************/ + +#ifndef MYSQL_CLIENT +/* + Muted_query_log_event::Muted_query_log_event() +*/ +Muted_query_log_event::Muted_query_log_event() + :Query_log_event() +{ +} +#endif + + /************************************************************************** Start_log_event_v3 methods **************************************************************************/ diff --git a/sql/log_event.h b/sql/log_event.h index 0e1eb7cd13c..f1b441dedb1 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -783,6 +783,7 @@ public: void print(FILE* file, PRINT_EVENT_INFO* print_event_info= 0); #endif + Query_log_event(); Query_log_event(const char* buf, uint event_len, const Format_description_log_event *description_event, Log_event_type event_type); @@ -806,6 +807,26 @@ public: /* Writes derived event-specific part of post header. */ }; + +/***************************************************************************** + + Muted Query Log Event class + + Pretends to Log SQL queries, but doesn't actually do so. 
+ + ****************************************************************************/ +class Muted_query_log_event: public Query_log_event +{ +public: +#ifndef MYSQL_CLIENT + Muted_query_log_event(); + + bool write(IO_CACHE* file) { return(false); }; + virtual bool write_post_header_for_derived(IO_CACHE* file) { return FALSE; } +#endif +}; + + #ifdef HAVE_REPLICATION /***************************************************************************** From 4c8144cab9e053986531a2a45a785634d692f0fb Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 13 Jun 2006 17:23:43 +0500 Subject: [PATCH 09/74] bug #16832 (ALGORITHM missed in VIEW's information schema) mysql-test/r/information_schema.result: test result fixed sql/sql_show.cc: /*ALGORITHM=XXX*/ added to the 'query' column in view's information schema --- mysql-test/r/information_schema.result | 18 ++++++------ sql/sql_show.cc | 40 ++++++++++++++++++++++---- 2 files changed, 43 insertions(+), 15 deletions(-) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 6da07922251..474b5c76313 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -369,11 +369,11 @@ show keys from v4; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment select * from information_schema.views where TABLE_NAME like "v%"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v0 select sql_no_cache `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER -NULL test v1 select sql_no_cache `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER -NULL test v2 select sql_no_cache `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER -NULL test v3 select sql_no_cache `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER -NULL test v4 select sql_no_cache `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v0 /* ALGORITHM=UNDEFINED */ select sql_no_cache `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER +NULL test v1 /* ALGORITHM=UNDEFINED */ select sql_no_cache `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select sql_no_cache `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER +NULL test v3 /* ALGORITHM=UNDEFINED */ select sql_no_cache `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v4 /* ALGORITHM=UNDEFINED */ select sql_no_cache `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER drop view v0, v1, v2, v3, v4; create table t1 (a int); grant select,update,insert on t1 to mysqltest_1@localhost; @@ -464,9 
+464,9 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION; create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION; select * from information_schema.views; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER -NULL test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER -NULL test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER +NULL test v1 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER +NULL test v3 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER grant select (a) on test.t1 to joe@localhost with grant option; select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES; GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE @@ -1121,7 +1121,7 @@ select * from information_schema.views where table_name='v1' or table_name='v2'; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE NULL test v1 NONE YES root@localhost DEFINER -NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER +NULL test v2 /* ALGORITHM=UNDEFINED */ select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER drop view v1, v2; drop table t1; drop user mysqltest_1@localhost; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index ca6a8ddfb6b..60d50c415d5 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -41,6 +41,8 @@ static TYPELIB grant_types = { sizeof(grant_names)/sizeof(char **), static int store_create_info(THD *thd, TABLE_LIST *table_list, String *packet); +static void +append_algorithm(TABLE_LIST *table, String *buff); static int view_store_create_info(THD *thd, TABLE_LIST *table, String *buff); static bool schema_table_store_record(THD *thd, TABLE *table); @@ -1098,6 +1100,28 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet) void view_store_options(THD *thd, TABLE_LIST *table, String *buff) +{ + append_algorithm(table, buff); + append_definer(thd, buff, &table->definer.user, &table->definer.host); + if (table->view_suid) + buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); + else + buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); +} + + +/* + Append DEFINER clause to the given buffer. 
+ + SYNOPSIS + append_definer() + thd [in] thread handle + buffer [inout] buffer to hold DEFINER clause + definer_user [in] user name part of definer + definer_host [in] host name part of definer +*/ + +static void append_algorithm(TABLE_LIST *table, String *buff) { buff->append(STRING_WITH_LEN("ALGORITHM=")); switch ((int8)table->algorithm) { @@ -1113,11 +1137,6 @@ view_store_options(THD *thd, TABLE_LIST *table, String *buff) default: DBUG_ASSERT(0); // never should happen } - append_definer(thd, buff, &table->definer.user, &table->definer.host); - if (table->view_suid) - buff->append(STRING_WITH_LEN("SQL SECURITY DEFINER ")); - else - buff->append(STRING_WITH_LEN("SQL SECURITY INVOKER ")); } @@ -3105,7 +3124,16 @@ static int get_schema_views_record(THD *thd, struct st_table_list *tables, table->field[1]->store(tables->view_db.str, tables->view_db.length, cs); table->field[2]->store(tables->view_name.str, tables->view_name.length, cs); if (grant & SHOW_VIEW_ACL) - table->field[3]->store(tables->query.str, tables->query.length, cs); + { + char buff[2048]; + String qwe_str(buff, sizeof(buff), cs); + qwe_str.length(0); + qwe_str.append(STRING_WITH_LEN("/* ")); + append_algorithm(tables, &qwe_str); + qwe_str.append(STRING_WITH_LEN("*/ ")); + qwe_str.append(tables->query.str, tables->query.length); + table->field[3]->store(qwe_str.ptr(), qwe_str.length(), cs); + } if (tables->with_check != VIEW_CHECK_NONE) { From abe41c553dd3d073b254ebfcf9e974e161df83a7 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 14 Jun 2006 14:18:42 -0400 Subject: [PATCH 10/74] Bug#19262: internal function create_typelib() uses DBUG_ENTER() but not DBUG_RETURN Trivial replacement of return with DBUG_RETURN. sql/sp_head.cc: Trivial replacement of return with DBUG_RETURN --- sql/sp_head.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 3b29a841966..14295072fff 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -565,7 +565,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List *src) result->name= ""; if (!(result->type_names=(const char **) alloc_root(mem_root,(sizeof(char *)+sizeof(int))*(result->count+1)))) - return 0; + DBUG_RETURN(0); result->type_lengths= (unsigned int *)(result->type_names + result->count+1); List_iterator it(*src); String conv; @@ -599,7 +599,7 @@ create_typelib(MEM_ROOT *mem_root, create_field *field_def, List *src) result->type_names[result->count]= 0; result->type_lengths[result->count]= 0; } - return result; + DBUG_RETURN(result); } From 8b6c2d312b51f10de453aca1ca2164e7432acf3c Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 19 Jun 2006 22:11:01 +0500 Subject: [PATCH 11/74] bug #20318 (ctype_ucs2_def test fails with embedded) there was two problems about charsets in embedded server 1. mysys/charset.c - defined there default_charset_info variable is modified by both server and client code (particularly when --default-charset option is handled) In embedded server we get two codelines modifying one variable. I created separate default_client_charset_info for client code 2. 
mysql->charset and mysql->options.charset initialization isn't properly done for embedded server - necessary calls added include/sql_common.h: client charset info default declared libmysqld/lib_sql.cc: thd_init_client_charset calls added libmysqld/libmysqld.c: check_embedded_connection moved to client.c to avoid code duplication sql-common/client.c: charset initialization moved to mysql_init_character_set to be used in embedded server sql/sql_parse.cc: thread client charset initialization moved to thd_init_client_charset to avoid code duplication --- include/sql_common.h | 1 + libmysqld/lib_sql.cc | 7 ++++ libmysqld/libmysqld.c | 48 ++----------------------- sql-common/client.c | 84 ++++++++++++++++++++++++------------------- sql/sql_parse.cc | 59 ++++++++++++++++-------------- 5 files changed, 91 insertions(+), 108 deletions(-) diff --git a/include/sql_common.h b/include/sql_common.h index c07a4a831bb..9fc8d4f457b 100644 --- a/include/sql_common.h +++ b/include/sql_common.h @@ -22,6 +22,7 @@ extern const char *not_error_sqlstate; extern "C" { #endif +extern CHARSET_INFO *default_client_charset_info; MYSQL_FIELD *unpack_fields(MYSQL_DATA *data,MEM_ROOT *alloc,uint fields, my_bool default_value, uint server_capabilities); void free_rows(MYSQL_DATA *cur); diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index bf8c17a71af..56f4200e695 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -41,6 +41,8 @@ static const char *fake_groups[] = { "server", "embedded", 0 }; int check_user(THD *thd, enum enum_server_command command, const char *passwd, uint passwd_len, const char *db, bool check_count); +void thd_init_client_charset(THD *thd, uint cs_number); + C_MODE_START #include #undef ER @@ -532,10 +534,13 @@ err: return NULL; } + #ifdef NO_EMBEDDED_ACCESS_CHECKS int check_embedded_connection(MYSQL *mysql) { THD *thd= (THD*)mysql->thd; + thd_init_client_charset(thd, mysql->charset->number); + thd->update_charset(); thd->host= (char*)my_localhost; thd->host_or_ip= thd->host; thd->user= my_strdup(mysql->user, MYF(0)); @@ -551,6 +556,8 @@ int check_embedded_connection(MYSQL *mysql) char scramble_buff[SCRAMBLE_LENGTH]; int passwd_len; + thd_init_client_charset(thd, mysql->charset->number); + thd->update_charset(); if (mysql->options.client_ip) { thd->host= my_strdup(mysql->options.client_ip, MYF(0)); diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c index 6fa41fb3fd0..a2bd4242c3d 100644 --- a/libmysqld/libmysqld.c +++ b/libmysqld/libmysqld.c @@ -85,49 +85,7 @@ static void end_server(MYSQL *mysql) } -static int mysql_init_charset(MYSQL *mysql) -{ - char charset_name_buff[16], *charset_name; - - if ((charset_name=mysql->options.charset_name)) - { - const char *save=charsets_dir; - if (mysql->options.charset_dir) - charsets_dir=mysql->options.charset_dir; - mysql->charset=get_charset_by_name(mysql->options.charset_name, - MYF(MY_WME)); - charsets_dir=save; - } - else if (mysql->server_language) - { - charset_name=charset_name_buff; - sprintf(charset_name,"%d",mysql->server_language); /* In case of errors */ - mysql->charset=get_charset((uint8) mysql->server_language, MYF(MY_WME)); - } - else - mysql->charset=default_charset_info; - - if (!mysql->charset) - { - mysql->net.last_errno=CR_CANT_READ_CHARSET; - strmov(mysql->net.sqlstate, "HY0000"); - if (mysql->options.charset_dir) - sprintf(mysql->net.last_error,ER(mysql->net.last_errno), - charset_name ? 
charset_name : "unknown", - mysql->options.charset_dir); - else - { - char cs_dir_name[FN_REFLEN]; - get_charsets_dir(cs_dir_name); - sprintf(mysql->net.last_error,ER(mysql->net.last_errno), - charset_name ? charset_name : "unknown", - cs_dir_name); - } - return mysql->net.last_errno; - } - return 0; -} - +int mysql_init_character_set(MYSQL *mysql); MYSQL * STDCALL mysql_real_connect(MYSQL *mysql,const char *host, const char *user, @@ -203,10 +161,10 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, init_embedded_mysql(mysql, client_flag, db_name); - if (check_embedded_connection(mysql)) + if (mysql_init_character_set(mysql)) goto error; - if (mysql_init_charset(mysql)) + if (check_embedded_connection(mysql)) goto error; /* Send client information for access check */ diff --git a/sql-common/client.c b/sql-common/client.c index 3a598832253..ea8baeeffc7 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -133,6 +133,8 @@ static void mysql_close_free(MYSQL *mysql); static int wait_for_data(my_socket fd, uint timeout); #endif +CHARSET_INFO *default_client_charset_info = &my_charset_latin1; + /**************************************************************************** A modified version of connect(). my_connect() allows you to specify @@ -1424,7 +1426,7 @@ mysql_init(MYSQL *mysql) bzero((char*) (mysql),sizeof(*(mysql))); mysql->options.connect_timeout= CONNECT_TIMEOUT; mysql->last_used_con= mysql->next_slave= mysql->master = mysql; - mysql->charset=default_charset_info; + mysql->charset=default_client_charset_info; strmov(mysql->net.sqlstate, not_error_sqlstate); /* By default, we are a replication pivot. The caller must reset it @@ -1537,6 +1539,50 @@ static MYSQL_METHODS client_methods= #endif }; +C_MODE_START +int mysql_init_character_set(MYSQL *mysql) +{ + NET *net= &mysql->net; + /* Set character set */ + if (!mysql->options.charset_name && + !(mysql->options.charset_name= + my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + return 1; + + { + const char *save= charsets_dir; + if (mysql->options.charset_dir) + charsets_dir=mysql->options.charset_dir; + mysql->charset=get_charset_by_csname(mysql->options.charset_name, + MY_CS_PRIMARY, MYF(MY_WME)); + charsets_dir= save; + } + + if (!mysql->charset) + { + net->last_errno=CR_CANT_READ_CHARSET; + strmov(net->sqlstate, unknown_sqlstate); + if (mysql->options.charset_dir) + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + mysql->options.charset_dir); + else + { + char cs_dir_name[FN_REFLEN]; + get_charsets_dir(cs_dir_name); + my_snprintf(net->last_error, sizeof(net->last_error)-1, + ER(net->last_errno), + mysql->options.charset_name, + cs_dir_name); + } + return 1; + } + return 0; +} +C_MODE_END + + MYSQL * STDCALL CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, const char *passwd, const char *db, @@ -1875,42 +1921,8 @@ CLI_MYSQL_REAL_CONNECT(MYSQL *mysql,const char *host, const char *user, goto error; } - /* Set character set */ - if (!mysql->options.charset_name && - !(mysql->options.charset_name= - my_strdup(MYSQL_DEFAULT_CHARSET_NAME,MYF(MY_WME)))) + if (mysql_init_character_set(mysql)) goto error; - - { - const char *save= charsets_dir; - if (mysql->options.charset_dir) - charsets_dir=mysql->options.charset_dir; - mysql->charset=get_charset_by_csname(mysql->options.charset_name, - MY_CS_PRIMARY, MYF(MY_WME)); - charsets_dir= save; - } - - if (!mysql->charset) - { - net->last_errno=CR_CANT_READ_CHARSET; - 
strmov(net->sqlstate, unknown_sqlstate); - if (mysql->options.charset_dir) - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - mysql->options.charset_dir); - else - { - char cs_dir_name[FN_REFLEN]; - get_charsets_dir(cs_dir_name); - my_snprintf(net->last_error, sizeof(net->last_error)-1, - ER(net->last_errno), - mysql->options.charset_name, - cs_dir_name); - } - goto error; - } - /* Save connection information */ if (!my_multi_malloc(MYF(0), diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 51ef3f31b26..4c0221c9e9c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -667,6 +667,37 @@ static void reset_mqh(THD *thd, LEX_USER *lu, bool get_them= 0) #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } +void thd_init_client_charset(THD *thd, uint cs_number) +{ + /* + Use server character set and collation if + - opt_character_set_client_handshake is not set + - client has not specified a character set + - client character set is the same as the servers + - client character set doesn't exists in server + */ + if (!opt_character_set_client_handshake || + !(thd->variables.character_set_client= get_charset(cs_number, MYF(0))) || + !my_strcasecmp(&my_charset_latin1, + global_system_variables.character_set_client->name, + thd->variables.character_set_client->name)) + { + thd->variables.character_set_client= + global_system_variables.character_set_client; + thd->variables.collation_connection= + global_system_variables.collation_connection; + thd->variables.character_set_results= + global_system_variables.character_set_results; + } + else + { + thd->variables.character_set_results= + thd->variables.collation_connection= + thd->variables.character_set_client; + } +} + + /* Perform handshake, authorize client and update thd ACL variables. SYNOPSIS @@ -809,33 +840,7 @@ static int check_connection(THD *thd) thd->client_capabilities|= ((ulong) uint2korr(net->read_pos+2)) << 16; thd->max_client_packet_length= uint4korr(net->read_pos+4); DBUG_PRINT("info", ("client_character_set: %d", (uint) net->read_pos[8])); - /* - Use server character set and collation if - - opt_character_set_client_handshake is not set - - client has not specified a character set - - client character set is the same as the servers - - client character set doesn't exists in server - */ - if (!opt_character_set_client_handshake || - !(thd->variables.character_set_client= - get_charset((uint) net->read_pos[8], MYF(0))) || - !my_strcasecmp(&my_charset_latin1, - global_system_variables.character_set_client->name, - thd->variables.character_set_client->name)) - { - thd->variables.character_set_client= - global_system_variables.character_set_client; - thd->variables.collation_connection= - global_system_variables.collation_connection; - thd->variables.character_set_results= - global_system_variables.character_set_results; - } - else - { - thd->variables.character_set_results= - thd->variables.collation_connection= - thd->variables.character_set_client; - } + thd_init_client_charset(thd, (uint) net->read_pos[8]); thd->update_charset(); end= (char*) net->read_pos+32; } From e9452db1c1b7fb534a44590312d6608640675350 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Jun 2006 01:50:20 +0400 Subject: [PATCH 12/74] Fix for bug#19634 "Re-execution of multi-delete which involve trigger/stored function crashes server". 
Attempts to execute prepared multi-delete statement which involved trigger or stored function caused server crashes (the same happened for such statements included in stored procedures in cases when one tried to execute them more than once). The problem was caused by yet another incorrect usage of check_table_access() routine (the latter assumes that table list which it gets as argument corresponds to value LEX::query_tables_own_last). We solve this problem by juggling with LEX::query_tables_own_last value when we call check_table_access() for LEX::auxilliary_table_list (better solution is too intrusive and should be done in 5.1). mysql-test/r/sp-prelocking.result: Added test for bug#19634 "Re-execution of multi-delete which involve trigger/ stored function crashes server". mysql-test/t/sp-prelocking.test: Added test for bug#19634 "Re-execution of multi-delete which involve trigger/ stored function crashes server". sql/sql_parse.cc: To call safely check_table_access() for LEX::auxilliary_table_list we have to juggle with LEX::query_tables_own_last value. --- mysql-test/r/sp-prelocking.result | 18 ++++++++++++++ mysql-test/t/sp-prelocking.test | 31 ++++++++++++++++++++++++ sql/sql_parse.cc | 40 +++++++++++++++++++++++++++---- 3 files changed, 85 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/sp-prelocking.result b/mysql-test/r/sp-prelocking.result index 2335513b28a..7d8dd862748 100644 --- a/mysql-test/r/sp-prelocking.result +++ b/mysql-test/r/sp-prelocking.result @@ -237,3 +237,21 @@ deallocate prepare stmt; drop table t1; drop view v1, v2, v3; drop function bug15683; +drop table if exists t1, t2, t3; +drop function if exists bug19634; +create table t1 (id int, data int); +create table t2 (id int); +create table t3 (data int); +create function bug19634() returns int return (select count(*) from t3); +prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id and bug19634()"; +execute stmt; +execute stmt; +deallocate prepare stmt; +create trigger t1_bi before delete on t1 for each row insert into t3 values (old.data); +prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id"; +execute stmt; +execute stmt; +deallocate prepare stmt; +drop function bug19634; +drop table t1, t2, t3; +End of 5.0 tests diff --git a/mysql-test/t/sp-prelocking.test b/mysql-test/t/sp-prelocking.test index a7215462afb..b94de6236d3 100644 --- a/mysql-test/t/sp-prelocking.test +++ b/mysql-test/t/sp-prelocking.test @@ -272,3 +272,34 @@ drop table t1; drop view v1, v2, v3; drop function bug15683; + +# +# Bug#19634 "Re-execution of multi-delete which involve trigger/stored +# function crashes server" +# +--disable_warnings +drop table if exists t1, t2, t3; +drop function if exists bug19634; +--enable_warnings +create table t1 (id int, data int); +create table t2 (id int); +create table t3 (data int); +create function bug19634() returns int return (select count(*) from t3); +prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id and bug19634()"; +# This should not crash server +execute stmt; +execute stmt; +deallocate prepare stmt; + +create trigger t1_bi before delete on t1 for each row insert into t3 values (old.data); +prepare stmt from "delete t1 from t1, t2 where t1.id = t2.id"; + +execute stmt; +execute stmt; +deallocate prepare stmt; + +drop function bug19634; +drop table t1, t2, t3; + + +--echo End of 5.0 tests diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ba5c2ebf484..7ed96250240 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -5202,8 +5202,26 @@ bool check_global_access(THD 
*thd, ulong want_access) /* - Check the privilege for all used tables. Table privileges are cached - in the table list for GRANT checking + Check the privilege for all used tables. + + SYNOPSYS + check_table_access() + thd Thread context + want_access Privileges requested + tables List of tables to be checked + no_errors FALSE/TRUE - report/don't report error to + the client (using my_error() call). + + NOTES + Table privileges are cached in the table list for GRANT checking. + This functions assumes that table list used and + thd->lex->query_tables_own_last value correspond to each other + (the latter should be either 0 or point to next_global member + of one of elements of this table list). + + RETURN VALUE + FALSE - OK + TRUE - Access denied */ bool @@ -7068,14 +7086,28 @@ bool multi_delete_precheck(THD *thd, TABLE_LIST *tables) SELECT_LEX *select_lex= &thd->lex->select_lex; TABLE_LIST *aux_tables= (TABLE_LIST *)thd->lex->auxilliary_table_list.first; + TABLE_LIST **save_query_tables_own_last= thd->lex->query_tables_own_last; DBUG_ENTER("multi_delete_precheck"); /* sql_yacc guarantees that tables and aux_tables are not zero */ DBUG_ASSERT(aux_tables != 0); if (check_db_used(thd, tables) || check_db_used(thd,aux_tables) || - check_table_access(thd,SELECT_ACL, tables,0) || - check_table_access(thd,DELETE_ACL, aux_tables,0)) + check_table_access(thd, SELECT_ACL, tables, 0)) DBUG_RETURN(TRUE); + + /* + Since aux_tables list is not part of LEX::query_tables list we + have to juggle with LEX::query_tables_own_last value to be able + call check_table_access() safely. + */ + thd->lex->query_tables_own_last= 0; + if (check_table_access(thd, DELETE_ACL, aux_tables, 0)) + { + thd->lex->query_tables_own_last= save_query_tables_own_last; + DBUG_RETURN(TRUE); + } + thd->lex->query_tables_own_last= save_query_tables_own_last; + if ((thd->options & OPTION_SAFE_UPDATES) && !select_lex->where) { my_message(ER_UPDATE_WITHOUT_KEY_IN_SAFE_MODE, From 72cb1d5049c781ed82f0f0859df74383d0d62824 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Jun 2006 02:23:18 +0300 Subject: [PATCH 13/74] Fix for Bug#18246 "compilation error with tcp_wrapper" sql/mysqld.cc: Fix for Bug#18246 "compilation error with tcp_wrapper" Added wrapper functions. --- include/Makefile.am | 2 +- include/my_libwrap.h | 19 +++++++++++++++++++ mysys/Makefile.am | 2 +- mysys/my_libwrap.c | 39 +++++++++++++++++++++++++++++++++++++++ sql/mysqld.cc | 6 +++--- 5 files changed, 63 insertions(+), 5 deletions(-) create mode 100644 include/my_libwrap.h create mode 100644 mysys/my_libwrap.c diff --git a/include/Makefile.am b/include/Makefile.am index 07c32e3127b..2dbea3fe07f 100644 --- a/include/Makefile.am +++ b/include/Makefile.am @@ -31,7 +31,7 @@ noinst_HEADERS = config-win.h config-os2.h config-netware.h \ my_aes.h my_tree.h hash.h thr_alarm.h \ thr_lock.h t_ctype.h violite.h md5.h base64.h \ mysql_version.h.in my_handler.h my_time.h decimal.h \ - my_user.h + my_user.h my_libwrap.h # mysql_version.h are generated CLEANFILES = mysql_version.h my_config.h readline openssl diff --git a/include/my_libwrap.h b/include/my_libwrap.h new file mode 100644 index 00000000000..a5cc9879e4f --- /dev/null +++ b/include/my_libwrap.h @@ -0,0 +1,19 @@ +/* Copyright (C) 2000 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +extern void my_fromhost(struct request_info *req); +extern int my_hosts_access(struct request_info *req); +extern char *my_eval_client(struct request_info *req); diff --git a/mysys/Makefile.am b/mysys/Makefile.am index d046b2fa3f8..bc84f44cd29 100644 --- a/mysys/Makefile.am +++ b/mysys/Makefile.am @@ -56,7 +56,7 @@ libmysys_a_SOURCES = my_init.c my_getwd.c mf_getdate.c my_mmap.c \ my_gethostbyname.c rijndael.c my_aes.c sha1.c \ my_handler.c my_netware.c my_largepage.c \ my_memmem.c \ - my_windac.c my_access.c base64.c + my_windac.c my_access.c base64.c my_libwrap.c EXTRA_DIST = thr_alarm.c thr_lock.c my_pthread.c my_thr_init.c \ thr_mutex.c thr_rwlock.c libmysys_a_LIBADD = @THREAD_LOBJECTS@ diff --git a/mysys/my_libwrap.c b/mysys/my_libwrap.c new file mode 100644 index 00000000000..29a0ecf3fc6 --- /dev/null +++ b/mysys/my_libwrap.c @@ -0,0 +1,39 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +#include +#ifdef HAVE_LIBWRAP +#include +#include +#ifdef NEED_SYS_SYSLOG_H +#include +#endif /* NEED_SYS_SYSLOG_H */ +#endif + +void my_fromhost(struct request_info *req) +{ + fromhost(req); +} + +int my_hosts_access(struct request_info *req) +{ + hosts_access(req); +} + +char *my_eval_client(struct request_info *req) +{ + eval_client(req); +} diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d7a38d6b715..262a5352ed9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -4072,8 +4072,8 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) struct request_info req; signal(SIGCHLD, SIG_DFL); request_init(&req, RQ_DAEMON, libwrapName, RQ_FILE, new_sock, NULL); - fromhost(&req); - if (!hosts_access(&req)) + my_fromhost(&req); + if (!my_hosts_access(&req)) { /* This may be stupid but refuse() includes an exit(0) @@ -4081,7 +4081,7 @@ pthread_handler_t handle_connections_sockets(void *arg __attribute__((unused))) clean_exit() - same stupid thing ... */ syslog(deny_severity, "refused connect from %s", - eval_client(&req)); + my_eval_client(&req)); /* C++ sucks (the gibberish in front just translates the supplied From e8e52a6a2557f3a6e6e3d50841eb545c9cf8d0cc Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Jun 2006 18:35:19 +0300 Subject: [PATCH 14/74] Cleanup to patch for Bug#18246, "compilation error with tcp_wrapper" include/my_libwrap.h: Changed includes to the header file. 
mysys/my_libwrap.c: Added comment and .c file now takes needed includes from the corresponding .h file. sql/mysqld.cc: Include this block from my_libwra.h now. Moved two variables out of the otherwise same block. --- include/my_libwrap.h | 9 +++++++++ mysys/my_libwrap.c | 15 +++++++++------ sql/mysqld.cc | 13 +++---------- 3 files changed, 21 insertions(+), 16 deletions(-) diff --git a/include/my_libwrap.h b/include/my_libwrap.h index a5cc9879e4f..6437cbaed84 100644 --- a/include/my_libwrap.h +++ b/include/my_libwrap.h @@ -14,6 +14,15 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#ifdef HAVE_LIBWRAP +#include +#include +#ifdef NEED_SYS_SYSLOG_H +#include +#endif /* NEED_SYS_SYSLOG_H */ + extern void my_fromhost(struct request_info *req); extern int my_hosts_access(struct request_info *req); extern char *my_eval_client(struct request_info *req); + +#endif /* HAVE_LIBWRAP */ diff --git a/mysys/my_libwrap.c b/mysys/my_libwrap.c index 29a0ecf3fc6..be8adbab0a1 100644 --- a/mysys/my_libwrap.c +++ b/mysys/my_libwrap.c @@ -14,14 +14,15 @@ along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +/* + This is needed to be able to compile with original libwrap header + files that don't have the prototypes +*/ + #include +#include + #ifdef HAVE_LIBWRAP -#include -#include -#ifdef NEED_SYS_SYSLOG_H -#include -#endif /* NEED_SYS_SYSLOG_H */ -#endif void my_fromhost(struct request_info *req) { @@ -37,3 +38,5 @@ char *my_eval_client(struct request_info *req) { eval_client(req); } + +#endif /* HAVE_LIBWRAP */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 262a5352ed9..b73cd350012 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -120,16 +120,7 @@ extern "C" { // Because of SCO 3.2V4.2 #include #endif /* __WIN__ */ -#ifdef HAVE_LIBWRAP -#include -#include -#ifdef NEED_SYS_SYSLOG_H -#include -#endif /* NEED_SYS_SYSLOG_H */ -int allow_severity = LOG_INFO; -int deny_severity = LOG_WARNING; - -#endif /* HAVE_LIBWRAP */ +#include #ifdef HAVE_SYS_MMAN_H #include @@ -591,6 +582,8 @@ static const char* default_dbug_option; #endif #ifdef HAVE_LIBWRAP const char *libwrapName= NULL; +int allow_severity = LOG_INFO; +int deny_severity = LOG_WARNING; #endif #ifdef HAVE_QUERY_CACHE static ulong query_cache_limit= 0; From edcba74415fc7a2ead3d65628d22efab598a61b8 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 21 Jun 2006 20:27:52 -0700 Subject: [PATCH 15/74] traditional grep does not have -q option --- config/ac-macros/compiler_flag.m4 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config/ac-macros/compiler_flag.m4 b/config/ac-macros/compiler_flag.m4 index 9dda6da72fa..88097c7a62e 100644 --- a/config/ac-macros/compiler_flag.m4 +++ b/config/ac-macros/compiler_flag.m4 @@ -47,7 +47,7 @@ void foo (void) { } EOF if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS -S -o conftest.s conftest.c 1>&AS_MESSAGE_LOG_FD]) \ - && grep -q .note.GNU-stack conftest.s \ + && grep .note.GNU-stack conftest.s >/dev/null \ && AC_TRY_COMMAND([${CC-cc} $CCASFLAGS $CPPFLAGS -Wa,--noexecstack -c -o conftest.o conftest.s 1>&AS_MESSAGE_LOG_FD]) then From 62683b70fb3a6c386449480dd5f55da690676086 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 14:28:05 +0200 Subject: [PATCH 16/74] Improved fix for bug#18516 (also 19353): 1) Rename the old shell tool "mysql_upgrade", to avoid a name collision. 
2) Improve the spec file, to explicitly use a temporary socket. scripts/mysql_upgrade_shell.sh: Rename: scripts/mysql_upgrade.sh -> scripts/mysql_upgrade_shell.sh scripts/Makefile.am: The old shell script "mysql_upgrade" must be renamed, so that its name does not collide with the new binary. support-files/mysql.spec.sh: The previous version was incomplete, as it did not cover the case where the DBA had configured a non-default socket file. Solve that by creating and explicitly providing a temporary directory for the socket just for the duration of "mysql_upgrade", which also can be better protected than the default socket. --- scripts/Makefile.am | 6 ++--- ...ysql_upgrade.sh => mysql_upgrade_shell.sh} | 0 support-files/mysql.spec.sh | 22 +++++++++++++++---- 3 files changed, 21 insertions(+), 7 deletions(-) rename scripts/{mysql_upgrade.sh => mysql_upgrade_shell.sh} (100%) diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 0f68b484f41..a339ebc5b8f 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -32,7 +32,7 @@ bin_SCRIPTS = @server_scripts@ \ mysqldumpslow \ mysql_explain_log \ mysql_tableinfo \ - mysql_upgrade \ + mysql_upgrade_shell \ mysqld_multi \ mysql_create_system_tables @@ -60,7 +60,7 @@ EXTRA_SCRIPTS = make_binary_distribution.sh \ mysql_explain_log.sh \ mysqld_multi.sh \ mysql_tableinfo.sh \ - mysql_upgrade.sh \ + mysql_upgrade_shell.sh \ mysqld_safe.sh \ mysql_create_system_tables.sh @@ -89,7 +89,7 @@ CLEANFILES = @server_scripts@ \ mysqldumpslow \ mysql_explain_log \ mysql_tableinfo \ - mysql_upgrade \ + mysql_upgrade_shell \ mysqld_multi \ make_win_src_distribution \ mysql_create_system_tables diff --git a/scripts/mysql_upgrade.sh b/scripts/mysql_upgrade_shell.sh similarity index 100% rename from scripts/mysql_upgrade.sh rename to scripts/mysql_upgrade_shell.sh diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index abd29b6014a..2f66d64b289 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -490,11 +490,19 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir # So ensure the server is isolated as much as possible, and start it so that # passwords are not checked. # See the related change in the start script "/etc/init.d/mysql". -chmod 700 $mysql_datadir -%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables -%{_bindir}/mysql_upgrade +if type mktemp >/dev/null 2>&1 +then + mysql_tmp_sockdir=`mktemp -dt` +else + PID=$$ + mysql_tmp_sockdir=/tmp/mysql-$PID + ( umask 077 ; mkdir $mysql_tmp_sockdir ) +fi +chown %{mysqld_user}:%{mysqld_group} $mysql_tmp_sockdir +%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables --socket=$mysql_tmp_sockdir/upgrade.sock +%{_bindir}/mysql_upgrade --socket=$mysql_tmp_sockdir/upgrade.sock %{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables -chmod 755 $mysql_datadir +rm -fr $mysql_tmp_sockdir # Change permissions again to fix any new files. chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir @@ -732,6 +740,12 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Thu Jun 22 2006 Joerg Bruehe + +- Close a gap of the previous version by explicitly using + a newly created temporary directory for the socket to be used + in the "mysql_upgrade" operation, overriding any local setting. 
+ * Tue Jun 20 2006 Joerg Bruehe - To run "mysql_upgrade", we need a running server; From 4af5e597ef8c22fdf8c4b88ba7d12d113e56e81f Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 19:10:11 +0500 Subject: [PATCH 17/74] Bugs#20392: INSERT_ID session variable has weird value sys_var_insert_id returned LAST_INSERT_ID instead of INSERT_ID. mysql-test/r/variables.result: Adding test case mysql-test/t/variables.test: Adding test case sql/set_var.cc: Fixed that sys_var_insert_id returned last_indert_id instead of insert_id. --- mysql-test/r/variables.result | 12 ++++++++++++ mysql-test/t/variables.test | 10 ++++++++++ sql/set_var.cc | 2 +- 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 8cee60cf49a..1016cf27c18 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -421,6 +421,18 @@ set tmp_table_size=100; set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; +select @@session.insert_id; +@@session.insert_id +1 +set @save_insert_id=@@session.insert_id; +set session insert_id=20; +select @@session.insert_id; +@@session.insert_id +20 +set @@session.insert_id=@save_insert_id; +select @@session.insert_id; +@@session.insert_id +1 create table t1 (a int not null auto_increment, primary key(a)); create table t2 (a int not null auto_increment, primary key(a)); insert into t1 values(null),(null),(null); diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index be1731e7493..d0def5af8d0 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -301,6 +301,16 @@ set tx_isolation="READ-COMMITTED"; set wait_timeout=100; set log_warnings=1; +# +# Bugs: #20392: INSERT_ID session variable has weird value +# +select @@session.insert_id; +set @save_insert_id=@@session.insert_id; +set session insert_id=20; +select @@session.insert_id; +set @@session.insert_id=@save_insert_id; +select @@session.insert_id; + # # key buffer # diff --git a/sql/set_var.cc b/sql/set_var.cc index 003dd4a8ab3..51317dec50d 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -2578,7 +2578,7 @@ bool sys_var_insert_id::update(THD *thd, set_var *var) byte *sys_var_insert_id::value_ptr(THD *thd, enum_var_type type, LEX_STRING *base) { - return (byte*) &thd->current_insert_id; + return (byte*) &thd->next_insert_id; } From 9040e4a9ce866c58ed77738cb13fdcf8fdf4233a Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 19:40:59 +0500 Subject: [PATCH 18/74] Additional test for Bugs#20392: INSERT_ID session variable has weird value sys_var_insert_id returned LAST_INSERT_ID instead of INSERT_ID, as Guilhem suggested. 
mysql-test/r/variables.result: Additional test mysql-test/t/variables.test: Additional test --- mysql-test/r/variables.result | 10 ++++++++++ mysql-test/t/variables.test | 6 ++++++ 2 files changed, 16 insertions(+) diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 1016cf27c18..4ddc7e2ab87 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -429,6 +429,16 @@ set session insert_id=20; select @@session.insert_id; @@session.insert_id 20 +set session last_insert_id=100; +select @@session.insert_id; +@@session.insert_id +20 +select @@session.last_insert_id; +@@session.last_insert_id +100 +select @@session.insert_id; +@@session.insert_id +20 set @@session.insert_id=@save_insert_id; select @@session.insert_id; @@session.insert_id diff --git a/mysql-test/t/variables.test b/mysql-test/t/variables.test index d0def5af8d0..68efcafd1e0 100644 --- a/mysql-test/t/variables.test +++ b/mysql-test/t/variables.test @@ -308,6 +308,12 @@ select @@session.insert_id; set @save_insert_id=@@session.insert_id; set session insert_id=20; select @@session.insert_id; + +set session last_insert_id=100; +select @@session.insert_id; +select @@session.last_insert_id; +select @@session.insert_id; + set @@session.insert_id=@save_insert_id; select @@session.insert_id; From 15ac64063197f00a7343fb99613554788cca10b0 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 19:15:03 +0400 Subject: [PATCH 19/74] Bug#15811: extremely long time for mysql client to execute long INSERT The problem was in redundant calls to strlen() in string functions, where we may then return after checking only the small number of characters. No test case is provided since it's a performance fix. strings/ctype-mb.c: Do not use strlen() where arbitrary horizon of at least CHARSET_INFO::mbmaxlen character is sufficient. --- strings/ctype-mb.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c index a3e10ba7650..0d73c7d1e51 100644 --- a/strings/ctype-mb.c +++ b/strings/ctype-mb.c @@ -24,12 +24,12 @@ void my_caseup_str_mb(CHARSET_INFO * cs, char *str) { register uint32 l; - register char *end=str+strlen(str); /* BAR TODO: remove strlen() call */ register uchar *map=cs->to_upper; while (*str) { - if ((l=my_ismbchar(cs, str,end))) + /* Pointing after the '\0' is safe here. */ + if ((l=my_ismbchar(cs, str, str + cs->mbmaxlen))) str+=l; else { @@ -42,12 +42,12 @@ void my_caseup_str_mb(CHARSET_INFO * cs, char *str) void my_casedn_str_mb(CHARSET_INFO * cs, char *str) { register uint32 l; - register char *end=str+strlen(str); register uchar *map=cs->to_lower; while (*str) { - if ((l=my_ismbchar(cs, str,end))) + /* Pointing after the '\0' is safe here. */ + if ((l=my_ismbchar(cs, str, str + cs->mbmaxlen))) str+=l; else { @@ -101,15 +101,18 @@ uint my_casedn_mb(CHARSET_INFO * cs, char *src, uint srclen, return srclen; } +/* + my_strcasecmp_mb() returns 0 if strings are equal, non-zero otherwise. + */ int my_strcasecmp_mb(CHARSET_INFO * cs,const char *s, const char *t) { register uint32 l; - register const char *end=s+strlen(s); register uchar *map=cs->to_upper; - while (smbmaxlen))) { while (l--) if (*s++ != *t++) @@ -120,7 +123,8 @@ int my_strcasecmp_mb(CHARSET_INFO * cs,const char *s, const char *t) else if (map[(uchar) *s++] != map[(uchar) *t++]) return 1; } - return *t; + /* At least one of '*s' and '*t' is zero here. 
*/ + return (*t != *s); } From 67fd3c4a53f585f8e33b5094822cf639a27483de Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 19:29:48 +0400 Subject: [PATCH 20/74] A fix and a test case for Bug#15217 "Using a SP cursor on a table created with PREPARE fails with weird error". More generally, re-executing a stored procedure with a complex SP cursor query could lead to a crash. The cause of the problem was that SP cursor queries were not optimized properly at first execution: their parse tree belongs to sp_instr_cpush, not sp_instr_copen, and thus the tree was tagged "EXECUTED" when the cursor was declared, not when it was opened. This led to loss of optimization transformations performed at first execution, as sp_instr_copen saw that the query is already "EXECUTED" and therefore either not ran first-execution related blocks or wrongly rolled back the transformations caused by first-execution code. The fix is to update the state of the parsed tree only when the tree is executed, as opposed to when the instruction containing the tree is executed. Assignment if i->state is moved to reset_lex_and_exec_core. mysql-test/r/sp.result: Test results fixed (Bug#15217) mysql-test/t/sp.test: Add a test case for Bug#15217 sql/sp_head.cc: Move assignment of stmt_arena->state to reset_lex_and_exec_core --- mysql-test/r/sp.result | 21 +++++++++++++++++++++ mysql-test/t/sp.test | 27 +++++++++++++++++++++++++++ sql/sp_head.cc | 4 +++- 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index ff378f1f43b..d3874c769fa 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -4990,4 +4990,25 @@ CALL bug18037_p2()| DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +drop table if exists t3| +drop procedure if exists bug15217| +create table t3 as select 1| +create procedure bug15217() +begin +declare var1 char(255); +declare cur1 cursor for select * from t3; +open cur1; +fetch cur1 into var1; +select concat('data was: /', var1, '/'); +close cur1; +end | +call bug15217()| +concat('data was: /', var1, '/') +data was: /1/ +flush tables | +call bug15217()| +concat('data was: /', var1, '/') +data was: /1/ +drop table t3| +drop procedure bug15217| drop table t1,t2; diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 1d21a5da187..66498198157 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -5888,6 +5888,33 @@ DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +# +# Bug#15217 "Using a SP cursor on a table created with PREPARE fails with +# weird error". Check that the code that is supposed to work at +# the first execution of a stored procedure actually works for +# sp_instr_copen. 
+ +--disable_warnings +drop table if exists t3| +drop procedure if exists bug15217| +--enable_warnings +create table t3 as select 1| +create procedure bug15217() +begin + declare var1 char(255); + declare cur1 cursor for select * from t3; + open cur1; + fetch cur1 into var1; + select concat('data was: /', var1, '/'); + close cur1; +end | +# Returns expected result +call bug15217()| +flush tables | +# Returns error with garbage as column name +call bug15217()| +drop table t3| +drop procedure bug15217| # # BUG#NNNN: New bug synopsis diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 3b29a841966..ef2f895c8b2 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -1075,7 +1075,6 @@ sp_head::execute(THD *thd) thd->net.no_send_error= 0; if (i->free_list) cleanup_items(i->free_list); - i->state= Query_arena::EXECUTED; /* If we've set thd->user_var_events_alloc to mem_root of this SP @@ -2210,6 +2209,9 @@ sp_lex_keeper::reset_lex_and_exec_core(THD *thd, uint *nextp, m_lex->mark_as_requiring_prelocking(NULL); } thd->rollback_item_tree_changes(); + /* Update the state of the active arena. */ + thd->stmt_arena->state= Query_arena::EXECUTED; + /* Unlike for PS we should not call Item's destructors for newly created From 9a4b76ed64c58204868236f092f3f64dcfea96e2 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 22:11:27 +0500 Subject: [PATCH 21/74] bug #10166 (Signed byte values cause data to be padded) The AsBinary function returns VARCHAR data type with binary collation. It can cause problem for clients that treat that kind of data as different from BLOB type. So now AsBinary returns BLOB. mysql-test/r/gis.result: result fixed mysql-test/t/gis.test: test case added sql/item_geofunc.h: Now we return MYSQL_TYPE_BLOB for asBinary function --- mysql-test/r/gis.result | 10 ++++++++++ mysql-test/t/gis.test | 7 +++++++ sql/item_geofunc.h | 2 ++ 3 files changed, 19 insertions(+) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index bf2f3e2bf03..f7066e7edca 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -661,3 +661,13 @@ POINT(10 10) select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))); (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))) POINT(10 10) +create table t1 (g GEOMETRY); +select * from t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def test t1 t1 g g 255 4294967295 0 Y 144 0 63 +g +select asbinary(g) from t1; +Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr +def asbinary(g) 252 8192 0 Y 128 0 63 +asbinary(g) +drop table t1; diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index 3eb17f3a484..b66b97c2c41 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -364,3 +364,10 @@ select (asWKT(geomfromwkb((0x000000000140240000000000004024000000000000)))); select (asWKT(geomfromwkb((0x010100000000000000000024400000000000002440)))); # End of 4.1 tests + +--enable_metadata +create table t1 (g GEOMETRY); +select * from t1; +select asbinary(g) from t1; +--disable_metadata +drop table t1; diff --git a/sql/item_geofunc.h b/sql/item_geofunc.h index 5f060416ff3..a466b606dc1 100644 --- a/sql/item_geofunc.h +++ b/sql/item_geofunc.h @@ -32,6 +32,7 @@ public: Item_geometry_func(Item *a,Item *b,Item *c) :Item_str_func(a,b,c) {} Item_geometry_func(List &list) :Item_str_func(list) {} void fix_length_and_dec(); + enum_field_types field_type() const { return MYSQL_TYPE_GEOMETRY; } }; 
class Item_func_geometry_from_text: public Item_geometry_func @@ -67,6 +68,7 @@ public: Item_func_as_wkb(Item *a): Item_geometry_func(a) {} const char *func_name() const { return "aswkb"; } String *val_str(String *); + enum_field_types field_type() const { return MYSQL_TYPE_BLOB; } }; class Item_func_geometry_type: public Item_str_func From e8beb72cc6bc1ad72e341b2b8f20ccaba95dfb38 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 20:23:22 +0200 Subject: [PATCH 22/74] Bug#19408 Test 'func_time' fails on Windows x64 - The setting of "ENV{'TZ'}" doesn't affect the timezone used by MySQL Server on Windows. - Explicitly set timezone to "+03:00" in test case before doing the calculatiosn to check that there is three hours difference between utc and local time. (Magnus' fix) mysql-test/r/func_time.result: Update test results mysql-test/t/func_time.test: Set timezone to GMT-3, to make it possible to use "interval 3 hour" --- mysql-test/r/func_time.result | 2 ++ mysql-test/t/func_time.test | 7 +++++++ 2 files changed, 9 insertions(+) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index 593ce7b26c8..aaa86378626 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -1,4 +1,5 @@ drop table if exists t1,t2,t3; +set time_zone="+03:00"; select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29"); from_days(to_days("960101")) to_days(960201)-to_days("19960101") to_days(date_add(curdate(), interval 1 day))-to_days(curdate()) weekday("1997-11-29") 1996-01-01 31 1 5 @@ -945,3 +946,4 @@ id day id day 1 2005-06-01 3 2005-07-15 3 2005-07-01 3 2005-07-15 DROP TABLE t1,t2; +set time_zone= @@global.time_zone; diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index d817d016881..05c033f2b22 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -5,6 +5,9 @@ drop table if exists t1,t2,t3; --enable_warnings +# Set timezone to GMT-3, to make it possible to use "interval 3 hour" +set time_zone="+03:00"; + select from_days(to_days("960101")),to_days(960201)-to_days("19960101"),to_days(date_add(curdate(), interval 1 day))-to_days(curdate()),weekday("1997-11-29"); select period_add("9602",-12),period_diff(199505,"9404") ; @@ -335,6 +338,7 @@ select last_day("1997-12-1")+0.0; # Test SAPDB UTC_% functions. This part is TZ dependant (It is supposed that # TZ variable set to GMT-3 + select strcmp(date_sub(localtimestamp(), interval 3 hour), utc_timestamp())=0; select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%T"), utc_time())=0; select strcmp(date_format(date_sub(localtimestamp(), interval 3 hour),"%Y-%m-%d"), utc_date())=0; @@ -513,3 +517,6 @@ SELECT * FROM t1, t2 DROP TABLE t1,t2; # End of 5.0 tests + +# Restore timezone to default +set time_zone= @@global.time_zone; From 0271faa8c2c56d4a589980927bb91c657e28fbfe Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 22 Jun 2006 20:50:38 +0200 Subject: [PATCH 23/74] #19409: Test 'func_timestamp' fails on Windows x64 - The setting of "ENV{'TZ'}" doesn't affect the timezone used by MySQL Server on Windows. 
- Explicitly set timezone in test cases before doing UTC/localtime conversions so tests produce deterministic results mysql-test/r/func_timestamp.result: Update test results mysql-test/r/type_timestamp.result: Update test results mysql-test/t/func_timestamp.test: Specifically set timezone to make tests that do localtime/UTC conversions deterministic mysql-test/t/type_timestamp.test: Specifically set timezone to make tests that do localtime/UTC conversions deterministic --- mysql-test/r/func_timestamp.result | 2 ++ mysql-test/r/type_timestamp.result | 2 ++ mysql-test/t/func_timestamp.test | 6 ++++++ mysql-test/t/type_timestamp.test | 6 ++++++ 4 files changed, 16 insertions(+) diff --git a/mysql-test/r/func_timestamp.result b/mysql-test/r/func_timestamp.result index d9912f08b72..495fedea9e6 100644 --- a/mysql-test/r/func_timestamp.result +++ b/mysql-test/r/func_timestamp.result @@ -1,4 +1,5 @@ drop table if exists t1; +set time_zone="+03:00"; create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null, Jahr smallint not null, index(Tag), index(Monat), index(Jahr) ); insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998); @@ -9,3 +10,4 @@ Date Unix 1998-9-16 09:26:00 905927160 1998-9-16 09:26:00 905927160 drop table t1; +set time_zone= @@global.time_zone; diff --git a/mysql-test/r/type_timestamp.result b/mysql-test/r/type_timestamp.result index 61ed6bbabf3..0817cc3b6c7 100644 --- a/mysql-test/r/type_timestamp.result +++ b/mysql-test/r/type_timestamp.result @@ -1,4 +1,5 @@ drop table if exists t1,t2; +set time_zone="+03:00"; CREATE TABLE t1 (a int, t timestamp); CREATE TABLE t2 (a int, t datetime); SET TIMESTAMP=1234; @@ -491,3 +492,4 @@ a b c 5 NULL 2001-09-09 04:46:59 6 NULL 2006-06-06 06:06:06 drop table t1; +set time_zone= @@global.time_zone; diff --git a/mysql-test/t/func_timestamp.test b/mysql-test/t/func_timestamp.test index e1bb7e878ee..05a91b06d28 100644 --- a/mysql-test/t/func_timestamp.test +++ b/mysql-test/t/func_timestamp.test @@ -6,6 +6,9 @@ drop table if exists t1; --enable_warnings +# Set timezone to GMT-3, to make it possible to use "interval 3 hour" +set time_zone="+03:00"; + create table t1 (Zeit time, Tag tinyint not null, Monat tinyint not null, Jahr smallint not null, index(Tag), index(Monat), index(Jahr) ); insert into t1 values ("09:26:00",16,9,1998),("09:26:00",16,9,1998); @@ -15,3 +18,6 @@ FROM t1; drop table t1; # End of 4.1 tests + +# Restore timezone to default +set time_zone= @@global.time_zone; diff --git a/mysql-test/t/type_timestamp.test b/mysql-test/t/type_timestamp.test index f96beedbebc..ddfc3f11665 100644 --- a/mysql-test/t/type_timestamp.test +++ b/mysql-test/t/type_timestamp.test @@ -6,6 +6,9 @@ drop table if exists t1,t2; --enable_warnings +# Set timezone to GMT-3, to make it possible to use "interval 3 hour" +set time_zone="+03:00"; + CREATE TABLE t1 (a int, t timestamp); CREATE TABLE t2 (a int, t datetime); SET TIMESTAMP=1234; @@ -322,3 +325,6 @@ select * from t1; drop table t1; # End of 4.1 tests + +# Restore timezone to default +set time_zone= @@global.time_zone; From df4ba783a174b1ff9daa0c1ea29a5da479884708 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 00:32:43 +0200 Subject: [PATCH 24/74] Bug#20588: mysqldump.test may fail, depending on system-wide configuration mysqldump.test calls my_print_defaults in a way that includes the systemwide my.cnf, so the results will be beyond our control and depend on whatever the user has in their my.cnf, namely the [mysqldump] section. 
call my_print_defaults with --config-file rather than --defaults-extra-file to prevent inclusion of system-wide defaults and use our config-file only. mysql-test/t/mysqldump.test: call my_print_defaults with our setup only, do not include the systemwide my.cnf as that would make the results unpredictable. --- mysql-test/t/mysqldump.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 4749141f7fe..585fb6d7e77 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -610,7 +610,7 @@ drop database db1; # BUG#15328 Segmentation fault occured if my.cnf is invalid for escape sequence # ---exec $MYSQL_MY_PRINT_DEFAULTS --defaults-extra-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump +--exec $MYSQL_MY_PRINT_DEFAULTS --config-file=$MYSQL_TEST_DIR/std_data/bug15328.cnf mysqldump # From 92ad3d5bd7fbfc64b083824f28470eebca5cd199 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 00:37:31 +0200 Subject: [PATCH 25/74] mysql.spec.sh: Disable the simplistic auto dependency scan for test/bench (bug#20078) support-files/mysql.spec.sh: Disable the simplistic auto dependency scan for test/bench (bug#20078) --- support-files/mysql.spec.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 9656851dc9c..854ad2e7ce7 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -148,6 +148,7 @@ Summary: MySQL - Benchmarks and test system Group: Applications/Databases Provides: mysql-bench Obsoletes: mysql-bench +AutoReqProv: no %description bench This package contains MySQL benchmark scripts and data. From 89e415950cf3b40b15e493cb72784f6ad3dc2b64 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 13:19:30 +0500 Subject: [PATCH 26/74] Bug#11228: DESC shows arbitrary column as "PRI" An UNIQUE KEY consisting of NOT NULL columns was displayed as PRIMARY KEY in "DESC t1". According to the code, that was intentional behaviour for some reasons unknown to me. This code was written before bitkeeper time, so I cannot check who and why made this. After discussing on dev-public, a decision was made to remove this code mysql-test/r/key.result: Adding test case. mysql-test/t/key.test: Adding test case. 
sql/table.cc: Removing old wrong code --- mysql-test/r/key.result | 10 ++++++++++ mysql-test/t/key.test | 11 +++++++++++ sql/table.cc | 21 --------------------- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result index f0a7afa239f..0bc241c0d19 100644 --- a/mysql-test/r/key.result +++ b/mysql-test/r/key.result @@ -326,6 +326,16 @@ alter table t1 add key (c1,c1,c2); ERROR 42S21: Duplicate column name 'c1' drop table t1; create table t1 ( +i1 INT NOT NULL, +i2 INT NOT NULL, +UNIQUE i1idx (i1), +UNIQUE i2idx (i2)); +desc t1; +Field Type Null Key Default Extra +i1 int(11) UNI 0 +i2 int(11) UNI 0 +drop table t1; +create table t1 ( c1 int, c2 varchar(20) not null, primary key (c1), diff --git a/mysql-test/t/key.test b/mysql-test/t/key.test index 85728582c75..796e36cb608 100644 --- a/mysql-test/t/key.test +++ b/mysql-test/t/key.test @@ -321,6 +321,17 @@ alter table t1 add key (c1,c2,c1); alter table t1 add key (c1,c1,c2); drop table t1; +# +# Bug#11228: DESC shows arbitrary column as "PRI" +# +create table t1 ( + i1 INT NOT NULL, + i2 INT NOT NULL, + UNIQUE i1idx (i1), + UNIQUE i2idx (i2)); +desc t1; +drop table t1; + # # Bug#12565 - ERROR 1034 when running simple UPDATE or DELETE # on large MyISAM table diff --git a/sql/table.cc b/sql/table.cc index 8ac64ac198d..513f42665a6 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -567,27 +567,6 @@ int openfrm(const char *name, const char *alias, uint db_stat, uint prgflag, if (outparam->key_info[key].flags & HA_FULLTEXT) outparam->key_info[key].algorithm= HA_KEY_ALG_FULLTEXT; - if (primary_key >= MAX_KEY && (keyinfo->flags & HA_NOSAME)) - { - /* - If the UNIQUE key doesn't have NULL columns and is not a part key - declare this as a primary key. - */ - primary_key=key; - for (i=0 ; i < keyinfo->key_parts ;i++) - { - uint fieldnr= key_part[i].fieldnr; - if (!fieldnr || - outparam->field[fieldnr-1]->null_ptr || - outparam->field[fieldnr-1]->key_length() != - key_part[i].length) - { - primary_key=MAX_KEY; // Can't be used - break; - } - } - } - for (i=0 ; i < keyinfo->key_parts ; key_part++,i++) { if (new_field_pack_flag <= 1) From 88812ad4abf961ba6455de5d980dcd51888d65a7 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 12:17:11 +0200 Subject: [PATCH 27/74] The binary "mysql_upgrade" must be included in the distribution. (bug#18516 + 19353) scripts/make_binary_distribution.sh: The binary "mysql_upgrade" must be included in the distribution. --- scripts/make_binary_distribution.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 36c941ef6aa..74b1993882e 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -134,7 +134,7 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \ client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \ client/mysqldump$BS client/mysqlimport$BS \ client/mysqltest$BS client/mysqlcheck$BS \ - client/mysqlbinlog$BS \ + client/mysqlbinlog$BS client/mysql_upgrade$BS \ tests/mysql_client_test$BS \ libmysqld/examples/mysql_client_test_embedded$BS \ libmysqld/examples/mysqltest_embedded$BS \ From 0f3cc95bf1523754d21cc3a4c59c0d107adc1c16 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 14:50:02 +0200 Subject: [PATCH 28/74] BUG#20622: Fix one-byte buffer overrun in IM directory string handling. 
The problem was a call to convert_dirname() with a destination buffer that did not have room for the trailing slash added by that function. This could cause the instance manager to crash in some cases. mysys/mf_dirname.c: Clarify in comments that convert_dirname destination must be larger than source to accomodate a trailing slash. server-tools/instance-manager/instance_options.cc: Fix buffer overrun. --- mysys/mf_dirname.c | 4 +++- server-tools/instance-manager/instance_options.cc | 9 +++++++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/mysys/mf_dirname.c b/mysys/mf_dirname.c index 9206aa28078..4d78f039799 100644 --- a/mysys/mf_dirname.c +++ b/mysys/mf_dirname.c @@ -72,7 +72,9 @@ uint dirname_part(my_string to, const char *name) SYNPOSIS convert_dirname() - to Store result here + to Store result here. Must be at least of size + min(FN_REFLEN, strlen(from) + 1) to make room + for adding FN_LIBCHAR at the end. from Original filename from_end Pointer at end of filename (normally end \0) diff --git a/server-tools/instance-manager/instance_options.cc b/server-tools/instance-manager/instance_options.cc index 9389694822a..72621ed1662 100644 --- a/server-tools/instance-manager/instance_options.cc +++ b/server-tools/instance-manager/instance_options.cc @@ -391,8 +391,13 @@ int Instance_options::complete_initialization(const char *default_path, const char *tmp; char *end; - if (!mysqld_path && !(mysqld_path= strdup_root(&alloc, default_path))) - goto err; + if (!mysqld_path) + { + // Need one extra byte, as convert_dirname() adds a slash at the end. + if (!(mysqld_path= alloc_root(&alloc, strlen(default_path) + 2))) + goto err; + strcpy((char *)mysqld_path, default_path); + } // it's safe to cast this to char* since this is a buffer we are allocating end= convert_dirname((char*)mysqld_path, mysqld_path, NullS); From d3ff1c2f7fe4232a3cb5dd8377d5c4feae81e467 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 18:00:49 +0500 Subject: [PATCH 29/74] Bug#15276: MySQL ignores collation-server Problem: mysqld --collation-server=xxx --character-set-server=yyy didn't work as expected: collation_server was set not to xxx, but to the default collation of character set "yyy". With different argument order it worked as expected: mysqld --character-set-server=yyy --collation-server=yyy Fix: initializate default_collation_name to 0 when processing --character-set-server only if --collation-server has not been specified in command line. mysql-test/r/ctype_ucs2_def.result: Adding test case mysql-test/t/ctype_ucs2_def-master.opt: Specifying variables in reverse order, to cover the bug. 
mysql-test/t/ctype_ucs2_def.test: Adding test case sql/mysqld.cc: Don't clear default_collation_name when processing --character-set-server if collation has already been specified using --collation-server --- mysql-test/r/ctype_ucs2_def.result | 3 +++ mysql-test/t/ctype_ucs2_def-master.opt | 2 +- mysql-test/t/ctype_ucs2_def.test | 5 +++++ sql/mysqld.cc | 6 ++++-- 4 files changed, 13 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/ctype_ucs2_def.result b/mysql-test/r/ctype_ucs2_def.result index 897dbac251c..2f9dc4ae616 100644 --- a/mysql-test/r/ctype_ucs2_def.result +++ b/mysql-test/r/ctype_ucs2_def.result @@ -1,3 +1,6 @@ +show variables like 'collation_server'; +Variable_name Value +collation_server ucs2_unicode_ci show variables like "%character_set_ser%"; Variable_name Value character_set_server ucs2 diff --git a/mysql-test/t/ctype_ucs2_def-master.opt b/mysql-test/t/ctype_ucs2_def-master.opt index 1f884ff1d67..a0b5b061860 100644 --- a/mysql-test/t/ctype_ucs2_def-master.opt +++ b/mysql-test/t/ctype_ucs2_def-master.opt @@ -1 +1 @@ ---default-character-set=ucs2 --default-collation=ucs2_unicode_ci +--default-collation=ucs2_unicode_ci --default-character-set=ucs2 diff --git a/mysql-test/t/ctype_ucs2_def.test b/mysql-test/t/ctype_ucs2_def.test index fb174d551cf..00f636d79dc 100644 --- a/mysql-test/t/ctype_ucs2_def.test +++ b/mysql-test/t/ctype_ucs2_def.test @@ -1,3 +1,8 @@ +# +# MySQL Bug#15276: MySQL ignores collation-server +# +show variables like 'collation_server'; + # # Bug#18004 Connecting crashes server when default charset is UCS2 # diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d7a38d6b715..2feead9e3bd 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -323,6 +323,7 @@ static char *default_character_set_name; static char *character_set_filesystem_name; static char *my_bind_addr_str; static char *default_collation_name; +static char compiled_default_collation_name[]= MYSQL_DEFAULT_COLLATION_NAME; static char mysql_data_home_buff[2]; static struct passwd *user_info; static I_List thread_cache; @@ -6389,7 +6390,7 @@ static void mysql_init_variables(void) /* Variables in libraries */ charsets_dir= 0; default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME; - default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME; + default_collation_name= compiled_default_collation_name; sys_charset_system.value= (char*) system_charset_info->csname; character_set_filesystem_name= (char*) "binary"; @@ -6551,7 +6552,8 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), strmake(mysql_home,argument,sizeof(mysql_home)-1); break; case 'C': - default_collation_name= 0; + if (default_collation_name == compiled_default_collation_name) + default_collation_name= 0; break; case 'l': opt_log=1; From 95239e1d1c1ab062173ae59acc5fe02bef9897ed Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 11:21:57 -0400 Subject: [PATCH 30/74] BUG#20528 Added missing parenthesis to DBUG_PRINT statement --- mysys/my_lib.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysys/my_lib.c b/mysys/my_lib.c index 03f2d91916d..1c5630ad14e 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -501,7 +501,7 @@ MY_DIR *my_dir(const char *path, myf MyFlags) if (!(MyFlags & MY_DONT_SORT)) qsort((void *) result->dir_entry, result->number_off_files, sizeof(FILEINFO), (qsort_cmp) comp_names); - DBUG_PRINT(exit, ("found %d files", result->number_off_files)); + DBUG_PRINT("exit", ("found %d files", result->number_off_files)); DBUG_RETURN(result); error: my_errno=errno; From 
907acc785da95878f0ff7eb6a5b88b5b18e713a8 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 19:36:54 +0400 Subject: [PATCH 31/74] key.result: After merge fix mysql-test/r/key.result: After merge fix --- mysql-test/r/key.result | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/key.result b/mysql-test/r/key.result index 6c05a3dde8b..a6f05143b3e 100644 --- a/mysql-test/r/key.result +++ b/mysql-test/r/key.result @@ -336,8 +336,8 @@ UNIQUE i1idx (i1), UNIQUE i2idx (i2)); desc t1; Field Type Null Key Default Extra -i1 int(11) UNI 0 -i2 int(11) UNI 0 +i1 int(11) NO UNI +i2 int(11) NO UNI drop table t1; create table t1 ( c1 int, From a4c1111af260181a84898231746e045d161f9da2 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 23 Jun 2006 12:15:39 -0400 Subject: [PATCH 32/74] Bug#20616: drop_temp_table test fails on Windows platform sql/sql_table.cc: Check for FN_DEVCHAR in the table name just before file creation. This allows for temporary tables to contain FN_DEVCHAR in the name. sql/table.cc: Removed the check for FN_DEVCHAR is done at this level because it prevents Windows from creating any table with FN_DEVCHAR in the name. --- sql/sql_table.cc | 17 ++++++++++++++++- sql/table.cc | 8 -------- 2 files changed, 16 insertions(+), 9 deletions(-) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 275cfbaa088..77c681d4a48 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1656,8 +1656,23 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, my_casedn_str(files_charset_info, path); create_info->table_options|=HA_CREATE_DELAY_KEY_WRITE; } - else + else + { + #ifdef FN_DEVCHAR + /* check if the table name contains FN_DEVCHAR when defined */ + const char *start= alias; + while (*start != '\0') + { + if (*start == FN_DEVCHAR) + { + my_error(ER_WRONG_TABLE_NAME, MYF(0), alias); + DBUG_RETURN(TRUE); + } + start++; + } + #endif build_table_path(path, sizeof(path), db, alias, reg_ext); + } /* Check if table already exists */ if ((create_info->options & HA_LEX_CREATE_TMP_TABLE) diff --git a/sql/table.cc b/sql/table.cc index 711f250c271..cfdb9bd93aa 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -1614,10 +1614,6 @@ bool check_db_name(char *name) if (*name == '/' || *name == '\\' || *name == FN_LIBCHAR || *name == FN_EXTCHAR) return 1; -#ifdef FN_DEVCHAR - if (*name == FN_DEVCHAR) - return 1; -#endif name++; } return last_char_is_space || (uint) (name - start) > NAME_LEN; @@ -1660,10 +1656,6 @@ bool check_table_name(const char *name, uint length) #endif if (*name == '/' || *name == '\\' || *name == FN_EXTCHAR) return 1; -#ifdef FN_DEVCHAR - if (*name == FN_DEVCHAR) - return 1; -#endif name++; } #if defined(USE_MB) && defined(USE_MB_IDENT) From 4a9a0b9aeb47ffdbacdc886aa19476dbcbc04e13 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 24 Jun 2006 13:11:09 +0200 Subject: [PATCH 33/74] Fix race condition in test case wait_timeout. Sometimes the helper connection (that is watching for the main connection to time out) would itself time out first, causing the test to fail. mysql-test/t/wait_timeout.test: Increase connection timeout in connection wait_con so we will not loose the connection that is watching for the real wait_timeout to trigger. 
--- mysql-test/t/wait_timeout.test | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/t/wait_timeout.test b/mysql-test/t/wait_timeout.test index 9310c3502b9..8387c08c902 100644 --- a/mysql-test/t/wait_timeout.test +++ b/mysql-test/t/wait_timeout.test @@ -11,6 +11,7 @@ connect (wait_con,localhost,root,,test,,); flush status; # Reset counters connection wait_con; +set session wait_timeout=100; let $retries=300; let $aborted_clients = `SHOW STATUS LIKE 'aborted_clients'`; set @aborted_clients= 0; From 2ad33373d62c22d4fe22a104e5ac0ebaff0d0615 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Jun 2006 23:31:10 +1000 Subject: [PATCH 34/74] BUG#11459 ndb status variables not updated change names of some undocumented ndb status variables to better reflect what their values mean sql/ha_ndbcluster.cc: rename some status variables to better reflect what they show. --- sql/ha_ndbcluster.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 98dd9d5a122..9814e2c84b6 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -160,8 +160,8 @@ static int update_status_variables(Ndb_cluster_connection *c) struct show_var_st ndb_status_variables[]= { {"cluster_node_id", (char*) &ndb_cluster_node_id, SHOW_LONG}, - {"connected_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR}, - {"connected_port", (char*) &ndb_connected_port, SHOW_LONG}, + {"config_from_host", (char*) &ndb_connected_host, SHOW_CHAR_PTR}, + {"config_from_port", (char*) &ndb_connected_port, SHOW_LONG}, // {"number_of_replicas", (char*) &ndb_number_of_replicas, SHOW_LONG}, {"number_of_storage_nodes",(char*) &ndb_number_of_storage_nodes, SHOW_LONG}, {NullS, NullS, SHOW_LONG} From 1c2a13b894ce26712316dfe5a62174b433e959f1 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Jun 2006 19:14:35 +0200 Subject: [PATCH 35/74] Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. The stress test suite revealed some deadlocks. Some were related to the new condition variable (COND_global_read_lock) and some were general problems with the global read lock. It is now necessary to signal COND_global_read_lock whenever COND_refresh is signalled. We need to wait for the release of a global read lock if one is set before every operation that requires a write lock. But we must not wait if we have locked tables by LOCK TABLES. After setting a global read lock a thread waits until all write locks are released. mysql-test/r/lock_multi.result: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Added test results. mysql-test/t/lock_multi.test: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Added tests for possible deadlocks that did not occur with the stress test suite. mysys/thr_lock.c: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Added a protection against an infinite loop that occurs with the test case for Bug #20662. sql/lock.cc: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Signal COND_global_read_lock whenever COND_refresh is signalled by using the new function broadcast_refresh(). 
Added the definition of a new function that signals COND_global_read_lock whenever COND_refresh is signalled. sql/mysql_priv.h: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Added a declaration for a new function that signals COND_global_read_lock whenever COND_refresh is signalled. sql/sql_base.cc: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Signal COND_global_read_lock whenever COND_refresh is signalled by using the new function broadcast_refresh(). sql/sql_handler.cc: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Signal COND_global_read_lock whenever COND_refresh is signalled by using the new function broadcast_refresh(). sql/sql_insert.cc: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Removed global read lock handling from inside of INSERT DELAYED. It is handled on a higher level now. sql/sql_parse.cc: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Wait for the release of a global read lock if one is set before every operation that requires a write lock. But don't wait if locked tables exist already. sql/sql_table.cc: Bug#16986 - Deadlock condition with MyISAM tables Addendum fixes after changing the condition variable for the global read lock. Removed global read lock handling from inside of CREATE TABLE. It is handled on a higher level now. Signal COND_global_read_lock whenever COND_refresh is signalled by using the new function broadcast_refresh(). 
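The new helper itself is small; the following self-contained sketch shows the pattern it centralizes (the condition variable names match the server's, the rest is simplified for illustration):

  #include <pthread.h>

  static pthread_cond_t COND_refresh          = PTHREAD_COND_INITIALIZER;
  static pthread_cond_t COND_global_read_lock = PTHREAD_COND_INITIALIZER;

  /*
    Wake both groups of waiters: threads waiting for a table refresh and
    threads waiting in the global read lock code. Since the latter now
    wait on their own condition variable, broadcasting only COND_refresh
    could leave them sleeping forever.
  */
  void broadcast_refresh(void)
  {
    (void) pthread_cond_broadcast(&COND_refresh);
    (void) pthread_cond_broadcast(&COND_global_read_lock);
  }
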
--- mysql-test/r/lock_multi.result | 15 +++++ mysql-test/t/lock_multi.test | 50 ++++++++++++++++ mysys/thr_lock.c | 2 + sql/lock.cc | 44 ++++++++++++-- sql/mysql_priv.h | 1 + sql/sql_base.cc | 14 ++--- sql/sql_handler.cc | 11 ++-- sql/sql_insert.cc | 26 +-------- sql/sql_parse.cc | 102 +++++++++++++++++++++++++++------ sql/sql_table.cc | 9 +-- 10 files changed, 208 insertions(+), 66 deletions(-) diff --git a/mysql-test/r/lock_multi.result b/mysql-test/r/lock_multi.result index 2188d58e526..c80108f723a 100644 --- a/mysql-test/r/lock_multi.result +++ b/mysql-test/r/lock_multi.result @@ -67,6 +67,21 @@ Select_priv N use test; use test; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; + FLUSH TABLES WITH READ LOCK; +CREATE TABLE t2 (c1 int); +UNLOCK TABLES; +UNLOCK TABLES; +DROP TABLE t1, t2; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; + FLUSH TABLES WITH READ LOCK; +CREATE TABLE t2 AS SELECT * FROM t1; +ERROR HY000: Table 't2' was not locked with LOCK TABLES +UNLOCK TABLES; +UNLOCK TABLES; +DROP TABLE t1; create table t1 (f1 int(12) unsigned not null auto_increment, primary key(f1)) engine=innodb; lock tables t1 write; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; alter table t1 auto_increment=0; // diff --git a/mysql-test/t/lock_multi.test b/mysql-test/t/lock_multi.test index 905d0699e6a..627c33b3d82 100644 --- a/mysql-test/t/lock_multi.test +++ b/mysql-test/t/lock_multi.test @@ -142,6 +142,7 @@ disconnect con2; --error ER_DB_DROP_EXISTS DROP DATABASE mysqltest_1; +# # Bug#16986 - Deadlock condition with MyISAM tables # connection locker; @@ -170,6 +171,55 @@ connection locker; use test; # connection default; +# +# Test if CREATE TABLE with LOCK TABLE deadlocks. +# +connection writer; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; +# +# This waits until t1 is unlocked. +connection locker; +send FLUSH TABLES WITH READ LOCK; +--sleep 1 +# +# This must not block. +connection writer; +CREATE TABLE t2 (c1 int); +UNLOCK TABLES; +# +# This awakes now. +connection locker; +reap; +UNLOCK TABLES; +# +connection default; +DROP TABLE t1, t2; +# +# Test if CREATE TABLE SELECT with LOCK TABLE deadlocks. +# +connection writer; +CREATE TABLE t1 (c1 int); +LOCK TABLE t1 WRITE; +# +# This waits until t1 is unlocked. +connection locker; +send FLUSH TABLES WITH READ LOCK; +--sleep 1 +# +# This must not block. +connection writer; +--error 1100 +CREATE TABLE t2 AS SELECT * FROM t1; +UNLOCK TABLES; +# +# This awakes now. +connection locker; +reap; +UNLOCK TABLES; +# +connection default; +DROP TABLE t1; # # Bug #17264: MySQL Server freeze diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c index f5a8b618949..51df50a4926 100644 --- a/mysys/thr_lock.c +++ b/mysys/thr_lock.c @@ -204,6 +204,8 @@ static void check_locks(THR_LOCK *lock, const char *where, { if ((int) data->type == (int) TL_READ_NO_INSERT) count++; + /* Protect against infinite loop. 
*/ + DBUG_ASSERT(count <= lock->read_no_write_count); } if (count != lock->read_no_write_count) { diff --git a/sql/lock.cc b/sql/lock.cc index 71384fe7fc6..97a080c5634 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -905,7 +905,7 @@ void unlock_table_name(THD *thd, TABLE_LIST *table_list) if (table_list->table) { hash_delete(&open_cache, (byte*) table_list->table); - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } } @@ -997,9 +997,9 @@ end: (default 0, which will unlock all tables) NOTES - One must have a lock on LOCK_open when calling this - This function will send a COND_refresh signal to inform other threads - that the name locks are removed + One must have a lock on LOCK_open when calling this. + This function will broadcast refresh signals to inform other threads + that the name locks are removed. RETURN 0 ok @@ -1013,7 +1013,7 @@ void unlock_table_names(THD *thd, TABLE_LIST *table_list, table != last_table; table= table->next_local) unlock_table_name(thd,table); - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); } @@ -1304,3 +1304,37 @@ bool make_global_read_lock_block_commit(THD *thd) } +/* + Broadcast COND_refresh and COND_global_read_lock. + + SYNOPSIS + broadcast_refresh() + void No parameters. + + DESCRIPTION + Due to a bug in a threading library it could happen that a signal + did not reach its target. A condition for this was that the same + condition variable was used with different mutexes in + pthread_cond_wait(). Some time ago we changed LOCK_open to + LOCK_global_read_lock in global read lock handling. So COND_refresh + was used with LOCK_open and LOCK_global_read_lock. + + We did now also change from COND_refresh to COND_global_read_lock + in global read lock handling. But now it is necessary to signal + both conditions at the same time. + + NOTE + When signalling COND_global_read_lock within the global read lock + handling, it is not necessary to also signal COND_refresh. 
+ + RETURN + void +*/ + +void broadcast_refresh(void) +{ + VOID(pthread_cond_broadcast(&COND_refresh)); + VOID(pthread_cond_broadcast(&COND_global_read_lock)); +} + + diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 6d39f2f7440..54f3d652af4 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -1342,6 +1342,7 @@ void start_waiting_global_read_lock(THD *thd); bool make_global_read_lock_block_commit(THD *thd); bool set_protect_against_global_read_lock(void); void unset_protect_against_global_read_lock(void); +void broadcast_refresh(void); /* Lock based on name */ int lock_and_wait_for_table_name(THD *thd, TABLE_LIST *table_list); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index ba9fa6f6c80..9adf3fe35c0 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -530,7 +530,7 @@ void close_thread_tables(THD *thd, bool lock_in_use, bool skip_derived) if (found_old_table) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } if (!lock_in_use) VOID(pthread_mutex_unlock(&LOCK_open)); @@ -1035,7 +1035,7 @@ TABLE *unlink_open_table(THD *thd, TABLE *list, TABLE *find) } *prev=0; // Notify any 'refresh' threads - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); return start; } @@ -1577,7 +1577,7 @@ bool reopen_table(TABLE *table,bool locked) if (table->triggers) table->triggers->set_table(table); - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); error=0; end: @@ -1678,7 +1678,7 @@ bool reopen_tables(THD *thd,bool get_locks,bool in_refresh) { my_afree((gptr) tables); } - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); *prev=0; DBUG_RETURN(error); } @@ -1715,7 +1715,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, } } if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -1807,7 +1807,7 @@ bool drop_locked_tables(THD *thd,const char *db, const char *table_name) } *prev=0; if (found) - VOID(pthread_cond_broadcast(&COND_refresh)); // Signal to refresh + broadcast_refresh(); if (thd->locked_tables && thd->locked_tables->table_count == 0) { my_free((gptr) thd->locked_tables,MYF(0)); @@ -5249,7 +5249,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, Signal any thread waiting for tables to be freed to reopen their tables */ - (void) pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); DBUG_PRINT("info", ("Waiting for refresh signal")); if (!(flags & RTFC_CHECK_KILLED_FLAG) || !thd->killed) { diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 1cd7778a053..0193d4d5355 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -254,7 +254,8 @@ err: DESCRIPTION Though this function takes a list of tables, only the first list entry - will be closed. Broadcasts a COND_refresh condition. + will be closed. + Broadcasts refresh if it closed the table. RETURN FALSE ok @@ -291,7 +292,7 @@ bool mysql_ha_close(THD *thd, TABLE_LIST *tables) if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } VOID(pthread_mutex_unlock(&LOCK_open)); } @@ -608,7 +609,7 @@ err0: tables are closed (if MYSQL_HA_FLUSH_ALL) is set. If 'tables' is NULL and MYSQL_HA_FLUSH_ALL is not set, all HANDLER tables marked for flush are closed. - Broadcasts a COND_refresh condition, for every table closed. 
+ Broadcasts refresh for every table closed. NOTE Since mysql_ha_flush() is called when the base table has to be closed, @@ -704,7 +705,7 @@ int mysql_ha_flush(THD *thd, TABLE_LIST *tables, uint mode_flags, MYSQL_HA_REOPEN_ON_USAGE mark for reopen. DESCRIPTION - Broadcasts a COND_refresh condition, for every table closed. + Broadcasts refresh if it closed the table. The caller must lock LOCK_open. RETURN @@ -742,7 +743,7 @@ static int mysql_ha_flush_table(THD *thd, TABLE **table_ptr, uint mode_flags) if (close_thread_table(thd, table_ptr)) { /* Tell threads waiting for refresh that something has happened */ - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } DBUG_RETURN(0); diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 8ffc6f53a43..15c7f91ba83 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1349,18 +1349,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) */ if (! (tmp= find_handler(thd, table_list))) { - /* - Avoid that a global read lock steps in while we are creating the - new thread. It would block trying to open the table. Hence, the - DI thread and this thread would wait until after the global - readlock is gone. Since the insert thread needs to wait for a - global read lock anyway, we do it right now. Note that - wait_if_global_read_lock() sets a protection against a new - global read lock when it succeeds. This needs to be released by - start_waiting_global_read_lock(). - */ - if (wait_if_global_read_lock(thd, 0, 1)) - goto err; if (!(tmp=new delayed_insert())) { my_error(ER_OUTOFMEMORY,MYF(0),sizeof(delayed_insert)); @@ -1401,11 +1389,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_cond_wait(&tmp->cond_client,&tmp->mutex); } pthread_mutex_unlock(&tmp->mutex); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. - */ - start_waiting_global_read_lock(thd); thd->proc_info="got old table"; if (tmp->thd.killed) { @@ -1441,11 +1424,6 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) err1: thd->fatal_error(); - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. 
- */ - start_waiting_global_read_lock(thd); err: pthread_mutex_unlock(&LOCK_delayed_create); DBUG_RETURN(0); // Continue with normal insert @@ -2676,7 +2654,7 @@ bool select_create::send_eof() hash_delete(&open_cache,(byte*) table); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } lock=0; table=0; @@ -2705,7 +2683,7 @@ void select_create::abort() quick_rm_table(table_type, create_table->db, create_table->table_name); /* Tell threads waiting for refresh that something has happened */ if (version != refresh_version) - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); } else if (!create_info->table_existed) close_temporary_table(thd, create_table->db, create_table->table_name); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ba5c2ebf484..169fe219263 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2337,17 +2337,37 @@ static void reset_one_shot_variables(THD *thd) } -/**************************************************************************** -** mysql_execute_command -** Execute command saved in thd and current_lex->sql_command -****************************************************************************/ +/* + Execute command saved in thd and current_lex->sql_command + + SYNOPSIS + mysql_execute_command() + thd Thread handle + + IMPLEMENTATION + + Before every operation that can request a write lock for a table + wait if a global read lock exists. However do not wait if this + thread has locked tables already. No new locks can be requested + until the other locks are released. The thread that requests the + global read lock waits for write locked tables to become unlocked. + + Note that wait_if_global_read_lock() sets a protection against a new + global read lock when it succeeds. This needs to be released by + start_waiting_global_read_lock() after the operation. + + RETURN + FALSE OK + TRUE Error +*/ bool mysql_execute_command(THD *thd) { - bool res= FALSE; - int result= 0; - LEX *lex= thd->lex; + bool res= FALSE; + bool need_start_waiting= FALSE; // have protection against global read lock + int result= 0; + LEX *lex= thd->lex; /* first SELECT_LEX (have special meaning for many of non-SELECTcommands) */ SELECT_LEX *select_lex= &lex->select_lex; /* first table of first SELECT_LEX */ @@ -2832,7 +2852,8 @@ mysql_execute_command(THD *thd) TABLE in the same way. That way we avoid that a new table is created during a gobal read lock. */ - if (wait_if_global_read_lock(thd, 0, 1)) + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) { res= 1; goto end_with_restore_list; @@ -2857,7 +2878,7 @@ mysql_execute_command(THD *thd) { update_non_unique_table_error(create_table, "CREATE", duplicate); res= 1; - goto end_with_restart_wait; + goto end_with_restore_list; } } /* If we create merge table, we have to test tables in merge, too */ @@ -2873,7 +2894,7 @@ mysql_execute_command(THD *thd) { update_non_unique_table_error(tab, "CREATE", duplicate); res= 1; - goto end_with_restart_wait; + goto end_with_restore_list; } } } @@ -2915,13 +2936,6 @@ mysql_execute_command(THD *thd) send_ok(thd); } -end_with_restart_wait: - /* - Release the protection against the global read lock and wake - everyone, who might want to set a global read lock. 
- */ - start_waiting_global_read_lock(thd); - /* put tables back for PS rexecuting */ end_with_restore_list: lex->link_first_table_back(create_table, link_to_local); @@ -3039,6 +3053,13 @@ end_with_restore_list: goto error; else { + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + thd->enable_slow_log= opt_log_slow_admin_statements; res= mysql_alter_table(thd, select_lex->db, lex->name, &lex->create_info, @@ -3296,6 +3317,14 @@ end_with_restore_list: break; /* Skip first table, which is the table we are inserting in */ select_lex->context.table_list= first_table->next_local; + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + res= mysql_insert(thd, all_tables, lex->field_list, lex->many_values, lex->update_list, lex->value_list, lex->duplicates, lex->ignore); @@ -3319,6 +3348,14 @@ end_with_restore_list: select_lex->options|= SELECT_NO_UNLOCK; unit->set_limit(select_lex); + + if (! thd->locked_tables && + ! (need_start_waiting= ! wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + if (!(res= open_and_lock_tables(thd, all_tables))) { /* Skip first table, which is the table we are inserting in */ @@ -3395,6 +3432,14 @@ end_with_restore_list: break; DBUG_ASSERT(select_lex->offset_limit == 0); unit->set_limit(select_lex); + + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + res = mysql_delete(thd, all_tables, select_lex->where, &select_lex->order_list, unit->select_limit_cnt, select_lex->options, @@ -3408,6 +3453,13 @@ end_with_restore_list: (TABLE_LIST *)thd->lex->auxilliary_table_list.first; multi_delete *result; + if (!thd->locked_tables && + !(need_start_waiting= !wait_if_global_read_lock(thd, 0, 1))) + { + res= 1; + break; + } + if ((res= multi_delete_precheck(thd, all_tables))) break; @@ -4974,10 +5026,22 @@ end_with_restore_list: if (lex->sql_command != SQLCOM_CALL && lex->sql_command != SQLCOM_EXECUTE && uc_update_queries[lex->sql_command]<2) thd->row_count_func= -1; - DBUG_RETURN(res || thd->net.report_error); + + goto end; error: - DBUG_RETURN(1); + res= TRUE; + +end: + if (need_start_waiting) + { + /* + Release the protection against the global read lock and wake + everyone, who might want to set a global read lock. 
+ */ + start_waiting_global_read_lock(thd); + } + DBUG_RETURN(res || thd->net.report_error); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 275cfbaa088..49f84aed966 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -1674,8 +1674,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, my_error(ER_TABLE_EXISTS_ERROR, MYF(0), alias); DBUG_RETURN(TRUE); } - if (wait_if_global_read_lock(thd, 0, 1)) - DBUG_RETURN(TRUE); VOID(pthread_mutex_lock(&LOCK_open)); if (!internal_tmp_table && !(create_info->options & HA_LEX_CREATE_TMP_TABLE)) { @@ -1743,7 +1741,6 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, end: VOID(pthread_mutex_unlock(&LOCK_open)); - start_waiting_global_read_lock(thd); thd->proc_info="After create"; DBUG_RETURN(error); @@ -1923,7 +1920,7 @@ void close_cached_table(THD *thd, TABLE *table) thd->open_tables=unlink_open_table(thd,thd->open_tables,table); /* When lock on LOCK_open is freed other threads can continue */ - pthread_cond_broadcast(&COND_refresh); + broadcast_refresh(); DBUG_VOID_RETURN; } @@ -3894,7 +3891,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (error) { VOID(pthread_mutex_unlock(&LOCK_open)); - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); goto err; } thd->proc_info="end"; @@ -3904,7 +3901,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, Query_log_event qinfo(thd, thd->query, thd->query_length, FALSE, FALSE); mysql_bin_log.write(&qinfo); } - VOID(pthread_cond_broadcast(&COND_refresh)); + broadcast_refresh(); VOID(pthread_mutex_unlock(&LOCK_open)); #ifdef HAVE_BERKELEY_DB if (old_db_type == DB_TYPE_BERKELEY_DB) From d6bcbfbe92db6aa3cb955ed0fb1f1a8e6f8bbb60 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 00:47:52 +0400 Subject: [PATCH 36/74] A fix and a test case for Bug#19022 "Memory bug when switching db during trigger execution" Bug#17199 "Problem when view calls function from another database." Bug#18444 "Fully qualified stored function names don't work correctly in SELECT statements" Documentation note: this patch introduces a change in behaviour of prepared statements. This patch adds a few new invariants with regard to how THD::db should be used. These invariants should be preserved in future: - one should never refer to THD::db by pointer and always make a deep copy (strmake, strdup) - one should never compare two databases by pointer, but use strncmp or my_strncasecmp - TABLE_LIST object table->db should be always initialized in the parser or by creator of the object. For prepared statements it means that if the current database is changed after a statement is prepared, the database that was current at prepare remains active. This also means that you can not prepare a statement that implicitly refers to the current database if the latter is not set. This is not documented, and therefore needs documentation. This is NOT a change in behavior for almost all SQL statements except: - ALTER TABLE t1 RENAME t2 - OPTIMIZE TABLE t1 - ANALYZE TABLE t1 - TRUNCATE TABLE t1 -- until this patch t1 or t2 could be evaluated at the first execution of prepared statement. CURRENT_DATABASE() still works OK and is evaluated at every execution of prepared statement. Note, that in stored routines this is not an issue as the default database is the database of the stored procedure and "use" statement is prohibited in stored routines. 
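A rough, self-contained sketch of the deep-copy / compare-by-value invariant described above (the real code uses the server's allocators and strmake/strdup rather than the libc calls shown here, and error handling is omitted):

  #include <string.h>
  #include <stdlib.h>

  struct thd_db_sketch
  {
    char   *db;        /* current database, NULL when none is selected */
    size_t  db_length;
  };

  /* Always store a private copy -- never keep a pointer we do not own. */
  void set_db_sketch(struct thd_db_sketch *thd, const char *new_db,
                     size_t length)
  {
    free(thd->db);                        /* db starts out as NULL */
    if (new_db)
    {
      thd->db= (char*) malloc(length + 1);
      memcpy(thd->db, new_db, length);
      thd->db[length]= '\0';
      thd->db_length= length;
    }
    else
    {
      thd->db= NULL;
      thd->db_length= 0;
    }
  }

  /* Compare database names by value, never by pointer. */
  bool same_db(const char *a, const char *b)
  {
    return a != NULL && b != NULL && strcmp(a, b) == 0;
  }
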
This patch makes obsolete the use of check_db_used (it was never used in the old code too) and all other places that check for table->db and assign it from THD::db if it's NULL, except the parser. How this patch was created: THD::{db,db_length} were replaced with a LEX_STRING, THD::db. All the places that refer to THD::{db,db_length} were manually checked and: - if the place uses thd->db by pointer, it was fixed to make a deep copy - if a place compared two db pointers, it was fixed to compare them by value (via strcmp/my_strcasecmp, whatever was approproate) Then this intermediate patch was used to write a smaller patch that does the same thing but without a rename. TODO in 5.1: - remove check_db_used - deploy THD::set_db in mysql_change_db See also comments to individual files. mysql-test/r/create.result: Modify the result file: a database can never be NULL. mysql-test/r/ps.result: Update test results (Bug#17199 et al) mysql-test/r/sp.result: Update test results (Bug#17199 et al) mysql-test/t/create.test: Update the id of the returned error. mysql-test/t/ps.test: Add test coverage for prepared statements and current database. In scope of work on Bug#17199 "Problem when view calls function from another database." mysql-test/t/sp.test: Add a test case for Bug#17199 "Problem when view calls function from another database." and Bug#18444 "Fully qualified stored function names don't work correctly in SELECT statements". Test a complementary problem. sql/item_strfunc.cc: Touch the code that reads thd->db (cleanup). sql/log_event.cc: While we are at it, replace direct access to thd->db with a method. Should simplify future conversion of THD::db to LEX_STRING. sql/slave.cc: While we are at it, replace direct access to thd->db with a method. Should simplify future conversion of THD::db to LEX_STRING. sql/slave.h: Remove a declaration for a method that is used only in one module. sql/sp.cc: Rewrite sp_use_new_db: this is a cleanup that I needed in order to understand this function and ensure that it has no bugs. sql/sp.h: Add a new declaration for sp_use_new_db (uses LEX_STRINGs) and a comment. sql/sp_head.cc: - drop sp_name_current_db_new - a creator of sp_name class that was used when sp_name was created for an identifier without an explicitly initialized database. Now we pass thd->db to constructor of sp_name right in the parser. - rewrite sp_head::init_strings: name->m_db is always set now - use the new variant of sp_use_new_db - we don't need to update thd->db with SP MEM_ROOT pointer anymore when parsing a stored procedure, as noone will refer to it (yes!) sql/sp_head.h: - remove unneded methods and members sql/sql_class.h: - introduce 3 THD methods to work with THD::db: .set_db to assign the current database .reset_db to reset the current database (temporarily) or set it to NULL .opt_copy_db_to - to deep-copy thd->db to a pointer if it's not NULL sql/sql_db.cc: While we are at it, replace direct access to thd->db with a method. Should simplify future conversion of THD::db to LEX_STRING. sql/sql_insert.cc: - replace checks with asserts: table_list->db must be always set in the parser. sql/sql_lex.h: - add a comment sql/sql_parse.cc: - implement the invariant described in the changeset comment. - remove juggling with lex->sphead in SQLCOM_CREATE_PROCEDURE: now db_load_routine uses its own LEX object and doesn't damage the main LEX. 
- add DBUG_ASSERT(0) to unused "check_db_used" sql/sql_table.cc: - replace a check with an assert (table_ident->db) sql/sql_trigger.cc: While we are at it, replace direct access to thd->db with a method. Should simplify future conversion of THD::db to LEX_STRING. sql/sql_udf.cc: - use thd->set_db instead of direct modification of to thd->db sql/sql_view.cc: - replace a check with an assert (view->db) sql/sql_yacc.yy: - make sure that we always copy table->db or name->db or ident->db or select_lex->db from thd->db if the former is not set. If thd->db is not set but is accessed, return an error. sql/tztime.cc: - be nice, never copy thd->db by pointer. --- mysql-test/r/create.result | 2 +- mysql-test/r/ps.result | 105 +++++++++++++++++++ mysql-test/r/sp.result | 46 ++++++++ mysql-test/t/create.test | 2 +- mysql-test/t/ps.test | 118 +++++++++++++++++++++ mysql-test/t/sp.test | 46 ++++++++ sql/item_strfunc.cc | 4 +- sql/log_event.cc | 20 ++-- sql/slave.cc | 16 +-- sql/slave.h | 4 - sql/sp.cc | 107 +++++++++++-------- sql/sp.h | 14 +-- sql/sp_head.cc | 66 ++++-------- sql/sp_head.h | 10 -- sql/sql_class.h | 43 +++++++- sql/sql_db.cc | 11 +- sql/sql_insert.cc | 15 ++- sql/sql_lex.h | 5 + sql/sql_parse.cc | 210 ++++++++++++++++--------------------- sql/sql_table.cc | 3 +- sql/sql_trigger.cc | 9 +- sql/sql_udf.cc | 6 +- sql/sql_view.cc | 9 +- sql/sql_yacc.yy | 47 ++++++++- sql/tztime.cc | 6 +- 25 files changed, 634 insertions(+), 290 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 27a6c8a9d03..c5b77ea4925 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -607,7 +607,7 @@ create database mysqltest; use mysqltest; drop database mysqltest; create table test.t1 like x; -ERROR 42000: Incorrect database name 'NULL' +ERROR 3D000: No database selected drop table if exists test.t1; create database mysqltest; use mysqltest; diff --git a/mysql-test/r/ps.result b/mysql-test/r/ps.result index abebfc8cd93..3ce2f5169e2 100644 --- a/mysql-test/r/ps.result +++ b/mysql-test/r/ps.result @@ -1158,3 +1158,108 @@ Warnings: Error 1146 Table 'test.t4' doesn't exist deallocate prepare stmt; drop table t1, t2, t3; +create database mysqltest_long_database_name_to_thrash_heap; +use test; +create table t1 (i int); +prepare stmt from "alter table test.t1 rename t1"; +use mysqltest_long_database_name_to_thrash_heap; +execute stmt; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +prepare stmt from "alter table test.t1 rename t1"; +use test; +execute stmt; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +deallocate prepare stmt; +use mysqltest_long_database_name_to_thrash_heap; +prepare stmt_create from "create table t1 (i int)"; +prepare stmt_insert from "insert into t1 (i) values (1)"; +prepare stmt_update from "update t1 set i=2"; +prepare stmt_delete from "delete from t1 where i=2"; +prepare stmt_select from "select * from t1"; +prepare stmt_alter from "alter table t1 add column (b int)"; +prepare stmt_alter1 from "alter table t1 drop column b"; +prepare stmt_analyze from "analyze table t1"; +prepare stmt_optimize from "optimize table t1"; +prepare stmt_show from "show tables like 't1'"; +prepare stmt_truncate from "truncate table t1"; +prepare stmt_drop from "drop table t1"; +drop table t1; +use test; +execute stmt_create; +show tables like 't1'; +Tables_in_test (t1) +use 
mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +use test; +execute stmt_insert; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +i +1 +execute stmt_update; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +i +2 +execute stmt_delete; +execute stmt_select; +i +execute stmt_alter; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +Field Type Null Key Default Extra +i int(11) YES NULL +b int(11) YES NULL +execute stmt_alter1; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +Field Type Null Key Default Extra +i int(11) YES NULL +execute stmt_analyze; +Table Op Msg_type Msg_text +mysqltest_long_database_name_to_thrash_heap.t1 analyze status Table is already up to date +execute stmt_optimize; +Table Op Msg_type Msg_text +mysqltest_long_database_name_to_thrash_heap.t1 optimize status Table is already up to date +execute stmt_show; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +t1 +execute stmt_truncate; +execute stmt_drop; +show tables like 't1'; +Tables_in_test (t1) +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +Tables_in_mysqltest_long_database_name_to_thrash_heap (t1) +drop database mysqltest_long_database_name_to_thrash_heap; +prepare stmt_create from "create table t1 (i int)"; +ERROR 3D000: No database selected +prepare stmt_insert from "insert into t1 (i) values (1)"; +ERROR 3D000: No database selected +prepare stmt_update from "update t1 set i=2"; +ERROR 3D000: No database selected +prepare stmt_delete from "delete from t1 where i=2"; +ERROR 3D000: No database selected +prepare stmt_select from "select * from t1"; +ERROR 3D000: No database selected +prepare stmt_alter from "alter table t1 add column (b int)"; +ERROR 3D000: No database selected +prepare stmt_alter1 from "alter table t1 drop column b"; +ERROR 3D000: No database selected +prepare stmt_analyze from "analyze table t1"; +ERROR 3D000: No database selected +prepare stmt_optimize from "optimize table t1"; +ERROR 3D000: No database selected +prepare stmt_show from "show tables like 't1'"; +ERROR 3D000: No database selected +prepare stmt_truncate from "truncate table t1"; +ERROR 3D000: No database selected +prepare stmt_drop from "drop table t1"; +ERROR 3D000: No database selected +create temporary table t1 (i int); +ERROR 3D000: No database selected +use test; diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index ff378f1f43b..a2b36b11a2e 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -4990,4 +4990,50 @@ CALL bug18037_p2()| DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +use test| +create table t3 (i int)| +insert into t3 values (1), (2)| +create database mysqltest1| +use mysqltest1| +create function bug17199() returns varchar(2) deterministic return 'ok'| +use test| +select *, mysqltest1.bug17199() from t3| +i mysqltest1.bug17199() +1 ok +2 ok +use mysqltest1| +create function bug18444(i int) returns int no sql deterministic return i + 1| +use test| +select mysqltest1.bug18444(i) from t3| +mysqltest1.bug18444(i) +2 +3 +drop database mysqltest1| +create database mysqltest1 charset=utf8| +create database mysqltest2 charset=utf8| +create procedure mysqltest1.p1() +begin +-- alters the default collation of database test +alter database character set koi8r; +end| +use mysqltest1| +call p1()| +show create database mysqltest1| +Database Create Database +mysqltest1 CREATE DATABASE 
`mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */ +show create database mysqltest2| +Database Create Database +mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */ +alter database mysqltest1 character set utf8| +use mysqltest2| +call mysqltest1.p1()| +show create database mysqltest1| +Database Create Database +mysqltest1 CREATE DATABASE `mysqltest1` /*!40100 DEFAULT CHARACTER SET koi8r */ +show create database mysqltest2| +Database Create Database +mysqltest2 CREATE DATABASE `mysqltest2` /*!40100 DEFAULT CHARACTER SET utf8 */ +drop database mysqltest1| +drop database mysqltest2| +use test| drop table t1,t2; diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index e22c2b5c426..07edbf206fe 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -517,7 +517,7 @@ DROP TABLE t12913; create database mysqltest; use mysqltest; drop database mysqltest; ---error 1102 +--error ER_NO_DB_ERROR create table test.t1 like x; --disable_warnings drop table if exists test.t1; diff --git a/mysql-test/t/ps.test b/mysql-test/t/ps.test index e3f3e37cd4c..ff66b265fae 100644 --- a/mysql-test/t/ps.test +++ b/mysql-test/t/ps.test @@ -1146,4 +1146,122 @@ execute stmt; execute stmt; deallocate prepare stmt; drop table t1, t2, t3; + +# +# Bug#17199 "Table not found" error occurs if the query contains a call +# to a function from another database. +# Test prepared statements- related behaviour. +# +# +# ALTER TABLE RENAME and Prepared Statements: wrong DB name buffer was used +# in ALTER ... RENAME which caused memory corruption in prepared statements. +# No need to fix this problem in 4.1 as ALTER TABLE is not allowed in +# Prepared Statements in 4.1. +# +create database mysqltest_long_database_name_to_thrash_heap; +use test; +create table t1 (i int); +prepare stmt from "alter table test.t1 rename t1"; +use mysqltest_long_database_name_to_thrash_heap; +execute stmt; +show tables like 't1'; +prepare stmt from "alter table test.t1 rename t1"; +use test; +execute stmt; +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +deallocate prepare stmt; +# +# Check that a prepared statement initializes its current database at +# PREPARE, and then works correctly even if the current database has been +# changed. 
+# +use mysqltest_long_database_name_to_thrash_heap; +# Necessary for preparation of INSERT/UPDATE/DELETE to succeed +prepare stmt_create from "create table t1 (i int)"; +prepare stmt_insert from "insert into t1 (i) values (1)"; +prepare stmt_update from "update t1 set i=2"; +prepare stmt_delete from "delete from t1 where i=2"; +prepare stmt_select from "select * from t1"; +prepare stmt_alter from "alter table t1 add column (b int)"; +prepare stmt_alter1 from "alter table t1 drop column b"; +prepare stmt_analyze from "analyze table t1"; +prepare stmt_optimize from "optimize table t1"; +prepare stmt_show from "show tables like 't1'"; +prepare stmt_truncate from "truncate table t1"; +prepare stmt_drop from "drop table t1"; +# Drop the table that was used to prepare INSERT/UPDATE/DELETE: we will +# create a new one by executing stmt_create +drop table t1; +# Switch the current database +use test; +# Check that all prepared statements operate on the database that was +# active at PREPARE +execute stmt_create; +# should return empty set +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +use test; +execute stmt_insert; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_update; +select * from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_delete; +execute stmt_select; +execute stmt_alter; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_alter1; +show columns from mysqltest_long_database_name_to_thrash_heap.t1; +execute stmt_analyze; +execute stmt_optimize; +execute stmt_show; +execute stmt_truncate; +execute stmt_drop; +show tables like 't1'; +use mysqltest_long_database_name_to_thrash_heap; +show tables like 't1'; +# +# Attempt a statement PREPARE when there is no current database: +# is expected to return an error. +# +drop database mysqltest_long_database_name_to_thrash_heap; +--error ER_NO_DB_ERROR +prepare stmt_create from "create table t1 (i int)"; +--error ER_NO_DB_ERROR +prepare stmt_insert from "insert into t1 (i) values (1)"; +--error ER_NO_DB_ERROR +prepare stmt_update from "update t1 set i=2"; +--error ER_NO_DB_ERROR +prepare stmt_delete from "delete from t1 where i=2"; +--error ER_NO_DB_ERROR +prepare stmt_select from "select * from t1"; +--error ER_NO_DB_ERROR +prepare stmt_alter from "alter table t1 add column (b int)"; +--error ER_NO_DB_ERROR +prepare stmt_alter1 from "alter table t1 drop column b"; +--error ER_NO_DB_ERROR +prepare stmt_analyze from "analyze table t1"; +--error ER_NO_DB_ERROR +prepare stmt_optimize from "optimize table t1"; +--error ER_NO_DB_ERROR +prepare stmt_show from "show tables like 't1'"; +--error ER_NO_DB_ERROR +prepare stmt_truncate from "truncate table t1"; +--error ER_NO_DB_ERROR +prepare stmt_drop from "drop table t1"; +# +# The above has automatically deallocated all our statements. +# +# Attempt to CREATE a temporary table when no DB used: it should fail +# This proves that no table can be used without explicit specification of +# its database if there is no current database. 
+# +--error ER_NO_DB_ERROR +create temporary table t1 (i int); +# +# Restore the old environemnt +# +use test; # End of 5.0 tests diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index 1d21a5da187..c0dd785a8ce 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -5888,6 +5888,52 @@ DROP FUNCTION bug18037_f1| DROP PROCEDURE bug18037_p1| DROP PROCEDURE bug18037_p2| +# +# Bug#17199: "Table not found" error occurs if the query contains a call +# to a function from another database. +# See also ps.test for an additional test case for this bug. +# +use test| +create table t3 (i int)| +insert into t3 values (1), (2)| +create database mysqltest1| +use mysqltest1| +create function bug17199() returns varchar(2) deterministic return 'ok'| +use test| +select *, mysqltest1.bug17199() from t3| +# +# Bug#18444: Fully qualified stored function names don't work correctly +# in select statements +# +use mysqltest1| +create function bug18444(i int) returns int no sql deterministic return i + 1| +use test| +select mysqltest1.bug18444(i) from t3| +drop database mysqltest1| +# +# Check that current database has no influence to a stored procedure +# +create database mysqltest1 charset=utf8| +create database mysqltest2 charset=utf8| +create procedure mysqltest1.p1() +begin +-- alters the default collation of database test + alter database character set koi8r; +end| +use mysqltest1| +call p1()| +show create database mysqltest1| +show create database mysqltest2| +alter database mysqltest1 character set utf8| +use mysqltest2| +call mysqltest1.p1()| +show create database mysqltest1| +show create database mysqltest2| +drop database mysqltest1| +drop database mysqltest2| +# +# Restore the old environemnt +use test| # # BUG#NNNN: New bug synopsis diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index ce9897afeed..3f728958df1 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1640,13 +1640,13 @@ String *Item_func_database::val_str(String *str) { DBUG_ASSERT(fixed == 1); THD *thd= current_thd; - if (!thd->db) + if (thd->db == NULL) { null_value= 1; return 0; } else - str->copy((const char*) thd->db,(uint) strlen(thd->db),system_charset_info); + str->copy(thd->db, thd->db_length, system_charset_info); return str; } diff --git a/sql/log_event.cc b/sql/log_event.cc index 266d6b064bd..9d6b223f2d7 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1856,9 +1856,10 @@ end: don't suffer from these assignments to 0 as DROP TEMPORARY TABLE uses the db.table syntax. 
*/ - thd->db= thd->catalog= 0; // prevent db from being freed + thd->catalog= 0; + thd->reset_db(NULL, 0); // prevent db from being freed thd->query= 0; // just to be sure - thd->query_length= thd->db_length =0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); @@ -2845,7 +2846,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, TABLE_LIST tables; bzero((char*) &tables,sizeof(tables)); - tables.db = thd->db; + tables.db= thd->strmake(thd->db, thd->db_length); tables.alias = tables.table_name = (char*) table_name; tables.lock_type = TL_WRITE; tables.updating= 1; @@ -2940,7 +2941,7 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, ex.skip_lines = skip_lines; List field_list; thd->main_lex.select_lex.context.resolve_in_table_list_only(&tables); - set_fields(thd->db, field_list, &thd->main_lex.select_lex.context); + set_fields(tables.db, field_list, &thd->main_lex.select_lex.context); thd->variables.pseudo_thread_id= thread_id; List set_fields; if (net) @@ -2987,11 +2988,12 @@ int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, error: thd->net.vio = 0; - char *save_db= thd->db; + const char *remember_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->db= thd->catalog= 0; + thd->catalog= 0; + thd->reset_db(NULL, 0); thd->query= 0; - thd->query_length= thd->db_length= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); close_thread_tables(thd); if (thd->query_error) @@ -3008,7 +3010,7 @@ error: } slave_print_error(rli,sql_errno,"\ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", - err, (char*)table_name, print_slave_db_safe(save_db)); + err, (char*)table_name, print_slave_db_safe(remember_db)); free_root(thd->mem_root,MYF(MY_KEEP_PREALLOC)); return 1; } @@ -3018,7 +3020,7 @@ Error '%s' running LOAD DATA INFILE on table '%s'. Default database: '%s'", { slave_print_error(rli,ER_UNKNOWN_ERROR, "\ Fatal error running LOAD DATA INFILE on table '%s'. Default database: '%s'", - (char*)table_name, print_slave_db_safe(save_db)); + (char*)table_name, print_slave_db_safe(remember_db)); return 1; } diff --git a/sql/slave.cc b/sql/slave.cc index caeefc1ad3c..d884e54d60d 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1581,9 +1581,8 @@ static int create_table_from_dump(THD* thd, MYSQL *mysql, const char* db, // save old db in case we are creating in a different database save_db = thd->db; save_db_length= thd->db_length; - thd->db = (char*)db; - DBUG_ASSERT(thd->db != 0); - thd->db_length= strlen(thd->db); + DBUG_ASSERT(db != 0); + thd->reset_db((char*)db, strlen(db)); mysql_parse(thd, thd->query, packet_len); // run create table thd->db = save_db; // leave things the way the were before thd->db_length= save_db_length; @@ -3704,8 +3703,9 @@ err: sql_print_information("Slave I/O thread exiting, read up to log '%s', position %s", IO_RPL_LOG_NAME, llstr(mi->master_log_pos,llbuff)); VOID(pthread_mutex_lock(&LOCK_thread_count)); - thd->query = thd->db = 0; // extra safety - thd->query_length= thd->db_length= 0; + thd->query= 0; // extra safety + thd->query_length= 0; + thd->reset_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_thread_count)); if (mysql) { @@ -3912,8 +3912,10 @@ the slave SQL thread with \"SLAVE START\". We stopped at log \ should already have done these assignments (each event which sets these variables is supposed to set them to 0 before terminating)). 
*/ - thd->query= thd->db= thd->catalog= 0; - thd->query_length= thd->db_length= 0; + thd->catalog= 0; + thd->reset_db(NULL, 0); + thd->query= 0; + thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); thd->proc_info = "Waiting for slave mutex on exit"; pthread_mutex_lock(&rli->run_lock); diff --git a/sql/slave.h b/sql/slave.h index 040ce4eaf85..ebbb1e64df5 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -526,10 +526,6 @@ int start_slave_thread(pthread_handler h_func, pthread_mutex_t* start_lock, MASTER_INFO* mi, bool high_priority); -/* If fd is -1, dump to NET */ -int mysql_table_dump(THD* thd, const char* db, - const char* tbl_name, int fd = -1); - /* retrieve table from master and copy to slave*/ int fetch_master_table(THD* thd, const char* db_name, const char* table_name, MASTER_INFO* mi, MYSQL* mysql, bool overwrite); diff --git a/sql/sp.cc b/sql/sp.cc index cae7a56fa57..553465ebff8 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -404,7 +404,8 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { LEX *old_lex= thd->lex, newlex; String defstr; - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; ulong old_sql_mode= thd->variables.sql_mode; ha_rows old_select_limit= thd->variables.select_limit; @@ -450,9 +451,7 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, goto end; } - dbchanged= FALSE; - if ((ret= sp_use_new_db(thd, name->m_db.str, olddb, sizeof(olddb), - 1, &dbchanged))) + if ((ret= sp_use_new_db(thd, name->m_db, &old_db, 1, &dbchanged))) goto end; lex_start(thd, (uchar*)defstr.c_ptr(), defstr.length()); @@ -462,14 +461,14 @@ db_load_routine(THD *thd, int type, sp_name *name, sp_head **sphp, { sp_head *sp= newlex.sphead; - if (dbchanged && (ret= mysql_change_db(thd, olddb, 1))) + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) goto end; delete sp; ret= SP_PARSE_ERROR; } else { - if (dbchanged && (ret= mysql_change_db(thd, olddb, 1))) + if (dbchanged && (ret= mysql_change_db(thd, old_db.str, 1))) goto end; *sphp= newlex.sphead; (*sphp)->set_definer(&definer_user_name, &definer_host_name); @@ -505,15 +504,14 @@ db_create_routine(THD *thd, int type, sp_head *sp) int ret; TABLE *table; char definer[USER_HOST_BUFF_SIZE]; - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; DBUG_ENTER("db_create_routine"); DBUG_PRINT("enter", ("type: %d name: %.*s",type,sp->m_name.length, sp->m_name.str)); - dbchanged= FALSE; - if ((ret= sp_use_new_db(thd, sp->m_db.str, olddb, sizeof(olddb), - 0, &dbchanged))) + if ((ret= sp_use_new_db(thd, sp->m_db, &old_db, 0, &dbchanged))) { ret= SP_NO_DB_ERROR; goto done; @@ -641,7 +639,7 @@ db_create_routine(THD *thd, int type, sp_head *sp) done: close_thread_tables(thd); if (dbchanged) - (void)mysql_change_db(thd, olddb, 1); + (void) mysql_change_db(thd, old_db.str, 1); DBUG_RETURN(ret); } @@ -1814,49 +1812,76 @@ create_string(THD *thd, String *buf, } -// -// Utilities... -// + +/* + Change the current database if needed. + + SYNOPSIS + sp_use_new_db() + thd thread handle + + new_db new database name (a string and its length) + + old_db [IN] str points to a buffer where to store the old + database, length contains the size of the buffer + [OUT] if old db was not NULL, its name is copied + to the buffer pointed at by str and length is updated + accordingly. Otherwise str[0] is set to '\0' and length + is set to 0. 
The out parameter should be used only if + the database name has been changed (see dbchangedp). + + dbchangedp [OUT] is set to TRUE if the current database is changed, + FALSE otherwise. A database is not changed if the old + name is the same as the new one, both names are empty, + or an error has occurred. + + RETURN VALUE + 0 success + 1 access denied or out of memory (the error message is + set in THD) +*/ int -sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddblen, +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, bool no_access_check, bool *dbchangedp) { - bool changeit; + int ret; + static char empty_c_string[1]= {0}; /* used for not defined db */ DBUG_ENTER("sp_use_new_db"); - DBUG_PRINT("enter", ("newdb: %s", newdb)); + DBUG_PRINT("enter", ("newdb: %s", new_db.str)); - if (! newdb) - newdb= (char *)""; - if (thd->db && thd->db[0]) + /* + Set new_db to an empty string if it's NULL, because mysql_change_db + requires a non-NULL argument. + new_db.str can be NULL only if we're restoring the old database after + execution of a stored procedure and there were no current database + selected. The stored procedure itself must always have its database + initialized. + */ + if (new_db.str == NULL) + new_db.str= empty_c_string; + + if (thd->db) { - if (my_strcasecmp(system_charset_info, thd->db, newdb) == 0) - changeit= 0; - else - { - changeit= 1; - strnmov(olddb, thd->db, olddblen); - } + old_db->length= (strmake(old_db->str, thd->db, old_db->length) - + old_db->str); } else - { // thd->db empty - if (newdb[0]) - changeit= 1; - else - changeit= 0; - olddb[0] = '\0'; + { + old_db->str[0]= '\0'; + old_db->length= 0; } - if (!changeit) + + /* Don't change the database if the new name is the same as the old one. */ + if (my_strcasecmp(system_charset_info, old_db->str, new_db.str) == 0) { *dbchangedp= FALSE; DBUG_RETURN(0); } - else - { - int ret= mysql_change_db(thd, newdb, no_access_check); - if (! ret) - *dbchangedp= TRUE; - DBUG_RETURN(ret); - } + ret= mysql_change_db(thd, new_db.str, no_access_check); + + *dbchangedp= ret == 0; + DBUG_RETURN(ret); } + diff --git a/sql/sp.h b/sql/sp.h index 2587a9b115a..631b8a87aa2 100644 --- a/sql/sp.h +++ b/sql/sp.h @@ -104,15 +104,15 @@ extern "C" byte* sp_sroutine_key(const byte *ptr, uint *plen, my_bool first); TABLE *open_proc_table_for_read(THD *thd, Open_tables_state *backup); void close_proc_table(THD *thd, Open_tables_state *backup); -// -// Utilities... -// -// Do a "use newdb". The current db is stored at olddb. -// If newdb is the same as the current one, nothing is changed. -// dbchangedp is set to true if the db was actually changed. +/* + Do a "use new_db". The current db is stored at old_db. If new_db is the + same as the current one, nothing is changed. dbchangedp is set to true if + the db was actually changed. +*/ + int -sp_use_new_db(THD *thd, char *newdb, char *olddb, uint olddbmax, +sp_use_new_db(THD *thd, LEX_STRING new_db, LEX_STRING *old_db, bool no_access_check, bool *dbchangedp); #endif /* _SP_H_ */ diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 3b29a841966..02eed207f55 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -376,24 +376,6 @@ sp_name::init_qname(THD *thd) m_name.length, m_name.str); } -sp_name * -sp_name_current_db_new(THD *thd, LEX_STRING name) -{ - sp_name *qname; - - if (! 
thd->db) - qname= new sp_name(name); - else - { - LEX_STRING db; - - db.length= strlen(thd->db); - db.str= thd->strmake(thd->db, db.length); - qname= new sp_name(db, name); - } - qname->init_qname(thd); - return qname; -} /* Check that the name 'ident' is ok. It's assumed to be an 'ident' @@ -504,27 +486,20 @@ sp_head::init_strings(THD *thd, LEX *lex, sp_name *name) /* During parsing, we must use thd->mem_root */ MEM_ROOT *root= thd->mem_root; - /* We have to copy strings to get them into the right memroot */ - if (name) - { - m_db.length= name->m_db.length; - if (name->m_db.length == 0) - m_db.str= NULL; - else - m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); - m_name.length= name->m_name.length; - m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + DBUG_ASSERT(name); + /* Must be initialized in the parser */ + DBUG_ASSERT(name->m_db.str && name->m_db.length); - if (name->m_qname.length == 0) - name->init_qname(thd); - m_qname.length= name->m_qname.length; - m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); - } - else if (thd->db) - { - m_db.length= thd->db_length; - m_db.str= strmake_root(root, thd->db, m_db.length); - } + /* We have to copy strings to get them into the right memroot */ + m_db.length= name->m_db.length; + m_db.str= strmake_root(root, name->m_db.str, name->m_db.length); + m_name.length= name->m_name.length; + m_name.str= strmake_root(root, name->m_name.str, name->m_name.length); + + if (name->m_qname.length == 0) + name->init_qname(thd); + m_qname.length= name->m_qname.length; + m_qname.str= strmake_root(root, name->m_qname.str, m_qname.length); if (m_param_begin && m_param_end) { @@ -933,7 +908,8 @@ bool sp_head::execute(THD *thd) { DBUG_ENTER("sp_head::execute"); - char olddb[128]; + char old_db_buf[NAME_LEN+1]; + LEX_STRING old_db= { old_db_buf, sizeof(old_db_buf) }; bool dbchanged; sp_rcontext *ctx; bool err_status= FALSE; @@ -980,10 +956,8 @@ sp_head::execute(THD *thd) m_first_instance->m_last_cached_sp == this) || (m_recursion_level + 1 == m_next_cached_sp->m_recursion_level)); - dbchanged= FALSE; if (m_db.length && - (err_status= sp_use_new_db(thd, m_db.str, olddb, sizeof(olddb), 0, - &dbchanged))) + (err_status= sp_use_new_db(thd, m_db, &old_db, 0, &dbchanged))) goto done; if ((ctx= thd->spcont)) @@ -1155,10 +1129,10 @@ sp_head::execute(THD *thd) { /* No access check when changing back to where we came from. - (It would generate an error from mysql_change_db() when olddb=="") + (It would generate an error from mysql_change_db() when old_db=="") */ if (! 
thd->killed) - err_status|= mysql_change_db(thd, olddb, 1); + err_status|= mysql_change_db(thd, old_db.str, 1); } m_flags&= ~IS_INVOKED; DBUG_PRINT("info", @@ -1816,9 +1790,6 @@ sp_head::reset_thd_mem_root(THD *thd) (ulong) &mem_root, (ulong) &thd->mem_root)); free_list= thd->free_list; // Keep the old list thd->free_list= NULL; // Start a new one - /* Copy the db, since substatements will point to it */ - m_thd_db= thd->db; - thd->db= thd->strmake(thd->db, thd->db_length); m_thd= thd; DBUG_VOID_RETURN; } @@ -1834,7 +1805,6 @@ sp_head::restore_thd_mem_root(THD *thd) DBUG_PRINT("info", ("mem_root 0x%lx returned from thd mem root 0x%lx", (ulong) &mem_root, (ulong) &thd->mem_root)); thd->free_list= flist; // Restore the old one - thd->db= m_thd_db; // Restore the original db pointer thd->mem_root= m_thd_root; m_thd= NULL; DBUG_VOID_RETURN; diff --git a/sql/sp_head.h b/sql/sp_head.h index d5f49d8a964..073cca2cd12 100644 --- a/sql/sp_head.h +++ b/sql/sp_head.h @@ -61,13 +61,6 @@ public: */ LEX_STRING m_sroutines_key; - sp_name(LEX_STRING name) - : m_name(name) - { - m_db.str= m_qname.str= m_sroutines_key.str= 0; - m_db.length= m_qname.length= m_sroutines_key.length= 0; - } - sp_name(LEX_STRING db, LEX_STRING name) : m_db(db), m_name(name) { @@ -101,8 +94,6 @@ public: {} }; -sp_name * -sp_name_current_db_new(THD *thd, LEX_STRING name); bool check_routine_name(LEX_STRING name); @@ -355,7 +346,6 @@ private: MEM_ROOT *m_thd_root; // Temp. store for thd's mem_root THD *m_thd; // Set if we have reset mem_root - char *m_thd_db; // Original thd->db pointer sp_pcontext *m_pcont; // Parse context List m_lex; // Temp. store for the other lex diff --git a/sql/sql_class.h b/sql/sql_class.h index 0ddba0e6f05..b63f88d7210 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1570,6 +1570,47 @@ public: void restore_sub_statement_state(Sub_statement_state *backup); void set_n_backup_active_arena(Query_arena *set, Query_arena *backup); void restore_active_arena(Query_arena *set, Query_arena *backup); + + /* + Initialize the current database from a NULL-terminated string with length + */ + void set_db(const char *new_db, uint new_db_len) + { + if (new_db) + { + /* Do not reallocate memory if current chunk is big enough. */ + if (db && db_length >= new_db_len) + memcpy(db, new_db, new_db_len+1); + else + { + safeFree(db); + db= my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)); + } + db_length= db ? new_db_len: 0; + } + } + void reset_db(char *new_db, uint new_db_len) + { + db= new_db; + db_length= new_db_len; + } + /* + Copy the current database to the argument. Use the current arena to + allocate memory for a deep copy: current database may be freed after + a statement is parsed but before it's executed. 
+ */ + bool copy_db_to(char **p_db, uint *p_db_length) + { + if (db == NULL) + { + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + return TRUE; + } + *p_db= strmake(db, db_length); + if (p_db_length) + *p_db_length= db_length; + return FALSE; + } }; @@ -1915,7 +1956,7 @@ typedef struct st_sort_buffer { class Table_ident :public Sql_alloc { - public: +public: LEX_STRING db; LEX_STRING table; SELECT_LEX_UNIT *sel; diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 4caa0076c60..348d43dc702 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -773,8 +773,7 @@ exit: { if (!(thd->slave_thread)) /* a slave thread will free it itself */ x_free(thd->db); - thd->db= 0; - thd->db_length= 0; + thd->reset_db(NULL, 0); } exit2: VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); @@ -1186,14 +1185,10 @@ end: { if (!(thd->slave_thread)) my_free(dbname, MYF(0)); - thd->db= NULL; - thd->db_length= 0; + thd->reset_db(NULL, 0); } else - { - thd->db= dbname; // THD::~THD will free this - thd->db_length= db_length; - } + thd->reset_db(dbname, db_length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!no_access_check) sctx->db_access= db_access; diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 26f3b6f5faa..9979b484292 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -298,9 +298,8 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list, { if (thd->locked_tables) { - if (find_locked_table(thd, - table_list->db ? table_list->db : thd->db, - table_list->table_name)) + DBUG_ASSERT(table_list->db); /* Must be set in the parser */ + if (find_locked_table(thd, table_list->db, table_list->table_name)) { my_error(ER_DELAYED_INSERT_TABLE_LOCKED, MYF(0), table_list->table_name); @@ -1332,8 +1331,8 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) TABLE *table; DBUG_ENTER("delayed_get_table"); - if (!table_list->db) - table_list->db=thd->db; + /* Must be set in the parser */ + DBUG_ASSERT(table_list->db); /* Find the thread which handles this table. */ if (!(tmp=find_handler(thd,table_list))) @@ -1372,15 +1371,15 @@ static TABLE *delayed_get_table(THD *thd,TABLE_LIST *table_list) pthread_mutex_lock(&LOCK_thread_count); thread_count++; pthread_mutex_unlock(&LOCK_thread_count); - if (!(tmp->thd.db=my_strdup(table_list->db,MYF(MY_WME))) || - !(tmp->thd.query=my_strdup(table_list->table_name,MYF(MY_WME)))) + tmp->thd.set_db(table_list->db, strlen(table_list->db)); + tmp->thd.query= my_strdup(table_list->table_name,MYF(MY_WME)); + if (tmp->thd.db == NULL || tmp->thd.query == NULL) { delete tmp; my_message(ER_OUT_OF_RESOURCES, ER(ER_OUT_OF_RESOURCES), MYF(0)); goto err1; } tmp->table_list= *table_list; // Needed to open table - tmp->table_list.db= tmp->thd.db; tmp->table_list.alias= tmp->table_list.table_name= tmp->thd.query; tmp->lock(); pthread_mutex_lock(&tmp->mutex); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 6b5c6ddca60..e736aa13fa2 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -758,6 +758,11 @@ public: *this= *state; } + /* + Direct addition to the list of query tables. + If you are using this function, you must ensure that the table + object, in particular table->db member, is initialized. 
+ */ void add_to_query_tables(TABLE_LIST *table) { *(table->prev_global= query_tables_last)= table; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 213a7730824..fcdd5d91c44 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -93,8 +93,6 @@ const char *xa_state_names[]={ "NON-EXISTING", "ACTIVE", "IDLE", "PREPARED" }; -static char empty_c_string[1]= {0}; // Used for not defined 'db' - #ifdef __WIN__ static void test_signal(int sig_ptr) { @@ -300,8 +298,7 @@ int check_user(THD *thd, enum enum_server_command command, thd->db is saved in caller and needs to be freed by caller if this function returns 0 */ - thd->db= 0; - thd->db_length= 0; + thd->reset_db(NULL, 0); if (mysql_change_db(thd, db, FALSE)) { /* Send the error to the client */ @@ -341,9 +338,8 @@ int check_user(THD *thd, enum enum_server_command command, if connect failed. Also in case of 'CHANGE USER' failure, current database will be switched to 'no database selected'. */ - thd->db= 0; - thd->db_length= 0; - + thd->reset_db(NULL, 0); + USER_RESOURCES ur; int res= acl_getroot(thd, &ur, passwd, passwd_len); #ifndef EMBEDDED_LIBRARY @@ -1316,19 +1312,6 @@ end: DBUG_RETURN(0); } - /* This works because items are allocated with sql_alloc() */ - -void free_items(Item *item) -{ - Item *next; - DBUG_ENTER("free_items"); - for (; item ; item=next) - { - next=item->next; - item->delete_self(); - } - DBUG_VOID_RETURN; -} /* This works because items are allocated with sql_alloc() */ @@ -1340,7 +1323,26 @@ void cleanup_items(Item *item) DBUG_VOID_RETURN; } -int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) +/* + Handle COM_TABLE_DUMP command + + SYNOPSIS + mysql_table_dump + thd thread handle + db database name or an empty string. If empty, + the current database of the connection is used + tbl_name name of the table to dump + + NOTES + This function is written to handle one specific command only. + + RETURN VALUE + 0 success + 1 error, the error message is set in THD +*/ + +static +int mysql_table_dump(THD* thd, char* db, char* tbl_name) { TABLE* table; TABLE_LIST* table_list; @@ -1377,7 +1379,7 @@ int mysql_table_dump(THD* thd, char* db, char* tbl_name, int fd) goto err; } net_flush(&thd->net); - if ((error= table->file->dump(thd,fd))) + if ((error= table->file->dump(thd,-1))) my_error(ER_GET_ERRNO, MYF(0), error); err: @@ -1627,7 +1629,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } tbl_name= strmake(db, packet + 1, db_len)+1; strmake(tbl_name, packet + db_len + 2, tbl_len); - mysql_table_dump(thd, db, tbl_name, -1); + mysql_table_dump(thd, db, tbl_name); break; } case COM_CHANGE_USER: @@ -1801,11 +1803,8 @@ bool dispatch_command(enum enum_server_command command, THD *thd, statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_FIELDS], &LOCK_status); bzero((char*) &table_list,sizeof(table_list)); - if (!(table_list.db=thd->db)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); + if (thd->copy_db_to(&table_list.db, 0)) break; - } pend= strend(packet); thd->convert_string(&conv_name, system_charset_info, packet, (uint) (pend-packet), thd->charset()); @@ -2152,6 +2151,34 @@ void log_slow_statement(THD *thd) } +/* + Create a TABLE_LIST object for an INFORMATION_SCHEMA table. 
+ + SYNOPSIS + prepare_schema_table() + thd thread handle + lex current lex + table_ident table alias if it's used + schema_table_idx the type of the INFORMATION_SCHEMA table to be + created + + DESCRIPTION + This function is used in the parser to convert a SHOW or DESCRIBE + table_name command to a SELECT from INFORMATION_SCHEMA. + It prepares a SELECT_LEX and a TABLE_LIST object to represent the + given command as a SELECT parse tree. + + NOTES + Due to the way this function works with memory and LEX it cannot + be used outside the parser (parse tree transformations outside + the parser break PS and SP). + + RETURN VALUE + 0 success + 1 out of memory or SHOW commands are not allowed + in this version of the server. +*/ + int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, enum enum_schema_tables schema_table_idx) { @@ -2179,13 +2206,13 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, DBUG_RETURN(1); #else { - char *db= lex->select_lex.db ? lex->select_lex.db : thd->db; - if (!db) + char *db; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, 0)) { - my_message(ER_NO_DB_ERROR, - ER(ER_NO_DB_ERROR), MYF(0)); /* purecov: inspected */ - DBUG_RETURN(1); /* purecov: inspected */ + DBUG_RETURN(1); } + db= lex->select_lex.db; remove_escape(db); // Fix escaped '_' if (check_db_name(db)) { @@ -2202,11 +2229,6 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident, db); DBUG_RETURN(1); } - /* - We need to do a copy to make this prepared statement safe if this - was thd->db - */ - lex->select_lex.db= thd->strdup(db); break; } #endif @@ -2739,8 +2761,8 @@ mysql_execute_command(THD *thd) case SQLCOM_LOAD_MASTER_TABLE: { DBUG_ASSERT(first_table == all_tables && first_table != 0); - if (!first_table->db) - first_table->db= thd->db; + DBUG_ASSERT(first_table->db); /* Must be set in the parser */ + if (check_access(thd, CREATE_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table))) @@ -2988,25 +3010,8 @@ end_with_restore_list: my_error(ER_WRONG_TABLE_NAME, MYF(0), lex->name); goto error; } - if (!select_lex->db) - { - /* - In the case of ALTER TABLE ... RENAME we should supply the - default database if the new name is not explicitly qualified - by a database. (Bug #11493) - */ - if (lex->alter_info.flags & ALTER_RENAME) - { - if (! thd->db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - goto error; - } - select_lex->db= thd->db; - } - else - select_lex->db= first_table->db; - } + /* Must be set in the parser */ + DBUG_ASSERT(select_lex->db); if (check_access(thd, ALTER_ACL, first_table->db, &first_table->grant.privilege, 0, 0, test(first_table->schema_table)) || @@ -3685,12 +3690,8 @@ end_with_restore_list: } case SQLCOM_ALTER_DB: { - char *db= lex->name ? 
lex->name : thd->db; - if (!db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - break; - } + char *db= lex->name; + DBUG_ASSERT(db); /* Must be set in the parser */ if (!strip_sp(db) || check_db_name(db)) { my_error(ER_WRONG_DB_NAME, MYF(0), lex->name); @@ -4139,23 +4140,11 @@ end_with_restore_list: case SQLCOM_CREATE_SPFUNCTION: { uint namelen; - char *name, *db; + char *name; int result; DBUG_ASSERT(lex->sphead != 0); - - if (!lex->sphead->m_db.str || !lex->sphead->m_db.str[0]) - { - if (!thd->db) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); - delete lex->sphead; - lex->sphead= 0; - goto error; - } - lex->sphead->m_db.length= strlen(thd->db); - lex->sphead->m_db.str= thd->db; - } + DBUG_ASSERT(lex->sphead->m_db.str); /* Must be initialized in the parser */ if (check_access(thd, CREATE_PROC_ACL, lex->sphead->m_db.str, 0, 0, 0, is_schema_db(lex->sphead->m_db.str))) @@ -4272,41 +4261,27 @@ end_with_restore_list: } #endif /* NO_EMBEDDED_ACCESS_CHECKS */ - /* - We need to copy name and db in order to use them for - check_routine_access which is called after lex->sphead has - been deleted. - */ - name= thd->strdup(name); - lex->sphead->m_db.str= db= thd->strmake(lex->sphead->m_db.str, - lex->sphead->m_db.length); res= (result= lex->sphead->create(thd)); if (result == SP_OK) { - /* - We must cleanup the unit and the lex here because - sp_grant_privileges calls (indirectly) db_find_routine, - which in turn may call MYSQLparse with THD::lex. - TODO: fix db_find_routine to use a temporary lex. - */ - lex->unit.cleanup(); - delete lex->sphead; - lex->sphead= 0; #ifndef NO_EMBEDDED_ACCESS_CHECKS /* only add privileges if really neccessary */ if (sp_automatic_privileges && !opt_noacl && check_routine_access(thd, DEFAULT_CREATE_PROC_ACLS, - db, name, + lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE, 1)) { close_thread_tables(thd); - if (sp_grant_privileges(thd, db, name, + if (sp_grant_privileges(thd, lex->sphead->m_db.str, name, lex->sql_command == SQLCOM_CREATE_PROCEDURE)) push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_PROC_AUTO_GRANT_FAIL, ER(ER_PROC_AUTO_GRANT_FAIL)); } #endif + lex->unit.cleanup(); + delete lex->sphead; + lex->sphead= 0; send_ok(thd); } else @@ -4721,7 +4696,8 @@ end_with_restore_list: view_store_options(thd, first_table, &buff); buff.append(STRING_WITH_LEN("VIEW ")); /* Test if user supplied a db (ie: we did not use thd->db) */ - if (first_table->db != thd->db && first_table->db[0]) + if (first_table->db && first_table->db[0] && + (thd->db == NULL || strcmp(first_table->db, thd->db))) { append_identifier(thd, &buff, first_table->db, first_table->db_length); @@ -5244,7 +5220,7 @@ check_table_access(THD *thd, ulong want_access,TABLE_LIST *tables, (want_access & ~EXTRA_ACL) && thd->db) tables->grant.privilege= want_access; - else if (tables->db && tables->db == thd->db) + else if (tables->db && thd->db && strcmp(tables->db, thd->db) == 0) { if (found && !grant_option) // db already checked tables->grant.privilege=found_access; @@ -5392,22 +5368,25 @@ bool check_merge_table_access(THD *thd, char *db, static bool check_db_used(THD *thd,TABLE_LIST *tables) { + char *current_db= NULL; for (; tables; tables= tables->next_global) { - if (!tables->db) + if (tables->db == NULL) { - if (!(tables->db=thd->db)) - { - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), - MYF(0)); /* purecov: tested */ - return TRUE; /* purecov: tested */ - } + /* + This code never works and should be removed in 5.1. 
All tables + that are added to the list of tables should already have its + database field initialized properly (see st_lex::add_table_to_list). + */ + DBUG_ASSERT(0); + if (thd->copy_db_to(¤t_db, 0)) + return TRUE; + tables->db= current_db; } } return FALSE; } - /**************************************************************************** Check stack size; Send error if there isn't enough stack to continue ****************************************************************************/ @@ -6027,19 +6006,8 @@ TABLE_LIST *st_select_lex::add_table_to_list(THD *thd, ptr->db= table->db.str; ptr->db_length= table->db.length; } - else if (thd->db) - { - ptr->db= thd->db; - ptr->db_length= thd->db_length; - } - else - { - /* The following can't be "" as we may do 'casedn_str()' on it */ - ptr->db= empty_c_string; - ptr->db_length= 0; - } - if (thd->stmt_arena->is_stmt_prepare_or_first_sp_execute()) - ptr->db= thd->strdup(ptr->db); + else if (thd->copy_db_to(&ptr->db, &ptr->db_length)) + DBUG_RETURN(0); ptr->alias= alias_str; if (lower_case_table_names && table->table.length) @@ -7216,6 +7184,8 @@ bool insert_precheck(THD *thd, TABLE_LIST *tables) my_message(ER_WRONG_VALUE_COUNT, ER(ER_WRONG_VALUE_COUNT), MYF(0)); DBUG_RETURN(TRUE); } + if (check_db_used(thd, tables)) + DBUG_RETURN(TRUE); DBUG_RETURN(FALSE); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 9ec8e8db1fb..fbceea84ce5 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2672,7 +2672,8 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, TABLE_LIST src_tables_list; DBUG_ENTER("mysql_create_like_table"); - src_db= table_ident->db.str ? table_ident->db.str : thd->db; + DBUG_ASSERT(table_ident->db.str); /* Must be set in the parser */ + src_db= table_ident->db.str; /* Validate the source table diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index f943b014118..db1d1a10b11 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -932,8 +932,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, save_db.str= thd->db; save_db.length= thd->db_length; - thd->db_length= strlen(db); - thd->db= (char *) db; + thd->reset_db((char*) db, strlen(db)); while ((trg_create_str= it++)) { trg_sql_mode= itm++; @@ -1035,8 +1034,7 @@ bool Table_triggers_list::check_n_load(THD *thd, const char *db, lex_end(&lex); } - thd->db= save_db.str; - thd->db_length= save_db.length; + thd->reset_db(save_db.str, save_db.length); thd->lex= old_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; @@ -1049,8 +1047,7 @@ err_with_lex_cleanup: thd->lex= old_lex; thd->spcont= save_spcont; thd->variables.sql_mode= save_sql_mode; - thd->db= save_db.str; - thd->db_length= save_db.length; + thd->reset_db(save_db.str, save_db.length); DBUG_RETURN(1); } diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index 6269c0a2eb3..95589a58b37 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -140,6 +140,7 @@ void udf_init() READ_RECORD read_record_info; TABLE *table; int error; + char db[]= "mysql"; /* A subject to casednstr, can't be constant */ DBUG_ENTER("ufd_init"); if (initialized) @@ -161,13 +162,12 @@ void udf_init() initialized = 1; new_thd->thread_stack= (char*) &new_thd; new_thd->store_globals(); - new_thd->db= my_strdup("mysql", MYF(0)); - new_thd->db_length=5; + new_thd->set_db(db, sizeof(db)-1); bzero((gptr) &tables,sizeof(tables)); tables.alias= tables.table_name= (char*) "func"; tables.lock_type = TL_READ; - tables.db=new_thd->db; + tables.db= db; if (simple_open_n_lock_tables(new_thd, &tables)) { diff --git a/sql/sql_view.cc 
b/sql/sql_view.cc index 0f836bd58ff..1561ade78af 100644 --- a/sql/sql_view.cc +++ b/sql/sql_view.cc @@ -452,15 +452,15 @@ bool mysql_create_view(THD *thd, */ for (sl= select_lex; sl; sl= sl->next_select()) { - char *db= view->db ? view->db : thd->db; + DBUG_ASSERT(view->db); /* Must be set in the parser */ List_iterator_fast it(sl->item_list); Item *item; - fill_effective_table_privileges(thd, &view->grant, db, + fill_effective_table_privileges(thd, &view->grant, view->db, view->table_name); while ((item= it++)) { Item_field *fld; - uint priv= (get_column_grant(thd, &view->grant, db, + uint priv= (get_column_grant(thd, &view->grant, view->db, view->table_name, item->name) & VIEW_ANY_ACL); if ((fld= item->filed_for_view_update())) @@ -641,8 +641,7 @@ static int mysql_register_view(THD *thd, TABLE_LIST *view, if (!parser->ok() || !is_equal(&view_type, parser->type())) { - my_error(ER_WRONG_OBJECT, MYF(0), - (view->db ? view->db : thd->db), view->table_name, "VIEW"); + my_error(ER_WRONG_OBJECT, MYF(0), view->db, view->table_name, "VIEW"); DBUG_RETURN(-1); } diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index b2dbc517fa4..954024df500 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1237,12 +1237,18 @@ sp_name: } | ident { + THD *thd= YYTHD; + LEX_STRING db; if (check_routine_name($1)) { my_error(ER_SP_WRONG_NAME, MYF(0), $1.str); YYABORT; } - $$= sp_name_current_db_new(YYTHD, $1); + if (thd->copy_db_to(&db.str, &db.length)) + YYABORT; + $$= new sp_name(db, $1); + if ($$) + $$->init_qname(YYTHD); } ; @@ -2405,14 +2411,26 @@ create2: | LIKE table_ident { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->name= (char *)$2)) YYABORT; + if ($2->db.str == NULL && + thd->copy_db_to(&($2->db.str), &($2->db.length))) + { + YYABORT; + } } | '(' LIKE table_ident ')' { LEX *lex=Lex; + THD *thd= lex->thd; if (!(lex->name= (char *)$3)) YYABORT; + if ($3->db.str == NULL && + thd->copy_db_to(&($3->db.str), &($3->db.length))) + { + YYABORT; + } } ; @@ -3240,7 +3258,9 @@ alter: lex->key_list.empty(); lex->col_list.empty(); lex->select_lex.init_order(); - lex->select_lex.db=lex->name=0; + lex->select_lex.db= + ((TABLE_LIST*) lex->select_lex.table_list.first)->db; + lex->name=0; bzero((char*) &lex->create_info,sizeof(lex->create_info)); lex->create_info.db_type= DB_TYPE_DEFAULT; lex->create_info.default_table_charset= NULL; @@ -3258,8 +3278,11 @@ alter: opt_create_database_options { LEX *lex=Lex; + THD *thd= Lex->thd; lex->sql_command=SQLCOM_ALTER_DB; lex->name= $3; + if (lex->name == NULL && thd->copy_db_to(&lex->name, NULL)) + YYABORT; } | ALTER PROCEDURE sp_name { @@ -3421,14 +3444,20 @@ alter_list_item: | RENAME opt_to table_ident { LEX *lex=Lex; + THD *thd= lex->thd; lex->select_lex.db=$3->db.str; - lex->name= $3->table.str; + if (lex->select_lex.db == NULL && + thd->copy_db_to(&lex->select_lex.db, NULL)) + { + YYABORT; + } if (check_table_name($3->table.str,$3->table.length) || $3->db.str && check_db_name($3->db.str)) { my_error(ER_WRONG_TABLE_NAME, MYF(0), $3->table.str); YYABORT; } + lex->name= $3->table.str; lex->alter_info.flags|= ALTER_RENAME; } | CONVERT_SYM TO_SYM charset charset_name_or_default opt_collate @@ -4742,7 +4771,13 @@ simple_expr: #endif /* HAVE_DLOPEN */ { LEX *lex= Lex; - sp_name *name= sp_name_current_db_new(YYTHD, $1); + THD *thd= lex->thd; + LEX_STRING db; + if (thd->copy_db_to(&db.str, &db.length)) + YYABORT; + sp_name *name= new sp_name(db, $1); + if (name) + name->init_qname(thd); sp_add_used_routine(lex, YYTHD, name, TYPE_ENUM_FUNCTION); if ($4) @@ -8460,7 +8495,9 @@ 
grant_ident: '*' { LEX *lex= Lex; - lex->current_select->db= lex->thd->db; + THD *thd= lex->thd; + if (thd->copy_db_to(&lex->current_select->db, NULL)) + YYABORT; if (lex->grant == GLOBAL_ACLS) lex->grant = DB_ACLS & ~GRANT_ACL; else if (lex->columns.elements) diff --git a/sql/tztime.cc b/sql/tztime.cc index 079abfc9299..d12aef47b40 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1548,6 +1548,7 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) TABLE *table; Tz_names_entry *tmp_tzname; my_bool return_val= 1; + char db[]= "mysql"; int res; DBUG_ENTER("my_tz_init"); @@ -1604,13 +1605,12 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) leap seconds shared by all time zones. */ - thd->db= my_strdup("mysql",MYF(0)); - thd->db_length= 5; // Safety + thd->set_db(db, sizeof(db)-1); bzero((char*) &tables_buff, sizeof(TABLE_LIST)); tables_buff[0].alias= tables_buff[0].table_name= (char*)"time_zone_leap_second"; tables_buff[0].lock_type= TL_READ; - tables_buff[0].db= thd->db; + tables_buff[0].db= db; /* Fill TABLE_LIST for the rest of the time zone describing tables and link it to first one. From c90f464d8fc051a338b0b99849befd919e4e3431 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 26 Jun 2006 23:44:17 +0200 Subject: [PATCH 37/74] make_sharedlib_distribution.sh: For compatibility, don't use {..,..} in pattern matching make_binary_distribution.sh: Added .dylib and .sl as shared library extensions scripts/make_binary_distribution.sh: Added .dylib and .sl as shared library extensions scripts/make_sharedlib_distribution.sh: For compatibility, don't use {..,..} in pattern matching --- scripts/make_binary_distribution.sh | 18 ++++++++++++++---- scripts/make_sharedlib_distribution.sh | 6 ++++-- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index f372762c05d..396c4f83bac 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -154,11 +154,21 @@ if [ $BASE_SYSTEM = "netware" ] ; then fi for i in \ - libmysql/.libs/libmysqlclient.a libmysql/.libs/libmysqlclient.so* \ - libmysql/libmysqlclient.* libmysql_r/.libs/libmysqlclient_r.a \ - libmysql_r/.libs/libmysqlclient_r.so* libmysql_r/libmysqlclient_r.* \ + libmysql/.libs/libmysqlclient.a \ + libmysql/.libs/libmysqlclient.so* \ + libmysql/.libs/libmysqlclient.sl* \ + libmysql/.libs/libmysqlclient*.dylib \ + libmysql/libmysqlclient.* \ + libmysql_r/.libs/libmysqlclient_r.a \ + libmysql_r/.libs/libmysqlclient_r.so* \ + libmysql_r/.libs/libmysqlclient_r.sl* \ + libmysql_r/.libs/libmysqlclient_r*.dylib \ + libmysql_r/libmysqlclient_r.* \ + libmysqld/.libs/libmysqld.a \ + libmysqld/.libs/libmysqld.so* \ + libmysqld/.libs/libmysqld.sl* \ + libmysqld/.libs/libmysqld*.dylib \ mysys/libmysys.a strings/libmystrings.a dbug/libdbug.a \ - libmysqld/.libs/libmysqld.a libmysqld/.libs/libmysqld.so* \ libmysqld/libmysqld.a netware/libmysql.imp do if [ -f $i ] diff --git a/scripts/make_sharedlib_distribution.sh b/scripts/make_sharedlib_distribution.sh index fbc945e445a..c475d0e14a4 100644 --- a/scripts/make_sharedlib_distribution.sh +++ b/scripts/make_sharedlib_distribution.sh @@ -45,9 +45,11 @@ fi mkdir -p $BASE/lib for i in \ - libmysql/.libs/libmysqlclient.s{l,o}* \ + libmysql/.libs/libmysqlclient.so* \ + libmysql/.libs/libmysqlclient.sl* \ libmysql/.libs/libmysqlclient*.dylib \ - libmysql_r/.libs/libmysqlclient_r.s{l,o}* \ + libmysql_r/.libs/libmysqlclient_r.so* \ + 
libmysql_r/.libs/libmysqlclient_r.sl* \ libmysql_r/.libs/libmysqlclient_r*.dylib do if [ -f $i ] From 0335013f17743299cf32731e1db986170672acbe Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 10:02:58 +0200 Subject: [PATCH 38/74] Bug #19852 Restoring backup made from cluster with full data memory fails - make sure to allocate just enough pages in the fragments by using the actual row count from the backup, to avoid over allocation of pages to fragments, and thus avoid the bug ndb/include/kernel/GlobalSignalNumbers.h: Bug #19852 Restoring backup made from cluster with full data memory fails - distribute fragment complete to all participants to update row count ndb/include/kernel/signaldata/BackupContinueB.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - time slica writing of fragment info to ctl file ndb/include/kernel/signaldata/BackupImpl.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - 32 -> 64 bit on bytes and records - new signal fragment complete to all participants ndb/include/kernel/signaldata/BackupSignalData.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - 32 -> 64 bit on bytes and records ndb/include/kernel/signaldata/DictTabInfo.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - add min and max rows to dict tab info ndb/include/kernel/signaldata/LqhFrag.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to add frag req ndb/include/kernel/signaldata/TupFrag.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to add frag req ndb/include/ndbapi/NdbDictionary.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added get/set of min max rows ndb/src/common/debugger/signaldata/BackupImpl.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - 32 -> 64 bit on bytes and records ndb/src/common/debugger/signaldata/BackupSignalData.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - 32 -> 64 bit on bytes and records ndb/src/common/debugger/signaldata/DictTabInfo.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to dict tab info ndb/src/common/debugger/signaldata/LqhFrag.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to frag req ndb/src/kernel/blocks/backup/Backup.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - new section in backup with per fragment info in ctl file - 32 -> 64 bit on bytes and records ndb/src/kernel/blocks/backup/Backup.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - new section in backup with per fragment info in ctl file - 32 -> 64 bit on bytes and records ndb/src/kernel/blocks/backup/BackupFormat.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - new section in backup with per fragment info in ctl file - 32 -> 64 bit on bytes and records ndb/src/kernel/blocks/backup/BackupInit.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - new signal fragment complete to all participants ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added max and min rows to dict table object ndb/src/kernel/blocks/dbdict/Dbdict.hpp: Bug #19852 Restoring backup made from cluster 
with full data memory fails - added max and min rows to dict table object ndb/src/kernel/blocks/dblqh/Dblqh.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to frag req ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to frag req ndb/src/kernel/blocks/dbtup/Dbtup.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to frag req ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - added min and max rows to frag req - move memory allocation to fragment to after adding of attributes to get correct headsize - allocate pages to fragments according to min rows setting ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - grow page allocation starting from 2 irrespective of first page allocation ndb/src/mgmsrv/MgmtSrvr.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - 32 -> 64 bits on bytes and records ndb/src/mgmsrv/MgmtSrvr.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - 32 -> 64 bits on bytes and records ndb/src/ndbapi/NdbDictionary.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - min and max rows in dict ndb/src/ndbapi/NdbDictionaryImpl.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - min and max rows in dict ndb/src/ndbapi/NdbDictionaryImpl.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - min and max rows in dict ndb/tools/restore/Restore.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - add retrieval of fragment info ndb/tools/restore/Restore.hpp: Bug #19852 Restoring backup made from cluster with full data memory fails - add retrieval of fragment info ndb/tools/restore/consumer_restore.cpp: Bug #19852 Restoring backup made from cluster with full data memory fails - set min in restore to the actual row count (this is the actual bug fix) sql/ha_ndbcluster.cc: Bug #19852 Restoring backup made from cluster with full data memory fails - set min and max rows according to sql definition --- ndb/include/kernel/GlobalSignalNumbers.h | 4 +- .../kernel/signaldata/BackupContinueB.hpp | 3 +- ndb/include/kernel/signaldata/BackupImpl.hpp | 22 ++- .../kernel/signaldata/BackupSignalData.hpp | 8 +- ndb/include/kernel/signaldata/DictTabInfo.hpp | 11 ++ ndb/include/kernel/signaldata/LqhFrag.hpp | 25 +-- ndb/include/kernel/signaldata/TupFrag.hpp | 15 +- ndb/include/ndbapi/NdbDictionary.hpp | 14 ++ .../common/debugger/signaldata/BackupImpl.cpp | 6 +- .../debugger/signaldata/BackupSignalData.cpp | 6 +- .../debugger/signaldata/DictTabInfo.cpp | 8 + .../common/debugger/signaldata/LqhFrag.cpp | 6 +- ndb/src/kernel/blocks/backup/Backup.cpp | 163 +++++++++++++++--- ndb/src/kernel/blocks/backup/Backup.hpp | 15 +- ndb/src/kernel/blocks/backup/BackupFormat.hpp | 17 +- ndb/src/kernel/blocks/backup/BackupInit.cpp | 3 + ndb/src/kernel/blocks/dbdict/Dbdict.cpp | 32 +++- ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 4 + ndb/src/kernel/blocks/dblqh/Dblqh.hpp | 10 +- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 94 ++++++---- ndb/src/kernel/blocks/dbtup/Dbtup.hpp | 5 +- ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp | 79 +++++---- ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp | 6 +- ndb/src/mgmsrv/MgmtSrvr.cpp | 10 +- 
ndb/src/mgmsrv/MgmtSrvr.hpp | 4 +- ndb/src/ndbapi/NdbDictionary.cpp | 24 +++ ndb/src/ndbapi/NdbDictionaryImpl.cpp | 20 +++ ndb/src/ndbapi/NdbDictionaryImpl.hpp | 3 + ndb/tools/restore/Restore.cpp | 58 ++++++- ndb/tools/restore/Restore.hpp | 15 ++ ndb/tools/restore/consumer_restore.cpp | 10 ++ sql/ha_ndbcluster.cc | 10 +- 32 files changed, 570 insertions(+), 140 deletions(-) diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index 98b6ce7d949..a84f3130abf 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -611,8 +611,6 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_WAIT_GCP_REF 500 #define GSN_WAIT_GCP_CONF 501 -/* 502 not used */ - /** * Trigger and index signals */ @@ -682,6 +680,8 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_BACKUP_FRAGMENT_REF 546 #define GSN_BACKUP_FRAGMENT_CONF 547 +#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 502 + #define GSN_STOP_BACKUP_REQ 548 #define GSN_STOP_BACKUP_REF 549 #define GSN_STOP_BACKUP_CONF 550 diff --git a/ndb/include/kernel/signaldata/BackupContinueB.hpp b/ndb/include/kernel/signaldata/BackupContinueB.hpp index d3d3f79f310..fe3f48444ec 100644 --- a/ndb/include/kernel/signaldata/BackupContinueB.hpp +++ b/ndb/include/kernel/signaldata/BackupContinueB.hpp @@ -31,7 +31,8 @@ private: BUFFER_UNDERFLOW = 1, BUFFER_FULL_SCAN = 2, BUFFER_FULL_FRAG_COMPLETE = 3, - BUFFER_FULL_META = 4 + BUFFER_FULL_META = 4, + BACKUP_FRAGMENT_INFO = 5 }; }; diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp index 298440ad377..07ab5bc543b 100644 --- a/ndb/include/kernel/signaldata/BackupImpl.hpp +++ b/ndb/include/kernel/signaldata/BackupImpl.hpp @@ -258,15 +258,31 @@ class BackupFragmentConf { friend bool printBACKUP_FRAGMENT_CONF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 6 ); + STATIC_CONST( SignalLength = 8 ); private: Uint32 backupId; Uint32 backupPtr; Uint32 tableId; Uint32 fragmentNo; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint32 noOfRecordsLow; + Uint32 noOfBytesLow; + Uint32 noOfRecordsHigh; + Uint32 noOfBytesHigh; +}; + +class BackupFragmentCompleteRep { +public: + STATIC_CONST( SignalLength = 8 ); + + Uint32 backupId; + Uint32 backupPtr; + Uint32 tableId; + Uint32 fragmentNo; + Uint32 noOfTableRowsLow; + Uint32 noOfFragmentRowsLow; + Uint32 noOfTableRowsHigh; + Uint32 noOfFragmentRowsHigh; }; class StopBackupReq { diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp index e1b8c6203a1..9e34ea3a211 100644 --- a/ndb/include/kernel/signaldata/BackupSignalData.hpp +++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp @@ -201,17 +201,19 @@ class BackupCompleteRep { friend bool printBACKUP_COMPLETE_REP(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 8 + NdbNodeBitmask::Size ); + STATIC_CONST( SignalLength = 10 + NdbNodeBitmask::Size ); private: Uint32 senderData; Uint32 backupId; Uint32 startGCP; Uint32 stopGCP; - Uint32 noOfBytes; - Uint32 noOfRecords; + Uint32 noOfBytesLow; + Uint32 noOfRecordsLow; Uint32 noOfLogBytes; Uint32 noOfLogRecords; NdbNodeBitmask nodes; + Uint32 noOfBytesHigh; + Uint32 noOfRecordsHigh; }; /** diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index bc4817f0cf3..3fcae69aa74 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ 
b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -117,9 +117,16 @@ public: CustomTriggerId = 25, FrmLen = 26, FrmData = 27, + FragmentCount = 128, // No of fragments in table (!fragment replicas) FragmentDataLen = 129, FragmentData = 130, // CREATE_FRAGMENTATION reply + + MaxRowsLow = 139, + MaxRowsHigh = 140, + MinRowsLow = 133, + MinRowsHigh = 144, + TableEnd = 999, AttributeName = 1000, // String, Mandatory @@ -263,6 +270,10 @@ public: Uint32 FragmentCount; Uint32 FragmentDataLen; Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2]; + Uint32 MaxRowsLow; + Uint32 MaxRowsHigh; + Uint32 MinRowsLow; + Uint32 MinRowsHigh; void init(); }; diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp index 13dfafcc653..50b0caaba07 100644 --- a/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -104,7 +104,7 @@ class LqhFragReq { friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 25 ); + STATIC_CONST( SignalLength = 24 ); enum RequestInfo { CreateInRunning = 0x8000000, @@ -115,27 +115,32 @@ private: Uint32 senderData; Uint32 senderRef; Uint32 fragmentId; - Uint32 requestInfo; + Uint8 requestInfo; + Uint8 unused1; + Uint16 noOfAttributes; Uint32 tableId; Uint32 localKeyLength; - Uint32 maxLoadFactor; - Uint32 minLoadFactor; - Uint32 kValue; + Uint16 maxLoadFactor; + Uint16 minLoadFactor; + Uint16 kValue; + Uint8 tableType; // DictTabInfo::TableType + Uint8 GCPIndicator; Uint32 lh3DistrBits; Uint32 lh3PageBits; - Uint32 noOfAttributes; Uint32 noOfNullAttributes; - Uint32 noOfPagesToPreAllocate; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; Uint32 schemaVersion; Uint32 keyLength; Uint32 nextLCP; Uint32 noOfKeyAttr; - Uint32 noOfNewAttr; // noOfCharsets in upper half + Uint16 noOfNewAttr; + Uint16 noOfCharsets; Uint32 checksumIndicator; Uint32 noOfAttributeGroups; - Uint32 GCPIndicator; Uint32 startGci; - Uint32 tableType; // DictTabInfo::TableType Uint32 primaryTableId; // table of index or RNIL }; diff --git a/ndb/include/kernel/signaldata/TupFrag.hpp b/ndb/include/kernel/signaldata/TupFrag.hpp index 5fb9d7bcf42..c9f2ad5382f 100644 --- a/ndb/include/kernel/signaldata/TupFrag.hpp +++ b/ndb/include/kernel/signaldata/TupFrag.hpp @@ -30,7 +30,7 @@ class TupFragReq { friend class Dblqh; friend class Dbtup; public: - STATIC_CONST( SignalLength = 14 ); + STATIC_CONST( SignalLength = 17 ); private: Uint32 userPtr; Uint32 userRef; @@ -38,7 +38,18 @@ private: Uint32 tableId; Uint32 noOfAttr; Uint32 fragId; - Uint32 todo[8]; + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + Uint32 noOfNullAttr; + Uint32 schemaVersion; + Uint32 noOfKeyAttr; + Uint16 noOfNewAttr; + Uint16 noOfCharsets; + Uint32 checksumIndicator; + Uint32 noOfAttributeGroups; + Uint32 globalCheckpointIdIndicator; }; class TupFragConf { diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 1413931035d..e67a0253096 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -722,6 +722,20 @@ public: */ void setObjectType(Object::Type type); + /** + * Set/Get Maximum number of rows in table (only used to calculate + * number of partitions). + */ + void setMaxRows(Uint64 maxRows); + Uint64 getMaxRows() const; + + /** + * Set/Get Minimum number of rows in table (only used to calculate + * number of partitions). 
+ */ + void setMinRows(Uint64 minRows); + Uint64 getMinRows() const; + /** @} *******************************************************************/ #ifndef DOXYGEN_SHOULD_SKIP_INTERNAL diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp index e9b0188d93b..855db0834bc 100644 --- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp +++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp @@ -100,8 +100,10 @@ printBACKUP_FRAGMENT_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 b){ BackupFragmentConf* sig = (BackupFragmentConf*)data; fprintf(out, " backupPtr: %d backupId: %d\n", sig->backupPtr, sig->backupId); - fprintf(out, " tableId: %d fragmentNo: %d records: %d bytes: %d\n", - sig->tableId, sig->fragmentNo, sig->noOfRecords, sig->noOfBytes); + fprintf(out, " tableId: %d fragmentNo: %d records: %llu bytes: %llu\n", + sig->tableId, sig->fragmentNo, + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp index 4b0a0e07b66..27fed22ac72 100644 --- a/ndb/src/common/debugger/signaldata/BackupSignalData.cpp +++ b/ndb/src/common/debugger/signaldata/BackupSignalData.cpp @@ -72,11 +72,11 @@ printBACKUP_ABORT_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 bno){ bool printBACKUP_COMPLETE_REP(FILE * out, const Uint32 * data, Uint32 len, Uint16 b){ BackupCompleteRep* sig = (BackupCompleteRep*)data; - fprintf(out, " senderData: %d backupId: %d records: %d bytes: %d\n", + fprintf(out, " senderData: %d backupId: %d records: %llu bytes: %llu\n", sig->senderData, sig->backupId, - sig->noOfRecords, - sig->noOfBytes); + sig->noOfRecordsLow + (((Uint64)sig->noOfRecordsHigh) << 32), + sig->noOfBytesLow + (((Uint64)sig->noOfBytesHigh) << 32)); return true; } diff --git a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 43c129347c0..a1d8d82474d 100644 --- a/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -48,6 +48,10 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, FragmentCount, FragmentCount), DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES), DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen), + DTIMAP(Table, MaxRowsLow, MaxRowsLow), + DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), + DTIMAP(Table, MinRowsLow, MinRowsLow), + DTIMAP(Table, MinRowsHigh, MinRowsHigh), DTIBREAK(AttributeName) }; @@ -124,6 +128,10 @@ DictTabInfo::Table::init(){ FragmentCount = 0; FragmentDataLen = 0; memset(FragmentData, 0, sizeof(FragmentData)); + MaxRowsLow = 0; + MaxRowsHigh = 0; + MinRowsLow = 0; + MinRowsHigh = 0; } void diff --git a/ndb/src/common/debugger/signaldata/LqhFrag.cpp b/ndb/src/common/debugger/signaldata/LqhFrag.cpp index 6d727959a67..3175582c3a2 100644 --- a/ndb/src/common/debugger/signaldata/LqhFrag.cpp +++ b/ndb/src/common/debugger/signaldata/LqhFrag.cpp @@ -37,8 +37,10 @@ printLQH_FRAG_REQ(FILE * output, const Uint32 * theData, Uint32 len, Uint16 recB fprintf(output, " noOfAttributes: %d noOfNullAttributes: %d keyLength: %d\n", sig->noOfAttributes, sig->noOfNullAttributes, sig->keyLength); - fprintf(output, " noOfPagesToPreAllocate: %d schemaVersion: %d nextLCP: %d\n", - sig->noOfPagesToPreAllocate, sig->schemaVersion, sig->nextLCP); + fprintf(output, " 
maxRowsLow/High: %u/%u minRowsLow/High: %u/%u\n", + sig->maxRowsLow, sig->maxRowsHigh, sig->minRowsLow, sig->minRowsHigh); + fprintf(output, " schemaVersion: %d nextLCP: %d\n", + sig->schemaVersion, sig->nextLCP); return true; } diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp index f9089355475..43c1de5e2b3 100644 --- a/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/ndb/src/kernel/blocks/backup/Backup.cpp @@ -266,6 +266,65 @@ Backup::execCONTINUEB(Signal* signal) const Uint32 Tdata2 = signal->theData[2]; switch(Tdata0) { + case BackupContinueB::BACKUP_FRAGMENT_INFO: + { + const Uint32 ptr_I = Tdata1; + Uint32 tabPtr_I = Tdata2; + Uint32 fragPtr_I = signal->theData[3]; + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptr_I); + TablePtr tabPtr; + ptr.p->tables.getPtr(tabPtr, tabPtr_I); + FragmentPtr fragPtr; + tabPtr.p->fragments.getPtr(fragPtr, fragPtr_I); + + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); + + const Uint32 sz = sizeof(BackupFormat::CtlFile::FragmentInfo) >> 2; + Uint32 * dst; + if (!filePtr.p->operation.dataBuffer.getWritePtr(&dst, sz)) + { + sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 4); + return; + } + + BackupFormat::CtlFile::FragmentInfo * fragInfo = + (BackupFormat::CtlFile::FragmentInfo*)dst; + fragInfo->SectionType = htonl(BackupFormat::FRAGMENT_INFO); + fragInfo->SectionLength = htonl(sz); + fragInfo->TableId = htonl(fragPtr.p->tableId); + fragInfo->FragmentNo = htonl(fragPtr_I); + fragInfo->NoOfRecordsLow = htonl(fragPtr.p->noOfRecords & 0xFFFFFFFF); + fragInfo->NoOfRecordsHigh = htonl(fragPtr.p->noOfRecords >> 32); + fragInfo->FilePosLow = htonl(0 & 0xFFFFFFFF); + fragInfo->FilePosHigh = htonl(0 >> 32); + + filePtr.p->operation.dataBuffer.updateWritePtr(sz); + + fragPtr_I++; + if (fragPtr_I == tabPtr.p->fragments.getSize()) + { + signal->theData[0] = tabPtr.p->tableId; + signal->theData[1] = 0; // unlock + EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + + fragPtr_I = 0; + ptr.p->tables.next(tabPtr); + if ((tabPtr_I = tabPtr.i) == RNIL) + { + closeFiles(signal, ptr); + return; + } + } + signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; + signal->theData[1] = ptr_I; + signal->theData[2] = tabPtr_I; + signal->theData[3] = fragPtr_I; + sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); + return; + } case BackupContinueB::START_FILE_THREAD: case BackupContinueB::BUFFER_UNDERFLOW: { @@ -455,7 +514,7 @@ Backup::findTable(const BackupRecordPtr & ptr, return false; } -static Uint32 xps(Uint32 x, Uint64 ms) +static Uint32 xps(Uint64 x, Uint64 ms) { float fx = x; float fs = ms; @@ -469,9 +528,9 @@ static Uint32 xps(Uint32 x, Uint64 ms) } struct Number { - Number(Uint32 r) { val = r;} - Number & operator=(Uint32 r) { val = r; return * this; } - Uint32 val; + Number(Uint64 r) { val = r;} + Number & operator=(Uint64 r) { val = r; return * this; } + Uint64 val; }; NdbOut & @@ -545,8 +604,10 @@ Backup::execBACKUP_COMPLETE_REP(Signal* signal) startTime = NdbTick_CurrentMillisecond() - startTime; ndbout_c("Backup %d has completed", rep->backupId); - const Uint32 bytes = rep->noOfBytes; - const Uint32 records = rep->noOfRecords; + const Uint64 bytes = + rep->noOfBytesLow + (((Uint64)rep->noOfBytesHigh) << 32); + const Uint64 records = + rep->noOfRecordsLow + (((Uint64)rep->noOfRecordsHigh) << 32); Number rps = xps(records, startTime); Number bps = xps(bytes, startTime); @@ -1905,8 +1966,10 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) const Uint32 tableId = 
conf->tableId; const Uint32 fragmentNo = conf->fragmentNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); - const Uint32 noOfBytes = conf->noOfBytes; - const Uint32 noOfRecords = conf->noOfRecords; + const Uint64 noOfBytes = + conf->noOfBytesLow + (((Uint64)conf->noOfBytesHigh) << 32); + const Uint64 noOfRecords = + conf->noOfRecordsLow + (((Uint64)conf->noOfRecordsHigh) << 32); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); @@ -1918,9 +1981,13 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); + tabPtr.p->noOfRecords += noOfRecords; + FragmentPtr fragPtr; tabPtr.p->fragments.getPtr(fragPtr, fragmentNo); + fragPtr.p->noOfRecords = noOfRecords; + ndbrequire(fragPtr.p->scanned == 0); ndbrequire(fragPtr.p->scanning == 1); ndbrequire(fragPtr.p->node == nodeId); @@ -1944,6 +2011,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) } else { + NodeBitmask nodes = ptr.p->nodes; + nodes.clear(getOwnNodeId()); + if (!nodes.isclear()) + { + BackupFragmentCompleteRep *rep = + (BackupFragmentCompleteRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->backupPtr = ptr.i; + rep->tableId = tableId; + rep->fragmentNo = fragmentNo; + rep->noOfTableRowsLow = (Uint32)(tabPtr.p->noOfRecords & 0xFFFFFFFF); + rep->noOfTableRowsHigh = (Uint32)(tabPtr.p->noOfRecords >> 32); + rep->noOfFragmentRowsLow = (Uint32)(noOfRecords & 0xFFFFFFFF); + rep->noOfFragmentRowsHigh = (Uint32)(noOfRecords >> 32); + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_BACKUP_FRAGMENT_COMPLETE_REP, signal, + BackupFragmentCompleteRep::SignalLength, JBB); + } nextFragment(signal, ptr); } } @@ -2006,6 +2091,29 @@ err: execABORT_BACKUP_ORD(signal); } +void +Backup::execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal) +{ + jamEntry(); + BackupFragmentCompleteRep * rep = + (BackupFragmentCompleteRep*)signal->getDataPtr(); + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, rep->backupPtr); + + TablePtr tabPtr; + ndbrequire(findTable(ptr, tabPtr, rep->tableId)); + + tabPtr.p->noOfRecords = + rep->noOfTableRowsLow + (((Uint64)rep->noOfTableRowsHigh) << 32); + + FragmentPtr fragPtr; + tabPtr.p->fragments.getPtr(fragPtr, rep->fragmentNo); + + fragPtr.p->noOfRecords = + rep->noOfFragmentRowsLow + (((Uint64)rep->noOfFragmentRowsHigh) << 32); +} + /***************************************************************************** * * Master functionallity - Drop triggers @@ -2206,8 +2314,10 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) rep->senderData = ptr.p->clientData; rep->startGCP = ptr.p->startGCP; rep->stopGCP = ptr.p->stopGCP; - rep->noOfBytes = ptr.p->noOfBytes; - rep->noOfRecords = ptr.p->noOfRecords; + rep->noOfBytesLow = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); + rep->noOfRecordsLow = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); + rep->noOfBytesHigh = (Uint32)(ptr.p->noOfBytes >> 32); + rep->noOfRecordsHigh = (Uint32)(ptr.p->noOfRecords >> 32); rep->noOfLogBytes = ptr.p->noOfLogBytes; rep->noOfLogRecords = ptr.p->noOfLogRecords; rep->nodes = ptr.p->nodes; @@ -2220,12 +2330,14 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) signal->theData[2] = ptr.p->backupId; signal->theData[3] = ptr.p->startGCP; signal->theData[4] = ptr.p->stopGCP; - signal->theData[5] = ptr.p->noOfBytes; - signal->theData[6] = ptr.p->noOfRecords; + signal->theData[5] = (Uint32)(ptr.p->noOfBytes & 0xFFFFFFFF); + signal->theData[6] = (Uint32)(ptr.p->noOfRecords & 0xFFFFFFFF); signal->theData[7] = 
ptr.p->noOfLogBytes; signal->theData[8] = ptr.p->noOfLogRecords; ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + signal->theData[9+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfBytes >> 32); + signal->theData[10+NdbNodeBitmask::Size] = (Uint32)(ptr.p->noOfRecords >> 32); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 11+NdbNodeBitmask::Size, JBB); } else { @@ -2988,6 +3100,7 @@ Backup::parseTableDescription(Signal* signal, BackupRecordPtr ptr, Uint32 len) /** * Initialize table object */ + tabPtr.p->noOfRecords = 0; tabPtr.p->schemaVersion = tmpTab.TableVersion; tabPtr.p->noOfAttributes = tmpTab.NoOfAttributes; tabPtr.p->noOfNull = 0; @@ -3695,8 +3808,10 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) conf->backupPtr = ptr.i; conf->tableId = filePtr.p->tableId; conf->fragmentNo = filePtr.p->fragmentNo; - conf->noOfRecords = op.noOfRecords; - conf->noOfBytes = op.noOfBytes; + conf->noOfRecordsLow = (Uint32)(op.noOfRecords & 0xFFFFFFFF); + conf->noOfRecordsHigh = (Uint32)(op.noOfRecords >> 32); + conf->noOfBytesLow = (Uint32)(op.noOfBytes & 0xFFFFFFFF); + conf->noOfBytesHigh = (Uint32)(op.noOfBytes >> 32); sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, BackupFragmentConf::SignalLength, JBB); @@ -4123,20 +4238,18 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) gcp->StartGCP = htonl(startGCP); gcp->StopGCP = htonl(stopGCP - 1); filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz); - } - { - TablePtr tabPtr; - for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)) { - signal->theData[0] = tabPtr.p->tableId; - signal->theData[1] = 0; // unlock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + + signal->theData[0] = BackupContinueB::BACKUP_FRAGMENT_INFO; + signal->theData[1] = ptr.i; + signal->theData[2] = tabPtr.i; + signal->theData[3] = 0; + sendSignal(BACKUP_REF, GSN_CONTINUEB, signal, 4, JBB); } } - - closeFiles(signal, ptr); } void diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp index c455e32fa67..e37923da749 100644 --- a/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/ndb/src/kernel/blocks/backup/Backup.hpp @@ -68,6 +68,7 @@ protected: void execBACKUP_DATA(Signal* signal); void execSTART_BACKUP_REQ(Signal* signal); void execBACKUP_FRAGMENT_REQ(Signal* signal); + void execBACKUP_FRAGMENT_COMPLETE_REP(Signal* signal); void execSTOP_BACKUP_REQ(Signal* signal); void execBACKUP_STATUS_REQ(Signal* signal); void execABORT_BACKUP_ORD(Signal* signal); @@ -183,10 +184,12 @@ public: typedef Ptr AttributePtr; struct Fragment { + Uint64 noOfRecords; Uint32 tableId; - Uint32 node; - Uint16 scanned; // 0 = not scanned x = scanned by node x - Uint16 scanning; // 0 = not scanning x = scanning on node x + Uint8 node; + Uint8 scanned; // 0 = not scanned x = scanned by node x + Uint8 scanning; // 0 = not scanning x = scanning on node x + Uint8 unused1; Uint32 nextPool; }; typedef Ptr FragmentPtr; @@ -194,6 +197,8 @@ public: struct Table { Table(ArrayPool &, ArrayPool &); + Uint64 noOfRecords; + Uint32 tableId; Uint32 schemaVersion; Uint32 tableType; @@ -269,8 +274,8 @@ public: Uint32 tablePtr; // Ptr.i to current table FsBuffer dataBuffer; - Uint32 noOfRecords; - Uint32 noOfBytes; + Uint64 noOfRecords; + Uint64 noOfBytes; Uint32 maxRecordSize; private: diff --git a/ndb/src/kernel/blocks/backup/BackupFormat.hpp 
b/ndb/src/kernel/blocks/backup/BackupFormat.hpp index 65dd2ad9053..b8ffff3a294 100644 --- a/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ b/ndb/src/kernel/blocks/backup/BackupFormat.hpp @@ -32,7 +32,8 @@ struct BackupFormat { FRAGMENT_FOOTER = 3, TABLE_LIST = 4, TABLE_DESCRIPTION = 5, - GCP_ENTRY = 6 + GCP_ENTRY = 6, + FRAGMENT_INFO = 7 }; struct FileHeader { @@ -126,6 +127,20 @@ struct BackupFormat { Uint32 StartGCP; Uint32 StopGCP; }; + + /** + * Fragment Info + */ + struct FragmentInfo { + Uint32 SectionType; + Uint32 SectionLength; + Uint32 TableId; + Uint32 FragmentNo; + Uint32 NoOfRecordsLow; + Uint32 NoOfRecordsHigh; + Uint32 FilePosLow; + Uint32 FilePosHigh; + }; }; /** diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp index 4c734d58c8e..96c11468939 100644 --- a/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -97,6 +97,9 @@ Backup::Backup(const Configuration & conf) : addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ); addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF); + + addRecSignal(GSN_BACKUP_FRAGMENT_COMPLETE_REP, + &Backup::execBACKUP_FRAGMENT_COMPLETE_REP); addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ); addRecSignal(GSN_STOP_BACKUP_REF, &Backup::execSTOP_BACKUP_REF); diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index ca9daca428b..1f7fd8e6fa5 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -281,6 +281,10 @@ Dbdict::packTableIntoPagesImpl(SimpleProperties::Writer & w, w.add(DictTabInfo::TableKValue, tablePtr.p->kValue); w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType); w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType); + w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow); + w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh); + w.add(DictTabInfo::MinRowsLow, tablePtr.p->minRowsLow); + w.add(DictTabInfo::MinRowsHigh, tablePtr.p->minRowsHigh); if(!signal) { @@ -1525,6 +1529,10 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->minLoadFactor = 70; tablePtr.p->noOfPrimkey = 1; tablePtr.p->tupKeyLength = 1; + tablePtr.p->maxRowsLow = 0; + tablePtr.p->maxRowsHigh = 0; + tablePtr.p->minRowsLow = 0; + tablePtr.p->minRowsHigh = 0; tablePtr.p->storedTable = true; tablePtr.p->tableType = DictTabInfo::UserTable; tablePtr.p->primaryTableId = RNIL; @@ -4464,6 +4472,13 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { Uint32 lhPageBits = 0; ::calcLHbits(&lhPageBits, &lhDistrBits, fragId, fragCount); + Uint64 maxRows = tabPtr.p->maxRowsLow + + (((Uint64)tabPtr.p->maxRowsHigh) << 32); + Uint64 minRows = tabPtr.p->minRowsLow + + (((Uint64)tabPtr.p->minRowsHigh) << 32); + maxRows = (maxRows + fragCount - 1) / fragCount; + minRows = (minRows + fragCount - 1) / fragCount; + { LqhFragReq* req = (LqhFragReq*)signal->getDataPtrSend(); req->senderData = senderData; @@ -4479,7 +4494,10 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->lh3PageBits = 0; //lhPageBits; req->noOfAttributes = tabPtr.p->noOfAttributes; req->noOfNullAttributes = tabPtr.p->noOfNullBits; - req->noOfPagesToPreAllocate = 0; + req->maxRowsLow = maxRows & 0xFFFFFFFF; + req->maxRowsHigh = maxRows >> 32; + req->minRowsLow = minRows & 0xFFFFFFFF; + req->minRowsHigh = minRows >> 32; req->schemaVersion = tabPtr.p->tableVersion; Uint32 keyLen = 
tabPtr.p->tupKeyLength; req->keyLength = keyLen; // wl-2066 no more "long keys" @@ -4487,8 +4505,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->noOfKeyAttr = tabPtr.p->noOfPrimkey; req->noOfNewAttr = 0; - // noOfCharsets passed to TUP in upper half - req->noOfNewAttr |= (tabPtr.p->noOfCharsets << 16); + req->noOfCharsets = tabPtr.p->noOfCharsets; req->checksumIndicator = 1; req->noOfAttributeGroups = 1; req->GCPIndicator = 0; @@ -5054,6 +5071,15 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType; tablePtr.p->kValue = tableDesc.TableKValue; tablePtr.p->fragmentCount = tableDesc.FragmentCount; + tablePtr.p->maxRowsLow = tableDesc.MaxRowsLow; + tablePtr.p->maxRowsHigh = tableDesc.MaxRowsHigh; + tablePtr.p->minRowsLow = tableDesc.MinRowsLow; + tablePtr.p->minRowsHigh = tableDesc.MinRowsHigh; + + Uint64 maxRows = + (((Uint64)tablePtr.p->maxRowsHigh) << 32) + tablePtr.p->maxRowsLow; + Uint64 minRows = + (((Uint64)tablePtr.p->minRowsHigh) << 32) + tablePtr.p->minRowsLow; tablePtr.p->frmLen = tableDesc.FrmLen; memcpy(tablePtr.p->frmData, tableDesc.FrmData, tableDesc.FrmLen); diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 6b78fb86534..e4788898cc8 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -131,6 +131,10 @@ public: * on disk. Index trigger ids are volatile. */ struct TableRecord : public MetaData::Table { + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; /**************************************************** * Support variables for table handling ****************************************************/ diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index 1ed383853ba..f8e6292f7f2 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -443,7 +443,6 @@ public: UintR dictConnectptr; UintR fragmentPtr; UintR nextAddfragrec; - UintR noOfAllocPages; UintR schemaVer; UintR tup1Connectptr; UintR tup2Connectptr; @@ -465,12 +464,17 @@ public: Uint16 totalAttrReceived; Uint16 fragCopyCreation; Uint16 noOfKeyAttr; - Uint32 noOfNewAttr; // noOfCharsets in upper half + Uint16 noOfNewAttr; + Uint16 noOfCharsets; Uint16 noOfAttributeGroups; Uint16 lh3DistrBits; Uint16 tableType; Uint16 primaryTableId; - };// Size 108 bytes + Uint32 maxRowsLow; + Uint32 maxRowsHigh; + Uint32 minRowsLow; + Uint32 minRowsHigh; + };// Size 124 bytes typedef Ptr AddFragRecordPtr; /* $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ */ diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 56e93e6ee01..ecb67d04050 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -939,12 +939,16 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) Uint8 tlh = req->lh3PageBits; Uint32 tnoOfAttr = req->noOfAttributes; Uint32 tnoOfNull = req->noOfNullAttributes; - Uint32 noOfAlloc = req->noOfPagesToPreAllocate; + Uint32 maxRowsLow = req->maxRowsLow; + Uint32 maxRowsHigh = req->maxRowsHigh; + Uint32 minRowsLow = req->minRowsLow; + Uint32 minRowsHigh = req->minRowsHigh; Uint32 tschemaVersion = req->schemaVersion; Uint32 ttupKeyLength = req->keyLength; Uint32 nextLcp = req->nextLCP; Uint32 noOfKeyAttr = req->noOfKeyAttr; Uint32 noOfNewAttr = req->noOfNewAttr; + Uint32 noOfCharsets = req->noOfCharsets; Uint32 checksumIndicator = 
req->checksumIndicator; Uint32 noOfAttributeGroups = req->noOfAttributeGroups; Uint32 gcpIndicator = req->GCPIndicator; @@ -1042,7 +1046,10 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) addfragptr.p->m_senderAttrPtr = RNIL; addfragptr.p->noOfAttr = tnoOfAttr; addfragptr.p->noOfNull = tnoOfNull; - addfragptr.p->noOfAllocPages = noOfAlloc; + addfragptr.p->maxRowsLow = maxRowsLow; + addfragptr.p->maxRowsHigh = maxRowsHigh; + addfragptr.p->minRowsLow = minRowsLow; + addfragptr.p->minRowsHigh = minRowsHigh; addfragptr.p->tabId = tabptr.i; addfragptr.p->totalAttrReceived = 0; addfragptr.p->attrSentToTup = ZNIL;/* TO FIND PROGRAMMING ERRORS QUICKLY */ @@ -1052,6 +1059,7 @@ void Dblqh::execLQHFRAGREQ(Signal* signal) addfragptr.p->addfragErrorCode = 0; addfragptr.p->noOfKeyAttr = noOfKeyAttr; addfragptr.p->noOfNewAttr = noOfNewAttr; + addfragptr.p->noOfCharsets = noOfCharsets; addfragptr.p->checksumIndicator = checksumIndicator; addfragptr.p->noOfAttributeGroups = noOfAttributeGroups; addfragptr.p->GCPIndicator = gcpIndicator; @@ -1221,47 +1229,56 @@ Dblqh::sendAddFragReq(Signal* signal) ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); if (addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP || addfragptr.p->addfragStatus == AddFragRecord::WAIT_ONE_TUP) { + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); if (DictTabInfo::isTable(addfragptr.p->tableType) || DictTabInfo::isHashIndex(addfragptr.p->tableType)) { jam(); - signal->theData[0] = addfragptr.i; - signal->theData[1] = cownref; - signal->theData[2] = 0; /* ADD TABLE */ - signal->theData[3] = addfragptr.p->tabId; - signal->theData[4] = addfragptr.p->noOfAttr; - signal->theData[5] = + tupFragReq->userPtr = addfragptr.i; + tupFragReq->userRef = cownref; + tupFragReq->reqInfo = 0; /* ADD TABLE */ + tupFragReq->tableId = addfragptr.p->tabId; + tupFragReq->noOfAttr = addfragptr.p->noOfAttr; + tupFragReq->fragId = addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ? 
addfragptr.p->fragid1 : addfragptr.p->fragid2; - signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1; - signal->theData[7] = addfragptr.p->noOfNull; - signal->theData[8] = addfragptr.p->schemaVer; - signal->theData[9] = addfragptr.p->noOfKeyAttr; - signal->theData[10] = addfragptr.p->noOfNewAttr; - signal->theData[11] = addfragptr.p->checksumIndicator; - signal->theData[12] = addfragptr.p->noOfAttributeGroups; - signal->theData[13] = addfragptr.p->GCPIndicator; + tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; + tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; + tupFragReq->minRowsLow = addfragptr.p->minRowsLow; + tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; + tupFragReq->noOfNullAttr = addfragptr.p->noOfNull; + tupFragReq->schemaVersion = addfragptr.p->schemaVer; + tupFragReq->noOfKeyAttr = addfragptr.p->noOfKeyAttr; + tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr; + tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; + tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; + tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups; + tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, TupFragReq::SignalLength, JBB); return; } if (DictTabInfo::isOrderedIndex(addfragptr.p->tableType)) { jam(); - signal->theData[0] = addfragptr.i; - signal->theData[1] = cownref; - signal->theData[2] = 0; /* ADD TABLE */ - signal->theData[3] = addfragptr.p->tabId; - signal->theData[4] = 1; /* ordered index: one array attr */ - signal->theData[5] = + tupFragReq->userPtr = addfragptr.i; + tupFragReq->userRef = cownref; + tupFragReq->reqInfo = 0; /* ADD TABLE */ + tupFragReq->tableId = addfragptr.p->tabId; + tupFragReq->noOfAttr = 1; /* ordered index: one array attr */ + tupFragReq->fragId = addfragptr.p->addfragStatus == AddFragRecord::WAIT_TWO_TUP ? 
addfragptr.p->fragid1 : addfragptr.p->fragid2; - signal->theData[6] = (addfragptr.p->noOfAllocPages >> 1) + 1; - signal->theData[7] = 0; /* ordered index: no nullable */ - signal->theData[8] = addfragptr.p->schemaVer; - signal->theData[9] = 1; /* ordered index: one key */ - signal->theData[10] = addfragptr.p->noOfNewAttr; - signal->theData[11] = addfragptr.p->checksumIndicator; - signal->theData[12] = addfragptr.p->noOfAttributeGroups; - signal->theData[13] = addfragptr.p->GCPIndicator; + tupFragReq->maxRowsLow = addfragptr.p->maxRowsLow; + tupFragReq->maxRowsHigh = addfragptr.p->maxRowsHigh; + tupFragReq->minRowsLow = addfragptr.p->minRowsLow; + tupFragReq->minRowsHigh = addfragptr.p->minRowsHigh; + tupFragReq->noOfNullAttr = 0; /* ordered index: no nullable */ + tupFragReq->schemaVersion = addfragptr.p->schemaVer; + tupFragReq->noOfKeyAttr = 1; /* ordered index: one key */ + tupFragReq->noOfNewAttr = addfragptr.p->noOfNewAttr; + tupFragReq->noOfCharsets = addfragptr.p->noOfCharsets; + tupFragReq->checksumIndicator = addfragptr.p->checksumIndicator; + tupFragReq->noOfAttributeGroups = addfragptr.p->noOfAttributeGroups; + tupFragReq->globalCheckpointIdIndicator = addfragptr.p->GCPIndicator; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, TupFragReq::SignalLength, JBB); return; @@ -1580,28 +1597,35 @@ void Dblqh::abortAddFragOps(Signal* signal) { fragptr.i = addfragptr.p->fragmentPtr; ptrCheckGuard(fragptr, cfragrecFileSize, fragrecord); - signal->theData[0] = (Uint32)-1; if (addfragptr.p->tup1Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tup1Connectptr; + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); + tupFragReq->userPtr = (Uint32)-1; + tupFragReq->userRef = addfragptr.p->tup1Connectptr; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB); addfragptr.p->tup1Connectptr = RNIL; } if (addfragptr.p->tup2Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tup2Connectptr; + TupFragReq* const tupFragReq = (TupFragReq*)signal->getDataPtrSend(); + tupFragReq->userPtr = (Uint32)-1; + tupFragReq->userRef = addfragptr.p->tup2Connectptr; sendSignal(fragptr.p->tupBlockref, GSN_TUPFRAGREQ, signal, 2, JBB); addfragptr.p->tup2Connectptr = RNIL; } if (addfragptr.p->tux1Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tux1Connectptr; + TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend(); + tuxFragReq->userPtr = (Uint32)-1; + tuxFragReq->userRef = addfragptr.p->tux1Connectptr; sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB); addfragptr.p->tux1Connectptr = RNIL; } if (addfragptr.p->tux2Connectptr != RNIL) { jam(); - signal->theData[1] = addfragptr.p->tux2Connectptr; + TuxFragReq* const tuxFragReq = (TuxFragReq*)signal->getDataPtrSend(); + tuxFragReq->userPtr = (Uint32)-1; + tuxFragReq->userRef = addfragptr.p->tux2Connectptr; sendSignal(fragptr.p->tuxBlockref, GSN_TUXFRAGREQ, signal, 2, JBB); addfragptr.p->tux2Connectptr = RNIL; } diff --git a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index cf3c6056d65..41194fba82c 100644 --- a/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -496,7 +496,8 @@ struct DiskBufferSegmentInfo { typedef Ptr DiskBufferSegmentInfoPtr; struct Fragoperrec { - bool definingFragment; + Uint64 minRows; + Uint64 maxRows; Uint32 nextFragoprec; Uint32 lqhPtrFrag; Uint32 fragidFrag; @@ -509,6 +510,7 @@ struct Fragoperrec { Uint32 charsetIndex; BlockReference lqhBlockrefFrag; bool inUse; + 
bool definingFragment; }; typedef Ptr FragoperrecPtr; @@ -560,6 +562,7 @@ struct Fragrecord { Uint32 currentPageRange; Uint32 rootPageRange; Uint32 noOfPages; + Uint32 noOfPagesToGrow; Uint32 emptyPrimPage; Uint32 firstusedOprec; diff --git a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index bacba2a880c..12cd61a17a6 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -41,7 +41,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) { ljamEntry(); - if (signal->theData[0] == (Uint32)-1) { + TupFragReq* tupFragReq = (TupFragReq*)signal->getDataPtr(); + if (tupFragReq->userPtr == (Uint32)-1) { ljam(); abortAddFragOp(signal); return; @@ -51,30 +52,34 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) FragrecordPtr regFragPtr; TablerecPtr regTabPtr; - Uint32 userptr = signal->theData[0]; - Uint32 userblockref = signal->theData[1]; - Uint32 reqinfo = signal->theData[2]; - regTabPtr.i = signal->theData[3]; - Uint32 noOfAttributes = signal->theData[4]; - Uint32 fragId = signal->theData[5]; - Uint32 noOfNullAttr = signal->theData[7]; - /* Uint32 schemaVersion = signal->theData[8];*/ - Uint32 noOfKeyAttr = signal->theData[9]; + Uint32 userptr = tupFragReq->userPtr; + Uint32 userblockref = tupFragReq->userRef; + Uint32 reqinfo = tupFragReq->reqInfo; + regTabPtr.i = tupFragReq->tableId; + Uint32 noOfAttributes = tupFragReq->noOfAttr; + Uint32 fragId = tupFragReq->fragId; + Uint32 noOfNullAttr = tupFragReq->noOfNullAttr; + /* Uint32 schemaVersion = tupFragReq->schemaVersion;*/ + Uint32 noOfKeyAttr = tupFragReq->noOfKeyAttr; - Uint32 noOfNewAttr = (signal->theData[10] & 0xFFFF); - /* DICT sends number of character sets in upper half */ - Uint32 noOfCharsets = (signal->theData[10] >> 16); + Uint32 noOfNewAttr = tupFragReq->noOfNewAttr; + Uint32 noOfCharsets = tupFragReq->noOfCharsets; - Uint32 checksumIndicator = signal->theData[11]; - Uint32 noOfAttributeGroups = signal->theData[12]; - Uint32 globalCheckpointIdIndicator = signal->theData[13]; + Uint32 checksumIndicator = tupFragReq->checksumIndicator; + Uint32 noOfAttributeGroups = tupFragReq->noOfAttributeGroups; + Uint32 globalCheckpointIdIndicator = tupFragReq->globalCheckpointIdIndicator; + + Uint64 maxRows = + (((Uint64)tupFragReq->maxRowsHigh) << 32) + tupFragReq->maxRowsLow; + Uint64 minRows = + (((Uint64)tupFragReq->minRowsHigh) << 32) + tupFragReq->minRowsLow; #ifndef VM_TRACE // config mismatch - do not crash if release compiled if (regTabPtr.i >= cnoOfTablerec) { ljam(); - signal->theData[0] = userptr; - signal->theData[1] = 800; + tupFragReq->userPtr = userptr; + tupFragReq->userRef = 800; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); return; } @@ -83,8 +88,8 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) ptrCheckGuard(regTabPtr, cnoOfTablerec, tablerec); if (cfirstfreeFragopr == RNIL) { ljam(); - signal->theData[0] = userptr; - signal->theData[1] = ZNOFREE_FRAGOP_ERROR; + tupFragReq->userPtr = userptr; + tupFragReq->userRef = ZNOFREE_FRAGOP_ERROR; sendSignal(userblockref, GSN_TUPFRAGREF, signal, 2, JBB); return; }//if @@ -100,6 +105,9 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) fragOperPtr.p->noOfNewAttrCount = noOfNewAttr; fragOperPtr.p->charsetIndex = 0; fragOperPtr.p->currNullBit = 0; + // remove in 5.1, 2 fragments per fragment in 5.0 + fragOperPtr.p->minRows = (minRows + 1)/2; + fragOperPtr.p->maxRows = (maxRows + 1)/2; ndbrequire(reqinfo == ZADDFRAG); @@ -141,16 +149,6 @@ void Dbtup::execTUPFRAGREQ(Signal* signal) regFragPtr.p->fragmentId = 
fragId; regFragPtr.p->checkpointVersion = RNIL; - Uint32 noAllocatedPages = 2; - noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); - - if (noAllocatedPages == 0) { - ljam(); - terrorCode = ZNO_PAGES_ALLOCATED_ERROR; - fragrefuse3Lab(signal, fragOperPtr, regFragPtr, regTabPtr.p, fragId); - return; - }//if - if (ERROR_INSERTED(4007) && regTabPtr.p->fragid[0] == fragId || ERROR_INSERTED(4008) && regTabPtr.p->fragid[1] == fragId) { ljam(); @@ -407,6 +405,27 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) CLEAR_ERROR_INSERT_VALUE; return; } + + if (lastAttr) + { + ljam(); + Uint32 noRowsPerPage = ZWORDS_ON_PAGE/regTabPtr.p->tupheadsize; + Uint32 noAllocatedPages = + (fragOperPtr.p->minRows + noRowsPerPage - 1 )/ noRowsPerPage; + if (fragOperPtr.p->minRows == 0) + noAllocatedPages = 2; + else if (noAllocatedPages == 0) + noAllocatedPages = 2; + noAllocatedPages = allocFragPages(regFragPtr.p, noAllocatedPages); + + if (noAllocatedPages == 0) { + ljam(); + terrorCode = ZNO_PAGES_ALLOCATED_ERROR; + addattrrefuseLab(signal, regFragPtr, fragOperPtr, regTabPtr.p, fragId); + return; + }//if + } + /* **************************************************************** */ /* ************** TUP_ADD_ATTCONF ****************** */ /* **************************************************************** */ diff --git a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp index 1f674876642..acdb73704cb 100644 --- a/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp +++ b/ndb/src/kernel/blocks/dbtup/DbtupPageMap.cpp @@ -332,6 +332,7 @@ void Dbtup::initFragRange(Fragrecord* const regFragPtr) regFragPtr->rootPageRange = RNIL; regFragPtr->currentPageRange = RNIL; regFragPtr->noOfPages = 0; + regFragPtr->noOfPagesToGrow = 2; regFragPtr->nextStartRange = 0; }//initFragRange() @@ -393,9 +394,10 @@ Uint32 Dbtup::allocFragPages(Fragrecord* const regFragPtr, Uint32 tafpNoAllocReq void Dbtup::allocMoreFragPages(Fragrecord* const regFragPtr) { - Uint32 noAllocPages = regFragPtr->noOfPages >> 3; // 12.5% - noAllocPages += regFragPtr->noOfPages >> 4; // 6.25% + Uint32 noAllocPages = regFragPtr->noOfPagesToGrow >> 3; // 12.5% + noAllocPages += regFragPtr->noOfPagesToGrow >> 4; // 6.25% noAllocPages += 2; + regFragPtr->noOfPagesToGrow += noAllocPages; /* -----------------------------------------------------------------*/ // We will grow by 18.75% plus two more additional pages to grow // a little bit quicker in the beginning. 
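(Illustration only, not part of the patch: taken together, the DbtupMeta.cpp and DbtupPageMap.cpp hunks above replace the fixed two-page initial allocation with one derived from the fragment's MinRows, and make later growth track the pages granted so far via noOfPagesToGrow. A minimal sketch of that arithmetic follows; the typedefs are stand-ins for NDB's portability types, and the parameters stand in for values that really live in the Fragoperrec/Fragrecord kernel records.)

typedef unsigned int Uint32;            /* stand-in for NDB's Uint32 */
typedef unsigned long long Uint64;      /* stand-in for NDB's Uint64 */

/* Initial page budget, computed once the last attribute has been added so
   that tupheadsize (words per row) is final; falls back to the old default
   of 2 pages when MinRows is not set or rounds down to zero. */
Uint32 initial_pages(Uint64 minRows, Uint32 wordsOnPage, Uint32 tupheadsize)
{
  Uint32 rowsPerPage = wordsOnPage / tupheadsize;
  Uint32 pages = (Uint32)((minRows + rowsPerPage - 1) / rowsPerPage); /* ceiling */
  return (minRows == 0 || pages == 0) ? 2 : pages;
}

/* Growth step when a fragment runs out of pages: roughly 18.75% of the pages
   granted by earlier growth steps, plus 2, tracked in noOfPagesToGrow so a
   large initial allocation does not inflate every later step. */
Uint32 growth_step(Uint32 *noOfPagesToGrow)
{
  Uint32 add = (*noOfPagesToGrow >> 3) + (*noOfPagesToGrow >> 4) + 2;
  *noOfPagesToGrow += add;
  return add;
}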
diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index b9466ed1173..69c0286a1de 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -2380,14 +2380,20 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) event.Event = BackupEvent::BackupCompleted; event.Completed.BackupId = rep->backupId; - event.Completed.NoOfBytes = rep->noOfBytes; + event.Completed.NoOfBytes = rep->noOfBytesLow; event.Completed.NoOfLogBytes = rep->noOfLogBytes; - event.Completed.NoOfRecords = rep->noOfRecords; + event.Completed.NoOfRecords = rep->noOfRecordsLow; event.Completed.NoOfLogRecords = rep->noOfLogRecords; event.Completed.stopGCP = rep->stopGCP; event.Completed.startGCP = rep->startGCP; event.Nodes = rep->nodes; + if (signal->header.theLength >= BackupCompleteRep::SignalLength) + { + event.Completed.NoOfBytes += ((Uint64)rep->noOfBytesHigh) << 32; + event.Completed.NoOfRecords += ((Uint64)rep->noOfRecordsHigh) << 32; + } + backupId = rep->backupId; return 0; } diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 7811cf0e5d1..187f225470a 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -323,9 +323,9 @@ public: Uint32 ErrorCode; } FailedToStart ; struct { + Uint64 NoOfBytes; + Uint64 NoOfRecords; Uint32 BackupId; - Uint32 NoOfBytes; - Uint32 NoOfRecords; Uint32 NoOfLogBytes; Uint32 NoOfLogRecords; Uint32 startGCP; diff --git a/ndb/src/ndbapi/NdbDictionary.cpp b/ndb/src/ndbapi/NdbDictionary.cpp index a342a5d5926..a0a3dd431b8 100644 --- a/ndb/src/ndbapi/NdbDictionary.cpp +++ b/ndb/src/ndbapi/NdbDictionary.cpp @@ -385,6 +385,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const { return m_impl.m_noOfKeys; } +void +NdbDictionary::Table::setMaxRows(Uint64 maxRows) +{ + m_impl.m_max_rows = maxRows; +} + +Uint64 +NdbDictionary::Table::getMaxRows() const +{ + return m_impl.m_max_rows; +} + +void +NdbDictionary::Table::setMinRows(Uint64 minRows) +{ + m_impl.m_min_rows = minRows; +} + +Uint64 +NdbDictionary::Table::getMinRows() const +{ + return m_impl.m_min_rows; +} + const char* NdbDictionary::Table::getPrimaryKey(int no) const { int count = 0; diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index bd50440b3c0..ce348b616c9 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -319,6 +319,8 @@ NdbTableImpl::init(){ m_noOfDistributionKeys= 0; m_noOfBlobs= 0; m_replicaCount= 0; + m_min_rows = 0; + m_max_rows = 0; } bool @@ -416,6 +418,9 @@ NdbTableImpl::assign(const NdbTableImpl& org) m_version = org.m_version; m_status = org.m_status; + + m_max_rows = org.m_max_rows; + m_min_rows = org.m_min_rows; } void NdbTableImpl::setName(const char * name) @@ -1302,6 +1307,12 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, fragmentTypeMapping, (Uint32)NdbDictionary::Object::FragUndefined); + Uint64 max_rows = ((Uint64)tableDesc.MaxRowsHigh) << 32; + max_rows += tableDesc.MaxRowsLow; + impl->m_max_rows = max_rows; + Uint64 min_rows = ((Uint64)tableDesc.MinRowsHigh) << 32; + min_rows += tableDesc.MinRowsLow; + impl->m_min_rows = min_rows; impl->m_logging = tableDesc.TableLoggedFlag; impl->m_kvalue = tableDesc.TableKValue; impl->m_minLoadFactor = tableDesc.MinLoadFactor; @@ -1630,7 +1641,16 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, tmpTab.MaxLoadFactor = impl.m_maxLoadFactor; tmpTab.TableType = DictTabInfo::UserTable; tmpTab.NoOfAttributes = sz; + tmpTab.MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32); + tmpTab.MaxRowsLow = 
(Uint32)(impl.m_max_rows & 0xFFFFFFFF); + tmpTab.MinRowsHigh = (Uint32)(impl.m_min_rows >> 32); + tmpTab.MinRowsLow = (Uint32)(impl.m_min_rows & 0xFFFFFFFF); + Uint64 maxRows = + (((Uint64)tmpTab.MaxRowsHigh) << 32) + tmpTab.MaxRowsLow; + Uint64 minRows = + (((Uint64)tmpTab.MinRowsHigh) << 32) + tmpTab.MinRowsLow; + tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType, fragmentTypeMapping, DictTabInfo::AllNodesSmallTable); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/ndb/src/ndbapi/NdbDictionaryImpl.hpp index bc9894497f8..dfccf120228 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -130,6 +130,9 @@ public: Uint32 m_hashpointerValue; Vector m_fragments; + Uint64 m_max_rows; + Uint64 m_min_rows; + bool m_logging; int m_kvalue; int m_minLoadFactor; diff --git a/ndb/tools/restore/Restore.cpp b/ndb/tools/restore/Restore.cpp index 6ac06f8a6f8..a808a48b558 100644 --- a/ndb/tools/restore/Restore.cpp +++ b/ndb/tools/restore/Restore.cpp @@ -80,7 +80,12 @@ RestoreMetaData::RestoreMetaData(const char* path, Uint32 nodeId, Uint32 bNo) { RestoreMetaData::~RestoreMetaData(){ for(Uint32 i= 0; i < allTables.size(); i++) - delete allTables[i]; + { + TableS *table = allTables[i]; + for(Uint32 j= 0; j < table->m_fragmentInfo.size(); j++) + delete table->m_fragmentInfo[j]; + delete table; + } allTables.clear(); } @@ -111,6 +116,9 @@ RestoreMetaData::loadContent() } if(!readGCPEntry()) return 0; + + if(!readFragmentInfo()) + return 0; return 1; } @@ -192,6 +200,52 @@ RestoreMetaData::readGCPEntry() { return true; } +bool +RestoreMetaData::readFragmentInfo() +{ + BackupFormat::CtlFile::FragmentInfo fragInfo; + TableS * table = 0; + Uint32 tableId = RNIL; + + while (buffer_read(&fragInfo, 4, 2) == 2) + { + fragInfo.SectionType = ntohl(fragInfo.SectionType); + fragInfo.SectionLength = ntohl(fragInfo.SectionLength); + + if (fragInfo.SectionType != BackupFormat::FRAGMENT_INFO) + { + err << "readFragmentInfo invalid section type: " << + fragInfo.SectionType << endl; + return false; + } + + if (buffer_read(&fragInfo.TableId, (fragInfo.SectionLength-2)*4, 1) != 1) + { + err << "readFragmentInfo invalid section length: " << + fragInfo.SectionLength << endl; + return false; + } + + fragInfo.TableId = ntohl(fragInfo.TableId); + if (fragInfo.TableId != tableId) + { + tableId = fragInfo.TableId; + table = getTable(tableId); + } + + FragmentInfo * tmp = new FragmentInfo; + tmp->fragmentNo = ntohl(fragInfo.FragmentNo); + tmp->noOfRecords = ntohl(fragInfo.NoOfRecordsLow) + + (((Uint64)ntohl(fragInfo.NoOfRecordsHigh)) << 32); + tmp->filePosLow = ntohl(fragInfo.FilePosLow); + tmp->filePosHigh = ntohl(fragInfo.FilePosHigh); + + table->m_fragmentInfo.push_back(tmp); + table->m_noOfRecords += tmp->noOfRecords; + } + return true; +} + TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) : m_dictTable(tableImpl) { @@ -199,6 +253,7 @@ TableS::TableS(Uint32 version, NdbTableImpl* tableImpl) m_noOfNullable = m_nullBitmaskSize = 0; m_auto_val_id= ~(Uint32)0; m_max_auto_val= 0; + m_noOfRecords= 0; backupVersion = version; for (int i = 0; i < tableImpl->getNoOfColumns(); i++) @@ -937,4 +992,5 @@ operator<<(NdbOut& ndbout, const TableS & table){ template class Vector; template class Vector; template class Vector; +template class Vector; diff --git a/ndb/tools/restore/Restore.hpp b/ndb/tools/restore/Restore.hpp index 85793baf9df..cf8feb7125c 100644 --- a/ndb/tools/restore/Restore.hpp +++ b/ndb/tools/restore/Restore.hpp @@ -114,6 +114,14 @@ public: AttributeData * getData(int i) 
const; }; // class TupleS +struct FragmentInfo +{ + Uint32 fragmentNo; + Uint64 noOfRecords; + Uint32 filePosLow; + Uint32 filePosHigh; +}; + class TableS { friend class TupleS; @@ -136,6 +144,9 @@ class TableS { int pos; + Uint64 m_noOfRecords; + Vector m_fragmentInfo; + void createAttr(NdbDictionary::Column *column); public: @@ -146,6 +157,9 @@ public: Uint32 getTableId() const { return m_dictTable->getTableId(); } + Uint32 getNoOfRecords() const { + return m_noOfRecords; + } /* void setMysqlTableName(char * tableName) { strpcpy(mysqlTableName, tableName); @@ -274,6 +288,7 @@ class RestoreMetaData : public BackupFile { bool readMetaTableDesc(); bool readGCPEntry(); + bool readFragmentInfo(); Uint32 readMetaTableList(); Uint32 m_startGCP; diff --git a/ndb/tools/restore/consumer_restore.cpp b/ndb/tools/restore/consumer_restore.cpp index d62ca3f610a..bff63c28716 100644 --- a/ndb/tools/restore/consumer_restore.cpp +++ b/ndb/tools/restore/consumer_restore.cpp @@ -193,6 +193,16 @@ BackupRestore::table(const TableS & table){ copy.setName(split[2].c_str()); + /* + update min and max rows to reflect the table, this to + ensure that memory is allocated properly in the ndb kernel + */ + copy.setMinRows(table.getNoOfRecords()); + if (table.getNoOfRecords() > copy.getMaxRows()) + { + copy.setMaxRows(table.getNoOfRecords()); + } + if (dict->createTable(copy) == -1) { err << "Create table " << table.getTableName() << " failed: " diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 98dd9d5a122..d59eb4d4f77 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4111,7 +4111,11 @@ static int create_ndb_column(NDBCOL &col, static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) { - if (form->s->max_rows == (ha_rows) 0) /* default setting, don't set fragmentation */ + ha_rows max_rows= form->s->max_rows; + ha_rows min_rows= form->s->min_rows; + if (max_rows < min_rows) + max_rows= min_rows; + if (max_rows == (ha_rows)0) /* default setting, don't set fragmentation */ return; /** * get the number of fragments right @@ -4129,7 +4133,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) acc_row_size+= 4 + /*safety margin*/ 4; #endif ulonglong acc_fragment_size= 512*1024*1024; - ulonglong max_rows= form->s->max_rows; #if MYSQL_VERSION_ID >= 50100 no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; #else @@ -4153,6 +4156,9 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) ftype= NDBTAB::FragAllSmall; tab.setFragmentType(ftype); } + tab.setMaxRows(max_rows); + tab.setMinRows(min_rows); + fprintf(stderr, "max/min %llu %llu\n", max_rows, min_rows); } int ha_ndbcluster::create(const char *name, From d2da3c9685d1105f7e021c9b05b2b0248135a897 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 11:26:00 +0200 Subject: [PATCH 39/74] Bug #19852 Restoring backup made from cluster with full data memory fails - correction of previous patch --- ndb/include/kernel/GlobalSignalNumbers.h | 4 ++-- ndb/include/kernel/signaldata/DictTabInfo.hpp | 2 +- sql/ha_ndbcluster.cc | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/ndb/include/kernel/GlobalSignalNumbers.h b/ndb/include/kernel/GlobalSignalNumbers.h index a84f3130abf..fcb0a87020f 100644 --- a/ndb/include/kernel/GlobalSignalNumbers.h +++ b/ndb/include/kernel/GlobalSignalNumbers.h @@ -680,7 +680,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_BACKUP_FRAGMENT_REF 546 #define GSN_BACKUP_FRAGMENT_CONF 547 -#define 
GSN_BACKUP_FRAGMENT_COMPLETE_REP 502 +#define GSN_BACKUP_FRAGMENT_COMPLETE_REP 575 #define GSN_STOP_BACKUP_REQ 548 #define GSN_STOP_BACKUP_REF 549 @@ -731,7 +731,7 @@ extern const GlobalSignalNumber NO_OF_SIGNAL_NAMES; #define GSN_SUB_STOP_REQ 572 #define GSN_SUB_STOP_REF 573 #define GSN_SUB_STOP_CONF 574 -/* 575 unused */ +/* 575 used */ #define GSN_SUB_CREATE_REQ 576 #define GSN_SUB_CREATE_REF 577 #define GSN_SUB_CREATE_CONF 578 diff --git a/ndb/include/kernel/signaldata/DictTabInfo.hpp b/ndb/include/kernel/signaldata/DictTabInfo.hpp index 3fcae69aa74..0a7f6aa3fb3 100644 --- a/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -124,7 +124,7 @@ public: MaxRowsLow = 139, MaxRowsHigh = 140, - MinRowsLow = 133, + MinRowsLow = 143, MinRowsHigh = 144, TableEnd = 999, diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index d59eb4d4f77..ced85d1a339 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -4158,7 +4158,6 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) } tab.setMaxRows(max_rows); tab.setMinRows(min_rows); - fprintf(stderr, "max/min %llu %llu\n", max_rows, min_rows); } int ha_ndbcluster::create(const char *name, From e3ef15ea37dbbd0e999fa7f3fb0ca7bddc4a11cf Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 14:56:24 +0400 Subject: [PATCH 40/74] Fix compilation failures on Windows caused by the patch for Bug#17199. Fix a minor issue with Bug#16206 (bdb.test failed if the tree is compiled without blackhole). include/my_sys.h: Change declaration of my_strdup_with_length to accept const char *, not const byte *: in 5 places out of 6 where this function is used, it's being passed char *, not byte * mysql-test/r/bdb.result: Remove dependency on an optional engine (updated test results). mysql-test/t/bdb.test: Remove dependency on an optional engine. mysys/my_malloc.c: my_strdup_with_length: const byte * -> const char * mysys/safemalloc.c: my_strdup_with_length: const byte * -> const char * sql/ha_federated.cc: my_strdup_with_length: const byte * -> const char * sql/log_event.cc: my_strdup_with_length: const byte * -> const char * sql/set_var.cc: my_strdup_with_length: const byte * -> const char * sql/sql_class.h: Change db_length type to uint from uint32 (see also table.h) sql/table.h: Change the type of db_length to uint from uint32: LEX_STRING uses uint for length, we need a small and consistent set of types to store length to minimize cast and compile failures. 
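(Illustration only, paraphrased from the include/my_sys.h and sql/ha_federated.cc hunks below: the visible effect of the new prototype on a typical caller.)

  /* declaration as changed in include/my_sys.h */
  extern char *my_strdup_with_length(const char *from, uint length, myf MyFlags);

  /* before: the char* argument had to be cast to const byte* */
  share->scheme= my_strdup_with_length((const byte*) table->s->connect_string.str,
                                       table->s->connect_string.length, MYF(0));

  /* after: the cast disappears */
  share->scheme= my_strdup_with_length(table->s->connect_string.str,
                                       table->s->connect_string.length, MYF(0));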
--- include/my_sys.h | 2 +- mysql-test/r/bdb.result | 4 ++-- mysql-test/t/bdb.test | 2 +- mysys/my_malloc.c | 2 +- mysys/safemalloc.c | 2 +- sql/ha_federated.cc | 3 +-- sql/log_event.cc | 4 ++-- sql/set_var.cc | 2 +- sql/sql_class.h | 2 +- sql/table.h | 3 ++- 10 files changed, 13 insertions(+), 13 deletions(-) diff --git a/include/my_sys.h b/include/my_sys.h index 229389f1ac5..cbd7c79fa11 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -157,7 +157,7 @@ extern gptr my_realloc(gptr oldpoint,uint Size,myf MyFlags); extern void my_no_flags_free(gptr ptr); extern gptr my_memdup(const byte *from,uint length,myf MyFlags); extern char *my_strdup(const char *from,myf MyFlags); -extern char *my_strdup_with_length(const byte *from, uint length, +extern char *my_strdup_with_length(const char *from, uint length, myf MyFlags); /* we do use FG (as a no-op) in below so that a typo on FG is caught */ #define my_free(PTR,FG) ((void)FG,my_no_flags_free(PTR)) diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result index 588644a6c66..ee7cdceefda 100644 --- a/mysql-test/r/bdb.result +++ b/mysql-test/r/bdb.result @@ -1930,7 +1930,7 @@ alter table t1 add primary key(a); drop table t1; set autocommit=1; reset master; -create table bug16206 (a int) engine= blackhole; +create table bug16206 (a int); insert into bug16206 values(1); start transaction; insert into bug16206 values(2); @@ -1938,7 +1938,7 @@ commit; show binlog events; Log_name Pos Event_type Server_id End_log_pos Info f n Format_desc 1 n Server ver: VERSION, Binlog ver: 4 -f n Query 1 n use `test`; create table bug16206 (a int) engine= blackhole +f n Query 1 n use `test`; create table bug16206 (a int) f n Query 1 n use `test`; insert into bug16206 values(1) f n Query 1 n use `test`; insert into bug16206 values(2) drop table bug16206; diff --git a/mysql-test/t/bdb.test b/mysql-test/t/bdb.test index d2e3ca5f36e..ec05eeb3c34 100644 --- a/mysql-test/t/bdb.test +++ b/mysql-test/t/bdb.test @@ -1028,7 +1028,7 @@ set autocommit=1; let $VERSION=`select version()`; reset master; -create table bug16206 (a int) engine= blackhole; +create table bug16206 (a int); insert into bug16206 values(1); start transaction; insert into bug16206 values(2); diff --git a/mysys/my_malloc.c b/mysys/my_malloc.c index 3f601a42dc9..f33db2655c4 100644 --- a/mysys/my_malloc.c +++ b/mysys/my_malloc.c @@ -83,7 +83,7 @@ char *my_strdup(const char *from, myf my_flags) } -char *my_strdup_with_length(const byte *from, uint length, myf my_flags) +char *my_strdup_with_length(const char *from, uint length, myf my_flags) { gptr ptr; if ((ptr=my_malloc(length+1,my_flags)) != 0) diff --git a/mysys/safemalloc.c b/mysys/safemalloc.c index 6cdf98c5f5f..f6d6644859e 100644 --- a/mysys/safemalloc.c +++ b/mysys/safemalloc.c @@ -525,7 +525,7 @@ char *_my_strdup(const char *from, const char *filename, uint lineno, } /* _my_strdup */ -char *_my_strdup_with_length(const byte *from, uint length, +char *_my_strdup_with_length(const char *from, uint length, const char *filename, uint lineno, myf MyFlags) { diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index c6d5c77803b..02bcde43f11 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -632,8 +632,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, DBUG_PRINT("info", ("Length %d \n", table->s->connect_string.length)); DBUG_PRINT("info", ("String %.*s \n", table->s->connect_string.length, table->s->connect_string.str)); - share->scheme= my_strdup_with_length((const byte*)table->s-> - connect_string.str, + share->scheme= 
my_strdup_with_length(table->s->connect_string.str, table->s->connect_string.length, MYF(0)); diff --git a/sql/log_event.cc b/sql/log_event.cc index e93c2855199..b4707826205 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3123,7 +3123,7 @@ Rotate_log_event::Rotate_log_event(THD* thd_arg, llstr(pos_arg, buff), flags)); #endif if (flags & DUP_NAME) - new_log_ident= my_strdup_with_length((const byte*) new_log_ident_arg, + new_log_ident= my_strdup_with_length(new_log_ident_arg, ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; } @@ -3147,7 +3147,7 @@ Rotate_log_event::Rotate_log_event(const char* buf, uint event_len, (header_size+post_header_len)); ident_offset = post_header_len; set_if_smaller(ident_len,FN_REFLEN-1); - new_log_ident= my_strdup_with_length((byte*) buf + ident_offset, + new_log_ident= my_strdup_with_length(buf + ident_offset, (uint) ident_len, MYF(MY_WME)); DBUG_VOID_RETURN; diff --git a/sql/set_var.cc b/sql/set_var.cc index 003dd4a8ab3..aa58f7d41fc 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -1133,7 +1133,7 @@ bool update_sys_var_str(sys_var_str *var_str, rw_lock_t *var_mutex, uint new_length= (var ? var->value->str_value.length() : 0); if (!old_value) old_value= (char*) ""; - if (!(res= my_strdup_with_length((byte*)old_value, new_length, MYF(0)))) + if (!(res= my_strdup_with_length(old_value, new_length, MYF(0)))) return 1; /* Replace the old value in such a way that the any thread using diff --git a/sql/sql_class.h b/sql/sql_class.h index 47150912c52..1ba104df2a4 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1300,7 +1300,7 @@ public: pthread_t real_id; uint tmp_table, global_read_lock; uint server_status,open_options,system_thread; - uint32 db_length; + uint db_length; uint select_number; //number of select (used for EXPLAIN) /* variables.transaction_isolation is reset to this after each commit */ enum_tx_isolation session_tx_isolation; diff --git a/sql/table.h b/sql/table.h index 106421d7a17..ebb4481ef3a 100644 --- a/sql/table.h +++ b/sql/table.h @@ -599,7 +599,8 @@ typedef struct st_table_list thr_lock_type lock_type; uint outer_join; /* Which join type */ uint shared; /* Used in multi-upd */ - uint32 db_length, table_name_length; + uint db_length; + uint32 table_name_length; bool updatable; /* VIEW/TABLE can be updated now */ bool straight; /* optimize with prev table */ bool updating; /* for replicate-do/ignore table */ From 44672ae5652ffa1eb148ed94b23f45d5a0f829e9 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 15:39:43 +0400 Subject: [PATCH 41/74] Fix yet another place with an obsolete explicit cast to byte *. 
sql/set_var.h: Fix yet another place with an obsolete explicit cast to byte * --- sql/set_var.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/set_var.h b/sql/set_var.h index 8e5a94b1e1b..b048428219d 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -935,7 +935,7 @@ public: uint name_length_arg, gptr data_arg) :name_length(name_length_arg), data(data_arg) { - name= my_strdup_with_length((byte*) name_arg, name_length, MYF(MY_WME)); + name= my_strdup_with_length(name_arg, name_length, MYF(MY_WME)); links->push_back(this); } inline bool cmp(const char *name_cmp, uint length) From e832964174c43c6edb5603ab372223c964145a98 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 17:00:24 +0500 Subject: [PATCH 42/74] merging fix --- mysql-test/r/gis.result | 1 + mysql-test/t/gis.test | 2 ++ 2 files changed, 3 insertions(+) diff --git a/mysql-test/r/gis.result b/mysql-test/r/gis.result index 8d1f0bb3937..7a0f689df36 100644 --- a/mysql-test/r/gis.result +++ b/mysql-test/r/gis.result @@ -693,6 +693,7 @@ create table t1(pt GEOMETRY); alter table t1 add primary key pti(pt); ERROR 42000: BLOB/TEXT column 'pt' used in key specification without a key length alter table t1 add primary key pti(pt(20)); +drop table t1; create table t1 (g GEOMETRY); select * from t1; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr diff --git a/mysql-test/t/gis.test b/mysql-test/t/gis.test index 566dbf882ad..4c6ff9b2fe7 100644 --- a/mysql-test/t/gis.test +++ b/mysql-test/t/gis.test @@ -408,6 +408,8 @@ create table t1(pt GEOMETRY); --error 1170 alter table t1 add primary key pti(pt); alter table t1 add primary key pti(pt(20)); +drop table t1; + --enable_metadata create table t1 (g GEOMETRY); select * from t1; From 7982816b8adac32e055849f0e8d15a973889ad1d Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 17:34:14 +0400 Subject: [PATCH 43/74] Fix yet another place that used uint32 instead of uint. --- sql/slave.cc | 2 +- sql/slave.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/slave.cc b/sql/slave.cc index aed2a41a1e6..4da447c4bc3 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1177,7 +1177,7 @@ bool net_request_file(NET* net, const char* fname) } -const char *rewrite_db(const char* db, uint32 *new_len) +const char *rewrite_db(const char* db, uint *new_len) { if (replicate_rewrite_db.is_empty() || !db) return db; diff --git a/sql/slave.h b/sql/slave.h index ebbb1e64df5..7f08105c0b9 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -550,7 +550,7 @@ int add_table_rule(HASH* h, const char* table_spec); int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec); void init_table_rule_hash(HASH* h, bool* h_inited); void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited); -const char *rewrite_db(const char* db, uint32 *new_db_len); +const char *rewrite_db(const char* db, uint *new_db_len); const char *print_slave_db_safe(const char *db); int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); void skip_load_data_infile(NET* net); From 4b36c1d8ffa33a9684342f7bae969b0e163c469f Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 17:40:19 +0300 Subject: [PATCH 44/74] Bug #16458: Simple SELECT FOR UPDATE causes "Result Set not updatable" error 'SELECT DISTINCT a,b FROM t1' should not use temp table if there is unique index (or primary key) on a. 
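As a quick illustration of the intended effect (a hypothetical session; the table, the column names and the plan shapes noted in the comments are assumptions about a server built with this patch, not captured test output):

CREATE TABLE t1 (a INT PRIMARY KEY, b INT);
INSERT INTO t1 VALUES (1,10), (2,20), (3,30);

# a is a single-column unique key, so every (a,b) pair is already
# distinct; the plan is expected to need no "Using temporary" step.
EXPLAIN SELECT DISTINCT a, b FROM t1;

# The same reasoning applies when GROUP BY lists all parts of the key.
EXPLAIN SELECT a, b FROM t1 GROUP BY a, b;

DROP TABLE t1;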
There are a number of other similar cases that can be calculated without the use of a temp table : multi-part unique indexes, primary keys or using GROUP BY instead of DISTINCT. When a GROUP BY/DISTINCT clause contains all key parts of a unique index, then it is guaranteed that the fields of the clause will be unique, therefore we can optimize away GROUP BY/DISTINCT altogether. This optimization has two effects: * there is no need to create a temporary table to compute the GROUP/DISTINCT operation (or the temporary table will be smaller if only GROUP is removed and DISTINCT stays or if DISTINCT is removed and GROUP BY stays) * this causes the statement in effect to become updatable in Connector/Java because the result set columns will be direct reference to the primary key of the table (instead to the temporary table that it currently references). Implemented a check that will optimize away GROUP BY/DISTINCT for queries like the above. Currently it will work only for single non-constant table in the FROM clause. mysql-test/r/distinct.result: Bug #16458: Simple SELECT FOR UPDATE causes "Result Set not updatable" error - test case mysql-test/t/distinct.test: Bug #16458: Simple SELECT FOR UPDATE causes "Result Set not updatable" error - test case sql/sql_select.cc: Bug #16458: Simple SELECT FOR UPDATE causes "Result Set not updatable" error - disable GROUP BY if contains the fields of a unique index. --- mysql-test/r/distinct.result | 51 +++++++++++ mysql-test/t/distinct.test | 28 ++++++ sql/sql_select.cc | 168 +++++++++++++++++++++++++++++++++++ 3 files changed, 247 insertions(+) diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result index 8932285b5d0..c6c614a5646 100644 --- a/mysql-test/r/distinct.result +++ b/mysql-test/r/distinct.result @@ -504,3 +504,54 @@ a 2 b 2 2 4 3 2 5 DROP TABLE t1,t2; +CREATE TABLE t1(a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,1), (3,1); +EXPLAIN SELECT DISTINCT a FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL 3 Using index +EXPLAIN SELECT DISTINCT a,b FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 +EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1_1 ALL NULL NULL NULL NULL 3 Using temporary +1 SIMPLE t1_2 index NULL PRIMARY 4 NULL 3 Using index; Distinct +EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2 +WHERE t1_1.a = t1_2.a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1_1 ALL PRIMARY NULL NULL NULL 3 Using temporary +1 SIMPLE t1_2 eq_ref PRIMARY PRIMARY 4 test.t1_1.a 1 Using index; Distinct +EXPLAIN SELECT a FROM t1 GROUP BY a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index NULL PRIMARY 4 NULL 3 Using index +EXPLAIN SELECT a,b FROM t1 GROUP BY a,b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 +EXPLAIN SELECT DISTINCT a,b FROM t1 GROUP BY a,b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 +CREATE TABLE t2(a INT, b INT, c INT, d INT, PRIMARY KEY (a,b)); +INSERT INTO t2 VALUES (1,1,1,50), (1,2,3,40), (2,1,3,4); +EXPLAIN SELECT DISTINCT a FROM t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index +EXPLAIN SELECT DISTINCT a,a 
FROM t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index; Using temporary +EXPLAIN SELECT DISTINCT b,a FROM t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index +EXPLAIN SELECT DISTINCT a,c FROM t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using temporary +EXPLAIN SELECT DISTINCT c,a,b FROM t2; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 3 +EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 3 Using temporary; Using filesort +CREATE UNIQUE INDEX c_b_unq ON t2 (c,b); +EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 3 +DROP TABLE t1,t2; diff --git a/mysql-test/t/distinct.test b/mysql-test/t/distinct.test index f2fe1ec6372..8ca6f350b8d 100644 --- a/mysql-test/t/distinct.test +++ b/mysql-test/t/distinct.test @@ -348,6 +348,34 @@ SELECT DISTINCT a, b, 2 FROM t2; SELECT DISTINCT 2, a, b FROM t2; SELECT DISTINCT a, 2, b FROM t2; +DROP TABLE t1,t2; +# +# Bug#16458: Simple SELECT FOR UPDATE causes "Result Set not updatable" +# error. +# +CREATE TABLE t1(a INT PRIMARY KEY, b INT); +INSERT INTO t1 VALUES (1,1), (2,1), (3,1); +EXPLAIN SELECT DISTINCT a FROM t1; +EXPLAIN SELECT DISTINCT a,b FROM t1; +EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2; +EXPLAIN SELECT DISTINCT t1_1.a, t1_1.b FROM t1 t1_1, t1 t1_2 + WHERE t1_1.a = t1_2.a; +EXPLAIN SELECT a FROM t1 GROUP BY a; +EXPLAIN SELECT a,b FROM t1 GROUP BY a,b; +EXPLAIN SELECT DISTINCT a,b FROM t1 GROUP BY a,b; + +CREATE TABLE t2(a INT, b INT, c INT, d INT, PRIMARY KEY (a,b)); +INSERT INTO t2 VALUES (1,1,1,50), (1,2,3,40), (2,1,3,4); +EXPLAIN SELECT DISTINCT a FROM t2; +EXPLAIN SELECT DISTINCT a,a FROM t2; +EXPLAIN SELECT DISTINCT b,a FROM t2; +EXPLAIN SELECT DISTINCT a,c FROM t2; +EXPLAIN SELECT DISTINCT c,a,b FROM t2; + +EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d; +CREATE UNIQUE INDEX c_b_unq ON t2 (c,b); +EXPLAIN SELECT DISTINCT a,b,d FROM t2 GROUP BY c,b,d; + DROP TABLE t1,t2; # End of 4.1 tests diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 709ff9726bb..cb23662f42c 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -114,6 +114,10 @@ static Item* part_of_refkey(TABLE *form,Field *field); static uint find_shortest_key(TABLE *table, const key_map *usable_keys); static bool test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order, ha_rows select_limit, bool no_changes); +static bool list_contains_unique_index(TABLE *table, + bool (*find_func) (Field *, void *), void *data); +static bool find_field_in_item_list (Field *field, void *data); +static bool find_field_in_order_list (Field *field, void *data); static int create_sort_index(THD *thd, JOIN *join, ORDER *order, ha_rows filesort_limit, ha_rows select_limit); static int remove_duplicates(JOIN *join,TABLE *entry,List &fields, @@ -695,6 +699,36 @@ JOIN::optimize() if (old_group_list && !group_list) select_distinct= 0; } + /* + Check if we can optimize away GROUP BY/DISTINCT. 
+ We can do that if there are no aggregate functions and the + fields in DISTINCT clause (if present) and/or columns in GROUP BY + (if present) contain direct references to all key parts of + an unique index (in whatever order). + Note that the unique keys for DISTINCT and GROUP BY should not + be the same (as long as they are unique). + + The FROM clause must contain a single non-constant table. + */ + if (tables - const_tables == 1 && (group_list || select_distinct) && + !tmp_table_param.sum_func_count) + { + if (group_list && + list_contains_unique_index(join_tab[const_tables].table, + find_field_in_order_list, + (void *) group_list)) + { + group_list= 0; + group= 0; + } + if (select_distinct && + list_contains_unique_index(join_tab[const_tables].table, + find_field_in_item_list, + (void *) &fields_list)) + { + select_distinct= 0; + } + } if (!group_list && group) { order=0; // The output has only one row @@ -7376,6 +7410,140 @@ test_if_subkey(ORDER *order, TABLE *table, uint ref, uint ref_key_parts, return best; } + +/* + Check if GROUP BY/DISTINCT can be optimized away because the set is + already known to be distinct. + + SYNOPSIS + list_contains_unique_index () + table The table to operate on. + find_func function to iterate over the list and search + for a field + + DESCRIPTION + Used in removing the GROUP BY/DISTINCT of the following types of + statements: + SELECT [DISTINCT] ... FROM + [GROUP BY ,...] + + If (a,b,c is distinct) + then ,{whatever} is also distinct + + This function checks if all the key parts of any of the unique keys + of the table are referenced by a list : either the select list + through find_field_in_item_list or GROUP BY list through + find_field_in_order_list. + If the above holds then we can safely remove the GROUP BY/DISTINCT, + as no result set can be more distinct than an unique key. + + RETURN VALUE + 1 found + 0 not found. +*/ + +static bool +list_contains_unique_index(TABLE *table, + bool (*find_func) (Field *, void *), void *data) +{ + for (uint keynr= 0; keynr < table->keys; keynr++) + { + if (keynr == table->primary_key || + (table->key_info[keynr].flags & HA_NOSAME)) + { + KEY *keyinfo= table->key_info + keynr; + KEY_PART_INFO *key_part, *key_part_end; + + for (key_part=keyinfo->key_part, + key_part_end=key_part+ keyinfo->key_parts; + key_part < key_part_end; + key_part++) + { + if (!find_func(key_part->field, data)) + break; + } + if (key_part == key_part_end) + return 1; + } + } + return 0; +} + + +/* + Helper function for list_contains_unique_index. + Find a field reference in a list of ORDER structures. + + SYNOPSIS + find_field_in_order_list () + field The field to search for. + data ORDER *.The list to search in + + DESCRIPTION + Finds a direct reference of the Field in the list. + + RETURN VALUE + 1 found + 0 not found. +*/ + +static bool +find_field_in_order_list (Field *field, void *data) +{ + ORDER *group= (ORDER *) data; + bool part_found= 0; + for (ORDER *tmp_group= group; tmp_group; tmp_group=tmp_group->next) + { + Item *item= (*tmp_group->item)->real_item(); + if (item->type() == Item::FIELD_ITEM && + ((Item_field*) item)->field->eq(field)) + { + part_found= 1; + break; + } + } + return part_found; +} + + +/* + Helper function for list_contains_unique_index. + Find a field reference in a dynamic list of Items. + + SYNOPSIS + find_field_in_item_list () + field in The field to search for. + data in List *.The list to search in + + DESCRIPTION + Finds a direct reference of the Field in the list. 
+ + RETURN VALUE + 1 found + 0 not found. +*/ + +static bool +find_field_in_item_list (Field *field, void *data) +{ + List *fields= (List *) data; + bool part_found= 0; + List_iterator li(*fields); + Item *item; + + while ((item= li++)) + { + if (item->type() == Item::FIELD_ITEM && + ((Item_field*) item)->field->eq(field)) + { + part_found= 1; + break; + } + } + return part_found; +} + + /* Test if we can skip the ORDER BY by using an index. From 82d127b55bbbeaf7fa263122365143f7727b7f10 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 19:33:59 +0400 Subject: [PATCH 45/74] Dec. 31st, 9999 is still a valid date, only starting with Jan 1st 10000 things become invalid (Bug #12356) mysql-test/r/func_sapdb.result: test cases for date range edge cases added mysql-test/r/func_time.result: test cases for date range edge cases added mysql-test/t/func_sapdb.test: test cases for date range edge cases added mysql-test/t/func_time.test: test cases for date range edge cases added --- mysql-test/r/func_sapdb.result | 6 ++++++ mysql-test/r/func_time.result | 6 ++++++ mysql-test/t/func_sapdb.test | 2 ++ mysql-test/t/func_time.test | 6 ++++++ sql/item_timefunc.cc | 11 ++++++----- 5 files changed, 26 insertions(+), 5 deletions(-) diff --git a/mysql-test/r/func_sapdb.result b/mysql-test/r/func_sapdb.result index ea40e1559fd..b18885e218a 100644 --- a/mysql-test/r/func_sapdb.result +++ b/mysql-test/r/func_sapdb.result @@ -71,6 +71,12 @@ makedate(1997,1) select makedate(1997,0); makedate(1997,0) NULL +select makedate(9999,365); +makedate(9999,365) +9999-12-31 +select makedate(9999,366); +makedate(9999,366) +NULL select addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002"); addtime("1997-12-31 23:59:59.999999", "1 1:1:1.000002") 1998-01-02 01:01:01.000001 diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index c90a4258036..fab0bf01f58 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -352,6 +352,12 @@ extract(SECOND FROM "1999-01-02 10:11:12") select extract(MONTH FROM "2001-02-00"); extract(MONTH FROM "2001-02-00") 2 +SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE) +9999-12-31 00:00:00 +SELECT DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +DATE_ADD(str_to_date('9999-12-30 23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE) +9999-12-31 00:00:00 SELECT "1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND; "1900-01-01 00:00:00" + INTERVAL 2147483648 SECOND 1968-01-20 03:14:08 diff --git a/mysql-test/t/func_sapdb.test b/mysql-test/t/func_sapdb.test index 8fd793f067b..930ad37c60c 100644 --- a/mysql-test/t/func_sapdb.test +++ b/mysql-test/t/func_sapdb.test @@ -37,6 +37,8 @@ select weekofyear("1997-11-31 23:59:59.000001"); select makedate(1997,1); select makedate(1997,0); +select makedate(9999,365); +select makedate(9999,366); #Time functions diff --git a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index d69545712c8..b232fb14e1e 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -139,6 +139,12 @@ select extract(MINUTE_SECOND FROM "10:11:12"); select extract(SECOND FROM "1999-01-02 10:11:12"); select extract(MONTH FROM "2001-02-00"); +# +# MySQL Bugs: #12356: DATE_SUB or DATE_ADD incorrectly returns null +# +SELECT DATE_SUB(str_to_date('9999-12-31 00:01:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); +SELECT DATE_ADD(str_to_date('9999-12-30 
23:59:00','%Y-%m-%d %H:%i:%s'), INTERVAL 1 MINUTE); + # # Test big intervals (Bug #3498) # diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 8d3e768b74e..27876096bc5 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -27,6 +27,7 @@ /* TODO: Move month and days to language files */ +/* Day number for Dec 31st, 9999 */ #define MAX_DAY_NUMBER 3652424L static const char *month_names[]= @@ -401,7 +402,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, if (yearday > 0) { uint days= calc_daynr(l_time->year,1,1) + yearday - 1; - if (days <= 0 || days >= MAX_DAY_NUMBER) + if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); } @@ -447,7 +448,7 @@ static bool extract_date_time(DATE_TIME_FORMAT *format, (weekday - 1); } - if (days <= 0 || days >= MAX_DAY_NUMBER) + if (days <= 0 || days > MAX_DAY_NUMBER) goto err; get_date_from_daynr(days,&l_time->year,&l_time->month,&l_time->day); } @@ -1931,7 +1932,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) ltime->hour= (uint) (sec/3600); daynr= calc_daynr(ltime->year,ltime->month,1) + days; /* Day number from year 0 to 9999-12-31 */ - if ((ulonglong) daynr >= MAX_DAY_NUMBER) + if ((ulonglong) daynr > MAX_DAY_NUMBER) goto null_date; get_date_from_daynr((long) daynr, <ime->year, <ime->month, <ime->day); @@ -1941,7 +1942,7 @@ bool Item_date_add_interval::get_date(TIME *ltime, uint fuzzy_date) period= (calc_daynr(ltime->year,ltime->month,ltime->day) + sign * (long) interval.day); /* Daynumber from year 0 to 9999-12-31 */ - if ((ulong) period >= MAX_DAY_NUMBER) + if ((ulong) period > MAX_DAY_NUMBER) goto null_date; get_date_from_daynr((long) period,<ime->year,<ime->month,<ime->day); break; @@ -2412,7 +2413,7 @@ String *Item_func_makedate::val_str(String *str) days= calc_daynr(yearnr,1,1) + daynr - 1; /* Day number from year 0 to 9999-12-31 */ - if (days >= 0 && days < MAX_DAY_NUMBER) + if (days >= 0 && days <= MAX_DAY_NUMBER) { null_value=0; get_date_from_daynr(days,&l_time.year,&l_time.month,&l_time.day); From d8e9dd6110c98e11181d12b4c44da3cc5c09a08d Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 18:17:53 +0200 Subject: [PATCH 46/74] Move "mysqldumpslow" from the client RPM to the server RPM (bug#20216). 
--- support-files/mysql.spec.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index d11fc5ef440..5796e776b83 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -469,6 +469,7 @@ fi %attr(755, root, root) %{_bindir}/mysql_convert_table_format %attr(755, root, root) %{_bindir}/mysqld_multi %attr(755, root, root) %{_bindir}/mysqld_safe +%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysql_explain_log %attr(755, root, root) %{_bindir}/mysql_fix_extensions %attr(755, root, root) %{_bindir}/mysql_fix_privilege_tables @@ -506,7 +507,6 @@ fi %attr(755, root, root) %{_bindir}/mysqlbinlog %attr(755, root, root) %{_bindir}/mysqlcheck %attr(755, root, root) %{_bindir}/mysqldump -%attr(755, root, root) %{_bindir}/mysqldumpslow %attr(755, root, root) %{_bindir}/mysqlimport %attr(755, root, root) %{_bindir}/mysqlshow @@ -578,6 +578,10 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jun 27 2006 Joerg Bruehe + +- move "mysqldumpslow" from the client RPM to the server RPM (bug#20216) + * Sat May 20 2006 Kent Boortz - Always compile for PIC, position independent code. From f4a07612efd66a559fe06810deee5e38cd96d03c Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 22:22:43 +0500 Subject: [PATCH 47/74] BUG#1662 - ALTER TABLE LIKE ignores DATA/INDEX DIRECTPORY Produce a warning if DATA/INDEX DIRECTORY is specified in ALTER TABLE statement. Ignoring of these options is documented in the symbolic links section of the manual. mysql-test/r/symlink.result: Modified test result according to fix for BUG#1662. sql/sql_parse.cc: Produce a warning if DATA/INDEX DIRECTORY is specified in ALTER TABLE statement. 
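For example (a hypothetical session; the table, the column names and the directory paths are invented for illustration, and the warning text is the one added by this patch):

CREATE TABLE t1 (a INT NOT NULL) ENGINE=MyISAM;

# The options are accepted by the parser but have no effect in ALTER
# TABLE; with this patch they are reported instead of silently dropped.
ALTER TABLE t1 ADD b INT, DATA DIRECTORY='/tmp/mysql-data-dir';
SHOW WARNINGS;
# expected to include: Warning ... DATA DIRECTORY option ignored

ALTER TABLE t1 ADD c INT, INDEX DIRECTORY='/tmp/mysql-index-dir';
SHOW WARNINGS;
# expected to include: Warning ... INDEX DIRECTORY option ignored

DROP TABLE t1;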
--- mysql-test/r/symlink.result | 6 ++++++ sql/sql_parse.cc | 6 ++++++ 2 files changed, 12 insertions(+) diff --git a/mysql-test/r/symlink.result b/mysql-test/r/symlink.result index caff53f8fd7..f6779689133 100644 --- a/mysql-test/r/symlink.result +++ b/mysql-test/r/symlink.result @@ -65,18 +65,24 @@ t9 CREATE TABLE `t9` ( ) ENGINE=MyISAM AUTO_INCREMENT=16725 DEFAULT CHARSET=latin1 DATA DIRECTORY='TEST_DIR/var/tmp/' INDEX DIRECTORY='TEST_DIR/var/run/' drop database mysqltest; create table t1 (a int not null) engine=myisam; +Warnings: +Warning 0 DATA DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL default '0' ) ENGINE=MyISAM DEFAULT CHARSET=latin1 alter table t1 add b int; +Warnings: +Warning 0 DATA DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL default '0', `b` int(11) default NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 +Warnings: +Warning 0 INDEX DIRECTORY option ignored show create table t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 504339684ce..fbe36bfdc4a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2678,6 +2678,12 @@ unsent_create_error: } } /* Don't yet allow changing of symlinks with ALTER TABLE */ + if (lex->create_info.data_file_name) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "DATA DIRECTORY option ignored"); + if (lex->create_info.index_file_name) + push_warning(thd, MYSQL_ERROR::WARN_LEVEL_WARN, 0, + "INDEX DIRECTORY option ignored"); lex->create_info.data_file_name=lex->create_info.index_file_name=0; /* ALTER TABLE ends previous transaction */ if (end_active_trans(thd)) From 90cb4c03fd2963a395af308da26efde3f8f97f42 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 21:28:32 +0400 Subject: [PATCH 48/74] Bug#17203: "sql_no_cache sql_cache" in views created from prepared statement The problem was that we restored SQL_CACHE, SQL_NO_CACHE flags in SELECT statement from internal structures based on value set later at runtime, not the original value set by the user. The solution is to remember that original value. mysql-test/r/auto_increment.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/func_compress.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/func_math.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/func_system.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/func_time.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/information_schema.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/query_cache.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/rpl_get_lock.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/rpl_master_pos_wait.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/show_check.result: Add result for bug#17203. mysql-test/r/subselect.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/type_blob.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/r/variables.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. 
mysql-test/r/view.result: Update result to not report SQL_NO_CACHE if it wasn't there at first place. mysql-test/t/show_check.test: Add test case for bug#17203. sql/sql_lex.cc: Reset SELECT_LEX::sql_cache together with SELECT_LEX::options. sql/sql_lex.h: Add SELECT_LEX::sql_cache field to store original user setting. sql/sql_select.cc: Output SQL_CACHE and SQL_NO_CACHE depending on stored original user setting. sql/sql_yacc.yy: Make effect of SQL_CACHE and SQL_NO_CACHE mutually exclusive. Ignore SQL_CACHE if SQL_NO_CACHE was used. Remember what was set by the user. Reset SELECT_LEX::sql_cache together with SELECT_LEX::options. --- mysql-test/r/auto_increment.result | 2 +- mysql-test/r/func_compress.result | 4 +- mysql-test/r/func_math.result | 2 +- mysql-test/r/func_system.result | 2 +- mysql-test/r/func_time.result | 2 +- mysql-test/r/information_schema.result | 12 ++--- mysql-test/r/query_cache.result | 2 +- mysql-test/r/rpl_get_lock.result | 2 +- mysql-test/r/rpl_master_pos_wait.result | 2 +- mysql-test/r/show_check.result | 60 +++++++++++++++++++++ mysql-test/r/subselect.result | 8 +-- mysql-test/r/type_blob.result | 2 +- mysql-test/r/variables.result | 6 +-- mysql-test/r/view.result | 14 ++--- mysql-test/t/show_check.test | 72 +++++++++++++++++++++++++ sql/sql_lex.cc | 3 ++ sql/sql_lex.h | 8 +++ sql/sql_select.cc | 17 ++++-- sql/sql_yacc.yy | 16 +++++- 19 files changed, 200 insertions(+), 36 deletions(-) diff --git a/mysql-test/r/auto_increment.result b/mysql-test/r/auto_increment.result index 3797af11a11..afbff905699 100644 --- a/mysql-test/r/auto_increment.result +++ b/mysql-test/r/auto_increment.result @@ -143,7 +143,7 @@ explain extended select last_insert_id(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache last_insert_id() AS `last_insert_id()` +Note 1003 select last_insert_id() AS `last_insert_id()` insert into t1 set i = 254; ERROR 23000: Duplicate entry '254' for key 1 select last_insert_id(); diff --git a/mysql-test/r/func_compress.result b/mysql-test/r/func_compress.result index 8d6fa9927ce..e3d31566741 100644 --- a/mysql-test/r/func_compress.result +++ b/mysql-test/r/func_compress.result @@ -11,7 +11,7 @@ explain extended select uncompress(compress(@test_compress_string)); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))` +Note 1003 select uncompress(compress((@test_compress_string))) AS `uncompress(compress(@test_compress_string))` select uncompressed_length(compress(@test_compress_string))=length(@test_compress_string); uncompressed_length(compress(@test_compress_string))=length(@test_compress_string) 1 @@ -19,7 +19,7 @@ explain extended select uncompressed_length(compress(@test_compress_string))=len id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)` +Note 1003 select (uncompressed_length(compress((@test_compress_string))) = length((@test_compress_string))) AS `uncompressed_length(compress(@test_compress_string))=length(@test_compress_string)` select 
uncompressed_length(compress(@test_compress_string)); uncompressed_length(compress(@test_compress_string)) 117 diff --git a/mysql-test/r/func_math.result b/mysql-test/r/func_math.result index 43748257203..fc9bfb3b612 100644 --- a/mysql-test/r/func_math.result +++ b/mysql-test/r/func_math.result @@ -90,7 +90,7 @@ explain extended select rand(999999),rand(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache rand(999999) AS `rand(999999)`,rand() AS `rand()` +Note 1003 select rand(999999) AS `rand(999999)`,rand() AS `rand()` select pi(),format(sin(pi()/2),6),format(cos(pi()/2),6),format(abs(tan(pi())),6),format(cot(1),6),format(asin(1),6),format(acos(0),6),format(atan(1),6); pi() format(sin(pi()/2),6) format(cos(pi()/2),6) format(abs(tan(pi())),6) format(cot(1),6) format(asin(1),6) format(acos(0),6) format(atan(1),6) 3.141593 1.000000 0.000000 0.000000 0.642093 1.570796 1.570796 0.785398 diff --git a/mysql-test/r/func_system.result b/mysql-test/r/func_system.result index 1c1c6dff21e..00bef09715d 100644 --- a/mysql-test/r/func_system.result +++ b/mysql-test/r/func_system.result @@ -41,7 +41,7 @@ explain extended select database(), user(); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache database() AS `database()`,user() AS `user()` +Note 1003 select database() AS `database()`,user() AS `user()` create table t1 (version char(40)) select database(), user(), version() as 'version'; show create table t1; Table Create Table diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index aaa86378626..a10e6675522 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -723,7 +723,7 @@ explain extended select period_add("9602",-12),period_diff(199505,"9404"),from_d id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s 
%w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)` +Note 1003 select period_add(_latin1'9602',-(12)) AS `period_add("9602",-12)`,period_diff(199505,_latin1'9404') AS `period_diff(199505,"9404")`,from_days(to_days(_latin1'960101')) AS `from_days(to_days("960101"))`,dayofmonth(_latin1'1997-01-02') AS `dayofmonth("1997-01-02")`,month(_latin1'1997-01-02') AS `month("1997-01-02")`,monthname(_latin1'1972-03-04') AS `monthname("1972-03-04")`,dayofyear(_latin1'0000-00-00') AS `dayofyear("0000-00-00")`,hour(_latin1'1997-03-03 23:03:22') AS `HOUR("1997-03-03 23:03:22")`,minute(_latin1'23:03:22') AS `MINUTE("23:03:22")`,second(230322) AS `SECOND(230322)`,quarter(980303) AS `QUARTER(980303)`,week(_latin1'1998-03-03',0) AS `WEEK("1998-03-03")`,yearweek(_latin1'2000-01-01',1) AS `yearweek("2000-01-01",1)`,week(19950101,1) AS `week(19950101,1)`,year(_latin1'98-02-03') AS `year("98-02-03")`,(weekday(curdate()) - weekday(now())) AS `weekday(curdate())-weekday(now())`,dayname(_latin1'1962-03-03') AS `dayname("1962-03-03")`,unix_timestamp() AS `unix_timestamp()`,sec_to_time((time_to_sec(_latin1'0:30:47') / 6.21)) AS `sec_to_time(time_to_sec("0:30:47")/6.21)`,curtime() AS `curtime()`,utc_time() AS `utc_time()`,curdate() AS `curdate()`,utc_date() AS `utc_date()`,utc_timestamp() AS `utc_timestamp()`,date_format(_latin1'1997-01-02 03:04:05',_latin1'%M %W %D %Y %y %m %d %h %i %s %w') AS `date_format("1997-01-02 03:04:05", "%M %W %D %Y %y %m %d %h %i %s %w")`,from_unixtime(unix_timestamp(_latin1'1994-03-02 10:11:12')) AS `from_unixtime(unix_timestamp("1994-03-02 10:11:12"))`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `"1997-12-31 23:59:59" + INTERVAL 1 SECOND`,(_latin1'1998-01-01 00:00:00' - interval 1 second) AS `"1998-01-01 00:00:00" - INTERVAL 1 SECOND`,(_latin1'1997-12-31' + interval 1 day) AS `INTERVAL 1 DAY + "1997-12-31"`,extract(year from _latin1'1999-01-02 10:11:12') AS `extract(YEAR FROM "1999-01-02 10:11:12")`,(_latin1'1997-12-31 23:59:59' + interval 1 second) AS `date_add("1997-12-31 23:59:59",INTERVAL 1 SECOND)` SET @TMP=NOW(); CREATE TABLE t1 (d DATETIME); INSERT INTO t1 VALUES (NOW()); diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 6da07922251..63af90aa0f1 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -369,11 +369,11 @@ show keys from v4; Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment select * from information_schema.views where TABLE_NAME like "v%"; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE -NULL test v0 select sql_no_cache `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER -NULL test v1 select sql_no_cache `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER -NULL test v2 select sql_no_cache 
`columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER -NULL test v3 select sql_no_cache `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER -NULL test v4 select sql_no_cache `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v0 select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER +NULL test v1 select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER +NULL test v2 select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER +NULL test v3 select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER +NULL test v4 select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER drop view v0, v1, v2, v3, v4; create table t1 (a int); grant select,update,insert on t1 to mysqltest_1@localhost; @@ -687,7 +687,7 @@ Warnings: Warning 1356 View 'test.v2' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them show create table v3; View Create View -v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select sql_no_cache `test`.`sub1`(1) AS `c` +v3 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v3` AS select `test`.`sub1`(1) AS `c` Warnings: Warning 1356 View 'test.v3' references invalid table(s) or column(s) or function(s) or definer/invoker of view lack rights to use them drop view v2; diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index 4bf4ebb910d..926a980f9c4 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -231,7 +231,7 @@ explain extended select benchmark(1,1) from t1; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1` +Note 1003 select benchmark(1,1) AS `benchmark(1,1)` from `test`.`t1` show status like "Qcache_queries_in_cache"; Variable_name Value Qcache_queries_in_cache 0 diff --git a/mysql-test/r/rpl_get_lock.result b/mysql-test/r/rpl_get_lock.result index 26f33bfb42c..da300d99964 100644 --- a/mysql-test/r/rpl_get_lock.result +++ b/mysql-test/r/rpl_get_lock.result @@ -25,7 +25,7 @@ explain extended select is_free_lock("lock"), is_used_lock("lock"); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")` +Note 1003 select is_free_lock(_latin1'lock') AS `is_free_lock("lock")`,is_used_lock(_latin1'lock') AS `is_used_lock("lock")` select is_free_lock("lock2"); is_free_lock("lock2") 1 diff --git 
a/mysql-test/r/rpl_master_pos_wait.result b/mysql-test/r/rpl_master_pos_wait.result index e92d1ffa361..2f3e47999cf 100644 --- a/mysql-test/r/rpl_master_pos_wait.result +++ b/mysql-test/r/rpl_master_pos_wait.result @@ -11,7 +11,7 @@ explain extended select master_pos_wait('master-bin.999999',0,2); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)` +Note 1003 select master_pos_wait(_latin1'master-bin.999999',0,2) AS `master_pos_wait('master-bin.999999',0,2)` select master_pos_wait('master-bin.999999',0); stop slave sql_thread; master_pos_wait('master-bin.999999',0) diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index 61a820b4469..994501767ba 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -565,3 +565,63 @@ DROP TABLE tyt2; DROP TABLE urkunde; SHOW TABLES FROM non_existing_database; ERROR 42000: Unknown database 'non_existing_database' +DROP VIEW IF EXISTS v1; +DROP PROCEDURE IF EXISTS p1; +CREATE VIEW v1 AS SELECT 1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select 1 AS `1` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE 1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1; +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache 1 AS `1` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache now() AS `NOW()` +DROP VIEW v1; +CREATE PROCEDURE p1() +BEGIN +SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1'; +PREPARE stmt FROM @s; +EXECUTE stmt; +DROP PREPARE stmt; +END | +CALL p1(); +SHOW CREATE VIEW v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_cache 1 AS `1` +DROP PROCEDURE p1; +DROP VIEW v1; diff --git 
a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index e4bc59e4d19..07630ffee0f 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -1019,19 +1019,19 @@ id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found 2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache (select sql_no_cache rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1` +Note 1003 select (select rand() AS `RAND()` from `test`.`t1`) AS `(SELECT RAND() FROM t1)` from `test`.`t1` EXPLAIN EXTENDED SELECT (SELECT ENCRYPT('test') FROM t1) FROM t1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found 2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache (select sql_no_cache encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1` +Note 1003 select (select encrypt(_latin1'test') AS `ENCRYPT('test')` from `test`.`t1`) AS `(SELECT ENCRYPT('test') FROM t1)` from `test`.`t1` EXPLAIN EXTENDED SELECT (SELECT BENCHMARK(1,1) FROM t1) FROM t1; id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 system NULL NULL NULL NULL 0 const row not found 2 UNCACHEABLE SUBQUERY t1 system NULL NULL NULL NULL 0 const row not found Warnings: -Note 1003 select sql_no_cache (select sql_no_cache benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1` +Note 1003 select (select benchmark(1,1) AS `BENCHMARK(1,1)` from `test`.`t1`) AS `(SELECT BENCHMARK(1,1) FROM t1)` from `test`.`t1` drop table t1; CREATE TABLE `t1` ( `mot` varchar(30) character set latin1 NOT NULL default '', @@ -1126,7 +1126,7 @@ id select_type table type possible_keys key key_len ref rows Extra 2 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3 3 UNCACHEABLE SUBQUERY t1 ALL NULL NULL NULL NULL 3 Warnings: -Note 1003 select sql_no_cache `test`.`t1`.`a` AS `a`,(select sql_no_cache (select sql_no_cache rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1` +Note 1003 select `test`.`t1`.`a` AS `a`,(select (select rand() AS `rand()` from `test`.`t1` limit 1) AS `(select rand() from t1 limit 1)` from `test`.`t1` limit 1) AS `(select (select rand() from t1 limit 1) from t1 limit 1)` from `test`.`t1` drop table t1; select t1.Continent, t2.Name, t2.Population from t1 LEFT JOIN t2 ON t1.Code = t2.Country where t2.Population IN (select max(t2.Population) AS Population from t2, t1 where t2.Country = t1.Code group by Continent); ERROR 42S02: Table 'test.t1' doesn't exist diff --git a/mysql-test/r/type_blob.result b/mysql-test/r/type_blob.result index b366b1ed755..4fd220045c2 100644 --- a/mysql-test/r/type_blob.result +++ b/mysql-test/r/type_blob.result @@ -517,7 +517,7 @@ coercibility(load_file('../../std_data/words.dat')); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS 
`collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))` +Note 1003 select charset(load_file(_latin1'../../std_data/words.dat')) AS `charset(load_file('../../std_data/words.dat'))`,collation(load_file(_latin1'../../std_data/words.dat')) AS `collation(load_file('../../std_data/words.dat'))`,coercibility(load_file(_latin1'../../std_data/words.dat')) AS `coercibility(load_file('../../std_data/words.dat'))` update t1 set imagem=load_file('../../std_data/words.dat') where id=1; select if(imagem is null, "ERROR", "OK"),length(imagem) from t1 where id = 1; if(imagem is null, "ERROR", "OK") length(imagem) diff --git a/mysql-test/r/variables.result b/mysql-test/r/variables.result index 8cee60cf49a..cd51ff9485f 100644 --- a/mysql-test/r/variables.result +++ b/mysql-test/r/variables.result @@ -75,7 +75,7 @@ explain extended select @t1:=(@t2:=1)+@t3:=4,@t1,@t2,@t3; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3` +Note 1003 select (@t1:=((@t2:=1) + (@t3:=4))) AS `@t1:=(@t2:=1)+@t3:=4`,(@t1) AS `@t1`,(@t2) AS `@t2`,(@t3) AS `@t3` select @t5; @t5 1.23456 @@ -135,7 +135,7 @@ explain extended select last_insert_id(345); id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache last_insert_id(345) AS `last_insert_id(345)` +Note 1003 select last_insert_id(345) AS `last_insert_id(345)` select @@IDENTITY,last_insert_id(), @@identity; @@IDENTITY last_insert_id() @@identity 345 345 345 @@ -143,7 +143,7 @@ explain extended select @@IDENTITY,last_insert_id(), @@identity; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL No tables used Warnings: -Note 1003 select sql_no_cache 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity` +Note 1003 select 345 AS `@@IDENTITY`,last_insert_id() AS `last_insert_id()`,345 AS `@@identity` set big_tables=OFF, big_tables=ON, big_tables=0, big_tables=1, big_tables="OFF", big_tables="ON"; set global concurrent_insert=2; show variables like 'concurrent_insert'; diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 5bb407f4256..72cffb9531c 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -672,7 +672,7 @@ drop table t1; CREATE VIEW v1 (f1,f2,f3,f4) AS SELECT connection_id(), pi(), current_user(), version(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4` +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select connection_id() AS `f1`,pi() AS `f2`,current_user() AS `f3`,version() AS `f4` drop view v1; create table t1 (s1 int); create table t2 (s2 int); @@ -787,7 +787,7 @@ create function `f``1` () returns int return 5; create view v1 as select test.`f``1` (); show create view v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`f``1`() AS `test.``f````1`` ()` +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY 
DEFINER VIEW `v1` AS select `test`.`f``1`() AS `test.``f````1`` ()` select * from v1; test.`f``1` () 5 @@ -1868,14 +1868,14 @@ create table t2 (b timestamp default now()); create view v1 as select a,b,t1.a < now() from t1,t2 where t1.a < now(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now()) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a`,`t2`.`b` AS `b`,(`t1`.`a` < now()) AS `t1.a < now()` from (`t1` join `t2`) where (`t1`.`a` < now()) drop view v1; drop table t1, t2; CREATE TABLE t1 ( a varchar(50) ); CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = CURRENT_USER(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user()) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = current_user()) DROP VIEW v1; CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = VERSION(); SHOW CREATE VIEW v1; @@ -1885,7 +1885,7 @@ DROP VIEW v1; CREATE VIEW v1 AS SELECT * FROM t1 WHERE a = DATABASE(); SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database()) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `t1`.`a` AS `a` from `t1` where (`t1`.`a` = database()) DROP VIEW v1; DROP TABLE t1; CREATE TABLE t1 (col1 time); @@ -2538,7 +2538,7 @@ show create view v1; drop view v1; // View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache `test`.`t1`.`id` AS `id` from `t1` +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select `test`.`t1`.`id` AS `id` from `t1` create table t1(f1 int, f2 int); create view v1 as select ta.f1 as a, tb.f1 as b from t1 ta, t1 tb where ta.f1=tb .f1 and ta.f2=tb.f2; @@ -2683,7 +2683,7 @@ SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75; SHOW CREATE VIEW v1; View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select sql_no_cache (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75) +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select (year(now()) - year(`t1`.`DOB`)) AS `Age` from `t1` having (`Age` < 75) SELECT (year(now())-year(DOB)) AS Age FROM t1 HAVING Age < 75; Age 42 diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index 52d262677ff..6937cbe949d 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -424,3 +424,75 @@ DROP TABLE urkunde; # --error 1049 SHOW TABLES FROM non_existing_database; + + +# +# Bug#17203: "sql_no_cache sql_cache" in views created from prepared +# statement +# +# The problem was that initial user setting was forgotten, and current +# runtime-determined values of the flags were shown instead. 
+# +--disable_warnings +DROP VIEW IF EXISTS v1; +DROP PROCEDURE IF EXISTS p1; +--enable_warnings + +# Check that SHOW CREATE VIEW shows SQL_CACHE flag exaclty as +# specified by the user. +CREATE VIEW v1 AS SELECT 1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_CACHE 1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_NO_CACHE 1; +SHOW CREATE VIEW v1; +DROP VIEW v1; + +# Usage of NOW() disables caching, but we still have show what the +# user have specified. +CREATE VIEW v1 AS SELECT NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +# Check that SQL_NO_CACHE always wins. +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +CREATE VIEW v1 AS SELECT SQL_CACHE SQL_NO_CACHE SQL_CACHE NOW(); +SHOW CREATE VIEW v1; +DROP VIEW v1; + +# Check CREATE VIEW in a prepared statement in a procedure. +delimiter |; +CREATE PROCEDURE p1() +BEGIN + SET @s= 'CREATE VIEW v1 AS SELECT SQL_CACHE 1'; + PREPARE stmt FROM @s; + EXECUTE stmt; + DROP PREPARE stmt; +END | +delimiter ;| +CALL p1(); +SHOW CREATE VIEW v1; + +DROP PROCEDURE p1; +DROP VIEW v1; + +# End of 5.0 tests. diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 0bbfc64e272..47af816f41d 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -140,6 +140,7 @@ void lex_start(THD *thd, uchar *buf,uint length) lex->select_lex.link_next= lex->select_lex.slave= lex->select_lex.next= 0; lex->select_lex.link_prev= (st_select_lex_node**)&(lex->all_selects_list); lex->select_lex.options= 0; + lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; lex->select_lex.init_order(); lex->select_lex.group_list.empty(); lex->describe= 0; @@ -1063,6 +1064,7 @@ int MYSQLlex(void *arg, void *yythd) void st_select_lex_node::init_query() { options= 0; + sql_cache= SQL_CACHE_UNSPECIFIED; linkage= UNSPECIFIED_TYPE; no_error= no_table_names_allowed= 0; uncacheable= 0; @@ -1139,6 +1141,7 @@ void st_select_lex::init_select() table_join_options= 0; in_sum_expr= with_wild= 0; options= 0; + sql_cache= SQL_CACHE_UNSPECIFIED; braces= 0; when_list.empty(); expr_list.empty(); diff --git a/sql/sql_lex.h b/sql/sql_lex.h index d63c6ef9f20..285e1d6d5a6 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -311,6 +311,14 @@ protected: public: ulonglong options; + + /* + In sql_cache we store SQL_CACHE flag as specified by user to be + able to restore SELECT statement from internal structures. 
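+    The run-time flags (safe_to_cache_query, OPTION_TO_QUERY_CACHE) only
+    tell whether the result will actually be cached -- for example NOW()
+    clears safe_to_cache_query -- not which keyword the user wrote.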
+ */ + enum e_sql_cache { SQL_CACHE_UNSPECIFIED, SQL_NO_CACHE, SQL_CACHE }; + e_sql_cache sql_cache; + /* result of this query can't be cached, bit field, can be : UNCACHEABLE_DEPENDENT diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9f317842d98..4899c1e16a3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -14277,10 +14277,19 @@ void st_select_lex::print(THD *thd, String *str) str->append(STRING_WITH_LEN("sql_buffer_result ")); if (options & OPTION_FOUND_ROWS) str->append(STRING_WITH_LEN("sql_calc_found_rows ")); - if (!thd->lex->safe_to_cache_query) - str->append(STRING_WITH_LEN("sql_no_cache ")); - if (options & OPTION_TO_QUERY_CACHE) - str->append(STRING_WITH_LEN("sql_cache ")); + switch (sql_cache) + { + case SQL_NO_CACHE: + str->append(STRING_WITH_LEN("sql_no_cache ")); + break; + case SQL_CACHE: + str->append(STRING_WITH_LEN("sql_cache ")); + break; + case SQL_CACHE_UNSPECIFIED: + break; + default: + DBUG_ASSERT(0); + } //Item List bool first= 1; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index e45be1ef148..952a8eb44ea 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -4002,10 +4002,21 @@ select_option: YYABORT; Select->options|= OPTION_FOUND_ROWS; } - | SQL_NO_CACHE_SYM { Lex->safe_to_cache_query=0; } + | SQL_NO_CACHE_SYM + { + Lex->safe_to_cache_query=0; + Lex->select_lex.options&= ~OPTION_TO_QUERY_CACHE; + Lex->select_lex.sql_cache= SELECT_LEX::SQL_NO_CACHE; + } | SQL_CACHE_SYM { - Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + /* Honor this flag only if SQL_NO_CACHE wasn't specified. */ + if (Lex->select_lex.sql_cache != SELECT_LEX::SQL_NO_CACHE) + { + Lex->safe_to_cache_query=1; + Lex->select_lex.options|= OPTION_TO_QUERY_CACHE; + Lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE; + } } | ALL { Select->options|= SELECT_ALL; } ; @@ -6319,6 +6330,7 @@ truncate: LEX* lex= Lex; lex->sql_command= SQLCOM_TRUNCATE; lex->select_lex.options= 0; + lex->select_lex.sql_cache= SELECT_LEX::SQL_CACHE_UNSPECIFIED; lex->select_lex.init_order(); } ; From 0604daa4c7f173eba5662df6aaaebf39dae6f964 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 20:18:03 +0200 Subject: [PATCH 49/74] Revert all previous attempts to call "mysql_upgrade" during RPM upgrade. This finishes bug#18516, as far as "generic RPMs" are concerned. support-files/mysql.spec.sh: Revert all previous attempts to call "mysql_upgrade" during RPM upgrade, there are some more aspects which need to be solved before this is possible. For now, just ensure the binary "mysql_upgrade" is delivered and installed. This finishes bug#18516, as far as "generic RPMs" are concerned. --- support-files/mysql.spec.sh | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index a452811dc91..9f08d9330d4 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -485,25 +485,7 @@ chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir # Initiate databases if needed %{_bindir}/mysql_install_db --rpm --user=%{mysqld_user} -# Upgrade databases if needed -# This must be done as database user "root", who should be password-protected, -# but this password is not available here. -# So ensure the server is isolated as much as possible, and start it so that -# passwords are not checked. -# See the related change in the start script "/etc/init.d/mysql". 
-if type mktemp >/dev/null 2>&1 -then - mysql_tmp_sockdir=`mktemp -dt` -else - PID=$$ - mysql_tmp_sockdir=/tmp/mysql-$PID - ( umask 077 ; mkdir $mysql_tmp_sockdir ) -fi -chown %{mysqld_user}:%{mysqld_group} $mysql_tmp_sockdir -%{_sysconfdir}/init.d/mysql start --skip-networking --skip-grant-tables --socket=$mysql_tmp_sockdir/upgrade.sock -%{_bindir}/mysql_upgrade --socket=$mysql_tmp_sockdir/upgrade.sock -%{_sysconfdir}/init.d/mysql stop --skip-networking --skip-grant-tables -rm -fr $mysql_tmp_sockdir +# Upgrade databases if needed would go here - but it cannot be automated yet # Change permissions again to fix any new files. chown -R %{mysqld_user}:%{mysqld_group} $mysql_datadir @@ -741,6 +723,12 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Tue Jun 27 2006 Joerg Bruehe + +- Revert all previous attempts to call "mysql_upgrade" during RPM upgrade, + there are some more aspects which need to be solved before this is possible. + For now, just ensure the binary "mysql_upgrade" is delivered and installed. + * Thu Jun 22 2006 Joerg Bruehe - Close a gap of the previous version by explicitly using From 88afd72b47973afa087a34299f1a3a9e12d143ca Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 17:16:02 -0700 Subject: [PATCH 50/74] Bug #18005: Creating a trigger on mysql.event leads to server crash on scheduler startup Bug #18361: Triggers on mysql.user table cause server crash Because they do not work, we do not allow creating triggers on tables within the 'mysql' schema. (They may be made to work and re-enabled at some later date, but not in 5.0 or 5.1.) mysql-test/r/trigger.result: Add new results mysql-test/t/trigger.test: Add new regression test for creating triggers on system schema sql/share/errmsg.txt: Add new error message sql/sql_trigger.cc: Disallow creating triggers on tables in the 'mysql' schema --- mysql-test/r/trigger.result | 12 ++++++++++++ mysql-test/t/trigger.test | 24 +++++++++++++++++++++++- sql/share/errmsg.txt | 2 ++ sql/sql_trigger.cc | 9 +++++++++ 4 files changed, 46 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index d4791c6b117..4fa7a9ca8bd 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -1078,3 +1078,15 @@ i1 43 51 DROP TABLE t1; +create trigger wont_work after update on mysql.user for each row +begin +set @a:= 1; +end| +ERROR HY000: Triggers can not be created on system tables +use mysql| +create trigger wont_work after update on event for each row +begin +set @a:= 1; +end| +ERROR HY000: Triggers can not be created on system tables +End of 5.0 tests diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 3743d8f5c76..6c9b5063f32 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -1281,4 +1281,26 @@ SELECT * FROM t1; DROP TABLE t1; -# End of 5.0 tests +# +# Bug #18005: Creating a trigger on mysql.event leads to server crash on +# scheduler startup +# +# Bug #18361: Triggers on mysql.user table cause server crash +# +# We don't allow triggers on the mysql schema +delimiter |; +--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA +create trigger wont_work after update on mysql.user for each row +begin + set @a:= 1; +end| +# Try when we're already using the mysql schema +use mysql| +--error ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA +create trigger wont_work after update on event for each row +begin + set @a:= 1; +end| +delimiter ;| + +--echo End of 5.0 tests diff --git a/sql/share/errmsg.txt 
b/sql/share/errmsg.txt index 4e7b9200d88..9b20c37ece2 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5619,3 +5619,5 @@ ER_NON_GROUPING_FIELD_USED 42000 eng "non-grouping field '%-.64s' is used in %-.64s clause" ER_TABLE_CANT_HANDLE_SPKEYS eng "The used table type doesn't support SPATIAL indexes" +ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA + eng "Triggers can not be created on system tables" diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index f943b014118..74e36de500c 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -183,6 +183,15 @@ bool mysql_create_or_drop_trigger(THD *thd, TABLE_LIST *tables, bool create) !(tables= add_table_for_trigger(thd, thd->lex->spname))) DBUG_RETURN(TRUE); + /* + We don't allow creating triggers on tables in the 'mysql' schema + */ + if (create && !my_strcasecmp(system_charset_info, "mysql", tables->db)) + { + my_error(ER_NO_TRIGGERS_ON_SYSTEM_SCHEMA, MYF(0)); + DBUG_RETURN(TRUE); + } + /* We should have only one table in table list. */ DBUG_ASSERT(tables->next_global == 0); From 39defccfd4ebe99c9c827ef07cdf7514e52f5951 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 10:21:01 +0400 Subject: [PATCH 51/74] Fixing BUG#17719 "Delete of binlog files fails on Windows" and BUG#19208 "Test 'rpl000017' hangs on Windows". Both bugs are caused by attempting to delete an opened file and to create immediatedly a new one with the same name. On Windows it can be supported only on NT-platforms (by using FILE_SHARE_DELETE mode and with renaming the file before deletion). Because deleting not-closed files is not supported on all platforms (e.g. Win 98|ME) this is to be considered harmful and should be eliminated by a "code redesign". VC++Files/mysys/mysys.vcproj: To be sure that __NT__ is defined for Win configurations. Temporary, to be changed in more appropriate way. include/my_sys.h: Adding my_delete_allow_opened to be invoked to delete a (possibly) not closed file on Windows NT-platforms. mysys/my_delete.c: Adding nt_share_delete() function implementing a (possibly) not closed file deletion on Windows NT. sql/log.cc: MYSQL_LOG::reset_logs(): Deleting usually not closed binlog files. 
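For illustration, the sketch below shows the bare Win32 pattern the new
mysys code is built on (hypothetical file names, no mysys wrappers, only
minimal error handling): a file opened with FILE_SHARE_DELETE can be
renamed and then deleted while a handle to it is still open, so its
original name becomes reusable at once.

  #include <windows.h>
  #include <stdio.h>

  int main(void)
  {
    /* Keep a handle open, as the server does with the active binlog.
       FILE_SHARE_DELETE is what later allows rename/delete to succeed. */
    HANDLE h= CreateFileA("master-bin.000001", GENERIC_READ,
                          FILE_SHARE_READ | FILE_SHARE_WRITE |
                          FILE_SHARE_DELETE,
                          NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
    if (h == INVALID_HANDLE_VALUE)
      return 1;

    /* Rename first so the original name is free immediately ... */
    if (!MoveFileA("master-bin.000001", "master-bin.000001.1.deleted"))
      printf("rename failed: %lu\n", GetLastError());
    /* ... then delete; the data disappears when the last handle closes. */
    else if (!DeleteFileA("master-bin.000001.1.deleted"))
      printf("delete failed: %lu\n", GetLastError());

    CloseHandle(h);
    return 0;
  }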
--- VC++Files/mysys/mysys.vcproj | 10 +++---- include/my_sys.h | 8 ++++++ mysys/my_delete.c | 51 ++++++++++++++++++++++++++++++++++++ sql/log.cc | 4 +-- 4 files changed, 66 insertions(+), 7 deletions(-) diff --git a/VC++Files/mysys/mysys.vcproj b/VC++Files/mysys/mysys.vcproj index 1053b605119..2c834cab5b2 100644 --- a/VC++Files/mysys/mysys.vcproj +++ b/VC++Files/mysys/mysys.vcproj @@ -22,7 +22,7 @@ Optimization="0" OptimizeForProcessor="2" AdditionalIncludeDirectories="../include,../zlib" - PreprocessorDefinitions="_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;USE_SYMDIR" + PreprocessorDefinitions="__NT__;_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;USE_SYMDIR" RuntimeLibrary="1" PrecompiledHeaderFile=".\debug/mysys.pch" AssemblerListingLocation=".\debug/" @@ -71,7 +71,7 @@ InlineFunctionExpansion="1" OptimizeForProcessor="2" AdditionalIncludeDirectories="../include,../zlib" - PreprocessorDefinitions="USE_SYMDIR;NDEBUG;DBUG_OFF;_WINDOWS" + PreprocessorDefinitions="__NT__;USE_SYMDIR;NDEBUG;DBUG_OFF;_WINDOWS" StringPooling="TRUE" RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" @@ -121,7 +121,7 @@ InlineFunctionExpansion="1" OptimizeForProcessor="2" AdditionalIncludeDirectories="../include,../zlib" - PreprocessorDefinitions="DBUG_OFF;_WINDOWS;NDEBUG" + PreprocessorDefinitions="__NT__;DBUG_OFF;_WINDOWS;NDEBUG" StringPooling="TRUE" RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" @@ -170,7 +170,7 @@ Optimization="0" OptimizeForProcessor="2" AdditionalIncludeDirectories="../include,../zlib" - PreprocessorDefinitions="_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;USE_SYMDIR;USE_TLS" + PreprocessorDefinitions="__NT__;_DEBUG;SAFEMALLOC;SAFE_MUTEX;_WINDOWS;USE_SYMDIR;USE_TLS" RuntimeLibrary="1" PrecompiledHeaderFile=".\mysys___Win32_TLS_DEBUG/mysys.pch" AssemblerListingLocation=".\mysys___Win32_TLS_DEBUG/" @@ -219,7 +219,7 @@ InlineFunctionExpansion="1" OptimizeForProcessor="2" AdditionalIncludeDirectories="../include,../zlib" - PreprocessorDefinitions="DBUG_OFF;_WINDOWS;NDEBUG;USE_TLS" + PreprocessorDefinitions="__NT__;DBUG_OFF;_WINDOWS;NDEBUG;USE_TLS" StringPooling="TRUE" RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" diff --git a/include/my_sys.h b/include/my_sys.h index 229389f1ac5..6457113d282 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -541,6 +541,7 @@ typedef int (*Process_option_func)(void *ctx, const char *group_name, #include + /* Prototypes for mysys and my_func functions */ extern int my_copy(const char *from,const char *to,myf MyFlags); @@ -613,6 +614,13 @@ extern File my_sopen(const char *path, int oflag, int shflag, int pmode); #endif extern int check_if_legal_filename(const char *path); +#if defined(__WIN__) && defined(__NT__) +extern int nt_share_delete(const char *name,myf MyFlags); +#define my_delete_allow_opened(fname,flags) nt_share_delete((fname),(flags)) +#else +#define my_delete_allow_opened(fname,flags) my_delete((fname),(flags)) +#endif + #ifndef TERMINATE extern void TERMINATE(FILE *file); #endif diff --git a/mysys/my_delete.c b/mysys/my_delete.c index 5670f03da64..de2a9814a56 100644 --- a/mysys/my_delete.c +++ b/mysys/my_delete.c @@ -32,3 +32,54 @@ int my_delete(const char *name, myf MyFlags) } DBUG_RETURN(err); } /* my_delete */ + +#if defined(__WIN__) && defined(__NT__) +/* + Delete file which is possibly not closed. + + This function is intended to be used exclusively as a temporal solution + for Win NT in case when it is needed to delete a not closed file (note + that the file must be opened everywhere with FILE_SHARE_DELETE mode). 
+ Deleting not-closed files can not be supported on Win 98|ME (and because + of that is considered harmful). + + The function deletes the file with its preliminary renaming. This is + because when not-closed share-delete file is deleted it still lives on + a disk until it will not be closed everwhere. This may conflict with an + attempt to create a new file with the same name. The deleted file is + renamed to ..deleted where - the initial name of the + file, - a hexadecimal number chosen to make the temporal name to + be unique. +*/ +int nt_share_delete(const char *name, myf MyFlags) +{ + char buf[MAX_PATH + 20]; + ulong cnt; + DBUG_ENTER("nt_share_delete"); + DBUG_PRINT("my",("name %s MyFlags %d", name, MyFlags)); + + for (cnt= GetTickCount(); cnt; cnt--) + { + sprintf(buf, "%s.%08X.deleted", name, cnt); + if (MoveFile(name, buf)) + break; + + if ((errno= GetLastError()) == ERROR_ALREADY_EXISTS) + continue; + + DBUG_PRINT("warning", ("Failed to rename %s to %s, errno: %d", + name, buf, errno)); + break; + } + + if (DeleteFile(buf)) + DBUG_RETURN(0); + + my_errno= GetLastError(); + if (MyFlags & (MY_FAE+MY_WME)) + my_error(EE_DELETE, MYF(ME_BELL + ME_WAITTANG + (MyFlags & ME_NOINPUT)), + name, my_errno); + + DBUG_RETURN(-1); +} +#endif diff --git a/sql/log.cc b/sql/log.cc index ba02c9ba082..baa92f748ab 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -959,14 +959,14 @@ bool MYSQL_LOG::reset_logs(THD* thd) for (;;) { - my_delete(linfo.log_file_name, MYF(MY_WME)); + my_delete_allow_opened(linfo.log_file_name, MYF(MY_WME)); if (find_next_log(&linfo, 0)) break; } /* Start logging with a new file */ close(LOG_CLOSE_INDEX); - my_delete(index_file_name, MYF(MY_WME)); // Reset (open will update) + my_delete_allow_opened(index_file_name, MYF(MY_WME)); // Reset (open will update) if (!thd->slave_thread) need_start_event=1; if (!open_index_file(index_file_name, 0)) From 2ee3ef2edc0e9b199ee09125e44cfeb1e7905a3b Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 27 Jun 2006 23:49:48 -0700 Subject: [PATCH 52/74] BUG #19773 Final-review fixes per Monty, pre-push. OK'd for push. Please see each file's comments. mysql-test/r/federated.result: BUG #19773 Results for multi-table deletes, updates mysql-test/t/federated.test: BUG #19773 Test multi table update and delete. Added drop table to end of previous test. sql/ha_federated.cc: BUG #19773 Post-review changes, per Monty. 3rd patch, OK'd for push. - Added index_read_idx_with_result_set, which uses the result set passed to it - Hash by entire connection scheme - Protected store_result result set for table scan by adding a method result set to index_read_idx and index_read which is passed to index_read_with_result, which in turn iterates over the single record via read_next. This is a change from having two result sets in the first two patches. This keeps the code clean and avoids the need for yet another result set. - Rewrote ::position and ::rnd_pos to store position - if primary key use primary key, if not, use record buffer. - Rewrote get_share to store hash with connect string vs. table name - delete_row added subtration of "records" by affected->rows - Added read_next to handle what rnd_next used to do (converting raw record to query and vice versa) - Removed many DBUG_PRINT lines - Removed memset initialisation since subsequent loop accomplishes - Removed un-necessary mysql_free_result lines sql/ha_federated.h: BUG #19773 Fixed "SET " to " SET " to make sure built statements are built with "UPDATE `t1` SET .." 
instead of "UPDATE `t1`SET" --- mysql-test/r/federated.result | 88 +++++++ mysql-test/t/federated.test | 51 +++++ sql/ha_federated.cc | 415 ++++++++++++++++++---------------- sql/ha_federated.h | 16 +- 4 files changed, 376 insertions(+), 194 deletions(-) diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index f11da4ee62f..49974615c68 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1558,6 +1558,8 @@ id 3 4 5 +DROP TABLE federated.t1; +DROP TABLE federated.t1; DROP TABLE IF EXISTS federated.bug_17377_table; CREATE TABLE federated.bug_17377_table ( `fld_cid` bigint(20) NOT NULL auto_increment, @@ -1601,6 +1603,92 @@ fld_cid fld_name fld_parentid fld_delt 5 Torkel 0 0 DROP TABLE federated.t1; DROP TABLE federated.bug_17377_table; +create table federated.t1 (i1 int, i2 int, i3 int); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); +create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t1'; +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 2 2 +3 7 12 +4 5 2 +9 10 15 +select * from federated.t2; +id c1 c2 +9 abc def +5 opq lmn +2 test t t test +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 15 2 +3 7 12 +4 5 2 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +5 opq lmn +9 abc ppc +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +2 15 2 +3 7 12 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +9 abc ppc +drop table federated.t1, federated.t2; +drop table federated.t1, federated.t2; +create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); +create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t1'; +create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 2 2 +3 7 12 +4 5 2 +9 10 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t t test +5 opq lmn +9 abc def +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +1 5 10 +2 15 2 +3 7 12 +4 5 2 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +5 opq lmn +9 abc ppc +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +i1 i2 i3 +2 15 2 +3 7 12 +9 15 15 +select * from federated.t2 order by id; +id c1 c2 +2 test t ppc +9 abc ppc +drop table federated.t1, federated.t2; +drop table federated.t1, federated.t2; DROP TABLE IF EXISTS 
federated.t1; DROP DATABASE IF EXISTS federated; DROP TABLE IF EXISTS federated.t1; diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index 80b31c610a2..780008cf13a 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1254,6 +1254,10 @@ SELECT LAST_INSERT_ID(); INSERT INTO federated.t1 VALUES (); SELECT LAST_INSERT_ID(); SELECT * FROM federated.t1; +DROP TABLE federated.t1; + +connection slave; +DROP TABLE federated.t1; # # Bug#17377 Federated Engine returns wrong Data, always the rows @@ -1309,5 +1313,52 @@ DROP TABLE federated.t1; connection slave; DROP TABLE federated.bug_17377_table; +# +# BUG 19773 Crash when using multi-table updates, deletes +# with federated tables +# +connection slave; +create table federated.t1 (i1 int, i2 int, i3 int); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); + +connection master; +eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +select * from federated.t2; +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +drop table federated.t1, federated.t2; +connection slave; +drop table federated.t1, federated.t2; + +# Test multi updates and deletes with keys +connection slave; +create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); + +connection master; +eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; +insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); +insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +update federated.t1,federated.t2 set t1.i2=15, t2.c2="ppc" where t1.i1=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +delete federated.t1.*,federated.t2.* from federated.t1,federated.t2 where t1.i2=t2.id; +select * from federated.t1 order by i1; +select * from federated.t2 order by id; +drop table federated.t1, federated.t2; + +connection slave; +drop table federated.t1, federated.t2; source include/federated_cleanup.inc; diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index c6d5c77803b..291c9d19e36 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -32,13 +32,14 @@ so to read, that data has to be parsed into fields, to write, fields have to be stored in this format to write to this data file. 
- With MySQL Federated storage engine, there will be no local files for each - table's data (such as .MYD). A foreign database will store the data that would - normally be in this file. This will necessitate the use of MySQL client API - to read, delete, update, insert this data. The data will have to be retrieve - via an SQL call "SELECT * FROM users". Then, to read this data, it will have - to be retrieved via mysql_fetch_row one row at a time, then converted from - the column in this select into the format that the handler expects. + With MySQL Federated storage engine, there will be no local files + for each table's data (such as .MYD). A foreign database will store + the data that would normally be in this file. This will necessitate + the use of MySQL client API to read, delete, update, insert this + data. The data will have to be retrieve via an SQL call "SELECT * + FROM users". Then, to read this data, it will have to be retrieved + via mysql_fetch_row one row at a time, then converted from the + column in this select into the format that the handler expects. The create table will simply create the .frm file, and within the "CREATE TABLE" SQL, there SHALL be any of the following : @@ -395,8 +396,8 @@ handlerton federated_hton= { static byte *federated_get_key(FEDERATED_SHARE *share, uint *length, my_bool not_used __attribute__ ((unused))) { - *length= share->table_name_length; - return (byte*) share->table_name; + *length= share->connect_string_length; + return (byte*) share->scheme; } /* @@ -416,7 +417,7 @@ bool federated_db_init() DBUG_ENTER("federated_db_init"); if (pthread_mutex_init(&federated_mutex, MY_MUTEX_INIT_FAST)) goto error; - if (hash_init(&federated_open_tables, system_charset_info, 32, 0, 0, + if (hash_init(&federated_open_tables, &my_charset_bin, 32, 0, 0, (hash_get_key) federated_get_key, 0, 0)) { VOID(pthread_mutex_destroy(&federated_mutex)); @@ -513,6 +514,7 @@ static int check_foreign_data_source(FEDERATED_SHARE *share, } else { + int escaped_table_name_length= 0; /* Since we do not support transactions at this version, we can let the client API silently reconnect. For future versions, we will need more @@ -531,17 +533,16 @@ static int check_foreign_data_source(FEDERATED_SHARE *share, query.append(FEDERATED_STAR); query.append(FEDERATED_FROM); query.append(FEDERATED_BTICK); - escape_string_for_mysql(&my_charset_bin, (char *)escaped_table_name, + escaped_table_name_length= + escape_string_for_mysql(&my_charset_bin, (char*)escaped_table_name, sizeof(escaped_table_name), share->table_name, share->table_name_length); - query.append(escaped_table_name); + query.append(escaped_table_name, escaped_table_name_length); query.append(FEDERATED_BTICK); query.append(FEDERATED_WHERE); query.append(FEDERATED_FALSE); - DBUG_PRINT("info", ("check_foreign_data_source query %s", - query.c_ptr_quick())); if (mysql_real_query(mysql, query.ptr(), query.length())) { error_code= table_create_flag ? 
@@ -637,8 +638,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, table->s->connect_string.length, MYF(0)); - // Add a null for later termination of table name - share->scheme[table->s->connect_string.length]= 0; + share->connect_string_length= table->s->connect_string.length; DBUG_PRINT("info",("parse_url alloced share->scheme %lx", share->scheme)); /* @@ -704,7 +704,7 @@ static int parse_url(FEDERATED_SHARE *share, TABLE *table, share->table_name++; share->table_name_length= strlen(share->table_name); - + /* make sure there's not an extra / */ if ((strchr(share->table_name, '/'))) goto error; @@ -740,8 +740,7 @@ error: ha_federated::ha_federated(TABLE *table_arg) :handler(&federated_hton, table_arg), - mysql(0), stored_result(0), - ref_length(sizeof(MYSQL_ROW_OFFSET)), current_position(0) + mysql(0), stored_result(0) {} @@ -752,6 +751,7 @@ ha_federated::ha_federated(TABLE *table_arg) convert_row_to_internal_format() record Byte pointer to record row MySQL result set row from fetchrow() + result Result set to use DESCRIPTION This method simply iterates through a row returned via fetchrow with @@ -764,14 +764,15 @@ ha_federated::ha_federated(TABLE *table_arg) 0 After fields have had field values stored from record */ -uint ha_federated::convert_row_to_internal_format(byte *record, MYSQL_ROW row) +uint ha_federated::convert_row_to_internal_format(byte *record, + MYSQL_ROW row, + MYSQL_RES *result) { ulong *lengths; Field **field; DBUG_ENTER("ha_federated::convert_row_to_internal_format"); - lengths= mysql_fetch_lengths(stored_result); - memset(record, 0, table->s->null_bytes); + lengths= mysql_fetch_lengths(result); for (field= table->field; *field; field++) { @@ -1299,12 +1300,11 @@ next_loop: static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) { - char *select_query, *tmp_table_name; + char *select_query; char query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - uint tmp_table_name_length; Field **field; String query(query_buffer, sizeof(query_buffer), &my_charset_bin); - FEDERATED_SHARE *share; + FEDERATED_SHARE *share= NULL, tmp_share; /* In order to use this string, we must first zero it's length, or it will contain garbage @@ -1312,12 +1312,15 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) query.length(0); pthread_mutex_lock(&federated_mutex); - tmp_table_name= (char *)table->s->table_name; - tmp_table_name_length= (uint) strlen(tmp_table_name); + if (parse_url(&tmp_share, table, 0)) + goto error; + + /* TODO: change tmp_share.scheme to LEX_STRING object */ if (!(share= (FEDERATED_SHARE *) hash_search(&federated_open_tables, - (byte*) table_name, - strlen(table_name)))) + (byte*) tmp_share.scheme, + tmp_share. 
+ connect_string_length))) { query.set_charset(system_charset_info); query.append(FEDERATED_SELECT); @@ -1335,24 +1338,20 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) if (!(share= (FEDERATED_SHARE *) my_multi_malloc(MYF(MY_WME), &share, sizeof(*share), - &tmp_table_name, tmp_table_name_length+ 1, &select_query, query.length()+table->s->connect_string.length+1, NullS))) - { - pthread_mutex_unlock(&federated_mutex); - return NULL; - } - - if (parse_url(share, table, 0)) goto error; + memcpy(share, &tmp_share, sizeof(tmp_share)); + + share->table_name_length= strlen(share->table_name); + /* TODO: share->table_name to LEX_STRING object */ query.append(share->table_name, share->table_name_length); query.append(FEDERATED_BTICK); share->select_query= select_query; strmov(share->select_query, query.ptr()); share->use_count= 0; - share->table_name_length= strlen(share->table_name); DBUG_PRINT("info", ("share->select_query %s", share->select_query)); @@ -1368,11 +1367,8 @@ static FEDERATED_SHARE *get_share(const char *table_name, TABLE *table) error: pthread_mutex_unlock(&federated_mutex); - if (share->scheme) - { - my_free((gptr) share->scheme, MYF(0)); - share->scheme= 0; - } + my_free((gptr) tmp_share.scheme, MYF(MY_ALLOW_ZERO_PTR)); + my_free((gptr) share, MYF(MY_ALLOW_ZERO_PTR)); return NULL; } @@ -1392,13 +1388,7 @@ static int free_share(FEDERATED_SHARE *share) { hash_delete(&federated_open_tables, (byte*) share); my_free((gptr) share->scheme, MYF(MY_ALLOW_ZERO_PTR)); - share->scheme= 0; - if (share->socket) - { - my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); - share->socket= 0; - } - + my_free((gptr) share->socket, MYF(MY_ALLOW_ZERO_PTR)); thr_lock_delete(&share->lock); VOID(pthread_mutex_destroy(&share->mutex)); my_free((gptr) share, MYF(0)); @@ -1460,22 +1450,29 @@ int ha_federated::open(const char *name, int mode, uint test_if_locked) /* Connect to foreign database mysql_real_connect() */ mysql= mysql_init(0); - if (!mysql_real_connect(mysql, - share->hostname, - share->username, - share->password, - share->database, - share->port, - share->socket, 0)) + if (!mysql || !mysql_real_connect(mysql, + share->hostname, + share->username, + share->password, + share->database, + share->port, + share->socket, 0)) { + free_share(share); DBUG_RETURN(stash_remote_error()); } /* Since we do not support transactions at this version, we can let the client - API silently reconnect. For future versions, we will need more logic to deal - with transactions + API silently reconnect. For future versions, we will need more logic to + deal with transactions */ mysql->reconnect= 1; + + ref_length= (table->s->primary_key != MAX_KEY ? 
+ table->key_info[table->s->primary_key].key_length : + table->s->reclength); + DBUG_PRINT("info", ("ref_length: %u", ref_length)); + DBUG_RETURN(0); } @@ -1499,13 +1496,12 @@ int ha_federated::close(void) /* free the result set */ if (stored_result) { - DBUG_PRINT("info", - ("mysql_free_result result at address %lx", stored_result)); mysql_free_result(stored_result); stored_result= 0; } /* Disconnect from mysql */ - mysql_close(mysql); + if (mysql) // QQ is this really needed + mysql_close(mysql); retval= free_share(share); DBUG_RETURN(retval); @@ -1695,15 +1691,13 @@ int ha_federated::write_row(byte *buf) /* add the values */ insert_string.append(values_string); - DBUG_PRINT("info", ("insert query %s", insert_string.c_ptr_quick())); - if (mysql_real_query(mysql, insert_string.ptr(), insert_string.length())) { DBUG_RETURN(stash_remote_error()); } /* - If the table we've just written a record to contains an auto_increment field, - then store the last_insert_id() value from the foreign server + If the table we've just written a record to contains an auto_increment + field, then store the last_insert_id() value from the foreign server */ if (table->next_number_field) update_auto_increment(); @@ -1772,7 +1766,7 @@ int ha_federated::repair(THD* thd, HA_CHECK_OPT* check_opt) query.append(FEDERATED_EXTENDED); if (check_opt->sql_flags & TT_USEFRM) query.append(FEDERATED_USE_FRM); - + if (mysql_real_query(mysql, query.ptr(), query.length())) { DBUG_RETURN(stash_remote_error()); @@ -1924,7 +1918,7 @@ int ha_federated::update_row(const byte *old_data, byte *new_data) /* This will delete a row. 'buf' will contain a copy of the row to be =deleted. The server will call this right after the current row has been called (from - either a previous rnd_nexT() or index call). + either a previous rnd_next() or index call). If you keep a pointer to the last row or can access a primary key it will make doing the deletion quite a bit easier. Keep in mind that the server does no guarentee consecutive deletions. @@ -1984,6 +1978,7 @@ int ha_federated::delete_row(const byte *buf) DBUG_RETURN(stash_remote_error()); } deleted+= mysql->affected_rows; + records-= mysql->affected_rows; DBUG_PRINT("info", ("rows deleted %d rows deleted for all time %d", int(mysql->affected_rows), deleted)); @@ -2000,12 +1995,15 @@ int ha_federated::delete_row(const byte *buf) */ int ha_federated::index_read(byte *buf, const byte *key, - uint key_len, enum ha_rkey_function find_flag) + uint key_len, ha_rkey_function find_flag) { - int retval; DBUG_ENTER("ha_federated::index_read"); - retval= index_read_idx(buf, active_index, key, key_len, find_flag); - DBUG_RETURN(retval); + + if (stored_result) + mysql_free_result(stored_result); + DBUG_RETURN(index_read_idx_with_result_set(buf, active_index, key, + key_len, find_flag, + &stored_result)); } @@ -2014,26 +2012,60 @@ int ha_federated::index_read(byte *buf, const byte *key, row if any. This is only used to read whole keys. This method is called via index_read in the case of a WHERE clause using - a regular non-primary key index, OR is called DIRECTLY when the WHERE clause + a primary key index OR is called DIRECTLY when the WHERE clause uses a PRIMARY KEY index. + + NOTES + This uses an internal result set that is deleted before function + returns. 
We need to be able to be calable from ha_rnd_pos() */ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, uint key_len, enum ha_rkey_function find_flag) +{ + int retval; + MYSQL_RES *mysql_result; + DBUG_ENTER("ha_federated::index_read_idx"); + + if ((retval= index_read_idx_with_result_set(buf, index, key, + key_len, find_flag, + &mysql_result))) + DBUG_RETURN(retval); + mysql_free_result(mysql_result); + DBUG_RETURN(retval); +} + + +/* + Create result set for rows matching query and return first row + + RESULT + 0 ok In this case *result will contain the result set + table->status == 0 + # error In this case *result will contain 0 + table->status == STATUS_NOT_FOUND +*/ + +int ha_federated::index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result) { int retval; char error_buffer[FEDERATED_QUERY_BUFFER_SIZE]; char index_value[STRING_BUFFER_USUAL_SIZE]; char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; - String index_string(index_value, + String index_string(index_value, sizeof(index_value), &my_charset_bin); String sql_query(sql_query_buffer, sizeof(sql_query_buffer), &my_charset_bin); key_range range; - DBUG_ENTER("ha_federated::index_read_idx"); + DBUG_ENTER("ha_federated::index_read_idx_with_result_set"); + *result= 0; // In case of errors index_string.length(0); sql_query.length(0); statistic_increment(table->in_use->status_var.ha_read_key_count, @@ -2050,20 +2082,6 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, NULL, 0); sql_query.append(index_string); - DBUG_PRINT("info", - ("current key %d key value %s index_string value %s length %d", - index, (char*) key, index_string.c_ptr_quick(), - index_string.length())); - - DBUG_PRINT("info", - ("current position %d sql_query %s", current_position, - sql_query.c_ptr_quick())); - - if (stored_result) - { - mysql_free_result(stored_result); - stored_result= 0; - } if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) { my_sprintf(error_buffer, (error_buffer, "error: %d '%s'", @@ -2071,53 +2089,44 @@ int ha_federated::index_read_idx(byte *buf, uint index, const byte *key, retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; goto error; } - stored_result= mysql_store_result(mysql); - - if (!stored_result) + if (!(*result= mysql_store_result(mysql))) { retval= HA_ERR_END_OF_FILE; goto error; } - /* - This basically says that the record in table->record[0] is legal, - and that it is ok to use this record, for whatever reason, such - as with a join (without it, joins will not work) - */ - table->status= 0; + if (!(retval= read_next(buf, *result))) + DBUG_RETURN(retval); - retval= rnd_next(buf); + mysql_free_result(*result); + *result= 0; + table->status= STATUS_NOT_FOUND; DBUG_RETURN(retval); error: - if (stored_result) - { - mysql_free_result(stored_result); - stored_result= 0; - } table->status= STATUS_NOT_FOUND; my_error(retval, MYF(0), error_buffer); DBUG_RETURN(retval); } + /* Initialized at each key walk (called multiple times unlike rnd_init()) */ + int ha_federated::index_init(uint keynr) { DBUG_ENTER("ha_federated::index_init"); - DBUG_PRINT("info", - ("table: '%s' key: %d", table->s->table_name, keynr)); + DBUG_PRINT("info", ("table: '%s' key: %u", table->s->table_name, keynr)); active_index= keynr; DBUG_RETURN(0); } -/* - int read_range_first(const key_range *start_key, - const key_range *end_key, - bool eq_range, bool sorted); +/* + Read first range */ + int ha_federated::read_range_first(const key_range 
*start_key, - const key_range *end_key, - bool eq_range, bool sorted) + const key_range *end_key, + bool eq_range, bool sorted) { char sql_query_buffer[FEDERATED_QUERY_BUFFER_SIZE]; int retval; @@ -2126,8 +2135,7 @@ int ha_federated::read_range_first(const key_range *start_key, &my_charset_bin); DBUG_ENTER("ha_federated::read_range_first"); - if (start_key == NULL && end_key == NULL) - DBUG_RETURN(0); + DBUG_ASSERT(!(start_key == NULL && end_key == NULL)); sql_query.length(0); sql_query.append(share->select_query); @@ -2135,6 +2143,11 @@ int ha_federated::read_range_first(const key_range *start_key, &table->key_info[active_index], start_key, end_key, 0); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } if (mysql_real_query(mysql, sql_query.ptr(), sql_query.length())) { retval= ER_QUERY_ON_FOREIGN_DATA_SOURCE; @@ -2142,38 +2155,21 @@ int ha_federated::read_range_first(const key_range *start_key, } sql_query.length(0); - if (stored_result) - { - DBUG_PRINT("info", - ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - stored_result= mysql_store_result(mysql); - - if (!stored_result) + if (!(stored_result= mysql_store_result(mysql))) { retval= HA_ERR_END_OF_FILE; goto error; } - - /* This was successful, please let it be known! */ - table->status= 0; - retval= rnd_next(table->record[0]); + retval= read_next(table->record[0], stored_result); DBUG_RETURN(retval); error: - table->status= STATUS_NOT_FOUND; - if (stored_result) - { - DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - DBUG_RETURN(retval); + table->status= STATUS_NOT_FOUND; + DBUG_RETURN(retval); } + int ha_federated::read_range_next() { int retval; @@ -2186,13 +2182,13 @@ int ha_federated::read_range_next() /* Used to read forward through the index. */ int ha_federated::index_next(byte *buf) { - int retval; DBUG_ENTER("ha_federated::index_next"); statistic_increment(table->in_use->status_var.ha_read_next_count, &LOCK_status); - retval= rnd_next(buf); - DBUG_RETURN(retval); + DBUG_RETURN(read_next(buf, stored_result)); } + + /* rnd_init() is called when the system wants the storage engine to do a table scan. 
@@ -2246,11 +2242,8 @@ int ha_federated::rnd_init(bool scan) if (scan) { - DBUG_PRINT("info", ("share->select_query %s", share->select_query)); if (stored_result) { - DBUG_PRINT("info", - ("mysql_free_result address %lx", stored_result)); mysql_free_result(stored_result); stored_result= 0; } @@ -2267,27 +2260,25 @@ int ha_federated::rnd_init(bool scan) DBUG_RETURN(0); error: - DBUG_RETURN(stash_remote_error()); + DBUG_RETURN(stash_remote_error()); } + int ha_federated::rnd_end() { - int retval; DBUG_ENTER("ha_federated::rnd_end"); - - if (stored_result) - { - DBUG_PRINT("info", ("mysql_free_result address %lx", stored_result)); - mysql_free_result(stored_result); - stored_result= 0; - } - retval= index_end(); - DBUG_RETURN(retval); + DBUG_RETURN(index_end()); } + int ha_federated::index_end(void) { DBUG_ENTER("ha_federated::index_end"); + if (stored_result) + { + mysql_free_result(stored_result); + stored_result= 0; + } active_index= MAX_KEY; DBUG_RETURN(0); } @@ -2304,8 +2295,6 @@ int ha_federated::index_end(void) int ha_federated::rnd_next(byte *buf) { - int retval; - MYSQL_ROW row; DBUG_ENTER("ha_federated::rnd_next"); if (stored_result == 0) @@ -2313,32 +2302,60 @@ int ha_federated::rnd_next(byte *buf) /* Return value of rnd_init is not always checked (see records.cc), so we can get here _even_ if there is _no_ pre-fetched result-set! - TODO: fix it. - */ + TODO: fix it. We can delete this in 5.1 when rnd_init() is checked. + */ DBUG_RETURN(1); } - + DBUG_RETURN(read_next(buf, stored_result)); +} + + +/* + ha_federated::read_next + + reads from a result set and converts to mysql internal + format + + SYNOPSIS + field_in_record_is_null() + buf byte pointer to record + result mysql result set + + DESCRIPTION + This method is a wrapper method that reads one record from a result + set and converts it to the internal table format + + RETURN VALUE + 1 error + 0 no error +*/ + +int ha_federated::read_next(byte *buf, MYSQL_RES *result) +{ + int retval; + my_ulonglong num_rows; + MYSQL_ROW row; + DBUG_ENTER("ha_federated::read_next"); + + table->status= STATUS_NOT_FOUND; // For easier return + /* Fetch a row, insert it back in a row format. */ - current_position= stored_result->data_cursor; - DBUG_PRINT("info", ("current position %d", current_position)); - if (!(row= mysql_fetch_row(stored_result))) + if (!(row= mysql_fetch_row(result))) DBUG_RETURN(HA_ERR_END_OF_FILE); - retval= convert_row_to_internal_format(buf, row); + if (!(retval= convert_row_to_internal_format(buf, row, result))) + table->status= 0; + DBUG_RETURN(retval); } /* - 'position()' is called after each call to rnd_next() if the data needs to be - ordered. You can do something like the following to store the position: - my_store_ptr(ref, ref_length, current_position); + store reference to current row so that we can later find it for + a re-read, update or delete. - The server uses ref to store data. ref_length in the above case is the size - needed to store current_position. ref is just a byte array that the server - will maintain. If you are using offsets to mark rows, then current_position - should be the offset. If it is a primary key like in BDB, then it needs to - be a primary key. + In case of federated, a reference is either a primary key or + the whole record. Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. 
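+  ::open() sets ref_length to the length of the primary key when the
+  table has one and to table->s->reclength otherwise, so rnd_pos() can
+  either re-read the row by key (index_read_idx) or just copy the saved
+  record back into buf.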
*/ @@ -2346,32 +2363,44 @@ int ha_federated::rnd_next(byte *buf) void ha_federated::position(const byte *record) { DBUG_ENTER("ha_federated::position"); - /* my_store_ptr Add seek storage */ - *(MYSQL_ROW_OFFSET *) ref= current_position; // ref is always aligned + if (table->s->primary_key != MAX_KEY) + key_copy(ref, (byte *)record, table->key_info + table->s->primary_key, + ref_length); + else + memcpy(ref, record, ref_length); DBUG_VOID_RETURN; } /* This is like rnd_next, but you are given a position to use to determine the - row. The position will be of the type that you stored in ref. You can use - ha_get_ptr(pos,ref_length) to retrieve whatever key or position you saved - when position() was called. + row. The position will be of the type that you stored in ref. - This method is required for an ORDER BY. + This method is required for an ORDER BY Called from filesort.cc records.cc sql_insert.cc sql_select.cc sql_update.cc. */ + int ha_federated::rnd_pos(byte *buf, byte *pos) { + int result; DBUG_ENTER("ha_federated::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_count, &LOCK_status); - memcpy_fixed(¤t_position, pos, sizeof(MYSQL_ROW_OFFSET)); - stored_result->current_row= 0; - stored_result->data_cursor= current_position; - DBUG_RETURN(rnd_next(buf)); + if (table->s->primary_key != MAX_KEY) + { + /* We have a primary key, so use index_read_idx to find row */ + result= index_read_idx(buf, table->s->primary_key, pos, + ref_length, HA_READ_KEY_EXACT); + } + else + { + /* otherwise, get the old record ref as obtained in ::position */ + memcpy(buf, pos, ref_length); + result= 0; + } + table->status= result ? STATUS_NOT_FOUND : 0; + DBUG_RETURN(result); } @@ -2476,18 +2505,22 @@ void ha_federated::info(uint flag) delete_length = ? 
*/ if (row[4] != NULL) - records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error); - if (row[5] != NULL) - mean_rec_length= (ha_rows) my_strtoll10(row[5], (char**) 0, &error); + records= (ha_rows) my_strtoll10(row[4], (char**) 0, &error); + + mean_rec_length= table->s->reclength; + data_file_length= records * mean_rec_length; + if (row[12] != NULL) - update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); + update_time= (ha_rows) my_strtoll10(row[12], (char**) 0, &error); if (row[13] != NULL) - check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); - } - if (flag & HA_STATUS_CONST) - { - block_size= 4096; + check_time= (ha_rows) my_strtoll10(row[13], (char**) 0, &error); } + + /* + size of IO operations (This is based on a good guess, no high science + involved) + */ + block_size= 4096; } if (result) @@ -2498,6 +2531,7 @@ void ha_federated::info(uint flag) error: if (result) mysql_free_result(result); + my_sprintf(error_buffer, (error_buffer, ": %d : %s", mysql_errno(mysql), mysql_error(mysql))); my_error(error_code, MYF(0), error_buffer); @@ -2578,6 +2612,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type) { + DBUG_ENTER("ha_federated::store_lock"); if (lock_type != TL_IGNORE && lock.type == TL_UNLOCK) { /* @@ -2607,7 +2642,7 @@ THR_LOCK_DATA **ha_federated::store_lock(THD *thd, *to++= &lock; - return to; + DBUG_RETURN(to); } /* diff --git a/sql/ha_federated.h b/sql/ha_federated.h index cafd1fe59a5..09c670c9bac 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -78,7 +78,7 @@ #define FEDERATED_VALUES_LEN sizeof(FEDERATED_VALUES) #define FEDERATED_UPDATE "UPDATE " #define FEDERATED_UPDATE_LEN sizeof(FEDERATED_UPDATE) -#define FEDERATED_SET "SET " +#define FEDERATED_SET " SET " #define FEDERATED_SET_LEN sizeof(FEDERATED_SET) #define FEDERATED_AND " AND " #define FEDERATED_AND_LEN sizeof(FEDERATED_AND) @@ -130,6 +130,7 @@ typedef struct st_federated_share { remote host info, parse_url supplies */ char *scheme; + char *connect_string; char *hostname; char *username; char *password; @@ -139,7 +140,7 @@ typedef struct st_federated_share { char *socket; char *sport; ushort port; - uint table_name_length, use_count; + uint table_name_length, connect_string_length, use_count; pthread_mutex_t mutex; THR_LOCK lock; } FEDERATED_SHARE; @@ -153,7 +154,6 @@ class ha_federated: public handler FEDERATED_SHARE *share; /* Shared lock info */ MYSQL *mysql; /* MySQL connection */ MYSQL_RES *stored_result; - uint ref_length; uint fetch_num; // stores the fetch num MYSQL_ROW_OFFSET current_position; // Current position used by ::position() int remote_error_number; @@ -164,7 +164,8 @@ private: return 0 on success return errorcode otherwise */ - uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row); + uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row, + MYSQL_RES *row); bool create_where_from_key(String *to, KEY *key_info, const key_range *start_key, const key_range *end_key, @@ -298,6 +299,13 @@ public: THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); //required virtual bool get_error_message(int error, String *buf); + + int read_next(byte *buf, MYSQL_RES *result); + int index_read_idx_with_result_set(byte *buf, uint index, + const byte *key, + uint key_len, + ha_rkey_function find_flag, + MYSQL_RES **result); }; bool federated_db_init(void); From cd3dedc98125669f0624b53f03dcef638e96ac83 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 14:30:13 +0400 
Subject: [PATCH 53/74] Adding __NT__ to Max Win32 configuration. --- VC++Files/sql/mysqld.vcproj | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VC++Files/sql/mysqld.vcproj b/VC++Files/sql/mysqld.vcproj index 3f20cffec0a..c9675f3fd8a 100644 --- a/VC++Files/sql/mysqld.vcproj +++ b/VC++Files/sql/mysqld.vcproj @@ -85,7 +85,7 @@ InlineFunctionExpansion="1" OptimizeForProcessor="2" AdditionalIncludeDirectories="../bdb/build_win32,../include,../regex,../extra/yassl/include,../zlib" - PreprocessorDefinitions="NDEBUG;DBUG_OFF;USE_SYMDIR;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN" + PreprocessorDefinitions="__NT__;NDEBUG;DBUG_OFF;USE_SYMDIR;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN" StringPooling="TRUE" RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" From f659c1b7402ebe991e17860c27bdd141bfaa7361 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 12:30:14 +0200 Subject: [PATCH 54/74] BUG#20739: __NT__ not probably defined for mysys project. Make sure for the mysys project that __NT__ is defined in *nt solution configurations (but not in other configurations). VC++Files/mysql.sln: Define __NT__ in mysys for *nt configurations. VC++Files/mysys/mysys.vcproj: Add configurations with __NT__ defined. mysql-test/mysql-test-run.pl: Also allow testing a "Max nt" build. --- VC++Files/mysql.sln | 20 +++---- VC++Files/mysys/mysys.vcproj | 104 ++++++++++++++++++++++++++++++++++- mysql-test/mysql-test-run.pl | 3 +- 3 files changed, 114 insertions(+), 13 deletions(-) diff --git a/VC++Files/mysql.sln b/VC++Files/mysql.sln index 3e3e4c67e17..a8c659ad71e 100644 --- a/VC++Files/mysql.sln +++ b/VC++Files/mysql.sln @@ -1110,8 +1110,8 @@ Global {DB28DE80-837F-4497-9AA9-CC0A20584C98}.Release.Build.0 = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic.ActiveCfg = TLS|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic.Build.0 = TLS|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.ActiveCfg = Release|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.Build.0 = Release|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.ActiveCfg = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.Build.0 = Release nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Debug.ActiveCfg = Debug|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Debug.Build.0 = Debug|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Embedded_Classic.ActiveCfg = TLS|Win32 @@ -1126,18 +1126,18 @@ Global {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Embedded_Release.Build.0 = TLS|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max.ActiveCfg = Max|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max.Build.0 = Max|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.ActiveCfg = Max|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.Build.0 = Max|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.ActiveCfg = Release|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.Build.0 = Release|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.ActiveCfg = Max nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.Build.0 = Max nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.ActiveCfg = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.Build.0 = Release nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro.ActiveCfg = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro.Build.0 = Release|Win32 
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl.ActiveCfg = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl.Build.0 = Release|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.ActiveCfg = Release|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.Build.0 = Release|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.ActiveCfg = Release|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.Build.0 = Release|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.ActiveCfg = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.Build.0 = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.ActiveCfg = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.Build.0 = Release nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Release.ActiveCfg = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Release.Build.0 = Release|Win32 {AC47623D-933C-4A80-83BB-B6AF7CB28B4B}.classic.ActiveCfg = classic|Win32 diff --git a/VC++Files/mysys/mysys.vcproj b/VC++Files/mysys/mysys.vcproj index 2c834cab5b2..828fd8ec213 100644 --- a/VC++Files/mysys/mysys.vcproj +++ b/VC++Files/mysys/mysys.vcproj @@ -65,6 +65,56 @@ ConfigurationType="4" UseOfMFC="0" ATLMinimizesCRunTimeLibraryUsage="FALSE"> + + + + + + + + + + + + + + + + + + + + + + + + + + + + Date: Wed, 28 Jun 2006 12:40:17 +0200 Subject: [PATCH 55/74] Bug#19857: When a user with CREATE ROUTINE priv creates a routine it results in NULL p/w sp_grant_privileges(), the function that GRANTs EXECUTE + ALTER privs on a SP, did so creating a user-entry with not password; mysql_routine_grant() would then write that "change" to the user-table. mysql-test/r/sp-security.result: prove that creating a stored procedure will not destroy the creator's password mysql-test/t/sp-security.test: prove that creating a stored procedure will not destroy the creator's password sql/sql_acl.cc: get password from ACLs, convert to correct format, and use it when forcing GRANTS for SPs --- mysql-test/r/sp-security.result | 31 +++++++++++++++++ mysql-test/t/sp-security.test | 46 +++++++++++++++++++++++++ sql/sql_acl.cc | 59 ++++++++++++++++++++++++--------- 3 files changed, 121 insertions(+), 15 deletions(-) diff --git a/mysql-test/r/sp-security.result b/mysql-test/r/sp-security.result index 04f2f58ba37..a53b4c4d246 100644 --- a/mysql-test/r/sp-security.result +++ b/mysql-test/r/sp-security.result @@ -420,3 +420,34 @@ ERROR HY000: There is no 'mysqltest_1'@'localhost' registered ---> connection: root DROP USER mysqltest_2@localhost; DROP DATABASE mysqltest; +GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO +user19857@localhost; +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; +Host User Password +localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C + +---> connection: mysqltest_2_con +use test; +CREATE PROCEDURE sp19857() DETERMINISTIC +BEGIN +DECLARE a INT; +SET a=1; +SELECT a; +END // +SHOW CREATE PROCEDURE test.sp19857; +Procedure sql_mode Create Procedure +sp19857 CREATE DEFINER=`user19857`@`localhost` PROCEDURE `sp19857`() + DETERMINISTIC +BEGIN +DECLARE a INT; +SET a=1; +SELECT a; +END +DROP PROCEDURE IF EXISTS test.sp19857; + +---> connection: root +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; +Host User Password +localhost user19857 *82DC221D557298F6CE9961037DB1C90604792F5C +DROP USER user19857@localhost; diff --git a/mysql-test/t/sp-security.test 
b/mysql-test/t/sp-security.test index a8c3c0a22eb..d323b180216 100644 --- a/mysql-test/t/sp-security.test +++ b/mysql-test/t/sp-security.test @@ -744,4 +744,50 @@ DROP USER mysqltest_2@localhost; DROP DATABASE mysqltest; +# +# Bug#19857 - When a user with CREATE ROUTINE priv creates a routine, +# it results in NULL p/w +# + +# Can't test with embedded server that doesn't support grants + +GRANT USAGE ON *.* TO user19857@localhost IDENTIFIED BY 'meow'; +GRANT SELECT, INSERT, UPDATE, DELETE, CREATE ROUTINE, ALTER ROUTINE ON test.* TO +user19857@localhost; +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; + +--connect (mysqltest_2_con,localhost,user19857,meow,test) +--echo +--echo ---> connection: mysqltest_2_con +--connection mysqltest_2_con + +use test; + +DELIMITER //; + CREATE PROCEDURE sp19857() DETERMINISTIC + BEGIN + DECLARE a INT; + SET a=1; + SELECT a; + END // +DELIMITER ;// + +SHOW CREATE PROCEDURE test.sp19857; + +--disconnect mysqltest_2_con +--connect (mysqltest_2_con,localhost,user19857,meow,test) +--connection mysqltest_2_con + +DROP PROCEDURE IF EXISTS test.sp19857; + +--echo +--echo ---> connection: root +--connection con1root + +--disconnect mysqltest_2_con + +SELECT Host,User,Password FROM mysql.user WHERE User='user19857'; + +DROP USER user19857@localhost; + # End of 5.0 bugs. diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 8b235d26d37..124d3566b19 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -5601,25 +5601,30 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, TABLE_LIST tables[1]; List user_list; bool result; + ACL_USER *au; + char passwd_buff[SCRAMBLED_PASSWORD_CHAR_LENGTH+1]; DBUG_ENTER("sp_grant_privileges"); if (!(combo=(LEX_USER*) thd->alloc(sizeof(st_lex_user)))) DBUG_RETURN(TRUE); combo->user.str= sctx->user; - + VOID(pthread_mutex_lock(&acl_cache->lock)); - if (!find_acl_user(combo->host.str=(char*)sctx->host_or_ip, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str, - FALSE) && - !find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE)) - { - VOID(pthread_mutex_unlock(&acl_cache->lock)); - DBUG_RETURN(TRUE); - } + + if ((au= find_acl_user(combo->host.str=(char*)sctx->host_or_ip,combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->host, combo->user.str,FALSE))) + goto found_acl; + if ((au= find_acl_user(combo->host.str=(char*)sctx->ip, combo->user.str,FALSE))) + goto found_acl; + if((au= find_acl_user(combo->host.str=(char*)"%", combo->user.str, FALSE))) + goto found_acl; + + VOID(pthread_mutex_unlock(&acl_cache->lock)); + DBUG_RETURN(TRUE); + + found_acl: VOID(pthread_mutex_unlock(&acl_cache->lock)); bzero((char*)tables, sizeof(TABLE_LIST)); @@ -5627,13 +5632,37 @@ bool sp_grant_privileges(THD *thd, const char *sp_db, const char *sp_name, tables->db= (char*)sp_db; tables->table_name= tables->alias= (char*)sp_name; - + combo->host.length= strlen(combo->host.str); combo->user.length= strlen(combo->user.str); combo->host.str= thd->strmake(combo->host.str,combo->host.length); combo->user.str= thd->strmake(combo->user.str,combo->user.length); - combo->password.str= (char*)""; - combo->password.length= 0; + + + if(au && au->salt_len) + { + if (au->salt_len == SCRAMBLE_LENGTH) + { + make_password_from_salt(passwd_buff, au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH; + } + else if (au->salt_len == SCRAMBLE_LENGTH_323) + 
{ + make_password_from_salt_323(passwd_buff, (ulong *) au->salt); + combo->password.length= SCRAMBLED_PASSWORD_CHAR_LENGTH_323; + } + else + { + my_error(ER_PASSWD_LENGTH, MYF(0), SCRAMBLED_PASSWORD_CHAR_LENGTH); + return -1; + } + combo->password.str= passwd_buff; + } + else + { + combo->password.str= (char*)""; + combo->password.length= 0; + } if (user_list.push_back(combo)) DBUG_RETURN(TRUE); From a0837ecec49cbd258cb9f0a3cd83ecef90355179 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 21:52:24 +1000 Subject: [PATCH 56/74] BUG#19894 Data nodes fail during loading data if NoOfFragmentLogFiles=1 change default minimum to 3 bug is *very* timing dependent, unable to reproduce here, but theoretically possible. ndb/src/mgmsrv/ConfigInfo.cpp: change minimum NoOfFragmentLogFiles to 3 --- ndb/src/mgmsrv/ConfigInfo.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index 66a400a3e22..3b257e2da48 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -857,7 +857,7 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { false, ConfigInfo::CI_INT, "8", - "1", + "3", STR_VALUE(MAX_INT_RNIL) }, { From ffaacf0dd3caea93fdc3af12954dcdf05c1897ac Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 14:25:17 +0200 Subject: [PATCH 57/74] BUG#20739 Improved definition of mysys configuration for -nt builds. VC++Files/mysql.sln: Use the name 'nt' instead of 'Release' for configuration. VC++Files/mysys/mysys.vcproj: Use the name 'nt' instead of 'Release' for configuration. Use separate output files for NT and non-NT configurations. --- VC++Files/mysql.sln | 16 ++++++++-------- VC++Files/mysys/mysys.vcproj | 30 +++++++++++++++--------------- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/VC++Files/mysql.sln b/VC++Files/mysql.sln index a8c659ad71e..943b38c1f8e 100644 --- a/VC++Files/mysql.sln +++ b/VC++Files/mysql.sln @@ -1110,8 +1110,8 @@ Global {DB28DE80-837F-4497-9AA9-CC0A20584C98}.Release.Build.0 = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic.ActiveCfg = TLS|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic.Build.0 = TLS|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.ActiveCfg = Release nt|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.Build.0 = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.ActiveCfg = nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.classic nt.Build.0 = nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Debug.ActiveCfg = Debug|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Debug.Build.0 = Debug|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Embedded_Classic.ActiveCfg = TLS|Win32 @@ -1128,16 +1128,16 @@ Global {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max.Build.0 = Max|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.ActiveCfg = Max nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Max nt.Build.0 = Max nt|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.ActiveCfg = Release nt|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.Build.0 = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.ActiveCfg = nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.nt.Build.0 = nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro.ActiveCfg = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro.Build.0 = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl.ActiveCfg = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl.Build.0 = Release|Win32 - 
{44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.ActiveCfg = Release nt|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.Build.0 = Release nt|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.ActiveCfg = Release nt|Win32 - {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.Build.0 = Release nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.ActiveCfg = nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro gpl nt.Build.0 = nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.ActiveCfg = nt|Win32 + {44D9C7DC-6636-4B82-BD01-6876C64017DF}.pro nt.Build.0 = nt|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Release.ActiveCfg = Release|Win32 {44D9C7DC-6636-4B82-BD01-6876C64017DF}.Release.Build.0 = Release|Win32 {AC47623D-933C-4A80-83BB-B6AF7CB28B4B}.classic.ActiveCfg = classic|Win32 diff --git a/VC++Files/mysys/mysys.vcproj b/VC++Files/mysys/mysys.vcproj index 828fd8ec213..3885e18cea8 100644 --- a/VC++Files/mysys/mysys.vcproj +++ b/VC++Files/mysys/mysys.vcproj @@ -110,8 +110,8 @@ @@ -125,10 +125,10 @@ StringPooling="TRUE" RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" - PrecompiledHeaderFile=".\max/mysys.pch" - AssemblerListingLocation=".\max/" - ObjectFile=".\max/" - ProgramDataBaseFileName=".\max/" + PrecompiledHeaderFile=".\max_nt/mysys.pch" + AssemblerListingLocation=".\max_nt/" + ObjectFile=".\max_nt/" + ProgramDataBaseFileName=".\max_nt/" WarningLevel="3" SuppressStartupBanner="TRUE" CompileAs="0"/> @@ -136,7 +136,7 @@ Name="VCCustomBuildTool"/> @@ -209,9 +209,9 @@ Name="VCAuxiliaryManagedWrapperGeneratorTool"/> @@ -225,10 +225,10 @@ StringPooling="TRUE" RuntimeLibrary="0" EnableFunctionLevelLinking="TRUE" - PrecompiledHeaderFile=".\release/mysys.pch" - AssemblerListingLocation=".\release/" - ObjectFile=".\release/" - ProgramDataBaseFileName=".\release/" + PrecompiledHeaderFile=".\nt/mysys.pch" + AssemblerListingLocation=".\nt/" + ObjectFile=".\nt/" + ProgramDataBaseFileName=".\nt/" WarningLevel="3" SuppressStartupBanner="TRUE" CompileAs="0"/> @@ -236,7 +236,7 @@ Name="VCCustomBuildTool"/> From 8f42d836dc3f7e2ce71b7116602710515dae5ebc Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 15:53:54 +0300 Subject: [PATCH 58/74] 4.1->5.0 merge for bug #16458 mysql-test/r/distinct.result: 4.1->5.0 merge for bug #16458 * 5.0 is better in detecting duplicate columns sql/sql_select.cc: 4.1->5.0 merge for bug #16458 * Should not do the optimization if using index for group by * chnaged structures in 5.0 --- mysql-test/r/distinct.result | 2 +- sql/sql_select.cc | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/distinct.result b/mysql-test/r/distinct.result index a2ff10594e6..a3d1e8bf3bb 100644 --- a/mysql-test/r/distinct.result +++ b/mysql-test/r/distinct.result @@ -537,7 +537,7 @@ id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index EXPLAIN SELECT DISTINCT a,a FROM t2; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index; Using temporary +1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index EXPLAIN SELECT DISTINCT b,a FROM t2; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t2 index NULL PRIMARY 8 NULL 3 Using index diff --git a/sql/sql_select.cc b/sql/sql_select.cc index eec572a6dfc..3ecac9532ca 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -874,7 +874,11 @@ JOIN::optimize() The FROM clause must contain a single non-constant table. 
*/ if (tables - const_tables == 1 && (group_list || select_distinct) && - !tmp_table_param.sum_func_count) + !tmp_table_param.sum_func_count && + (!join_tab[const_tables].select || + !join_tab[const_tables].select->quick || + join_tab[const_tables].select->quick->get_type() != + QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) { if (group_list && list_contains_unique_index(join_tab[const_tables].table, @@ -11279,9 +11283,9 @@ static bool list_contains_unique_index(TABLE *table, bool (*find_func) (Field *, void *), void *data) { - for (uint keynr= 0; keynr < table->keys; keynr++) + for (uint keynr= 0; keynr < table->s->keys; keynr++) { - if (keynr == table->primary_key || + if (keynr == table->s->primary_key || (table->key_info[keynr].flags & HA_NOSAME)) { KEY *keyinfo= table->key_info + keynr; From 1fdccc89032524d51085f888a0d2356f7b6ebbfe Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 15:07:41 +0200 Subject: [PATCH 59/74] Disabled test case for Windows (BUG#20753) --- mysql-test/t/rpl_openssl.test | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mysql-test/t/rpl_openssl.test b/mysql-test/t/rpl_openssl.test index 7d769ad448e..af70a1a9453 100644 --- a/mysql-test/t/rpl_openssl.test +++ b/mysql-test/t/rpl_openssl.test @@ -1,3 +1,7 @@ +# TODO: THIS TEST DOES NOT WORK ON WINDOWS +# This should be fixed. +--source include/not_windows.inc + source include/have_openssl.inc; source include/master-slave.inc; From 48b09e2a51a1c26ab48af4c1cfb45519517202b3 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 15:15:49 +0200 Subject: [PATCH 60/74] BUG#20739. In the Windows build files, the "Max nt" configuration for some reason had the mysql_client_test project disabled. Enable it. VC++Files/mysql.sln: The "Max nt" configuration for some reason had the mysql_client_test project disabled. Enable it. --- VC++Files/mysql.sln | 1 + 1 file changed, 1 insertion(+) diff --git a/VC++Files/mysql.sln b/VC++Files/mysql.sln index 943b38c1f8e..bd0cae1d5d8 100644 --- a/VC++Files/mysql.sln +++ b/VC++Files/mysql.sln @@ -1427,6 +1427,7 @@ Global {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max.ActiveCfg = Release|Win32 {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max.Build.0 = Release|Win32 {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max nt.ActiveCfg = Release|Win32 + {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.Max nt.Build.0 = Release|Win32 {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.nt.ActiveCfg = Release|Win32 {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.nt.Build.0 = Release|Win32 {DA224DAB-5006-42BE-BB77-16E8BE5326D5}.pro.ActiveCfg = Release|Win32 From 730d16614db30e76938ab3eccf09ce7c1528d334 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 16:28:29 +0300 Subject: [PATCH 61/74] gcc 4.1 linux warning fixes backported from 5.0. sql/item_cmpfunc.h: gcc 4.1 linux warning fixes backported from 5.0 sql/opt_range.cc: gcc 4.1 linux warning fixes. sql/spatial.h: gcc 4.1 linux warning fixes backported from 5.0 sql/sql_select.h: gcc 4.1 linux warning fixes. sql/sql_update.cc: gcc 4.1 linux warning fixes. 
--- sql/item_cmpfunc.h | 14 ++++++++++++++ sql/opt_range.cc | 4 ++-- sql/spatial.h | 4 ++++ sql/sql_select.h | 2 +- sql/sql_update.cc | 2 +- 5 files changed, 22 insertions(+), 4 deletions(-) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 68852b5a5f6..73abe208d9e 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -124,6 +124,8 @@ public: class Comp_creator { public: + Comp_creator() {} /* Remove gcc warning */ + virtual ~Comp_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const = 0; virtual const char* symbol(bool invert) const = 0; virtual bool eqne_op() const = 0; @@ -133,6 +135,8 @@ public: class Eq_creator :public Comp_creator { public: + Eq_creator() {} /* Remove gcc warning */ + virtual ~Eq_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "<>" : "="; } virtual bool eqne_op() const { return 1; } @@ -142,6 +146,8 @@ public: class Ne_creator :public Comp_creator { public: + Ne_creator() {} /* Remove gcc warning */ + virtual ~Ne_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "=" : "<>"; } virtual bool eqne_op() const { return 1; } @@ -151,6 +157,8 @@ public: class Gt_creator :public Comp_creator { public: + Gt_creator() {} /* Remove gcc warning */ + virtual ~Gt_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "<=" : ">"; } virtual bool eqne_op() const { return 0; } @@ -160,6 +168,8 @@ public: class Lt_creator :public Comp_creator { public: + Lt_creator() {} /* Remove gcc warning */ + virtual ~Lt_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? ">=" : "<"; } virtual bool eqne_op() const { return 0; } @@ -169,6 +179,8 @@ public: class Ge_creator :public Comp_creator { public: + Ge_creator() {} /* Remove gcc warning */ + virtual ~Ge_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? "<" : ">="; } virtual bool eqne_op() const { return 0; } @@ -178,6 +190,8 @@ public: class Le_creator :public Comp_creator { public: + Le_creator() {} /* Remove gcc warning */ + virtual ~Le_creator() {} /* Remove gcc warning */ virtual Item_bool_func2* create(Item *a, Item *b) const; virtual const char* symbol(bool invert) const { return invert? 
">" : "<="; } virtual bool eqne_op() const { return 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 67141aab6ce..34f11e4968a 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -63,8 +63,8 @@ public: SEL_ARG(Field *field, uint8 part, char *min_value, char *max_value, uint8 min_flag, uint8 max_flag, uint8 maybe_flag); SEL_ARG(enum Type type_arg) - :elements(1),use_count(1),left(0),next_key_part(0),color(BLACK), - type(type_arg),min_flag(0) + :min_flag(0),elements(1),use_count(1),left(0),next_key_part(0), + color(BLACK), type(type_arg) {} inline bool is_same(SEL_ARG *arg) { diff --git a/sql/spatial.h b/sql/spatial.h index 206958b3eaf..378233a2156 100644 --- a/sql/spatial.h +++ b/sql/spatial.h @@ -165,6 +165,8 @@ struct Geometry_buffer; class Geometry { public: + Geometry() {} /* remove gcc warning */ + virtual ~Geometry() {} /* remove gcc warning */ static void *operator new(size_t size, void *buffer) { return buffer; @@ -173,6 +175,8 @@ public: static void operator delete(void *ptr, void *buffer) {} + static void operator delete(void *buffer) {} /* remove gcc warning */ + enum wkbType { wkb_point= 1, diff --git a/sql/sql_select.h b/sql/sql_select.h index 75cd0b4d797..c61ef4fb92b 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -229,7 +229,7 @@ class JOIN :public Sql_alloc } JOIN(JOIN &join) - :fields_list(join.fields_list) + :Sql_alloc(), fields_list(join.fields_list) { init(join.thd, join.fields_list, join.select_options, join.result); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 16423b39786..089d0bf0660 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1094,7 +1094,7 @@ bool multi_update::send_data(List ¬_used_values) memcpy((char*) tmp_table->field[0]->ptr, (char*) table->file->ref, table->file->ref_length); /* Write row, ignoring duplicated updates to a row */ - if (error= tmp_table->file->write_row(tmp_table->record[0])) + if ((error= tmp_table->file->write_row(tmp_table->record[0]))) { if (error != HA_ERR_FOUND_DUPP_KEY && error != HA_ERR_FOUND_DUPP_UNIQUE && From 9016a6be2072e93d72656fa7c7822ee93d2000a9 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 12:18:21 -0700 Subject: [PATCH 62/74] BUG #19773 Pushbuild fixes to result file, test, and header file for federated. 
mysql-test/r/federated.result: BUG #19773 Pushbuild fixes - result file had hard-coded port mysql-test/t/federated.test: BUG #19773 Pushbuild fixes Test was missing --replace_result sql/ha_federated.h: BUG #19773 HPUX and Windows failed with variable named row and *row in method declaration --- mysql-test/r/federated.result | 4 ++-- mysql-test/t/federated.test | 2 ++ sql/ha_federated.h | 4 ++-- 3 files changed, 6 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index 49974615c68..9194439efd7 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1605,7 +1605,7 @@ DROP TABLE federated.t1; DROP TABLE federated.bug_17377_table; create table federated.t1 (i1 int, i2 int, i3 int); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); -create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t1'; +create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); @@ -1648,7 +1648,7 @@ drop table federated.t1, federated.t2; drop table federated.t1, federated.t2; create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); -create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t1'; +create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index 780008cf13a..2eacc2ba990 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1322,6 +1322,7 @@ create table federated.t1 (i1 int, i2 int, i3 int); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); @@ -1344,6 +1345,7 @@ create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; eval create table federated.t2 (id int auto_increment 
not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); diff --git a/sql/ha_federated.h b/sql/ha_federated.h index 09c670c9bac..85474d142a3 100644 --- a/sql/ha_federated.h +++ b/sql/ha_federated.h @@ -165,8 +165,8 @@ private: return errorcode otherwise */ uint convert_row_to_internal_format(byte *buf, MYSQL_ROW row, - MYSQL_RES *row); - bool create_where_from_key(String *to, KEY *key_info, + MYSQL_RES *result); + bool create_where_from_key(String *to, KEY *key_info, const key_range *start_key, const key_range *end_key, bool records_in_range); From 88843709d8d2bb07794f3c13084bf33e5dea8662 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 23:47:45 +0400 Subject: [PATCH 63/74] A fix for Bug#19022 "Memory bug when switching db during trigger execution". No test case as the bug is in an existing test case (rpl_trigger.test when it is run under valgrind). The warning was caused by memory corruption in replication slave: thd->db was pointing at a stack address that was previously used by sp_head::execute()::old_db. This happened because mysql_change_db behaved differently in replication slave and did not make a copy of the argument to assign to thd->db. The solution is to always free the old value of thd->db and allocate a new copy, regardless whether we're running in a replication slave or not. sql/log_event.cc: Move rewrite_db to log_event.cc, the only place where it is used. sql/slave.cc: Move rewrite_db to log_event.cc sql/slave.h: Remove an unneeded declaration. sql/sql_class.h: Fix set_db to always free the old db, even if the argument is NULL. Add a comment. sql/sql_db.cc: Always make a deep copy of the argument in mysql_change_db, even if running in a replication slave. This is necessary because sp_use_new_db (stored procedures) assumes that mysql_change_db always makes a deep copy of the argument, and thus passes a pointer to stack into it. This assumption was true for all cases except the replication slave thread. --- sql/log_event.cc | 32 +++++++--- sql/slave.cc | 18 ------ sql/slave.h | 1 - sql/sql_class.h | 24 ++++---- sql/sql_db.cc | 157 ++++++++++++++++++++++------------------------- 5 files changed, 111 insertions(+), 121 deletions(-) diff --git a/sql/log_event.cc b/sql/log_event.cc index b4707826205..cf5dbb1e77c 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1635,14 +1635,33 @@ void Query_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) */ #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) + +static const char *rewrite_db(const char *db) +{ + if (replicate_rewrite_db.is_empty() || db == NULL) + return db; + I_List_iterator it(replicate_rewrite_db); + i_string_pair* tmp; + + while ((tmp=it++)) + { + if (strcmp(tmp->key, db) == 0) + return tmp->val; + } + return db; +} + + int Query_log_event::exec_event(struct st_relay_log_info* rli) { return exec_event(rli, query, q_len); } -int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query_arg, uint32 q_len_arg) +int Query_log_event::exec_event(struct st_relay_log_info* rli, + const char *query_arg, uint32 q_len_arg) { + const char *new_db= rewrite_db(db); int expected_error,actual_error= 0; /* Colleagues: please never free(thd->catalog) in MySQL. This would lead to @@ -1651,8 +1670,7 @@ int Query_log_event::exec_event(struct st_relay_log_info* rli, const char *query Thank you. */ thd->catalog= catalog_len ? 
(char *) catalog : (char *)""; - thd->db_length= db_len; - thd->db= (char*) rewrite_db(db, &thd->db_length); + thd->set_db(new_db, strlen(new_db)); /* allocates a copy of 'db' */ thd->variables.auto_increment_increment= auto_increment_increment; thd->variables.auto_increment_offset= auto_increment_offset; @@ -1869,7 +1887,7 @@ end: TABLE uses the db.table syntax. */ thd->catalog= 0; - thd->reset_db(NULL, 0); // prevent db from being freed + thd->set_db(NULL, 0); /* will free the current database */ thd->query= 0; // just to be sure thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); @@ -2817,8 +2835,8 @@ void Load_log_event::set_fields(const char* affected_db, int Load_log_event::exec_event(NET* net, struct st_relay_log_info* rli, bool use_rli_only_for_errors) { - thd->db_length= db_len; - thd->db= (char*) rewrite_db(db, &thd->db_length); + const char *new_db= rewrite_db(db); + thd->set_db(new_db, strlen(new_db)); DBUG_ASSERT(thd->query == 0); thd->query_length= 0; // Should not be needed thd->query_error= 0; @@ -3018,7 +3036,7 @@ error: const char *remember_db= thd->db; VOID(pthread_mutex_lock(&LOCK_thread_count)); thd->catalog= 0; - thd->reset_db(NULL, 0); + thd->set_db(NULL, 0); /* will free the current database */ thd->query= 0; thd->query_length= 0; VOID(pthread_mutex_unlock(&LOCK_thread_count)); diff --git a/sql/slave.cc b/sql/slave.cc index 4da447c4bc3..b284f4a6a16 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1177,24 +1177,6 @@ bool net_request_file(NET* net, const char* fname) } -const char *rewrite_db(const char* db, uint *new_len) -{ - if (replicate_rewrite_db.is_empty() || !db) - return db; - I_List_iterator it(replicate_rewrite_db); - i_string_pair* tmp; - - while ((tmp=it++)) - { - if (!strcmp(tmp->key, db)) - { - *new_len= (uint32)strlen(tmp->val); - return tmp->val; - } - } - return db; -} - /* From other comments and tests in code, it looks like sometimes Query_log_event and Load_log_event can have db == 0 diff --git a/sql/slave.h b/sql/slave.h index 7f08105c0b9..c355f7172a9 100644 --- a/sql/slave.h +++ b/sql/slave.h @@ -550,7 +550,6 @@ int add_table_rule(HASH* h, const char* table_spec); int add_wild_table_rule(DYNAMIC_ARRAY* a, const char* table_spec); void init_table_rule_hash(HASH* h, bool* h_inited); void init_table_rule_array(DYNAMIC_ARRAY* a, bool* a_inited); -const char *rewrite_db(const char* db, uint *new_db_len); const char *print_slave_db_safe(const char *db); int check_expected_error(THD* thd, RELAY_LOG_INFO* rli, int error_code); void skip_load_data_infile(NET* net); diff --git a/sql/sql_class.h b/sql/sql_class.h index 1ba104df2a4..eb075dd54bb 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1574,21 +1574,23 @@ public: /* Initialize the current database from a NULL-terminated string with length + If we run out of memory, we free the current database and return TRUE. + This way the user will notice the error as there will be no current + database selected (in addition to the error message set by malloc). */ - void set_db(const char *new_db, uint new_db_len) + bool set_db(const char *new_db, uint new_db_len) { - if (new_db) + /* Do not reallocate memory if current chunk is big enough. */ + if (db && new_db && db_length >= new_db_len) + memcpy(db, new_db, new_db_len+1); + else { - /* Do not reallocate memory if current chunk is big enough. */ - if (db && db_length >= new_db_len) - memcpy(db, new_db, new_db_len+1); - else - { - safeFree(db); - db= my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)); - } - db_length= db ? 
new_db_len: 0; + x_free(db); + db= new_db ? my_strdup_with_length(new_db, new_db_len, MYF(MY_WME)) : + NULL; } + db_length= db ? new_db_len : 0; + return new_db && !db; } void reset_db(char *new_db, uint new_db_len) { diff --git a/sql/sql_db.cc b/sql/sql_db.cc index 44947384b32..902539dfdec 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -781,32 +781,13 @@ bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent) exit: (void)sp_drop_db_routines(thd, db); /* QQ Ignore errors for now */ /* - If this database was the client's selected database, we silently change the - client's selected database to nothing (to have an empty SELECT DATABASE() - in the future). For this we free() thd->db and set it to 0. But we don't do - free() for the slave thread. Indeed, doing a x_free() on it leads to nasty - problems (i.e. long painful debugging) because in this thread, thd->db is - the same as data_buf and db of the Query_log_event which is dropping the - database. So if you free() thd->db, you're freeing data_buf. You set - thd->db to 0 but not data_buf (thd->db and data_buf are two distinct - pointers which point to the same place). Then in ~Query_log_event(), we - have 'if (data_buf) free(data_buf)' data_buf is !=0 so this makes a - DOUBLE free(). - Side effects of this double free() are, randomly (depends on the machine), - when the slave is replicating a DROP DATABASE: - - garbage characters in the error message: - "Error 'Can't drop database 'test2'; database doesn't exist' on query - 'h4zI©'" - - segfault - - hang in "free(vio)" (yes!) in the I/O or SQL slave threads (so slave - server hangs at shutdown etc). + If this database was the client's selected database, we silently + change the client's selected database to nothing (to have an empty + SELECT DATABASE() in the future). For this we free() thd->db and set + it to 0. */ if (thd->db && !strcmp(thd->db, db)) - { - if (!(thd->slave_thread)) /* a slave thread will free it itself */ - x_free(thd->db); - thd->reset_db(NULL, 0); - } + thd->set_db(NULL, 0); VOID(pthread_mutex_unlock(&LOCK_mysql_create_db)); start_waiting_global_read_lock(thd); exit2: @@ -1099,38 +1080,52 @@ err: /* - Change default database. + Change the current database. SYNOPSIS mysql_change_db() - thd Thread handler - name Databasename - no_access_check True don't do access check. In this case name may be "" + thd thread handle + name database name + no_access_check if TRUE, don't do access check. In this + case name may be "" DESCRIPTION - Becasue the database name may have been given directly from the - communication packet (in case of 'connect' or 'COM_INIT_DB') - we have to do end space removal in this function. + Check that the database name corresponds to a valid and + existent database, check access rights (unless called with + no_access_check), and set the current database. This function + is called to change the current database upon user request + (COM_CHANGE_DB command) or temporarily, to execute a stored + routine. NOTES - Do as little as possible in this function, as it is not called for the - replication slave SQL thread (for that thread, setting of thd->db is done - in ::exec_event() methods of log_event.cc). + This function is not the only way to switch the database that + is currently employed. When the replication slave thread + switches the database before executing a query, it calls + thd->set_db directly. However, if the query, in turn, uses + a stored routine, the stored routine will use this function, + even if it's run on the slave. 
- This function does not send anything, including error messages to the - client, if that should be sent to the client, call net_send_error after - this function. + This function allocates the name of the database on the system + heap: this is necessary to be able to uniformly change the + database from any module of the server. Up to 5.0 different + modules were using different memory to store the name of the + database, and this led to memory corruption: a stack pointer + set by Stored Procedures was used by replication after the + stack address was long gone. + + This function does not send anything, including error + messages, to the client. If that should be sent to the client, + call net_send_error after this function. RETURN VALUES - 0 ok + 0 OK 1 error */ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) { - int length, db_length; - char *dbname= thd->slave_thread ? (char *) name : - my_strdup((char *) name, MYF(MY_WME)); + int path_length, db_length; + char *db_name; char path[FN_REFLEN]; HA_CREATE_INFO create; bool system_db= 0; @@ -1142,32 +1137,35 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) DBUG_ENTER("mysql_change_db"); DBUG_PRINT("enter",("name: '%s'",name)); - LINT_INIT(db_length); - - /* dbname can only be NULL if malloc failed */ - if (!dbname || !(db_length= strlen(dbname))) + if (name == NULL || name[0] == '\0' && no_access_check == FALSE) { - if (no_access_check && dbname) - { - /* Called from SP when orignal database was not set */ - system_db= 1; - goto end; - } - if (!(thd->slave_thread)) - x_free(dbname); /* purecov: inspected */ - my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), - MYF(0)); /* purecov: inspected */ + my_message(ER_NO_DB_ERROR, ER(ER_NO_DB_ERROR), MYF(0)); DBUG_RETURN(1); /* purecov: inspected */ } - if (check_db_name(dbname)) + else if (name[0] == '\0') { - my_error(ER_WRONG_DB_NAME, MYF(0), dbname); - if (!(thd->slave_thread)) - my_free(dbname, MYF(0)); + /* Called from SP to restore the original database, which was NULL */ + DBUG_ASSERT(no_access_check); + system_db= 1; + db_name= NULL; + db_length= 0; + goto end; + } + /* + Now we need to make a copy because check_db_name requires a + non-constant argument. TODO: fix check_db_name. 
+ */ + if ((db_name= my_strdup(name, MYF(MY_WME))) == NULL) + DBUG_RETURN(1); /* the error is set */ + db_length= strlen(db_name); + if (check_db_name(db_name)) + { + my_error(ER_WRONG_DB_NAME, MYF(0), db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } - DBUG_PRINT("info",("Use database: %s", dbname)); - if (!my_strcasecmp(system_charset_info, dbname, information_schema_name.str)) + DBUG_PRINT("info",("Use database: %s", db_name)); + if (!my_strcasecmp(system_charset_info, db_name, information_schema_name.str)) { system_db= 1; #ifndef NO_EMBEDDED_ACCESS_CHECKS @@ -1182,45 +1180,36 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) if (test_all_bits(sctx->master_access, DB_ACLS)) db_access=DB_ACLS; else - db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, dbname, 0) | + db_access= (acl_get(sctx->host, sctx->ip, sctx->priv_user, db_name, 0) | sctx->master_access); if (!(db_access & DB_ACLS) && (!grant_option || - check_grant_db(thd,dbname))) + check_grant_db(thd,db_name))) { my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), sctx->priv_user, sctx->priv_host, - dbname); + db_name); mysql_log.write(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), - sctx->priv_user, sctx->priv_host, dbname); - if (!(thd->slave_thread)) - my_free(dbname,MYF(0)); + sctx->priv_user, sctx->priv_host, db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } } #endif - (void) sprintf(path,"%s/%s",mysql_data_home,dbname); - length=unpack_dirname(path,path); // Convert if not unix - if (length && path[length-1] == FN_LIBCHAR) - path[length-1]=0; // remove ending '\' + (void) sprintf(path,"%s/%s", mysql_data_home, db_name); + path_length= unpack_dirname(path, path); // Convert if not UNIX + if (path_length && path[path_length-1] == FN_LIBCHAR) + path[path_length-1]= '\0'; // remove ending '\' if (my_access(path,F_OK)) { - my_error(ER_BAD_DB_ERROR, MYF(0), dbname); - if (!(thd->slave_thread)) - my_free(dbname,MYF(0)); + my_error(ER_BAD_DB_ERROR, MYF(0), db_name); + my_free(db_name, MYF(0)); DBUG_RETURN(1); } end: - if (!(thd->slave_thread)) - x_free(thd->db); - if (dbname && dbname[0] == 0) - { - if (!(thd->slave_thread)) - my_free(dbname, MYF(0)); - thd->reset_db(NULL, 0); - } - else - thd->reset_db(dbname, db_length); // THD::~THD will free this + x_free(thd->db); + DBUG_ASSERT(db_name == NULL || db_name[0] != '\0'); + thd->reset_db(db_name, db_length); // THD::~THD will free this #ifndef NO_EMBEDDED_ACCESS_CHECKS if (!no_access_check) sctx->db_access= db_access; From 837c9719c436faa083872fda118891c94bbc3403 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 28 Jun 2006 23:50:50 +0400 Subject: [PATCH 64/74] Bug#10946: Confusing error messeges in the case of duplicate trigger definition It was hard to distinguish case, when one was unable to create trigger on the table because trigger with same action time and event already existed for this table, from the case, when one tried to create trigger with name which was already occupied by some other trigger, since in both these cases we emitted ER_TRG_ALREADY_EXISTS error and message. Now we emit ER_NOT_SUPPORTED_YET error with appropriate additional message in the first case. There is no sense in introducing separate error for this situation since we plan to get rid of this limitation eventually. mysql-test/r/trigger.result: Update result for new error message. mysql-test/t/trigger.test: Update test for new error code. sql/sql_trigger.cc: If there is already a trigger with the same activation time, report an "Unsupported yet" error. 
--- mysql-test/r/trigger.result | 2 +- mysql-test/t/trigger.test | 2 +- sql/sql_trigger.cc | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/mysql-test/r/trigger.result b/mysql-test/r/trigger.result index d4791c6b117..531ac5c0418 100644 --- a/mysql-test/r/trigger.result +++ b/mysql-test/r/trigger.result @@ -295,7 +295,7 @@ create trigger trg before insert on t1 for each row set @a:=1; create trigger trg after insert on t1 for each row set @a:=1; ERROR HY000: Trigger already exists create trigger trg2 before insert on t1 for each row set @a:=1; -ERROR HY000: Trigger already exists +ERROR 42000: This version of MySQL doesn't yet support 'multiple triggers with the same action time and event for one table' create trigger trg before insert on t3 for each row set @a:=1; ERROR HY000: Trigger already exists create trigger trg2 before insert on t3 for each row set @a:=1; diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 3743d8f5c76..99df8ca600a 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -321,7 +321,7 @@ create trigger trg before insert on t2 for each row set @a:=1; create trigger trg before insert on t1 for each row set @a:=1; --error 1359 create trigger trg after insert on t1 for each row set @a:=1; ---error 1359 +--error 1235 create trigger trg2 before insert on t1 for each row set @a:=1; --error 1359 create trigger trg before insert on t3 for each row set @a:=1; diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index f943b014118..013a269d3af 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -366,7 +366,9 @@ bool Table_triggers_list::create_trigger(THD *thd, TABLE_LIST *tables, /* We don't allow creation of several triggers of the same type yet */ if (bodies[lex->trg_chistics.event][lex->trg_chistics.action_time]) { - my_message(ER_TRG_ALREADY_EXISTS, ER(ER_TRG_ALREADY_EXISTS), MYF(0)); + my_error(ER_NOT_SUPPORTED_YET, MYF(0), + "multiple triggers with the same action time" + " and event for one table"); return 1; } From d0201bc3714a7139db24afbac26f1fc2ecad67fd Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Jun 2006 10:03:26 +0200 Subject: [PATCH 65/74] Fix test files to work with non-standard ports (MTR_BUILD_THREAD). mysql-test/t/federated.test: Use --replace_result to make test work on non-standard ports. mysql-test/r/federated.result: Use --replace_result to make test work on non-standard ports. 
--- mysql-test/r/federated.result | 8 ++++---- mysql-test/t/federated.test | 4 ++++ 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index 49974615c68..2eb0c81ec2e 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1605,8 +1605,8 @@ DROP TABLE federated.t1; DROP TABLE federated.bug_17377_table; create table federated.t1 (i1 int, i2 int, i3 int); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); -create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t1'; -create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); select * from federated.t1 order by i1; @@ -1648,8 +1648,8 @@ drop table federated.t1, federated.t2; drop table federated.t1, federated.t2; create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); -create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t1'; -create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; +create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); select * from federated.t1 order by i1; diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index 780008cf13a..a8b16edc80a 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1322,7 +1322,9 @@ create table federated.t1 (i1 int, i2 int, i3 int); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); @@ -1344,7 +1346,9 @@ create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); connection master; +--replace_result $SLAVE_MYPORT SLAVE_PORT 
eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); From 9ee8e42d1c7b70321e1a9985a5d636e7da12641c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Jun 2006 10:35:16 +0200 Subject: [PATCH 66/74] Fix test files to work with non-standard ports (MTR_BUILD_THREAD). mysql-test/t/federated.test: Use --replace_result to make test work on non-standard ports. mysql-test/r/federated.result: Use --replace_result to make test work on non-standard ports. --- mysql-test/r/federated.result | 4 ++-- mysql-test/t/federated.test | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result index 9194439efd7..2eb0c81ec2e 100644 --- a/mysql-test/r/federated.result +++ b/mysql-test/r/federated.result @@ -1606,7 +1606,7 @@ DROP TABLE federated.bug_17377_table; create table federated.t1 (i1 int, i2 int, i3 int); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; -create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); select * from federated.t1 order by i1; @@ -1649,7 +1649,7 @@ drop table federated.t1, federated.t2; create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1)); create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id)); create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1'; -create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2'; +create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); select * from federated.t1 order by i1; diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index 2eacc2ba990..a8b16edc80a 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1324,6 +1324,7 @@ create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)); connection master; --replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +--replace_result $SLAVE_MYPORT 
SLAVE_PORT eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); @@ -1347,6 +1348,7 @@ create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key ( connection master; --replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1'; +--replace_result $SLAVE_MYPORT SLAVE_PORT eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2'; insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2); insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test"); From 474c4e9206302532c8fbd82fc4fa7c434f72ef16 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Jun 2006 11:39:07 +0200 Subject: [PATCH 67/74] ndb - build Fix compile error for forte ndb/src/kernel/blocks/dbdict/Dbdict.hpp: Fix compile error for forte --- ndb/src/kernel/blocks/dbdict/Dbdict.hpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 91e57720d01..0fa984a4c61 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -1784,6 +1784,8 @@ private: * * XXX only table ops check BlockState */ + struct DictLockType; + friend struct DictLockType; struct DictLockType { DictLockReq::LockType lockType; @@ -1791,6 +1793,9 @@ private: const char* text; }; + struct DictLockRecord; + friend struct DictLockRecord; + struct DictLockRecord { DictLockReq req; const DictLockType* lt; From 8c435bdf7d24ddb400223bd8c07ecda04c3de07e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Jun 2006 13:45:43 +0400 Subject: [PATCH 68/74] Replace all numeric error code with symbolic names in trigger.test. mysql-test/t/trigger.test: Replace all numeric error code with symbolic names. Left are --error 1 for system error, and --error 1100. The symbolic constant for the latter is ER_TABLE_NOT_LOCKED, but using it triggers a bug in test driver due to name prefix collision with 1099 ER_TABLE_NOT_LOCKED_FOR_WRITE. This bug is fixed in 5.1. 
--- mysql-test/t/trigger.test | 84 +++++++++++++++++++-------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/mysql-test/t/trigger.test b/mysql-test/t/trigger.test index 91b418e7ca6..95e8eaae83e 100644 --- a/mysql-test/t/trigger.test +++ b/mysql-test/t/trigger.test @@ -237,7 +237,7 @@ begin end| delimiter ;| insert into t3 values (1); ---error 1048 +--error ER_BAD_NULL_ERROR insert into t1 values (4, "four", 1), (5, "five", 2); select * from t1; select * from t2; @@ -295,19 +295,19 @@ drop table t1, t2; create table t1 (i int); create table t3 (i int); ---error 1363 +--error ER_TRG_NO_SUCH_ROW_IN_TRG create trigger trg before insert on t1 for each row set @a:= old.i; ---error 1363 +--error ER_TRG_NO_SUCH_ROW_IN_TRG create trigger trg before delete on t1 for each row set @a:= new.i; ---error 1362 +--error ER_TRG_CANT_CHANGE_ROW create trigger trg before update on t1 for each row set old.i:=1; ---error 1363 +--error ER_TRG_NO_SUCH_ROW_IN_TRG create trigger trg before delete on t1 for each row set new.i:=1; ---error 1362 +--error ER_TRG_CANT_CHANGE_ROW create trigger trg after update on t1 for each row set new.i:=1; ---error 1054 +--error ER_BAD_FIELD_ERROR create trigger trg before update on t1 for each row set new.j:=1; ---error 1054 +--error ER_BAD_FIELD_ERROR create trigger trg before update on t1 for each row set @a:=old.j; @@ -315,25 +315,25 @@ create trigger trg before update on t1 for each row set @a:=old.j; # Let us test various trigger creation errors # Also quickly test table namespace (bug#5892/6182) # ---error 1146 +--error ER_NO_SUCH_TABLE create trigger trg before insert on t2 for each row set @a:=1; create trigger trg before insert on t1 for each row set @a:=1; ---error 1359 +--error ER_TRG_ALREADY_EXISTS create trigger trg after insert on t1 for each row set @a:=1; ---error 1235 +--error ER_NOT_SUPPORTED_YET create trigger trg2 before insert on t1 for each row set @a:=1; ---error 1359 +--error ER_TRG_ALREADY_EXISTS create trigger trg before insert on t3 for each row set @a:=1; create trigger trg2 before insert on t3 for each row set @a:=1; drop trigger trg2; drop trigger trg; ---error 1360 +--error ER_TRG_DOES_NOT_EXIST drop trigger trg; create view v1 as select * from t1; ---error 1347 +--error ER_WRONG_OBJECT create trigger trg before insert on v1 for each row set @a:=1; drop view v1; @@ -341,7 +341,7 @@ drop table t1; drop table t3; create temporary table t1 (i int); ---error 1361 +--error ER_TRG_ON_VIEW_OR_TEMP_TABLE create trigger trg before insert on t1 for each row set @a:=1; drop table t1; @@ -495,47 +495,47 @@ select * from t1; # their main effect. This is because operation on the table row is # executed before "after" trigger and its effect cannot be rolled back # when whole statement fails, because t1 is MyISAM table. 
---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (2, 1); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR update t1 set k = 2 where i = 2; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR delete from t1 where i = 2; select * from t1; # Should fail and insert only 1 row ---error 1054 +--error ER_BAD_FIELD_ERROR load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 select 3, 3; select * from t1; # Multi-update working on the fly, again it will update only # one row even if more matches ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i; select * from t1; # The same for multi-update via temp table ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i and k < 3; select * from t1; # Multi-delete on the fly ---error 1054 +--error ER_BAD_FIELD_ERROR delete t1, t2 from t1 straight_join t2 where t1.i = t2.i; select * from t1; # And via temporary storage ---error 1054 +--error ER_BAD_FIELD_ERROR delete t2, t1 from t2 straight_join t1 where t1.i = t2.i; select * from t1; # Prepare table for testing of REPLACE and INSERT ... ON DUPLICATE KEY UPDATE alter table t1 add primary key (i); ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (3, 4) on duplicate key update k= k + 10; select * from t1; # The following statement will delete old row and won't # insert new one since after delete trigger will fail. ---error 1054 +--error ER_BAD_FIELD_ERROR replace into t1 values (3, 3); select * from t1; # Also drops all triggers @@ -553,33 +553,33 @@ alter table t1 drop column bt; # The following statements changing t1 should fail and should not # cause any effect on table, since "before" trigger is executed # before operation on the table row. ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (3, 3); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR update t1 set i = 2; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR delete from t1; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR load data infile '../std_data_ln/loaddata5.dat' into table t1 fields terminated by '' enclosed by '' (i, k); select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 select 3, 3; select * from t1; # Both types of multi-update (on the fly and via temp table) ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR update t1, t2 set k = k + 10 where t1.i = t2.i and k < 2; select * from t1; # Both types of multi-delete ---error 1054 +--error ER_BAD_FIELD_ERROR delete t1, t2 from t1 straight_join t2 where t1.i = t2.i; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR delete t2, t1 from t2 straight_join t1 where t1.i = t2.i; select * from t1; # Let us test REPLACE/INSERT ... ON DUPLICATE KEY UPDATE. @@ -587,10 +587,10 @@ select * from t1; # in ordinary INSERT we need to drop "before insert" trigger. 
alter table t1 add primary key (i); drop trigger bi; ---error 1054 +--error ER_BAD_FIELD_ERROR insert into t1 values (2, 4) on duplicate key update k= k + 10; select * from t1; ---error 1054 +--error ER_BAD_FIELD_ERROR replace into t1 values (2, 4); select * from t1; # Also drops all triggers @@ -608,7 +608,7 @@ insert into t1 values (1, 2); create function bug5893 () returns int return 5; create trigger t1_bu before update on t1 for each row set new.col1= bug5893(); drop function bug5893; ---error 1305 +--error ER_SP_DOES_NOT_EXIST update t1 set col2 = 4; # This should not crash server too. drop trigger t1_bu; @@ -908,9 +908,9 @@ create trigger t1_bi after insert on t1 for each row insert into t3 values (new. # Until we implement proper mechanism for invalidation of PS/SP when table # or SP's are changed these two statements will fail with 'Table ... was # not locked' error (this mechanism should be based on the new TDC). ---error 1100 +--error 1100 #ER_TABLE_NOT_LOCKED execute stmt1; ---error 1100 +--error 1100 #ER_TABLE_NOT_LOCKED call p1(); deallocate prepare stmt1; drop procedure p1; @@ -1186,7 +1186,7 @@ INSERT INTO t1 VALUES (@x); SELECT @x; SET @x=2; ---error 1365 +--error ER_DIVISION_BY_ZERO UPDATE t1 SET i1 = @x; SELECT @x; @@ -1197,7 +1197,7 @@ INSERT INTO t1 VALUES (@x); SELECT @x; SET @x=4; ---error 1365 +--error ER_DIVISION_BY_ZERO UPDATE t1 SET i1 = @x; SELECT @x; From 78c814154a621c5d0992261afd0a84f68867a708 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 29 Jun 2006 13:01:54 +0200 Subject: [PATCH 69/74] fixed too small requestInfo in signal + adopted signal to be as close as possible to 5.1... --- ndb/include/kernel/signaldata/LqhFrag.hpp | 46 +++++++++++------------ 1 file changed, 23 insertions(+), 23 deletions(-) diff --git a/ndb/include/kernel/signaldata/LqhFrag.hpp b/ndb/include/kernel/signaldata/LqhFrag.hpp index 50b0caaba07..72c1537854c 100644 --- a/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -104,7 +104,7 @@ class LqhFragReq { friend bool printLQH_FRAG_REQ(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 24 ); + STATIC_CONST( SignalLength = 23 ); enum RequestInfo { CreateInRunning = 0x8000000, @@ -115,33 +115,33 @@ private: Uint32 senderData; Uint32 senderRef; Uint32 fragmentId; - Uint8 requestInfo; - Uint8 unused1; + Uint32 requestInfo; + Uint32 maxLoadFactor; + Uint32 minLoadFactor; + Uint32 kValue; + Uint32 schemaVersion; + Uint32 nextLCP; + Uint16 noOfNewAttr; + Uint16 noOfCharsets; + Uint32 startGci; + Uint32 tableType; // DictTabInfo::TableType + Uint32 primaryTableId; // table of index or RNIL + Uint16 tableId; + Uint16 localKeyLength; + Uint16 lh3DistrBits; + Uint16 lh3PageBits; Uint16 noOfAttributes; - Uint32 tableId; - Uint32 localKeyLength; - Uint16 maxLoadFactor; - Uint16 minLoadFactor; - Uint16 kValue; - Uint8 tableType; // DictTabInfo::TableType - Uint8 GCPIndicator; - Uint32 lh3DistrBits; - Uint32 lh3PageBits; - Uint32 noOfNullAttributes; + Uint16 noOfNullAttributes; + Uint16 noOfPagesToPreAllocate; + Uint16 keyLength; + Uint16 noOfKeyAttr; + Uint8 checksumIndicator; + Uint8 GCPIndicator; + Uint32 noOfAttributeGroups; Uint32 maxRowsLow; Uint32 maxRowsHigh; Uint32 minRowsLow; Uint32 minRowsHigh; - Uint32 schemaVersion; - Uint32 keyLength; - Uint32 nextLCP; - Uint32 noOfKeyAttr; - Uint16 noOfNewAttr; - Uint16 noOfCharsets; - Uint32 checksumIndicator; - Uint32 noOfAttributeGroups; - Uint32 startGci; - Uint32 primaryTableId; // table of index or RNIL }; class LqhFragConf { 
From ce5ed66f2abc0dfc3779b5e59b875f758545b29b Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 29 Jun 2006 16:31:08 +0400
Subject: [PATCH 70/74] A post-merge fix.

---
 mysql-test/r/information_schema.result | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result
index 63af90aa0f1..64969fcdf44 100644
--- a/mysql-test/r/information_schema.result
+++ b/mysql-test/r/information_schema.result
@@ -369,11 +369,11 @@ show keys from v4;
 Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment
 select * from information_schema.views where TABLE_NAME like "v%";
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
-NULL test v0 select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER
-NULL test v1 select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER
-NULL test v2 select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER
-NULL test v3 select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
-NULL test v4 select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
+NULL test v0 /* ALGORITHM=UNDEFINED */ select `schemata`.`SCHEMA_NAME` AS `c` from `information_schema`.`schemata` NONE NO root@localhost DEFINER
+NULL test v1 /* ALGORITHM=UNDEFINED */ select `tables`.`TABLE_NAME` AS `c` from `information_schema`.`tables` where (`tables`.`TABLE_NAME` = _utf8'v1') NONE NO root@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select `columns`.`COLUMN_NAME` AS `c` from `information_schema`.`columns` where (`columns`.`TABLE_NAME` = _utf8'v2') NONE NO root@localhost DEFINER
+NULL test v3 /* ALGORITHM=UNDEFINED */ select `character_sets`.`CHARACTER_SET_NAME` AS `c` from `information_schema`.`character_sets` where (`character_sets`.`CHARACTER_SET_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
+NULL test v4 /* ALGORITHM=UNDEFINED */ select `collations`.`COLLATION_NAME` AS `c` from `information_schema`.`collations` where (`collations`.`COLLATION_NAME` like _utf8'latin1%') NONE NO root@localhost DEFINER
 drop view v0, v1, v2, v3, v4;
 create table t1 (a int);
 grant select,update,insert on t1 to mysqltest_1@localhost;
@@ -464,9 +464,9 @@ create view v2 (c) as select a from t1 WITH LOCAL CHECK OPTION;
 create view v3 (c) as select a from t1 WITH CASCADED CHECK OPTION;
 select * from information_schema.views;
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
-NULL test v1 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
-NULL test v2 select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER
-NULL test v3 select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
+NULL test v1 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` LOCAL YES root@localhost DEFINER
+NULL test v3 /* ALGORITHM=UNDEFINED */ select `test`.`t1`.`a` AS `c` from `test`.`t1` CASCADED YES root@localhost DEFINER
 grant select (a) on test.t1 to joe@localhost with grant option;
 select * from INFORMATION_SCHEMA.COLUMN_PRIVILEGES;
 GRANTEE TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME PRIVILEGE_TYPE IS_GRANTABLE
@@ -1121,7 +1121,7 @@ select * from information_schema.views where table_name='v1' or
 table_name='v2';
 TABLE_CATALOG TABLE_SCHEMA TABLE_NAME VIEW_DEFINITION CHECK_OPTION IS_UPDATABLE DEFINER SECURITY_TYPE
 NULL test v1 NONE YES root@localhost DEFINER
-NULL test v2 select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
+NULL test v2 /* ALGORITHM=UNDEFINED */ select 1 AS `1` NONE NO mysqltest_1@localhost DEFINER
 drop view v1, v2;
 drop table t1;
 drop user mysqltest_1@localhost;

From 6878aa96b6a3fd04965786a9b7fb2efa6623f1dc Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 29 Jun 2006 16:38:14 +0200
Subject: [PATCH 71/74] Increment the version number.

---
 configure.in | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/configure.in b/configure.in
index fe111b8a671..48454a11309 100644
--- a/configure.in
+++ b/configure.in
@@ -7,7 +7,7 @@ AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # The Docs Makefile.am parses this line!
 # remember to also change ndb version below and update version.c in ndb
-AM_INIT_AUTOMAKE(mysql, 5.0.23)
+AM_INIT_AUTOMAKE(mysql, 5.0.24)
 AM_CONFIG_HEADER(config.h)
 
 PROTOCOL_VERSION=10
@@ -19,7 +19,7 @@ SHARED_LIB_VERSION=$SHARED_LIB_MAJOR_VERSION:0:0
 # ndb version
 NDB_VERSION_MAJOR=5
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=23
+NDB_VERSION_BUILD=24
 NDB_VERSION_STATUS=""
 
 # Set all version vars based on $VERSION. How do we do this more elegant ?

From d326551bdcad71b75dc0ff8ad5eca6af54fb466f Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 29 Jun 2006 17:00:48 +0200
Subject: [PATCH 72/74] Test "federated" failed because the log contains fixed
 port numbers.

Re-apply a patch by Knielsen in the 5.0.23 build clone:
Replace port number by "SLAVE_PORT".

mysql-test/r/federated.result:
  Re-apply a patch by Knielsen in the 5.0.23 build clone:
  Replace port number by "SLAVE_PORT".
mysql-test/t/federated.test:
  Re-apply a patch by Knielsen in the 5.0.23 build clone:
  Replace port number by "SLAVE_PORT".
---
 mysql-test/r/federated.result | 4 ++--
 mysql-test/t/federated.test | 2 ++
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/mysql-test/r/federated.result b/mysql-test/r/federated.result
index 9194439efd7..2eb0c81ec2e 100644
--- a/mysql-test/r/federated.result
+++ b/mysql-test/r/federated.result
@@ -1606,7 +1606,7 @@ DROP TABLE federated.bug_17377_table;
 create table federated.t1 (i1 int, i2 int, i3 int);
 create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
 create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
-create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2';
+create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
 insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
 insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
 select * from federated.t1 order by i1;
@@ -1649,7 +1649,7 @@ drop table federated.t1, federated.t2;
 create table federated.t1 (i1 int, i2 int, i3 int, primary key (i1));
 create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (id));
 create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
-create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:9308/federated/t2';
+create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t2';
 insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
 insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
 select * from federated.t1 order by i1;
diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test
index 2eacc2ba990..a8b16edc80a 100644
--- a/mysql-test/t/federated.test
+++ b/mysql-test/t/federated.test
@@ -1324,6 +1324,7 @@ create table federated.t2 (id int, c1 varchar(20), c2 varchar(20));
 connection master;
 --replace_result $SLAVE_MYPORT SLAVE_PORT
 eval create table federated.t1 (i1 int, i2 int, i3 int) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
 eval create table federated.t2 (id int, c1 varchar(20), c2 varchar(20)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
 insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
 insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");
@@ -1347,6 +1348,7 @@ create table federated.t2 (id int, c1 varchar(20), c2 varchar(20), primary key (
 connection master;
 --replace_result $SLAVE_MYPORT SLAVE_PORT
 eval create table federated.t1 (i1 int auto_increment not null, i2 int, i3 int, primary key (i1)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
+--replace_result $SLAVE_MYPORT SLAVE_PORT
 eval create table federated.t2 (id int auto_increment not null, c1 varchar(20), c2 varchar(20), primary key(id)) ENGINE=FEDERATED CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t2';
 insert into federated.t1 values (1,5,10),(3,7,12),(4,5,2),(9,10,15),(2,2,2);
 insert into federated.t2 values (9,"abc","def"),(5,"opq","lmn"),(2,"test t","t test");

From 5d506d6b6255589dd4c28958ee03be59c690f4cf Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 29 Jun 2006 20:55:21 +0200
Subject: [PATCH 73/74] Bug #19202 Incorrect error handling in select count(*)
 wrt temporary error

---
 sql/ha_ndbcluster.cc | 89 ++++++++++++++++++++++++++++++--------------
 1 file changed, 62 insertions(+), 27 deletions(-)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index ffd5932a5c1..11fdd33fad9 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -270,6 +270,7 @@ void ha_ndbcluster::records_update()
   {
     Ndb *ndb= get_ndb();
     Uint64 rows;
+    ndb->setDatabaseName(m_dbname);
     if(ndb_get_table_statistics(ndb, m_tabname, &rows, 0) == 0){
       info->records= rows;
     }
@@ -2876,6 +2877,7 @@ void ha_ndbcluster::info(uint flag)
       DBUG_VOID_RETURN;
     Ndb *ndb= get_ndb();
     Uint64 rows= 100;
+    ndb->setDatabaseName(m_dbname);
     if (current_thd->variables.ndb_use_exact_count)
       ndb_get_table_statistics(ndb, m_tabname, &rows, 0);
     records= rows;
@@ -5228,34 +5230,53 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
 {
   DBUG_ENTER("ndb_get_table_statistics");
   DBUG_PRINT("enter", ("table: %s", table));
-  NdbConnection* pTrans= ndb->startTransaction();
-  do
+  NdbConnection* pTrans;
+  NdbError error;
+  int retries= 10;
+  int retry_sleep= 30 * 1000; /* 30 milliseconds */
+
+  do
   {
-    if (pTrans == NULL)
-      break;
-
-    NdbScanOperation* pOp= pTrans->getNdbScanOperation(table);
-    if (pOp == NULL)
-      break;
-
-    NdbResultSet* rs= pOp->readTuples(NdbOperation::LM_CommittedRead);
-    if (rs == 0)
-      break;
-
-    int check= pOp->interpret_exit_last_row();
-    if (check == -1)
-      break;
-
     Uint64 rows, commits;
+    Uint64 sum_rows= 0;
+    Uint64 sum_commits= 0;
+    NdbScanOperation*pOp;
+    NdbResultSet *rs;
+    int check;
+
+    if ((pTrans= ndb->startTransaction()) == NULL)
+    {
+      error= ndb->getNdbError();
+      goto retry;
+    }
+
+    if ((pOp= pTrans->getNdbScanOperation(table)) == NULL)
+    {
+      error= pTrans->getNdbError();
+      goto retry;
+    }
+
+    if ((rs= pOp->readTuples(NdbOperation::LM_CommittedRead)) == 0)
+    {
+      error= pOp->getNdbError();
+      goto retry;
+    }
+
+    if (pOp->interpret_exit_last_row() == -1)
+    {
+      error= pOp->getNdbError();
+      goto retry;
+    }
+
     pOp->getValue(NdbDictionary::Column::ROW_COUNT, (char*)&rows);
     pOp->getValue(NdbDictionary::Column::COMMIT_COUNT, (char*)&commits);
 
-    check= pTrans->execute(NoCommit, AbortOnError, TRUE);
-    if (check == -1)
-      break;
+    if (pTrans->execute(NoCommit, AbortOnError, TRUE) == -1)
+    {
+      error= pTrans->getNdbError();
+      goto retry;
+    }
 
-    Uint64 sum_rows= 0;
-    Uint64 sum_commits= 0;
     while((check= rs->nextResult(TRUE, TRUE)) == 0)
     {
       sum_rows+= rows;
@@ -5263,7 +5284,10 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
     }
 
     if (check == -1)
-      break;
+    {
+      error= pOp->getNdbError();
+      goto retry;
+    }
 
     rs->close(TRUE);
@@ -5274,11 +5298,22 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
     *commit_count= sum_commits;
     DBUG_PRINT("exit", ("records: %u commits: %u", sum_rows, sum_commits));
     DBUG_RETURN(0);
-  } while(0);
 
-  ndb->closeTransaction(pTrans);
-  DBUG_PRINT("exit", ("failed"));
-  DBUG_RETURN(-1);
+retry:
+    if (pTrans)
+    {
+      ndb->closeTransaction(pTrans);
+      pTrans= NULL;
+    }
+    if (error.status == NdbError::TemporaryError && retries--)
+    {
+      my_sleep(retry_sleep);
+      continue;
+    }
+    break;
+  } while(1);
+  DBUG_PRINT("exit", ("failed, error %u(%s)", error.code, error.message));
+  ERR_RETURN(error);
 }
 
 /*

From d6fe37ae70a483aad8488509650f92411204606c Mon Sep 17 00:00:00 2001
From: unknown
Date: Thu, 29 Jun 2006 22:11:29 +0200
Subject: [PATCH 74/74] corrected merge error

---
 sql/ha_ndbcluster.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc
index 7f433f06cf9..e4ff39797ca 100644
--- a/sql/ha_ndbcluster.cc
+++ b/sql/ha_ndbcluster.cc
@@ -5846,8 +5846,11 @@ ndb_get_table_statistics(Ndb* ndb, const char * table,
   {
     Uint64 rows, commits, mem;
     Uint32 size;
+    Uint32 count= 0;
     Uint64 sum_rows= 0;
     Uint64 sum_commits= 0;
+    Uint64 sum_row_size= 0;
+    Uint64 sum_mem= 0;
     NdbScanOperation*pOp;
     NdbResultSet *rs;
     int check;
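
Patch 73 turns the single-shot statistics scan into a bounded retry loop: on a temporary NDB error the open transaction is closed, the thread sleeps 30 ms, and the scan is attempted again, up to 10 times, before the error is propagated; patch 74 only restores declarations lost in the merge. Below is a condensed, standalone sketch of that control flow; ScanError and do_scan are illustrative stand-ins, not the real NDB API.

#include <unistd.h>
#include <cstdio>

// Illustrative stand-in for the NDB error info; the real code inspects
// NdbError::status and closes the open transaction before retrying.
struct ScanError { bool temporary; int code; };

// Stand-in for the startTransaction/scan/execute sequence; here the first
// two attempts fail with a temporary error, then the scan succeeds.
static int do_scan(ScanError *err)
{
  static int attempt= 0;
  if (++attempt <= 2)
  {
    err->temporary= true;
    err->code= 4010;          // placeholder temporary error code
    return -1;
  }
  return 0;
}

static int get_stats_with_retry()
{
  int retries= 10;
  const unsigned retry_sleep= 30 * 1000;   /* 30 milliseconds, as in the patch */
  ScanError err= { false, 0 };
  do
  {
    if (do_scan(&err) == 0)
      return 0;                            // success
    if (err.temporary && retries--)
    {
      usleep(retry_sleep);                 // back off, then try again
      continue;
    }
    break;                                 // permanent error or retries exhausted
  } while (1);
  return err.code;
}

int main()
{
  std::printf("result: %d\n", get_stats_with_retry());
  return 0;
}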