From e1b8a004fea3aca320d3d8fa36dfd7932ab045ca Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 25 Jan 2005 12:06:55 -0800 Subject: [PATCH 01/53] Always call vio_in_addr() so that thd->remote is always initialized. (Bug #5569) vio/viosocket.c: Remove comment that is no longer correct sql/sql_parse.cc: Always call vio_in_addr() on successful connection, so that thd->remote always gets set vio/viossl.c: Remove comment that is no longer correct --- sql/sql_parse.cc | 5 +++-- vio/viosocket.c | 2 +- vio/viossl.c | 2 +- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 1aeb158dc11..efb534ed439 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -501,6 +501,9 @@ check_connections(THD *thd) thd->thread_id)); DBUG_PRINT("info",("New connection received on %s", vio_description(net->vio))); + + vio_in_addr(net->vio,&thd->remote.sin_addr); + if (!thd->host) // If TCP/IP connection { char ip[30]; @@ -521,7 +524,6 @@ check_connections(THD *thd) #endif if (!(specialflag & SPECIAL_NO_RESOLVE)) { - vio_in_addr(net->vio,&thd->remote.sin_addr); thd->host=ip_to_hostname(&thd->remote.sin_addr,&connect_errors); /* Cut very long hostnames to avoid possible overflows */ if (thd->host) @@ -543,7 +545,6 @@ check_connections(THD *thd) DBUG_PRINT("info",("Host: %s",thd->host)); thd->host_or_ip= thd->host; thd->ip= 0; - bzero((char*) &thd->remote,sizeof(struct sockaddr)); } vio_keepalive(net->vio, TRUE); diff --git a/vio/viosocket.c b/vio/viosocket.c index ad156fc33bf..1b6f46c57cf 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -295,7 +295,7 @@ void vio_in_addr(Vio *vio, struct in_addr *in) { DBUG_ENTER("vio_in_addr"); if (vio->localhost) - bzero((char*) in, sizeof(*in)); /* This should never be executed */ + bzero((char*) in, sizeof(*in)); else *in=vio->remote.sin_addr; DBUG_VOID_RETURN; diff --git a/vio/viossl.c b/vio/viossl.c index a489cb98f98..07713c83763 100644 --- a/vio/viossl.c +++ b/vio/viossl.c @@ -259,7 +259,7 @@ void vio_ssl_in_addr(Vio *vio, struct in_addr *in) { DBUG_ENTER("vio_ssl_in_addr"); if (vio->localhost) - bzero((char*) in, sizeof(*in)); /* This should never be executed */ + bzero((char*) in, sizeof(*in)); else *in=vio->remote.sin_addr; DBUG_VOID_RETURN; From 0a507d2cf1244d67dbe702343d34b3ff78089374 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 31 Jan 2005 18:11:26 +0100 Subject: [PATCH 02/53] Fixed a bug in the ndbd scheduler with send of packed signals. Fixing this bugs improves performance by 40% for very small read statements and with 12-13 % for very simple updating transactions (flexBench -o 10000) in single threaded application. Removes a fixed cost of around 100.000 cycles every time the ndbd process wakes up to execute some queries. 
ndb/src/kernel/vm/FastScheduler.cpp: Integrate sendPacked into doJob The lack of integration meant that several loops in ipControlLoop were executed each time the ndbd process woke up, also for reads it meant that response was divided in two TCP/IP packets Is necessary to integrate this with overload protection ndb/src/kernel/vm/ThreadConfig.cpp: Integrate sendPacked into doJob The lack of integration meant that several loops in ipControlLoop were executed each time the ndbd process woke up, also for reads it meant that response was divided in two TCP/IP packets Is necessary to integrate this with overload protection --- ndb/src/kernel/vm/FastScheduler.cpp | 33 +++++++++++++++++++---------- ndb/src/kernel/vm/ThreadConfig.cpp | 3 --- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/ndb/src/kernel/vm/FastScheduler.cpp b/ndb/src/kernel/vm/FastScheduler.cpp index eca456d26dd..d05c02360a7 100644 --- a/ndb/src/kernel/vm/FastScheduler.cpp +++ b/ndb/src/kernel/vm/FastScheduler.cpp @@ -76,19 +76,26 @@ FastScheduler::activateSendPacked() globalData.loopMax = 2048; }//FastScheduler::activateSendPacked() +//------------------------------------------------------------------------ +// sendPacked is executed at the end of the loop. +// To ensure that we don't send any messages before executing all local +// packed signals we do another turn in the loop (unless we have already +// executed too many signals in the loop). +//------------------------------------------------------------------------ void FastScheduler::doJob() { + Uint32 init_loopCount = 0; + Uint32 TminLoops = getBOccupancy() + EXTRA_SIGNALS_PER_DO_JOB; + Uint32 TloopMax = (Uint32)globalData.loopMax; + if (TminLoops < TloopMax) { + TloopMax = TminLoops; + }//if + if (TloopMax < MIN_NUMBER_OF_SIG_PER_DO_JOB) { + TloopMax = MIN_NUMBER_OF_SIG_PER_DO_JOB; + }//if do{ - Uint32 loopCount = 0; - Uint32 TminLoops = getBOccupancy() + EXTRA_SIGNALS_PER_DO_JOB; - Uint32 TloopMax = (Uint32)globalData.loopMax; - if (TminLoops < TloopMax) { - TloopMax = TminLoops; - }//if - if (TloopMax < MIN_NUMBER_OF_SIG_PER_DO_JOB) { - TloopMax = MIN_NUMBER_OF_SIG_PER_DO_JOB; - }//if + Uint32 loopCount = init_loopCount; register Uint32 tHighPrio = globalData.highestAvailablePrio; register Signal* signal = getVMSignals(); while ((tHighPrio < LEVEL_IDLE) && (loopCount < TloopMax)) { @@ -151,7 +158,7 @@ FastScheduler::doJob() if (globalData.sendPackedActivated == 1) { Uint32 t1 = theDoJobTotalCounter; Uint32 t2 = theDoJobCallCounter; - t1 += loopCount; + t1 += (loopCount - init_loopCount); t2++; theDoJobTotalCounter = t1; theDoJobCallCounter = t2; @@ -161,7 +168,11 @@ FastScheduler::doJob() theDoJobTotalCounter = 0; }//if }//if - } while (getBOccupancy() > MAX_OCCUPANCY); + init_loopCount = loopCount; + sendPacked(); + } while ((getBOccupancy() > MAX_OCCUPANCY) || + ((init_loopCount < TloopMax) && + (globalData.highestAvailablePrio < LEVEL_IDLE))); }//FastScheduler::doJob() void FastScheduler::sendPacked() diff --git a/ndb/src/kernel/vm/ThreadConfig.cpp b/ndb/src/kernel/vm/ThreadConfig.cpp index 4844bb9a477..76fcc4ba84f 100644 --- a/ndb/src/kernel/vm/ThreadConfig.cpp +++ b/ndb/src/kernel/vm/ThreadConfig.cpp @@ -173,9 +173,6 @@ void ThreadConfig::ipControlLoop() // until all buffers are empty or until we have executed 2048 signals. 
//-------------------------------------------------------------------- globalScheduler.doJob(); - - globalScheduler.sendPacked(); - }//while globalData.incrementWatchDogCounter(6); From 77cdf79fb4e19b6fceba93596f7ce786620c83a6 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 31 Jan 2005 19:35:12 -0800 Subject: [PATCH 03/53] Add 'debug' to mysqladmin --help output. (Bug #8207) client/mysqladmin.cc: Add documentation for 'debug' command --- client/mysqladmin.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index d390a152fc7..0da7d5b3acf 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -1008,6 +1008,7 @@ static void usage(void) print_defaults("my",load_default_groups); puts("\nWhere command is a one or more of: (Commands may be shortened)\n\ create databasename Create a new database\n\ + debug Instruct server to write debug information to log\n\ drop databasename Delete a database and all its tables\n\ extended-status Gives an extended status message from the server\n\ flush-hosts Flush all cached hosts\n\ From c53184ebb775290d2e0f8397815832f1c84a8a5c Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 16:27:08 +0200 Subject: [PATCH 04/53] Proper fix for comparision with ' ' (Bug #7788 "Table is full" occurs during a multitable update") client/mysqldump.c: Style fixes innobase/include/univ.i: UNIV_DEBUG should not depend on configure --debug but on --debug=full mysql-test/r/compare.result: Added test to find bug in previous bugfix mysql-test/t/compare.test: Added test to find bug in previous bugfix mysys/my_handler.c: Proper fix for comparision with ' ' strings/ctype-big5.c: Proper fix for comparision with ' ' strings/ctype-bin.c: Proper fix for comparision with ' ' strings/ctype-gbk.c: Proper fix for comparision with ' ' strings/ctype-latin1.c: Proper fix for comparision with ' ' strings/ctype-mb.c: Proper fix for comparision with ' ' strings/ctype-simple.c: Proper fix for comparision with ' ' strings/ctype-sjis.c: Proper fix for comparision with ' ' strings/ctype-tis620.c: Proper fix for comparision with ' ' strings/ctype-ucs2.c: Proper fix for comparision with ' ' strings/ctype-utf8.c: Proper fix for comparision with ' ' --- client/mysqldump.c | 36 ++++++++++++++++++------------------ innobase/include/univ.i | 4 ---- mysql-test/r/compare.result | 3 +++ mysql-test/t/compare.test | 2 ++ mysys/my_handler.c | 6 +++--- strings/ctype-big5.c | 4 ++-- strings/ctype-bin.c | 4 ++-- strings/ctype-gbk.c | 4 ++-- strings/ctype-latin1.c | 4 ++-- strings/ctype-mb.c | 4 ++-- strings/ctype-simple.c | 6 +++--- strings/ctype-sjis.c | 4 ++-- strings/ctype-tis620.c | 4 ++-- strings/ctype-ucs2.c | 4 ++-- strings/ctype-utf8.c | 4 ++-- 15 files changed, 47 insertions(+), 46 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index afaa2dc5a6d..52255ccb896 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -2091,27 +2091,27 @@ static int dump_all_tables_in_db(char *database) RETURN void */ -static void get_actual_table_name( const char *old_table_name, - char *new_table_name, - int buf_size ) + +static void get_actual_table_name(const char *old_table_name, + char *new_table_name, + int buf_size) { - MYSQL_RES *tableRes; - MYSQL_ROW row; - char query[ NAME_LEN + 50 ]; + MYSQL_RES *tableRes; + MYSQL_ROW row; + char query[ NAME_LEN + 50 ]; + DBUG_ENTER("get_actual_table_name"); - DBUG_ENTER("get_actual_table_name"); + sprintf( query, "SHOW TABLES LIKE '%s'", old_table_name); + if (mysql_query_with_error_report(sock, 0, 
query)) + { + safe_exit(EX_MYSQLERR); + } - sprintf( query, "SHOW TABLES LIKE '%s'", old_table_name ); - if (mysql_query_with_error_report(sock, 0, query)) - { - safe_exit(EX_MYSQLERR); - } - - tableRes = mysql_store_result( sock ); - row = mysql_fetch_row( tableRes ); - strncpy( new_table_name, row[0], buf_size ); - mysql_free_result(tableRes); -} /* get_actual_table_name */ + tableRes= mysql_store_result( sock ); + row= mysql_fetch_row( tableRes ); + strmake(new_table_name, row[0], buf_size-1); + mysql_free_result(tableRes); +} static int dump_selected_tables(char *db, char **table_names, int tables) diff --git a/innobase/include/univ.i b/innobase/include/univ.i index 6ae4fe1c2ce..625978ffc38 100644 --- a/innobase/include/univ.i +++ b/innobase/include/univ.i @@ -80,10 +80,6 @@ memory is read outside the allocated blocks. */ /* Make a non-inline debug version */ -#ifdef DBUG_ON -#define UNIV_DEBUG -#endif /* DBUG_ON */ - /* #define UNIV_DEBUG #define UNIV_MEM_DEBUG diff --git a/mysql-test/r/compare.result b/mysql-test/r/compare.result index 49ec2dd85cc..6f667aabac0 100644 --- a/mysql-test/r/compare.result +++ b/mysql-test/r/compare.result @@ -39,3 +39,6 @@ DROP TABLE t1; SELECT CHAR(31) = '', '' = CHAR(31); CHAR(31) = '' '' = CHAR(31) 0 0 +SELECT CHAR(30) = '', '' = CHAR(30); +CHAR(30) = '' '' = CHAR(30) +0 0 diff --git a/mysql-test/t/compare.test b/mysql-test/t/compare.test index e3c042e608a..bc20786227b 100644 --- a/mysql-test/t/compare.test +++ b/mysql-test/t/compare.test @@ -33,3 +33,5 @@ DROP TABLE t1; # Bug #8134: Comparison against CHAR(31) at end of string SELECT CHAR(31) = '', '' = CHAR(31); +# Extra test +SELECT CHAR(30) = '', '' = CHAR(30); diff --git a/mysys/my_handler.c b/mysys/my_handler.c index df1e9e55e0a..5ee181ca78e 100644 --- a/mysys/my_handler.c +++ b/mysys/my_handler.c @@ -43,7 +43,7 @@ static int compare_bin(uchar *a, uint a_length, uchar *b, uint b_length, return 0; if (skip_end_space && a_length != b_length) { - int swap= 0; + int swap= 1; /* We are using space compression. We have to check if longer key has next character < ' ', in which case it's less than the shorter @@ -57,12 +57,12 @@ static int compare_bin(uchar *a, uint a_length, uchar *b, uint b_length, /* put shorter key in a */ a_length= b_length; a= b; - swap= -1 ^ 1; /* swap sign of result */ + swap= -1; /* swap sign of result */ } for (end= a + a_length-length; a < end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? -swap : swap; } return 0; } diff --git a/strings/ctype-big5.c b/strings/ctype-big5.c index 997b8ce93d6..270b02212af 100644 --- a/strings/ctype-big5.c +++ b/strings/ctype-big5.c @@ -271,7 +271,7 @@ static int my_strnncollsp_big5(CHARSET_INFO * cs __attribute__((unused)), if (!res && a_length != b_length) { const uchar *end; - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -286,7 +286,7 @@ static int my_strnncollsp_big5(CHARSET_INFO * cs __attribute__((unused)), for (end= a + a_length-length; a < end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? 
-swap : swap; } } return res; diff --git a/strings/ctype-bin.c b/strings/ctype-bin.c index 95c52512243..618879607ec 100644 --- a/strings/ctype-bin.c +++ b/strings/ctype-bin.c @@ -157,7 +157,7 @@ static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)), } if (a_length != b_length) { - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -172,7 +172,7 @@ static int my_strnncollsp_8bit_bin(CHARSET_INFO * cs __attribute__((unused)), for (end= a + a_length-length; a < end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? -swap : swap; } } return 0; diff --git a/strings/ctype-gbk.c b/strings/ctype-gbk.c index 731ad58a2fb..9daa9f90f3c 100644 --- a/strings/ctype-gbk.c +++ b/strings/ctype-gbk.c @@ -2632,7 +2632,7 @@ static int my_strnncollsp_gbk(CHARSET_INFO * cs __attribute__((unused)), if (!res && a_length != b_length) { const uchar *end; - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -2647,7 +2647,7 @@ static int my_strnncollsp_gbk(CHARSET_INFO * cs __attribute__((unused)), for (end= a + a_length-length; a < end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? -swap : swap; } } return res; diff --git a/strings/ctype-latin1.c b/strings/ctype-latin1.c index 32d9a227c2f..4ab101add5b 100644 --- a/strings/ctype-latin1.c +++ b/strings/ctype-latin1.c @@ -611,7 +611,7 @@ static int my_strnncollsp_latin1_de(CHARSET_INFO *cs __attribute__((unused)), if (a != a_end || b != b_end) { - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -626,7 +626,7 @@ static int my_strnncollsp_latin1_de(CHARSET_INFO *cs __attribute__((unused)), for ( ; a < a_end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? -swap : swap; } } return 0; diff --git a/strings/ctype-mb.c b/strings/ctype-mb.c index 731fc460cef..6cf48291c91 100644 --- a/strings/ctype-mb.c +++ b/strings/ctype-mb.c @@ -389,7 +389,7 @@ static int my_strnncollsp_mb_bin(CHARSET_INFO * cs __attribute__((unused)), } if (a_length != b_length) { - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -404,7 +404,7 @@ static int my_strnncollsp_mb_bin(CHARSET_INFO * cs __attribute__((unused)), for (end= a + a_length-length; a < end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? -swap : swap; } } return 0; diff --git a/strings/ctype-simple.c b/strings/ctype-simple.c index 4dc6a1be27b..1a09b16a264 100644 --- a/strings/ctype-simple.c +++ b/strings/ctype-simple.c @@ -143,7 +143,7 @@ int my_strnncollsp_simple(CHARSET_INFO * cs, const uchar *a, uint a_length, } if (a_length != b_length) { - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -153,12 +153,12 @@ int my_strnncollsp_simple(CHARSET_INFO * cs, const uchar *a, uint a_length, /* put shorter key in s */ a_length= b_length; a= b; - swap= -1^1; /* swap sign of result */ + swap= -1; /* swap sign of result */ } for (end= a + a_length-length; a < end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? 
-swap : swap; } } return 0; diff --git a/strings/ctype-sjis.c b/strings/ctype-sjis.c index c0b33a13cdd..0cb30a9b6ee 100644 --- a/strings/ctype-sjis.c +++ b/strings/ctype-sjis.c @@ -251,7 +251,7 @@ static int my_strnncollsp_sjis(CHARSET_INFO *cs __attribute__((unused)), int res= my_strnncoll_sjis_internal(cs, &a, a_length, &b, b_length); if (!res && (a != a_end || b != b_end)) { - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -266,7 +266,7 @@ static int my_strnncollsp_sjis(CHARSET_INFO *cs __attribute__((unused)), for (; a < a_end ; a++) { if (*a != ' ') - return ((int) *a - (int) ' ') ^ swap; + return (*a < ' ') ? -swap : swap; } } return res; diff --git a/strings/ctype-tis620.c b/strings/ctype-tis620.c index 3a43c556ac8..6a6c55d214e 100644 --- a/strings/ctype-tis620.c +++ b/strings/ctype-tis620.c @@ -589,7 +589,7 @@ int my_strnncollsp_tis620(CHARSET_INFO * cs __attribute__((unused)), } if (a_length != b_length) { - int swap= 0; + int swap= 1; /* Check the next not space character of the longer key. If it's < ' ', then it's smaller than the other key. @@ -605,7 +605,7 @@ int my_strnncollsp_tis620(CHARSET_INFO * cs __attribute__((unused)), { if (*a != ' ') { - res= ((int) *a - (int) ' ') ^ swap; + res= (*a < ' ') ? -swap : swap; goto ret; } } diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index 936e2b6fdce..ea11f8816a5 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -275,7 +275,7 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)), if (slen != tlen) { - int swap= 0; + int swap= 1; if (slen < tlen) { s= t; @@ -286,7 +286,7 @@ static int my_strnncollsp_ucs2(CHARSET_INFO *cs __attribute__((unused)), for ( ; s < se ; s+= 2) { if (s[0] || s[1] != ' ') - return (((int)s[0] << 8) + (int) s[1] - (int) ' ') ^ swap; + return (s[0] == 0 && s[1] < ' ') ? -swap : swap; } } return 0; diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c index 502d0ec285e..486d428bf1d 100644 --- a/strings/ctype-utf8.c +++ b/strings/ctype-utf8.c @@ -2077,7 +2077,7 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs, if (slen != tlen) { - int swap= 0; + int swap= 1; if (slen < tlen) { slen= tlen; @@ -2098,7 +2098,7 @@ static int my_strnncollsp_utf8(CHARSET_INFO *cs, for ( ; s < se; s++) { if (*s != ' ') - return ((int)*s - (int) ' ') ^ swap; + return (*s < ' ') ? 
-swap : swap; } } return 0; From 3a890c8a874fe52dcac99864b6c4785de0210549 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 16:49:23 +0100 Subject: [PATCH 05/53] ndb - 1) New testcase Check every combination of ins/upd/del of length 5 Check reading savepoint's 2) Fix 1 liner in acc wrt committing read ndb/include/ndbapi/NdbConnection.hpp: Make testcase friend ndb/src/kernel/blocks/dbacc/DbaccMain.cpp: Fix so that committing a READ can _not_ result in setting elementIsDisappeared ndb/test/include/NDBT_Test.hpp: Make copy of testcase name ndb/test/ndbapi/testOperations.cpp: New testcase Check every combination of ins/upd/del of length 5 Check reading savepoint's ndb/test/src/HugoOperations.cpp: Close transaction in destructor ndb/test/src/NDBT_Test.cpp: Make copy of testcase name --- ndb/include/ndbapi/NdbConnection.hpp | 2 + ndb/src/kernel/blocks/dbacc/DbaccMain.cpp | 3 +- ndb/test/include/NDBT_Test.hpp | 4 +- ndb/test/ndbapi/testOperations.cpp | 283 ++++++++++++++++++++++ ndb/test/src/HugoOperations.cpp | 4 + ndb/test/src/NDBT_Test.cpp | 12 +- 6 files changed, 302 insertions(+), 6 deletions(-) diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp index 166355cae17..f173cd8ac6e 100644 --- a/ndb/include/ndbapi/NdbConnection.hpp +++ b/ndb/include/ndbapi/NdbConnection.hpp @@ -687,6 +687,8 @@ private: void remove_list(NdbOperation*& head, NdbOperation*); void define_scan_op(NdbIndexScanOperation*); + + friend int runOperations(class NDBT_Context*, class NDBT_Step*); }; inline diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index 5c7cc597672..a82c96beebd 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -5704,7 +5704,8 @@ void Dbacc::commitOperation(Signal* signal) Uint32 tmp2Olq; if ((operationRecPtr.p->commitDeleteCheckFlag == ZFALSE) && - (operationRecPtr.p->operation != ZSCAN_OP)) { + (operationRecPtr.p->operation != ZSCAN_OP) && + (operationRecPtr.p->operation != ZREAD)) { jam(); /* This method is used to check whether the end result of the transaction will be to delete the tuple. 
In this case all operation will be marked diff --git a/ndb/test/include/NDBT_Test.hpp b/ndb/test/include/NDBT_Test.hpp index 8b69faebde8..a60228c1a5d 100644 --- a/ndb/test/include/NDBT_Test.hpp +++ b/ndb/test/include/NDBT_Test.hpp @@ -188,7 +188,7 @@ public: NDBT_TestCase(NDBT_TestSuite* psuite, const char* name, const char* comment); - virtual ~NDBT_TestCase(){} + virtual ~NDBT_TestCase() {} // This is the default executor of a test case // When a test case is executed it will need to be suplied with a number of @@ -225,6 +225,8 @@ protected: void stopTimer(NDBT_Context*); void printTimer(NDBT_Context*); + BaseString _name; + BaseString _comment; const char* name; const char* comment; NDBT_TestSuite* suite; diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp index 949f08281a5..92cc3e81b1a 100644 --- a/ndb/test/ndbapi/testOperations.cpp +++ b/ndb/test/ndbapi/testOperations.cpp @@ -98,6 +98,11 @@ OperationTestCase matrix[] = { result = NDBT_FAILED; \ break; } +#define C3(b) if (!(b)) { \ + g_err << "ERR: "<< step->getName() \ + << " failed on line " << __LINE__ << endl; \ + abort(); return NDBT_FAILED; } + int runOp(HugoOperations & hugoOps, Ndb * pNdb, @@ -228,11 +233,287 @@ runClearTable(NDBT_Context* ctx, NDBT_Step* step){ return NDBT_OK; } +enum OPS { o_DONE= 0, o_INS= 1, o_UPD= 2, o_DEL= 3 }; +typedef Vector Sequence; + +static +bool +valid(const Sequence& s) +{ + if(s.size() == 0) + return false; + + for(size_t i = 1; i>= 2; + } +} + +static +void +generate(Vector& out, size_t len) +{ + int max= 1; + while(len) + { + max <<= 2; + len--; + } + + len= 1; + for(int i = 0; i= len && valid(tmp)) + { + out.push_back(i); + len= tmp.size(); + } + else + { + //ndbout << "DISCARD: " << tmp << endl; + } + } +} + +int +runOperations(NDBT_Context* ctx, NDBT_Step* step) +{ + const Uint32 DUMMY = 0; + const Uint32 ROW = 1; + + int tmp; + Ndb* pNdb = GETNDB(step); + + Uint32 seqNo = ctx->getProperty("Sequence", (Uint32)0); + Uint32 no_wait = NdbOperation::LM_CommittedRead* + ctx->getProperty("NoWait", (Uint32)1); + + if(seqNo == 0) + { + return NDBT_FAILED; + } + + Sequence seq; + generate(seq, seqNo); + + { + // Dummy row + HugoOperations hugoOps(*ctx->getTab()); + C3(hugoOps.startTransaction(pNdb) == 0); + C3(hugoOps.pkInsertRecord(pNdb, DUMMY, 1, 0) == 0); + C3(hugoOps.execute_Commit(pNdb) == 0); + } + + const bool inital_row= (seq[0] != o_INS); + if(inital_row) + { + HugoOperations hugoOps(*ctx->getTab()); + C3(hugoOps.startTransaction(pNdb) == 0); + C3(hugoOps.pkInsertRecord(pNdb, ROW, 1, 0) == 0); + C3(hugoOps.execute_Commit(pNdb) == 0); + } + + HugoOperations trans1(*ctx->getTab()); + C3(trans1.startTransaction(pNdb) == 0); + for(size_t i = 0; igetTab()); + C3(other.startTransaction(pNdb) == 0); + C3(other.pkReadRecord(pNdb, ROW, 1, (NdbOperation::LockMode)j) == 0); + tmp= other.execute_Commit(pNdb); + if(j == NdbOperation::LM_CommittedRead) + { + C3(inital_row? 
tmp==0 && other.verifyUpdatesValue(0) == 0 : tmp==626); + } + else + { + C3(tmp == 266); + } + } + + /** + * Verify savepoint read + */ + Uint64 transactionId= trans1.getTransaction()->getTransactionId(); + for(size_t k=0; k<=i+1; k++) + { + for(size_t j = 0; j<3; j++) + { + const NdbOperation::LockMode lm= (NdbOperation::LockMode)j; + + HugoOperations same(*ctx->getTab()); + C3(same.startTransaction(pNdb) == 0); + same.getTransaction()->setTransactionId(transactionId); // Cheat + + /** + * Increase savepoint to k + */ + for(size_t l = 1; l<=k; l++) + { + C3(same.pkReadRecord(pNdb, DUMMY, 1, lm) == 0); // Read dummy row + C3(same.execute_NoCommit(pNdb) == 0); + g_info << "savepoint: " << l << endl; + } + + g_info << "op(" << k << ", " << i << "): " + << " lock mode " << lm << endl; + + C3(same.pkReadRecord(pNdb, ROW, 1, lm) == 0); // Read real row + tmp= same.execute_Commit(pNdb); + if(k == 0) + { + if(inital_row) + { + C3(tmp == 0 && same.verifyUpdatesValue(0) == 0); + } else + { + C3(tmp == 626); + } + } + else + { + switch(seq[k-1]){ + case o_INS: + case o_UPD: + C3(tmp == 0 && same.verifyUpdatesValue(k) == 0); + break; + case o_DEL: + C3(tmp == 626); + break; + case o_DONE: + abort(); + } + } + } + } + } + C3(trans1.execute_Commit(pNdb) == 0); + + return NDBT_OK; +} + int main(int argc, const char** argv){ ndb_init(); + Vector tmp; + generate(tmp, 5); + NDBT_TestSuite ts("testOperations"); + for(size_t i = 0; isetProperty("Sequence", tmp[i]); + pt->addInitializer(new NDBT_Initializer(pt, + "runClearTable", + runClearTable)); + + pt->addStep(new NDBT_ParallelStep(pt, + name.c_str()+1, + runOperations)); + + pt->addFinalizer(new NDBT_Finalizer(pt, + "runClearTable", + runClearTable)); + + ts.addTest(pt); + } + for(Uint32 i = 0; i; +template class Vector; diff --git a/ndb/test/src/HugoOperations.cpp b/ndb/test/src/HugoOperations.cpp index e8e2d992345..caaa3a3a0ee 100644 --- a/ndb/test/src/HugoOperations.cpp +++ b/ndb/test/src/HugoOperations.cpp @@ -401,6 +401,10 @@ HugoOperations::HugoOperations(const NdbDictionary::Table& _tab): HugoOperations::~HugoOperations(){ deallocRows(); + if (pTrans != NULL){ + pTrans->close(); + pTrans = NULL; + } } diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index bbbde008938..0e5f744d5ea 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -327,13 +327,17 @@ NDBT_Finalizer::NDBT_Finalizer(NDBT_TestCase* ptest, NDBT_TestCase::NDBT_TestCase(NDBT_TestSuite* psuite, const char* pname, const char* pcomment) : - name(pname) , - comment(pcomment), - suite(psuite){ + name(strdup(pname)) , + comment(strdup(pcomment)), + suite(psuite) +{ + _name.assign(pname); + _comment.assign(pcomment); + name= _name.c_str(); + comment= _comment.c_str(); assert(suite != NULL); } - NDBT_TestCaseImpl1::NDBT_TestCaseImpl1(NDBT_TestSuite* psuite, const char* pname, const char* pcomment) : From 9d548d7f2253ea5dd1f78960f61ba2f0822db177 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 17:08:39 +0100 Subject: [PATCH 06/53] ndb - Put all output from ndb tools during mysql-test-run into log file mysql-test/mysql-test-run.sh: Put all output from ndb_tool into log file mysql-test/t/ndb_autodiscover.test: Put all output from ndb_tool into log file mysql-test/t/ndb_restore.test: Put all output from ndb_tool into log file --- mysql-test/mysql-test-run.sh | 3 +++ mysql-test/t/ndb_autodiscover.test | 10 +++++----- mysql-test/t/ndb_restore.test | 6 +++--- 3 files changed, 11 insertions(+), 8 deletions(-) diff --git 
a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index c2ac26217b9..44d08d65759 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -472,6 +472,7 @@ export MASTER_MYPORT MASTER_MYPORT1 SLAVE_MYPORT MYSQL_TCP_PORT MASTER_MYSOCK MA NDBCLUSTER_BASE_PORT=`expr $NDBCLUSTER_PORT + 2` NDBCLUSTER_OPTS="--port=$NDBCLUSTER_PORT --port-base=$NDBCLUSTER_BASE_PORT --data-dir=$MYSQL_TEST_DIR/var --ndb_mgm-extra-opts=$NDB_MGM_EXTRA_OPTS --ndb_mgmd-extra-opts=$NDB_MGMD_EXTRA_OPTS --ndbd-extra-opts=$NDBD_EXTRA_OPTS" NDB_BACKUP_DIR=$MYSQL_TEST_DIR/var/ndbcluster-$NDBCLUSTER_PORT +NDB_TOOLS_OUTPUT=$MYSQL_TEST_DIR/var/log/ndb_tools.log if [ x$SOURCE_DIST = x1 ] ; then MY_BASEDIR=$MYSQL_TEST_DIR @@ -637,6 +638,7 @@ export CLIENT_BINDIR MYSQL_CLIENT_TEST CHARSETSDIR export NDB_TOOLS_DIR export NDB_MGM export NDB_BACKUP_DIR +export NDB_TOOLS_OUTPUT MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB \ --user=$DBUSER --password=$DBPASSWD --silent -v --skip-safemalloc \ @@ -978,6 +980,7 @@ start_ndbcluster() { if [ ! -z "$USE_NDBCLUSTER" ] then + rm -f $NDBAPI_OUTPUT if [ -z "$USE_RUNNING_NDBCLUSTER" ] then echo "Starting ndbcluster" diff --git a/mysql-test/t/ndb_autodiscover.test b/mysql-test/t/ndb_autodiscover.test index 6551732adba..037115f5e82 100644 --- a/mysql-test/t/ndb_autodiscover.test +++ b/mysql-test/t/ndb_autodiscover.test @@ -199,7 +199,7 @@ insert into t4 values (1, "Automatic"); select * from t4; # Remove the table from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 >> $NDB_TOOLS_OUTPUT ; # # Test that correct error is returned @@ -230,7 +230,7 @@ select * from t4; flush tables; # Remove the table from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t4 >> $NDB_TOOLS_OUTPUT ; SHOW TABLES; @@ -264,8 +264,8 @@ insert into t8 values (8, "myisam table 8"); insert into t9 values (9); # Remove t3, t5 from NDB -system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 > /dev/null ; -system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 > /dev/null ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t3 >> $NDB_TOOLS_OUTPUT ; +system exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test t5 >> $NDB_TOOLS_OUTPUT ; # Remove t6, t7 from disk system rm var/master-data/test/t6.frm > /dev/null ; system rm var/master-data/test/t7.frm > /dev/null ; @@ -479,4 +479,4 @@ create table t10 ( insert into t10 values (1, 'kalle'); ---exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test `$NDB_TOOLS_DIR/ndb_show_tables --no-defaults | grep BLOB` > /dev/null 2>&1 || true +--exec $NDB_TOOLS_DIR/ndb_drop_table --no-defaults -d test `$NDB_TOOLS_DIR/ndb_show_tables --no-defaults | grep BLOB` >> $NDB_TOOLS_OUTPUT 2>&1 || true diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index 09939ec119d..d413453fb0e 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -141,10 +141,10 @@ create table t8_c engine=ndbcluster as select * from t8; create table t9_c engine=ndbcluster as select * from t9; ---exec $NDB_MGM --no-defaults -e "start backup" > /dev/null +--exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -m -r --print --print_meta 
$NDB_BACKUP_DIR/BACKUP/BACKUP-1 > /tmp/ndb_restore.out ---exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 > /tmp/ndb_restore.out +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 1 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-1 >> $NDB_TOOLS_OUTPUT show tables; From cf660b001ef6ba154b1d8d15b534a253dcfc14e0 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 19:01:37 +0100 Subject: [PATCH 07/53] cleanup and streamlining of thread create/exit in ndb --- ndb/include/portlib/NdbThread.h | 2 +- ndb/src/common/portlib/NdbPortLibTest.cpp | 16 ++------ ndb/src/common/portlib/NdbThread.c | 40 +++++++++++-------- .../transporter/TransporterRegistry.cpp | 5 +-- ndb/src/common/util/SocketServer.cpp | 9 ----- ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp | 3 -- .../MemoryChannelTest/MemoryChannelTest.cpp | 4 -- ndb/src/kernel/vm/WatchDog.cpp | 3 -- ndb/src/mgmclient/CommandInterpreter.cpp | 6 +-- ndb/src/mgmsrv/MgmtSrvr.cpp | 10 ----- ndb/src/ndbapi/ClusterMgr.cpp | 4 -- ndb/src/ndbapi/TransporterFacade.cpp | 10 +---- ndb/src/ndbapi/ndb_cluster_connection.cpp | 3 -- ndb/test/ndbapi/benchronja.cpp | 4 +- ndb/test/ndbapi/flexAsynch.cpp | 3 +- ndb/test/ndbapi/flexBench.cpp | 5 +-- ndb/test/ndbapi/flexHammer.cpp | 5 +-- ndb/test/ndbapi/flexScan.cpp | 3 +- ndb/test/ndbapi/flexTT.cpp | 3 +- ndb/test/ndbapi/flexTimedAsynch.cpp | 3 +- ndb/test/ndbapi/flex_bench_mysql.cpp | 29 +++++++------- ndb/test/ndbapi/mainAsyncGenerator.cpp | 2 - ndb/test/src/NDBT_Test.cpp | 1 - ndb/test/tools/transproxy.cpp | 2 - 24 files changed, 55 insertions(+), 120 deletions(-) diff --git a/ndb/include/portlib/NdbThread.h b/ndb/include/portlib/NdbThread.h index 212f7de9384..e86deee4354 100644 --- a/ndb/include/portlib/NdbThread.h +++ b/ndb/include/portlib/NdbThread.h @@ -76,7 +76,7 @@ int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status); * * * status: exit code */ -void NdbThread_Exit(int status); +void NdbThread_Exit(void *status); /** * Set thread concurrency level diff --git a/ndb/src/common/portlib/NdbPortLibTest.cpp b/ndb/src/common/portlib/NdbPortLibTest.cpp index 55b9ccec5f2..d7892411851 100644 --- a/ndb/src/common/portlib/NdbPortLibTest.cpp +++ b/ndb/src/common/portlib/NdbPortLibTest.cpp @@ -54,10 +54,7 @@ extern "C" void* thread1func(void* arg) if (arg1 != 7) fail("TEST1", "Wrong arg"); - NdbThread_Exit(returnvalue); - - return NULL; - + return returnvalue; } // test 2 variables and funcs @@ -80,10 +77,7 @@ extern "C" void* test2func(void* arg) fail("TEST2", "Failed to unlock mutex"); int returnvalue = arg1; - NdbThread_Exit(returnvalue); - - return NULL; - + return returnvalue; } @@ -129,8 +123,7 @@ extern "C" void* testfunc(void* arg) } while(tmpVar<100); - NdbThread_Exit(0); - return NULL; + return 0; } extern "C" void* testTryLockfunc(void* arg) @@ -169,8 +162,7 @@ extern "C" void* testTryLockfunc(void* arg) } while(tmpVar<100); - NdbThread_Exit(0); - return NULL; + return 0; } diff --git a/ndb/src/common/portlib/NdbThread.c b/ndb/src/common/portlib/NdbThread.c index 5f2e6021c43..c1137efdb41 100644 --- a/ndb/src/common/portlib/NdbThread.c +++ b/ndb/src/common/portlib/NdbThread.c @@ -17,7 +17,7 @@ #include #include -#include +#include #include #define MAX_THREAD_NAME 16 @@ -39,21 +39,29 @@ struct NdbThread static void* ndb_thread_wrapper(void* _ss){ - void * ret; - struct NdbThread * 
ss = (struct NdbThread *)_ss; - DBUG_ENTER("ndb_thread_wrapper"); -#ifdef NDB_SHM_TRANSPORTER - if (g_ndb_shm_signum) + my_thread_init(); { - sigset_t mask; - DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum)); - sigemptyset(&mask); - sigaddset(&mask, g_ndb_shm_signum); - pthread_sigmask(SIG_BLOCK, &mask, 0); - } + DBUG_ENTER("ndb_thread_wrapper"); +#ifdef NDB_SHM_TRANSPORTER + if (g_ndb_shm_signum) + { + sigset_t mask; + DBUG_PRINT("info",("Block signum %d",g_ndb_shm_signum)); + sigemptyset(&mask); + sigaddset(&mask, g_ndb_shm_signum); + pthread_sigmask(SIG_BLOCK, &mask, 0); + } #endif - ret= (* ss->func)(ss->object); - DBUG_RETURN(ret); + { + void *ret; + struct NdbThread * ss = (struct NdbThread *)_ss; + ret= (* ss->func)(ss->object); + my_thread_end(); + NdbThread_Exit(ret); + } + /* will never be reached */ + DBUG_RETURN(0); + } } @@ -130,9 +138,9 @@ int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status) } -void NdbThread_Exit(int status) +void NdbThread_Exit(void *status) { - pthread_exit(&status); + pthread_exit(status); } diff --git a/ndb/src/common/transporter/TransporterRegistry.cpp b/ndb/src/common/transporter/TransporterRegistry.cpp index 462cde76740..439730435ec 100644 --- a/ndb/src/common/transporter/TransporterRegistry.cpp +++ b/ndb/src/common/transporter/TransporterRegistry.cpp @@ -1104,11 +1104,8 @@ TransporterRegistry::setIOState(NodeId nodeId, IOState state) { static void * run_start_clients_C(void * me) { - my_thread_init(); ((TransporterRegistry*) me)->start_clients_thread(); - my_thread_end(); - NdbThread_Exit(0); - return me; + return 0; } // Run by kernel thread diff --git a/ndb/src/common/util/SocketServer.cpp b/ndb/src/common/util/SocketServer.cpp index 8bee256684d..da06389b5df 100644 --- a/ndb/src/common/util/SocketServer.cpp +++ b/ndb/src/common/util/SocketServer.cpp @@ -186,11 +186,7 @@ extern "C" void* socketServerThread_C(void* _ss){ SocketServer * ss = (SocketServer *)_ss; - - my_thread_init(); ss->doRun(); - my_thread_end(); - NdbThread_Exit(0); return 0; } @@ -309,11 +305,8 @@ void* sessionThread_C(void* _sc){ SocketServer::Session * si = (SocketServer::Session *)_sc; - my_thread_init(); if(!transfer(si->m_socket)){ si->m_stopped = true; - my_thread_end(); - NdbThread_Exit(0); return 0; } @@ -325,8 +318,6 @@ sessionThread_C(void* _sc){ } si->m_stopped = true; - my_thread_end(); - NdbThread_Exit(0); return 0; } diff --git a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp index ad6c0fd5283..f76440a462a 100644 --- a/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp +++ b/ndb/src/kernel/blocks/ndbfs/AsyncFile.cpp @@ -82,7 +82,6 @@ static int numAsyncFiles = 0; extern "C" void * runAsyncFile(void* arg) { - my_thread_init(); ((AsyncFile*)arg)->run(); return (NULL); } @@ -876,8 +875,6 @@ void AsyncFile::endReq() { // Thread is ended with return if (theWriteBuffer) NdbMem_Free(theWriteBuffer); - my_thread_end(); - NdbThread_Exit(0); } diff --git a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp b/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp index aeab9f7828d..b98c60693f4 100644 --- a/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp +++ b/ndb/src/kernel/blocks/ndbfs/MemoryChannelTest/MemoryChannelTest.cpp @@ -40,7 +40,6 @@ extern "C" void* runProducer(void*arg) NdbSleep_MilliSleep(i); i++; } - NdbThread_Exit(0); return NULL; } @@ -58,7 +57,6 @@ extern "C" void* runConsumer(void* arg) delete p; } - NdbThread_Exit(0); return NULL; } @@ -92,7 +90,6 @@ extern 
"C" void* runProducer2(void*arg) NdbSleep_MilliSleep(i); i++; } - NdbThread_Exit(0); return NULL; } @@ -111,7 +108,6 @@ extern "C" void* runConsumer2(void* arg) delete p; } ndbout << "Consumer2: " << count << " received" << endl; - NdbThread_Exit(0); return NULL; } diff --git a/ndb/src/kernel/vm/WatchDog.cpp b/ndb/src/kernel/vm/WatchDog.cpp index 4e07dc1df90..23475a478d3 100644 --- a/ndb/src/kernel/vm/WatchDog.cpp +++ b/ndb/src/kernel/vm/WatchDog.cpp @@ -27,10 +27,7 @@ extern "C" void* runWatchDog(void* w){ - my_thread_init(); ((WatchDog*)w)->run(); - my_thread_end(); - NdbThread_Exit(0); return NULL; } diff --git a/ndb/src/mgmclient/CommandInterpreter.cpp b/ndb/src/mgmclient/CommandInterpreter.cpp index cbf7776fe06..025bed2bc09 100644 --- a/ndb/src/mgmclient/CommandInterpreter.cpp +++ b/ndb/src/mgmclient/CommandInterpreter.cpp @@ -457,8 +457,6 @@ event_thread_run(void* m) { NdbMgmHandle handle= *(NdbMgmHandle*)m; - my_thread_init(); - int filter[] = { 15, NDB_MGM_EVENT_CATEGORY_BACKUP, 0 }; int fd = ndb_mgm_listen_event(handle, filter); if (fd > 0) @@ -478,9 +476,7 @@ event_thread_run(void* m) do_event_thread= -1; } - my_thread_end(); - NdbThread_Exit(0); - return 0; + return NULL; } bool diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index f698099141a..66c9a6448aa 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -70,12 +70,7 @@ void * MgmtSrvr::logLevelThread_C(void* m) { MgmtSrvr *mgm = (MgmtSrvr*)m; - my_thread_init(); mgm->logLevelThreadRun(); - - my_thread_end(); - NdbThread_Exit(0); - /* NOTREACHED */ return 0; } @@ -83,12 +78,7 @@ void * MgmtSrvr::signalRecvThread_C(void *m) { MgmtSrvr *mgm = (MgmtSrvr*)m; - my_thread_init(); mgm->signalRecvThreadRun(); - - my_thread_end(); - NdbThread_Exit(0); - /* NOTREACHED */ return 0; } diff --git a/ndb/src/ndbapi/ClusterMgr.cpp b/ndb/src/ndbapi/ClusterMgr.cpp index e10b2e1d82c..1fe0cedbd6c 100644 --- a/ndb/src/ndbapi/ClusterMgr.cpp +++ b/ndb/src/ndbapi/ClusterMgr.cpp @@ -54,7 +54,6 @@ runClusterMgr_C(void * me) #ifdef NDB_OSE NdbSleep_MilliSleep(50); #endif - NdbThread_Exit(0); return NULL; } @@ -560,10 +559,7 @@ extern "C" void* runArbitMgr_C(void* me) { - my_thread_init(); ((ArbitMgr*) me)->threadMain(); - my_thread_end(); - NdbThread_Exit(0); return NULL; } diff --git a/ndb/src/ndbapi/TransporterFacade.cpp b/ndb/src/ndbapi/TransporterFacade.cpp index 031ee6315e8..5582143be44 100644 --- a/ndb/src/ndbapi/TransporterFacade.cpp +++ b/ndb/src/ndbapi/TransporterFacade.cpp @@ -405,11 +405,8 @@ extern "C" void* runSendRequest_C(void * me) { - my_thread_init(); ((TransporterFacade*) me)->threadMainSend(); - my_thread_end(); - NdbThread_Exit(0); - return me; + return 0; } void TransporterFacade::threadMainSend(void) @@ -443,11 +440,8 @@ extern "C" void* runReceiveResponse_C(void * me) { - my_thread_init(); ((TransporterFacade*) me)->threadMainReceive(); - my_thread_end(); - NdbThread_Exit(0); - return me; + return 0; } void TransporterFacade::threadMainReceive(void) diff --git a/ndb/src/ndbapi/ndb_cluster_connection.cpp b/ndb/src/ndbapi/ndb_cluster_connection.cpp index 5df707e211d..ab32d6abb8e 100644 --- a/ndb/src/ndbapi/ndb_cluster_connection.cpp +++ b/ndb/src/ndbapi/ndb_cluster_connection.cpp @@ -87,11 +87,8 @@ const char *Ndb_cluster_connection::get_connectstring(char *buf, extern "C" pthread_handler_decl(run_ndb_cluster_connection_connect_thread, me) { - my_thread_init(); g_run_connect_thread= 1; ((Ndb_cluster_connection_impl*) me)->connect_thread(); - my_thread_end(); - NdbThread_Exit(0); 
return me; } diff --git a/ndb/test/ndbapi/benchronja.cpp b/ndb/test/ndbapi/benchronja.cpp index 91b2a041186..a7523e8e416 100644 --- a/ndb/test/ndbapi/benchronja.cpp +++ b/ndb/test/ndbapi/benchronja.cpp @@ -984,7 +984,6 @@ void* ThreadExec(void* ThreadData){ delete pMyNdb; pMyNdb = NULL ; ThreadReady[thread_no] = 1; - NdbThread_Exit(0) ; return 0 ; }//if @@ -1197,7 +1196,6 @@ void* ThreadExec(void* ThreadData){ } // for(;;) delete pMyNdb ; - NdbThread_Exit(0) ; - return 0 ; // Compiler is happy now + return 0 ; } diff --git a/ndb/test/ndbapi/flexAsynch.cpp b/ndb/test/ndbapi/flexAsynch.cpp index 1953444d640..4b87b2c70ed 100644 --- a/ndb/test/ndbapi/flexAsynch.cpp +++ b/ndb/test/ndbapi/flexAsynch.cpp @@ -494,8 +494,7 @@ threadLoop(void* ThreadData) delete localNdb; ThreadReady[threadNo] = 1; - NdbThread_Exit(0); - return NULL; // Just to keep compiler happy + return NULL; }//threadLoop() static diff --git a/ndb/test/ndbapi/flexBench.cpp b/ndb/test/ndbapi/flexBench.cpp index 2a2388109a1..cc2bfb391da 100644 --- a/ndb/test/ndbapi/flexBench.cpp +++ b/ndb/test/ndbapi/flexBench.cpp @@ -617,7 +617,7 @@ static void* flexBenchThread(void* pArg) free(attrRefValue) ; free(pOps) ; delete pNdb ; - NdbThread_Exit(0) ; + return 0; // thread exits } pNdb->init(); @@ -934,8 +934,7 @@ static void* flexBenchThread(void* pArg) free(longKeyAttrValue); } // if - NdbThread_Exit(0); - return NULL; // Just to keep compiler happy + return NULL; // Thread exits } diff --git a/ndb/test/ndbapi/flexHammer.cpp b/ndb/test/ndbapi/flexHammer.cpp index 688e70d501a..13cd2d5e561 100644 --- a/ndb/test/ndbapi/flexHammer.cpp +++ b/ndb/test/ndbapi/flexHammer.cpp @@ -612,10 +612,7 @@ flexHammerThread(void* pArg) flexHammerErrorData->resetErrorCounters(); - // And exit using NDBT - NdbThread_Exit(0); - - return NULL; + return NULL; // thread exits } // flexHammerThread diff --git a/ndb/test/ndbapi/flexScan.cpp b/ndb/test/ndbapi/flexScan.cpp index c7f4041a525..4d2c85d6955 100644 --- a/ndb/test/ndbapi/flexScan.cpp +++ b/ndb/test/ndbapi/flexScan.cpp @@ -701,8 +701,7 @@ flexScanThread(void* ThreadData) free(pkValue); } // if - NdbThread_Exit(0); - return NULL; + return NULL; // thread exits } // flexScanThread diff --git a/ndb/test/ndbapi/flexTT.cpp b/ndb/test/ndbapi/flexTT.cpp index 3b976f9f87e..8d5be2bb399 100644 --- a/ndb/test/ndbapi/flexTT.cpp +++ b/ndb/test/ndbapi/flexTT.cpp @@ -389,8 +389,7 @@ threadLoop(void* ThreadData) delete localNdb; ThreadReady[loc_threadNo] = 1; - NdbThread_Exit(0); - return NULL; // Just to keep compiler happy + return NULL; // Thread exits }//threadLoop() static diff --git a/ndb/test/ndbapi/flexTimedAsynch.cpp b/ndb/test/ndbapi/flexTimedAsynch.cpp index 27380cc79fd..2b8c0bdd5f8 100644 --- a/ndb/test/ndbapi/flexTimedAsynch.cpp +++ b/ndb/test/ndbapi/flexTimedAsynch.cpp @@ -406,9 +406,8 @@ threadLoop(void* ThreadData) delete localNdb; ThreadReady[threadNo] = 1; - NdbThread_Exit(0); - return NULL; + return NULL; // thread exits } void executeThread(StartType aType, Ndb* aNdbObject, ThreadNdb* threadInfo) diff --git a/ndb/test/ndbapi/flex_bench_mysql.cpp b/ndb/test/ndbapi/flex_bench_mysql.cpp index c8d4d85bedf..c15175bfb00 100644 --- a/ndb/test/ndbapi/flex_bench_mysql.cpp +++ b/ndb/test/ndbapi/flex_bench_mysql.cpp @@ -710,7 +710,7 @@ static void* flexBenchThread(void* pArg) the_socket_name, 0) == NULL ) { ndbout << "failed" << endl; - NdbThread_Exit(0) ; + return 0; } ndbout << "ok" << endl; @@ -722,7 +722,7 @@ static void* flexBenchThread(void* pArg) if (r) { ndbout << "autocommit on/off failed" << endl; - 
NdbThread_Exit(0) ; + return 0; } } #endif @@ -741,7 +741,7 @@ static void* flexBenchThread(void* pArg) ndbout << threadNo << endl ; ndbout << "Thread #" << threadNo << " will now exit" << endl ; tResult = 13 ; - NdbThread_Exit(0) ; + return 0; } if (use_ndb) { @@ -750,7 +750,7 @@ static void* flexBenchThread(void* pArg) ndbout << "Failed to get an NDB object" << endl; ndbout << "Thread #" << threadNo << " will now exit" << endl ; tResult = 13; - NdbThread_Exit(0) ; + return 0; } pNdb->waitUntilReady(); return_ndb_object(pNdb, ndb_id); @@ -900,11 +900,11 @@ static void* flexBenchThread(void* pArg) prep_insert[i] = mysql_prepare(&mysql, buf, pos); if (prep_insert[i] == 0) { ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } if (mysql_bind_param(prep_insert[i], bind_insert)) { ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } } @@ -926,11 +926,11 @@ static void* flexBenchThread(void* pArg) prep_update[i] = mysql_prepare(&mysql, buf, pos); if (prep_update[i] == 0) { ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } if (mysql_bind_param(prep_update[i], bind_update)) { ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } } @@ -953,15 +953,15 @@ static void* flexBenchThread(void* pArg) prep_read[i] = mysql_prepare(&mysql, buf, pos); if (prep_read[i] == 0) { ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } if (mysql_bind_param(prep_read[i], bind_read)) { ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } if (mysql_bind_result(prep_read[i], &bind_read[1])) { ndbout << "mysql_bind_result: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } } @@ -978,11 +978,11 @@ static void* flexBenchThread(void* pArg) prep_delete[i] = mysql_prepare(&mysql, buf, pos); if (prep_delete[i] == 0) { ndbout << "mysql_prepare: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } if (mysql_bind_param(prep_delete[i], bind_delete)) { ndbout << "mysql_bind_param: " << mysql_error(&mysql) << endl; - NdbThread_Exit(0) ; + return 0; } } } @@ -1431,8 +1431,7 @@ static void* flexBenchThread(void* pArg) ndbout << "I got here " << endl; return_ndb_object(pNdb, ndb_id); } - NdbThread_Exit(0); - return NULL; // Just to keep compiler happy + return NULL; } diff --git a/ndb/test/ndbapi/mainAsyncGenerator.cpp b/ndb/test/ndbapi/mainAsyncGenerator.cpp index 16cb50e160f..73a8b98ab57 100644 --- a/ndb/test/ndbapi/mainAsyncGenerator.cpp +++ b/ndb/test/ndbapi/mainAsyncGenerator.cpp @@ -274,8 +274,6 @@ threadRoutine(void *arg) asyncDbDisconnect(pNDB); - NdbThread_Exit(0); - return NULL; } diff --git a/ndb/test/src/NDBT_Test.cpp b/ndb/test/src/NDBT_Test.cpp index bbbde008938..17e46bf33e5 100644 --- a/ndb/test/src/NDBT_Test.cpp +++ b/ndb/test/src/NDBT_Test.cpp @@ -475,7 +475,6 @@ void * runStep_C(void * s) { runStep(s); - NdbThread_Exit(0); return NULL; } diff --git a/ndb/test/tools/transproxy.cpp b/ndb/test/tools/transproxy.cpp index 88267801172..28a621fa584 100644 --- a/ndb/test/tools/transproxy.cpp +++ b/ndb/test/tools/transproxy.cpp @@ -291,7 +291,6 @@ extern "C" void* copyrun_C(void* copy) { ((Copy*) copy)->run(); - NdbThread_Exit(0); return 0; } @@ -322,7 +321,6 @@ extern "C" void* connrun_C(void* conn) { ((Conn*) conn)->run(); - NdbThread_Exit(0); return 0; } From eca1f04aa43a9c02a6af1ed9ba9f666a5b684607 Mon Sep 17 00:00:00 2001 
From: unknown Date: Tue, 1 Feb 2005 11:59:44 -0800 Subject: [PATCH 08/53] Make sure mysql_client_test and embedded test clients get added to binary distribution on all platforms. scripts/make_binary_distribution.sh: Add mysql_client_test and embedded versions to basic list of binaries and only list libtool-produced '.libs/' versions as non-netware binaries --- scripts/make_binary_distribution.sh | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 22b51168c23..33d4794e4f7 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -107,8 +107,11 @@ BIN_FILES="extra/comp_err$BS extra/replace$BS extra/perror$BS \ client/mysql$BS client/mysqlshow$BS client/mysqladmin$BS \ client/mysqldump$BS client/mysqlimport$BS \ client/mysqltest$BS client/mysqlcheck$BS \ - client/mysqlbinlog$BS -"; + client/mysqlbinlog$BS \ + tests/mysql_client_test$BS \ + libmysqld/examples/mysql_client_test_embedded$BS \ + libmysqld/examples/mysqltest_embedded$BS \ + "; # Platform-specific bin dir files: if [ $BASE_SYSTEM = "netware" ] ; then @@ -127,8 +130,9 @@ else client/.libs/mysqltest client/.libs/mysqlcheck \ client/.libs/mysqlbinlog client/.libs/mysqlmanagerc \ client/.libs/mysqlmanager-pwgen tools/.libs/mysqlmanager \ - tests/.libs/mysql_client_test libmysqld/examples/mysql_client_test_embedded \ - libmysqld/examples/mysqltest_embedded \ + tests/.libs/mysql_client_test \ + libmysqld/examples/.libs/mysql_client_test_embedded \ + libmysqld/examples/.libs/mysqltest_embedded \ "; fi From 6162c4a6eb22b413a477bb6b9b0f08ec9b98a193 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 15:08:31 -0800 Subject: [PATCH 09/53] Fix value of YEAR field when set from a non-numeric string. (Bug #6067) mysql-test/t/type_date.test: Add new regression test mysql-test/r/type_date.result: Add result sql/field.cc: Set YEAR to 0 when set to a non-numeric string, not 2000, and issue a warning. --- mysql-test/r/type_date.result | 8 ++++++++ mysql-test/t/type_date.test | 7 +++++++ sql/field.cc | 12 ++++++++++-- 3 files changed, 25 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/type_date.result b/mysql-test/r/type_date.result index 71d1b9ad381..3428b5969d9 100644 --- a/mysql-test/r/type_date.result +++ b/mysql-test/r/type_date.result @@ -96,3 +96,11 @@ f2 19781126 19781126 DROP TABLE t1, t2, t3; +CREATE TABLE t1 (y YEAR); +INSERT INTO t1 VALUES ('abc'); +Warnings: +Warning 1265 Data truncated for column 'y' at row 1 +SELECT * FROM t1; +y +0000 +DROP TABLE t1; diff --git a/mysql-test/t/type_date.test b/mysql-test/t/type_date.test index 64420a85189..304ed19b971 100644 --- a/mysql-test/t/type_date.test +++ b/mysql-test/t/type_date.test @@ -107,3 +107,10 @@ SELECT * FROM t2; SELECT * FROM t3; DROP TABLE t1, t2, t3; + +# Test that setting YEAR to invalid string results in default value, not +# 2000. 
(Bug #6067) +CREATE TABLE t1 (y YEAR); +INSERT INTO t1 VALUES ('abc'); +SELECT * FROM t1; +DROP TABLE t1; diff --git a/sql/field.cc b/sql/field.cc index 7357bc06f11..a2b749257df 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -3511,9 +3511,17 @@ void Field_time::sql_type(String &res) const int Field_year::store(const char *from, uint len,CHARSET_INFO *cs) { - int not_used; // We can ignore result from str2int + int err; char *end; - long nr= my_strntol(cs, from, len, 10, &end, ¬_used); + long nr= my_strntol(cs, from, len, 10, &end, &err); + + if (err) + { + if (table->in_use->count_cuted_fields) + set_warning(MYSQL_ERROR::WARN_LEVEL_WARN, ER_WARN_DATA_TRUNCATED, 1); + *ptr= 0; + return 0; + } if (nr < 0 || nr >= 100 && nr <= 1900 || nr > 2155) { From 6d0d03dab9a74fb512f03ea0275d84d34657ce2a Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 1 Feb 2005 18:29:10 -0800 Subject: [PATCH 10/53] Fix QUOTE() to not reuse the input field for output, which resulted in incorrect results when the input was a constant across a multi-row SELECT statement. (Bug #8248) sql/item_strfunc.h: Add tmp_value member sql/item_strfunc.cc: Always allocate a new string for QUOTE(), in case the field is being reused for multiple rows. mysql-test/t/func_str.test: Add regression test mysql-test/r/func_str.result: Add test results --- mysql-test/r/func_str.result | 7 +++++++ mysql-test/t/func_str.test | 5 +++++ sql/item_strfunc.cc | 15 +++++---------- sql/item_strfunc.h | 1 + 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result index 278cd4dd935..9392f152bb4 100644 --- a/mysql-test/r/func_str.result +++ b/mysql-test/r/func_str.result @@ -297,3 +297,10 @@ quote(ltrim(concat(' ', 'a'))) select quote(trim(concat(' ', 'a'))); quote(trim(concat(' ', 'a'))) 'a' +CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3; +SELECT QUOTE('A') FROM t1; +QUOTE('A') +'A' +'A' +'A' +DROP TABLE t1; diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test index 79a996e7e78..6c2abd27551 100644 --- a/mysql-test/t/func_str.test +++ b/mysql-test/t/func_str.test @@ -193,3 +193,8 @@ select trim(leading 'foo' from 'foo'); select quote(ltrim(concat(' ', 'a'))); select quote(trim(concat(' ', 'a'))); + +# Bad results from QUOTE(). 
Bug #8248 +CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3; +SELECT QUOTE('A') FROM t1; +DROP TABLE t1; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index a852906ee2c..aeb63d6af00 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -2183,18 +2183,13 @@ String *Item_func_quote::val_str(String *str) for (from= (char*) arg->ptr(), end= from + arg_length; from < end; from++) new_length+= get_esc_bit(escmask, (uchar) *from); - /* - We have to use realloc() instead of alloc() as we want to keep the - old result in arg - */ - if (arg->realloc(new_length)) + if (tmp_value.alloc(new_length)) goto null; /* - As 'arg' and 'str' may be the same string, we must replace characters - from the end to the beginning + We replace characters from the end to the beginning */ - to= (char*) arg->ptr() + new_length - 1; + to= (char*) tmp_value.ptr() + new_length - 1; *to--= '\''; for (start= (char*) arg->ptr(),end= start + arg_length; end-- != start; to--) { @@ -2222,9 +2217,9 @@ String *Item_func_quote::val_str(String *str) } } *to= '\''; - arg->length(new_length); + tmp_value.length(new_length); null_value= 0; - return arg; + return &tmp_value; null: null_value= 1; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index fc98ebfe67d..ece15484fd9 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -535,6 +535,7 @@ public: class Item_func_quote :public Item_str_func { + String tmp_value; public: Item_func_quote(Item *a) :Item_str_func(a) {} const char *func_name() const { return "quote"; } From 01e1ca9f67ce9fbc23ba37b804a3c81d4ae0b4f5 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 07:38:11 +0100 Subject: [PATCH 11/53] ndb - (backport from 4.1) Fix weird link error on sol9x86 - use incremental linker configure.in: Fix weird link error on sol9x86 - use incremental linker --- configure.in | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/configure.in b/configure.in index 98d060e660c..caa42004736 100644 --- a/configure.in +++ b/configure.in @@ -2954,6 +2954,15 @@ EOF AC_CONFIG_SUBDIRS(innobase) fi +case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in + *solaris*-i?86-no-yes) + # ndb fail for whatever strange reason to link Sun Forte/x86 + # unless using incremental linker + CXXFLAGS="$CXXFLAGS -xildon" + ;; + *) ;; +esac + if test X"$have_ndbcluster" = Xyes then if test X"$mysql_cv_compress" != Xyes From d81a0bede23ca74222252b6f43bd85ead5d7be2a Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 08:38:24 +0200 Subject: [PATCH 12/53] Fix for BUG#8023. Allow LIMIT clause after DUAL. mysql-test/r/limit.result: Added test result for BUG#8023. mysql-test/t/limit.test: Added test for BUG#8023. sql/sql_yacc.yy: Allow the specification of a LIMIT clause after DUAL. This is needed for queries as: select a from t1 union all select 1 from dual limit 1; In this query LIMIT is applied to the whole UNION, so it makes sense, however, the current parser did not allow any clause after DUAL. 
--- mysql-test/r/limit.result | 9 +++++++++ mysql-test/t/limit.test | 10 ++++++++++ sql/sql_yacc.yy | 9 +++++---- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/limit.result b/mysql-test/r/limit.result index c82105e6a49..6a3d2bffab0 100644 --- a/mysql-test/r/limit.result +++ b/mysql-test/r/limit.result @@ -67,3 +67,12 @@ SELECT * FROM t1; id id2 3 0 DROP TABLE t1; +create table t1 (a integer); +insert into t1 values (1); +select 1 as a from t1 union all select 1 from dual limit 1; +a +1 +(select 1 as a from t1) union all (select 1 from dual) limit 1; +a +1 +drop table t1; diff --git a/mysql-test/t/limit.test b/mysql-test/t/limit.test index 61c57c9b468..28b287a5d4a 100644 --- a/mysql-test/t/limit.test +++ b/mysql-test/t/limit.test @@ -49,3 +49,13 @@ SELECT * FROM t1; DELETE FROM t1 WHERE id2 = 0 ORDER BY id desc LIMIT 1; SELECT * FROM t1; DROP TABLE t1; + +# +# Bug#8023 - limit on UNION with from DUAL, causes syntax error +# +create table t1 (a integer); +insert into t1 values (1); +# both queries must return one row +select 1 as a from t1 union all select 1 from dual limit 1; +(select 1 as a from t1) union all (select 1 from dual) limit 1; +drop table t1; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 1e51d8fb82d..e70efe14557 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -2455,10 +2455,11 @@ select_into: select_from: FROM join_table_list where_clause group_clause having_clause opt_order_clause opt_limit_clause procedure_clause - | FROM DUAL_SYM /* oracle compatibility: oracle always requires FROM - clause, and DUAL is system table without fields. - Is "SELECT 1 FROM DUAL" any better than - "SELECT 1" ? Hmmm :) */ + | FROM DUAL_SYM opt_limit_clause + /* oracle compatibility: oracle always requires FROM clause, + and DUAL is system table without fields. + Is "SELECT 1 FROM DUAL" any better than "SELECT 1" ? 
+ Hmmm :) */ ; select_options: From e7ff7469897e1022c55c68117e3a49625be9d1ab Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 10:50:33 +0100 Subject: [PATCH 13/53] backported configure flag from 5.0 workaround for HPUX signal.h error, missing extern "C" moved my_thread_end to NdbThreadExit more checks for shared memory transporter signum setup acinclude.m4: backported configure flag from 5.0 include/my_global.h: workaround for HPUX signal.h error, missing extern "C" ndb/src/common/portlib/NdbThread.c: moved my_thread_end to NdbThreadExit ndb/src/mgmsrv/ConfigInfo.cpp: more checks for shared memory transporter signum setup --- acinclude.m4 | 7 ++++++- include/my_global.h | 6 ++++++ ndb/src/common/portlib/NdbThread.c | 2 +- ndb/src/mgmsrv/ConfigInfo.cpp | 14 ++++++++++++++ 4 files changed, 27 insertions(+), 2 deletions(-) diff --git a/acinclude.m4 b/acinclude.m4 index 4f2ad8daf91..d7e22332655 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -1624,7 +1624,12 @@ AC_DEFUN([MYSQL_CHECK_NDB_OPTIONS], [ --without-ndb-debug Disable special ndb debug features], [ndb_debug="$withval"], [ndb_debug="default"]) - + AC_ARG_WITH([ndb-ccflags], + [ + --with-ndb-ccflags Extra CC options for ndb compile], + [ndb_cxxflags_fix="$ndb_cxxflags_fix $withval"], + [ndb_cxxflags_fix=$ndb_cxxflags_fix]) + AC_MSG_CHECKING([for NDB Cluster options]) AC_MSG_RESULT([]) diff --git a/include/my_global.h b/include/my_global.h index 3263d079853..7ca3d5e1e58 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -135,7 +135,13 @@ #ifdef HAVE_UNIXWARE7_THREADS #include #else +#if defined(HPUX10) || defined(HPUX11) +C_MODE_START /* HPUX needs this, signal.h bug */ +#include +C_MODE_END +#else #include /* AIX must have this included first */ +#endif #endif /* HAVE_UNIXWARE7_THREADS */ #endif /* HAVE_mit_thread */ #if !defined(SCO) && !defined(_REENTRANT) diff --git a/ndb/src/common/portlib/NdbThread.c b/ndb/src/common/portlib/NdbThread.c index c1137efdb41..aaee9b45069 100644 --- a/ndb/src/common/portlib/NdbThread.c +++ b/ndb/src/common/portlib/NdbThread.c @@ -56,7 +56,6 @@ ndb_thread_wrapper(void* _ss){ void *ret; struct NdbThread * ss = (struct NdbThread *)_ss; ret= (* ss->func)(ss->object); - my_thread_end(); NdbThread_Exit(ret); } /* will never be reached */ @@ -140,6 +139,7 @@ int NdbThread_WaitFor(struct NdbThread* p_wait_thread, void** status) void NdbThread_Exit(void *status) { + my_thread_end(); pthread_exit(status); } diff --git a/ndb/src/mgmsrv/ConfigInfo.cpp b/ndb/src/mgmsrv/ConfigInfo.cpp index 9be4af1b9b5..07310e3a8b8 100644 --- a/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/ndb/src/mgmsrv/ConfigInfo.cpp @@ -3192,13 +3192,27 @@ fixShmKey(InitConfigFileParser::Context & ctx, const char *) { DBUG_ENTER("fixShmKey"); { + static int last_signum= -1; Uint32 signum; if(!ctx.m_currentSection->get("Signum", &signum)) { signum= OPT_NDB_SHM_SIGNUM_DEFAULT; + if (signum <= 0) + { + ctx.reportError("Unable to set default parameter for [SHM]Signum" + " please specify [SHM DEFAULT]Signum"); + return false; + } ctx.m_currentSection->put("Signum", signum); DBUG_PRINT("info",("Added Signum=%u", signum)); } + if ( last_signum != (int)signum && last_signum >= 0 ) + { + ctx.reportError("All shared memory transporters must have same [SHM]Signum defined." 
+ " Use [SHM DEFAULT]Signum"); + return false; + } + last_signum= (int)signum; } { Uint32 id1= 0, id2= 0, key= 0; From d2784c988f3511f08fd2d51ee8bc6a7f2dda1e3b Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 13:40:17 +0100 Subject: [PATCH 14/53] Small Do-compile improvements: - added Option "--comment" to be able to override the default compilation comment - improved the abort subroutine to not rely on an external "tail" command (the default /usr/bin/tail on Solaris does not understand the "-n" notation). Get rid of a "useless use of cat" case in the process. Build-tools/Do-compile: - added Option "--comment" to be able to override the default compilation comment - improved the abort subroutine to not rely on an external "tail" command (the default /usr/bin/tail on Solaris does not understand the "-n" notation). Get rid of a "useless use of cat" case in the process. --- Build-tools/Do-compile | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/Build-tools/Do-compile b/Build-tools/Do-compile index 4034533f2eb..1c3ff01796f 100755 --- a/Build-tools/Do-compile +++ b/Build-tools/Do-compile @@ -7,7 +7,7 @@ use Sys::Hostname; @config_options= (); @make_options= (); -$opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env=""; +$opt_comment=$opt_distribution=$opt_user=$opt_config_env=$opt_config_extra_env=""; $opt_dbd_options=$opt_perl_options=$opt_config_options=$opt_make_options=$opt_suffix=""; $opt_tmp=$opt_version_suffix=""; $opt_bundled_zlib=$opt_help=$opt_delete=$opt_debug=$opt_stage=$opt_no_test=$opt_no_perl=$opt_one_error=$opt_with_low_memory=$opt_fast_benchmark=$opt_static_client=$opt_static_server=$opt_static_perl=$opt_sur=$opt_with_small_disk=$opt_local_perl=$opt_tcpip=$opt_build_thread=$opt_use_old_distribution=$opt_enable_shared=$opt_no_crash_me=$opt_no_strip=$opt_with_archive=$opt_with_cluster=$opt_with_csv=$opt_with_example=$opt_with_debug=$opt_no_benchmark=$opt_no_mysqltest=$opt_without_embedded=$opt_readline=0; @@ -17,6 +17,7 @@ GetOptions( "bdb", "build-thread=i", "bundled-zlib", + "comment=s", "config-env=s" => \@config_env, "config-extra-env=s" => \@config_extra_env, "config-options=s" => \@config_options, @@ -110,6 +111,7 @@ $log="$pwd/Logs/$host-$major.$minor$opt_version_suffix.log"; $opt_distribution =~ /(mysql[^\/]*)\.tar/; $ver=$1; $gcc_version=which("gcc"); +$opt_comment= "Official MySQL$opt_version_suffix binary" unless $opt_comment; if (defined($gcc_version) && ! 
$opt_config_env) { $tmp=`$gcc_version -v 2>&1`; @@ -303,7 +305,7 @@ if ($opt_stage <= 1) } $prefix="/usr/local/mysql"; - check_system("$opt_config_env ./configure --prefix=$prefix --localstatedir=$prefix/data --libexecdir=$prefix/bin --with-comment=\"Official MySQL$opt_version_suffix binary\" --with-extra-charsets=complex --with-server-suffix=\"$opt_version_suffix\" --enable-thread-safe-client --enable-local-infile $opt_config_options","Thank you for choosing MySQL"); + check_system("$opt_config_env ./configure --prefix=$prefix --localstatedir=$prefix/data --libexecdir=$prefix/bin --with-comment=\"$opt_comment\" --with-extra-charsets=complex --with-server-suffix=\"$opt_version_suffix\" --enable-thread-safe-client --enable-local-infile $opt_config_options","Thank you for choosing MySQL"); if (-d "$pwd/$host/include-mysql") { safe_system("cp -r $pwd/$host/include-mysql/* $pwd/$host/$ver/include"); @@ -530,6 +532,10 @@ When running several Do-compile runs in parallel, each build should have its own thread ID, so running the test suites does not cause conflicts with duplicate TCP port numbers. +--comment= +Replace the default compilation comment that is embedded into +the mysqld binary. + --config-env= To set up the environment, like 'CC=cc CXX=gcc CXXFLAGS=-O3' @@ -684,16 +690,20 @@ sub abort if ($opt_user) { - $mail_header_file="$opt_tmp/do-command.$$"; - open(TMP,">$mail_header_file"); + # Take the last 40 lines of the build log + open(LOG, "$log") or die $!; + my @log= ; + close LOG; + splice @log => 0, -40; + my $mail_file="$opt_tmp/do-command.$$"; + open(TMP,">$mail_file") or die $!; print TMP "From: mysqldev\@$full_host_name\n"; print TMP "To: $email\n"; print TMP "Subject: $host($uname): $ver$opt_version_suffix compilation failed\n\n"; + print TMP @log; close TMP; - system("tail -n 40 $log > $log.mail"); - system("cat $mail_header_file $log.mail | $sendmail -t -f $email"); - unlink($mail_header_file); - unlink("$log.mail"); + system("$sendmail -t -f $email < $mail_file"); + unlink($mail_file); } exit 1; } From 8ecae2e65cd09568bdfcedd9b22b5fe41cf6144c Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 09:05:52 -0800 Subject: [PATCH 15/53] Fix merge of func_str tests (accidently duplicated a test). 
mysql-test/t/func_str.test: Remove duplicated test that crept in during merge mysql-test/r/func_str.result: Update results --- mysql-test/r/func_str.result | 26 +++++++++++++------------- mysql-test/t/func_str.test | 7 +------ 2 files changed, 14 insertions(+), 19 deletions(-) diff --git a/mysql-test/r/func_str.result b/mysql-test/r/func_str.result index a8a7ad0e349..88b1a5ea743 100644 --- a/mysql-test/r/func_str.result +++ b/mysql-test/r/func_str.result @@ -325,6 +325,19 @@ trim(trailing 'foo' from 'foo') select trim(leading 'foo' from 'foo'); trim(leading 'foo' from 'foo') +select quote(ltrim(concat(' ', 'a'))); +quote(ltrim(concat(' ', 'a'))) +'a' +select quote(trim(concat(' ', 'a'))); +quote(trim(concat(' ', 'a'))) +'a' +CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3; +SELECT QUOTE('A') FROM t1; +QUOTE('A') +'A' +'A' +'A' +DROP TABLE t1; select 1=_latin1'1'; 1=_latin1'1' 1 @@ -691,19 +704,6 @@ select count(*) as total, left(c,10) as reg from t1 group by reg order by reg de total reg 10 2004-12-10 drop table t1; -select quote(ltrim(concat(' ', 'a'))); -quote(ltrim(concat(' ', 'a'))) -'a' -select quote(trim(concat(' ', 'a'))); -quote(trim(concat(' ', 'a'))) -'a' -CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3; -SELECT QUOTE('A') FROM t1; -QUOTE('A') -'A' -'A' -'A' -DROP TABLE t1; select trim(null from 'kate') as "must_be_null"; must_be_null NULL diff --git a/mysql-test/t/func_str.test b/mysql-test/t/func_str.test index 2bf130d1538..6d5974ca5ed 100644 --- a/mysql-test/t/func_str.test +++ b/mysql-test/t/func_str.test @@ -206,6 +206,7 @@ select quote(trim(concat(' ', 'a'))); CREATE TABLE t1 SELECT 1 UNION SELECT 2 UNION SELECT 3; SELECT QUOTE('A') FROM t1; DROP TABLE t1; + # Test collation and coercibility # @@ -440,12 +441,6 @@ create table t1 (a int not null primary key, b varchar(40), c datetime); insert into t1 (a,b,c) values (1,'Tom','2004-12-10 12:13:14'),(2,'ball games','2004-12-10 12:13:14'), (3,'Basil','2004-12-10 12:13:14'), (4,'Dean','2004-12-10 12:13:14'),(5,'Ellis','2004-12-10 12:13:14'), (6,'Serg','2004-12-10 12:13:14'), (7,'Sergei','2004-12-10 12:13:14'),(8,'Georg','2004-12-10 12:13:14'),(9,'Salle','2004-12-10 12:13:14'),(10,'Sinisa','2004-12-10 12:13:14'); select count(*) as total, left(c,10) as reg from t1 group by reg order by reg desc limit 0,12; drop table t1; -# crashing bug with QUOTE() and LTRIM() or TRIM() fixed -# Bug #7495 -# - -select quote(ltrim(concat(' ', 'a'))); -select quote(trim(concat(' ', 'a'))); # # Bug#7455 unexpected result: TRIM( FROM ) gives NOT NULL From 785049c67867f502618584770e2d6c320a73aac1 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 18:30:33 +0100 Subject: [PATCH 16/53] fix linkerror on sol9x86 configure.in: fix linkerror on sol9x86 --- configure.in | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/configure.in b/configure.in index e57cdabad25..2ad8cb2ac68 100644 --- a/configure.in +++ b/configure.in @@ -2954,6 +2954,15 @@ EOF AC_CONFIG_SUBDIRS(innobase) fi +case $SYSTEM_TYPE-$MACHINE_TYPE-$ac_cv_prog_gcc-$have_ndbcluster in + *solaris*-i?86-no-yes) + # ndb fail for whatever strange reason to link Sun Forte/x86 + # unless using incremental linker + CXXFLAGS="$CXXFLAGS -xildon" + ;; + *) ;; +esac + if test X"$have_ndbcluster" = Xyes then if test X"$mysql_cv_compress" != Xyes From d4ac4cb112076cb9f79674f46591c3608cc10318 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 19:23:24 +0100 Subject: [PATCH 17/53] - typo fix... 
--- Build-tools/Bootstrap | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Build-tools/Bootstrap b/Build-tools/Bootstrap index 827eb4022d7..8b769dca3c4 100755 --- a/Build-tools/Bootstrap +++ b/Build-tools/Bootstrap @@ -214,7 +214,7 @@ if (-d $target_dir) @stat= stat("$target_dir/configure.in"); my $mtime= $stat[9]; my ($sec,$min,$hour,$mday,$mon,$year) = localtime($mtime); - my $mtime= sprintf("%04d%-02d-%02d-%02d:%02d", $year+1900, $mon+1, $mday, $hour, $min); + my $mtime= sprintf("%04d-%02d-%02d-%02d:%02d", $year+1900, $mon+1, $mday, $hour, $min); &logger("Renaming $target_dir to $target_dir-$mtime"); $command= "mv "; From 3d0f9d96d7236af2c4dc4bf844233e27497d59dc Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 20:28:01 +0200 Subject: [PATCH 18/53] Fixed during review of new pulled code extra/perror.c: Use strmov() instead of strcpy() Indentation fixes sql/sql_table.cc: Revert back part of the old code as the new code didn't use mysql_data_home, which would have caused problems in the embedded server sql/sql_update.cc: Ensure that used_index is always set (It has to be set because it's value is tested if order != 0) --- extra/perror.c | 14 ++++++++------ sql/sql_table.cc | 9 +++++++-- sql/sql_update.cc | 3 +++ 3 files changed, 18 insertions(+), 8 deletions(-) diff --git a/extra/perror.c b/extra/perror.c index fc10d8eaecc..27027520cbe 100644 --- a/extra/perror.c +++ b/extra/perror.c @@ -213,12 +213,14 @@ int main(int argc,char *argv[]) string 'Unknown Error'. To avoid printing it we try to find the error string by asking for an impossible big error message. */ - msg = strerror(10000); + msg= strerror(10000); - /* allocate a buffer for unknown_error since strerror always returns the same pointer - on some platforms such as Windows */ - unknown_error = malloc( strlen(msg)+1 ); - strcpy( unknown_error, msg ); + /* + Allocate a buffer for unknown_error since strerror always returns + the same pointer on some platforms such as Windows + */ + unknown_error= malloc(strlen(msg)+1); + strmov(unknown_error, msg); for ( ; argc-- > 0 ; argv++) { @@ -271,7 +273,7 @@ int main(int argc,char *argv[]) /* if we allocated a buffer for unknown_error, free it now */ if (unknown_error) - free(unknown_error); + free(unknown_error); exit(error); return error; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index f3c107c2696..5f3875ba934 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2272,7 +2272,10 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, strxmov(src_path, (*tmp_table)->path, reg_ext, NullS); else { - fn_format( src_path, src_table, src_db, reg_ext, MYF(MY_UNPACK_FILENAME)); + strxmov(src_path, mysql_data_home, "/", src_db, "/", src_table, + reg_ext, NullS); + /* Resolve symlinks (for windows) */ + fn_format(src_path, src_path, "", "", MYF(MY_UNPACK_FILENAME)); if (access(src_path, F_OK)) { my_error(ER_BAD_TABLE_ERROR, MYF(0), src_table); @@ -2299,7 +2302,9 @@ int mysql_create_like_table(THD* thd, TABLE_LIST* table, } else { - fn_format( dst_path, table_name, db, reg_ext, MYF(MY_UNPACK_FILENAME)); + strxmov(dst_path, mysql_data_home, "/", db, "/", table_name, + reg_ext, NullS); + fn_format(dst_path, dst_path, "", "", MYF(MY_UNPACK_FILENAME)); if (!access(dst_path, F_OK)) goto table_exists; } diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 0ec71bdfba3..663f2d2be34 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -167,7 +167,10 @@ int mysql_update(THD *thd, else if ((used_index=table->file->key_used_on_scan) < MAX_KEY) 
used_key_is_modified=check_if_key_used(table, used_index, fields); else + { used_key_is_modified=0; + used_index= MAX_KEY; + } if (used_key_is_modified || order) { /* From 1bbfa6f75fcbe8514d5c494972f50f43dba0f7e0 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 21:03:46 +0100 Subject: [PATCH 19/53] ndb - add abort to multi op test case ndb/include/ndbapi/NdbConnection.hpp: Add aborts to test case ndb/test/include/HugoOperations.hpp: Add aborts to test case ndb/test/ndbapi/testOperations.cpp: Add aborts to test case ndb/test/src/HugoOperations.cpp: Add aborts to test case ndb/test/src/HugoTransactions.cpp: Add aborts to test case --- ndb/include/ndbapi/NdbConnection.hpp | 2 +- ndb/test/include/HugoOperations.hpp | 2 + ndb/test/ndbapi/testOperations.cpp | 240 ++++++++++++++++++--------- ndb/test/src/HugoOperations.cpp | 7 + ndb/test/src/HugoTransactions.cpp | 122 +++++++------- 5 files changed, 234 insertions(+), 139 deletions(-) diff --git a/ndb/include/ndbapi/NdbConnection.hpp b/ndb/include/ndbapi/NdbConnection.hpp index f173cd8ac6e..53830dd93c5 100644 --- a/ndb/include/ndbapi/NdbConnection.hpp +++ b/ndb/include/ndbapi/NdbConnection.hpp @@ -688,7 +688,7 @@ private: void remove_list(NdbOperation*& head, NdbOperation*); void define_scan_op(NdbIndexScanOperation*); - friend int runOperations(class NDBT_Context*, class NDBT_Step*); + friend class HugoOperations; }; inline diff --git a/ndb/test/include/HugoOperations.hpp b/ndb/test/include/HugoOperations.hpp index fe22e4b5649..9ca2772e768 100644 --- a/ndb/test/include/HugoOperations.hpp +++ b/ndb/test/include/HugoOperations.hpp @@ -30,6 +30,8 @@ public: int closeTransaction(Ndb*); NdbConnection* getTransaction(); void refresh(); + + void setTransactionId(Uint64); int pkInsertRecord(Ndb*, int recordNo, diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp index 92cc3e81b1a..e254aff58dc 100644 --- a/ndb/test/ndbapi/testOperations.cpp +++ b/ndb/test/ndbapi/testOperations.cpp @@ -103,6 +103,10 @@ OperationTestCase matrix[] = { << " failed on line " << __LINE__ << endl; \ abort(); return NDBT_FAILED; } +#define C3(b) if (!(b)) { \ + g_err << "ERR: failed on line " << __LINE__ << endl; \ + return NDBT_FAILED; } + int runOp(HugoOperations & hugoOps, Ndb * pNdb, @@ -326,18 +330,122 @@ generate(Vector& out, size_t len) } } +static const Uint32 DUMMY = 0; +static const Uint32 ROW = 1; + +int +verify_other(NDBT_Context* ctx, + Ndb* pNdb, int seq, OPS latest, bool initial_row, bool commit) +{ + Uint32 no_wait = NdbOperation::LM_CommittedRead* + ctx->getProperty("NoWait", (Uint32)1); + + for(size_t j = no_wait; j<3; j++) + { + HugoOperations other(*ctx->getTab()); + C3(other.startTransaction(pNdb) == 0); + C3(other.pkReadRecord(pNdb, ROW, 1, (NdbOperation::LockMode)j) == 0); + int tmp= other.execute_Commit(pNdb); + if(seq == 0){ + if(j == NdbOperation::LM_CommittedRead) + { + C3(initial_row? tmp==0 && other.verifyUpdatesValue(0) == 0 : tmp==626); + } + else + { + C3(tmp == 266); + } + } + else if(commit) + { + switch(latest){ + case o_INS: + case o_UPD: + C3(tmp == 0 && other.verifyUpdatesValue(seq) == 0); + break; + case o_DEL: + C3(tmp == 626); + break; + case o_DONE: + abort(); + } + } + else + { + // rollback + C3(initial_row? 
tmp==0 && other.verifyUpdatesValue(0) == 0 : tmp==626); + } + } + + return NDBT_OK; +} + +int +verify_savepoint(NDBT_Context* ctx, + Ndb* pNdb, int seq, OPS latest, + Uint64 transactionId) +{ + bool initial_row= (seq == 0) && latest == o_INS; + + for(size_t j = 0; j<3; j++) + { + const NdbOperation::LockMode lm= (NdbOperation::LockMode)j; + + HugoOperations same(*ctx->getTab()); + C3(same.startTransaction(pNdb) == 0); + same.setTransactionId(transactionId); // Cheat + + /** + * Increase savepoint to k + */ + for(size_t l = 1; l<=seq; l++) + { + C3(same.pkReadRecord(pNdb, DUMMY, 1, lm) == 0); // Read dummy row + C3(same.execute_NoCommit(pNdb) == 0); + g_info << "savepoint: " << l << endl; + } + + g_info << "op(" << seq << "): " + << " lock mode " << lm << endl; + + C3(same.pkReadRecord(pNdb, ROW, 1, lm) == 0); // Read real row + int tmp= same.execute_Commit(pNdb); + if(seq == 0) + { + if(initial_row) + { + C3(tmp == 0 && same.verifyUpdatesValue(0) == 0); + } else + { + C3(tmp == 626); + } + } + else + { + switch(latest){ + case o_INS: + case o_UPD: + C3(tmp == 0 && same.verifyUpdatesValue(seq) == 0); + break; + case o_DEL: + C3(tmp == 626); + break; + case o_DONE: + abort(); + } + } + } + return NDBT_OK; +} + int runOperations(NDBT_Context* ctx, NDBT_Step* step) { - const Uint32 DUMMY = 0; - const Uint32 ROW = 1; - int tmp; Ndb* pNdb = GETNDB(step); Uint32 seqNo = ctx->getProperty("Sequence", (Uint32)0); - Uint32 no_wait = NdbOperation::LM_CommittedRead* - ctx->getProperty("NoWait", (Uint32)1); + Uint32 commit= ctx->getProperty("Commit", (Uint32)1); if(seqNo == 0) { @@ -355,8 +463,8 @@ runOperations(NDBT_Context* ctx, NDBT_Step* step) C3(hugoOps.execute_Commit(pNdb) == 0); } - const bool inital_row= (seq[0] != o_INS); - if(inital_row) + const bool initial_row= (seq[0] != o_INS); + if(initial_row) { HugoOperations hugoOps(*ctx->getTab()); C3(hugoOps.startTransaction(pNdb) == 0); @@ -389,80 +497,36 @@ runOperations(NDBT_Context* ctx, NDBT_Step* step) /** * Verify other transaction */ - for(size_t j = no_wait; j<3; j++) - { - HugoOperations other(*ctx->getTab()); - C3(other.startTransaction(pNdb) == 0); - C3(other.pkReadRecord(pNdb, ROW, 1, (NdbOperation::LockMode)j) == 0); - tmp= other.execute_Commit(pNdb); - if(j == NdbOperation::LM_CommittedRead) - { - C3(inital_row? 
tmp==0 && other.verifyUpdatesValue(0) == 0 : tmp==626); - } - else - { - C3(tmp == 266); - } - } - + if(verify_other(ctx, pNdb, 0, seq[0], initial_row, commit) != NDBT_OK) + return NDBT_FAILED; + /** * Verify savepoint read */ Uint64 transactionId= trans1.getTransaction()->getTransactionId(); + for(size_t k=0; k<=i+1; k++) { - for(size_t j = 0; j<3; j++) - { - const NdbOperation::LockMode lm= (NdbOperation::LockMode)j; - - HugoOperations same(*ctx->getTab()); - C3(same.startTransaction(pNdb) == 0); - same.getTransaction()->setTransactionId(transactionId); // Cheat - - /** - * Increase savepoint to k - */ - for(size_t l = 1; l<=k; l++) - { - C3(same.pkReadRecord(pNdb, DUMMY, 1, lm) == 0); // Read dummy row - C3(same.execute_NoCommit(pNdb) == 0); - g_info << "savepoint: " << l << endl; - } - - g_info << "op(" << k << ", " << i << "): " - << " lock mode " << lm << endl; - - C3(same.pkReadRecord(pNdb, ROW, 1, lm) == 0); // Read real row - tmp= same.execute_Commit(pNdb); - if(k == 0) - { - if(inital_row) - { - C3(tmp == 0 && same.verifyUpdatesValue(0) == 0); - } else - { - C3(tmp == 626); - } - } - else - { - switch(seq[k-1]){ - case o_INS: - case o_UPD: - C3(tmp == 0 && same.verifyUpdatesValue(k) == 0); - break; - case o_DEL: - C3(tmp == 626); - break; - case o_DONE: - abort(); - } - } - } - } + if(verify_savepoint(ctx, pNdb, k, + k>0 ? seq[k-1] : initial_row ? o_INS : o_DONE, + transactionId) != NDBT_OK) + return NDBT_FAILED; + } + } + + if(commit) + { + C3(trans1.execute_Commit(pNdb) == 0); + } + else + { + C3(trans1.execute_Rollback(pNdb) == 0); } - C3(trans1.execute_Commit(pNdb) == 0); + if(verify_other(ctx, pNdb, seq.size(), seq.back(), + initial_row, commit) != NDBT_OK) + return NDBT_FAILED; + return NDBT_OK; } @@ -495,16 +559,20 @@ main(int argc, const char** argv){ } } + BaseString n1; + n1.append(name); + n1.append("_COMMIT"); + NDBT_TestCaseImpl1 *pt = new NDBT_TestCaseImpl1(&ts, - name.c_str()+1, ""); - + n1.c_str()+1, ""); + pt->setProperty("Sequence", tmp[i]); pt->addInitializer(new NDBT_Initializer(pt, "runClearTable", runClearTable)); pt->addStep(new NDBT_ParallelStep(pt, - name.c_str()+1, + "run", runOperations)); pt->addFinalizer(new NDBT_Finalizer(pt, @@ -512,8 +580,26 @@ main(int argc, const char** argv){ runClearTable)); ts.addTest(pt); - } + name.append("_ABORT"); + pt = new NDBT_TestCaseImpl1(&ts, name.c_str()+1, ""); + pt->setProperty("Sequence", tmp[i]); + pt->setProperty("Commit", (Uint32)0); + pt->addInitializer(new NDBT_Initializer(pt, + "runClearTable", + runClearTable)); + + pt->addStep(new NDBT_ParallelStep(pt, + "run", + runOperations)); + + pt->addFinalizer(new NDBT_Finalizer(pt, + "runClearTable", + runClearTable)); + + ts.addTest(pt); + } + for(Uint32 i = 0; isetTransactionId(id); + } +} + int HugoOperations::closeTransaction(Ndb* pNdb){ if (pTrans != NULL){ diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index 096f5406bbf..a2ed9c77db7 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -92,7 +92,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -102,13 +102,13 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, const NdbError err = pTrans->getNdbError(); if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); 
retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -127,7 +127,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, while((eof = rs->nextResult(true)) == 0){ rows++; if (calc.verifyRowValues(&row) != 0){ - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -137,11 +137,11 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, rs->close(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_OK; } } @@ -150,7 +150,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR_INFO(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); switch (err.code){ case 488: @@ -164,17 +164,17 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); g_info << rows << " rows have been read" << endl; if (records != 0 && rows != records){ g_err << "Check expected number of records failed" << endl - << " expected=" << records <<", " << endl - << " read=" << rows << endl; + << " expected=" << records <<", " << endl + << " read=" << rows << endl; return NDBT_FAILED; } @@ -248,7 +248,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -258,13 +258,13 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, const NdbError err = pTrans->getNdbError(); if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -283,7 +283,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, while((eof = rs->nextResult(true)) == 0){ rows++; if (calc.verifyRowValues(&row) != 0){ - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -293,11 +293,11 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, rs->close(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_OK; } } @@ -306,7 +306,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR_INFO(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); switch (err.code){ case 488: @@ -320,17 +320,17 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); g_info << rows << " rows have been read" << endl; if (records != 0 && rows != records){ g_err << "Check expected number of records failed" << endl - << " expected=" << records <<", " << endl - << " read=" << rows << endl; + << " expected=" << records <<", " << endl + << " read=" << rows << endl; return NDBT_FAILED; } @@ -344,9 +344,9 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, int HugoTransactions::scanUpdateRecords(Ndb* pNdb, - int records, - int abortPercent, - int parallelism){ + int 
records, + int abortPercent, + int parallelism){ if(m_defaultScanUpdateMethod == 1){ return scanUpdateRecords1(pNdb, records, abortPercent, parallelism); } else if(m_defaultScanUpdateMethod == 2){ @@ -707,7 +707,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, while (true){ - restart: +restart: if (retryAttempt++ >= retryMax){ g_info << "ERROR: has retried this operation " << retryAttempt << " times, failing!" << endl; @@ -743,7 +743,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, for(a=0; agetValue(tab.getColumn(a)->getName())) == NULL){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -752,7 +752,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, if( check == -1 ) { const NdbError err = pTrans->getNdbError(); ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); if (err.status == NdbError::TemporaryError){ NdbSleep_MilliSleep(50); continue; @@ -777,7 +777,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, NdbOperation* pUp = rs->updateTuple(); if(pUp == 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } const int updates = calc.getUpdatesValue(&row) + 1; @@ -786,7 +786,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == false){ if(setValueForAttr(pUp, a, r, updates ) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -795,7 +795,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, if (rows == abortCount && abortTrans == true){ g_info << "Scan is aborted" << endl; // This scan should be aborted - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_OK; } } while((check = rs->nextResult(false)) == 0); @@ -807,7 +807,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, const NdbError err = pTrans->getNdbError(); if( check == -1 ) { - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); ERR(err); if (err.status == NdbError::TemporaryError){ NdbSleep_MilliSleep(50); @@ -819,7 +819,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, const NdbError err = pTrans->getNdbError(); if( check == -1 ) { - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); ERR(err); if (err.status == NdbError::TemporaryError){ NdbSleep_MilliSleep(50); @@ -828,7 +828,7 @@ HugoTransactions::scanUpdateRecords3(Ndb* pNdb, return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); g_info << rows << " rows have been updated" << endl; return NDBT_OK; @@ -1772,7 +1772,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, pUpdOp = pTrans->getNdbOperation(tab.getName()); if (pUpdOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1788,7 +1788,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pUpdOp, a, r) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1805,7 +1805,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, check = pUpdOp->incValue(attr->getName(), valToIncWith); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1817,7 +1817,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, (calc.isUpdateCol(a) == false)){ if(setValueForAttr(pUpdOp, a, r, updates ) != 0){ 
ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1846,7 +1846,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); r++; // Read next record @@ -1900,7 +1900,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1928,7 +1928,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, switch(err.status){ case NdbError::TemporaryError: ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; @@ -2066,18 +2066,18 @@ HugoTransactions::lockRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } for (int b=0; (bcloseTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2178,7 +2178,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, pOp = pTrans->getNdbIndexOperation(idxName, tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->readTuple(); @@ -2186,7 +2186,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, pOp = sOp = pTrans->getNdbIndexScanOperation(idxName, tab.getName()); if (sOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -2196,7 +2196,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -2205,7 +2205,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2216,7 +2216,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, if((rows[b]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2257,11 +2257,11 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, if(ordered && rs->nextResult(true) == 0){ ndbout << "Error when comparing records " << " - index op next_result to many" << endl; - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); } deallocRows(); g_info << reads << " records read" << endl; @@ -2322,21 +2322,21 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, pOp = pTrans->getNdbIndexOperation(idxName, tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->readTupleExclusive(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } else { pOp = sOp = pTrans->getNdbIndexScanOperation(idxName, tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -2349,7 +2349,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, 
if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2371,7 +2371,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if( check == -1 ) { const NdbError err = pTrans->getNdbError(); ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); if (err.status == NdbError::TemporaryError){ NdbSleep_MilliSleep(50); @@ -2405,13 +2405,13 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if (pUpdOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -2420,7 +2420,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pUpdOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2431,7 +2431,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == false){ if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2455,7 +2455,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, updated += batchsize; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); r+= batchsize; // Read next record } From ad21db5e0727704ee0527c08410bafff4d3150f4 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 22:35:36 +0200 Subject: [PATCH 20/53] Use -DUNIV_DEBUG (extra debugging for InnoDB) when configuring with --debug=full --- configure.in | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/configure.in b/configure.in index e57cdabad25..98d060e660c 100644 --- a/configure.in +++ b/configure.in @@ -1677,8 +1677,8 @@ then elif test "$with_debug" = "full" then # Full debug. Very slow in some cases - CFLAGS="$DEBUG_CFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC $CFLAGS" - CXXFLAGS="$DEBUG_CXXFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC $CXXFLAGS" + CFLAGS="$DEBUG_CFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC -DUNIV_DEBUG $CFLAGS" + CXXFLAGS="$DEBUG_CXXFLAGS -DDBUG_ON -DSAFE_MUTEX -DSAFEMALLOC -DUNIV_DEBUG $CXXFLAGS" else # Optimized version. No debug CFLAGS="$OPTIMIZE_CFLAGS -DDBUG_OFF $CFLAGS" From ba51652c2cd753566d768ccdf02f2de92a17a4b2 Mon Sep 17 00:00:00 2001 From: unknown Date: Wed, 2 Feb 2005 15:03:34 -0800 Subject: [PATCH 21/53] Fix 'mysqlcheck --help' to not specify what storage engines are supported, rather than give incorrect information. (Bug #8029) client/mysqlcheck.c: Make usage message more general as to what storage engines are supported. --- client/mysqlcheck.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index c670b84db44..babf4de0c3d 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -195,7 +195,7 @@ static void usage(void) puts("and you are welcome to modify and redistribute it under the GPL license.\n"); puts("This program can be used to CHECK (-c,-m,-C), REPAIR (-r), ANALYZE (-a)"); puts("or OPTIMIZE (-o) tables. Some of the options (like -e or -q) can be"); - puts("used at the same time. It works on MyISAM and in some cases on BDB tables."); + puts("used at the same time. 
Not all options are supported by all storage engines."); puts("Please consult the MySQL manual for latest information about the"); puts("above. The options -c,-r,-a and -o are exclusive to each other, which"); puts("means that the last option will be used, if several was specified.\n"); From a6cb0e5ff1c5160f60ebc5303f5c0d898d28dcda Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 17:10:46 +0100 Subject: [PATCH 22/53] ndb - use hugo methods all the way in hugo more test program fixes replace pNdb->closeTransaction with closeTransaction(pNdb) ndb/test/src/HugoTransactions.cpp: more test program fixes replace pNdb->closeTransaction with closeTransaction(pNdb) --- ndb/test/src/HugoTransactions.cpp | 188 +++++++++++++++--------------- 1 file changed, 94 insertions(+), 94 deletions(-) diff --git a/ndb/test/src/HugoTransactions.cpp b/ndb/test/src/HugoTransactions.cpp index a2ed9c77db7..85c96ef0f7f 100644 --- a/ndb/test/src/HugoTransactions.cpp +++ b/ndb/test/src/HugoTransactions.cpp @@ -68,7 +68,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, pOp = pTrans->getNdbScanOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -77,14 +77,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, if( rs == 0 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->interpret_exit_ok(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -224,7 +224,7 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, pOp = pTrans->getNdbIndexScanOperation(pIdx->getName(), tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -233,14 +233,14 @@ HugoTransactions::scanReadRecords(Ndb* pNdb, if( rs == 0 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->interpret_exit_ok(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -398,21 +398,21 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->openScanExclusive(parallelism); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->interpret_exit_ok(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -420,7 +420,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, for(a=0; agetValue(tab.getColumn(a)->getName())) == NULL){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -430,13 +430,13 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, const NdbError err = pTrans->getNdbError(); if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -464,11 +464,11 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, check = pTrans->stopScan(); if( check == -1 ) { 
ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_OK; } int res = takeOverAndUpdateRecord(pNdb, pOp); @@ -477,7 +477,7 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, continue; } if (res != 0){ - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return res; } @@ -501,18 +501,18 @@ HugoTransactions::scanUpdateRecords1(Ndb* pNdb, continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } if(eof == -2){ - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); g_info << rows << " rows have been updated" << endl; return NDBT_OK; @@ -565,21 +565,21 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->openScanExclusive(parallelism); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->interpret_exit_ok(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -587,7 +587,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, for(a=0; agetValue(tab.getColumn(a)->getName())) == NULL){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -597,13 +597,13 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, const NdbError err = pTrans->getNdbError(); if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -639,7 +639,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, rows++; if (addRowToUpdate(pNdb, pUpTrans, pOp) != 0){ pNdb->closeTransaction(pUpTrans); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } while((eof = pTrans->nextScanResult(false)) == 0); @@ -650,12 +650,12 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, check = pTrans->stopScan(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); pNdb->closeTransaction(pUpTrans); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); pNdb->closeTransaction(pUpTrans); return NDBT_OK; } @@ -665,7 +665,7 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, const NdbError err = pUpTrans->getNdbError(); ERR(err); pNdb->closeTransaction(pUpTrans); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } pNdb->closeTransaction(pUpTrans); @@ -675,17 +675,17 @@ HugoTransactions::scanUpdateRecords2(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); g_info << rows << " rows have been updated" << endl; return NDBT_OK; @@ -728,14 +728,14 @@ restart: pOp = pTrans->getNdbScanOperation(tab.getName()); if (pOp == NULL) { 
ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } NdbResultSet *rs = pOp->readTuplesExclusive(parallelism); if( rs == 0 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -899,14 +899,14 @@ HugoTransactions::loadTable(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->insertTuple(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -914,7 +914,7 @@ HugoTransactions::loadTable(Ndb* pNdb, for (a = 0; agetNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -932,7 +932,7 @@ HugoTransactions::loadTable(Ndb* pNdb, } if(check == -1 ) { const NdbError err = pTrans->getNdbError(); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); pTrans= 0; switch(err.status){ case NdbError::Success: @@ -974,7 +974,7 @@ HugoTransactions::loadTable(Ndb* pNdb, } else{ if (closeTrans) { - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); pTrans= 0; } } @@ -1025,14 +1025,14 @@ HugoTransactions::fillTable(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->insertTuple(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1040,7 +1040,7 @@ HugoTransactions::fillTable(Ndb* pNdb, for (a = 0; agetNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1050,7 +1050,7 @@ HugoTransactions::fillTable(Ndb* pNdb, check = pTrans->execute( Commit, CommitAsMuchAsPossible ); if(check == -1 ) { const NdbError err = pTrans->getNdbError(); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); switch(err.status){ case NdbError::Success: @@ -1102,7 +1102,7 @@ HugoTransactions::fillTable(Ndb* pNdb, } } else{ - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); } // Step to next record @@ -1419,7 +1419,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1441,7 +1441,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1450,7 +1450,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1461,7 +1461,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, if((rows[b]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1474,7 +1474,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; @@ -1487,13 +1487,13 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, default: ERR(err); - 
pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } else{ for (int b=0; (bcloseTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } reads++; @@ -1501,7 +1501,7 @@ HugoTransactions::pkReadRecords(Ndb* pNdb, } } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); } deallocRows(); @@ -1556,14 +1556,14 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->readTupleExclusive(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1572,7 +1572,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1583,7 +1583,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if((rows[b]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1595,19 +1595,19 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } for(b = 0; bcloseTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1617,14 +1617,14 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, pUpdOp = pTrans->getNdbOperation(tab.getName()); if (pUpdOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pUpdOp->updateTuple(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1632,7 +1632,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pUpdOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1642,7 +1642,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == false){ if(setValueForAttr(pUpdOp, a, r+b, updates ) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1655,14 +1655,14 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); ndbout << "r = " << r << endl; - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } else{ @@ -1670,7 +1670,7 @@ HugoTransactions::pkUpdateRecords(Ndb* pNdb, } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); r += batch; // Read next record } @@ -1716,14 +1716,14 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, NdbOperation* pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->readTupleExclusive(); if( check == -1 ) { ERR(pTrans->getNdbError()); - 
pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1732,7 +1732,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1744,7 +1744,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, if((row.attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1756,13 +1756,13 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1779,7 +1779,7 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, check = pUpdOp->interpretedUpdateTuple(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1831,14 +1831,14 @@ HugoTransactions::pkInterpretedUpdateRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); ndbout << "r = " << r << endl; - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } else{ @@ -1907,7 +1907,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, check = pOp->deleteTuple(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -1916,7 +1916,7 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -1947,20 +1947,20 @@ HugoTransactions::pkDelRecords(Ndb* pNdb, } } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; break; default: ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } else { deleted++; } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); r++; // Read next record @@ -2023,14 +2023,14 @@ HugoTransactions::lockRecords(Ndb* pNdb, pOp = pTrans->getNdbOperation(tab.getName()); if (pOp == NULL) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } check = pOp->readTupleExclusive(); if( check == -1 ) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -2039,7 +2039,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, if (tab.getColumn(a)->getPrimaryKey() == true){ if(equalForAttr(pOp, a, r+b) != 0){ ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2050,7 +2050,7 @@ HugoTransactions::lockRecords(Ndb* pNdb, if((rows[b]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2092,26 +2092,26 @@ HugoTransactions::lockRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); 
NdbSleep_MilliSleep(50); retryAttempt++; continue; } ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } else{ for (int b=0; (bcloseTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } r++; // Read next record } } - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); } @@ -2229,7 +2229,7 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, if (err.status == NdbError::TemporaryError){ ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); NdbSleep_MilliSleep(50); retryAttempt++; continue; @@ -2242,13 +2242,13 @@ HugoTransactions::indexReadRecords(Ndb* pNdb, default: ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } else{ for (int b=0; (bcloseTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } reads++; @@ -2360,7 +2360,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if((rows[b]->attributeStore(a) = pOp->getValue(tab.getColumn(a)->getName())) == 0) { ERR(pTrans->getNdbError()); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } } @@ -2383,13 +2383,13 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if(ordered && check != 0){ g_err << "Row: " << r << " not found!!" << endl; - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } for(b = 0; bcloseTransaction(pTrans); + closeTransaction(pNdb); return NDBT_FAILED; } @@ -2442,7 +2442,7 @@ HugoTransactions::indexUpdateRecords(Ndb* pNdb, if( check == -1 ) { const NdbError err = pTrans->getNdbError(); ERR(err); - pNdb->closeTransaction(pTrans); + closeTransaction(pNdb); if (err.status == NdbError::TemporaryError){ NdbSleep_MilliSleep(50); From 12a0a21c5ee7d4dec9c6199873933b65964c7ef2 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 17:53:03 +0100 Subject: [PATCH 23/53] ndbcluster rpms --- support-files/mysql.spec.sh | 94 +++++++++++++++++++++++++++++ support-files/ndb-config-2-node.ini | 43 +++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 support-files/ndb-config-2-node.ini diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 99280385965..0d72356731c 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -104,6 +104,53 @@ This package contains the standard MySQL clients and administration tools. %description client -l pt_BR Este pacote contém os clientes padrão para o MySQL. +%package ndb-storage +Release: %{release} +Summary: MySQL - ndbcluster storage engine +Group: Applications/Databases + +%description ndb-storage +This package contains the ndbcluster storage engine. +It is necessary to have this package installed on all +computers that should store ndbcluster table data. +Note that this storage engine can only be used in conjunction +with the MySQL Max server. + +%{see_base} + +%package ndb-management +Release: %{release} +Summary: MySQL - ndbcluster storage engine management +Group: Applications/Databases + +%description ndb-management +This package contains ndbcluster storage engine management. +It is necessary to have this package installed on at least +one computer in the cluster. + +%{see_base} + +%package ndb-tools +Release: %{release} +Summary: MySQL - ndbcluster storage engine basic tools +Group: Applications/Databases + +%description ndb-tools +This package contains ndbcluster storage engine basic tools. 
+ +%{see_base} + +%package ndb-extra +Release: %{release} +Summary: MySQL - ndbcluster storage engine extra tools +Group: Applications/Databases + +%description ndb-extra +This package contains some extra ndbcluster storage engine tools for the advanced user. +They should be used with caution. + +%{see_base} + %package bench Release: %{release} Requires: %{name}-client perl-DBI perl @@ -162,6 +209,7 @@ Requires: MySQL-server >= 4.0 Optional MySQL server binary that supports additional features like: - Berkeley DB Storage Engine + - Ndbcluster Storage Engine interface - Archive Storage Engine - CSV Storage Engine - Example Storage Engine @@ -279,6 +327,7 @@ BuildMySQL "--enable-shared \ --without-openssl \ --with-berkeley-db \ --with-innodb \ + --with-ndbcluster \ --with-raid \ --with-archive \ --with-csv-storage-engine \ @@ -293,6 +342,9 @@ BuildMySQL "--enable-shared \ mv sql/mysqld sql/mysqld-max nm --numeric-sort sql/mysqld-max > sql/mysqld-max.sym +# Install the ndb binaries +(cd ndb; make install DESTDIR=$RBR) + # Install embedded server library in the build root install -m 644 libmysqld/libmysqld.a $RBR%{_libdir}/mysql/ @@ -435,6 +487,21 @@ chmod -R og-rw $mysql_datadir/mysql # Allow safe_mysqld to start mysqld and print a message before we exit sleep 2 + +%pre ndb-storage +mysql_clusterdir=/var/lib/mysql-cluster + +# Create cluster directory if needed +if test ! -d $mysql_clusterdir; then mkdir -m755 $mysql_clusterdir; fi + + +%pre ndb-storage +mysql_clusterdir=/var/lib/mysql-cluster + +# Create cluster directory if needed +if test ! -d $mysql_clusterdir; then mkdir -m755 $mysql_clusterdir; fi + + %post Max # Restart mysqld, to use the new binary. echo "Restarting mysqld." @@ -475,6 +542,7 @@ fi %doc Docs/manual.{html,ps,texi,txt} %doc Docs/manual_toc.html %doc support-files/my-*.cnf +%doc support-files/ndb-*.ini %doc %attr(644, root, root) %{_infodir}/mysql.info* @@ -556,6 +624,32 @@ fi %postun shared /sbin/ldconfig +%files ndb-storage +%defattr(-,root,root,0755) +%attr(755, root, root) %{_sbindir}/ndbd + +%files ndb-management +%defattr(-,root,root,0755) +%attr(755, root, root) %{_sbindir}/ndb_mgmd +%attr(755, root, root) %{_bindir}/ndb_mgm + +%files ndb-tools +%defattr(-,root,root,0755) +%attr(755, root, root) %{_bindir}/ndb_mgm +%attr(755, root, root) %{_bindir}/ndb_restore +%attr(755, root, root) %{_bindir}/ndb_waiter +%attr(755, root, root) %{_bindir}/ndb_select_all +%attr(755, root, root) %{_bindir}/ndb_select_count +%attr(755, root, root) %{_bindir}/ndb_desc +%attr(755, root, root) %{_bindir}/ndb_show_tables +%attr(755, root, root) %{_bindir}/ndb_test_platform + +%files ndb-extra +%defattr(-,root,root,0755) +%attr(755, root, root) %{_bindir}/ndb_drop_index +%attr(755, root, root) %{_bindir}/ndb_drop_table +%attr(755, root, root) %{_bindir}/ndb_delete_all + %files devel %defattr(-, root, root, 0755) %doc EXCEPTIONS-CLIENT diff --git a/support-files/ndb-config-2-node.ini b/support-files/ndb-config-2-node.ini new file mode 100644 index 00000000000..be80f1dd0b3 --- /dev/null +++ b/support-files/ndb-config-2-node.ini @@ -0,0 +1,43 @@ +# Example Ndbcluster storage engine config file. 
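# (Illustration, not part of the patch: the DataDir used below matches the
#  /var/lib/mysql-cluster directory that the ndb-storage %pre scriptlet in
#  mysql.spec.sh above creates. The layout that follows is one [ndb_mgmd]
#  management node, two [ndbd] data nodes and four [mysqld] API slots, all
#  bound to localhost in this sample; a real two-host setup would adjust
#  HostName and DataDir per machine.)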
+# +[ndbd default] +NoOfReplicas= 2 +MaxNoOfConcurrentOperations= 10000 +DataMemory= 80M +IndexMemory= 24M +TimeBetweenWatchDogCheck= 30000 +DataDir= /var/lib/mysql-cluster +MaxNoOfOrderedIndexes= 512 + +[ndb_mgmd default] +DataDir= /var/lib/mysql-cluster + +[ndb_mgmd] +Id=1 +HostName= localhost + +[ndbd] +Id= 2 +HostName= localhost + +[ndbd] +Id= 3 +HostName= localhost + +[mysqld] +Id= 4 + +[mysqld] +Id= 5 + +[mysqld] +Id= 6 + +[mysqld] +Id= 7 + +# choose an unused port number +# in this configuration 63132, 63133, and 63134 +# will be used +[tcp default] +PortNumber= 63132 From e244a3a9b4a9d9fd9dad63338ead667cf4b9a959 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 17:58:02 +0100 Subject: [PATCH 24/53] made an .sh file of ndb-config-2-node.ini instead support-files/ndb-config-2-node.ini.sh: Rename: support-files/ndb-config-2-node.ini -> support-files/ndb-config-2-node.ini.sh --- support-files/Makefile.am | 9 ++++++--- .../{ndb-config-2-node.ini => ndb-config-2-node.ini.sh} | 0 2 files changed, 6 insertions(+), 3 deletions(-) rename support-files/{ndb-config-2-node.ini => ndb-config-2-node.ini.sh} (100%) diff --git a/support-files/Makefile.am b/support-files/Makefile.am index 7ae1071f9ec..0a6077f0efc 100644 --- a/support-files/Makefile.am +++ b/support-files/Makefile.am @@ -27,7 +27,8 @@ EXTRA_DIST = mysql.spec.sh \ mysql.server.sh \ binary-configure.sh \ magic \ - MySQL-shared-compat.spec.sh + MySQL-shared-compat.spec.sh \ + ndb-config-2-node.ini.sh SUBDIRS = MacOSX @@ -38,7 +39,8 @@ pkgdata_DATA = my-small.cnf \ my-innodb-heavy-4G.cnf \ mysql-log-rotate \ mysql-@VERSION@.spec \ - MySQL-shared-compat.spec + MySQL-shared-compat.spec \ + ndb-config-2-node.ini pkgdata_SCRIPTS = mysql.server @@ -52,7 +54,8 @@ CLEANFILES = my-small.cnf \ mysql-log-rotate \ mysql.server \ binary-configure \ - MySQL-shared-compat.spec + MySQL-shared-compat.spec \ + ndb-config-2-node.ini mysql-@VERSION@.spec: mysql.spec rm -f $@ diff --git a/support-files/ndb-config-2-node.ini b/support-files/ndb-config-2-node.ini.sh similarity index 100% rename from support-files/ndb-config-2-node.ini rename to support-files/ndb-config-2-node.ini.sh From 567055363e69843feb8215c8cef0e832ceb472cf Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 18:26:59 +0000 Subject: [PATCH 25/53] Bug#7310 Fix test for classic builds mysql-test/t/multi_update.test: Bug#7310 Ignore warnings for Bug#5837 test --- mysql-test/t/multi_update.test | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/t/multi_update.test b/mysql-test/t/multi_update.test index 2d6770f77ed..2fc4ebcb275 100644 --- a/mysql-test/t/multi_update.test +++ b/mysql-test/t/multi_update.test @@ -340,8 +340,10 @@ drop table t1, t2; # Test for BUG#5837 - delete with outer join and const tables drop table if exists t2, t1; +--disable_warnings create table t1(aclid bigint not null primary key, status tinyint(1) not null ) type = innodb; create table t2(refid bigint not null primary key, aclid bigint, index idx_acl(aclid) )type = innodb; +--enable_warnings insert into t2 values(1,null); delete t2, t1 from t2 as a left join t1 as b on (a.aclid=b.aclid) where a.refid='1'; drop table t1, t2; From 044f9e8227d1aa1e41e83a3a89e162b429a6646b Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:13:27 +0100 Subject: [PATCH 26/53] mysql-test-run.pl: Let --start-and-exit actually start a server Added that test case names can be specified on the comman line Added embedded server support Added environment variables UMASK, UMASK_DIR Added missing 
MASTER_MYSOCK1, MASTER_MYPORT1, USE_RUNNING_SERVER Added missing CHARSETSDIR, MYSQL_FIX_SYSTEM_TABLES, MYSQL_CLIENT_TEST Pass on return value from sleep_until_file_created(), to fail test Fail test if early termination of mysqld servers Create intial databases for the second master, and two additional slaves mtr_process.pl: Find out if port is still in use, using simple TCP connect Use non blocking waitpid() to catch terminations early Make a special case spawning the 'mysqltest' application Redo the fork() if it returns EAGAIN Make sure to record if master or slave terminated Improved debugging output Improved code that remove PID files to avoid race Abort if we can't stop all mysqld servers using our ports Many improvements in killing mysqld servers Let sleep_until_file_created() catch if server died early mtr_report.pl: Added option to disable test cases using .disabled file If --timer, only try to open file with time data if it exists mtr_io.pl: Remove starting/ending space reading server options from file mysql-test/lib/mtr_io.pl: Remove starting/ending space reading server options from file mysql-test/lib/mtr_report.pl: Added option to disable test cases using .disabled file If --timer, only try to open file with time data if it exists mysql-test/lib/mtr_process.pl: Find out if port is still in use, using simple TCP connect Use non blocking waitpid() to catch terminations early Make a special case spawning the 'mysqltest' application Redo the fork() if it returns EAGAIN Make sure to record if master or slave terminated Improved debugging output Improved code that remove PID files to avoid race Abort if we can't stop all mysqld servers using our ports Many improvements in killing mysqld servers Let sleep_until_file_created() catch if server died early mysql-test/mysql-test-run.pl: Let --start-and-exit actually start a server Added that test case names can be specified on the comman line Added embedded server support Added environment variables UMASK, UMASK_DIR Added missing MASTER_MYSOCK1, MASTER_MYPORT1, USE_RUNNING_SERVER Added missing CHARSETSDIR, MYSQL_FIX_SYSTEM_TABLES, MYSQL_CLIENT_TEST Pass on return value from sleep_until_file_created(), to fail test Fail test if early termination of mysqld servers Create intial databases for the second master, and two additional slaves --- mysql-test/lib/mtr_io.pl | 3 + mysql-test/lib/mtr_process.pl | 728 +++++++++++++++++++++++----------- mysql-test/lib/mtr_report.pl | 25 +- mysql-test/mysql-test-run.pl | 459 +++++++++------------ 4 files changed, 696 insertions(+), 519 deletions(-) diff --git a/mysql-test/lib/mtr_io.pl b/mysql-test/lib/mtr_io.pl index 017ba11645b..b3da6d97664 100644 --- a/mysql-test/lib/mtr_io.pl +++ b/mysql-test/lib/mtr_io.pl @@ -8,6 +8,7 @@ use strict; sub mtr_get_pid_from_file ($); sub mtr_get_opts_from_file ($); +sub mtr_fromfile ($); sub mtr_tofile ($@); sub mtr_tonewfile($@); @@ -107,6 +108,8 @@ sub mtr_fromfile ($) { open(FILE,"<",$file) or mtr_error("can't open file \"$file\": $!"); my $text= join('', ); close FILE; + $text =~ s/^\s+//; # Remove starting space, incl newlines + $text =~ s/\s+$//; # Remove ending space, incl newlines return $text; } diff --git a/mysql-test/lib/mtr_process.pl b/mysql-test/lib/mtr_process.pl index e832468d0cb..e1461a9730c 100644 --- a/mysql-test/lib/mtr_process.pl +++ b/mysql-test/lib/mtr_process.pl @@ -5,14 +5,19 @@ # same name. 
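#
# A minimal sketch (not from the patch) of the waitpid()/WNOHANG reap idiom
# that the new mtr_record_dead_children() and spawn_parent_impl() below are
# built on; the handler name note_dead_child() is hypothetical.
#
#   use POSIX 'WNOHANG';
#
#   while ( (my $ret_pid= waitpid(-1, &WNOHANG)) > 0 )
#   {
#     # one of our children terminated; $? still holds its status here
#     note_dead_child($ret_pid, $? >> 8, $? & 127);
#   }
#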
#use Carp qw(cluck); +use Socket; +use Errno; use strict; -use POSIX ":sys_wait_h"; +#use POSIX ":sys_wait_h"; +use POSIX 'WNOHANG'; sub mtr_run ($$$$$$); sub mtr_spawn ($$$$$$); -sub mtr_stop_mysqld_servers ($$); +sub mtr_stop_mysqld_servers ($); sub mtr_kill_leftovers (); +sub mtr_record_dead_children (); +sub sleep_until_file_created ($$$); # static in C sub spawn_impl ($$$$$$$); @@ -34,7 +39,18 @@ sub mtr_run ($$$$$$) { my $error= shift; my $pid_file= shift; - return spawn_impl($path,$arg_list_t,1,$input,$output,$error,$pid_file); + return spawn_impl($path,$arg_list_t,'run',$input,$output,$error,$pid_file); +} + +sub mtr_run_test ($$$$$$) { + my $path= shift; + my $arg_list_t= shift; + my $input= shift; + my $output= shift; + my $error= shift; + my $pid_file= shift; + + return spawn_impl($path,$arg_list_t,'test',$input,$output,$error,$pid_file); } sub mtr_spawn ($$$$$$) { @@ -45,7 +61,7 @@ sub mtr_spawn ($$$$$$) { my $error= shift; my $pid_file= shift; - return spawn_impl($path,$arg_list_t,0,$input,$output,$error,$pid_file); + return spawn_impl($path,$arg_list_t,'spawn',$input,$output,$error,$pid_file); } @@ -58,7 +74,7 @@ sub mtr_spawn ($$$$$$) { sub spawn_impl ($$$$$$$) { my $path= shift; my $arg_list_t= shift; - my $join= shift; + my $mode= shift; my $input= shift; my $output= shift; my $error= shift; @@ -71,107 +87,203 @@ sub spawn_impl ($$$$$$$) { print STDERR "#### ", "STDIN $input\n" if $input; print STDERR "#### ", "STDOUT $output\n" if $output; print STDERR "#### ", "STDERR $error\n" if $error; - if ( $join ) - { - print STDERR "#### ", "RUN "; - } - else - { - print STDERR "#### ", "SPAWN "; - } - print STDERR "$path ", join(" ",@$arg_list_t), "\n"; + print STDERR "#### ", "$mode : $path ", join(" ",@$arg_list_t), "\n"; print STDERR "#### ", "-" x 78, "\n"; } - my $pid= fork(); - if ( ! defined $pid ) + FORK: { - mtr_error("$path ($pid) can't be forked"); - } + my $pid= fork(); - if ( $pid ) - { - # Parent, i.e. the main script - if ( $join ) + if ( ! defined $pid ) { - # We run a command and wait for the result - # FIXME this need to be improved - my $res= waitpid($pid,0); + if ( $! == $!{EAGAIN} ) # See "perldoc Errno" + { + mtr_debug("Got EAGAIN from fork(), sleep 1 second and redo"); + sleep(1); + redo FORK; + } + else + { + mtr_error("$path ($pid) can't be forked"); + } + } - if ( $res == -1 ) + if ( $pid ) + { + spawn_parent_impl($pid,$mode,$path); + } + else + { + # Child, redirect output and exec + # FIXME I tried POSIX::setsid() here to detach and, I hoped, + # avoid zombies. But everything went wild, somehow the parent + # became a deamon as well, and was hard to kill ;-) + # Need to catch SIGCHLD and do waitpid or something instead...... + + $SIG{INT}= 'DEFAULT'; # Parent do some stuff, we don't + + if ( $output ) + { + if ( ! open(STDOUT,">",$output) ) + { + mtr_error("can't redirect STDOUT to \"$output\": $!"); + } + } + if ( $error ) + { + if ( $output eq $error ) + { + if ( ! open(STDERR,">&STDOUT") ) + { + mtr_error("can't dup STDOUT: $!"); + } + } + else + { + if ( ! open(STDERR,">",$error) ) + { + mtr_error("can't redirect STDERR to \"$output\": $!"); + } + } + } + if ( $input ) + { + if ( ! 
open(STDIN,"<",$input) ) + { + mtr_error("can't redirect STDIN to \"$input\": $!"); + } + } + exec($path,@$arg_list_t); + } + } +} + + +sub spawn_parent_impl { + my $pid= shift; + my $mode= shift; + my $path= shift; + + if ( $mode eq 'run' or $mode eq 'test' ) + { + my $exit_value= -1; + my $signal_num= 0; + my $dumped_core= 0; + + if ( $mode eq 'run' ) + { + # Simple run of command, we wait for it to return + my $ret_pid= waitpid($pid,0); + + if ( $ret_pid <= 0 ) { mtr_error("$path ($pid) got lost somehow"); } - my $exit_value= $? >> 8; - my $signal_num= $? & 127; - my $dumped_core= $? & 128; - if ( $signal_num ) - { - mtr_error("$path ($pid) got signal $signal_num"); - } - if ( $dumped_core ) - { - mtr_error("$path ($pid) dumped core"); - } + + $exit_value= $? >> 8; + $signal_num= $? & 127; + $dumped_core= $? & 128; + return $exit_value; } else { - # We spawned a process we don't wait for - return $pid; + # We run mysqltest and wait for it to return. But we try to + # catch dying mysqld processes as well. + # + # We do blocking waitpid() until we get the return from the + # "mysqltest" call. But if a mysqld process dies that we + # started, we take this as an error, and kill mysqltest. + # + # FIXME is this as it should be? Can't mysqld terminate + # normally from running a test case? + + my $ret_pid; # What waitpid() returns + + while ( ($ret_pid= waitpid(-1,0)) != -1 ) + { + # Someone terminated, don't know who. Collect + # status info first before $? is lost, + # but not $exit_value, this is flagged from + # + + if ( $ret_pid == $pid ) + { + # We got termination of mysqltest, we are done + $exit_value= $? >> 8; + $signal_num= $? & 127; + $dumped_core= $? & 128; + last; + } + + # If one of the mysqld processes died, we want to + # mark this, and kill the mysqltest process. + + foreach my $idx (0..1) + { + if ( $::master->[$idx]->{'pid'} eq $ret_pid ) + { + mtr_debug("child $ret_pid was master[$idx], " . + "exit during mysqltest run"); + $::master->[$idx]->{'pid'}= 0; + last; + } + } + + foreach my $idx (0..2) + { + if ( $::slave->[$idx]->{'pid'} eq $ret_pid ) + { + mtr_debug("child $ret_pid was slave[$idx], " . + "exit during mysqltest run"); + $::slave->[$idx]->{'pid'}= 0; + last; + } + } + + mtr_debug("waitpid() catched exit of unknown child $ret_pid, " . + "exit during mysqltest run"); + } + + if ( $ret_pid != $pid ) + { + # We terminated the waiting because a "mysqld" process died. + # Kill the mysqltest process. + + kill(9,$pid); + + $ret_pid= waitpid($pid,0); + + if ( $ret_pid == -1 ) + { + mtr_error("$path ($pid) got lost somehow"); + } + } + + return $exit_value; } } else { - # Child, redirect output and exec - # FIXME I tried POSIX::setsid() here to detach and, I hoped, - # avoid zombies. But everything went wild, somehow the parent - # became a deamon as well, and was hard to kill ;-) - # Need to catch SIGCHLD and do waitpid or something instead...... - - $SIG{INT}= 'DEFAULT'; # Parent do some stuff, we don't - - if ( $output ) - { - if ( ! open(STDOUT,">",$output) ) - { - mtr_error("can't redirect STDOUT to \"$output\": $!"); - } - } - if ( $error ) - { - if ( $output eq $error ) - { - if ( ! open(STDERR,">&STDOUT") ) - { - mtr_error("can't dup STDOUT: $!"); - } - } - else - { - if ( ! open(STDERR,">",$error) ) - { - mtr_error("can't redirect STDERR to \"$output\": $!"); - } - } - } - if ( $input ) - { - if ( ! 
open(STDIN,"<",$input) ) - { - mtr_error("can't redirect STDIN to \"$input\": $!"); - } - } - exec($path,@$arg_list_t); + # We spawned a process we don't wait for + return $pid; } } + + ############################################################################## # # Kill processes left from previous runs # ############################################################################## +# We just "ping" on the ports, and if we can't do a socket connect +# we assume the server is dead. So we don't *really* know a server +# is dead, we just hope that it after letting the listen port go, +# it is dead enough for us to start a new server. + sub mtr_kill_leftovers () { # First, kill all masters and slaves that would conflict with @@ -199,10 +311,23 @@ sub mtr_kill_leftovers () { }); } - mtr_stop_mysqld_servers(\@args, 1); + mtr_mysqladmin_shutdown(\@args); + + # We now have tried to terminate nice. We have waited for the listen + # port to be free, but can't really tell if the mysqld process died + # or not. We now try to find the process PID from the PID file, and + # send a kill to that process. Note that Perl let kill(0,@pids) be + # a way to just return the numer of processes the kernel can send + # signals to. So this can be used (except on Cygwin) to determine + # if there are processes left running that we cound out might exists. + # + # But still after all this work, all we know is that we have + # the ports free. # We scan the "var/run/" directory for other process id's to kill - my $rundir= "$::glob_mysql_test_dir/var/run"; # FIXME $path_run_dir or something + + # FIXME $path_run_dir or something + my $rundir= "$::glob_mysql_test_dir/var/run"; if ( -d $rundir ) { @@ -218,193 +343,157 @@ sub mtr_kill_leftovers () { if ( -f $pidfile ) { my $pid= mtr_get_pid_from_file($pidfile); - if ( ! unlink($pidfile) ) + + # Race, could have been removed between I tested with -f + # and the unlink() below, so I better check again with -f + + if ( ! unlink($pidfile) and -f $pidfile ) { mtr_error("can't remove $pidfile"); } - push(@pids, $pid); + + if ( $::glob_cygwin_perl or kill(0, $pid) ) + { + push(@pids, $pid); # We know (cygwin guess) it exists + } } } closedir(RUNDIR); - start_reap_all(); - - if ( $::glob_cygwin_perl ) + if ( @pids ) { - # We have no (easy) way of knowing the Cygwin controlling - # process, in the PID file we only have the Windows process id. - system("kill -f " . join(" ",@pids)); # Hope for the best.... - } - else - { - my $retries= 10; # 10 seconds - do + if ( $::glob_cygwin_perl ) { - kill(9, @pids); - } while ( $retries-- and kill(0, @pids) ); + # We have no (easy) way of knowing the Cygwin controlling + # process, in the PID file we only have the Windows process id. + system("kill -f " . join(" ",@pids)); # Hope for the best.... + mtr_debug("Sleep 5 seconds waiting for processes to die"); + sleep(5); + } + else + { + my $retries= 10; # 10 seconds + do + { + kill(9, @pids); + mtr_debug("Sleep 1 second waiting for processes to die"); + sleep(1) # Wait one second + } while ( $retries-- and kill(0, @pids) ); - if ( kill(0, @pids) ) - { - mtr_error("can't kill processes " . join(" ", @pids)); + if ( kill(0, @pids) ) # Check if some left + { + # FIXME maybe just mtr_warning() ? + mtr_error("can't kill process(es) " . join(" ", @pids)); + } } } + } - stop_reap_all(); + # We may have failed everything, bug we now check again if we have + # the listen ports free to use, and if they are free, just go for it. 
+ + foreach my $srv ( @args ) + { + if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) ) + { + mtr_error("can't kill old mysqld holding port $srv->{'port'}"); + } } } ############################################################################## # -# Shut down mysqld servers +# Shut down mysqld servers we have started from this run of this script # ############################################################################## -# To speed things we kill servers in parallel. -# The argument is a list of 'pidfiles' and 'socketfiles'. -# We use the pidfiles and socketfiles to try to terminate the servers. -# This is not perfect, there could still be other server processes -# left. +# To speed things we kill servers in parallel. The argument is a list +# of 'ports', 'pids', 'pidfiles' and 'socketfiles'. -# Force flag is to be set only for killing mysqld servers this script -# didn't create in this run, i.e. initial cleanup before we start working. -# If force flag is set, we try to kill all with mysqladmin, and -# give up if we have no PIDs. +# FIXME On Cygwin, and maybe some other platforms, $srv->{'pid'} and +# $srv->{'pidfile'} will not be the same PID. We need to try to kill +# both I think. -# FIXME On some operating systems, $srv->{'pid'} and $srv->{'pidfile'} -# will not be the same PID. We need to try to kill both I think. - -sub mtr_stop_mysqld_servers ($$) { +sub mtr_stop_mysqld_servers ($) { my $spec= shift; - my $force= shift; # ---------------------------------------------------------------------- - # If the process was not started from this file, we got no PID, - # we try to find it in the PID file. + # First try nice normal shutdown using 'mysqladmin' # ---------------------------------------------------------------------- - my $any_pid= 0; # If we have any PIDs + mtr_mysqladmin_shutdown($spec); + + # ---------------------------------------------------------------------- + # We loop with waitpid() nonblocking to see how many of the ones we + # are to kill, actually got killed by mtr_mysqladmin_shutdown(). + # Note that we don't rely on this, the mysqld server might have stop + # listening to the port, but still be alive. But it is a start. + # ---------------------------------------------------------------------- foreach my $srv ( @$spec ) { - if ( ! $srv->{'pid'} and -f $srv->{'pidfile'} ) + if ( $srv->{'pid'} and (waitpid($srv->{'pid'},&WNOHANG) == $srv->{'pid'}) ) { - $srv->{'pid'}= mtr_get_pid_from_file($srv->{'pidfile'}); - } - if ( $srv->{'pid'} ) - { - $any_pid= 1; + $srv->{'pid'}= 0; } } - # If the processes where started from this script, and we know - # no PIDs, then we don't have to do anything. + # ---------------------------------------------------------------------- + # We know the process was started from this file, so there is a PID + # saved, or else we have nothing to do. + # Might be that is is recorded to be missing, but we failed to + # take away the PID file earlier, then we do it now. + # ---------------------------------------------------------------------- - if ( ! $any_pid and ! $force ) + my %mysqld_pids; + + foreach my $srv ( @$spec ) + { + if ( $srv->{'pid'} ) + { + $mysqld_pids{$srv->{'pid'}}= 1; + } + else + { + # Race, could have been removed between I tested with -f + # and the unlink() below, so I better check again with -f + + if ( -f $srv->{'pidfile'} and ! 
unlink($srv->{'pidfile'}) and + -f $srv->{'pidfile'} ) + { + mtr_error("can't remove $srv->{'pidfile'}"); + } + } + } + + # ---------------------------------------------------------------------- + # If the processes where started from this script, and we had no PIDS + # then we don't have to do anything. + # ---------------------------------------------------------------------- + + if ( ! keys %mysqld_pids ) { # cluck "This is how we got here!"; return; } # ---------------------------------------------------------------------- - # First try nice normal shutdown using 'mysqladmin' - # ---------------------------------------------------------------------- - - start_reap_all(); # Don't require waitpid() of children - - foreach my $srv ( @$spec ) - { - if ( -e $srv->{'sockfile'} or $srv->{'port'} ) - { - # FIXME wrong log..... - # FIXME, stderr..... - # Shutdown time must be high as slave may be in reconnect - my $args; - - mtr_init_args(\$args); - - mtr_add_arg($args, "--no-defaults"); - mtr_add_arg($args, "--user=%s", $::opt_user); - mtr_add_arg($args, "--password="); - if ( -e $srv->{'sockfile'} ) - { - mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'}); - } - if ( $srv->{'port'} ) - { - mtr_add_arg($args, "--port=%s", $srv->{'port'}); - } - mtr_add_arg($args, "--connect_timeout=5"); - mtr_add_arg($args, "--shutdown_timeout=20"); - mtr_add_arg($args, "--protocol=tcp"); # FIXME new thing, will it help?! - mtr_add_arg($args, "shutdown"); - # We don't wait for termination of mysqladmin - mtr_spawn($::exe_mysqladmin, $args, - "", $::path_manager_log, $::path_manager_log, ""); - } - } - - # Wait for them all to remove their pid and socket file - - PIDSOCKFILEREMOVED: - for (my $loop= $::opt_sleep_time_for_delete; $loop; $loop--) - { - my $pidsockfiles_left= 0; - foreach my $srv ( @$spec ) - { - if ( -e $srv->{'sockfile'} or -f $srv->{'pidfile'} ) - { - $pidsockfiles_left++; # Could be that pidfile is left - } - } - if ( ! $pidsockfiles_left ) - { - last PIDSOCKFILEREMOVED; - } - if ( $loop % 20 == 1 ) - { - mtr_warning("Still processes alive after 10 seconds, retrying for $loop seconds..."); - } - mtr_debug("Sleep for 1 second waiting for pid and socket file removal"); - sleep(1); # One second - } - - # ---------------------------------------------------------------------- - # If no known PIDs, we have nothing more to try - # ---------------------------------------------------------------------- - - if ( ! $any_pid ) - { - stop_reap_all(); - return; - } - - # ---------------------------------------------------------------------- - # We may have killed all that left a socket, but we are not sure we got - # them all killed. If we suspect it lives, try nice kill with SIG_TERM. - # Note that for true Win32 processes, kill(0,$pid) will not return 1. + # In mtr_mysqladmin_shutdown() we only waited for the mysqld servers + # not to listen to the port. But we are not sure we got them all + # killed. If we suspect it lives, try nice kill with SIG_TERM. Note + # that for true Win32 processes, kill(0,$pid) will not return 1. # ---------------------------------------------------------------------- SIGNAL: foreach my $sig (15,9) { - my $process_left= 0; - foreach my $srv ( @$spec ) + my $retries= 10; # 10 seconds + kill($sig, keys %mysqld_pids); + while ( $retries-- and kill(0, keys %mysqld_pids) ) { - if ( $srv->{'pid'} and - ( -f $srv->{'pidfile'} or kill(0,$srv->{'pid'}) ) ) - { - $process_left++; - mtr_warning("process $srv->{'pid'} not cooperating, " . 
- "will send signal $sig to process"); - kill($sig,$srv->{'pid'}); # SIG_TERM - } - if ( ! $process_left ) - { - last SIGNAL; - } + mtr_debug("Sleep 1 second waiting for processes to die"); + sleep(1) # Wait one second } - mtr_debug("Sleep for 5 seconds waiting for processes to die"); - sleep(5); # We wait longer than usual } # ---------------------------------------------------------------------- @@ -437,8 +526,8 @@ sub mtr_stop_mysqld_servers ($$) { foreach my $file ($srv->{'pidfile'}, $srv->{'sockfile'}) { - unlink($file); - if ( -e $file ) + # Know it is dead so should be no race, careful anyway + if ( -f $file and ! unlink($file) and -f $file ) { $errors++; mtr_warning("couldn't delete $file"); @@ -454,9 +543,147 @@ sub mtr_stop_mysqld_servers ($$) { } } - stop_reap_all(); + # FIXME We just assume they are all dead, for Cygwin we are not + # really sure + +} - # FIXME We just assume they are all dead, we don't know.... + +############################################################################## +# +# Shut down mysqld servers using "mysqladmin ... shutdown". +# To speed this up, we start them in parallel and use waitpid() to +# catch their termination. Note that this doesn't say the servers +# are terminated, just that 'mysqladmin' is terminated. +# +# Note that mysqladmin will ask the server about what PID file it uses, +# and mysqladmin will wait for it to be removed before it terminates +# (unless passes timeout). +# +# This function will take at most about 20 seconds, and we still are not +# sure we killed them all. If none is responding to ping, we return 1, +# else we return 0. +# +############################################################################## + +sub mtr_mysqladmin_shutdown () { + my $spec= shift; + + my @mysql_admin_pids; + my @to_kill_specs; + + foreach my $srv ( @$spec ) + { + if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) ) + { + push(@to_kill_specs, $srv); + } + } + + + foreach my $srv ( @to_kill_specs ) + { + # FIXME wrong log..... + # FIXME, stderr..... + # Shutdown time must be high as slave may be in reconnect + my $args; + + mtr_init_args(\$args); + + mtr_add_arg($args, "--no-defaults"); + mtr_add_arg($args, "--user=%s", $::opt_user); + mtr_add_arg($args, "--password="); + if ( -e $srv->{'sockfile'} ) + { + mtr_add_arg($args, "--socket=%s", $srv->{'sockfile'}); + } + if ( $srv->{'port'} ) + { + mtr_add_arg($args, "--port=%s", $srv->{'port'}); + } + if ( $srv->{'port'} and ! -e $srv->{'sockfile'} ) + { + mtr_add_arg($args, "--protocol=tcp"); # Needed if no --socket + } + mtr_add_arg($args, "--connect_timeout=5"); + mtr_add_arg($args, "--shutdown_timeout=20"); + mtr_add_arg($args, "shutdown"); + # We don't wait for termination of mysqladmin + my $pid= mtr_spawn($::exe_mysqladmin, $args, + "", $::path_manager_log, $::path_manager_log, ""); + push(@mysql_admin_pids, $pid); + } + + # We wait blocking, we wait for the last one anyway + foreach my $pid (@mysql_admin_pids) + { + waitpid($pid,0); # FIXME no need to check -1 or 0? + } + + # If we trusted "mysqladmin --shutdown_timeout= ..." we could just + # terminate now, but we don't (FIXME should be debugged). + # So we try again to ping and at least wait the same amount of time + # mysqladmin would for all to die. 
+ + my $timeout= 20; # 20 seconds max + my $res= 1; # If we just fall through, we are done + + TIME: + while ( $timeout-- ) + { + foreach my $srv ( @to_kill_specs ) + { + $res= 1; # We are optimistic + if ( mtr_ping_mysqld_server($srv->{'port'}, $srv->{'sockfile'}) ) + { + mtr_debug("Sleep 1 second waiting for processes to stop using port"); + sleep(1); # One second + $res= 0; + next TIME; + } + } + last; # If we got here, we are done + } + + return $res; +} + +############################################################################## +# +# The operating system will keep information about dead children, +# we read this information here, and if we have records the process +# is alive, we mark it as dead. +# +############################################################################## + +sub mtr_record_dead_children () { + + my $ret_pid; + + # FIXME the man page says to wait for -1 to terminate, + # but on OS X we get '0' all the time... + while ( ($ret_pid= waitpid(-1,&WNOHANG)) > 0 ) + { + mtr_debug("waitpid() catched exit of child $ret_pid"); + foreach my $idx (0..1) + { + if ( $::master->[$idx]->{'pid'} eq $ret_pid ) + { + mtr_debug("child $ret_pid was master[$idx]"); + $::master->[$idx]->{'pid'}= 0; + } + } + + foreach my $idx (0..2) + { + if ( $::slave->[$idx]->{'pid'} eq $ret_pid ) + { + mtr_debug("child $ret_pid was slave[$idx]"); + $::slave->[$idx]->{'pid'}= 0; + last; + } + } + } } sub start_reap_all { @@ -467,6 +694,32 @@ sub stop_reap_all { $SIG{CHLD}= 'DEFAULT'; } +sub mtr_ping_mysqld_server () { + my $port= shift; + + my $remote= "localhost"; + my $iaddr= inet_aton($remote); + if ( ! $iaddr ) + { + mtr_error("can't find IP number for $remote"); + } + my $paddr= sockaddr_in($port, $iaddr); + my $proto= getprotobyname('tcp'); + if ( ! socket(SOCK, PF_INET, SOCK_STREAM, $proto) ) + { + mtr_error("can't create socket: $!"); + } + if ( connect(SOCK, $paddr) ) + { + close(SOCK); # FIXME check error? + return 1; + } + else + { + return 0; + } +} + ############################################################################## # # Wait for a file to be created @@ -474,33 +727,38 @@ sub stop_reap_all { ############################################################################## -sub sleep_until_file_created ($$) { +sub sleep_until_file_created ($$$) { my $pidfile= shift; my $timeout= shift; + my $pid= shift; - my $loop= $timeout; - while ( $loop-- ) + for ( my $loop= 1; $loop <= $timeout; $loop++ ) { if ( -r $pidfile ) { - return; + return 1; } - mtr_debug("Sleep for 1 second waiting for creation of $pidfile"); - if ( $loop % 20 == 1 ) + # Check if it died after the fork() was successful + if ( waitpid($pid,&WNOHANG) == $pid ) { - mtr_warning("Waiting for $pidfile to be created, still trying for $loop seconds..."); + return 0; + } + + mtr_debug("Sleep 1 second waiting for creation of $pidfile"); + + if ( $loop % 60 == 0 ) + { + my $left= $timeout - $loop; + mtr_warning("Waited $loop seconds for $pidfile to be created, " . + "still waiting for $left seconds..."); } sleep(1); } - if ( ! 
-r $pidfile ) - { - mtr_error("No $pidfile was created"); - } + return 0; } - 1; diff --git a/mysql-test/lib/mtr_report.pl b/mysql-test/lib/mtr_report.pl index 0f75fc1341a..c45bb1601ce 100644 --- a/mysql-test/lib/mtr_report.pl +++ b/mysql-test/lib/mtr_report.pl @@ -10,6 +10,7 @@ sub mtr_report_test_name($); sub mtr_report_test_passed($); sub mtr_report_test_failed($); sub mtr_report_test_skipped($); +sub mtr_report_test_disabled($); sub mtr_show_failed_diff ($); sub mtr_report_stats ($); @@ -72,7 +73,14 @@ sub mtr_report_test_skipped ($) { my $tinfo= shift; $tinfo->{'result'}= 'MTR_RES_SKIPPED'; - print "[ skipped ]\n"; + if ( $tinfo->{'disable'} ) + { + print "[ disabled ] $tinfo->{'comment'}\n"; + } + else + { + print "[ skipped ]\n"; + } } sub mtr_report_test_passed ($) { @@ -95,9 +103,18 @@ sub mtr_report_test_failed ($) { $tinfo->{'result'}= 'MTR_RES_FAILED'; print "[ fail ]\n"; - print "Errors are (from $::path_timefile) :\n"; - print mtr_fromfile($::path_timefile); # FIXME print_file() instead - print "\n(the last lines may be the most important ones)\n"; + # FIXME Instead of this test, and meaningless error message in 'else' + # we should write out into $::path_timefile when the error occurs. + if ( -f $::path_timefile ) + { + print "Errors are (from $::path_timefile) :\n"; + print mtr_fromfile($::path_timefile); # FIXME print_file() instead + print "\n(the last lines may be the most important ones)\n"; + } + else + { + print "Unexpected termination, probably when starting mysqld\n"; + } } sub mtr_report_stats ($) { diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 3bbdb48d98a..3dd6f5803d7 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -84,10 +84,11 @@ use Sys::Hostname; #use Carp; use IO::Socket; use IO::Socket::INET; -use Data::Dumper; +#use Data::Dumper; use strict; #use diagnostics; +require "lib/mtr_cases.pl"; require "lib/mtr_process.pl"; require "lib/mtr_io.pl"; require "lib/mtr_gcov.pl"; @@ -165,14 +166,12 @@ our $glob_user= 'test'; our $glob_use_embedded_server= 0; our $glob_basedir; -our $glob_do_test; # The total result our $path_charsetsdir; our $path_client_bindir; our $path_language; -our $path_tests_bindir; our $path_timefile; our $path_manager_log; # Used by mysqldadmin our $path_slave_load_tmpdir; # What is this?! 
@@ -192,8 +191,10 @@ our $exe_master_mysqld; our $exe_mysql; our $exe_mysqladmin; our $exe_mysqlbinlog; +our $exe_mysql_client_test; our $exe_mysqld; our $exe_mysqldump; # Called from test case +our $exe_mysql_fix_system_tables; our $exe_mysqltest; our $exe_slave_mysqld; @@ -208,6 +209,7 @@ our $opt_current_test; our $opt_ddd; our $opt_debug; our $opt_do_test; +our @opt_cases; # The test cases names in argv our $opt_embedded_server; our $opt_extern; our $opt_fast; @@ -232,8 +234,6 @@ our $opt_local_master; our $master; # Will be struct in C our $slave; -our $opt_master_myport; -our $opt_slave_myport; our $opt_ndbcluster_port; our $opt_ndbconnectstring; @@ -297,8 +297,6 @@ sub command_line_setup (); sub executable_setup (); sub environment_setup (); sub kill_and_cleanup (); -sub collect_test_cases ($); -sub sleep_until_file_created ($$); sub ndbcluster_start (); sub ndbcluster_stop (); sub run_benchmarks ($); @@ -306,6 +304,7 @@ sub run_tests (); sub mysql_install_db (); sub install_db ($$); sub run_testcase ($); +sub report_failure_and_restart ($); sub do_before_start_master ($$); sub do_before_start_slave ($$); sub mysqld_start ($$$$); @@ -358,7 +357,15 @@ sub main () { if ( $opt_start_and_exit ) { - mtr_report("Servers started, exiting"); + # FIXME what about ndb? + if ( mysqld_start('master',0,[],[]) ) + { + mtr_report("Servers started, exiting"); + } + else + { + mtr_error("Can't start the mysqld server"); + } } else { @@ -447,8 +454,8 @@ sub command_line_setup () { $path_manager_log= "$glob_mysql_test_dir/var/log/manager.log"; $opt_current_test= "$glob_mysql_test_dir/var/log/current_test"; - $opt_master_myport= 9306; - $opt_slave_myport= 9308; + my $opt_master_myport= 9306; + my $opt_slave_myport= 9308; $opt_ndbcluster_port= 9350; # Read the command line @@ -532,6 +539,8 @@ sub command_line_setup () { usage(""); } + @opt_cases= @ARGV; + # Put this into a hash, will be a C struct $master->[0]->{'path_myddir'}= "$glob_mysql_test_dir/var/master-data"; @@ -598,7 +607,7 @@ sub command_line_setup () { # Look at the command line options and set script flags # -------------------------------------------------------------------------- - if ( $opt_record and ! @ARGV) + if ( $opt_record and ! 
@opt_cases ) { mtr_error("Will not run in record mode without a specific test case"); } @@ -733,7 +742,8 @@ sub executable_setup () { { mtr_error("Can't find embedded server 'mysqltest'"); } - $path_tests_bindir= "$glob_basedir/libmysqld/examples"; + $exe_mysql_client_test= + "$glob_basedir/libmysqld/examples/mysql_client_test_embedded"; } else { @@ -749,7 +759,8 @@ sub executable_setup () { { $exe_mysqltest= "$glob_basedir/client/mysqltest"; } - $path_tests_bindir= "$glob_basedir/tests"; + $exe_mysql_client_test= + "$glob_basedir/tests/mysql_client_test"; } if ( -f "$glob_basedir/client/.libs/mysqldump" ) { @@ -768,22 +779,26 @@ sub executable_setup () { $exe_mysqlbinlog= "$glob_basedir/client/mysqlbinlog"; } - $exe_mysqld= "$glob_basedir/sql/mysqld"; - $path_client_bindir= "$glob_basedir/client"; - $exe_mysqladmin= "$path_client_bindir/mysqladmin"; - $exe_mysql= "$path_client_bindir/mysql"; - $path_language= "$glob_basedir/sql/share/english/"; - $path_charsetsdir= "$glob_basedir/sql/share/charsets"; + $path_client_bindir= "$glob_basedir/client"; + $exe_mysqld= "$glob_basedir/sql/mysqld"; + $exe_mysqladmin= "$path_client_bindir/mysqladmin"; + $exe_mysql= "$path_client_bindir/mysql"; + $exe_mysql_fix_system_tables= "$glob_basedir/scripts/mysql_fix_privilege_tables"; + $path_language= "$glob_basedir/sql/share/english/"; + $path_charsetsdir= "$glob_basedir/sql/share/charsets"; } else { - $path_client_bindir= "$glob_basedir/bin"; - $path_tests_bindir= "$glob_basedir/tests"; - $exe_mysqltest= "$path_client_bindir/mysqltest"; - $exe_mysqldump= "$path_client_bindir/mysqldump"; - $exe_mysqlbinlog= "$path_client_bindir/mysqlbinlog"; - $exe_mysqladmin= "$path_client_bindir/mysqladmin"; - $exe_mysql= "$path_client_bindir/mysql"; + my $path_tests_bindir= "$glob_basedir/tests"; + + $path_client_bindir= "$glob_basedir/bin"; + $exe_mysqltest= "$path_client_bindir/mysqltest"; + $exe_mysqldump= "$path_client_bindir/mysqldump"; + $exe_mysqlbinlog= "$path_client_bindir/mysqlbinlog"; + $exe_mysqladmin= "$path_client_bindir/mysqladmin"; + $exe_mysql= "$path_client_bindir/mysql"; + $exe_mysql_fix_system_tables= "$path_client_bindir/scripts/mysql_fix_privilege_tables"; + if ( -d "$glob_basedir/share/mysql/english" ) { $path_language ="$glob_basedir/share/mysql/english/"; @@ -804,6 +819,33 @@ sub executable_setup () { $exe_mysqld= "$glob_basedir/bin/mysqld"; } + if ( $glob_use_embedded_server ) + { + if ( -f "$path_client_bindir/mysqltest_embedded" ) + { + # FIXME valgrind? 
+ $exe_mysqltest="$path_client_bindir/mysqltest_embedded"; + } + else + { + error("Cannot find embedded server 'mysqltest_embedded'"); + } + if ( -d "$path_tests_bindir/mysql_client_test_embedded" ) + { + $exe_mysql_client_test= + "$path_tests_bindir/mysql_client_test_embedded"; + } + else + { + $exe_mysql_client_test= + "$path_client_bindir/mysql_client_test_embedded"; + } + } + else + { + $exe_mysqltest="$path_client_bindir/mysqltest"; + $exe_mysql_client_test="$path_client_bindir/mysql_client_test"; + } } # FIXME special $exe_master_mysqld and $exe_slave_mysqld @@ -846,13 +888,18 @@ sub environment_setup () { # Also command lines in .opt files may contain env vars # -------------------------------------------------------------------------- - $ENV{'LC_COLLATE'}= "C"; - $ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir; - $ENV{'MASTER_MYPORT'}= $opt_master_myport; - $ENV{'SLAVE_MYPORT'}= $opt_slave_myport; -# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME - $ENV{'MYSQL_TCP_PORT'}= 3306; - $ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'}; + $ENV{'UMASK'}= "0660"; # The octal *string* + $ENV{'UMASK_DIR'}= "0770"; # The octal *string* + $ENV{'LC_COLLATE'}= "C"; + $ENV{'USE_RUNNING_SERVER'}= $glob_use_running_server; + $ENV{'MYSQL_TEST_DIR'}= $glob_mysql_test_dir; + $ENV{'MASTER_MYSOCK'}= $master->[0]->{'path_mysock'}; + $ENV{'MASTER_MYSOCK1'}= $master->[1]->{'path_mysock'}; + $ENV{'MASTER_MYPORT'}= $master->[0]->{'path_myport'}; + $ENV{'MASTER_MYPORT1'}= $master->[1]->{'path_myport'}; + $ENV{'SLAVE_MYPORT'}= $slave->[0]->{'path_myport'}; +# $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME + $ENV{'MYSQL_TCP_PORT'}= 3306; } @@ -875,203 +922,6 @@ sub handle_int_signal () { } -############################################################################## -# -# Collect information about test cases we are to run -# -############################################################################## - -sub collect_test_cases ($) { - my $suite= shift; # Test suite name - - my $testdir; - my $resdir; - - if ( $suite eq "main" ) - { - $testdir= "$glob_mysql_test_dir/t"; - $resdir= "$glob_mysql_test_dir/r"; - } - else - { - $testdir= "$glob_mysql_test_dir/suite/$suite/t"; - $resdir= "$glob_mysql_test_dir/suite/$suite/r"; - } - - my @tests; # Array of hash, will be array of C struct - - opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); - - foreach my $elem ( sort readdir(TESTDIR) ) { - my $tname= mtr_match_extension($elem,"test"); - next if ! defined $tname; - next if $opt_do_test and ! 
defined mtr_match_prefix($elem,$opt_do_test); - my $path= "$testdir/$elem"; - - # ---------------------------------------------------------------------- - # Skip some tests silently - # ---------------------------------------------------------------------- - - if ( $opt_start_from and $tname lt $opt_start_from ) - { - next; - } - - # ---------------------------------------------------------------------- - # Skip some tests but include in list, just mark them to skip - # ---------------------------------------------------------------------- - - my $tinfo= {}; - $tinfo->{'name'}= $tname; - $tinfo->{'result_file'}= "$resdir/$tname.result"; - push(@tests, $tinfo); - - if ( $opt_skip_test and defined mtr_match_prefix($tname,$opt_skip_test) ) - { - $tinfo->{'skip'}= 1; - next; - } - - # FIXME temporary solution, we have a hard coded list of test cases to - # skip if we are using the embedded server - - if ( $glob_use_embedded_server and - mtr_match_any_exact($tname,\@skip_if_embedded_server) ) - { - $tinfo->{'skip'}= 1; - next; - } - - # ---------------------------------------------------------------------- - # Collect information about test case - # ---------------------------------------------------------------------- - - $tinfo->{'path'}= $path; - $tinfo->{'timezone'}= "GMT-3"; # for UNIX_TIMESTAMP tests to work - - if ( defined mtr_match_prefix($tname,"rpl") ) - { - if ( $opt_skip_rpl ) - { - $tinfo->{'skip'}= 1; - next; - } - - # FIXME currently we always restart slaves - $tinfo->{'slave_restart'}= 1; - - if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' ) - { - $tinfo->{'slave_num'}= 3; - } - else - { - $tinfo->{'slave_num'}= 1; - } - } - - # FIXME what about embedded_server + ndbcluster, skip ?! - - my $master_opt_file= "$testdir/$tname-master.opt"; - my $slave_opt_file= "$testdir/$tname-slave.opt"; - my $slave_mi_file= "$testdir/$tname.slave-mi"; - my $master_sh= "$testdir/$tname-master.sh"; - my $slave_sh= "$testdir/$tname-slave.sh"; - - if ( -f $master_opt_file ) - { - $tinfo->{'master_restart'}= 1; # We think so for now - # This is a dirty hack from old mysql-test-run, we use the opt file - # to flag other things as well, it is not a opt list at all - my $extra_master_opt= mtr_get_opts_from_file($master_opt_file); - - foreach my $opt (@$extra_master_opt) - { - my $value; - - $value= mtr_match_prefix($opt, "--timezone="); - - if ( defined $value ) - { - $tinfo->{'timezone'}= $value; - $extra_master_opt= []; - $tinfo->{'master_restart'}= 0; - last; - } - - $value= mtr_match_prefix($opt, "--result-file="); - - if ( defined $value ) - { - $tinfo->{'result_file'}= "r/$value.result"; - if ( $opt_result_ext and $opt_record or - -f "$tinfo->{'result_file'}$opt_result_ext") - { - $tinfo->{'result_file'}.= $opt_result_ext; - } - $extra_master_opt= []; - $tinfo->{'master_restart'}= 0; - last; - } - } - - $tinfo->{'master_opt'}= $extra_master_opt; - } - - if ( -f $slave_opt_file ) - { - $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file); - $tinfo->{'slave_restart'}= 1; - } - - if ( -f $slave_mi_file ) - { - $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file); - $tinfo->{'slave_restart'}= 1; - } - - if ( -f $master_sh ) - { - if ( $glob_win32_perl ) - { - $tinfo->{'skip'}= 1; - } - else - { - $tinfo->{'master_sh'}= $master_sh; - $tinfo->{'master_restart'}= 1; - } - } - - if ( -f $slave_sh ) - { - if ( $glob_win32_perl ) - { - $tinfo->{'skip'}= 1; - } - else - { - $tinfo->{'slave_sh'}= $slave_sh; - $tinfo->{'slave_restart'}= 1; - } - } - - # We can't restart a 
running server that may be in use - - if ( $glob_use_running_server and - ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) ) - { - $tinfo->{'skip'}= 1; - } - - } - - closedir TESTDIR; - - return \@tests; -} - - ############################################################################## # # Handle left overs from previous runs @@ -1189,6 +1039,10 @@ sub run_benchmarks ($) { if ( ! $glob_use_embedded_server and ! $opt_local_master ) { $master->[0]->{'pid'}= mysqld_start('master',0,[],[]); + if ( ! $master->[0]->{'pid'} ) + { + mtr_error("Can't start the mysqld server"); + } } mtr_init_args(\$args); @@ -1254,7 +1108,7 @@ sub run_suite () { mtr_print_thick_line(); - mtr_report("Finding Tests in the '$suite' suite"); + mtr_report("Finding Tests in the '$suite' suite"); my $tests= collect_test_cases($suite); @@ -1301,10 +1155,12 @@ sub run_suite () { sub mysql_install_db () { - mtr_report("Installing Test Databases"); - + # FIXME not exactly true I think, needs improvements install_db('master', $master->[0]->{'path_myddir'}); + install_db('master', $master->[1]->{'path_myddir'}); install_db('slave', $slave->[0]->{'path_myddir'}); + install_db('slave', $slave->[1]->{'path_myddir'}); + install_db('slave', $slave->[2]->{'path_myddir'}); return 0; } @@ -1422,6 +1278,12 @@ sub run_testcase ($) { mtr_tofile($master->[0]->{'path_myerr'},"CURRENT_TEST: $tname\n"); do_before_start_master($tname,$tinfo->{'master_sh'}); + # ---------------------------------------------------------------------- + # If any mysqld servers running died, we have to know + # ---------------------------------------------------------------------- + + mtr_record_dead_children(); + # ---------------------------------------------------------------------- # Start masters # ---------------------------------------------------------------------- @@ -1439,14 +1301,24 @@ sub run_testcase ($) { { $master->[0]->{'pid'}= mysqld_start('master',0,$tinfo->{'master_opt'},[]); + if ( ! $master->[0]->{'pid'} ) + { + report_failure_and_restart($tinfo); + return; + } } if ( $opt_with_ndbcluster and ! $master->[1]->{'pid'} ) { $master->[1]->{'pid'}= mysqld_start('master',1,$tinfo->{'master_opt'},[]); + if ( ! $master->[1]->{'pid'} ) + { + report_failure_and_restart($tinfo); + return; + } } - if ( $tinfo->{'master_opt'} ) + if ( @{$tinfo->{'master_opt'}} ) { $master->[0]->{'uses_special_flags'}= 1; } @@ -1469,6 +1341,11 @@ sub run_testcase ($) { $slave->[$idx]->{'pid'}= mysqld_start('slave',$idx, $tinfo->{'slave_opt'}, $tinfo->{'slave_mi'}); + if ( ! $slave->[$idx]->{'pid'} ) + { + report_failure_and_restart($tinfo); + return; + } } } } @@ -1502,33 +1379,40 @@ sub run_testcase ($) { "mysqltest returned unexpected code $res, " . "it has probably crashed"); } - mtr_report_test_failed($tinfo); - mtr_show_failed_diff($tname); - print "\n"; - if ( ! $opt_force ) - { - print "Aborting: $tname failed. To continue, re-run with '--force'."; - print "\n"; - if ( ! $opt_gdb and ! $glob_use_running_server and - ! $opt_ddd and ! $glob_use_embedded_server ) - { - stop_masters_slaves(); - } - exit(1); - } - - # FIXME always terminate on failure?! - if ( ! $opt_gdb and ! $glob_use_running_server and - ! $opt_ddd and ! $glob_use_embedded_server ) - { - stop_masters_slaves(); - } - print "Resuming Tests\n\n"; + report_failure_and_restart($tinfo); } } } +sub report_failure_and_restart ($) { + my $tinfo= shift; + + mtr_report_test_failed($tinfo); + mtr_show_failed_diff($tinfo->{'name'}); + print "\n"; + if ( ! 
$opt_force ) + { + print "Aborting: $tinfo->{'name'} failed. To continue, re-run with '--force'."; + print "\n"; + if ( ! $opt_gdb and ! $glob_use_running_server and + ! $opt_ddd and ! $glob_use_embedded_server ) + { + stop_masters_slaves(); + } + exit(1); + } + + # FIXME always terminate on failure?! + if ( ! $opt_gdb and ! $glob_use_running_server and + ! $opt_ddd and ! $glob_use_embedded_server ) + { + stop_masters_slaves(); + } + print "Resuming Tests\n\n"; +} + + ############################################################################## # # Start and stop servers @@ -1603,11 +1487,13 @@ sub do_before_start_slave ($$) { } sub mysqld_arguments ($$$$$) { - my $args= shift; - my $type= shift; # master/slave/bootstrap - my $idx= shift; - my $extra_opt= shift; - my $slave_master_info= shift; + my $args= shift; + my $type= shift; # master/slave/bootstrap + my $idx= shift; + my $extra_opt= shift; + my $slave_master_info= shift; + +# print STDERR Dumper($extra_opt); my $sidx= ""; # Index as string, 0 is empty string if ( $idx > 0 ) @@ -1835,10 +1721,10 @@ sub mysqld_arguments ($$$$$) { ############################################################################## sub mysqld_start ($$$$) { - my $type= shift; # master/slave/bootstrap - my $idx= shift; - my $extra_opt= shift; - my $slave_master_info= shift; + my $type= shift; # master/slave/bootstrap + my $idx= shift; + my $extra_opt= shift; + my $slave_master_info= shift; my $args; # Arg vector my $exe; @@ -1893,9 +1779,8 @@ sub mysqld_start ($$$$) { $master->[$idx]->{'path_myerr'}, $master->[$idx]->{'path_myerr'}, "") ) { - sleep_until_file_created($master->[$idx]->{'path_mypid'}, - $master->[$idx]->{'start_timeout'}); - return $pid; + return sleep_until_file_created($master->[$idx]->{'path_mypid'}, + $master->[$idx]->{'start_timeout'}, $pid); } } @@ -1905,13 +1790,12 @@ sub mysqld_start ($$$$) { $slave->[$idx]->{'path_myerr'}, $slave->[$idx]->{'path_myerr'}, "") ) { - sleep_until_file_created($slave->[$idx]->{'path_mypid'}, - $master->[$idx]->{'start_timeout'}); - return $pid; + return sleep_until_file_created($slave->[$idx]->{'path_mypid'}, + $master->[$idx]->{'start_timeout'}, $pid); } } - mtr_error("Can't start mysqld FIXME"); + return 0; } sub stop_masters_slaves () { @@ -1944,7 +1828,7 @@ sub stop_masters () { } } - mtr_stop_mysqld_servers(\@args, 0); + mtr_stop_mysqld_servers(\@args); } sub stop_slaves () { @@ -1966,7 +1850,7 @@ sub stop_slaves () { } } - mtr_stop_mysqld_servers(\@args, 0); + mtr_stop_mysqld_servers(\@args); } @@ -1992,17 +1876,32 @@ sub run_mysqltest ($$) { } my $cmdline_mysql= - "$exe_mysql --host=localhost --port=$master->[0]->{'path_myport'} " . - "--socket=$master->[0]->{'path_mysock'} --user=root --password="; + "$exe_mysql --host=localhost --user=root --password= " . + "--port=$master->[0]->{'path_myport'} " . + "--socket=$master->[0]->{'path_mysock'}"; + + my $cmdline_mysql_client_test= + "$exe_mysql_client_test --no-defaults --testcase --user=root --silent " . + "--port=$master->[0]->{'path_myport'} " . + "--socket=$master->[0]->{'path_mysock'}"; + + my $cmdline_mysql_fix_system_tables= + "$exe_mysql_fix_system_tables --no-defaults --host=localhost --user=root --password= " . + "--basedir=$glob_basedir --bindir=$path_client_bindir --verbose " . + "--port=$master->[0]->{'path_myport'} " . + "--socket=$master->[0]->{'path_mysock'}"; + + # FIXME really needing a PATH??? 
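#
# (Note, not from the patch: these full command lines are exported as
#  environment variables below so that .test files can call the client
#  tools with the right connection parameters, for example a test may run
#
#    --exec $MYSQL_DUMP test t1
#
#  where the database and table names are made-up examples.)
#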
# $ENV{'PATH'}= "/bin:/usr/bin:/usr/local/bin:/usr/bsd:/usr/X11R6/bin:/usr/openwin/bin:/usr/bin/X11:$ENV{'PATH'}"; - $ENV{'MYSQL'}= $exe_mysql; + $ENV{'MYSQL'}= $cmdline_mysql; $ENV{'MYSQL_DUMP'}= $cmdline_mysqldump; - $ENV{'MYSQL_BINLOG'}= $exe_mysqlbinlog; - $ENV{'CLIENT_BINDIR'}= $path_client_bindir; - $ENV{'TESTS_BINDIR'}= $path_tests_bindir; + $ENV{'MYSQL_BINLOG'}= $cmdline_mysqlbinlog; + $ENV{'MYSQL_FIX_SYSTEM_TABLES'}= $cmdline_mysql_fix_system_tables; + $ENV{'MYSQL_CLIENT_TEST'}= $cmdline_mysql_client_test; + $ENV{'CHARSETSDIR'}= $path_charsetsdir; my $exe= $exe_mysqltest; my $args; From c2e9e15e9f44b2149286a9b6b784f93fe9b2938e Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:50:56 +0100 Subject: [PATCH 27/53] mtr_cases.pl: new file --- mysql-test/lib/mtr_cases.pl | 270 ++++++++++++++++++++++++++++++++++++ 1 file changed, 270 insertions(+) create mode 100644 mysql-test/lib/mtr_cases.pl diff --git a/mysql-test/lib/mtr_cases.pl b/mysql-test/lib/mtr_cases.pl new file mode 100644 index 00000000000..5977bb380cf --- /dev/null +++ b/mysql-test/lib/mtr_cases.pl @@ -0,0 +1,270 @@ +# -*- cperl -*- + +# This is a library file used by the Perl version of mysql-test-run, +# and is part of the translation of the Bourne shell script with the +# same name. + +use strict; + +sub collect_test_cases ($); +sub collect_one_test_case ($$$$$); + +############################################################################## +# +# Collect information about test cases we are to run +# +############################################################################## + +sub collect_test_cases ($) { + my $suite= shift; # Test suite name + + my $testdir; + my $resdir; + + if ( $suite eq "main" ) + { + $testdir= "$::glob_mysql_test_dir/t"; + $resdir= "$::glob_mysql_test_dir/r"; + } + else + { + $testdir= "$::glob_mysql_test_dir/suite/$suite/t"; + $resdir= "$::glob_mysql_test_dir/suite/$suite/r"; + } + + my $cases = []; # Array of hash, will be array of C struct + + opendir(TESTDIR, $testdir) or mtr_error("Can't open dir \"$testdir\": $!"); + + if ( @::opt_cases ) + { + foreach my $tname ( @::opt_cases ) { # Run in specified order, no sort + my $elem= "$tname.test"; + if ( ! -f "$testdir/$elem") + { + mtr_error("Test case $tname ($testdir/$elem) is not found"); + } + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases); + } + closedir TESTDIR; + } + else + { + foreach my $elem ( sort readdir(TESTDIR) ) { + my $tname= mtr_match_extension($elem,"test"); + next if ! defined $tname; + next if $::opt_do_test and ! defined mtr_match_prefix($elem,$::opt_do_test); + + collect_one_test_case($testdir,$resdir,$tname,$elem,$cases); + } + closedir TESTDIR; + } + + # To speed things up, we sort first in if the test require a restart + # or not, second in alphanumeric order. + +# @$cases = sort { +# if ( $a->{'master_restart'} and $b->{'master_restart'} or +# ! $a->{'master_restart'} and ! 
$b->{'master_restart'} ) +# { +# return $a->{'name'} cmp $b->{'name'}; +# } +# if ( $a->{'master_restart'} ) +# { +# return 1; # Is greater +# } +# else +# { +# return -1; # Is less +# } +# } @$cases; + + return $cases; +} + + +############################################################################## +# +# Collect information about a single test case +# +############################################################################## + + +sub collect_one_test_case($$$$$) { + my $testdir= shift; + my $resdir= shift; + my $tname= shift; + my $elem= shift; + my $cases= shift; + + my $path= "$testdir/$elem"; + + # ---------------------------------------------------------------------- + # Skip some tests silently + # ---------------------------------------------------------------------- + + if ( $::opt_start_from and $tname lt $::opt_start_from ) + { + return; + } + + # ---------------------------------------------------------------------- + # Skip some tests but include in list, just mark them to skip + # ---------------------------------------------------------------------- + + my $tinfo= {}; + $tinfo->{'name'}= $tname; + $tinfo->{'result_file'}= "$resdir/$tname.result"; + push(@$cases, $tinfo); + + if ( $::opt_skip_test and defined mtr_match_prefix($tname,$::opt_skip_test) ) + { + $tinfo->{'skip'}= 1; + return; + } + + # FIXME temporary solution, we have a hard coded list of test cases to + # skip if we are using the embedded server + + if ( $::glob_use_embedded_server and + mtr_match_any_exact($tname,\@::skip_if_embedded_server) ) + { + $tinfo->{'skip'}= 1; + return; + } + + # ---------------------------------------------------------------------- + # Collect information about test case + # ---------------------------------------------------------------------- + + $tinfo->{'path'}= $path; + $tinfo->{'timezone'}= "GMT-3"; # for UNIX_TIMESTAMP tests to work + + if ( defined mtr_match_prefix($tname,"rpl") ) + { + if ( $::opt_skip_rpl ) + { + $tinfo->{'skip'}= 1; + return; + } + + $tinfo->{'slave_num'}= 1; # Default, use one slave + + # FIXME currently we always restart slaves + $tinfo->{'slave_restart'}= 1; + + if ( $tname eq 'rpl_failsafe' or $tname eq 'rpl_chain_temp_table' ) + { +# $tinfo->{'slave_num'}= 3; # Not 3 ? Check old code, strange + } + } + + # FIXME what about embedded_server + ndbcluster, skip ?! 
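#
# (Note, not from the patch: the per-test companion files checked below are
#  looked up next to the .test file by name: <tname>-master.opt,
#  <tname>-slave.opt, <tname>.slave-mi, <tname>-master.sh, <tname>-slave.sh
#  and <tname>.disabled. A test is switched off by creating the .disabled
#  file with a one-line reason; that text is what the test report prints
#  after "[ disabled ]".)
#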
+ + my $master_opt_file= "$testdir/$tname-master.opt"; + my $slave_opt_file= "$testdir/$tname-slave.opt"; + my $slave_mi_file= "$testdir/$tname.slave-mi"; + my $master_sh= "$testdir/$tname-master.sh"; + my $slave_sh= "$testdir/$tname-slave.sh"; + my $disabled= "$testdir/$tname.disabled"; + + $tinfo->{'master_opt'}= []; + $tinfo->{'slave_opt'}= []; + $tinfo->{'slave_mi'}= []; + + if ( -f $master_opt_file ) + { + $tinfo->{'master_restart'}= 1; # We think so for now + # This is a dirty hack from old mysql-test-run, we use the opt file + # to flag other things as well, it is not a opt list at all + my $extra_master_opt= mtr_get_opts_from_file($master_opt_file); + + foreach my $opt (@$extra_master_opt) + { + my $value; + + $value= mtr_match_prefix($opt, "--timezone="); + + if ( defined $value ) + { + $tinfo->{'timezone'}= $value; + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + + $value= mtr_match_prefix($opt, "--result-file="); + + if ( defined $value ) + { + $tinfo->{'result_file'}= "r/$value.result"; + if ( $::opt_result_ext and $::opt_record or + -f "$tinfo->{'result_file'}$::opt_result_ext") + { + $tinfo->{'result_file'}.= $::opt_result_ext; + } + $extra_master_opt= []; + $tinfo->{'master_restart'}= 0; + last; + } + } + + $tinfo->{'master_opt'}= $extra_master_opt; + } + + if ( -f $slave_opt_file ) + { + $tinfo->{'slave_opt'}= mtr_get_opts_from_file($slave_opt_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $slave_mi_file ) + { + $tinfo->{'slave_mi'}= mtr_get_opts_from_file($slave_mi_file); + $tinfo->{'slave_restart'}= 1; + } + + if ( -f $master_sh ) + { + if ( $::glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'master_sh'}= $master_sh; + $tinfo->{'master_restart'}= 1; + } + } + + if ( -f $slave_sh ) + { + if ( $::glob_win32_perl ) + { + $tinfo->{'skip'}= 1; + } + else + { + $tinfo->{'slave_sh'}= $slave_sh; + $tinfo->{'slave_restart'}= 1; + } + } + + if ( -f $disabled ) + { + $tinfo->{'skip'}= 1; + $tinfo->{'disable'}= 1; # Sub type of 'skip' + $tinfo->{'comment'}= mtr_fromfile($disabled); + } + + # We can't restart a running server that may be in use + + if ( $::glob_use_running_server and + ( $tinfo->{'master_restart'} or $tinfo->{'slave_restart'} ) ) + { + $tinfo->{'skip'}= 1; + } +} + + +1; From 3c925ee0f1b3387e6df952de8f86c618f11c1a8d Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 21:11:12 +0000 Subject: [PATCH 28/53] Bug#8057 Fix crash with LAST_INSERT_ID() in UPDATE, Tests included, mysql-test/r/update.result: Bug#8057 Test for bug mysql-test/t/update.test: Bug#8057 Test for bug sql/item_func.cc: Bug#8057 Don't create new Item in val_int() --- mysql-test/r/update.result | 7 +++++++ mysql-test/t/update.test | 9 +++++++++ sql/item_func.cc | 8 ++------ 3 files changed, 18 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/update.result b/mysql-test/r/update.result index beab6105f79..ac370db9ecc 100644 --- a/mysql-test/r/update.result +++ b/mysql-test/r/update.result @@ -212,3 +212,10 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20); update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1"; update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10; drop table t1, t2; +create table t1 (id int not null auto_increment primary key, id_str varchar(32)); +insert into t1 (id_str) values ("test"); +update t1 set id_str = concat(id_str, id) where id = last_insert_id(); +select * from t1; +id id_str +1 test1 +drop table t1; diff --git a/mysql-test/t/update.test b/mysql-test/t/update.test 
index 704263b1216..04192f25ac8 100644 --- a/mysql-test/t/update.test +++ b/mysql-test/t/update.test @@ -170,3 +170,12 @@ insert into t1 values (1, "t1c2-1", 10), (2, "t1c2-2", 20); update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1"; update t1 left join t2 on t1.c1 = t2.c1 set t2.c2 = "t2c2-1" where t1.c3 = 10; drop table t1, t2; + +# +# Bug #8057 +# +create table t1 (id int not null auto_increment primary key, id_str varchar(32)); +insert into t1 (id_str) values ("test"); +update t1 set id_str = concat(id_str, id) where id = last_insert_id(); +select * from t1; +drop table t1; diff --git a/sql/item_func.cc b/sql/item_func.cc index 7125f4704b8..03b5688efc2 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2284,14 +2284,10 @@ longlong Item_func_last_insert_id::val_int() longlong value=args[0]->val_int(); current_thd->insert_id(value); null_value=args[0]->null_value; - return value; } else - { - Item *it= get_system_var(current_thd, OPT_SESSION, "last_insert_id", 14, - "last_insert_id()"); - return it->val_int(); - } + current_thd->lex->uncacheable(UNCACHEABLE_SIDEEFFECT); + return current_thd->insert_id(); } /* This function is just used to test speed of different functions */ From a3efbf47c80bf6896c6a9fceecd761b757f76971 Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 14:21:16 -0800 Subject: [PATCH 29/53] Copy *.result.es files for binary distribution so embedded tests can be run scripts/make_binary_distribution.sh: Make sure to copy result.es files --- scripts/make_binary_distribution.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/make_binary_distribution.sh b/scripts/make_binary_distribution.sh index 33d4794e4f7..910aa38c33f 100644 --- a/scripts/make_binary_distribution.sh +++ b/scripts/make_binary_distribution.sh @@ -220,7 +220,7 @@ $CP mysql-test/include/*.inc $BASE/mysql-test/include $CP mysql-test/std_data/*.dat mysql-test/std_data/*.*001 $BASE/mysql-test/std_data $CP mysql-test/std_data/des_key_file $BASE/mysql-test/std_data $CP mysql-test/t/*test mysql-test/t/*.opt mysql-test/t/*.slave-mi mysql-test/t/*.sh $BASE/mysql-test/t -$CP mysql-test/r/*result mysql-test/r/*.require $BASE/mysql-test/r +$CP mysql-test/r/*result mysql-test/r/*result.es mysql-test/r/*.require $BASE/mysql-test/r if [ $BASE_SYSTEM != "netware" ] ; then chmod a+x $BASE/bin/* From 9f7c9aa7d5eed311e3d40c8f7d1a55abb7d4566c Mon Sep 17 00:00:00 2001 From: unknown Date: Thu, 3 Feb 2005 23:56:13 +0100 Subject: [PATCH 30/53] ndb - sol9x86: cc -xO3: fix optimizer error. ndb/src/common/util/NdbSqlUtil.cpp: sol9x86: cc -xO3: fix optimizer error. Note: same expression remains in Field_newdate::val_int(). 
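For reference, the workaround branch can be read in isolation as the standalone sketch below (illustrative code, not the NdbSqlUtil sources): the 3-byte packed date keeps the day in the low 5 bits, the month in the next 4 bits and the year in the remaining bits, and the #else branch compares the two dates component by component instead of comparing the packed integers directly, which is the expression the Solaris cc -xO3 optimizer reportedly miscompiled.

  #include <cstdio>

  // Decode two packed dates (as produced by uint3korr) and compare them
  // year, then month, then day -- mirroring the #else branch of cmpDate().
  static int cmp_packed_date(unsigned j1, unsigned j2)
  {
    unsigned d1 = j1 & 31, d2 = j2 & 31;   // day: low 5 bits
    j1 >>= 5;  j2 >>= 5;
    unsigned m1 = j1 & 15, m2 = j2 & 15;   // month: next 4 bits
    j1 >>= 4;  j2 >>= 4;                   // year: remaining bits
    if (j1 != j2) return j1 < j2 ? -1 : +1;
    if (m1 != m2) return m1 < m2 ? -1 : +1;
    if (d1 != d2) return d1 < d2 ? -1 : +1;
    return 0;
  }

  int main()
  {
    unsigned a = (2005u << 9) | (2u << 5) | 3u;   // 2005-02-03
    unsigned b = (2005u << 9) | (2u << 5) | 4u;   // 2005-02-04
    printf("%d\n", cmp_packed_date(a, b));        // prints -1
  }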
--- ndb/src/common/util/NdbSqlUtil.cpp | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/ndb/src/common/util/NdbSqlUtil.cpp b/ndb/src/common/util/NdbSqlUtil.cpp index 6b23da774af..53fa5d69215 100644 --- a/ndb/src/common/util/NdbSqlUtil.cpp +++ b/ndb/src/common/util/NdbSqlUtil.cpp @@ -526,6 +526,7 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 union { const Uint32* p; const unsigned char* v; } u1, u2; u1.p = p1; u2.p = p2; +#ifdef ndb_date_sol9x86_cc_xO3_madness // from Field_newdate::val_int Uint64 j1 = uint3korr(u1.v); Uint64 j2 = uint3korr(u2.v); @@ -536,6 +537,33 @@ NdbSqlUtil::cmpDate(const void* info, const Uint32* p1, const Uint32* p2, Uint32 if (j1 > j2) return +1; return 0; +#else + uint j1 = uint3korr(u1.v); + uint j2 = uint3korr(u2.v); + uint d1 = (j1 & 31); + uint d2 = (j2 & 31); + j1 = (j1 >> 5); + j2 = (j2 >> 5); + uint m1 = (j1 & 15); + uint m2 = (j2 & 15); + j1 = (j1 >> 4); + j2 = (j2 >> 4); + uint y1 = j1; + uint y2 = j2; + if (y1 < y2) + return -1; + if (y1 > y2) + return +1; + if (m1 < m2) + return -1; + if (m1 > m2) + return +1; + if (d1 < d2) + return -1; + if (d1 > d2) + return +1; + return 0; +#endif #endif } From ebda548d0d26f49a05d424f186e0b1d92c90925e Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 09:14:22 +0300 Subject: [PATCH 31/53] Fix for BUG#7716: in in_string::set() take into account that the value returned by item->val_str() may be a substring of the passed string. Disallow string=its_substring assignment in String::operator=(). mysql-test/r/func_misc.result: Testcase for BUG#7716 mysql-test/t/func_misc.test: Testcase for BUG#7716 sql/item_cmpfunc.cc: Fix for BUG#7716: in in_string::set() take into account that the string returned by item->val_str(S) may be not S but use the buffer owned by S. sql/sql_string.h: * Added assert: String& String::operator=(const String&) may not be used to do assignments like str = string_that_uses_buffer_owned_by_str * Added String::uses_buffer_owned_by(). 
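To illustrate the hazard with a minimal standalone sketch (a simplified stand-in, not the real String class from sql_string.h): an item such as LEFT() or MID() may hand back a String whose Ptr points into the buffer owned by the String it was given, so an assignment that frees the destination's buffer before copying would leave the source pointing at freed memory.

  #include <cstring>
  #include <cstdlib>

  // Minimal stand-in for String: a data pointer, a length, and an "alloced"
  // flag saying whether the object owns (and will eventually free) its buffer.
  struct Str
  {
    char  *Ptr;
    size_t str_length;
    bool   alloced;

    // The check added as String::uses_buffer_owned_by(): true if our data
    // lives inside the buffer owned by s.
    bool uses_buffer_owned_by(const Str *s) const
    {
      return s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length;
    }
  };

  int main()
  {
    Str owner;                        // owns a heap buffer
    owner.Ptr        = (char*) malloc(16);
    memcpy(owner.Ptr, "2004-01-06", 11);
    owner.str_length = 10;
    owner.alloced    = true;

    Str view;                         // e.g. what LEFT(owner, 6) could return
    view.Ptr        = owner.Ptr;
    view.str_length = 6;
    view.alloced    = false;

    // "owner = view" must copy view's characters into storage of its own
    // whenever this is true; freeing owner's buffer before the copy would
    // turn view into a dangling pointer.
    bool must_copy_first = view.uses_buffer_owned_by(&owner);

    free(owner.Ptr);
    return must_copy_first ? 0 : 1;   // returns 0 here
  }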
--- mysql-test/r/func_misc.result | 21 +++++++++++++++++++++ mysql-test/t/func_misc.test | 15 +++++++++++++++ sql/item_cmpfunc.cc | 4 ++++ sql/sql_string.h | 10 ++++++++++ 4 files changed, 50 insertions(+) diff --git a/mysql-test/r/func_misc.result b/mysql-test/r/func_misc.result index 5a9f0f68228..2d464c891bf 100644 --- a/mysql-test/r/func_misc.result +++ b/mysql-test/r/func_misc.result @@ -28,3 +28,24 @@ length(format('nan', 2)) > 0 select concat("$",format(2500,2)); concat("$",format(2500,2)) $2,500.00 +create table t1 ( a timestamp ); +insert into t1 values ( '2004-01-06 12:34' ); +select a from t1 where left(a+0,6) in ( left(20040106,6) ); +a +2004-01-06 12:34:00 +select a from t1 where left(a+0,6) = ( left(20040106,6) ); +a +2004-01-06 12:34:00 +select a from t1 where right(a+0,6) in ( right(20040106123400,6) ); +a +2004-01-06 12:34:00 +select a from t1 where right(a+0,6) = ( right(20040106123400,6) ); +a +2004-01-06 12:34:00 +select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); +a +2004-01-06 12:34:00 +select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); +a +2004-01-06 12:34:00 +drop table t1; diff --git a/mysql-test/t/func_misc.test b/mysql-test/t/func_misc.test index e73f2a1b26c..89aba7ee583 100644 --- a/mysql-test/t/func_misc.test +++ b/mysql-test/t/func_misc.test @@ -23,3 +23,18 @@ select length(format('nan', 2)) > 0; # Test for bug #628 # select concat("$",format(2500,2)); + +# Test for BUG#7716 +create table t1 ( a timestamp ); +insert into t1 values ( '2004-01-06 12:34' ); +select a from t1 where left(a+0,6) in ( left(20040106,6) ); +select a from t1 where left(a+0,6) = ( left(20040106,6) ); + +select a from t1 where right(a+0,6) in ( right(20040106123400,6) ); +select a from t1 where right(a+0,6) = ( right(20040106123400,6) ); + +select a from t1 where mid(a+0,6,3) in ( mid(20040106123400,6,3) ); +select a from t1 where mid(a+0,6,3) = ( mid(20040106123400,6,3) ); + +drop table t1; + diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c5e6d520ab7..46ef3281dd1 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1503,7 +1503,11 @@ void in_string::set(uint pos,Item *item) String *str=((String*) base)+pos; String *res=item->val_str(str); if (res && res != str) + { + if (res->uses_buffer_owned_by(str)) + res->copy(); *str= *res; + } if (!str->charset()) { CHARSET_INFO *cs; diff --git a/sql/sql_string.h b/sql/sql_string.h index a8fb9574c0b..9136dddbbf2 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -182,6 +182,11 @@ public: { if (&s != this) { + /* + It is forbidden to do assignments like + some_string = substring_of_that_string + */ + DBUG_ASSERT(!s.uses_buffer_owned_by(this)); free(); Ptr=s.Ptr ; str_length=s.str_length ; Alloced_length=s.Alloced_length; alloced=0; @@ -313,4 +318,9 @@ public: /* Swap two string objects. Efficient way to exchange data without memcpy. */ void swap(String &s); + + inline bool uses_buffer_owned_by(const String *s) const + { + return (s->alloced && Ptr >= s->Ptr && Ptr < s->Ptr + s->str_length); + } }; From 4c69539827f69a693236eca0a2f512b1618e80a1 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:35:08 +0400 Subject: [PATCH 32/53] type_float.result.es updated. mysql-test/r/type_float.result.es: Updated. 
BitKeeper/etc/logging_ok: Logging to logging@openlogging.org accepted --- BitKeeper/etc/logging_ok | 1 + mysql-test/r/type_float.result.es | 9 +++++++++ 2 files changed, 10 insertions(+) diff --git a/BitKeeper/etc/logging_ok b/BitKeeper/etc/logging_ok index 6ccc886e161..bf88e38a780 100644 --- a/BitKeeper/etc/logging_ok +++ b/BitKeeper/etc/logging_ok @@ -180,6 +180,7 @@ ram@gw.mysql.r18.ru ram@gw.udmsearch.izhnet.ru ram@mysql.r18.ru ram@ram.(none) +ramil@mysql.com ranger@regul.home.lan rburnett@build.mysql.com reggie@bob.(none) diff --git a/mysql-test/r/type_float.result.es b/mysql-test/r/type_float.result.es index b93539b6bea..5fcf9213f83 100644 --- a/mysql-test/r/type_float.result.es +++ b/mysql-test/r/type_float.result.es @@ -143,6 +143,15 @@ drop table t1; create table t1 (f float(54)); ERROR 42000: Incorrect column specifier for column 'f' drop table if exists t1; +create table t1 (d1 double, d2 double unsigned); +insert into t1 set d1 = -1.0; +update t1 set d2 = d1; +Warnings: +Warning 1264 Data truncated; out of range for column 'd2' at row 1 +select * from t1; +d1 d2 +-1 0 +drop table t1; create table t1 (f float(4,3)); insert into t1 values (-11.0),(-11),("-11"),(11.0),(11),("11"); Warnings: From 66eb71f3fc13fefaa6d72532b521a28c09138aa3 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:31:36 +0300 Subject: [PATCH 33/53] A fix: bug#6931: Date Type column problem when using UNION-Table bug#7833: Wrong datatype of aggregate column is returned mysql-test/r/func_group.result: Test case for bug 7833: Wrong datatype of aggregate column is returned mysql-test/r/union.result: Test case for bug 6931: Date Type column problem when using UNION-Table. mysql-test/t/func_group.test: Test case for bug 7833: Wrong datatype of aggregate column is returned mysql-test/t/union.test: Test case for bug 6931: Date Type column problem when using UNION-Table. 
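Outside of the diffs below, the essence of the fix can be sketched as follows (simplified, illustrative names; the real predicate is field_types_to_be_kept() in field.cc): for function and aggregate items, a real tmp-table Field, created from an empty TABLE object, is kept only for temporal types, so that DATE/TIME/DATETIME results survive the type merging done for UNION and derived tables instead of degrading to a generic type.

  #include <cstdio>

  // Simplified mirror of the predicate added in field.cc.
  enum field_type { T_LONG, T_DOUBLE, T_STRING, T_DATE, T_NEWDATE, T_TIME, T_DATETIME };

  static bool keep_field_type(field_type t)
  {
    switch (t)
    {
    case T_DATE:
    case T_NEWDATE:
    case T_TIME:
    case T_DATETIME:
      return true;     // e.g. CAST('2004-12-31' AS DATE), MAX(datetime_col)
    default:
      return false;    // other types go through the item's own type info
    }
  }

  int main()
  {
    printf("%d %d\n", keep_field_type(T_DATETIME), keep_field_type(T_DOUBLE)); // 1 0
  }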
--- mysql-test/r/func_group.result | 12 +++++++ mysql-test/r/union.result | 36 +++++++++++++++++++ mysql-test/t/func_group.test | 14 ++++++++ mysql-test/t/union.test | 35 ++++++++++++++++++ sql/field.cc | 35 ++++++++++++++++++ sql/field.h | 1 + sql/item.cc | 65 +++++++++++++++++++++++++++------- sql/item.h | 4 +-- sql/sql_union.cc | 14 ++++++-- 9 files changed, 200 insertions(+), 16 deletions(-) diff --git a/mysql-test/r/func_group.result b/mysql-test/r/func_group.result index 4bb79a1cb41..fa645700875 100644 --- a/mysql-test/r/func_group.result +++ b/mysql-test/r/func_group.result @@ -733,3 +733,15 @@ one 2 two 2 three 1 drop table t1; +create table t1(f1 datetime); +insert into t1 values (now()); +create table t2 select f2 from (select max(now()) f2 from t1) a; +show columns from t2; +Field Type Null Key Default Extra +f2 datetime 0000-00-00 00:00:00 +drop table t2; +create table t2 select f2 from (select now() f2 from t1) a; +show columns from t2; +Field Type Null Key Default Extra +f2 datetime 0000-00-00 00:00:00 +drop table t2, t1; diff --git a/mysql-test/r/union.result b/mysql-test/r/union.result index f07bdad9021..115ef6a47f9 100644 --- a/mysql-test/r/union.result +++ b/mysql-test/r/union.result @@ -1137,3 +1137,39 @@ t1 CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1 drop table t1; drop table t2; +create table t1(a1 int, f1 char(10)); +create table t2 +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +union +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +order by f2, a1; +show columns from t2; +Field Type Null Key Default Extra +f2 date YES NULL +a1 int(11) YES NULL +drop table t1, t2; +create table t1 (f1 int); +create table t2 (f1 int, f2 int ,f3 date); +create table t3 (f1 int, f2 char(10)); +create table t4 +( +select t2.f3 as sdate +from t1 +left outer join t2 on (t1.f1 = t2.f1) +inner join t3 on (t2.f2 = t3.f1) +order by t1.f1, t3.f1, t2.f3 +) +union +( +select cast('2004-12-31' as date) as sdate +from t1 +left outer join t2 on (t1.f1 = t2.f1) +inner join t3 on (t2.f2 = t3.f1) +group by t1.f1 +order by t1.f1, t3.f1, t2.f3 +) +order by sdate; +show columns from t4; +Field Type Null Key Default Extra +sdate date YES NULL +drop table t1, t2, t3, t4; diff --git a/mysql-test/t/func_group.test b/mysql-test/t/func_group.test index 79d6112e6de..465611a5ebb 100644 --- a/mysql-test/t/func_group.test +++ b/mysql-test/t/func_group.test @@ -473,3 +473,17 @@ INSERT INTO t1 VALUES select val, count(*) from t1 group by val; drop table t1; + + +# +# Bug 7833: Wrong datatype of aggregate column is returned +# + +create table t1(f1 datetime); +insert into t1 values (now()); +create table t2 select f2 from (select max(now()) f2 from t1) a; +show columns from t2; +drop table t2; +create table t2 select f2 from (select now() f2 from t1) a; +show columns from t2; +drop table t2, t1; diff --git a/mysql-test/t/union.test b/mysql-test/t/union.test index 8682808f3f3..90b2197603b 100644 --- a/mysql-test/t/union.test +++ b/mysql-test/t/union.test @@ -664,3 +664,38 @@ show create table t1; drop table t1; drop table t2; +# +# Bug 6931: Date Type column problem when using UNION-Table. 
+# +create table t1(a1 int, f1 char(10)); +create table t2 +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +union +select f2,a1 from (select a1, CAST('2004-12-31' AS DATE) f2 from t1) a +order by f2, a1; +show columns from t2; +drop table t1, t2; + +create table t1 (f1 int); +create table t2 (f1 int, f2 int ,f3 date); +create table t3 (f1 int, f2 char(10)); +create table t4 +( + select t2.f3 as sdate + from t1 + left outer join t2 on (t1.f1 = t2.f1) + inner join t3 on (t2.f2 = t3.f1) + order by t1.f1, t3.f1, t2.f3 +) +union +( + select cast('2004-12-31' as date) as sdate + from t1 + left outer join t2 on (t1.f1 = t2.f1) + inner join t3 on (t2.f2 = t3.f1) + group by t1.f1 + order by t1.f1, t3.f1, t2.f3 +) +order by sdate; +show columns from t4; +drop table t1, t2, t3, t4; diff --git a/sql/field.cc b/sql/field.cc index 7357bc06f11..9965cb792be 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -245,6 +245,7 @@ static Field::field_cast_enum field_cast_date[]= Field::FIELD_CAST_BLOB, Field::FIELD_CAST_STOP}; static Field::field_cast_enum field_cast_newdate[]= {Field::FIELD_CAST_NEWDATE, + Field::FIELD_CAST_DATE, Field::FIELD_CAST_DATETIME, Field::FIELD_CAST_STRING, Field::FIELD_CAST_VARSTRING, Field::FIELD_CAST_BLOB, Field::FIELD_CAST_STOP}; @@ -6024,6 +6025,40 @@ Field *make_field(char *ptr, uint32 field_length, } +/* + Check if field_type is appropriate field type + to create field for tmp table using + item->tmp_table_field() method + + SYNOPSIS + field_types_to_be_kept() + field_type - field type + + NOTE + it is used in function get_holder_example_field() + from item.cc + + RETURN + 1 - can use item->tmp_table_field() method + 0 - can not use item->tmp_table_field() method + +*/ + +bool field_types_to_be_kept(enum_field_types field_type) +{ + switch (field_type) + { + case FIELD_TYPE_DATE: + case FIELD_TYPE_NEWDATE: + case FIELD_TYPE_TIME: + case FIELD_TYPE_DATETIME: + return 1; + default: + return 0; + } +} + + /* Create a field suitable for create of table */ create_field::create_field(Field *old_field,Field *orig_field) diff --git a/sql/field.h b/sql/field.h index 27a01a69273..fd0f2f9c2f1 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1265,6 +1265,7 @@ int set_field_to_null(Field *field); int set_field_to_null_with_conversions(Field *field, bool no_conversions); bool test_if_int(const char *str, int length, const char *int_end, CHARSET_INFO *cs); +bool field_types_to_be_kept(enum_field_types field_type); /* The following are for the interface with the .frm file diff --git a/sql/item.cc b/sql/item.cc index ab29c147dfb..d61d628e8fa 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -2639,7 +2639,53 @@ void Item_cache_row::bring_value() } -Item_type_holder::Item_type_holder(THD *thd, Item *item) +/* + Returns field for temporary table dependind on item type + + SYNOPSIS + get_holder_example_field() + thd - thread handler + item - pointer to item + table - empty table object + + NOTE + It is possible to return field for Item_func + items only if field type of this item is + date or time or datetime type. 
+ also see function field_types_to_be_kept() from + field.cc + + RETURN + # - field + 0 - no field +*/ + +Field *get_holder_example_field(THD *thd, Item *item, TABLE *table) +{ + DBUG_ASSERT(table); + + Item_func *tmp_item= 0; + if (item->type() == Item::FIELD_ITEM) + return (((Item_field*) item)->field); + if (item->type() == Item::FUNC_ITEM) + tmp_item= (Item_func *) item; + else if (item->type() == Item::SUM_FUNC_ITEM) + { + Item_sum *item_sum= (Item_sum *) item; + if (item_sum->keep_field_type()) + { + if (item_sum->args[0]->type() == Item::FIELD_ITEM) + return (((Item_field*) item_sum->args[0])->field); + if (item_sum->args[0]->type() == Item::FUNC_ITEM) + tmp_item= (Item_func *) item_sum->args[0]; + } + } + return (tmp_item && field_types_to_be_kept(tmp_item->field_type()) ? + tmp_item->tmp_table_field(table) : 0); +} + + +Item_type_holder::Item_type_holder(THD *thd, Item *item, TABLE *table) :Item(thd, item), item_type(item->result_type()), orig_type(item_type) { @@ -2649,10 +2695,7 @@ Item_type_holder::Item_type_holder(THD *thd, Item *item) It is safe assign pointer on field, because it will be used just after all JOIN::prepare calls and before any SELECT execution */ - if (item->type() == Item::FIELD_ITEM) - field_example= ((Item_field*) item)->field; - else - field_example= 0; + field_example= get_holder_example_field(thd, item, table); max_length= real_length(item); maybe_null= item->maybe_null; collation.set(item->collation); @@ -2692,25 +2735,23 @@ inline bool is_attr_compatible(Item *from, Item *to) (to->maybe_null || !from->maybe_null) && (to->result_type() != STRING_RESULT || from->result_type() != STRING_RESULT || - my_charset_same(from->collation.collation, - to->collation.collation))); + (from->collation.collation == to->collation.collation))); } -bool Item_type_holder::join_types(THD *thd, Item *item) +bool Item_type_holder::join_types(THD *thd, Item *item, TABLE *table) { uint32 new_length= real_length(item); bool use_new_field= 0, use_expression_type= 0; Item_result new_result_type= type_convertor[item_type][item->result_type()]; - bool item_is_a_field= item->type() == Item::FIELD_ITEM; - + Field *field= get_holder_example_field(thd, item, table); + bool item_is_a_field= field; /* Check if both items point to fields: in this case we can adjust column types of result table in the union smartly. */ if (field_example && item_is_a_field) { - Field *field= ((Item_field *)item)->field; /* Can 'field_example' field store data of the column? */ if ((use_new_field= (!field->field_cast_compatible(field_example->field_cast_type()) || @@ -2751,7 +2792,7 @@ bool Item_type_holder::join_types(THD *thd, Item *item) It is safe to assign a pointer to field here, because it will be used before any table is closed. 
*/ - field_example= ((Item_field*) item)->field; + field_example= field; } old_cs= collation.collation->name; diff --git a/sql/item.h b/sql/item.h index 237a8f7efac..e0de7452eec 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1321,14 +1321,14 @@ protected: Item_result orig_type; Field *field_example; public: - Item_type_holder(THD*, Item*); + Item_type_holder(THD*, Item*, TABLE *); Item_result result_type () const { return item_type; } enum Type type() const { return TYPE_HOLDER; } double val(); longlong val_int(); String *val_str(String*); - bool join_types(THD *thd, Item *); + bool join_types(THD *thd, Item *, TABLE *); Field *example() { return field_example; } static uint32 real_length(Item *item); void cleanup() diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 027a21db7ac..882316d57d7 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -148,6 +148,7 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, SELECT_LEX *sl, *first_select; select_result *tmp_result; bool is_union; + TABLE *empty_table= 0; DBUG_ENTER("st_select_lex_unit::prepare"); describe= test(additional_options & SELECT_DESCRIBE); @@ -239,13 +240,21 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, goto err; if (sl == first_select) { + /* + We need to create an empty table object. It is used + to create tmp_table fields in Item_type_holder. + The main reason of this is that we can't create + field object without table. + */ + DBUG_ASSERT(!empty_table); + empty_table= (TABLE*) thd->calloc(sizeof(TABLE)); types.empty(); List_iterator_fast it(sl->item_list); Item *item_tmp; while ((item_tmp= it++)) { /* Error's in 'new' will be detected after loop */ - types.push_back(new Item_type_holder(thd_arg, item_tmp)); + types.push_back(new Item_type_holder(thd_arg, item_tmp, empty_table)); } if (thd_arg->is_fatal_error) @@ -264,7 +273,8 @@ int st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, Item *type, *item_tmp; while ((type= tp++, item_tmp= it++)) { - if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp)) + if (((Item_type_holder*)type)->join_types(thd_arg, item_tmp, + empty_table)) DBUG_RETURN(-1); } } From 48e2d224047ddb5a70dcca3abd7f4f828ee0b5bd Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 14:25:08 +0100 Subject: [PATCH 34/53] added test to trigger drifferent fragmentations in ndb corrected documentation on fragmentation set "fragmentation medium" to mean 2 fragments per node instead of 1 set default fragmentation to small instead of medium bug#8284 adjust fragmentation to max_rows mysql-test/r/ndb_basic.result: added test to trigger drifferent fragmentations in ndb mysql-test/t/ndb_basic.test: added test to trigger drifferent fragmentations in ndb ndb/include/ndbapi/NdbDictionary.hpp: corrected documentation on fragmentation ndb/src/kernel/blocks/dbdih/DbdihMain.cpp: set "fragmentation medium" to mean 2 fragments per node instead of 1 ndb/src/ndbapi/NdbDictionaryImpl.cpp: set default fragmentation to small instead of medium sql/ha_ndbcluster.cc: bug#8284 adjust fragmentation to max_rows --- mysql-test/r/ndb_basic.result | 34 +++++++++++++++++ mysql-test/t/ndb_basic.test | 38 +++++++++++++++++++ ndb/include/ndbapi/NdbDictionary.hpp | 6 +-- ndb/src/kernel/blocks/dbdih/DbdihMain.cpp | 2 +- ndb/src/ndbapi/NdbDictionaryImpl.cpp | 2 +- sql/ha_ndbcluster.cc | 45 ++++++++++++++++++++++- 6 files changed, 121 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/ndb_basic.result b/mysql-test/r/ndb_basic.result index 
6ec5338acbe..a6396080ef0 100644 --- a/mysql-test/r/ndb_basic.result +++ b/mysql-test/r/ndb_basic.result @@ -573,3 +573,37 @@ select * from t1 where a12345678901234567890123456789a1234567890=2; a1234567890123456789012345678901234567890 a12345678901234567890123456789a1234567890 5 2 drop table t1; +create table t1 +(a bigint, b bigint, c bigint, d bigint, +primary key (a,b,c,d)) +engine=ndb +max_rows=200000000; +Warnings: +Warning 1105 Ndb might have problems storing the max amount of rows specified +insert into t1 values +(1,2,3,4),(2,3,4,5),(3,4,5,6), +(3,2,3,4),(1,3,4,5),(2,4,5,6), +(1,2,3,5),(2,3,4,8),(3,4,5,9), +(3,2,3,5),(1,3,4,8),(2,4,5,9), +(1,2,3,6),(2,3,4,6),(3,4,5,7), +(3,2,3,6),(1,3,4,6),(2,4,5,7), +(1,2,3,7),(2,3,4,7),(3,4,5,8), +(3,2,3,7),(1,3,4,7),(2,4,5,8), +(1,3,3,4),(2,4,4,5),(3,5,5,6), +(3,3,3,4),(1,4,4,5),(2,5,5,6), +(1,3,3,5),(2,4,4,8),(3,5,5,9), +(3,3,3,5),(1,4,4,8),(2,5,5,9), +(1,3,3,6),(2,4,4,6),(3,5,5,7), +(3,3,3,6),(1,4,4,6),(2,5,5,7), +(1,3,3,7),(2,4,4,7),(3,5,5,8), +(3,3,3,7),(1,4,4,7),(2,5,5,8); +select count(*) from t1; +count(*) +48 +drop table t1; +create table t1 +(a bigint, b bigint, c bigint, d bigint, +primary key (a)) +engine=ndb +max_rows=1; +drop table t1; diff --git a/mysql-test/t/ndb_basic.test b/mysql-test/t/ndb_basic.test index 2671223ada8..f460c573a9d 100644 --- a/mysql-test/t/ndb_basic.test +++ b/mysql-test/t/ndb_basic.test @@ -539,3 +539,41 @@ insert into t1 values (1,1),(2,1),(3,1),(4,1),(5,2),(6,1),(7,1); explain select * from t1 where a12345678901234567890123456789a1234567890=2; select * from t1 where a12345678901234567890123456789a1234567890=2; drop table t1; + +# +# test fragment creation +# +# first a table with _many_ fragments per node group +# then a table with just one fragment per node group +# +create table t1 + (a bigint, b bigint, c bigint, d bigint, + primary key (a,b,c,d)) + engine=ndb + max_rows=200000000; +insert into t1 values + (1,2,3,4),(2,3,4,5),(3,4,5,6), + (3,2,3,4),(1,3,4,5),(2,4,5,6), + (1,2,3,5),(2,3,4,8),(3,4,5,9), + (3,2,3,5),(1,3,4,8),(2,4,5,9), + (1,2,3,6),(2,3,4,6),(3,4,5,7), + (3,2,3,6),(1,3,4,6),(2,4,5,7), + (1,2,3,7),(2,3,4,7),(3,4,5,8), + (3,2,3,7),(1,3,4,7),(2,4,5,8), + (1,3,3,4),(2,4,4,5),(3,5,5,6), + (3,3,3,4),(1,4,4,5),(2,5,5,6), + (1,3,3,5),(2,4,4,8),(3,5,5,9), + (3,3,3,5),(1,4,4,8),(2,5,5,9), + (1,3,3,6),(2,4,4,6),(3,5,5,7), + (3,3,3,6),(1,4,4,6),(2,5,5,7), + (1,3,3,7),(2,4,4,7),(3,5,5,8), + (3,3,3,7),(1,4,4,7),(2,5,5,8); +select count(*) from t1; +drop table t1; + +create table t1 + (a bigint, b bigint, c bigint, d bigint, + primary key (a)) + engine=ndb + max_rows=1; +drop table t1; diff --git a/ndb/include/ndbapi/NdbDictionary.hpp b/ndb/include/ndbapi/NdbDictionary.hpp index 0dca1c0f106..49afbd695c9 100644 --- a/ndb/include/ndbapi/NdbDictionary.hpp +++ b/ndb/include/ndbapi/NdbDictionary.hpp @@ -141,9 +141,9 @@ public: enum FragmentType { FragUndefined = 0, ///< Fragmentation type undefined or default FragSingle = 1, ///< Only one fragment - FragAllSmall = 2, ///< One fragment per node group - FragAllMedium = 3, ///< Default value. Two fragments per node group. - FragAllLarge = 4 ///< Eight fragments per node group. + FragAllSmall = 2, ///< One fragment per node, default + FragAllMedium = 3, ///< two fragments per node + FragAllLarge = 4 ///< Four fragments per node. 
}; }; diff --git a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index dba1efbba9a..0bc8351a9db 100644 --- a/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -6178,7 +6178,7 @@ void Dbdih::execCREATE_FRAGMENTATION_REQ(Signal * signal){ break; case DictTabInfo::AllNodesMediumTable: jam(); - noOfFragments = csystemnodes; + noOfFragments = 2 * csystemnodes; break; case DictTabInfo::AllNodesLargeTable: jam(); diff --git a/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 9f6ed144fb0..530f15d3a2e 100644 --- a/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -284,7 +284,7 @@ void NdbTableImpl::init(){ clearNewProperties(); m_frm.clear(); - m_fragmentType = NdbDictionary::Object::FragAllMedium; + m_fragmentType = NdbDictionary::Object::FragAllSmall; m_logging = true; m_kvalue = 6; m_minLoadFactor = 78; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a959cbaf434..9f0da616289 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3503,6 +3503,47 @@ static int create_ndb_column(NDBCOL &col, Create a table in NDB Cluster */ +static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) +{ + if (form->max_rows == 0) /* default setting, don't set fragmentation */ + return; + /** + * get the number of fragments right + */ + uint no_fragments; + { +#if MYSQL_VERSION_ID >= 50000 + uint acc_row_size= 25+2; +#else + uint acc_row_size= pk_length*4; + /* add acc overhead */ + if (pk_length <= 8) + acc_row_size+= 25+2; /* main page will set the limit */ + else + acc_row_size+= 4+4; /* overflow page will set the limit */ +#endif + ulonglong acc_fragment_size= 512*1024*1024; + ulonglong max_rows= form->max_rows; + no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; + } + { + uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); + NDBTAB::FragmentType ftype; + if (no_fragments > 2*no_nodes) + { + ftype= NDBTAB::FragAllLarge; + if (no_fragments > 4*no_nodes) + push_warning(current_thd, MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); + } + else if (no_fragments > no_nodes) + ftype= NDBTAB::FragAllMedium; + else + ftype= NDBTAB::FragAllSmall; + tab.setFragmentType(ftype); + } +} + int ha_ndbcluster::create(const char *name, TABLE *form, HA_CREATE_INFO *info) @@ -3605,7 +3646,9 @@ int ha_ndbcluster::create(const char *name, break; } } - + + ndb_set_fragmentation(tab, form, pk_length); + if ((my_errno= check_ndb_connection())) DBUG_RETURN(my_errno); From 8c750c466b0109723233e93aea85a39673409b40 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 15:24:06 +0100 Subject: [PATCH 35/53] indexless boolean fulltext search was depending on default_charset_info - Bug#8159 ftbw->off wasn't cleared on reinit - Bug#8234 include/ft_global.h: get rid of default_charset_info in indexless fulltext searches myisam/ft_boolean_search.c: get rid of default_charset_info in indexless fulltext searches clear ftbw->off on reinits myisam/ft_static.c: get rid of default_charset_info in indexless fulltext searches myisam/ftdefs.h: get rid of default_charset_info in indexless fulltext searches sql/ha_myisam.h: get rid of default_charset_info in indexless fulltext searches sql/handler.h: get rid of default_charset_info in indexless fulltext searches sql/item_func.cc: get rid of default_charset_info in indexless fulltext searches --- include/ft_global.h | 2 +- 
myisam/ft_boolean_search.c | 7 ++++--- myisam/ft_static.c | 5 +++-- myisam/ftdefs.h | 2 +- sql/ha_myisam.h | 8 ++++++-- sql/handler.h | 3 +-- sql/item_func.cc | 8 +++----- 7 files changed, 19 insertions(+), 16 deletions(-) diff --git a/include/ft_global.h b/include/ft_global.h index 94f6ad9ef51..c3f60d13a7a 100644 --- a/include/ft_global.h +++ b/include/ft_global.h @@ -62,7 +62,7 @@ void ft_free_stopwords(void); #define FT_SORTED 2 #define FT_EXPAND 4 /* query expansion */ -FT_INFO *ft_init_search(uint,void *, uint, byte *, uint, byte *); +FT_INFO *ft_init_search(uint,void *, uint, byte *, uint,CHARSET_INFO *, byte *); my_bool ft_boolean_check_syntax_string(const byte *); #ifdef __cplusplus diff --git a/myisam/ft_boolean_search.c b/myisam/ft_boolean_search.c index aab3854dd34..4253b5ff96f 100644 --- a/myisam/ft_boolean_search.c +++ b/myisam/ft_boolean_search.c @@ -365,6 +365,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) reset_tree(& ftb->no_dupes); } + ftbw->off=0; /* in case of reinit */ if (_ft2_search(ftb, ftbw, 1)) return; } @@ -373,7 +374,7 @@ static void _ftb_init_index_search(FT_INFO *ftb) FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, - uint query_len) + uint query_len, CHARSET_INFO *cs) { FTB *ftb; FTB_EXPR *ftbe; @@ -385,8 +386,8 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, byte *query, ftb->state=UNINITIALIZED; ftb->info=info; ftb->keynr=keynr; - ftb->charset= ((keynr==NO_SUCH_KEY) ? - default_charset_info : info->s->keyinfo[keynr].seg->charset); + ftb->charset=cs; + DBUG_ASSERT(keynr==NO_SUCH_KEY || cs == info->s->keyinfo[keynr].seg->charset); ftb->with_scan=0; ftb->lastpos=HA_OFFSET_ERROR; bzero(& ftb->no_dupes, sizeof(TREE)); diff --git a/myisam/ft_static.c b/myisam/ft_static.c index 7168406d027..994a94d0c49 100644 --- a/myisam/ft_static.c +++ b/myisam/ft_static.c @@ -55,11 +55,12 @@ const struct _ft_vft _ft_vft_boolean = { FT_INFO *ft_init_search(uint flags, void *info, uint keynr, - byte *query, uint query_len, byte *record) + byte *query, uint query_len, CHARSET_INFO *cs, + byte *record) { FT_INFO *res; if (flags & FT_BOOL) - res= ft_init_boolean_search((MI_INFO *)info, keynr, query, query_len); + res= ft_init_boolean_search((MI_INFO *)info, keynr, query, query_len,cs); else res= ft_init_nlq_search((MI_INFO *)info, keynr, query, query_len, flags, record); diff --git a/myisam/ftdefs.h b/myisam/ftdefs.h index e7a0829e140..ddb9fbfead2 100644 --- a/myisam/ftdefs.h +++ b/myisam/ftdefs.h @@ -131,7 +131,7 @@ FT_WORD * _mi_ft_parserecord(MI_INFO *, uint, const byte *); uint _mi_ft_parse(TREE *, MI_INFO *, uint, const byte *, my_bool); FT_INFO *ft_init_nlq_search(MI_INFO *, uint, byte *, uint, uint, byte *); -FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint); +FT_INFO *ft_init_boolean_search(MI_INFO *, uint, byte *, uint, CHARSET_INFO *); extern const struct _ft_vft _ft_vft_nlq; int ft_nlq_read_next(FT_INFO *, char *); diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index 972d6b18e19..1e6cf2f4ada 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -88,8 +88,12 @@ class ha_myisam: public handler ft_handler->please->reinit_search(ft_handler); return 0; } - FT_INFO *ft_init_ext(uint flags, uint inx,const byte *key, uint keylen) - { return ft_init_search(flags,file,inx,(byte*) key,keylen, table->record[0]); } + FT_INFO *ft_init_ext(uint flags, uint inx,String *key) + { + return ft_init_search(flags,file,inx, + (byte *)key->ptr(), key->length(), key->charset(), + table->record[0]); + } int ft_read(byte *buf); int 
rnd_init(bool scan); int rnd_next(byte *buf); diff --git a/sql/handler.h b/sql/handler.h index 245defe61e0..0426312f404 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -373,8 +373,7 @@ public: int compare_key(key_range *range); virtual int ft_init() { return HA_ERR_WRONG_COMMAND; } void ft_end() { ft_handler=NULL; } - virtual FT_INFO *ft_init_ext(uint flags,uint inx,const byte *key, - uint keylen) + virtual FT_INFO *ft_init_ext(uint flags, uint inx,String *key) { return NULL; } virtual int ft_read(byte *buf) { return HA_ERR_WRONG_COMMAND; } virtual int rnd_next(byte *buf)=0; diff --git a/sql/item_func.cc b/sql/item_func.cc index bff49541252..85cd1c693b7 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -3047,9 +3047,7 @@ void Item_func_match::init_search(bool no_order) if (join_key && !no_order) flags|=FT_SORTED; - ft_handler=table->file->ft_init_ext(flags, key, - (byte*) ft_tmp->ptr(), - ft_tmp->length()); + ft_handler=table->file->ft_init_ext(flags, key, ft_tmp); if (join_key) table->file->ft_handler=ft_handler; @@ -3091,12 +3089,12 @@ bool Item_func_match::fix_fields(THD *thd, TABLE_LIST *tlist, Item **ref) } /* Check that all columns come from the same table. - We've already checked that columns in MATCH are fields so + We've already checked that columns in MATCH are fields so PARAM_TABLE_BIT can only appear from AGAINST argument. */ if ((used_tables_cache & ~PARAM_TABLE_BIT) != item->used_tables()) key=NO_SUCH_KEY; - + if (key == NO_SUCH_KEY && !(flags & FT_BOOL)) { my_error(ER_WRONG_ARGUMENTS,MYF(0),"MATCH"); From 8ed40c4b09bb4c56f17a1432fc9024a5dfc4e04a Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 19:12:15 +0400 Subject: [PATCH 36/53] Embedded version of test fixed mysql-test/r/insert_select.result.es: Test.es fixed --- mysql-test/r/insert_select.result.es | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mysql-test/r/insert_select.result.es b/mysql-test/r/insert_select.result.es index 9e11402733d..9cac6d31b8f 100644 --- a/mysql-test/r/insert_select.result.es +++ b/mysql-test/r/insert_select.result.es @@ -633,3 +633,15 @@ No Field Count 0 1 100 0 2 100 drop table t1, t2; +CREATE TABLE t1 ( +ID int(11) NOT NULL auto_increment, +NO int(11) NOT NULL default '0', +SEQ int(11) NOT NULL default '0', +PRIMARY KEY (ID), +KEY t1$NO (SEQ,NO) +) ENGINE=MyISAM; +INSERT INTO t1 (SEQ, NO) SELECT "1" AS SEQ, IF(MAX(NO) IS NULL, 0, MAX(NO)) + 1 AS NO FROM t1 WHERE (SEQ = 1); +select SQL_BUFFER_RESULT * from t1 WHERE (SEQ = 1); +ID NO SEQ +1 1 1 +drop table t1; From ededf83143df115f5feaabbbcfeea2494d7d65d5 Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 22:43:54 +0100 Subject: [PATCH 37/53] Fix for BUG#8055 "Trouble with replication from temporary tables and ignores": when we close the session's temp tables at session end, we automatically write to binlog *one* DROP TEMPORARY TABLE *per tmp table*. mysql-test/r/drop_temp_table.result: result update (note: one DROP TEMPORARY TABLE per tmp table) mysql-test/t/drop_temp_table.test: checking that we have one DROP TEMPORARY TABLE per tmp table now, not one multi-table DROP. Hiding columns Log_pos/End_log_pos per Monty's request. sql/sql_base.cc: When we close the session's temp tables at session end, we automatically write to binlog one DROP TEMPORARY TABLE per tmp table, not one single multi-table DROP TEMPORARY TABLE (because it causes problems if slave has --replicate*table rules). 
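In other words (a minimal standalone sketch, not the actual close_temporary_tables() code in sql_base.cc): the remaining temporary tables are walked one by one and a separate single-table DROP statement is produced for each, so a slave configured with --replicate-*-table rules can filter every statement on its own table name.

  #include <cstdio>

  // Simplified temp-table list; in the server this is thd->temporary_tables.
  struct TmpTable { const char *db; const char *name; TmpTable *next; };

  int main()
  {
    TmpTable t2 = { "drop-temp+table-test", "shortn2", 0 };
    TmpTable t1 = { "drop-temp+table-test", "shortn1", &t2 };

    char query[512];
    for (TmpTable *t = &t1; t; t = t->next)
    {
      snprintf(query, sizeof(query),
               "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `%s`.`%s`",
               t->db, t->name);
      printf("%s\n", query);   // the server writes one binlog event per table
    }
    return 0;
  }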
--- mysql-test/r/drop_temp_table.result | 16 ++++--- mysql-test/t/drop_temp_table.test | 3 ++ sql/sql_base.cc | 66 ++++++++++++++--------------- 3 files changed, 45 insertions(+), 40 deletions(-) diff --git a/mysql-test/r/drop_temp_table.result b/mysql-test/r/drop_temp_table.result index 266196877c8..a486964feb2 100644 --- a/mysql-test/r/drop_temp_table.result +++ b/mysql-test/r/drop_temp_table.result @@ -1,7 +1,9 @@ reset master; create database `drop-temp+table-test`; use `drop-temp+table-test`; +create temporary table shortn1 (a int); create temporary table `table:name` (a int); +create temporary table shortn2 (a int); select get_lock("a",10); get_lock("a",10) 1 @@ -10,9 +12,13 @@ get_lock("a",10) 1 show binlog events; Log_name Pos Event_type Server_id Orig_log_pos Info -master-bin.000001 4 Start 1 4 Server ver: VERSION, Binlog ver: 3 -master-bin.000001 79 Query 1 79 create database `drop-temp+table-test` -master-bin.000001 168 Query 1 168 use `drop-temp+table-test`; create temporary table `table:name` (a int) -master-bin.000001 262 Query 1 262 use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name` -master-bin.000001 391 Query 1 391 use `drop-temp+table-test`; DO RELEASE_LOCK("a") +master-bin.000001 # Start 1 # Server ver: VERSION, Binlog ver: 3 +master-bin.000001 # Query 1 # create database `drop-temp+table-test` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn1 (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table `table:name` (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; create temporary table shortn2 (a int) +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn2` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`table:name` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `drop-temp+table-test`.`shortn1` +master-bin.000001 # Query 1 # use `drop-temp+table-test`; DO RELEASE_LOCK("a") drop database `drop-temp+table-test`; diff --git a/mysql-test/t/drop_temp_table.test b/mysql-test/t/drop_temp_table.test index 1a7d8796bb3..dcd95721179 100644 --- a/mysql-test/t/drop_temp_table.test +++ b/mysql-test/t/drop_temp_table.test @@ -4,7 +4,9 @@ connection con1; reset master; create database `drop-temp+table-test`; use `drop-temp+table-test`; +create temporary table shortn1 (a int); create temporary table `table:name` (a int); +create temporary table shortn2 (a int); select get_lock("a",10); disconnect con1; @@ -15,5 +17,6 @@ connection con2; select get_lock("a",10); let $VERSION=`select version()`; --replace_result $VERSION VERSION +--replace_column 2 # 5 # show binlog events; drop database `drop-temp+table-test`; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 7434897ab90..fe1f268e277 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -485,62 +485,58 @@ void close_temporary(TABLE *table,bool delete_table) void close_temporary_tables(THD *thd) { TABLE *table,*next; - char *query, *end; - uint query_buf_size; - bool found_user_tables = 0; + char *query, *name_in_query, *end; + uint greatest_key_length= 0; if (!thd->temporary_tables) return; + /* + We write a DROP TEMPORARY TABLE for each temp table left, so that our + replication slave can clean them up. 
Not one multi-table DROP TABLE binlog + event: this would cause problems if slave uses --replicate-*-table. + */ LINT_INIT(end); - query_buf_size= 50; // Enough for DROP ... TABLE IF EXISTS + /* We'll re-use always same buffer so make it big enough for longest name */ for (table=thd->temporary_tables ; table ; table=table->next) - /* - We are going to add 4 ` around the db/table names, so 1 does not look - enough; indeed it is enough, because table->key_length is greater (by 8, - because of server_id and thread_id) than db||table. - */ - query_buf_size+= table->key_length+1; + greatest_key_length= max(greatest_key_length, table->key_length); - if ((query = alloc_root(thd->mem_root, query_buf_size))) + if ((query = alloc_root(thd->mem_root, greatest_key_length+50))) // Better add "if exists", in case a RESET MASTER has been done - end=strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS "); + name_in_query= strmov(query, "DROP /*!40005 TEMPORARY */ TABLE IF EXISTS `"); for (table=thd->temporary_tables ; table ; table=next) { - if (query) // we might be out of memory, but this is not fatal + /* + In we are OOM for 'query' this is not fatal. We skip temporary tables + not created directly by the user. + */ + if (query && mysql_bin_log.is_open() && (table->real_name[0] != '#')) { - // skip temporary tables not created directly by the user - if (table->real_name[0] != '#') - found_user_tables = 1; /* Here we assume table_cache_key always starts with \0 terminated db name */ - end = strxmov(end,"`",table->table_cache_key,"`.`", - table->real_name,"`,", NullS); + end = strxmov(name_in_query, table->table_cache_key, "`.`", + table->real_name, "`", NullS); + Query_log_event qinfo(thd, query, (ulong)(end-query), 0, FALSE); + /* + Imagine the thread had created a temp table, then was doing a SELECT, and + the SELECT was killed. Then it's not clever to mark the statement above as + "killed", because it's not really a statement updating data, and there + are 99.99% chances it will succeed on slave. And, if thread is + killed now, it's not clever either. + If a real update (one updating a persistent table) was killed on the + master, then this real update will be logged with error_code=killed, + rightfully causing the slave to stop. + */ + qinfo.error_code= 0; + mysql_bin_log.write(&qinfo); } next=table->next; close_temporary(table); } - if (query && found_user_tables && mysql_bin_log.is_open()) - { - /* The -1 is to remove last ',' */ - thd->clear_error(); - Query_log_event qinfo(thd, query, (ulong)(end-query)-1, 0, FALSE); - /* - Imagine the thread had created a temp table, then was doing a SELECT, and - the SELECT was killed. Then it's not clever to mark the statement above as - "killed", because it's not really a statement updating data, and there - are 99.99% chances it will succeed on slave. - If a real update (one updating a persistent table) was killed on the - master, then this real update will be logged with error_code=killed, - rightfully causing the slave to stop. - */ - qinfo.error_code= 0; - mysql_bin_log.write(&qinfo); - } thd->temporary_tables=0; } From 987e620d6376ae3b9047d1185dca7141585173ac Mon Sep 17 00:00:00 2001 From: unknown Date: Fri, 4 Feb 2005 23:07:10 +0100 Subject: [PATCH 38/53] Backport of ChangeSet 1.1845 05/02/04 13:53:16 guilhem@mysql.com +1 -0 from 5.0. 
Proposal to fix this problem: when using libmysqlclient, you must call mysql_server_end() to nicely free memory at the end of your program; it however sounds weird to call a function named *SERVER_end* when you're the CLIENT (you're not ending the server, you're ending your ability to talk to servers). So here I add two defines which should be more generic names. Our manual mentions these functions only for libmysqld API so needs some fixing, and then we can close BUG#8099 and BUG#6149. include/mysql.h: Creating synonyms (defines): mysql_library_init for mysql_server_init, mysql_library_end for mysql_server_end; these new names are more generic, so suitable when using libmysqlclient as well as libmysqld. --- include/mysql.h | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/include/mysql.h b/include/mysql.h index 2c0197e2300..d8a56126756 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -334,6 +334,17 @@ typedef struct st_mysql_parameters */ int STDCALL mysql_server_init(int argc, char **argv, char **groups); void STDCALL mysql_server_end(void); +/* + mysql_server_init/end need to be called when using libmysqld or + libmysqlclient (exactly, mysql_server_init() is called by mysql_init() so + you don't need to call it explicitely; but you need to call + mysql_server_end() to free memory). The names are a bit misleading + (mysql_SERVER* to be used when using libmysqlCLIENT). So we add more general + names which suit well whether you're using libmysqld or libmysqlclient. We + intend to promote these aliases over the mysql_server* ones. +*/ +#define mysql_library_init mysql_server_init +#define mysql_library_end mysql_server_end MYSQL_PARAMETERS *STDCALL mysql_get_parameters(void); From fe83a1938d51c2fe780631911282902aeae1c0a9 Mon Sep 17 00:00:00 2001 From: unknown Date: Sat, 5 Feb 2005 01:21:16 +0300 Subject: [PATCH 39/53] A fix for Bug#6273 "building fails on link": we should not use CLIENT_LIBS in mysql_config as CLIENT_LIBS point to builddir when we use the bundled zlib. acinclude.m4: Extend MYSQL_CHECK_ZLIB_WITH_COMPRESS m4 macro to substitute ZLIB_DEPS - this is a special version of ZLIB_LIBS to use in mysql_config configure.in: Remove NON_THREADED_CLIENT_LIBS which weren't really NON_THREADED_CLIENT_LIBS and use NON_THREADED_LIBS instead. AC_SUBST NON_THREADED_LIBS and STATIC_NSS_FLAGS as they're now needed inside mysql_config.sh scripts/Makefile.am: Add STATIC_NSS_FLAGS, NON_THREADED_LIBS and ZLIB_DEPS to sed substitution list. scripts/mysql_config.sh: We can't use CLIENT_LIBS as in case when we use the bundled zlib it has a reference to $(top_builddir)/zlib. libs and libs_r now need to be specified explicitly. zlib/Makefile.am: Install libz.la in case it's used by MySQL: this way we guarantee that paths printed by mysql_config are valid in all cases. --- acinclude.m4 | 16 +++++++++++++--- configure.in | 16 ++++++++++------ scripts/Makefile.am | 3 +++ scripts/mysql_config.sh | 7 ++++--- zlib/Makefile.am | 2 +- 5 files changed, 31 insertions(+), 13 deletions(-) diff --git a/acinclude.m4 b/acinclude.m4 index d7e22332655..5ddd8952c42 100644 --- a/acinclude.m4 +++ b/acinclude.m4 @@ -194,6 +194,8 @@ dnl Define zlib paths to point at bundled zlib AC_DEFUN([MYSQL_USE_BUNDLED_ZLIB], [ ZLIB_INCLUDES="-I\$(top_srcdir)/zlib" ZLIB_LIBS="\$(top_builddir)/zlib/libz.la" +dnl Omit -L$pkglibdir as it's always in the list of mysql_config deps. +ZLIB_DEPS="-lz" zlib_dir="zlib" AC_SUBST([zlib_dir]) mysql_cv_compress="yes" @@ -235,8 +237,13 @@ dnl $prefix/lib. 
If zlib headers or binaries weren't found at $prefix, the dnl macro bails out with error. dnl dnl If the library was found, this function #defines HAVE_COMPRESS -dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include) and -dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz). +dnl and configure variables ZLIB_INCLUDES (i.e. -I/path/to/zlib/include), +dnl ZLIB_LIBS (i. e. -L/path/to/zlib/lib -lz) and ZLIB_DEPS which is +dnl used in mysql_config and is always the same as ZLIB_LIBS except to +dnl when we use the bundled zlib. In the latter case ZLIB_LIBS points to the +dnl build dir ($top_builddir/zlib), while mysql_config must point to the +dnl installation dir ($pkglibdir), so ZLIB_DEPS is set to point to +dnl $pkglibdir. AC_DEFUN([MYSQL_CHECK_ZLIB_WITH_COMPRESS], [ AC_MSG_CHECKING([for zlib compression library]) @@ -285,7 +292,11 @@ case $SYSTEM_TYPE in ;; esac if test "$mysql_cv_compress" = "yes"; then + if test "x$ZLIB_DEPS" = "x"; then + ZLIB_DEPS="$ZLIB_LIBS" + fi AC_SUBST([ZLIB_LIBS]) + AC_SUBST([ZLIB_DEPS]) AC_SUBST([ZLIB_INCLUDES]) AC_DEFINE([HAVE_COMPRESS], [1], [Define to enable compression support]) fi @@ -1039,7 +1050,6 @@ AC_MSG_CHECKING(for OpenSSL) echo "You can't use the --all-static link option when using openssl." exit 1 fi - NON_THREADED_CLIENT_LIBS="$NON_THREADED_CLIENT_LIBS $openssl_libs" else AC_MSG_RESULT(no) if test ! -z "$openssl_includes" diff --git a/configure.in b/configure.in index caa42004736..665029accb3 100644 --- a/configure.in +++ b/configure.in @@ -924,9 +924,11 @@ if test "$ac_cv_header_termio_h" = "no" -a "$ac_cv_header_termios_h" = "no" then AC_CHECK_FUNC(gtty, , AC_CHECK_LIB(compat, gtty)) fi -# We make a special variable for client library's to avoid including -# thread libs in the client. -NON_THREADED_CLIENT_LIBS="$LIBS $ZLIB_LIBS" + +# We make a special variable for non-threaded version of LIBS to avoid +# including thread libs into non-threaded version of MySQL client library. +# Later in this script LIBS will be augmented with a threads library. +NON_THREADED_LIBS="$LIBS" AC_MSG_CHECKING([for int8]) case $SYSTEM_TYPE in @@ -1502,7 +1504,7 @@ then if test -f /usr/lib/libxnet.so -a "$SYSTEM_TYPE" = "sni-sysv4" then LIBS="-lxnet $LIBS" - NON_THREADED_CLIENT_LIBS="$NON_THREADED_CLIENT_LIBS -lxnet" + NON_THREADED_LIBS="-lxnet $NON_THREADED_LIBS" with_named_thread="-Kthread $LDFLAGS -lxnet" LD_FLAGS="" CFLAGS="-Kthread $CFLAGS" @@ -2826,7 +2828,7 @@ dnl This probably should be cleaned up more - for now the threaded dnl client is just using plain-old libs. sql_client_dirs="libmysql strings regex client" linked_client_targets="linked_libmysql_sources" -CLIENT_LIBS=$NON_THREADED_CLIENT_LIBS + if test "$THREAD_SAFE_CLIENT" != "no" then sql_client_dirs="libmysql_r $sql_client_dirs" @@ -2834,9 +2836,11 @@ then AC_DEFINE([THREAD_SAFE_CLIENT], [1], [Should be client be thread safe]) fi -CLIENT_LIBS="$CLIENT_LIBS $STATIC_NSS_FLAGS" +CLIENT_LIBS="$NON_THREADED_LIBS $openssl_libs $ZLIB_LIBS $STATIC_NSS_FLAGS" AC_SUBST(CLIENT_LIBS) +AC_SUBST(NON_THREADED_LIBS) +AC_SUBST(STATIC_NSS_FLAGS) AC_SUBST(sql_client_dirs) AC_SUBST(linked_client_targets) diff --git a/scripts/Makefile.am b/scripts/Makefile.am index 71b70fc0e4a..d5337df35b1 100644 --- a/scripts/Makefile.am +++ b/scripts/Makefile.am @@ -140,6 +140,9 @@ SUFFIXES = .sh -e 's!@''IS_LINUX''@!@IS_LINUX@!' \ -e "s!@""CONF_COMMAND""@!@CONF_COMMAND@!" \ -e 's!@''MYSQLD_USER''@!@MYSQLD_USER@!' \ + -e 's!@''STATIC_NSS_FLAGS''@!@STATIC_NSS_FLAGS@!' \ + -e 's!@''NON_THREADED_LIBS''@!@NON_THREADED_LIBS@!' 
\ + -e 's!@''ZLIB_DEPS''@!@ZLIB_DEPS@!' \ -e "s!@MAKE@!$(MAKE)!" \ $< > $@-t @CHMOD@ +x $@-t diff --git a/scripts/mysql_config.sh b/scripts/mysql_config.sh index 90418de3d1d..a5c8af5ecb2 100644 --- a/scripts/mysql_config.sh +++ b/scripts/mysql_config.sh @@ -82,13 +82,14 @@ version='@VERSION@' socket='@MYSQL_UNIX_ADDR@' port='@MYSQL_TCP_PORT@' ldflags='@LDFLAGS@' -client_libs='@CLIENT_LIBS@' # Create options -libs="$ldflags -L$pkglibdir -lmysqlclient $client_libs" +libs="$ldflags -L$pkglibdir -lmysqlclient @ZLIB_DEPS@ @NON_THREADED_LIBS@" +libs="$libs @openssl_libs@ @STATIC_NSS_FLAGS@" libs=`echo "$libs" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'` -libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @LIBS@ @ZLIB_LIBS@ @openssl_libs@" + +libs_r="$ldflags -L$pkglibdir -lmysqlclient_r @ZLIB_DEPS@ @LIBS@ @openssl_libs@" libs_r=`echo "$libs_r" | sed -e 's; \+; ;g' | sed -e 's;^ *;;' | sed -e 's; *\$;;'` cflags="-I$pkgincludedir @CFLAGS@ " #note: end space! include="-I$pkgincludedir" diff --git a/zlib/Makefile.am b/zlib/Makefile.am index 58d3811cd7c..e94d184a841 100644 --- a/zlib/Makefile.am +++ b/zlib/Makefile.am @@ -16,7 +16,7 @@ # Process this file with automake to create Makefile.in -noinst_LTLIBRARIES=libz.la +pkglib_LTLIBRARIES=libz.la noinst_HEADERS= crc32.h deflate.h inffast.h inffixed.h inflate.h \ inftrees.h trees.h zconf.h zlib.h zutil.h From 37b4b89ebe3beb834bcea3b4481204f7bb4d0ade Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 6 Feb 2005 10:00:29 +0100 Subject: [PATCH 40/53] bug8262 - ndb crash if scan is poped from queue before all attrinfo has arrived ndb/src/kernel/blocks/dblqh/DblqhMain.cpp: only restart scan if all attrinfo has arrived otherwise just set remove from queue and set state ndb/src/kernel/blocks/dbtc/DbtcMain.cpp: new error insert - force send attrinfo ndb/test/ndbapi/testScan.cpp: new test bug8262 many threads - scanning small tables ndb/test/run-test/daily-basic-tests.txt: run new test case in autotest --- ndb/src/kernel/blocks/dblqh/DblqhMain.cpp | 19 ++++++++++---- ndb/src/kernel/blocks/dbtc/DbtcMain.cpp | 4 +++ ndb/test/ndbapi/testScan.cpp | 30 ++++++++++++++++++++--- ndb/test/run-test/daily-basic-tests.txt | 4 +++ 4 files changed, 49 insertions(+), 8 deletions(-) diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index 8bbbc72a38d..c79f4dfc6c7 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -8719,13 +8719,14 @@ void Dblqh::finishScanrec(Signal* signal) return; } + ndbrequire(restart.p->scanState == ScanRecord::IN_QUEUE); + ScanRecordPtr tmpScan = scanptr; TcConnectionrecPtr tmpTc = tcConnectptr; tcConnectptr.i = restart.p->scanTcrec; ptrCheckGuard(tcConnectptr, ctcConnectrecFileSize, tcConnectionrec); restart.p->scanNumber = scanNumber; - restart.p->scanState = ScanRecord::WAIT_ACC_SCAN; queue.remove(restart); scans.add(restart); @@ -8740,10 +8741,18 @@ void Dblqh::finishScanrec(Signal* signal) ndbout_c("adding-r (%d %d)", restart.p->scanNumber, restart.p->fragPtrI); #endif } - - scanptr = restart; - continueAfterReceivingAllAiLab(signal); - + + restart.p->scanState = ScanRecord::SCAN_FREE; // set in initScanRec + if(tcConnectptr.p->transactionState == TcConnectionrec::SCAN_STATE_USED) + { + jam(); + scanptr = restart; + continueAfterReceivingAllAiLab(signal); + } + else + { + ndbrequire(tcConnectptr.p->transactionState == TcConnectionrec::WAIT_SCAN_AI); + } scanptr = tmpScan; tcConnectptr = tmpTc; }//Dblqh::finishScanrec() diff --git 
a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp index c804fa32bd2..97931041e2a 100644 --- a/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp +++ b/ndb/src/kernel/blocks/dbtc/DbtcMain.cpp @@ -9028,6 +9028,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal) scanFragptr.p->lqhBlockref = ref; scanFragptr.p->m_connectCount = getNodeInfo(tnodeid).m_connectCount; sendScanFragReq(signal, scanptr.p, scanFragptr.p); + if(ERROR_INSERTED(8035)) + globalTransporterRegistry.performSend(); attrbufptr.i = cachePtr.p->firstAttrbuf; while (attrbufptr.i != RNIL) { jam(); @@ -9037,6 +9039,8 @@ void Dbtc::execDIGETPRIMCONF(Signal* signal) attrbufptr.p, ref); attrbufptr.i = attrbufptr.p->attrbuf[ZINBUF_NEXT]; + if(ERROR_INSERTED(8035)) + globalTransporterRegistry.performSend(); }//while scanFragptr.p->scanFragState = ScanFragRec::LQH_ACTIVE; scanFragptr.p->startFragTimer(ctcTimer); diff --git a/ndb/test/ndbapi/testScan.cpp b/ndb/test/ndbapi/testScan.cpp index 22ec3fff327..f1018d29846 100644 --- a/ndb/test/ndbapi/testScan.cpp +++ b/ndb/test/ndbapi/testScan.cpp @@ -35,7 +35,8 @@ getTable(Ndb* pNdb, int i){ int runLoadTable(NDBT_Context* ctx, NDBT_Step* step){ - int records = ctx->getNumRecords(); + int records = ctx->getProperty("Rows", ctx->getNumRecords()); + HugoTransactions hugoTrans(*ctx->getTab()); if (hugoTrans.loadTable(GETNDB(step), records) != 0){ return NDBT_FAILED; @@ -264,7 +265,7 @@ int runVerifyTable(NDBT_Context* ctx, NDBT_Step* step){ int runScanRead(NDBT_Context* ctx, NDBT_Step* step){ int loops = ctx->getNumLoops(); - int records = ctx->getNumRecords(); + int records = ctx->getProperty("Rows", ctx->getNumRecords()); int parallelism = ctx->getProperty("Parallelism", 240); int abort = ctx->getProperty("AbortProb", 5); @@ -375,7 +376,20 @@ int runScanReadError(NDBT_Context* ctx, NDBT_Step* step){ restarter.insertErrorInAllNodes(0); return result; } - + +int +runInsertError(NDBT_Context* ctx, NDBT_Step* step){ + int error = ctx->getProperty("ErrorCode"); + NdbRestarter restarter; + + ctx->setProperty("ErrorCode", (Uint32)0); + if (restarter.insertErrorInAllNodes(error) != 0){ + ndbout << "Could not insert error in all nodes "<getNumLoops(); @@ -1221,6 +1235,16 @@ TESTCASE("ScanRead100", STEPS(runScanRead, 100); FINALIZER(runClearTable); } +TESTCASE("Scan-bug8262", + ""){ + TC_PROPERTY("Rows", 1); + TC_PROPERTY("ErrorCode", 8035); + INITIALIZER(runLoadTable); + INITIALIZER(runInsertError); // Will reset error code + STEPS(runScanRead, 25); + FINALIZER(runInsertError); + FINALIZER(runClearTable); +} TESTCASE("ScanRead40RandomTable", "Verify scan requirement: Scan with 40 simultaneous threads. 
"\ "Use random table for the scan"){ diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 87f86795370..c62908ae999 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -378,6 +378,10 @@ max-time: 500 cmd: testScan args: -n ScanRestart T1 +max-time: 500 +cmd: testScan +args: -l 100 -n Scan-bug8262 T7 + # OLD FLEX max-time: 500 cmd: flexBench From 3455bc53988cad19cabf204e2d0f4d3477f67b35 Mon Sep 17 00:00:00 2001 From: unknown Date: Sun, 6 Feb 2005 13:06:12 +0200 Subject: [PATCH 41/53] fixed test 'subselect' in case when innodb is not compiled in (thanks HF who niticed it) mysql-test/r/subselect.result: test depends on innodb moved from 'subselect' to 'subselect_innodb' mysql-test/r/subselect_innodb.result: test depends on innodb moved from 'subselect' to 'subselect_innodb' mysql-test/t/subselect.test: test depends on innodb moved from 'subselect' to 'subselect_innodb' mysql-test/t/subselect_innodb.test: test depends on innodb moved from 'subselect' to 'subselect_innodb' --- mysql-test/r/subselect.result | 12 ------------ mysql-test/r/subselect_innodb.result | 12 ++++++++++++ mysql-test/t/subselect.test | 14 -------------- mysql-test/t/subselect_innodb.test | 14 ++++++++++++++ 4 files changed, 26 insertions(+), 26 deletions(-) diff --git a/mysql-test/r/subselect.result b/mysql-test/r/subselect.result index 437fd624ae1..03dcc23c919 100644 --- a/mysql-test/r/subselect.result +++ b/mysql-test/r/subselect.result @@ -2196,15 +2196,3 @@ ERROR 42S22: Reference 'xx' not supported (forward reference in item list) select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx from DUAL; ERROR 42S22: Reference 'xx' not supported (forward reference in item list) drop table t1; -CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; -CREATE TABLE t2 LIKE t1; -INSERT INTO t1 VALUES (1,1,1); -INSERT INTO t2 VALUES (1,1,1); -PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having -count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; -EXECUTE my_stmt; -b count(*) -EXECUTE my_stmt; -b count(*) -deallocate prepare my_stmt; -drop table t1,t2; diff --git a/mysql-test/r/subselect_innodb.result b/mysql-test/r/subselect_innodb.result index 0b813a07a1d..0666fd76661 100644 --- a/mysql-test/r/subselect_innodb.result +++ b/mysql-test/r/subselect_innodb.result @@ -140,3 +140,15 @@ id date1 coworkerid description sum_used sum_remaining comments 6 2004-01-01 1 test 22 33 comment 7 2004-01-01 1 test 22 33 comment drop table t1; +CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; +CREATE TABLE t2 LIKE t1; +INSERT INTO t1 VALUES (1,1,1); +INSERT INTO t2 VALUES (1,1,1); +PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having +count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; +EXECUTE my_stmt; +b count(*) +EXECUTE my_stmt; +b count(*) +deallocate prepare my_stmt; +drop table t1,t2; diff --git a/mysql-test/t/subselect.test b/mysql-test/t/subselect.test index cdec080611d..55400dae0be 100644 --- a/mysql-test/t/subselect.test +++ b/mysql-test/t/subselect.test @@ -1465,17 +1465,3 @@ select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx; -- error 1247 select 1 = ALL (select 1 from t1 where 1 = xx ), 1 as xx from DUAL; drop table t1; - -# -# cleaning up of 
results of subselects (BUG#8125) -# -CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; -CREATE TABLE t2 LIKE t1; -INSERT INTO t1 VALUES (1,1,1); -INSERT INTO t2 VALUES (1,1,1); -PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having -count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; -EXECUTE my_stmt; -EXECUTE my_stmt; -deallocate prepare my_stmt; -drop table t1,t2; diff --git a/mysql-test/t/subselect_innodb.test b/mysql-test/t/subselect_innodb.test index aa7fe138876..5d796988178 100644 --- a/mysql-test/t/subselect_innodb.test +++ b/mysql-test/t/subselect_innodb.test @@ -145,3 +145,17 @@ SELECT DISTINCT FROM t1; select * from t1; drop table t1; + +# +# cleaning up of results of subselects (BUG#8125) +# +CREATE TABLE `t1` ( `a` char(3) NOT NULL default '', `b` char(3) NOT NULL default '', `c` char(3) NOT NULL default '', PRIMARY KEY (`a`,`b`,`c`)) ENGINE=InnoDB; +CREATE TABLE t2 LIKE t1; +INSERT INTO t1 VALUES (1,1,1); +INSERT INTO t2 VALUES (1,1,1); +PREPARE my_stmt FROM "SELECT t1.b, count(*) FROM t1 group by t1.b having +count(*) > ALL (SELECT COUNT(*) FROM t2 WHERE t2.a=1 GROUP By t2.b)"; +EXECUTE my_stmt; +EXECUTE my_stmt; +deallocate prepare my_stmt; +drop table t1,t2; From b19ff40dda17c7ee6cfd3c7019c59cdb77fdbd6f Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 00:12:46 -0600 Subject: [PATCH 42/53] Do-solaris-pkg: Perl script to create Solaris installation packages. --- Build-tools/Do-solaris-pkg | 180 +++++++++++++++++++++++++++++++++++++ 1 file changed, 180 insertions(+) create mode 100644 Build-tools/Do-solaris-pkg diff --git a/Build-tools/Do-solaris-pkg b/Build-tools/Do-solaris-pkg new file mode 100644 index 00000000000..5b7326f78e2 --- /dev/null +++ b/Build-tools/Do-solaris-pkg @@ -0,0 +1,180 @@ +#!/usr/bin/perl +# +# Script to create Solaris packages +# +$INTERACTIVE= 0; +$find = "/usr/bin/find"; +$pkgproto = "/usr/bin/pkgproto"; +$pkgmk = "/usr/bin/pkgmk -o"; +$pkgtrans = "/usr/bin/pkgtrans"; +$temp = "/tmp/prototype$$"; +$prototype = "prototype"; +$pkginfo = "pkginfo"; +($gid ,$pkg ,$uid ,$userInfo ,$email ,$quota ,$group ,$passwd +,$category ,$userHome ,$vendor ,$loginShell ,$pstamp ,$basedir)=(); + +$fullname = shift @ARGV; +$fullname or die "No package name was specified"; +-d $fullname or die "That directory is not present!"; + +$fullname =~ s,/+$,,; # Remove ending slash if any + +$pkgdir = `cd ../pkgs; pwd`; +$pwd = `pwd`; +if ($pwd =~ '\/usr\/local') { + $pwd = $`; +} +die "Wrong location, please cd to /usr/local/ and run again.\n" + if ($pwd eq ""); + +system ("$find . 
-print | $pkgproto > $temp"); +open (PREPROTO,"<$temp") or die "Unable to read prototype information ($!)\n"; +open (PROTO,">$prototype") or die "Unable to write file prototype ($!)\n"; +print PROTO "i pkginfo=./$pkginfo\n"; +while (<PREPROTO>) { + # Read the prototype information from /tmp/prototype$$ + chomp; + $thisline = $_; + if ($thisline =~ " prototype " + or $thisline =~ " pkginfo ") { + # We don't need that line + } elsif ($thisline =~ "^[fd] ") { + # Change the ownership for files and directories + ($dir, $none, $file, $mode, $user, $group) = split / /,$thisline; + print PROTO "$dir $none $file $mode bin bin\n"; + } else { + # Symlinks and other stuff should be printed as well of course + print PROTO "$thisline\n"; + } +} +close PROTO; +close PREPROTO; + +# Clean up +unlink $temp or warn "Unable to remove tempfile ($!)\n"; + +# Now we can start building the package +# +# First get some info + +$fullname =~ /^((mysql)-.+)-([\d\.]+)-.+$/ + or die "This name is not what I expected - \"$fullname\""; + +$default{"name"}= $2; +$default{"version"}= $3; +$default{"pkg"}= $1; +$default{"arch"} = `uname -m`; +chomp $default{"arch"}; +$default{"category"}= "application"; +$default{"vendor"}= "MySQL AB"; +$default{"email"}= "build\@mysql.com"; +$default{"pstamp"}= "MySQL AB Build Engineers"; +$os = `uname -r`; +$os =~ '\.'; +$os = "sol$'"; +chomp $os; +$default{"basedir"}= "/usr/local"; +$default{"packagename"}= $fullname; + +# Check for correctness of guessed values by user input + +%questions = ( + pkg => "Please give the name for this package", + name => "Now enter the real name for this package", + arch => "What architecture did you build the package on?", + version => "Enter the version number of the package", + category => "What category does this package belong to?", + vendor => "Who is the vendor of this package?", + email => "Enter the email address for contact", + pstamp => "Enter your own name", + basedir => "What is the basedir this package will install into?", + packagename => "How should I call the packagefile?", +); + +@vars = qw(pkg name arch version category vendor email pstamp basedir + packagename); +foreach $varname (@vars) { + getvar_noq($varname); +} + +if ($INTERACTIVE) { + while (!&chkvar()) { + print "\n"; + foreach $varname (@vars) { + getvar($varname); + } + @vars = qw(pkg name arch version category vendor email pstamp basedir + packagename); + } +} +$classes = "none"; + +# Create the pkginfo file + +print "\nNow creating $pkginfo file\n"; +open (PKGINFO,">$pkginfo") || die "Unable to open $pkginfo for writing ($!)\n"; +print PKGINFO "PKG=\"$pkg\"\n"; +print PKGINFO "NAME=\"$name\"\n"; +print PKGINFO "ARCH=\"$arch\"\n"; +print PKGINFO "VERSION=\"$version\"\n"; +print PKGINFO "CATEGORY=\"$category\"\n"; +print PKGINFO "VENDOR=\"$vendor\"\n"; +print PKGINFO "EMAIL=\"$email\"\n"; +print PKGINFO "PSTAMP=\"$pstamp\"\n"; +print PKGINFO "BASEDIR=\"$basedir\"\n"; +print PKGINFO "CLASSES=\"$classes\"\n"; +close PKGINFO; +print "Done.\n"; + +# Build and zip the package + +print "Building package\n"; +system ("$pkgmk -r `pwd`"); +system ("(cd /var/spool/pkg; $pkgtrans -s -o `pwd` /tmp/$packagename $pkg)"); +system ("gzip /tmp/$packagename"); + +# Clean-up the spool area +system ("(cd /var/spool/pkg; rm -rf $pkg)"); +unlink $pkginfo; +unlink $prototype; +system ("mv /tmp/${packagename}.gz $pkgdir"); +print "Done.
(~/packaging/pkgs/$packagename.gz)\n"; +# The subroutines +sub chkvar { + print "\n"; + + print "PKG=$pkg\n"; + print "NAME=$name\n"; + print "ARCH=$arch\n"; + print "VERSION=$version\n"; + print "CATEGORY=$category\n"; + print "VENDOR=$vendor\n"; + print "EMAIL=$email\n"; + print "PSTAMP=$pstamp\n"; + print "BASEDIR=$basedir\n"; + print "PACKAGENAME=$packagename\n"; + + + print "\nIs this information correct? [Y/n]: "; + my $answer= <STDIN>; + chomp $answer; + $answer= 'Y' if ($answer eq ""); + $answer= uc $answer; + my $res= ($answer eq 'Y')? 1 : 0; + return($res); +} + +sub getvar_noq { + my $questionname = "@_"; + $$questionname = $default{$questionname}; +} + +sub getvar { + my $questionname = "@_"; + my $ucquestionname= uc $questionname; + print "$ucquestionname: $questions{$questionname} [$default{\"$questionname\"}]: "; + my $answer = <STDIN>; + chomp $answer; + $$questionname = $answer; + $$questionname = $default{$questionname} if ($$questionname eq ""); +} From d3a6f130e212d3a306c624ee777704b30f9ceef8 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 01:27:58 -0600 Subject: [PATCH 43/53] Do-solaris-pkg: Deposit the new .pkg.gz into the ~/$hostname/ directory Build-tools/Do-solaris-pkg: Deposit the new .pkg.gz into the ~/$hostname/ directory --- Build-tools/Do-solaris-pkg | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/Build-tools/Do-solaris-pkg b/Build-tools/Do-solaris-pkg index 5b7326f78e2..374113b28a1 100644 --- a/Build-tools/Do-solaris-pkg +++ b/Build-tools/Do-solaris-pkg @@ -3,6 +3,7 @@ # Script to create Solaris packages # $INTERACTIVE= 0; +$hostname= `hostname`; $find = "/usr/bin/find"; $pkgproto = "/usr/bin/pkgproto"; $pkgmk = "/usr/bin/pkgmk -o"; $pkgtrans = "/usr/bin/pkgtrans"; @@ -19,7 +20,7 @@ $fullname or die "No package name was specified"; $fullname =~ s,/+$,,; # Remove ending slash if any -$pkgdir = `cd ../pkgs; pwd`; +$pkgdir= `cd ../$hostname; pwd`; $pwd = `pwd`; if ($pwd =~ '\/usr\/local') { $pwd = $`; @@ -74,7 +75,7 @@ $os =~ '\.'; $os = "sol$'"; chomp $os; $default{"basedir"}= "/usr/local"; -$default{"packagename"}= $fullname; +$default{"packagename"}= $fullname . '.pkg'; @@ -138,7 +139,7 @@ system ("(cd /var/spool/pkg; rm -rf $pkg)"); unlink $pkginfo; unlink $prototype; system ("mv /tmp/${packagename}.gz $pkgdir"); -print "Done. (~/packaging/pkgs/$packagename.gz)\n"; +print "Done.
(~/$hostname/$packagename.gz)\n"; # The subroutines sub chkvar { print "\n"; From 4fd3b1cf41dc92dbd99a71b67beeae53eb3ec133 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 08:51:27 +0100 Subject: [PATCH 44/53] Corrected fragmentation calculation to take into account that LQH creates "2 fragments per fragment" in 4.1 and 5.0 +added some comments to ndb fragmentation calculation --- sql/ha_ndbcluster.cc | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index 9f0da616289..b4e05ace27b 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3513,18 +3513,23 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) uint no_fragments; { #if MYSQL_VERSION_ID >= 50000 - uint acc_row_size= 25+2; + uint acc_row_size= 25 + /*safety margin*/ 2; #else uint acc_row_size= pk_length*4; /* add acc overhead */ - if (pk_length <= 8) - acc_row_size+= 25+2; /* main page will set the limit */ - else - acc_row_size+= 4+4; /* overflow page will set the limit */ + if (pk_length <= 8) /* main page will set the limit */ + acc_row_size+= 25 + /*safety margin*/ 2; + else /* overflow page will set the limit */ + acc_row_size+= 4 + /*safety margin*/ 4; #endif ulonglong acc_fragment_size= 512*1024*1024; ulonglong max_rows= form->max_rows; +#if MYSQL_VERSION_ID >= 50100 + no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1 + +1/*correct rounding*/)/2; +#else no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; +#endif } { uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); From e4add35d34b3d727067fee63ae409b20b641b1b2 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 02:18:12 -0600 Subject: [PATCH 45/53] Do-solaris-pkg: Minor tweaks to work properly Build-tools/Do-solaris-pkg: Minor tweaks to work properly --- Build-tools/Do-solaris-pkg | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Build-tools/Do-solaris-pkg b/Build-tools/Do-solaris-pkg index 374113b28a1..685a1f0923b 100644 --- a/Build-tools/Do-solaris-pkg +++ b/Build-tools/Do-solaris-pkg @@ -3,7 +3,7 @@ # Script to create Solaris packages # $INTERACTIVE= 0; -$hostname= `hostname`; +chomp ($hostname= `hostname`); $find = "/usr/bin/find"; $pkgproto = "/usr/bin/pkgproto"; $pkgmk = "/usr/bin/pkgmk -o"; $pkgtrans = "/usr/bin/pkgtrans"; @@ -75,7 +75,7 @@ $os =~ '\.'; $os = "sol$'"; chomp $os; $default{"basedir"}= "/usr/local"; -$default{"packagename"}= $fullname .
'.pkg'; +$default{"packagename"}= "${fullname}.pkg"; # Check for correctness of guessed values by userinput @@ -136,6 +136,8 @@ system ("gzip /tmp/$packagename"); # Clean-up the spool area system ("(cd /var/spool/pkg; rm -rf $pkg)"); +# Clean-up the ~/packaging/ area +system ("(rm -rf mysql*)"); unlink $pkginfo; unlink $prototype; system ("mv /tmp/${packagename}.gz $pkgdir"); From aa2ac91c975d77f2ea020849a4e24c1fc97db1bf Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 09:58:37 +0100 Subject: [PATCH 46/53] ha_ndbcluster.cc: corrected typo in previous changeset sql/ha_ndbcluster.cc: corrected typo in previous changeset --- sql/ha_ndbcluster.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index b4e05ace27b..9e34baae198 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -3525,10 +3525,10 @@ static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) ulonglong acc_fragment_size= 512*1024*1024; ulonglong max_rows= form->max_rows; #if MYSQL_VERSION_ID >= 50100 + no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; +#else no_fragments= ((max_rows*acc_row_size)/acc_fragment_size+1 +1/*correct rounding*/)/2; -#else - no_fragments= (max_rows*acc_row_size)/acc_fragment_size+1; #endif } { From f3f2ec2f91abc1c6a6486e5ca4f6e2fdbe20343d Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 13:25:03 +0100 Subject: [PATCH 47/53] compile-solaris-sparc-purify: Set CCLD as well BUILD/compile-solaris-sparc-purify: Set CCLD as well --- BUILD/compile-solaris-sparc-purify | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/BUILD/compile-solaris-sparc-purify b/BUILD/compile-solaris-sparc-purify index 0e530f75b60..5f5ba81396f 100755 --- a/BUILD/compile-solaris-sparc-purify +++ b/BUILD/compile-solaris-sparc-purify @@ -61,7 +61,7 @@ purifying_binaries () if [ -n "$cxxfilt" ] ; then opts="$opts -demangle-program=$cxxfilt" fi - opts="$opts -best-effort g++" + opts="$opts -best-effort" back=`pwd` cd $dir @@ -76,17 +76,17 @@ purifying_binaries () fi if [ -n "$mode" -a $mode = purify ] ; then - gmake CXXLD="purify $opts" $target + gmake CCLD="purify $opts gcc" CXXLD="purify $opts g++" $target mv $binary $binary-purify fi if [ -n "$mode" -a $mode = quantify ] ; then - gmake CXXLD="quantify $opts" $target + gmake CCLD="quantify $opts gcc" CXXLD="quantify $opts g++" $target mv $binary $binary-quantify fi if [ -n "$mode" -a $mode = purecov ] ; then - gmake CXXLD="purecov $opts" $target + gmake CCLD="purecov $opts gcc" CXXLD="purecov $opts g++" $target mv $binary $binary-purecov fi From e7119f62a8452678c9ab99bb1b57905dbbeb2978 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 16:25:07 +0100 Subject: [PATCH 48/53] mysql.spec.sh: removed typo moved pre to post for directory creation add changelog comments support-files/mysql.spec.sh: removed typo moved pre to post for directory creation add changelog comments --- support-files/mysql.spec.sh | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/support-files/mysql.spec.sh b/support-files/mysql.spec.sh index 0d72356731c..b06ba462b26 100644 --- a/support-files/mysql.spec.sh +++ b/support-files/mysql.spec.sh @@ -488,14 +488,7 @@ chmod -R og-rw $mysql_datadir/mysql sleep 2 -%pre ndb-storage -mysql_clusterdir=/var/lib/mysql-cluster - -# Create cluster directory if needed -if test ! 
-d $mysql_clusterdir; then mkdir -m755 $mysql_clusterdir; fi - - -%pre ndb-storage +%post ndb-storage mysql_clusterdir=/var/lib/mysql-cluster # Create cluster directory if needed @@ -701,6 +694,12 @@ fi # itself - note that they must be ordered by date (important when # merging BK trees) %changelog +* Monday Feb 7 2005 Tomas Ulin + +- enabled the "Ndbcluster" storage engine for the max binary +- added extra make install in ndb subdir after Max build to get ndb binaries +- added packages for ndbcluster storage engine + * Fri Jan 14 2005 Lenz Grimmer - replaced obsoleted "BuildPrereq" with "BuildRequires" instead From d6ed8cd7097391b55936a1dfec248da65e7f64f9 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 18:13:57 +0200 Subject: [PATCH 49/53] Fixed bug in HAVING when referring to RAND() through alias (BUG 8216) mysql-test/r/group_by.result: New test case mysql-test/r/user_var.result: Test changed (to be more correct) with bug fix mysql-test/t/group_by.test: Added test for HAVING bug sql/item_cmpfunc.cc: Fixed bug in HAVING when referring to RAND() sql/item_func.cc: Fixed bug in HAVING when referring to RAND() sql/item_row.cc: Fixed bug in HAVING when referring to RAND() sql/item_strfunc.cc: Fixed bug in HAVING when referring to RAND() sql/unireg.h: Added PSEUDO_TABLE_BITS for easy testing of real table reference --- mysql-test/r/group_by.result | 45 ++++++++++++++++++++++++++++------- mysql-test/r/user_var.result | 4 ++-- mysql-test/t/group_by.test | 32 ++++++++++++++++++------- sql/item_cmpfunc.cc | 46 ++++++++++++++++++++++++++++++++++-- sql/item_func.cc | 8 +++++-- sql/item_row.cc | 11 ++++++--- sql/item_strfunc.cc | 7 ++++-- sql/unireg.h | 2 ++ 8 files changed, 126 insertions(+), 29 deletions(-) diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index f92b3ea4f4d..17b1bb03d1d 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -629,15 +629,6 @@ explain SELECT i, COUNT(DISTINCT(i)) FROM t1 GROUP BY j ORDER BY NULL; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 6 Using filesort DROP TABLE t1; -create table t1 ( col1 int, col2 int ); -insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2); -select group_concat( distinct col1 ) as alias from t1 -group by col2 having alias like '%'; -alias -1,2 -1,2 -1 -drop table t1; create table t1 (a int); insert into t1 values(null); select min(a) is null from t1; min(a) is null 1 select min(a) is null or null from t1; min(a) is null or null 1 select 1 and min(a) is null from t1; 1 and min(a) is null 1 drop table t1; +create table t1 ( col1 int, col2 int ); +insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2); +select group_concat( distinct col1 ) as alias from t1 +group by col2 having alias like '%'; +alias +1,2 +1,2 +1 +drop table t1; +create table t1 (a integer, b integer, c integer); +insert into t1 (a,b) values (1,2),(1,3),(2,5); +select a, 0.1*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1; +a r2 r1 +1 1.0 2 +select a, rand()*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1; +a r2 r1 +1 1 2 +select a,sum(b) from t1 where a=1 group by c; +a sum(b) +1 5 +select a*sum(b) from t1 where a=1 group by c; +a*sum(b) +5 +select sum(a)*sum(b) from t1 where a=1 group by c; +sum(a)*sum(b) +10 +select a,sum(b) from t1 where a=1 group by c having a=1; +a sum(b) +1 5 +select a as d,sum(b) from t1 where a=1 group by c having d=1; +d sum(b) +1 5 +select sum(a)*sum(b) as d from t1 where a=1 group by c having d > 0; +d +10 +drop table t1; diff --git
a/mysql-test/r/user_var.result b/mysql-test/r/user_var.result index 81846391795..041d1b836b7 100644 --- a/mysql-test/r/user_var.result +++ b/mysql-test/r/user_var.result @@ -109,8 +109,8 @@ select @a:=0; select @a, @a:=@a+count(*), count(*), @a from t1 group by i; @a @a:=@a+count(*) count(*) @a 0 1 1 0 -0 2 2 0 -0 3 3 0 +0 3 2 0 +0 6 3 0 select @a:=0; @a:=0 0 diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index c0447b06303..379f668df1a 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -457,15 +457,6 @@ SELECT i, COUNT(DISTINCT(i)) FROM t1 GROUP BY j ORDER BY NULL; explain SELECT i, COUNT(DISTINCT(i)) FROM t1 GROUP BY j ORDER BY NULL; DROP TABLE t1; -# Test for BUG#5400: GROUP_CONCAT returns everything twice. -create table t1 ( col1 int, col2 int ); -insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2); -select group_concat( distinct col1 ) as alias from t1 - group by col2 having alias like '%'; - -drop table t1; - - #Test for BUG#6976: Aggregate functions have incorrect NULL-ness create table t1 (a int); insert into t1 values(null); @@ -474,3 +465,26 @@ select min(a) is null or null from t1; select 1 and min(a) is null from t1; drop table t1; +# Test for BUG#5400: GROUP_CONCAT returns everything twice. +create table t1 ( col1 int, col2 int ); +insert into t1 values (1,1),(1,2),(1,3),(2,1),(2,2); +select group_concat( distinct col1 ) as alias from t1 + group by col2 having alias like '%'; + +drop table t1; + +# +# Test BUG#8216 when referring in HAVING to n alias which is rand() function +# + +create table t1 (a integer, b integer, c integer); +insert into t1 (a,b) values (1,2),(1,3),(2,5); +select a, 0.1*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1; +select a, rand()*0+1 r2, sum(1) r1 from t1 where a = 1 group by a having r1>1 and r2=1; +select a,sum(b) from t1 where a=1 group by c; +select a*sum(b) from t1 where a=1 group by c; +select sum(a)*sum(b) from t1 where a=1 group by c; +select a,sum(b) from t1 where a=1 group by c having a=1; +select a as d,sum(b) from t1 where a=1 group by c having d=1; +select sum(a)*sum(b) as d from t1 where a=1 group by c having d > 0; +drop table t1; diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c5e6d520ab7..213286878a8 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1960,6 +1960,36 @@ bool Item_cond::walk(Item_processor processor, byte *arg) return Item_func::walk(processor, arg); } + +/* + Move SUM items out from item tree and replace with reference + + SYNOPSIS + split_sum_func() + thd Thread handler + ref_pointer_array Pointer to array of reference fields + fields All fields in select + + NOTES + This function is run on all expression (SELECT list, WHERE, HAVING etc) + that have or refer (HAVING) to a SUM expression. + + The split is done to get an unique item for each SUM function + so that we can easily find and calculate them. + (Calculation done by update_sum_func() and copy_sum_funcs() in + sql_select.cc) + + All found SUM items are added FIRST in the fields list and + we replace the item with a reference. + + We also replace all functions without side effects (like RAND() or UDF's) + that uses columns as arguments. + For functions with side effects, we just remember any fields referred + by the function to ensure that we get a copy of the field value for the + first accepted row. 
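To make the rule in the comment above easier to follow outside the server sources, here is a minimal, self-contained C++ sketch of the same decision. The names (item_kind, split_recursively, replace_with_ref) are stand-ins of my own, not the server's API, and a 64-bit table_map is assumed; only the predicate itself mirrors the patch.

#include <cstdint>
#include <cstdio>

typedef uint64_t table_map;

// Stand-ins for the pseudo-table bits; values correspond to the 64-bit case of
// the definitions added to sql/unireg.h in this patch.
static const table_map PARAM_TABLE_BIT     = (table_map) 1 << 61;
static const table_map OUTER_REF_TABLE_BIT = (table_map) 1 << 62;
static const table_map RAND_TABLE_BIT      = (table_map) 1 << 63;
static const table_map PSEUDO_TABLE_BITS   =
  PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | RAND_TABLE_BIT;

enum item_kind { SUM_FUNC_ITEM, REF_ITEM, OTHER_ITEM };

// Recurse into an item when it contains a SUM function or depends on a
// pseudo table (RAND(), outer reference, statement parameter).
static bool split_recursively(item_kind kind, bool with_sum_func,
                              table_map used_tables)
{
  return kind != SUM_FUNC_ITEM &&
         (with_sum_func || (used_tables & PSEUDO_TABLE_BITS));
}

// Otherwise replace the item with a reference when it is a SUM function, or
// when it reads real tables and is not already a reference.
static bool replace_with_ref(item_kind kind, table_map used_tables)
{
  return kind == SUM_FUNC_ITEM || (used_tables != 0 && kind != REF_ITEM);
}

int main()
{
  // A rand()*0+1 expression referred to from HAVING uses only RAND_TABLE_BIT,
  // so it is split and stored once; the HAVING alias then sees the same value
  // that was produced for the select list.
  std::printf("%d %d\n",
              (int) split_recursively(OTHER_ITEM, false, RAND_TABLE_BIT),
              (int) replace_with_ref(SUM_FUNC_ITEM, 0));
  return 0;
}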
This ensures that we can do things like + SELECT a*SUM(b) FROM t1 WHERE a=1 +*/ + void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, List &fields) { @@ -1969,10 +1999,22 @@ void Item_cond::split_sum_func(THD *thd, Item **ref_pointer_array, const_item_cache=0; while ((item=li++)) { - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) + /* with_sum_func is set for items that contains a SUM expression */ + if (item->type() != SUM_FUNC_ITEM && + (item->with_sum_func || + (item->used_tables() & PSEUDO_TABLE_BITS))) item->split_sum_func(thd, ref_pointer_array, fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) + else if (item->type() == SUM_FUNC_ITEM || + (item->used_tables() && item->type() != REF_ITEM)) { + /* + Replace item with a reference so that we can easily calculate + it (in case of sum functions) or copy it (in case of fields) + + The test above is to ensure we don't do a reference for things + that are constants or are not yet calculated as in: + SELECT RAND() as r1, SUM(a) as r2 FROM t1 HAVING r1 > 1 AND r2 > 0 + */ Item **ref= li.ref(); uint el= fields.elements; ref_pointer_array[el]= item; diff --git a/sql/item_func.cc b/sql/item_func.cc index bff49541252..c67ddfa179e 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -351,6 +351,7 @@ bool Item_func::walk (Item_processor processor, byte *argument) return (this->*processor)(argument); } + void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, List &fields) { @@ -358,9 +359,12 @@ void Item_func::split_sum_func(THD *thd, Item **ref_pointer_array, for (arg= args, arg_end= args+arg_count; arg != arg_end ; arg++) { Item *item=* arg; - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) + if (item->type() != SUM_FUNC_ITEM && + (item->with_sum_func || + (item->used_tables() & PSEUDO_TABLE_BITS))) item->split_sum_func(thd, ref_pointer_array, fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) + else if (item->type() == SUM_FUNC_ITEM || + (item->used_tables() && item->type() != REF_ITEM)) { uint el= fields.elements; ref_pointer_array[el]= item; diff --git a/sql/item_row.cc b/sql/item_row.cc index 4e4957b980e..0ace0fc0451 100644 --- a/sql/item_row.cc +++ b/sql/item_row.cc @@ -84,15 +84,20 @@ bool Item_row::fix_fields(THD *thd, TABLE_LIST *tabl, Item **ref) return 0; } + void Item_row::split_sum_func(THD *thd, Item **ref_pointer_array, List &fields) { Item **arg, **arg_end; for (arg= items, arg_end= items+arg_count; arg != arg_end ; arg++) { - if ((*arg)->with_sum_func && (*arg)->type() != SUM_FUNC_ITEM) - (*arg)->split_sum_func(thd, ref_pointer_array, fields); - else if ((*arg)->used_tables() || (*arg)->type() == SUM_FUNC_ITEM) + Item *item= *arg; + if (item->type() != SUM_FUNC_ITEM && + (item->with_sum_func || + (item->used_tables() & PSEUDO_TABLE_BITS))) + item->split_sum_func(thd, ref_pointer_array, fields); + else if (item->type() == SUM_FUNC_ITEM || + (item->used_tables() && item->type() != REF_ITEM)) { uint el= fields.elements; ref_pointer_array[el]=*arg; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 1fb68561374..b22b65eddd0 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1758,9 +1758,12 @@ String *Item_func_elt::val_str(String *str) void Item_func_make_set::split_sum_func(THD *thd, Item **ref_pointer_array, List &fields) { - if (item->with_sum_func && item->type() != SUM_FUNC_ITEM) + if (item->type() != SUM_FUNC_ITEM && + (item->with_sum_func || + (item->used_tables() & PSEUDO_TABLE_BITS))) item->split_sum_func(thd, 
ref_pointer_array, fields); - else if (item->used_tables() || item->type() == SUM_FUNC_ITEM) + else if (item->type() == SUM_FUNC_ITEM || + (item->used_tables() && item->type() != REF_ITEM)) { uint el= fields.elements; ref_pointer_array[el]=item; diff --git a/sql/unireg.h b/sql/unireg.h index 4ab2ba26b15..70df9a89c8f 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -72,6 +72,8 @@ #define PARAM_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-3)) #define OUTER_REF_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-2)) #define RAND_TABLE_BIT (((table_map) 1) << (sizeof(table_map)*8-1)) +#define PSEUDO_TABLE_BITS (PARAM_TABLE_BIT | OUTER_REF_TABLE_BIT | \ + RAND_TABLE_BIT) #define MAX_FIELDS 4096 /* Limit in the .frm file */ #define MAX_SORT_MEMORY (2048*1024-MALLOC_OVERHEAD) From ac630383f4c32565a50095115e088e6076ec5898 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 17:46:52 +0100 Subject: [PATCH 50/53] have test suite continue even if ndbcluster fails to start if --force flag is set --- mysql-test/include/have_ndb.inc | 1 + mysql-test/mysql-test-run.sh | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/mysql-test/include/have_ndb.inc b/mysql-test/include/have_ndb.inc index 84e60657876..9b85197abe8 100644 --- a/mysql-test/include/have_ndb.inc +++ b/mysql-test/include/have_ndb.inc @@ -1,3 +1,4 @@ +--exec test x$NDB_STATUS_OK = x1 -- require r/have_ndb.require disable_query_log; show variables like "have_ndbcluster"; diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index bc3a266208a..0c3d3dcdf0f 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -693,6 +693,8 @@ export NDB_MGM export NDB_BACKUP_DIR export NDB_TOOLS_OUTPUT export PURIFYOPTIONS +NDB_STATUS_OK=1 +export NDB_STATUS_OK MYSQL_TEST_ARGS="--no-defaults --socket=$MASTER_MYSOCK --database=$DB \ --user=$DBUSER --password=$DBPASSWD --silent -v --skip-safemalloc \ @@ -1055,7 +1057,15 @@ start_ndbcluster() else NDBCLUSTER_EXTRA_OPTS="--small" fi - ./ndb/ndbcluster $NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --initial || exit 1 + ./ndb/ndbcluster $NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --initial || NDB_STATUS_OK=0 + if [ x$NDB_STATUS_OK != x1 ] ; then + if [ x$FORCE != x1 ] ; then + exit 1 + fi + USE_NDBCLUSTER= + return + fi + NDB_CONNECTSTRING="host=localhost:$NDBCLUSTER_PORT" else NDB_CONNECTSTRING="$USE_RUNNING_NDBCLUSTER" From 41dffbb150a7f3080d66b394d2f244bd91ab8b90 Mon Sep 17 00:00:00 2001 From: unknown Date: Mon, 7 Feb 2005 17:59:46 +0100 Subject: [PATCH 51/53] mysql-test-run.sh: fixed start-and-exit flag mysql-test/mysql-test-run.sh: fixed start-and-exit flag --- mysql-test/mysql-test-run.sh | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 0c3d3dcdf0f..39d3f0492c2 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -1630,6 +1630,12 @@ run_testcase () fi fi fi + + if [ "x$START_AND_EXIT" = "x1" ] ; then + echo "Servers started, exiting" + exit + fi + cd $MYSQL_TEST_DIR if [ -f $tf ] ; then @@ -1767,11 +1773,6 @@ then mysql_loadstd fi -if [ "x$START_AND_EXIT" = "x1" ] ; then - echo "Servers started, exiting" - exit -fi - $ECHO "Starting Tests" # From 6a1e75621155f700bc6912f9669a5a374342b712 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 8 Feb 2005 06:11:05 +0100 Subject: [PATCH 52/53] ndbcluster now runs clean in 4.1, 5.0 may still need some work --- mysql-test/mysql-test-run.pl | 152 ++++++++++++++++++++++++++--------- 
1 file changed, 112 insertions(+), 40 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 3dd6f5803d7..f204fee50ed 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -284,6 +284,11 @@ our $opt_warnings; our $opt_with_ndbcluster; our $opt_with_openssl; +our $exe_ndb_mgm; +our $path_ndb_tools_dir; +our $path_ndb_backup_dir; +our $file_ndb_testrun_log; +our $flag_ndb_status_ok= 1; ###################################################################### # @@ -297,6 +302,7 @@ sub command_line_setup (); sub executable_setup (); sub environment_setup (); sub kill_and_cleanup (); +sub ndbcluster_install (); sub ndbcluster_start (); sub ndbcluster_stop (); sub run_benchmarks ($); @@ -346,18 +352,12 @@ sub main () { kill_and_cleanup(); mysql_install_db(); - if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster ) - { - ndbcluster_start(); # We start the cluster storage engine - } - # mysql_loadstd(); FIXME copying from "std_data" .frm and # .MGR but there are none?! } if ( $opt_start_and_exit ) { - # FIXME what about ndb? if ( mysqld_start('master',0,[],[]) ) { mtr_report("Servers started, exiting"); @@ -551,6 +551,8 @@ sub command_line_setup () { $master->[0]->{'path_myport'}= $opt_master_myport; $master->[0]->{'start_timeout'}= 400; # enough time create innodb tables + $master->[0]->{'ndbcluster'}= 1; # ndbcluster not started + $master->[1]->{'path_myddir'}= "$glob_mysql_test_dir/var/master1-data"; $master->[1]->{'path_myerr'}= "$glob_mysql_test_dir/var/log/master1.err"; $master->[1]->{'path_mylog'}= "$glob_mysql_test_dir/var/log/master1.log"; @@ -681,6 +683,10 @@ sub command_line_setup () { $glob_use_running_ndbcluster= 1; $opt_with_ndbcluster= 1; } + else + { + $opt_ndbconnectstring= "host=localhost:$opt_ndbcluster_port"; + } # FIXME @@ -786,6 +792,9 @@ sub executable_setup () { $exe_mysql_fix_system_tables= "$glob_basedir/scripts/mysql_fix_privilege_tables"; $path_language= "$glob_basedir/sql/share/english/"; $path_charsetsdir= "$glob_basedir/sql/share/charsets"; + + $path_ndb_tools_dir= "$glob_basedir/ndb/tools"; + $exe_ndb_mgm= "$glob_basedir/ndb/src/mgmclient/ndb_mgm"; } else { @@ -846,6 +855,9 @@ sub executable_setup () { $exe_mysqltest="$path_client_bindir/mysqltest"; $exe_mysql_client_test="$path_client_bindir/mysql_client_test"; } + + $path_ndb_tools_dir= "$glob_basedir/bin"; + $exe_ndb_mgm= "$glob_basedir/bin/ndb_mgm"; } # FIXME special $exe_master_mysqld and $exe_slave_mysqld @@ -860,6 +872,10 @@ sub executable_setup () { { $exe_slave_mysqld= $exe_mysqld; } + + $path_ndb_backup_dir= + "$glob_mysql_test_dir/var/ndbcluster-$opt_ndbcluster_port"; + $file_ndb_testrun_log= "$glob_mysql_test_dir/var/log/ndb_testrun.log"; } @@ -949,22 +965,19 @@ sub kill_and_cleanup () { mtr_report("Killing Possible Leftover Processes"); mkpath("$glob_mysql_test_dir/var/log"); # Needed for mysqladmin log mtr_kill_leftovers(); - } - if ( $opt_with_ndbcluster and ! 
$glob_use_running_ndbcluster ) - { ndbcluster_stop(); + $master->[0]->{'ndbcluster'}= 1; } mtr_report("Removing Stale Files"); rmtree("$glob_mysql_test_dir/var/log"); - rmtree("$glob_mysql_test_dir/var/ndbcluster"); + rmtree("$glob_mysql_test_dir/var/ndbcluster-$opt_ndbcluster_port"); rmtree("$glob_mysql_test_dir/var/run"); rmtree("$glob_mysql_test_dir/var/tmp"); mkpath("$glob_mysql_test_dir/var/log"); - mkpath("$glob_mysql_test_dir/var/ndbcluster"); mkpath("$glob_mysql_test_dir/var/run"); mkpath("$glob_mysql_test_dir/var/tmp"); mkpath($opt_tmpdir); @@ -1002,26 +1015,67 @@ sub kill_and_cleanup () { # FIXME why is there a different start below?! +sub ndbcluster_install () { + + if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster ) + { + return 0; + } + mtr_report("Install ndbcluster"); + my $ndbcluster_opts= $opt_bench ? "" : "--small"; + my $ndbcluster_port_base= $opt_ndbcluster_port + 2; + if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", + ["--port=$opt_ndbcluster_port", + "--port-base=$ndbcluster_port_base", + "--data-dir=$glob_mysql_test_dir/var", + $ndbcluster_opts, + "--initial"], + "", "", "", "") ) + { + mtr_error("Error ndbcluster_install"); + return 1; + } + + ndbcluster_stop(); + $master->[0]->{'ndbcluster'}= 1; + + return 0; +} + sub ndbcluster_start () { - mtr_report("Starting ndbcluster"); - my $ndbcluster_opts= $opt_bench ? "" : "--small"; - # FIXME check result code?! - mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", - ["--port-base=$opt_ndbcluster_port", - $ndbcluster_opts, - "--diskless", - "--initial", - "--data-dir=$glob_mysql_test_dir/var"], - "", "", "", ""); + if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster ) + { + return 0; + } + # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null + if ( mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", + ["--port=$opt_ndbcluster_port", + "--data-dir=$glob_mysql_test_dir/var"], + "", "/dev/null", "", "") ) + { + mtr_error("Error ndbcluster_install"); + return 1; + } + + return 0; } sub ndbcluster_stop () { + + if ( ! $opt_with_ndbcluster or $glob_use_running_ndbcluster ) + { + return; + } + my $ndbcluster_port_base= $opt_ndbcluster_port + 2; + # FIXME, we want to _append_ output to file $file_ndb_testrun_log instead of /dev/null mtr_run("$glob_mysql_test_dir/ndb/ndbcluster", - ["--data-dir=$glob_mysql_test_dir/var", - "--port-base=$opt_ndbcluster_port", + ["--port=$opt_ndbcluster_port", + "--data-dir=$glob_mysql_test_dir/var", "--stop"], - "", "", "", ""); + "", "/dev/null", "", ""); + + return; } @@ -1129,11 +1183,6 @@ sub run_suite () { stop_masters_slaves(); } - if ( $opt_with_ndbcluster and ! $glob_use_running_ndbcluster ) - { - ndbcluster_stop(); - } - if ( $opt_gcov ) { gcov_collect(); # collect coverage information @@ -1162,6 +1211,13 @@ sub mysql_install_db () { install_db('slave', $slave->[1]->{'path_myddir'}); install_db('slave', $slave->[2]->{'path_myddir'}); + if ( ndbcluster_install() ) + { + # failed to install, disable usage but flag that its no ok + $opt_with_ndbcluster= 0; + $flag_ndb_status_ok= 0; + } + return 0; } @@ -1224,6 +1280,9 @@ sub run_testcase ($) { mtr_tonewfile($opt_current_test,"$tname\n"); # Always tell where we are + # output current test to ndbcluster log file to enable diagnostics + mtr_tofile($file_ndb_testrun_log,"CURRENT TEST $tname\n"); + # ---------------------------------------------------------------------- # If marked to skip, just print out and return. 
# Note that a test case not marked as 'skip' can still be @@ -1297,6 +1356,15 @@ sub run_testcase ($) { if ( ! $opt_local_master ) { + if ( $master->[0]->{'ndbcluster'} ) + { + $master->[0]->{'ndbcluster'}= ndbcluster_start(); + if ( $master->[0]->{'ndbcluster'} ) + { + report_failure_and_restart($tinfo); + return; + } + } if ( ! $master->[0]->{'pid'} ) { $master->[0]->{'pid'}= @@ -1614,17 +1682,8 @@ sub mysqld_arguments ($$$$$) { if ( $opt_with_ndbcluster ) { mtr_add_arg($args, "%s--ndbcluster", $prefix); - - if ( $glob_use_running_ndbcluster ) - { - mtr_add_arg($args,"--ndb-connectstring=%s", $prefix, - $opt_ndbconnectstring); - } - else - { - mtr_add_arg($args,"--ndb-connectstring=host=localhost:%d", - $prefix, $opt_ndbcluster_port); - } + mtr_add_arg($args, "%s--ndb-connectstring=%s", $prefix, + $opt_ndbconnectstring); } # FIXME always set nowdays??? SMALL_SERVER @@ -1828,6 +1887,12 @@ sub stop_masters () { } } + if ( ! $master->[0]->{'ndbcluster'} ) + { + ndbcluster_stop(); + $master->[0]->{'ndbcluster'}= 1; + } + mtr_stop_mysqld_servers(\@args); } @@ -1903,6 +1968,13 @@ sub run_mysqltest ($$) { $ENV{'MYSQL_CLIENT_TEST'}= $cmdline_mysql_client_test; $ENV{'CHARSETSDIR'}= $path_charsetsdir; + $ENV{'NDB_STATUS_OK'}= $flag_ndb_status_ok; + $ENV{'NDB_MGM'}= $exe_ndb_mgm; + $ENV{'NDB_BACKUP_DIR'}= $path_ndb_backup_dir; + $ENV{'NDB_TOOLS_DIR'}= $path_ndb_tools_dir; + $ENV{'NDB_TOOLS_OUTPUT'}= $file_ndb_testrun_log; + $ENV{'NDB_CONNECTSTRING'}= $opt_ndbconnectstring; + my $exe= $exe_mysqltest; my $args; From 6cee60ea260384763fb54b6cf651f518726488d2 Mon Sep 17 00:00:00 2001 From: unknown Date: Tue, 8 Feb 2005 09:36:36 +0200 Subject: [PATCH 53/53] Relaxed locking in INSERT...SELECT, single table UPDATE...SELECT and single table DELETE...SELECT clauses when innobase_locks_unsafe_for_binlog is used and isolation level of the transaction is not serializable. InnoDB uses consistent read in these cases for a selected table. Backported from 5.0.x. sql/ha_innodb.cc: Relaxed locking in INSERT...SELECT, single table UPDATE...SELECT and single table DELETE...SELECT clauses when innobase_locks_unsafe_for_binlog is used and isolation level of the transaction is not serializable. InnoDB uses consistent read in these cases for a selected table. --- sql/ha_innodb.cc | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 1d75ce99aee..702139624ff 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -5282,8 +5282,27 @@ ha_innobase::store_lock( are not simple SELECTs; note that select_lock_type in this case may get strengthened in ::external_lock() to LOCK_X. */ - prebuilt->select_lock_type = LOCK_S; - prebuilt->stored_select_lock_type = LOCK_S; + if (srv_locks_unsafe_for_binlog && + prebuilt->trx->isolation_level != TRX_ISO_SERIALIZABLE && + (lock_type == TL_READ || lock_type == TL_READ_NO_INSERT) && + thd->lex->sql_command != SQLCOM_SELECT && + thd->lex->sql_command != SQLCOM_UPDATE_MULTI && + thd->lex->sql_command != SQLCOM_DELETE_MULTI ) { + + /* In case we have innobase_locks_unsafe_for_binlog + option set and isolation level of the transaction + is not set to serializable and MySQL is doing + INSERT INTO...SELECT without FOR UPDATE or IN + SHARE MODE we use consistent read for select. 
+ Similarly, in case of DELETE...SELECT and + UPDATE...SELECT when these are not multi table.*/ + + prebuilt->select_lock_type = LOCK_NONE; + prebuilt->stored_select_lock_type = LOCK_NONE; + } else { + prebuilt->select_lock_type = LOCK_S; + prebuilt->stored_select_lock_type = LOCK_S; + } } else if (lock_type != TL_IGNORE) {
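For readers who want the new locking rule in isolation, the following is a minimal C++ sketch of the condition added above. The names (use_consistent_read, lock_kind, command_kind) are hypothetical stand-ins, not InnoDB source, and the sketch leaves out the surrounding prebuilt/trx bookkeeping; it only restates the predicate that decides between LOCK_NONE and LOCK_S.

#include <cstdio>

// Stand-ins for what the real code reads from store_lock()'s lock_type and
// from thd->lex->sql_command.
enum lock_kind { LOCK_KIND_READ, LOCK_KIND_READ_NO_INSERT, LOCK_KIND_WRITE };
enum command_kind { CMD_SELECT, CMD_INSERT_SELECT, CMD_UPDATE_MULTI,
                    CMD_DELETE_MULTI, CMD_OTHER };

// Pick a consistent read (LOCK_NONE) instead of a shared lock (LOCK_S) for the
// selected table when the unsafe-for-binlog option is set, the transaction is
// below SERIALIZABLE, a plain read lock was requested, and the statement is
// neither a SELECT nor a multi-table UPDATE/DELETE.
static bool use_consistent_read(bool locks_unsafe_for_binlog, bool serializable,
                                lock_kind lock, command_kind cmd)
{
  return locks_unsafe_for_binlog && !serializable &&
         (lock == LOCK_KIND_READ || lock == LOCK_KIND_READ_NO_INSERT) &&
         cmd != CMD_SELECT && cmd != CMD_UPDATE_MULTI &&
         cmd != CMD_DELETE_MULTI;
}

int main()
{
  // INSERT ... SELECT with innobase_locks_unsafe_for_binlog at READ COMMITTED:
  // the source table is read with a consistent read, not shared row locks.
  std::printf("%d\n", (int) use_consistent_read(true, false, LOCK_KIND_READ,
                                                CMD_INSERT_SELECT));
  return 0;
}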