diff --git a/client/mysql.cc b/client/mysql.cc
index 1faffb7046e..5e87a6d420a 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -44,7 +44,7 @@
 #include <locale.h>
 #endif
 
-const char *VER= "14.9";
+const char *VER= "14.10";
 
 /* Don't try to make a nice table if the data is too big */
 #define MAX_COLUMN_LENGTH 1024
@@ -237,7 +237,7 @@ static COMMANDS commands[] = {
   { "connect",'r', com_connect,1,
     "Reconnect to the server. Optional arguments are db and host." },
   { "delimiter", 'd', com_delimiter, 1,
-    "Set query delimiter. " },
+    "Set statement delimiter. NOTE: Takes the rest of the line as new delimiter." },
 #ifdef USE_POPEN
   { "edit",   'e', com_edit,   0, "Edit command with $EDITOR."},
 #endif
diff --git a/config/ac-macros/misc.m4 b/config/ac-macros/misc.m4
index 186a069ac61..201702e5379 100644
--- a/config/ac-macros/misc.m4
+++ b/config/ac-macros/misc.m4
@@ -34,10 +34,10 @@ undefine([AC_CV_NAME])dnl
 AC_DEFUN([MYSQL_TYPE_ACCEPT],
 [ac_save_CXXFLAGS="$CXXFLAGS"
 AC_CACHE_CHECK([base type of last arg to accept], mysql_cv_btype_last_arg_accept,
-AC_LANG_SAVE
-AC_LANG_CPLUSPLUS
+AC_LANG_PUSH(C++)
 if test "$ac_cv_prog_gxx" = "yes"
 then
+  # Add -Werror, remove -fbranch-probabilities (Bug #268)
   CXXFLAGS=`echo $CXXFLAGS -Werror | sed 's/-fbranch-probabilities//'`
 fi
 mysql_cv_btype_last_arg_accept=none
@@ -64,7 +64,7 @@ fi
 if test "$mysql_cv_btype_last_arg_accept" = "none"; then
 mysql_cv_btype_last_arg_accept=int
 fi)
-AC_LANG_RESTORE
+AC_LANG_POP(C++)
 AC_DEFINE_UNQUOTED([SOCKET_SIZE_TYPE], [$mysql_cv_btype_last_arg_accept],
                    [The base type of the last arg to accept])
 CXXFLAGS="$ac_save_CXXFLAGS"
@@ -90,6 +90,35 @@ then
 fi
 ])
 
+#---START: Figure out whether to use 'struct rlimit' or 'struct rlimit64'
+AC_DEFUN([MYSQL_TYPE_STRUCT_RLIMIT],
+[ac_save_CXXFLAGS="$CXXFLAGS"
+AC_CACHE_CHECK([struct type to use with setrlimit], mysql_cv_btype_struct_rlimit,
+AC_LANG_PUSH(C++)
+if test "$ac_cv_prog_gxx" = "yes"
+then
+  # Add -Werror, remove -fbranch-probabilities (Bug #268)
+  CXXFLAGS=`echo $CXXFLAGS -Werror | sed 's/-fbranch-probabilities//'`
+fi
+mysql_cv_btype_struct_rlimit=none
+[AC_TRY_COMPILE([#if defined(inline)
+#undef inline
+#endif
+#include <sys/types.h>
+#include <sys/resource.h>
+],
+[struct rlimit64 rl; setrlimit(RLIMIT_CORE, &rl);],
+mysql_cv_btype_struct_rlimit="struct rlimit64")]
+if test "$mysql_cv_btype_struct_rlimit" = "none"; then
+mysql_cv_btype_struct_rlimit="struct rlimit"
+fi)
+AC_LANG_POP(C++)
+AC_DEFINE_UNQUOTED([STRUCT_RLIMIT], [$mysql_cv_btype_struct_rlimit],
+                   [The struct rlimit type to use with setrlimit])
+CXXFLAGS="$ac_save_CXXFLAGS"
+])
+#---END:
+
 AC_DEFUN([MYSQL_TIMESPEC_TS],
 [AC_CACHE_CHECK([if struct timespec has a ts_sec member], mysql_cv_timespec_ts,
 [AC_TRY_COMPILE([#include <time.h>
diff --git a/config/ac-macros/openssl.m4 b/config/ac-macros/openssl.m4
index 6541a492247..aa46dd45360 100644
--- a/config/ac-macros/openssl.m4
+++ b/config/ac-macros/openssl.m4
@@ -44,7 +44,7 @@ AC_DEFUN([MYSQL_FIND_OPENSSL], [
   if test -z "$OPENSSL_LIB" -o -z "$OPENSSL_INCLUDE" ; then
     echo "Could not find an installation of OpenSSL"
     if test -n "$OPENSSL_LIB" ; then
-      if test "$IS_LINUX" = "true"; then
+      if test "$TARGET_LINUX" = "true"; then
         echo "Looks like you've forgotten to install OpenSSL development RPM"
       fi
     fi
diff --git a/configure.in b/configure.in
index f4ced66a06d..d71446aa91a 100644
--- a/configure.in
+++ b/configure.in
@@ -6,7 +6,7 @@ AC_PREREQ(2.50)dnl Minimum Autoconf version required.
 
 AC_INIT(sql/mysqld.cc)
 AC_CANONICAL_SYSTEM
 # Don't forget to also update the NDB lines below.
-AM_INIT_AUTOMAKE(mysql, 5.0.5-beta)
+AM_INIT_AUTOMAKE(mysql, 5.0.6-beta)
 AM_CONFIG_HEADER(config.h)
 
 PROTOCOL_VERSION=10
@@ -17,7 +17,7 @@ SHARED_LIB_VERSION=14:0:0
 # ndb version
 NDB_VERSION_MAJOR=5
 NDB_VERSION_MINOR=0
-NDB_VERSION_BUILD=4
+NDB_VERSION_BUILD=6
 NDB_VERSION_STATUS="beta"
 
 # Set all version vars based on $VERSION. How do we do this more elegantly?
@@ -383,15 +383,16 @@ AC_MSG_CHECKING("if we should use 'skip-locking' as default for $target_os")
 if expr "$target_os" : "[[Ll]]inux.*" > /dev/null
 then
   MYSQLD_DEFAULT_SWITCHES="--skip-locking"
-  IS_LINUX="true"
-  AC_MSG_RESULT("yes");
+  TARGET_LINUX="true"
+  AC_MSG_RESULT("yes")
+  AC_DEFINE([TARGET_OS_LINUX], [1], [Whether we build for Linux])
 else
   MYSQLD_DEFAULT_SWITCHES=""
-  IS_LINUX="false"
-  AC_MSG_RESULT("no");
+  TARGET_LINUX="false"
+  AC_MSG_RESULT("no")
 fi
 AC_SUBST(MYSQLD_DEFAULT_SWITCHES)
-AC_SUBST(IS_LINUX)
+AC_SUBST(TARGET_LINUX)
 
 dnl Find paths to some shell programs
 AC_PATH_PROG(LN, ln, ln)
@@ -580,7 +581,7 @@ AC_SUBST(NOINST_LDFLAGS)
 # (this is true on the MySQL build machines to avoid NSS problems)
 #
 
-if test "$IS_LINUX" = "true" -a "$static_nss" = ""
+if test "$TARGET_LINUX" = "true" -a "$static_nss" = ""
 then
   tmp=`nm /usr/lib/libc.a | grep _nss_files_getaliasent_r`
   if test -n "$tmp"
@@ -747,7 +748,7 @@ AC_CHECK_FUNC(sem_init, , AC_CHECK_LIB(posix4, sem_init))
 MYSQL_CHECK_ZLIB_WITH_COMPRESS
 
 # For large pages support
-if test "$IS_LINUX" = "true"
+if test "$TARGET_LINUX" = "true"
 then
   # For SHM_HUGETLB on Linux
   AC_CHECK_DECLS(SHM_HUGETLB,
@@ -805,7 +806,7 @@ struct request_info *req;
 ])
 AC_SUBST(WRAPLIBS)
 
-if test "$IS_LINUX" = "true"; then
+if test "$TARGET_LINUX" = "true"; then
   AC_MSG_CHECKING([for atomic operations])
 
   atom_ops=
@@ -849,7 +850,7 @@ int main()
     [ USE_PSTACK=no ])
   pstack_libs=
   pstack_dirs=
-  if test "$USE_PSTACK" = yes -a "$IS_LINUX" = "true" -a "$BASE_MACHINE_TYPE" = "i386"
+  if test "$USE_PSTACK" = yes -a "$TARGET_LINUX" = "true" -a "$BASE_MACHINE_TYPE" = "i386"
   then
     have_libiberty= have_libbfd=
     my_save_LIBS="$LIBS"
@@ -1231,61 +1232,93 @@ esac
 # We have to check libc last because else it fails on Solaris 2.6
 with_posix_threads="no"
-# Hack for DEC-UNIX (OSF1)
+# Search thread lib on Linux
 if test "$with_named_thread" = "no"
 then
-  # Look for LinuxThreads.
-  AC_MSG_CHECKING("LinuxThreads")
-  res=`grep Linuxthreads /usr/include/pthread.h 2>/dev/null | wc -l`
-  if test "$res" -gt 0
+  AC_MSG_CHECKING("Linux threads")
+  if test "$TARGET_LINUX" = "true"
   then
-    AC_MSG_RESULT("Found")
-    AC_DEFINE([HAVE_LINUXTHREADS], [1],
-              [Whether we are using Xavier Leroy's LinuxThreads])
-    # Linux 2.0 sanity check
-    AC_TRY_COMPILE([#include <sched.h>], [int a = sched_get_priority_min(1);], ,
-      AC_MSG_ERROR([Syntax error in sched.h. Change _P to __P in the /usr/include/sched.h file. See the Installation chapter in the Reference Manual]))
-    # RedHat 5.0 does not work with dynamic linking of this. -static also
-    # gives a speed increase in linux so it does not hurt on other systems.
-    with_named_thread="-lpthread"
-  else
-    AC_MSG_RESULT("Not found")
-    # If this is a linux machine we should barf
-    if test "$IS_LINUX" = "true"
-    then
-      AC_MSG_ERROR([This is a linux system and Linuxthreads was not
-found. On linux Linuxthreads should be used. Please install Linuxthreads
-(or a new glibc) and try again. See the Installation chapter in the
-Reference Manual for more information.])
-    else
-      AC_MSG_CHECKING("DEC threads")
-      if test -f /usr/shlib/libpthread.so -a -f /usr/lib/libmach.a -a -f /usr/ccs/lib/cmplrs/cc/libexc.a
-      then
-        with_named_thread="-lpthread -lmach -lexc"
-        CFLAGS="$CFLAGS -D_REENTRANT"
-        CXXFLAGS="$CXXFLAGS -D_REENTRANT"
-        AC_DEFINE(HAVE_DEC_THREADS, [1],
-                  [Whether we are using DEC threads])
-        AC_MSG_RESULT("yes")
-      else
-        AC_MSG_RESULT("no")
-        AC_MSG_CHECKING("DEC 3.2 threads")
-        if test -f /usr/shlib/libpthreads.so -a -f /usr/lib/libmach.a -a -f /usr/ccs/lib/cmplrs/cc/libexc.a
-        then
-          with_named_thread="-lpthreads -lmach -lc_r"
-          AC_DEFINE(HAVE_DEC_THREADS, [1])
-          AC_DEFINE([HAVE_DEC_3_2_THREADS], [1],
-                    [Whether we are using OSF1 DEC threads on 3.2])
-          with_osf32_threads="yes"
-          MYSQLD_DEFAULT_SWITCHES="--skip-thread-priority"
-          AC_MSG_RESULT("yes")
-        else
-          AC_MSG_RESULT("no")
+    AC_MSG_RESULT("starting")
+    # use getconf to check glibc contents
+    AC_MSG_CHECKING("getconf GNU_LIBPTHREAD_VERSION")
+    case `getconf GNU_LIBPTHREAD_VERSION | tr abcdefghijklmnopqrstuvwxyz ABCDEFGHIJKLMNOPQRSTUVWXYZ` in
+      NPTL* )
+        AC_MSG_RESULT("NPTL")
+        AC_DEFINE([HAVE_NPTL], [1], [NPTL threads implementation])
+        with_named_thread="-lpthread"
+        ;;
+      LINUXTHREADS* )
+        AC_MSG_RESULT("Linuxthreads")
+        AC_DEFINE([HAVE_LINUXTHREADS], [1],
+                  [Whether we are using Xavier Leroy's LinuxThreads])
+        with_named_thread="-lpthread"
+        ;;
+      * )
+        AC_MSG_RESULT("unknown")
+        ;;
+    esac
+    if test "$with_named_thread" = "no"
+    then
+      # old method, check headers
+      # Look for LinuxThreads.
+      AC_MSG_CHECKING("LinuxThreads in header file comment")
+      res=`grep Linuxthreads /usr/include/pthread.h 2>/dev/null | wc -l`
+      if test "$res" -gt 0
+      then
+        AC_MSG_RESULT("Found")
+        AC_DEFINE([HAVE_LINUXTHREADS], [1],
+                  [Whether we are using Xavier Leroy's LinuxThreads])
+        # Linux 2.0 sanity check
+        AC_TRY_COMPILE([#include <sched.h>], [int a = sched_get_priority_min(1);], ,
+          AC_MSG_ERROR([Syntax error in sched.h. Change _P to __P in the /usr/include/sched.h file. See the Installation chapter in the Reference Manual]))
+        # RedHat 5.0 does not work with dynamic linking of this. -static also
+        # gives a speed increase in linux so it does not hurt on other systems.
+        with_named_thread="-lpthread"
+      else
+        AC_MSG_RESULT("Not found")
+        # If this is a linux machine we should barf
+        AC_MSG_ERROR([This is a Linux system without a working getconf,
+and Linuxthreads was not found. Please install it (or a new glibc) and try again.
+See the Installation chapter in the Reference Manual for more information.])
       fi
-      fi
-    fi
-  fi
-fi
+    else
+      AC_MSG_RESULT("no need to check headers")
+    fi
+
+    AC_MSG_CHECKING("for pthread_create in -lpthread");
+    ac_save_LIBS="$LIBS"
+    LIBS="$LIBS -lpthread"
+    AC_TRY_LINK( [#include <pthread.h>],
+      [ (void) pthread_create((pthread_t*) 0,(pthread_attr_t*) 0, 0, 0); ],
+      AC_MSG_RESULT("yes"),
+      [ AC_MSG_RESULT("no")
+        AC_MSG_ERROR([
+This is a Linux system claiming to support threads, either Linuxthreads or NPTL, but
+linking a test program failed.
+Please install one of these (or a new glibc) and try again.
+See the Installation chapter in the Reference Manual for more information.]) ]
+    )
+    LIBS="$ac_save_LIBS"
+  else
+    AC_MSG_RESULT("no")
+  fi # "$TARGET_LINUX"
+fi # "$with_named_thread" = "no" -a "$with_mit_threads" = "no"
+
+
+# Hack for DEC-UNIX (OSF1 -> Tru64)
+if test "$with_named_thread" = "no" -a "$with_mit_threads" = "no"
+then
+  AC_MSG_CHECKING("DEC threads post OSF/1 3.2")
+  if test -f /usr/shlib/libpthread.so -a -f /usr/lib/libmach.a -a -f /usr/ccs/lib/cmplrs/cc/libexc.a
+  then
+    with_named_thread="-lpthread -lmach -lexc"
+    CFLAGS="$CFLAGS -D_REENTRANT"
+    CXXFLAGS="$CXXFLAGS -D_REENTRANT"
+    AC_DEFINE(HAVE_DEC_THREADS, [1], [Whether we are using DEC threads])
+    AC_MSG_RESULT("yes")
+  else
+    AC_MSG_RESULT("no")
+  fi # DEC threads
+fi # "$with_named_thread" = "no" -a "$with_mit_threads" = "no"
 
 
 dnl This is needed because -lsocket has to come after the thread
@@ -1696,7 +1729,7 @@ fi
 AC_SUBST(COMPILATION_COMMENT)
 
 AC_MSG_CHECKING("need of special linking flags")
-if test "$IS_LINUX" = "true" -a "$ac_cv_prog_gcc" = "yes" -a "$all_is_static" != "yes"
+if test "$TARGET_LINUX" = "true" -a "$ac_cv_prog_gcc" = "yes" -a "$all_is_static" != "yes"
 then
   LDFLAGS="$LDFLAGS -rdynamic"
   AC_MSG_RESULT("-rdynamic")
@@ -1754,6 +1787,8 @@ AC_C_BIGENDIAN
 MYSQL_TYPE_ACCEPT
 #---END:
 
+# Figure out what type of struct rlimit to use with setrlimit
+MYSQL_TYPE_STRUCT_RLIMIT
 # Find where the stack goes
 MYSQL_STACK_DIRECTION
 # We want to skip alloca on irix unconditionally. It may work on some version..
@@ -1914,7 +1949,7 @@ CFLAGS="$ORG_CFLAGS"
 # Sanity check: We should not have any fseeko symbol unless
 # large_file_support=yes
 AC_CHECK_FUNC(fseeko,
-[if test "$large_file_support" = no -a "$IS_LINUX" = "true";
+[if test "$large_file_support" = no -a "$TARGET_LINUX" = "true";
 then
   AC_MSG_ERROR("Found fseeko symbol but large_file_support is not enabled!");
 fi]
diff --git a/heap/hp_hash.c b/heap/hp_hash.c
index 3121ef71fb0..52a250bd7af 100644
--- a/heap/hp_hash.c
+++ b/heap/hp_hash.c
@@ -255,6 +255,9 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
       if (*pos)					/* Found null */
       {
 	nr^= (nr << 1) | 1;
+	/* Add key pack length (2) to key for VARCHAR segments */
+	if (seg->type == HA_KEYTYPE_VARTEXT1)
+	  key+= 2;
 	continue;
       }
       pos++;
@@ -390,6 +393,9 @@ ulong hp_hashnr(register HP_KEYDEF *keydef, register const byte *key)
       if (*pos)
       {
 	nr^= (nr << 1) | 1;
+	/* Add key pack length (2) to key for VARCHAR segments */
+	if (seg->type == HA_KEYTYPE_VARTEXT1)
+	  key+= 2;
 	continue;
       }
       pos++;
@@ -584,7 +590,12 @@ int hp_key_cmp(HP_KEYDEF *keydef, const byte *rec, const byte *key)
       if (found_null != (int) *key++)
 	return 1;
       if (found_null)
+      {
+	/* Add key pack length (2) to key for VARCHAR segments */
+	if (seg->type == HA_KEYTYPE_VARTEXT1)
+	  key+= 2;
 	continue;
+      }
     }
     if (seg->type == HA_KEYTYPE_TEXT)
     {
diff --git a/include/my_global.h b/include/my_global.h
index 7ec04377864..74846fe1762 100644
--- a/include/my_global.h
+++ b/include/my_global.h
@@ -97,7 +97,7 @@
 
 /* Fix problem with S_ISLNK() on Linux */
-#if defined(HAVE_LINUXTHREADS)
+#if defined(TARGET_OS_LINUX)
 #undef  _GNU_SOURCE
 #define _GNU_SOURCE 1
 #endif
@@ -107,6 +107,7 @@
 #undef THREAD
 #undef HAVE_mit_thread
 #undef HAVE_LINUXTHREADS
+#undef HAVE_NPTL
 #undef HAVE_UNIXWARE7_THREADS
 #endif
@@ -246,13 +247,13 @@ C_MODE_START int __cxa_pure_virtual() {\
 #endif
 
 /* In Linux-alpha we have atomic.h if we are using gcc */
-#if defined(HAVE_LINUXTHREADS) && defined(__GNUC__) && defined(__alpha__) && (__GNUC__ > 2 || ( __GNUC__ == 2 &&  __GNUC_MINOR__ >= 95)) && !defined(HAVE_ATOMIC_ADD)
+#if defined(TARGET_OS_LINUX) && defined(__GNUC__) && defined(__alpha__) && (__GNUC__ > 2 || ( __GNUC__ == 2 &&  __GNUC_MINOR__ >= 95)) && !defined(HAVE_ATOMIC_ADD)
 #define HAVE_ATOMIC_ADD
 #define HAVE_ATOMIC_SUB
 #endif
 
 /* In Linux-ia64 including atomic.h will give us an error */
-#if (defined(HAVE_LINUXTHREADS) && defined(__GNUC__) && (defined(__ia64__)||defined(__powerpc64__))) || !defined(THREAD)
+#if (defined(TARGET_OS_LINUX) && defined(__GNUC__) && (defined(__ia64__)||defined(__powerpc64__))) || !defined(THREAD)
 #undef HAVE_ATOMIC_ADD
 #undef HAVE_ATOMIC_SUB
 #endif
@@ -799,7 +800,7 @@ typedef unsigned long	uint32; /* Short for unsigned integer >= 32 bits */
 #error "Neither int or long is of 4 bytes width"
 #endif
 
-#if !defined(HAVE_ULONG) && !defined(HAVE_LINUXTHREADS) && !defined(__USE_MISC)
+#if !defined(HAVE_ULONG) && !defined(TARGET_OS_LINUX) && !defined(__USE_MISC)
 typedef unsigned long ulong;		  /* Short for unsigned long */
 #endif
 #ifndef longlong_defined
diff --git a/innobase/dict/dict0dict.c b/innobase/dict/dict0dict.c
index 8c9724da079..0c39defeada 100644
--- a/innobase/dict/dict0dict.c
+++ b/innobase/dict/dict0dict.c
@@ -199,7 +199,17 @@ FILE*	dict_foreign_err_file		= NULL;
 mutex_t	dict_foreign_err_mutex;		/* mutex protecting the foreign
 					and unique error buffers */
 
-
+/**********************************************************************
+Makes all characters in a NUL-terminated UTF-8 string lower case. */
+
+void
+dict_casedn_str(
+/*============*/
+	char*	a)	/* in/out: string to put in lower case */
+{
+	innobase_casedn_str(a);
+}
+
 /************************************************************************
 Checks if the database name in two table names is the same. */
diff --git a/innobase/fil/fil0fil.c b/innobase/fil/fil0fil.c
index e8efdcfbce0..8600f583dbd 100644
--- a/innobase/fil/fil0fil.c
+++ b/innobase/fil/fil0fil.c
@@ -25,6 +25,7 @@ Created 10/25/1995 Heikki Tuuri
 #include "srv0start.h"
 #include "mtr0mtr.h"
 #include "mtr0log.h"
+#include "dict0dict.h"
 
 
 /*
@@ -2743,7 +2744,15 @@ fil_load_single_table_tablespace(
 	sprintf(filepath, "%s/%s/%s", fil_path_to_mysql_datadir, dbname,
 								filename);
 	srv_normalize_path_for_win(filepath);
+#ifdef __WIN__
+	/* If lower_case_table_names is 0 or 2, then MySQL allows database
+	directory names with upper case letters. On Windows, all table and
+	database names in InnoDB are internally always in lower case. Put the
+	file path to lower case, so that we are consistent with InnoDB's
+	internal data dictionary. */
 
+	dict_casedn_str(filepath);
+#endif
 	file = os_file_create_simple_no_error_handling(filepath, OS_FILE_OPEN,
 					OS_FILE_READ_ONLY, &success);
 	if (!success) {
diff --git a/innobase/include/dict0dict.h b/innobase/include/dict0dict.h
index 56f0a158a20..d9cda402bac 100644
--- a/innobase/include/dict0dict.h
+++ b/innobase/include/dict0dict.h
@@ -26,6 +26,13 @@ Created 1/8/1996 Heikki Tuuri
 #include "ut0byte.h"
 #include "trx0types.h"
 
+/**********************************************************************
+Makes all characters in a NUL-terminated UTF-8 string lower case. */
+
+void
+dict_casedn_str(
+/*============*/
+	char*	a);	/* in/out: string to put in lower case */
 /************************************************************************
 Get the database name length in a table name. */
diff --git a/myisam/Makefile.am b/myisam/Makefile.am
index e77e46cb7a3..c61647b25f7 100644
--- a/myisam/Makefile.am
+++ b/myisam/Makefile.am
@@ -88,7 +88,7 @@ SUFFIXES = .sh
 	-e 's!@''FIND_PROC''@!@FIND_PROC@!' \
 	-e 's!@''MYSQLD_DEFAULT_SWITCHES''@!@MYSQLD_DEFAULT_SWITCHES@!' \
 	-e 's!@''MYSQL_UNIX_ADDR''@!@MYSQL_UNIX_ADDR@!' \
-	-e 's!@''IS_LINUX''@!@IS_LINUX@!' \
+	-e 's!@''TARGET_LINUX''@!@TARGET_LINUX@!' \
 	-e "s!@""CONF_COMMAND""@!@CONF_COMMAND@!" \
 	-e 's!@''MYSQLD_USER''@!@MYSQLD_USER@!' \
 	-e 's!@''sysconfdir''@!@sysconfdir@!' \
diff --git a/myisam/mi_key.c b/myisam/mi_key.c
index ab5ddd3a378..d7d10e116aa 100644
--- a/myisam/mi_key.c
+++ b/myisam/mi_key.c
@@ -242,7 +242,10 @@ uint _mi_pack_key(register MI_INFO *info, uint keynr, uchar *key, uchar *old,
       {
 	k_length-=length;
 	if (keyseg->flag & (HA_VAR_LENGTH_PART | HA_BLOB_PART))
+	{
 	  k_length-=2;				/* Skip length */
+	  old+= 2;
+	}
 	continue;				/* Found NULL */
       }
     }
diff --git a/mysql-test/include/varchar.inc b/mysql-test/include/varchar.inc
index 3ec9d00d05d..13b4315f2b8 100644
--- a/mysql-test/include/varchar.inc
+++ b/mysql-test/include/varchar.inc
@@ -226,3 +226,13 @@ create table t1 (v varchar(65530), key(v(10)));
 insert into t1 values(repeat('a',65530));
 select length(v) from t1 where v=repeat('a',65530);
 drop table t1;
+
+#
+# Bug #9489: problem with hash indexes
+#
+
+create table t1(a int, b varchar(12), key ba(b, a));
+insert into t1 values (1, 'A'), (20, NULL);
+explain select * from t1 where a=20 and b is null;
+select * from t1 where a=20 and b is null;
+drop table t1;
diff --git a/mysql-test/r/bdb.result b/mysql-test/r/bdb.result
index d743bc03675..876da3cb964 100644
--- a/mysql-test/r/bdb.result
+++ b/mysql-test/r/bdb.result
@@ -1858,6 +1858,15 @@ select length(v) from t1 where v=repeat('a',65530);
 length(v)
 65530
 drop table t1;
+create table t1(a int, b varchar(12), key ba(b, a));
+insert into t1 values (1, 'A'), (20, NULL);
+explain select * from t1 where a=20 and b is null;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ref	ba	ba	20	const,const	1	Using where
+select * from t1 where a=20 and b is null;
+a	b
+20	NULL
+drop table t1;
 create table t1 (v varchar(65530), key(v));
 Warnings:
 Warning	1071	Specified key was too long; max key length is 255 bytes
diff --git a/mysql-test/r/blackhole.result b/mysql-test/r/blackhole.result
index a3053075de5..66752b11655 100644
--- a/mysql-test/r/blackhole.result
+++ b/mysql-test/r/blackhole.result
@@ -83,4 +83,43 @@ Full-text indexes are called collections
 Only MyISAM tables support collections
 select * from t1 where MATCH(a,b) AGAINST ("only");
 a	b
-drop table if exists t1,t2;
+reset master;
+drop table t1,t2;
+create table t1 (a int) engine=blackhole;
+delete from t1 where a=10;
+update t1 set a=11 where a=15;
+insert into t1 values(1);
+insert ignore into t1 values(1);
+replace into t1 values(100);
+create table t2 (a varchar(200)) engine=blackhole;
+load data infile '../../std_data/words.dat' into table t2;
+alter table t1 add b int;
+alter table t1 drop b;
+create table t3 like t1;
+insert into t1 select * from t3;
+replace into t1 select * from t3;
+select * from t1;
+a
+select * from t2;
+a
+select * from t3;
+a
+show binlog events;
+Log_name	Pos	Event_type	Server_id	Orig_log_pos	Info
+master-bin.000001	#	Start	1	#	Server ver: VERSION, Binlog ver: 3
+master-bin.000001	#	Query	1	#	use `test`; drop table t1,t2
+master-bin.000001	#	Query	1	#	use `test`; create table t1 (a int) engine=blackhole
+master-bin.000001	#	Query	1	#	use `test`; delete from t1 where a=10
+master-bin.000001	#	Query	1	#	use `test`; update t1 set a=11 where a=15
+master-bin.000001	#	Query	1	#	use `test`; insert into t1 values(1)
+master-bin.000001	#	Query	1	#	use `test`; insert ignore into t1 values(1)
+master-bin.000001	#	Query	1	#	use `test`; replace into t1 values(100)
+master-bin.000001	#	Query	1	#	use `test`; create table t2 (a varchar(200)) engine=blackhole
+master-bin.000001	#	Create_file	1	#	db=test;table=t2;file_id=1;block_len=581
+master-bin.000001	#	Exec_load	1	#	;file_id=1
+master-bin.000001	#	Query	1	#	use `test`; alter table t1 add b int
+master-bin.000001	#	Query	1	#	use `test`; alter table t1 drop b
+master-bin.000001	#	Query	1	#	use `test`; create table t3 like t1
+master-bin.000001	#	Query	1	#	use `test`; insert into t1 select * from t3
+master-bin.000001	#	Query	1	#	use `test`; replace into t1 select * from t3
+drop table t1,t2,t3;
diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result
index ccd8601ace0..d6cfefff9e6 100644
--- a/mysql-test/r/innodb.result
+++ b/mysql-test/r/innodb.result
@@ -2359,6 +2359,15 @@ select length(v) from t1 where v=repeat('a',65530);
 length(v)
 65530
 drop table t1;
+create table t1(a int, b varchar(12), key ba(b, a));
+insert into t1 values (1, 'A'), (20, NULL);
+explain select * from t1 where a=20 and b is null;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ref	ba	ba	20	const,const	1	Using where; Using index
+select * from t1 where a=20 and b is null;
+a	b
+20	NULL
+drop table t1;
 create table t1 (v varchar(65530), key(v));
 ERROR HY000: Can't create table './test/t1' (errno: 139)
 create table t1 (v varchar(65536));
diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result
index 539df03e6f9..1a8ace98d05 100644
--- a/mysql-test/r/myisam.result
+++ b/mysql-test/r/myisam.result
@@ -1157,6 +1157,15 @@ select length(v) from t1 where v=repeat('a',65530);
 length(v)
 65530
 drop table t1;
+create table t1(a int, b varchar(12), key ba(b, a));
+insert into t1 values (1, 'A'), (20, NULL);
+explain select * from t1 where a=20 and b is null;
+id	select_type	table	type	possible_keys	key	key_len	ref	rows	Extra
+1	SIMPLE	t1	ref	ba	ba	20	const,const	1	Using where; Using index
+select * from t1 where a=20 and b is null;
+a	b
+20	NULL
+drop table t1;
 create table t1 (v varchar(65530), key(v));
 Warnings:
 Warning	1071	Specified key was too long; max key length is 1000 bytes
diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result
index 130bda3e5bb..fdbdd26b00c 100644
--- a/mysql-test/r/mysqldump.result
+++ b/mysql-test/r/mysqldump.result
@@ -599,3 +599,35 @@ UNLOCK TABLES;
 /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
 
 DROP TABLE t1;
+CREATE TABLE t1 (a char(10));
+INSERT INTO t1 VALUES ('\'');
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8 */;
+/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
+/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
+/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
+/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
+DROP TABLE IF EXISTS `t1`;
+CREATE TABLE `t1` (
+  `a` char(10) default NULL
+) ENGINE=MyISAM DEFAULT CHARSET=latin1;
+
+
+/*!40000 ALTER TABLE `t1` DISABLE KEYS */;
+LOCK TABLES `t1` WRITE;
+INSERT INTO `t1` VALUES ('\'');
+UNLOCK TABLES;
+/*!40000 ALTER TABLE `t1` ENABLE KEYS */;
+
+/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
+/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
+/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
+
+DROP TABLE t1;
diff --git a/mysql-test/r/type_varchar.result b/mysql-test/r/type_varchar.result
index d2fe843a68b..3bd7fe6b175 100644
--- a/mysql-test/r/type_varchar.result
+++ b/mysql-test/r/type_varchar.result
@@ -1,4 +1,4 @@
-drop table if exists t1;
+drop table if exists t1, t2;
 create table t1 (v varchar(30), c char(3), e enum('abc','def','ghi'), t text);
 truncate table vchar;
 show create table t1;
@@ -383,3 +383,12 @@ select * from t1;
 pkcol	othercol
 test	somethingelse
 drop table t1;
+create table t1 (a int, b varchar(12));
+insert into t1 values (1, 'A'), (22, NULL);
+create table t2 (a int);
+insert into t2 values (22), (22);
+select t1.a, t1.b, min(t1.b) from t1 inner join t2 ON t2.a = t1.a
+group by t1.b, t1.a;
+a	b	min(t1.b)
+22	NULL	NULL
+drop table t1, t2;
diff --git a/mysql-test/t/blackhole.test b/mysql-test/t/blackhole.test
index 052574d6921..d1fcfc971a9 100644
--- a/mysql-test/t/blackhole.test
+++ b/mysql-test/t/blackhole.test
@@ -96,4 +96,32 @@ select * from t1 where MATCH(a,b) AGAINST ("indexes");
 select * from t1 where MATCH(a,b) AGAINST ("indexes collections");
 select * from t1 where MATCH(a,b) AGAINST ("only");
 
-drop table if exists t1,t2;
+# Test that every DML (except SELECT) and DDL gets into binlog
+# so that blackhole can be used as "binlog propagator"
+
+reset master;
+drop table t1,t2;
+create table t1 (a int) engine=blackhole;
+delete from t1 where a=10;
+update t1 set a=11 where a=15;
+insert into t1 values(1);
+insert ignore into t1 values(1);
+replace into t1 values(100);
+create table t2 (a varchar(200)) engine=blackhole;
+load data infile '../../std_data/words.dat' into table t2;
+alter table t1 add b int;
+alter table t1 drop b;
+create table t3 like t1;
+insert into t1 select * from t3;
+replace into t1 select * from t3;
+# Just to verify
+select * from t1;
+select * from t2;
+select * from t3;
+
+let $VERSION=`select version()`;
+--replace_result $VERSION VERSION
+--replace_column 2 # 5 #
+show binlog events;
+
+drop table t1,t2,t3;
diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test
index ca883f5c4d2..949c62ef288 100644
--- a/mysql-test/t/mysqldump.test
+++ b/mysql-test/t/mysqldump.test
@@ -196,3 +196,11 @@ INSERT INTO `t1` VALUES (0x602010000280100005E71A);
 --exec $MYSQL_DUMP --skip-extended-insert --hex-blob test --skip-comments t1
 DROP TABLE t1;
+
+#
+# Bug #9756
+#
+
+CREATE TABLE t1 (a char(10));
+INSERT INTO t1 VALUES ('\'');
+--exec $MYSQL_DUMP --skip-comments test t1
+DROP TABLE t1;
diff --git a/mysql-test/t/type_varchar.test b/mysql-test/t/type_varchar.test
index 9867cf4c057..2bffca6b889 100644
--- a/mysql-test/t/type_varchar.test
+++ b/mysql-test/t/type_varchar.test
@@ -1,5 +1,5 @@
 --disable_warnings
-drop table if exists t1;
+drop table if exists t1, t2;
 --enable_warnings
 
 create table t1 (v varchar(30), c char(3), e enum('abc','def','ghi'), t text);
@@ -106,3 +106,15 @@ insert into t1 values ('test', 'something');
 update t1 set othercol='somethingelse' where pkcol='test';
 select * from t1;
 drop table t1;
+
+#
+# Bug #9489: problems with key handling
+#
+
+create table t1 (a int, b varchar(12));
+insert into t1 values (1, 'A'), (22, NULL);
+create table t2 (a int);
+insert into t2 values (22), (22);
+select t1.a, t1.b, min(t1.b) from t1 inner join t2 ON t2.a = t1.a
+  group by t1.b, t1.a;
+drop table t1, t2;
diff --git a/mysys/charset.c b/mysys/charset.c
index a4a8205a3f9..534a6aa998e 100644
--- a/mysys/charset.c
+++ b/mysys/charset.c
@@ -581,15 +581,15 @@ ulong escape_string_for_mysql(CHARSET_INFO *charset_info,
   {
     char escape=0;
 #ifdef USE_MB
-    int l;
-    if (use_mb_flag && (l= my_ismbchar(charset_info, from, end)))
+    int tmp_length;
+    if (use_mb_flag && (tmp_length= my_ismbchar(charset_info, from, end)))
     {
-      if (to + l >= to_end)
+      if (to + tmp_length > to_end)
       {
         overflow=1;
         break;
       }
-      while (l--)
+      while (tmp_length--)
         *to++= *from++;
       from--;
       continue;
@@ -605,7 +605,7 @@ ulong escape_string_for_mysql(CHARSET_INFO *charset_info,
       multi-byte character into a valid one. For example, 0xbf27 is not a
       valid GBK character, but 0xbf5c is. (0x27 = ', 0x5c = \)
     */
-    if (use_mb_flag && (l= my_mbcharlen(charset_info, *from)) > 1)
+    if (use_mb_flag && (tmp_length= my_mbcharlen(charset_info, *from)) > 1)
       escape= *from;
     else
 #endif
@@ -634,7 +634,7 @@ ulong escape_string_for_mysql(CHARSET_INFO *charset_info,
     }
     if (escape)
     {
-      if (to + 2 >= to_end)
+      if (to + 2 > to_end)
       {
         overflow=1;
         break;
@@ -644,7 +644,7 @@ ulong escape_string_for_mysql(CHARSET_INFO *charset_info,
     }
     else
     {
-      if (to + 1 >= to_end)
+      if (to + 1 > to_end)
      {
        overflow=1;
        break;
diff --git a/mysys/thr_mutex.c b/mysys/thr_mutex.c
index bbcfaa8bba6..2facb4e18cf 100644
--- a/mysys/thr_mutex.c
+++ b/mysys/thr_mutex.c
@@ -17,7 +17,7 @@
 /* This makes a wrapper for mutex handling to make it easier to debug mutex */
 
 #include <my_global.h>
-#if defined(HAVE_LINUXTHREADS) && !defined (__USE_UNIX98)
+#if defined(TARGET_OS_LINUX) && !defined (__USE_UNIX98)
 #define __USE_UNIX98			/* To get rw locks under Linux */
 #endif
 #if defined(THREAD) && defined(SAFE_MUTEX)
diff --git a/ndb/include/kernel/signaldata/BackupImpl.hpp b/ndb/include/kernel/signaldata/BackupImpl.hpp
index 2ac91570aad..2032e2347b5 100644
--- a/ndb/include/kernel/signaldata/BackupImpl.hpp
+++ b/ndb/include/kernel/signaldata/BackupImpl.hpp
@@ -75,7 +75,7 @@ class DefineBackupRef {
   friend bool printDEFINE_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16);
 public:
-  STATIC_CONST( SignalLength = 3 );
+  STATIC_CONST( SignalLength = 4 );
 
   enum ErrorCode {
     Undefined = 1340,
@@ -92,6 +92,7 @@ private:
   Uint32 backupId;
   Uint32 backupPtr;
   Uint32 errorCode;
+  Uint32 nodeId;
 };
 
 class DefineBackupConf {
@@ -158,7 +159,7 @@ class StartBackupRef {
   friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16);
 public:
-  STATIC_CONST( SignalLength = 4 );
+  STATIC_CONST( SignalLength = 5 );
 
   enum ErrorCode {
     FailedToAllocateTriggerRecord = 1
@@ -168,6 +169,7 @@ private:
   Uint32 backupPtr;
   Uint32 signalNo;
   Uint32 errorCode;
+  Uint32 nodeId;
 };
 
 class StartBackupConf {
@@ -232,9 +234,8 @@ public:
 private:
   Uint32 backupId;
   Uint32 backupPtr;
-  Uint32 tableId;
-  Uint32 fragmentNo;
   Uint32 errorCode;
+  Uint32 nodeId;
 };
 
 class BackupFragmentConf {
@@ -296,12 +297,13 @@ class StopBackupRef {
   friend bool printSTOP_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16);
 public:
-  STATIC_CONST( SignalLength = 3 );
+  STATIC_CONST( SignalLength = 4 );
 
 private:
   Uint32 backupId;
   Uint32 backupPtr;
   Uint32 errorCode;
+  Uint32 nodeId;
 };
 
 class StopBackupConf {
diff --git a/ndb/include/kernel/signaldata/BackupSignalData.hpp b/ndb/include/kernel/signaldata/BackupSignalData.hpp
index fb018026a49..b38dd8d14b2 100644
--- a/ndb/include/kernel/signaldata/BackupSignalData.hpp
+++ b/ndb/include/kernel/signaldata/BackupSignalData.hpp
@@ -240,6 +240,9 @@ public:
     FileOrScanError = 1325,            // slave -> coordinator
     BackupFailureDueToNodeFail = 1326, // slave -> slave
     OkToClean = 1327                   // master -> slave
+
+    ,AbortScan = 1328
+    ,IncompatibleVersions = 1329
   };
 private:
   Uint32 requestType;
diff --git a/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/ndb/src/common/debugger/signaldata/BackupImpl.cpp
index bdc34d614cf..e9b0188d93b 100644
--- a/ndb/src/common/debugger/signaldata/BackupImpl.cpp
+++ b/ndb/src/common/debugger/signaldata/BackupImpl.cpp
@@ -90,10 +90,8 @@ printBACKUP_FRAGMENT_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
 bool
 printBACKUP_FRAGMENT_REF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){
   BackupFragmentRef* sig = (BackupFragmentRef*)data;
-  fprintf(out, " backupPtr: %d backupId: %d\n",
-	  sig->backupPtr, sig->backupId);
-  fprintf(out, " tableId: %d fragmentNo: %d errorCode: %d\n",
-	  sig->tableId, sig->fragmentNo, sig->errorCode);
+  fprintf(out, " backupPtr: %d backupId: %d nodeId: %d errorCode: %d\n",
+	  sig->backupPtr, sig->backupId, sig->nodeId, sig->errorCode);
   return true;
 }
diff --git a/ndb/src/kernel/blocks/backup/Backup.cpp b/ndb/src/kernel/blocks/backup/Backup.cpp
index 840466460cb..fdff2702bec 100644
--- a/ndb/src/kernel/blocks/backup/Backup.cpp
+++ b/ndb/src/kernel/blocks/backup/Backup.cpp
@@ -67,31 +67,6 @@ static const Uint32 BACKUP_SEQUENCE = 0x1F000000;
 
 //#define DEBUG_ABORT
 
-//---------------------------------------------------------
-// Ignore this since a completed abort could have preceded
-// this message.
-//---------------------------------------------------------
-#define slaveAbortCheck() \
-if ((ptr.p->backupId != backupId) || \
-    (ptr.p->slaveState.getState() == ABORTING)) { \
-  jam(); \
-  return; \
-}
-
-#define masterAbortCheck() \
-if ((ptr.p->backupId != backupId) || \
-    (ptr.p->masterData.state.getState() == ABORTING)) { \
-  jam(); \
-  return; \
-}
-
-#define defineSlaveAbortCheck() \
-  if (ptr.p->slaveState.getState() == ABORTING) { \
-    jam(); \
-    closeFiles(signal, ptr); \
-    return; \
-  }
-
 static Uint32 g_TypeOfStart = NodeState::ST_ILLEGAL_TYPE;
 
 void
@@ -221,12 +196,7 @@ Backup::execCONTINUEB(Signal* signal)
     jam();
     BackupRecordPtr ptr;
     c_backupPool.getPtr(ptr, Tdata1);
-
-    if (ptr.p->slaveState.getState() == ABORTING) {
-      jam();
-      closeFiles(signal, ptr);
-      return;
-    }//if
+
     BackupFilePtr filePtr;
     ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr);
     FsBuffer & buf = filePtr.p->operation.dataBuffer;
@@ -324,13 +294,7 @@ Backup::execDUMP_STATE_ORD(Signal* signal)
     for(c_backups.first(ptr); ptr.i != RNIL; c_backups.next(ptr)){
       infoEvent("BackupRecord %d: BackupId: %d MasterRef: %x ClientRef: %x",
 		ptr.i, ptr.p->backupId, ptr.p->masterRef, ptr.p->clientRef);
-      if(ptr.p->masterRef == reference()){
-	infoEvent(" MasterState: %d State: %d",
-		  ptr.p->masterData.state.getState(),
-		  ptr.p->slaveState.getState());
-      } else {
-	infoEvent(" State: %d", ptr.p->slaveState.getState());
-      }
+      infoEvent(" State: %d", ptr.p->slaveState.getState());
       BackupFilePtr filePtr;
       for(ptr.p->files.first(filePtr); filePtr.i != RNIL;
 	  ptr.p->files.next(filePtr)){
@@ -338,7 +302,7 @@ Backup::execDUMP_STATE_ORD(Signal* signal)
 	infoEvent(" file %d: type: %d open: %d running: %d done: %d scan: %d",
 		  filePtr.i, filePtr.p->fileType, filePtr.p->fileOpened,
 		  filePtr.p->fileRunning,
-		  filePtr.p->fileDone, filePtr.p->scanRunning);
+		  filePtr.p->fileClosing, filePtr.p->scanRunning);
       }
     }
   }
@@ -356,6 +320,17 @@ Backup::execDUMP_STATE_ORD(Signal* signal)
     infoEvent("PagePool: %d",
 	      c_pagePool.getSize());
+
+    if(signal->getLength() == 2 && signal->theData[1] == 2424)
+    {
+      ndbrequire(c_tablePool.getSize() == c_tablePool.getNoOfFree());
+      ndbrequire(c_attributePool.getSize() == c_attributePool.getNoOfFree());
+      ndbrequire(c_backupPool.getSize() == c_backupPool.getNoOfFree());
+      ndbrequire(c_backupFilePool.getSize() == c_backupFilePool.getNoOfFree());
+      ndbrequire(c_pagePool.getSize() == c_pagePool.getNoOfFree());
+      ndbrequire(c_fragmentPool.getSize() == c_fragmentPool.getNoOfFree());
+      ndbrequire(c_triggerPool.getSize() == c_triggerPool.getNoOfFree());
+    }
   }
 }
 
@@ -511,27 +486,6 @@ const char* triggerNameFormat[] = {
   "NDB$BACKUP_%d_%d_DELETE"
 };
 
-const Backup::State
-Backup::validMasterTransitions[] = {
-  INITIAL,  DEFINING,
-  DEFINING, DEFINED,
-  DEFINED,  STARTED,
-  STARTED,  SCANNING,
-  SCANNING, STOPPING,
-  STOPPING, INITIAL,
-
-  DEFINING, ABORTING,
-  DEFINED,  ABORTING,
-  STARTED,  ABORTING,
-  SCANNING, ABORTING,
-  STOPPING, ABORTING,
-  ABORTING, ABORTING,
-
-  DEFINING, INITIAL,
-  ABORTING, INITIAL,
-  INITIAL,  INITIAL
-};
-
 const Backup::State
 Backup::validSlaveTransitions[] = {
   INITIAL,  DEFINING,
@@ -561,10 +515,6 @@ const Uint32
 Backup::validSlaveTransitionsCount =
 sizeof(Backup::validSlaveTransitions) / sizeof(Backup::State);
 
-const Uint32
-Backup::validMasterTransitionsCount =
-sizeof(Backup::validMasterTransitions) / sizeof(Backup::State);
-
 void
 Backup::CompoundState::setState(State newState){
   bool found = false;
@@ -578,7 +528,8 @@ Backup::CompoundState::setState(State newState){
       break;
     }
   }
-  ndbrequire(found);
+
+  //ndbrequire(found);
 
   if (newState == INITIAL)
     abortState = INITIAL;
@@ -647,8 +598,7 @@ Backup::execNODE_FAILREP(Signal* signal)
   Uint32 theFailedNodes[NodeBitmask::Size];
   for (Uint32 i = 0; i < NodeBitmask::Size; i++)
     theFailedNodes[i] = rep->theNodes[i];
-
-// NodeId old_master_node_id = getMasterNodeId();
+
   c_masterNodeId = new_master_node_id;
 
   NodePtr nodePtr;
@@ -686,15 +636,24 @@ Backup::execNODE_FAILREP(Signal* signal)
 }
 
 bool
-Backup::verifyNodesAlive(const NdbNodeBitmask& aNodeBitMask)
+Backup::verifyNodesAlive(BackupRecordPtr ptr,
+			 const NdbNodeBitmask& aNodeBitMask)
 {
+  Uint32 version = getNodeInfo(getOwnNodeId()).m_version;
   for (Uint32 i = 0; i < MAX_NDB_NODES; i++) {
     jam();
    if(aNodeBitMask.get(i)) {
      if(!c_aliveNodes.get(i)){
        jam();
+	ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail);
        return false;
      }//if
+      if(getNodeInfo(i).m_version != version)
+      {
+	jam();
+	ptr.p->setErrorCode(AbortBackupOrd::IncompatibleVersions);
+	return false;
+      }
    }//if
  }//for
  return true;
@@ -706,9 +665,9 @@ Backup::checkNodeFail(Signal* signal,
 		      NodeId newCoord,
 		      Uint32 theFailedNodes[NodeBitmask::Size])
 {
-  ndbrequire( ptr.p->nodes.get(newCoord)); /* just to make sure newCoord
-					    * is part of the backup
-					    */
+  NdbNodeBitmask mask;
+  mask.assign(2, theFailedNodes);
+
   /* Update ptr.p->nodes to be up to date with current alive nodes
   */
   NodePtr nodePtr;
@@ -730,26 +689,42 @@ Backup::checkNodeFail(Signal* signal,
    return; // failed node is not part of backup process, safe to continue
  }
 
-  bool doMasterTakeover = false;
-  if(NodeBitmask::get(theFailedNodes, refToNode(ptr.p->masterRef))){
-    jam();
-    doMasterTakeover = true;
-  };
-
-  if (newCoord == getOwnNodeId()){
-    jam();
-    if (doMasterTakeover) {
-      /**
-       * I'm new master
-       */
-      CRASH_INSERTION((10002));
-#ifdef DEBUG_ABORT
-      ndbout_c("**** Master Takeover: Node failed: Master id = %u",
-	       refToNode(ptr.p->masterRef));
-#endif
-      masterTakeOver(signal, ptr);
+  if(mask.get(refToNode(ptr.p->masterRef)))
+  {
+    /**
+     * Master died...abort
+     */
+    ptr.p->masterRef = reference();
+    ptr.p->nodes.clear();
+    ptr.p->nodes.set(getOwnNodeId());
ptr.p->setErrorCode(AbortBackupOrd::BackupFailureDueToNodeFail); + switch(ptr.p->m_gsn){ + case GSN_DEFINE_BACKUP_REQ: + case GSN_START_BACKUP_REQ: + case GSN_BACKUP_FRAGMENT_REQ: + case GSN_STOP_BACKUP_REQ: + // I'm currently processing...reply to self and abort... + ptr.p->masterData.gsn = ptr.p->m_gsn; + ptr.p->masterData.sendCounter = ptr.p->nodes; return; - }//if + case GSN_DEFINE_BACKUP_REF: + case GSN_DEFINE_BACKUP_CONF: + case GSN_START_BACKUP_REF: + case GSN_START_BACKUP_CONF: + case GSN_BACKUP_FRAGMENT_REF: + case GSN_BACKUP_FRAGMENT_CONF: + case GSN_STOP_BACKUP_REF: + case GSN_STOP_BACKUP_CONF: + ptr.p->masterData.gsn = GSN_DEFINE_BACKUP_REQ; + masterAbort(signal, ptr); + return; + case GSN_ABORT_BACKUP_ORD: + // Already aborting + return; + } + } + else if (newCoord == getOwnNodeId()) + { /** * I'm master for this backup */ @@ -759,61 +734,81 @@ Backup::checkNodeFail(Signal* signal, ndbout_c("**** Master: Node failed: Master id = %u", refToNode(ptr.p->masterRef)); #endif - masterAbort(signal, ptr, false); + + Uint32 gsn, len, pos; + ptr.p->nodes.bitANDC(mask); + switch(ptr.p->masterData.gsn){ + case GSN_DEFINE_BACKUP_REQ: + { + DefineBackupRef * ref = (DefineBackupRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + gsn= GSN_DEFINE_BACKUP_REF; + len= DefineBackupRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_START_BACKUP_REQ: + { + StartBackupRef * ref = (StartBackupRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + ref->signalNo = ptr.p->masterData.startBackup.signalNo; + gsn= GSN_START_BACKUP_REF; + len= StartBackupRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_BACKUP_FRAGMENT_REQ: + { + BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + gsn= GSN_BACKUP_FRAGMENT_REF; + len= BackupFragmentRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_STOP_BACKUP_REQ: + { + StopBackupRef * ref = (StopBackupRef*)signal->getDataPtr(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; + gsn= GSN_STOP_BACKUP_REF; + len= StopBackupRef::SignalLength; + pos= &ref->nodeId - signal->getDataPtr(); + break; + } + case GSN_CREATE_TRIG_REQ: + case GSN_ALTER_TRIG_REQ: + case GSN_WAIT_GCP_REQ: + case GSN_UTIL_SEQUENCE_REQ: + case GSN_UTIL_LOCK_REQ: + case GSN_DROP_TRIG_REQ: + return; + } + + for(Uint32 i = 0; (i = mask.find(i+1)) != NdbNodeBitmask::NotFound; ) + { + signal->theData[pos] = i; + sendSignal(reference(), gsn, signal, len, JBB); +#ifdef DEBUG_ABORT + ndbout_c("sending %d to self from %d", gsn, i); +#endif + } return; }//if - - /** - * If there's a new master, (it's not me) - * but remember who it is - */ - ptr.p->masterRef = calcBackupBlockRef(newCoord); -#ifdef DEBUG_ABORT - ndbout_c("**** Slave: Node failed: Master id = %u", - refToNode(ptr.p->masterRef)); -#endif + /** * I abort myself as slave if not master */ CRASH_INSERTION((10021)); - // slaveAbort(signal, ptr); } -void -Backup::masterTakeOver(Signal* signal, BackupRecordPtr ptr) -{ - ptr.p->masterRef = reference(); - ptr.p->masterData.gsn = MAX_GSN + 1; - - switch(ptr.p->slaveState.getState()){ - case INITIAL: 
- jam(); - ptr.p->masterData.state.forceState(INITIAL); - break; - case ABORTING: - jam(); - case DEFINING: - jam(); - case DEFINED: - jam(); - case STARTED: - jam(); - case SCANNING: - jam(); - ptr.p->masterData.state.forceState(STARTED); - break; - case STOPPING: - jam(); - case CLEANING: - jam(); - ptr.p->masterData.state.forceState(STOPPING); - break; - default: - ndbrequire(false); - } - masterAbort(signal, ptr, false); -} - void Backup::execINCL_NODEREQ(Signal* signal) { @@ -895,8 +890,8 @@ Backup::execBACKUP_REQ(Signal* signal) ndbrequire(ptr.p->pages.empty()); ndbrequire(ptr.p->tables.isEmpty()); - ptr.p->masterData.state.forceState(INITIAL); - ptr.p->masterData.state.setState(DEFINING); + ptr.p->m_gsn = 0; + ptr.p->errorCode = 0; ptr.p->clientRef = senderRef; ptr.p->clientData = senderData; ptr.p->masterRef = reference(); @@ -905,6 +900,7 @@ Backup::execBACKUP_REQ(Signal* signal) ptr.p->backupKey[0] = 0; ptr.p->backupKey[1] = 0; ptr.p->backupDataLen = 0; + ptr.p->masterData.errorCode = 0; ptr.p->masterData.dropTrig.tableId = RNIL; ptr.p->masterData.alterTrig.tableId = RNIL; @@ -928,7 +924,6 @@ Backup::execUTIL_SEQUENCE_REF(Signal* signal) ndbrequire(ptr.i == RNIL); c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ); - ptr.p->masterData.gsn = 0; sendBackupRef(signal, ptr, BackupRef::SequenceFailure); }//execUTIL_SEQUENCE_REF() @@ -938,8 +933,7 @@ Backup::sendBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errorCode) { jam(); sendBackupRef(ptr.p->clientRef, signal, ptr.p->clientData, errorCode); - // ptr.p->masterData.state.setState(INITIAL); - cleanupSlaveResources(ptr); + cleanup(signal, ptr); } void @@ -968,7 +962,8 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal) UtilSequenceConf * conf = (UtilSequenceConf*)signal->getDataPtr(); - if(conf->requestType == UtilSequenceReq::Create) { + if(conf->requestType == UtilSequenceReq::Create) + { jam(); sendSTTORRY(signal); // At startup in NDB return; @@ -979,18 +974,20 @@ Backup::execUTIL_SEQUENCE_CONF(Signal* signal) c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ); - ptr.p->masterData.gsn = 0; - if (ptr.p->masterData.state.getState() == ABORTING) { + + if (ptr.p->checkError()) + { jam(); sendBackupRef(signal, ptr, ptr.p->errorCode); return; }//if - if (ERROR_INSERTED(10023)) { - ptr.p->masterData.state.setState(ABORTING); + + if (ERROR_INSERTED(10023)) + { sendBackupRef(signal, ptr, 323); return; }//if - ndbrequire(ptr.p->masterData.state.getState() == DEFINING); + { Uint64 backupId; @@ -1018,7 +1015,6 @@ Backup::defineBackupMutex_locked(Signal* signal, Uint32 ptrI, Uint32 retVal){ c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ); - ptr.p->masterData.gsn = 0; ptr.p->masterData.gsn = GSN_UTIL_LOCK_REQ; Mutex mutex(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); @@ -1040,14 +1036,13 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) c_backupPool.getPtr(ptr); ndbrequire(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ); - ptr.p->masterData.gsn = 0; if (ERROR_INSERTED(10031)) { - ptr.p->masterData.state.setState(ABORTING); ptr.p->setErrorCode(331); }//if - if (ptr.p->masterData.state.getState() == ABORTING) { + if (ptr.p->checkError()) + { jam(); /** @@ -1062,13 +1057,11 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); jam(); mutex2.unlock(); // ignore response - + sendBackupRef(signal, 
ptr, ptr.p->errorCode); return; }//if - ndbrequire(ptr.p->masterData.state.getState() == DEFINING); - sendDefineBackupReq(signal, ptr); } @@ -1078,33 +1071,6 @@ Backup::dictCommitTableMutex_locked(Signal* signal, Uint32 ptrI,Uint32 retVal) * *****************************************************************************/ -void -Backup::sendSignalAllWait(BackupRecordPtr ptr, Uint32 gsn, Signal *signal, - Uint32 signalLength, bool executeDirect) -{ - jam(); - ptr.p->masterData.gsn = gsn; - ptr.p->masterData.sendCounter.clearWaitingFor(); - NodePtr node; - for(c_nodes.first(node); node.i != RNIL; c_nodes.next(node)){ - jam(); - const Uint32 nodeId = node.p->nodeId; - if(node.p->alive && ptr.p->nodes.get(nodeId)){ - jam(); - - ptr.p->masterData.sendCounter.setWaitingFor(nodeId); - - const BlockReference ref = numberToRef(BACKUP, nodeId); - if (!executeDirect || ref != reference()) { - sendSignal(ref, gsn, signal, signalLength, JBB); - }//if - }//if - }//for - if (executeDirect) { - EXECUTE_DIRECT(BACKUP, gsn, signal, signalLength); - } -} - bool Backup::haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId) { @@ -1114,10 +1080,6 @@ Backup::haveAllSignals(BackupRecordPtr ptr, Uint32 gsn, Uint32 nodeId) ndbrequire(ptr.p->masterData.sendCounter.isWaitingFor(nodeId)); ptr.p->masterData.sendCounter.clearWaitingFor(nodeId); - - if (ptr.p->masterData.sendCounter.done()) - ptr.p->masterData.gsn = 0; - return ptr.p->masterData.sendCounter.done(); } @@ -1138,11 +1100,12 @@ Backup::sendDefineBackupReq(Signal *signal, BackupRecordPtr ptr) req->nodes = ptr.p->nodes; req->backupDataLen = ptr.p->backupDataLen; - ptr.p->masterData.errorCode = 0; - ptr.p->okToCleanMaster = false; // master must wait with cleaning to last - sendSignalAllWait(ptr, GSN_DEFINE_BACKUP_REQ, signal, - DefineBackupReq::SignalLength, - true /* do execute direct on oneself */); + ptr.p->masterData.gsn = GSN_DEFINE_BACKUP_REQ; + ptr.p->masterData.sendCounter = ptr.p->nodes; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_DEFINE_BACKUP_REQ, signal, + DefineBackupReq::SignalLength, JBB); + /** * Now send backup data */ @@ -1167,17 +1130,15 @@ Backup::execDEFINE_BACKUP_REF(Signal* signal) jamEntry(); DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtr(); - + const Uint32 ptrI = ref->backupPtr; - const Uint32 backupId = ref->backupId; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); - + //const Uint32 backupId = ref->backupId; + const Uint32 nodeId = ref->nodeId; + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->masterData.errorCode = ref->errorCode; + ptr.p->setErrorCode(ref->errorCode); defineBackupReply(signal, ptr, nodeId); } @@ -1188,17 +1149,16 @@ Backup::execDEFINE_BACKUP_CONF(Signal* signal) DefineBackupConf* conf = (DefineBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - - if (ERROR_INSERTED(10024)) { - ptr.p->masterData.errorCode = 324; - }//if + if (ERROR_INSERTED(10024)) + { + ptr.p->setErrorCode(324); + } defineBackupReply(signal, ptr, nodeId); } @@ -1210,6 +1170,7 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) jam(); return; } + /** * Unlock mutexes */ @@ -1223,16 +1184,10 @@ 
Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) jam(); mutex2.unlock(); // ignore response - if(ptr.p->errorCode) { + if(ptr.p->checkError()) + { jam(); - ptr.p->masterData.errorCode = ptr.p->errorCode; - } - - if(ptr.p->masterData.errorCode){ - jam(); - ptr.p->setErrorCode(ptr.p->masterData.errorCode); - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean); - masterSendAbortBackup(signal, ptr); + masterAbort(signal, ptr); return; } @@ -1252,7 +1207,6 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3); sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB); - ptr.p->masterData.state.setState(DEFINED); /** * Prepare Trig */ @@ -1286,7 +1240,6 @@ Backup::sendCreateTrig(Signal* signal, { CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend(); - ptr.p->errorCode = 0; ptr.p->masterData.gsn = GSN_CREATE_TRIG_REQ; ptr.p->masterData.sendCounter = 3; ptr.p->masterData.createTrig.tableId = tabPtr.p->tableId; @@ -1395,17 +1348,14 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) return; }//if - ptr.p->masterData.gsn = 0; + if (ERROR_INSERTED(10025)) + { + ptr.p->errorCode = 325; + } if(ptr.p->checkError()) { jam(); - masterAbort(signal, ptr, true); - return; - }//if - - if (ERROR_INSERTED(10025)) { - ptr.p->errorCode = 325; - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; }//if @@ -1425,10 +1375,7 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) /** * Finished with all tables, send StartBackupReq */ - ptr.p->masterData.state.setState(STARTED); - ptr.p->tables.first(tabPtr); - ptr.p->errorCode = 0; ptr.p->masterData.startBackup.signalNo = 0; ptr.p->masterData.startBackup.noOfSignals = (ptr.p->tables.noOfElements() + StartBackupReq::MaxTableTriggers - 1) / @@ -1467,9 +1414,12 @@ Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) }//for req->noOfTableTriggers = i; - sendSignalAllWait(ptr, GSN_START_BACKUP_REQ, signal, - StartBackupReq::HeaderLength + - (i * StartBackupReq::TableTriggerLength)); + ptr.p->masterData.gsn = GSN_START_BACKUP_REQ; + ptr.p->masterData.sendCounter = ptr.p->nodes; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_START_BACKUP_REQ, signal, + StartBackupReq::HeaderLength + + (i * StartBackupReq::TableTriggerLength), JBB); } void @@ -1479,15 +1429,13 @@ Backup::execSTART_BACKUP_REF(Signal* signal) StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr(); const Uint32 ptrI = ref->backupPtr; - const Uint32 backupId = ref->backupId; + //const Uint32 backupId = ref->backupId; const Uint32 signalNo = ref->signalNo; - const Uint32 nodeId = refToNode(signal->senderBlockRef()); + const Uint32 nodeId = ref->nodeId; BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->setErrorCode(ref->errorCode); startBackupReply(signal, ptr, nodeId, signalNo); } @@ -1499,15 +1447,13 @@ Backup::execSTART_BACKUP_CONF(Signal* signal) StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 signalNo = conf->signalNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - startBackupReply(signal, ptr, nodeId, signalNo); } @@ -1524,17 +1470,16 
@@ Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, return; } + if (ERROR_INSERTED(10026)) + { + ptr.p->errorCode = 326; + } + if(ptr.p->checkError()){ jam(); - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; } - - if (ERROR_INSERTED(10026)) { - ptr.p->errorCode = 326; - masterAbort(signal, ptr, true); - return; - }//if TablePtr tabPtr; c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr); @@ -1566,7 +1511,6 @@ Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr) { AlterTrigReq * req =(AlterTrigReq *)signal->getDataPtrSend(); - ptr.p->errorCode = 0; ptr.p->masterData.gsn = GSN_ALTER_TRIG_REQ; ptr.p->masterData.sendCounter = 0; @@ -1608,6 +1552,7 @@ Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr) return; }//if ptr.p->masterData.alterTrig.tableId = RNIL; + /** * Finished with all tables */ @@ -1669,11 +1614,9 @@ Backup::alterTrigReply(Signal* signal, BackupRecordPtr ptr) return; }//if - ptr.p->masterData.gsn = 0; - if(ptr.p->checkError()){ jam(); - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; }//if @@ -1719,11 +1662,10 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ ndbrequire(ptr.p->masterRef == reference()); ndbrequire(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ); - ptr.p->masterData.gsn = 0; if(ptr.p->checkError()) { jam(); - masterAbort(signal, ptr, true); + masterAbort(signal, ptr); return; }//if @@ -1731,13 +1673,13 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ jam(); CRASH_INSERTION((10008)); ptr.p->startGCP = gcp; - ptr.p->masterData.state.setState(SCANNING); + ptr.p->masterData.sendCounter= 0; + ptr.p->masterData.gsn = GSN_BACKUP_FRAGMENT_REQ; nextFragment(signal, ptr); } else { jam(); CRASH_INSERTION((10009)); ptr.p->stopGCP = gcp; - ptr.p->masterData.state.setState(STOPPING); sendDropTrig(signal, ptr); // regular dropping of triggers }//if } @@ -1787,6 +1729,7 @@ Backup::nextFragment(Signal* signal, BackupRecordPtr ptr) req->fragmentNo = i; req->count = 0; + ptr.p->masterData.sendCounter++; const BlockReference ref = numberToRef(BACKUP, nodeId); sendSignal(ref, GSN_BACKUP_FRAGMENT_REQ, signal, BackupFragmentReq::SignalLength, JBB); @@ -1824,7 +1767,7 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) BackupFragmentConf * conf = (BackupFragmentConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 tableId = conf->tableId; const Uint32 fragmentNo = conf->fragmentNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); @@ -1834,10 +1777,9 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->noOfBytes += noOfBytes; ptr.p->noOfRecords += noOfRecords; + ptr.p->masterData.sendCounter--; TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); @@ -1852,17 +1794,24 @@ Backup::execBACKUP_FRAGMENT_CONF(Signal* signal) fragPtr.p->scanned = 1; fragPtr.p->scanning = 0; - if(ptr.p->checkError()) { - jam(); - masterAbort(signal, ptr, true); - return; - }//if - if (ERROR_INSERTED(10028)) { + if (ERROR_INSERTED(10028)) + { ptr.p->errorCode = 328; - masterAbort(signal, ptr, true); - return; - }//if - nextFragment(signal, ptr); + } + + if(ptr.p->checkError()) + { + if(ptr.p->masterData.sendCounter.done()) + { + jam(); + masterAbort(signal, ptr); + return; + }//if + } + else + { + nextFragment(signal, ptr); + } } void @@ -1874,15 +1823,52 @@ Backup::execBACKUP_FRAGMENT_REF(Signal* 
signal) BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtr(); const Uint32 ptrI = ref->backupPtr; - const Uint32 backupId = ref->backupId; + //const Uint32 backupId = ref->backupId; + const Uint32 nodeId = ref->nodeId; BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { + jam(); + FragmentPtr fragPtr; + Array & frags = tabPtr.p->fragments; + const Uint32 fragCount = frags.getSize(); + + for(Uint32 i = 0; ifragments.getPtr(fragPtr, i); + if(fragPtr.p->scanning != 0 && nodeId == fragPtr.p->node) + { + jam(); + ndbrequire(fragPtr.p->scanned == 0); + fragPtr.p->scanned = 1; + fragPtr.p->scanning = 0; + goto done; + } + } + } + ndbrequire(false); +done: + ptr.p->masterData.sendCounter--; ptr.p->setErrorCode(ref->errorCode); - masterAbort(signal, ptr, true); + + if(ptr.p->masterData.sendCounter.done()) + { + jam(); + masterAbort(signal, ptr); + return; + }//if + + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->requestType = AbortBackupOrd::LogBufferFull; + ord->senderData= ptr.i; + execABORT_BACKUP_ORD(signal); } /***************************************************************************** @@ -1910,15 +1896,7 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr) jam(); ptr.p->masterData.dropTrig.tableId = RNIL; - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean); - - if(ptr.p->masterData.state.getState() == STOPPING) { - jam(); - sendStopBackup(signal, ptr); - return; - }//if - ndbrequire(ptr.p->masterData.state.getState() == ABORTING); - masterSendAbortBackup(signal, ptr); + sendStopBackup(signal, ptr); }//if } @@ -2010,7 +1988,6 @@ Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr) return; }//if - ptr.p->masterData.gsn = 0; sendDropTrig(signal, ptr); // recursive next } @@ -2023,14 +2000,23 @@ void Backup::execSTOP_BACKUP_REF(Signal* signal) { jamEntry(); - ndbrequire(0); + + StopBackupRef* ref = (StopBackupRef*)signal->getDataPtr(); + const Uint32 ptrI = ref->backupPtr; + //const Uint32 backupId = ref->backupId; + const Uint32 nodeId = ref->nodeId; + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptrI); + + ptr.p->setErrorCode(ref->errorCode); + stopBackupReply(signal, ptr, nodeId); } void Backup::sendStopBackup(Signal* signal, BackupRecordPtr ptr) { jam(); - ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; StopBackupReq* stop = (StopBackupReq*)signal->getDataPtrSend(); stop->backupPtr = ptr.i; @@ -2038,8 +2024,11 @@ Backup::sendStopBackup(Signal* signal, BackupRecordPtr ptr) stop->startGCP = ptr.p->startGCP; stop->stopGCP = ptr.p->stopGCP; - sendSignalAllWait(ptr, GSN_STOP_BACKUP_REQ, signal, - StopBackupReq::SignalLength); + ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; + ptr.p->masterData.sendCounter = ptr.p->nodes; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + sendSignal(rg, GSN_STOP_BACKUP_REQ, signal, + StopBackupReq::SignalLength, JBB); } void @@ -2049,14 +2038,12 @@ Backup::execSTOP_BACKUP_CONF(Signal* signal) StopBackupConf* conf = (StopBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; - const Uint32 backupId = conf->backupId; + //const Uint32 backupId = conf->backupId; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - masterAbortCheck(); // macro will do return if ABORTING - ptr.p->noOfLogBytes += 
conf->noOfLogBytes; ptr.p->noOfLogRecords += conf->noOfLogRecords; @@ -2073,35 +2060,39 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) return; } - // ptr.p->masterData.state.setState(INITIAL); - - // send backup complete first to slaves so that they know sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupComplete); - - BackupCompleteRep * rep = (BackupCompleteRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; - rep->senderData = ptr.p->clientData; - rep->startGCP = ptr.p->startGCP; - rep->stopGCP = ptr.p->stopGCP; - rep->noOfBytes = ptr.p->noOfBytes; - rep->noOfRecords = ptr.p->noOfRecords; - rep->noOfLogBytes = ptr.p->noOfLogBytes; - rep->noOfLogRecords = ptr.p->noOfLogRecords; - rep->nodes = ptr.p->nodes; - sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, - BackupCompleteRep::SignalLength, JBB); - - signal->theData[0] = NDB_LE_BackupCompleted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - signal->theData[3] = ptr.p->startGCP; - signal->theData[4] = ptr.p->stopGCP; - signal->theData[5] = ptr.p->noOfBytes; - signal->theData[6] = ptr.p->noOfRecords; - signal->theData[7] = ptr.p->noOfLogBytes; - signal->theData[8] = ptr.p->noOfLogRecords; - ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + + if(!ptr.p->checkError()) + { + BackupCompleteRep * rep = (BackupCompleteRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->senderData = ptr.p->clientData; + rep->startGCP = ptr.p->startGCP; + rep->stopGCP = ptr.p->stopGCP; + rep->noOfBytes = ptr.p->noOfBytes; + rep->noOfRecords = ptr.p->noOfRecords; + rep->noOfLogBytes = ptr.p->noOfLogBytes; + rep->noOfLogRecords = ptr.p->noOfLogRecords; + rep->nodes = ptr.p->nodes; + sendSignal(ptr.p->clientRef, GSN_BACKUP_COMPLETE_REP, signal, + BackupCompleteRep::SignalLength, JBB); + + signal->theData[0] = NDB_LE_BackupCompleted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->startGCP; + signal->theData[4] = ptr.p->stopGCP; + signal->theData[5] = ptr.p->noOfBytes; + signal->theData[6] = ptr.p->noOfRecords; + signal->theData[7] = ptr.p->noOfLogBytes; + signal->theData[8] = ptr.p->noOfLogRecords; + ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+9); + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 9+NdbNodeBitmask::Size, JBB); + } + else + { + masterAbort(signal, ptr); + } } /***************************************************************************** @@ -2110,199 +2101,96 @@ Backup::stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) * *****************************************************************************/ void -Backup::masterAbort(Signal* signal, BackupRecordPtr ptr, bool controlledAbort) +Backup::masterAbort(Signal* signal, BackupRecordPtr ptr) { - if(ptr.p->masterData.state.getState() == ABORTING) { -#ifdef DEBUG_ABORT - ndbout_c("---- Master already aborting"); -#endif - jam(); - return; - } jam(); #ifdef DEBUG_ABORT ndbout_c("************ masterAbort"); #endif - - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure); - if (!ptr.p->checkError()) - ptr.p->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - - const State s = ptr.p->masterData.state.getState(); - - ptr.p->masterData.state.setState(ABORTING); - - ndbrequire(s == INITIAL || - s == STARTED || - s == DEFINING || - s == DEFINED || - s == SCANNING || - s == STOPPING || - s == ABORTING); 
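A note on the pattern introduced above: sendStopBackup() now records the outstanding request in masterData.gsn, broadcasts STOP_BACKUP_REQ to a NodeReceiverGroup, and seeds masterData.sendCounter with the bitmask of participating nodes; each STOP_BACKUP_CONF/REF then clears one node until sendCounter.done() reports that the fan-in is complete. A minimal, self-contained model of that bookkeeping (ReplyCounter and the 64-node limit are illustrative stand-ins, not NDB's real SignalCounter API):

    #include <bitset>
    #include <cstdint>
    #include <iostream>

    // Tracks which nodes still owe a reply to a broadcast request.
    class ReplyCounter {
      std::bitset<64> waiting;                         // one bit per node id
    public:
      void setWaitingFor(const std::bitset<64>& nodes) { waiting = nodes; }
      void clearWaitingFor(uint32_t nodeId)            { waiting.reset(nodeId); }
      bool done() const                                { return waiting.none(); }
    };

    int main() {
      std::bitset<64> nodes;                           // nodes 1..3 take part
      nodes.set(1); nodes.set(2); nodes.set(3);

      ReplyCounter sendCounter;
      sendCounter.setWaitingFor(nodes);                // "broadcast" STOP_BACKUP_REQ

      // Each CONF/REF clears that node's bit; the master acts only once
      // the last outstanding reply has arrived.
      for (unsigned nodeId : {2u, 1u, 3u}) {
        sendCounter.clearWaitingFor(nodeId);
        std::cout << "reply from node " << nodeId
                  << (sendCounter.done() ? " -> all replies in" : "") << '\n';
      }
      return 0;
    }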
- if(ptr.p->masterData.gsn == GSN_UTIL_SEQUENCE_REQ) { + if(ptr.p->masterData.errorCode != 0) + { jam(); - DEBUG_OUT("masterAbort: gsn = GSN_UTIL_SEQUENCE_REQ"); - //------------------------------------------------------- - // We are waiting for UTIL_SEQUENCE response. We rely on - // this to arrive and check for ABORTING in response. - // No slaves are involved at this point and ABORT simply - // results in BACKUP_REF to client - //------------------------------------------------------- - /** - * Waiting for Sequence Id - * @see execUTIL_SEQUENCE_CONF - */ return; - }//if + } + + BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtrSend(); + rep->backupId = ptr.p->backupId; + rep->senderData = ptr.p->clientData; + rep->reason = ptr.p->errorCode; + sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, + BackupAbortRep::SignalLength, JBB); - if(ptr.p->masterData.gsn == GSN_UTIL_LOCK_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_UTIL_LOCK_REQ"); - //------------------------------------------------------- - // We are waiting for UTIL_LOCK response (mutex). We rely on - // this to arrive and check for ABORTING in response. - // No slaves are involved at this point and ABORT simply - // results in BACKUP_REF to client - //------------------------------------------------------- - /** - * Waiting for lock - * @see execUTIL_LOCK_CONF - */ + signal->theData[0] = NDB_LE_BackupAborted; + signal->theData[1] = ptr.p->clientRef; + signal->theData[2] = ptr.p->backupId; + signal->theData[3] = ptr.p->errorCode; + sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); + + ndbrequire(ptr.p->errorCode); + ptr.p->masterData.errorCode = ptr.p->errorCode; + + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->senderData= ptr.i; + NodeReceiverGroup rg(BACKUP, ptr.p->nodes); + + switch(ptr.p->masterData.gsn){ + case GSN_DEFINE_BACKUP_REQ: + ord->requestType = AbortBackupOrd::BackupFailure; + sendSignal(rg, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); return; - }//if - - /** - * Unlock mutexes only at master - */ - jam(); - Mutex mutex1(signal, c_mutexMgr, ptr.p->masterData.m_dictCommitTableMutex); - jam(); - mutex1.unlock(); // ignore response - - jam(); - Mutex mutex2(signal, c_mutexMgr, ptr.p->masterData.m_defineBackupMutex); - jam(); - mutex2.unlock(); // ignore response - - if (!controlledAbort) { + case GSN_CREATE_TRIG_REQ: + case GSN_START_BACKUP_REQ: + case GSN_ALTER_TRIG_REQ: + case GSN_WAIT_GCP_REQ: + case GSN_BACKUP_FRAGMENT_REQ: jam(); - if (s == DEFINING) { - jam(); -//------------------------------------------------------- -// If we are in the defining phase all work is done by -// slaves. No triggers have been allocated thus slaves -// may free all "Master" resources, let them know... -//------------------------------------------------------- - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::OkToClean); - return; - }//if - if (s == DEFINED) { - jam(); -//------------------------------------------------------- -// DEFINED is the state when triggers are created. We rely -// on that DICT will report create trigger failure in case -// of node failure. Thus no special action is needed here. -// We will check for errorCode != 0 when receiving -// replies on create trigger. 
-//------------------------------------------------------- - return; - }//if - if(ptr.p->masterData.gsn == GSN_WAIT_GCP_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_WAIT_GCP_REQ"); -//------------------------------------------------------- -// We are waiting for WAIT_GCP response. We rely on -// this to arrive and check for ABORTING in response. -//------------------------------------------------------- - - /** - * Waiting for GCP - * @see execWAIT_GCP_CONF - */ - return; - }//if - - if(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_ALTER_TRIG_REQ"); -//------------------------------------------------------- -// We are waiting for ALTER_TRIG response. We rely on -// this to arrive and check for ABORTING in response. -//------------------------------------------------------- - - /** - * All triggers haven't been created yet - */ - return; - }//if - - if(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ) { - jam(); - DEBUG_OUT("masterAbort: gsn = GSN_DROP_TRIG_REQ"); -//------------------------------------------------------- -// We are waiting for DROP_TRIG response. We rely on -// this to arrive and will continue dropping triggers -// until completed. -//------------------------------------------------------- - - /** - * I'm currently dropping the trigger - */ - return; - }//if - }//if - -//------------------------------------------------------- -// If we are waiting for START_BACKUP responses we can -// safely start dropping triggers (state == STARTED). -// We will ignore any START_BACKUP responses after this. -//------------------------------------------------------- - DEBUG_OUT("masterAbort: sendDropTrig"); - sendDropTrig(signal, ptr); // dropping due to error + ptr.p->stopGCP= ptr.p->startGCP + 1; + sendDropTrig(signal, ptr); // dropping due to error + return; + case GSN_UTIL_SEQUENCE_REQ: + case GSN_UTIL_LOCK_REQ: + case GSN_DROP_TRIG_REQ: + ndbrequire(false); + return; + case GSN_STOP_BACKUP_REQ: + return; + } } void -Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr) +Backup::abort_scan(Signal * signal, BackupRecordPtr ptr) { - if (ptr.p->masterData.state.getState() != ABORTING) { - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure); - ptr.p->masterData.state.setState(ABORTING); + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->senderData= ptr.i; + ord->requestType = AbortBackupOrd::AbortScan; + + TablePtr tabPtr; + ptr.p->tables.first(tabPtr); + for(; tabPtr.i != RNIL; ptr.p->tables.next(tabPtr)) { + jam(); + FragmentPtr fragPtr; + Array & frags = tabPtr.p->fragments; + const Uint32 fragCount = frags.getSize(); + + for(Uint32 i = 0; ifragments.getPtr(fragPtr, i); + const Uint32 nodeId = fragPtr.p->node; + if(fragPtr.p->scanning != 0 && ptr.p->nodes.get(nodeId)) { + jam(); + + const BlockReference ref = numberToRef(BACKUP, nodeId); + sendSignal(ref, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); + + } + } } - const State s = ptr.p->masterData.state.getAbortState(); - - /** - * First inform to client - */ - if(s == DEFINING) { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("** Abort: sending BACKUP_REF to mgmtsrvr"); -#endif - sendBackupRef(ptr.p->clientRef, signal, ptr.p->clientData, - ptr.p->errorCode); - - } else { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("** Abort: sending BACKUP_ABORT_REP to mgmtsrvr"); -#endif - BackupAbortRep* rep = (BackupAbortRep*)signal->getDataPtrSend(); - rep->backupId = ptr.p->backupId; 
- rep->senderData = ptr.p->clientData; - rep->reason = ptr.p->errorCode; - sendSignal(ptr.p->clientRef, GSN_BACKUP_ABORT_REP, signal, - BackupAbortRep::SignalLength, JBB); - - signal->theData[0] = NDB_LE_BackupAborted; - signal->theData[1] = ptr.p->clientRef; - signal->theData[2] = ptr.p->backupId; - signal->theData[3] = ptr.p->errorCode; - sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 4, JBB); - }//if - - // ptr.p->masterData.state.setState(INITIAL); - - sendAbortBackupOrd(signal, ptr, AbortBackupOrd::BackupFailure); } /***************************************************************************** @@ -2313,26 +2201,17 @@ Backup::masterSendAbortBackup(Signal* signal, BackupRecordPtr ptr) void Backup::defineBackupRef(Signal* signal, BackupRecordPtr ptr, Uint32 errCode) { - if (ptr.p->slaveState.getState() == ABORTING) { - jam(); - return; - } - ptr.p->slaveState.setState(ABORTING); - - if (errCode != 0) { - jam(); - ptr.p->setErrorCode(errCode); - }//if + ptr.p->m_gsn = GSN_DEFINE_BACKUP_REF; + ptr.p->setErrorCode(errCode); ndbrequire(ptr.p->errorCode != 0); - + DefineBackupRef* ref = (DefineBackupRef*)signal->getDataPtrSend(); ref->backupId = ptr.p->backupId; ref->backupPtr = ptr.i; ref->errorCode = ptr.p->errorCode; + ref->nodeId = getOwnNodeId(); sendSignal(ptr.p->masterRef, GSN_DEFINE_BACKUP_REF, signal, DefineBackupRef::SignalLength, JBB); - - closeFiles(signal, ptr); } void @@ -2366,6 +2245,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) CRASH_INSERTION((10014)); + ptr.p->m_gsn = GSN_DEFINE_BACKUP_REQ; ptr.p->slaveState.forceState(INITIAL); ptr.p->slaveState.setState(DEFINING); ptr.p->errorCode = 0; @@ -2379,6 +2259,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) ptr.p->backupDataLen = req->backupDataLen; ptr.p->masterData.dropTrig.tableId = RNIL; ptr.p->masterData.alterTrig.tableId = RNIL; + ptr.p->masterData.errorCode = 0; ptr.p->noOfBytes = 0; ptr.p->noOfRecords = 0; ptr.p->noOfLogBytes = 0; @@ -2432,7 +2313,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) files[i].p->tableId = RNIL; files[i].p->backupPtr = ptr.i; files[i].p->filePointer = RNIL; - files[i].p->fileDone = 0; + files[i].p->fileClosing = 0; files[i].p->fileOpened = 0; files[i].p->fileRunning = 0; files[i].p->scanRunning = 0; @@ -2468,17 +2349,14 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) ptr.p->logFilePtr = files[1].i; ptr.p->dataFilePtr = files[2].i; - if (!verifyNodesAlive(ptr.p->nodes)) { + if (!verifyNodesAlive(ptr, ptr.p->nodes)) { jam(); defineBackupRef(signal, ptr, DefineBackupRef::Undefined); - // sendBackupRef(signal, ptr, - // ptr.p->errorCode?ptr.p->errorCode:BackupRef::Undefined); return; }//if if (ERROR_INSERTED(10027)) { jam(); defineBackupRef(signal, ptr, 327); - // sendBackupRef(signal, ptr, 327); return; }//if @@ -2546,8 +2424,6 @@ Backup::execLIST_TABLES_CONF(Signal* signal) return; }//if - defineSlaveAbortCheck(); - /** * All tables fetched */ @@ -2679,8 +2555,6 @@ Backup::openFilesReply(Signal* signal, }//if }//for - defineSlaveAbortCheck(); - /** * Did open succeed for all files */ @@ -2810,8 +2684,6 @@ Backup::execGET_TABINFOREF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - defineBackupRef(signal, ptr, ref->errorCode); } @@ -2833,8 +2705,6 @@ Backup::execGET_TABINFO_CONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - SegmentedSectionPtr dictTabInfoPtr; signal->getSection(dictTabInfoPtr, GetTabInfoConf::DICT_TAB_INFO); ndbrequire(dictTabInfoPtr.sz == len); @@ -3047,8 
+2917,6 @@ Backup::execDI_FCOUNTCONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); @@ -3127,8 +2995,6 @@ Backup::execDIGETPRIMCONF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, senderData); - defineSlaveAbortCheck(); - TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); @@ -3143,9 +3009,7 @@ void Backup::getFragmentInfoDone(Signal* signal, BackupRecordPtr ptr) { - // Slave must now hold on to master data until - // AbortBackupOrd::OkToClean signal - ptr.p->okToCleanMaster = false; + ptr.p->m_gsn = GSN_DEFINE_BACKUP_CONF; ptr.p->slaveState.setState(DEFINED); DefineBackupConf * conf = (DefineBackupConf*)signal->getDataPtr(); conf->backupPtr = ptr.i; @@ -3169,16 +3033,15 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) StartBackupReq* req = (StartBackupReq*)signal->getDataPtr(); const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; + //const Uint32 backupId = req->backupId; const Uint32 signalNo = req->signalNo; - + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - - slaveAbortCheck(); // macro will do return if ABORTING ptr.p->slaveState.setState(STARTED); - + ptr.p->m_gsn = GSN_START_BACKUP_REQ; + for(Uint32 i = 0; i<req->noOfTableTriggers; i++) { jam(); TablePtr tabPtr; @@ -3191,11 +3054,13 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) TriggerPtr trigPtr; if(!ptr.p->triggers.seizeId(trigPtr, triggerId)) { jam(); + ptr.p->m_gsn = GSN_START_BACKUP_REF; StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); ref->backupPtr = ptr.i; ref->backupId = ptr.p->backupId; ref->signalNo = signalNo; ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord; + ref->nodeId = getOwnNodeId(); sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, StartBackupRef::SignalLength, JBB); return; @@ -3233,6 +3098,7 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) }//if }//for + ptr.p->m_gsn = GSN_START_BACKUP_CONF; StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend(); conf->backupPtr = ptr.i; conf->backupId = ptr.p->backupId; @@ -3255,7 +3121,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) CRASH_INSERTION((10016)); const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; + //const Uint32 backupId = req->backupId; const Uint32 tableId = req->tableId; const Uint32 fragNo = req->fragmentNo; const Uint32 count = req->count; @@ -3266,10 +3132,9 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - slaveAbortCheck(); // macro will do return if ABORTING - ptr.p->slaveState.setState(SCANNING); - + ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REQ; + /** * Get file */ @@ -3280,7 +3145,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) ndbrequire(filePtr.p->fileOpened == 1); ndbrequire(filePtr.p->fileRunning == 1); ndbrequire(filePtr.p->scanRunning == 0); - ndbrequire(filePtr.p->fileDone == 0); + ndbrequire(filePtr.p->fileClosing == 0); /** * Get table @@ -3350,7 +3215,7 @@ Backup::execBACKUP_FRAGMENT_REQ(Signal* signal) req->transId1 = 0; req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); req->clientOpPtr= filePtr.i; - req->batch_size_rows= 16; + req->batch_size_rows= parallelism; req->batch_size_bytes= 0; sendSignal(DBLQH_REF, GSN_SCAN_FRAGREQ, signal, ScanFragReq::SignalLength, JBB); @@ -3572,6 +3437,13 @@ Backup::OperationRecord::newScan() return false; } +bool +Backup::OperationRecord::closeScan() +{ + 
opNoDone = opNoConf = opLen = 0; + return true; +} + bool Backup::OperationRecord::scanConf(Uint32 noOfOps, Uint32 total_len) { @@ -3600,11 +3472,9 @@ Backup::execSCAN_FRAGREF(Signal* signal) c_backupFilePool.getPtr(filePtr, filePtrI); filePtr.p->errorCode = ref->errorCode; + filePtr.p->scanRunning = 0; - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - abortFile(signal, ptr, filePtr); + backupFragmentRef(signal, filePtr); } void @@ -3639,9 +3509,11 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) { jam(); - if(filePtr.p->errorCode != 0){ + if(filePtr.p->errorCode != 0) + { jam(); - abortFileHook(signal, filePtr, true); // Scan completed + filePtr.p->scanRunning = 0; + backupFragmentRef(signal, filePtr); // Scan completed return; }//if @@ -3669,20 +3541,51 @@ Backup::fragmentCompleted(Signal* signal, BackupFilePtr filePtr) sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_CONF, signal, BackupFragmentConf::SignalLength, JBB); + ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_CONF; ptr.p->slaveState.setState(STARTED); return; } + +void +Backup::backupFragmentRef(Signal * signal, BackupFilePtr filePtr) +{ + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, filePtr.p->backupPtr); + + ptr.p->m_gsn = GSN_BACKUP_FRAGMENT_REF; + + BackupFragmentRef * ref = (BackupFragmentRef*)signal->getDataPtrSend(); + ref->backupId = ptr.p->backupId; + ref->backupPtr = ptr.i; + ref->nodeId = getOwnNodeId(); + ref->errorCode = ptr.p->errorCode; + sendSignal(ptr.p->masterRef, GSN_BACKUP_FRAGMENT_REF, signal, + BackupFragmentRef::SignalLength, JBB); +} void Backup::checkScan(Signal* signal, BackupFilePtr filePtr) { - if(filePtr.p->errorCode != 0){ + OperationRecord & op = filePtr.p->operation; + + if(filePtr.p->errorCode != 0) + { jam(); - abortFileHook(signal, filePtr, false); // Scan not completed + + /** + * Close scan + */ + op.closeScan(); + ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend(); + req->senderData = filePtr.i; + req->closeFlag = 1; + req->transId1 = 0; + req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); + sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + ScanFragNextReq::SignalLength, JBB); return; }//if - - OperationRecord & op = filePtr.p->operation; + if(op.newScan()) { jam(); @@ -3693,8 +3596,28 @@ Backup::checkScan(Signal* signal, BackupFilePtr filePtr) req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); req->batch_size_rows= 16; req->batch_size_bytes= 0; - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); + if(ERROR_INSERTED(10032)) + sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + 100, ScanFragNextReq::SignalLength); + else if(ERROR_INSERTED(10033)) + { + SET_ERROR_INSERT_VALUE(10032); + sendSignalWithDelay(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + 10000, ScanFragNextReq::SignalLength); + + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, filePtr.p->backupPtr); + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->requestType = AbortBackupOrd::FileOrScanError; + ord->senderData= ptr.i; + sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); + } + else + sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, + ScanFragNextReq::SignalLength, JBB); return; }//if @@ -3718,11 +3641,8 @@ Backup::execFSAPPENDREF(Signal* signal) filePtr.p->fileRunning = 0; filePtr.p->errorCode = errCode; - - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - abortFile(signal, 
ptr, filePtr); + + checkFile(signal, filePtr); } void @@ -3738,12 +3658,6 @@ Backup::execFSAPPENDCONF(Signal* signal) BackupFilePtr filePtr; c_backupFilePool.getPtr(filePtr, filePtrI); - - if (ERROR_INSERTED(10029)) { - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - abortFile(signal, ptr, filePtr); - }//if OperationRecord & op = filePtr.p->operation; @@ -3761,30 +3675,25 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) #endif OperationRecord & op = filePtr.p->operation; - + Uint32 * tmp, sz; bool eof; - if(op.dataBuffer.getReadPtr(&tmp, &sz, &eof)) { + if(op.dataBuffer.getReadPtr(&tmp, &sz, &eof)) + { jam(); - if(filePtr.p->errorCode == 0) { - jam(); - FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); - req->filePointer = filePtr.p->filePointer; - req->userPointer = filePtr.i; - req->userReference = reference(); - req->varIndex = 0; - req->offset = tmp - c_startOfPages; - req->size = sz; - - sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal, - FsAppendReq::SignalLength, JBA); - return; - } else { - jam(); - if (filePtr.p->scanRunning == 1) - eof = false; - }//if - }//if + jam(); + FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); + req->filePointer = filePtr.p->filePointer; + req->userPointer = filePtr.i; + req->userReference = reference(); + req->varIndex = 0; + req->offset = tmp - c_startOfPages; + req->size = sz; + + sendSignal(NDBFS_REF, GSN_FSAPPENDREQ, signal, + FsAppendReq::SignalLength, JBA); + return; + } if(!eof) { jam(); @@ -3794,9 +3703,7 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) return; }//if - ndbrequire(filePtr.p->fileDone == 1); - - if(sz > 0 && filePtr.p->errorCode == 0) { + if(sz > 0) { jam(); FsAppendReq * req = (FsAppendReq *)signal->getDataPtrSend(); req->filePointer = filePtr.p->filePointer; @@ -3812,6 +3719,7 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) }//if filePtr.p->fileRunning = 0; + filePtr.p->fileClosing = 1; FsCloseReq * req = (FsCloseReq *)signal->getDataPtrSend(); req->filePointer = filePtr.p->filePointer; @@ -3819,64 +3727,11 @@ Backup::checkFile(Signal* signal, BackupFilePtr filePtr) req->userReference = reference(); req->fileFlag = 0; #ifdef DEBUG_ABORT - ndbout_c("***** FSCLOSEREQ filePtr.i = %u", filePtr.i); + ndbout_c("***** a FSCLOSEREQ filePtr.i = %u", filePtr.i); #endif sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, signal, FsCloseReq::SignalLength, JBA); } -void -Backup::abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr) -{ - jam(); - - if(ptr.p->slaveState.getState() != ABORTING) { - /** - * Inform master of failure - */ - jam(); - ptr.p->slaveState.setState(ABORTING); - ptr.p->setErrorCode(AbortBackupOrd::FileOrScanError); - sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::FileOrScanError); - return; - }//if - - - for(ptr.p->files.first(filePtr); - filePtr.i!=RNIL; - ptr.p->files.next(filePtr)){ - jam(); - filePtr.p->errorCode = 1; - }//for - - closeFiles(signal, ptr); -} - -void -Backup::abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanComplete) -{ - jam(); - - if(!scanComplete) { - jam(); - - ScanFragNextReq * req = (ScanFragNextReq *)signal->getDataPtrSend(); - req->senderData = filePtr.i; - req->closeFlag = 1; - req->transId1 = 0; - req->transId2 = (BACKUP << 20) + (getOwnNodeId() << 8); - sendSignal(DBLQH_REF, GSN_SCAN_NEXTREQ, signal, - ScanFragNextReq::SignalLength, JBB); - return; - }//if - - filePtr.p->scanRunning = 0; - - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - - filePtr.i = RNIL; - 
abortFile(signal, ptr, filePtr); -} /**************************************************************************** * @@ -3953,27 +3808,30 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { }//if BackupFormat::LogFile::LogEntry * logEntry = trigPtr.p->logEntry; - if(logEntry == 0) { + if(logEntry == 0) + { jam(); Uint32 * dst; FsBuffer & buf = trigPtr.p->operation->dataBuffer; ndbrequire(trigPtr.p->maxRecordSize <= buf.getMaxWrite()); - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - if(!buf.getWritePtr(&dst, trigPtr.p->maxRecordSize)) { + if(ERROR_INSERTED(10030) || + !buf.getWritePtr(&dst, trigPtr.p->maxRecordSize)) + { jam(); + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; - sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::LogBufferFull); + AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); + ord->backupId = ptr.p->backupId; + ord->backupPtr = ptr.i; + ord->requestType = AbortBackupOrd::LogBufferFull; + ord->senderData= ptr.i; + sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, + AbortBackupOrd::SignalLength, JBB); return; }//if - if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) { - jam(); - trigPtr.p->errorCode = AbortBackupOrd::LogBufferFull; - sendAbortBackupOrdSlave(signal, ptr, AbortBackupOrd::LogBufferFull); - return; - }//if - + logEntry = (BackupFormat::LogFile::LogEntry *)dst; trigPtr.p->logEntry = logEntry; logEntry->Length = 0; @@ -4015,9 +3873,10 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - if(gci != ptr.p->currGCP) { + if(gci != ptr.p->currGCP) + { jam(); - + trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000); trigPtr.p->logEntry->Data[len] = htonl(gci); len ++; @@ -4035,20 +3894,6 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) trigPtr.p->operation->noOfRecords += 1; } -void -Backup::sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr, - Uint32 requestType) -{ - jam(); - AbortBackupOrd *ord = (AbortBackupOrd*)signal->getDataPtrSend(); - ord->backupId = ptr.p->backupId; - ord->backupPtr = ptr.i; - ord->requestType = requestType; - ord->senderData= ptr.i; - sendSignal(ptr.p->masterRef, GSN_ABORT_BACKUP_ORD, signal, - AbortBackupOrd::SignalLength, JBB); -} - void Backup::sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, Uint32 requestType) @@ -4085,7 +3930,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) CRASH_INSERTION((10020)); const Uint32 ptrI = req->backupPtr; - const Uint32 backupId = req->backupId; + //const Uint32 backupId = req->backupId; const Uint32 startGCP = req->startGCP; const Uint32 stopGCP = req->stopGCP; @@ -4101,7 +3946,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) c_backupPool.getPtr(ptr, ptrI); ptr.p->slaveState.setState(STOPPING); - slaveAbortCheck(); // macro will do return if ABORTING + ptr.p->m_gsn = GSN_STOP_BACKUP_REQ; /** * Insert footers @@ -4140,12 +3985,6 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) void Backup::closeFiles(Signal* sig, BackupRecordPtr ptr) { - if (ptr.p->closingFiles) { - jam(); - return; - } - ptr.p->closingFiles = true; - /** * Close all files */ @@ -4161,12 +4000,12 @@ Backup::closeFiles(Signal* sig, BackupRecordPtr ptr) jam(); openCount++; - if(filePtr.p->fileDone == 1){ + if(filePtr.p->fileClosing == 1){ jam(); continue; }//if - filePtr.p->fileDone = 1; + filePtr.p->fileClosing = 1; if(filePtr.p->fileRunning == 1){ jam(); @@ -4183,7 +4022,7 @@ Backup::closeFiles(Signal* sig, 
BackupRecordPtr ptr) req->userReference = reference(); req->fileFlag = 0; #ifdef DEBUG_ABORT - ndbout_c("***** FSCLOSEREQ filePtr.i = %u", filePtr.i); + ndbout_c("***** b FSCLOSEREQ filePtr.i = %u", filePtr.i); #endif sendSignal(NDBFS_REF, GSN_FSCLOSEREQ, sig, FsCloseReq::SignalLength, JBA); @@ -4210,11 +4049,6 @@ Backup::execFSCLOSEREF(Signal* signal) BackupRecordPtr ptr; c_backupPool.getPtr(ptr, filePtr.p->backupPtr); - /** - * This should only happen during abort of backup - */ - ndbrequire(ptr.p->slaveState.getState() == ABORTING); - filePtr.p->fileOpened = 1; FsConf * conf = (FsConf*)signal->getDataPtr(); conf->userPointer = filePtrI; @@ -4237,7 +4071,7 @@ Backup::execFSCLOSECONF(Signal* signal) ndbout_c("***** FSCLOSECONF filePtrI = %u", filePtrI); #endif - ndbrequire(filePtr.p->fileDone == 1); + ndbrequire(filePtr.p->fileClosing == 1); ndbrequire(filePtr.p->fileOpened == 1); ndbrequire(filePtr.p->fileRunning == 0); ndbrequire(filePtr.p->scanRunning == 0); @@ -4265,25 +4099,20 @@ Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr) { jam(); - if(ptr.p->slaveState.getState() == STOPPING) { - jam(); - BackupFilePtr filePtr; - ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - - StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend(); - conf->backupId = ptr.p->backupId; - conf->backupPtr = ptr.i; - conf->noOfLogBytes = filePtr.p->operation.noOfBytes; - conf->noOfLogRecords = filePtr.p->operation.noOfRecords; - sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_CONF, signal, - StopBackupConf::SignalLength, JBB); - - ptr.p->slaveState.setState(CLEANING); - return; - }//if + jam(); + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - ndbrequire(ptr.p->slaveState.getState() == ABORTING); - removeBackup(signal, ptr); + StopBackupConf* conf = (StopBackupConf*)signal->getDataPtrSend(); + conf->backupId = ptr.p->backupId; + conf->backupPtr = ptr.i; + conf->noOfLogBytes = filePtr.p->operation.noOfBytes; + conf->noOfLogRecords = filePtr.p->operation.noOfRecords; + sendSignal(ptr.p->masterRef, GSN_STOP_BACKUP_CONF, signal, + StopBackupConf::SignalLength, JBB); + + ptr.p->m_gsn = GSN_STOP_BACKUP_CONF; + ptr.p->slaveState.setState(CLEANING); } /***************************************************************************** @@ -4291,57 +4120,6 @@ Backup::closeFilesDone(Signal* signal, BackupRecordPtr ptr) * Slave functionallity: Abort backup * *****************************************************************************/ -void -Backup::removeBackup(Signal* signal, BackupRecordPtr ptr) -{ - jam(); - - FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); - req->userReference = reference(); - req->userPointer = ptr.i; - req->directory = 1; - req->ownDirectory = 1; - FsOpenReq::setVersion(req->fileNumber, 2); - FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); - FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); - FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); - sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, - FsRemoveReq::SignalLength, JBA); -} - -void -Backup::execFSREMOVEREF(Signal* signal) -{ - jamEntry(); - ndbrequire(0); -} - -void -Backup::execFSREMOVECONF(Signal* signal){ - jamEntry(); - - FsConf * conf = (FsConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->userPointer; - - /** - * Get backup record - */ - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, ptrI); - - ndbrequire(ptr.p->slaveState.getState() == ABORTING); - if (ptr.p->masterRef == reference()) { - if (ptr.p->masterData.state.getAbortState() == DEFINING) { - 
jam(); - sendBackupRef(signal, ptr, ptr.p->errorCode); - return; - } else { - jam(); - }//if - }//if - cleanupSlaveResources(ptr); -} - /***************************************************************************** * * Slave functionallity: Abort backup @@ -4394,8 +4172,7 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) if (c_backupPool.findId(senderData)) { jam(); c_backupPool.getPtr(ptr, senderData); - } else { // TODO might be abort sent to not master, - // or master aborting too early + } else { jam(); #ifdef DEBUG_ABORT ndbout_c("Backup: abort request type=%u on id=%u,%u not found", @@ -4405,15 +4182,15 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) } }//if + ptr.p->m_gsn = GSN_ABORT_BACKUP_ORD; const bool isCoordinator = (ptr.p->masterRef == reference()); - + bool ok = false; switch(requestType){ /** * Requests sent to master */ - case AbortBackupOrd::ClientAbort: jam(); // fall through @@ -4422,113 +4199,61 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) // fall through case AbortBackupOrd::FileOrScanError: jam(); - if(ptr.p->masterData.state.getState() == ABORTING) { -#ifdef DEBUG_ABORT - ndbout_c("---- Already aborting"); -#endif - jam(); - return; - } + ndbrequire(isCoordinator); ptr.p->setErrorCode(requestType); - ndbrequire(isCoordinator); // Sent from slave to coordinator - masterAbort(signal, ptr, false); + if(ptr.p->masterData.gsn == GSN_BACKUP_FRAGMENT_REQ) + { + /** + * Only scans are actively aborted + */ + abort_scan(signal, ptr); + } return; - - /** - * Info sent to slave - */ - - case AbortBackupOrd::OkToClean: - jam(); - cleanupMasterResources(ptr); - return; - + /** * Requests sent to slave */ - + case AbortBackupOrd::AbortScan: + jam(); + ptr.p->setErrorCode(requestType); + return; + case AbortBackupOrd::BackupComplete: jam(); - if (ptr.p->slaveState.getState() == CLEANING) { // TODO what if state is - // not CLEANING? - jam(); - cleanupSlaveResources(ptr); - }//if + cleanup(signal, ptr); return; - break; - case AbortBackupOrd::BackupFailureDueToNodeFail: - jam(); - ok = true; - if (ptr.p->errorCode != 0) - ptr.p->setErrorCode(requestType); - break; case AbortBackupOrd::BackupFailure: - jam(); - ok = true; - break; + case AbortBackupOrd::BackupFailureDueToNodeFail: + case AbortBackupOrd::OkToClean: + case AbortBackupOrd::IncompatibleVersions: +#ifndef VM_TRACE + default: +#endif + ptr.p->setErrorCode(requestType); + ok= true; } ndbrequire(ok); - /** - * Slave abort - */ - slaveAbort(signal, ptr); -} - -void -Backup::slaveAbort(Signal* signal, BackupRecordPtr ptr) -{ - if(ptr.p->slaveState.getState() == ABORTING) { -#ifdef DEBUG_ABORT - ndbout_c("---- Slave already aborting"); -#endif - jam(); - return; + Uint32 ref= ptr.p->masterRef; + ptr.p->masterRef = reference(); + ptr.p->nodes.clear(); + ptr.p->nodes.set(getOwnNodeId()); + + if(ref == reference()) + { + ptr.p->stopGCP= ptr.p->startGCP + 1; + sendDropTrig(signal, ptr); } -#ifdef DEBUG_ABORT - ndbout_c("************* slaveAbort"); -#endif - - State slaveState = ptr.p->slaveState.getState(); - ptr.p->slaveState.setState(ABORTING); - switch(slaveState) { - case DEFINING: - jam(); - return; -//------------------------------------------ -// Will watch for the abort at various places -// in the defining phase. 
-//------------------------------------------ - case ABORTING: - jam(); - //Fall through - case DEFINED: - jam(); - //Fall through - case STOPPING: - jam(); + else + { + ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; + ptr.p->masterData.sendCounter.clearWaitingFor(); + ptr.p->masterData.sendCounter.setWaitingFor(getOwnNodeId()); closeFiles(signal, ptr); - return; - case STARTED: - jam(); - //Fall through - case SCANNING: - jam(); - BackupFilePtr filePtr; - filePtr.i = RNIL; - abortFile(signal, ptr, filePtr); - return; - case CLEANING: - jam(); - cleanupSlaveResources(ptr); - return; - case INITIAL: - jam(); - ndbrequire(false); - return; } } + void Backup::dumpUsedResources() { @@ -4576,12 +4301,8 @@ Backup::dumpUsedResources() } void -Backup::cleanupMasterResources(BackupRecordPtr ptr) +Backup::cleanup(Signal* signal, BackupRecordPtr ptr) { -#ifdef DEBUG_ABORT - ndbout_c("******** Cleanup Master Resources *********"); - ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode); -#endif TablePtr tabPtr; for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL;ptr.p->tables.next(tabPtr)) @@ -4601,20 +4322,6 @@ Backup::cleanupMasterResources(BackupRecordPtr ptr) tabPtr.p->triggerIds[j] = ILLEGAL_TRIGGER_ID; }//for }//for - ptr.p->tables.release(); - ptr.p->triggers.release(); - ptr.p->okToCleanMaster = true; - - cleanupFinalResources(ptr); -} - -void -Backup::cleanupSlaveResources(BackupRecordPtr ptr) -{ -#ifdef DEBUG_ABORT - ndbout_c("******** Clean Up Slave Resources*********"); - ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode); -#endif BackupFilePtr filePtr; for(ptr.p->files.first(filePtr); @@ -4626,35 +4333,65 @@ Backup::cleanupSlaveResources(BackupRecordPtr ptr) ndbrequire(filePtr.p->scanRunning == 0); filePtr.p->pages.release(); }//for - ptr.p->files.release(); - cleanupFinalResources(ptr); + ptr.p->files.release(); + ptr.p->tables.release(); + ptr.p->triggers.release(); + + ptr.p->tables.release(); + ptr.p->triggers.release(); + ptr.p->pages.release(); + ptr.p->backupId = ~0; + + if(ptr.p->checkError()) + removeBackup(signal, ptr); + else + c_backups.release(ptr); +} + + +void +Backup::removeBackup(Signal* signal, BackupRecordPtr ptr) +{ + jam(); + + FsRemoveReq * req = (FsRemoveReq *)signal->getDataPtrSend(); + req->userReference = reference(); + req->userPointer = ptr.i; + req->directory = 1; + req->ownDirectory = 1; + FsOpenReq::setVersion(req->fileNumber, 2); + FsOpenReq::setSuffix(req->fileNumber, FsOpenReq::S_CTL); + FsOpenReq::v2_setSequence(req->fileNumber, ptr.p->backupId); + FsOpenReq::v2_setNodeId(req->fileNumber, getOwnNodeId()); + sendSignal(NDBFS_REF, GSN_FSREMOVEREQ, signal, + FsRemoveReq::SignalLength, JBA); } void -Backup::cleanupFinalResources(BackupRecordPtr ptr) +Backup::execFSREMOVEREF(Signal* signal) { -#ifdef DEBUG_ABORT - ndbout_c("******** Clean Up Final Resources*********"); - ndbout_c("backupId = %u, errorCode = %u", ptr.p->backupId, ptr.p->errorCode); -#endif + jamEntry(); + FsRef * ref = (FsRef*)signal->getDataPtr(); + const Uint32 ptrI = ref->userPointer; - // if (!ptr.p->tables.empty() || !ptr.p->files.empty()) { - if (!ptr.p->okToCleanMaster || !ptr.p->files.empty()) { - jam(); -#ifdef DEBUG_ABORT - ndbout_c("******** Waiting to do final cleanup"); -#endif - return; - } - ptr.p->pages.release(); - ptr.p->masterData.state.setState(INITIAL); - ptr.p->slaveState.setState(INITIAL); - ptr.p->backupId = 0; - - ptr.p->closingFiles = false; - ptr.p->okToCleanMaster = true; - - c_backups.release(ptr); - // 
ndbrequire(false); + FsConf * conf = (FsConf*)signal->getDataPtr(); + conf->userPointer = ptrI; + execFSREMOVECONF(signal); } + +void +Backup::execFSREMOVECONF(Signal* signal){ + jamEntry(); + + FsConf * conf = (FsConf*)signal->getDataPtr(); + const Uint32 ptrI = conf->userPointer; + + /** + * Get backup record + */ + BackupRecordPtr ptr; + c_backupPool.getPtr(ptr, ptrI); + c_backups.release(ptr); +} + diff --git a/ndb/src/kernel/blocks/backup/Backup.hpp b/ndb/src/kernel/blocks/backup/Backup.hpp index 1a5d6c7a925..7bcea5655b4 100644 --- a/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/ndb/src/kernel/blocks/backup/Backup.hpp @@ -232,6 +232,7 @@ public: */ bool newScan(); bool scanConf(Uint32 noOfOps, Uint32 opLen); + bool closeScan(); /** * Per record */ @@ -330,7 +331,7 @@ public: Uint8 fileOpened; Uint8 fileRunning; - Uint8 fileDone; + Uint8 fileClosing; Uint8 scanRunning; }; typedef Ptr<BackupFile> BackupFilePtr; @@ -403,13 +404,11 @@ public: ArrayPool<TriggerRecord> & trp) : slaveState(b, validSlaveTransitions, validSlaveTransitionsCount,1) , tables(tp), triggers(trp), files(bp), pages(pp) - , masterData(b, validMasterTransitions, validMasterTransitionsCount) - , backup(b) - { - closingFiles = false; - okToCleanMaster = true; - } + , masterData(b), backup(b) + { + } + Uint32 m_gsn; CompoundState slaveState; Uint32 clientRef; @@ -420,9 +419,6 @@ public: Uint32 errorCode; NdbNodeBitmask nodes; - bool okToCleanMaster; - bool closingFiles; - Uint64 noOfBytes; Uint64 noOfRecords; Uint64 noOfLogBytes; @@ -444,15 +440,13 @@ public: SimpleProperties props;// Used for (un)packing backup request struct MasterData { - MasterData(Backup & b, const State valid[], Uint32 count) - : state(b, valid, count, 0) - { - } + MasterData(Backup & b) + { + } MutexHandle2<BACKUP_DEFINE_MUTEX> m_defineBackupMutex; MutexHandle2<DICT_COMMIT_TABLE_MUTEX> m_dictCommitTableMutex; Uint32 gsn; - CompoundState state; SignalCounter sendCounter; Uint32 errorCode; struct { @@ -557,7 +551,8 @@ public: void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId); void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0); - + void backupFragmentRef(Signal * signal, BackupFilePtr filePtr); + void nextFragment(Signal*, BackupRecordPtr); void sendCreateTrig(Signal*, BackupRecordPtr ptr, TablePtr tabPtr); @@ -578,14 +573,14 @@ public: void sendAbortBackupOrd(Signal* signal, BackupRecordPtr ptr, Uint32 errCode); void sendAbortBackupOrdSlave(Signal* signal, BackupRecordPtr ptr, Uint32 errCode); - void masterAbort(Signal*, BackupRecordPtr ptr, bool controlledAbort); + void masterAbort(Signal*, BackupRecordPtr ptr); void masterSendAbortBackup(Signal*, BackupRecordPtr ptr); void slaveAbort(Signal*, BackupRecordPtr ptr); void abortFile(Signal* signal, BackupRecordPtr ptr, BackupFilePtr filePtr); void abortFileHook(Signal* signal, BackupFilePtr filePtr, bool scanDone); - bool verifyNodesAlive(const NdbNodeBitmask& aNodeBitMask); + bool verifyNodesAlive(BackupRecordPtr, const NdbNodeBitmask& aNodeBitMask); bool checkAbort(BackupRecordPtr ptr); void checkNodeFail(Signal* signal, BackupRecordPtr ptr, @@ -603,9 +598,8 @@ public: void sendBackupRef(BlockReference ref, Signal *signal, Uint32 senderData, Uint32 errorCode); void dumpUsedResources(); - void cleanupMasterResources(BackupRecordPtr ptr); - void cleanupSlaveResources(BackupRecordPtr ptr); - void cleanupFinalResources(BackupRecordPtr ptr); + void cleanup(Signal*, BackupRecordPtr ptr); + void abort_scan(Signal*, BackupRecordPtr ptr); void removeBackup(Signal*, BackupRecordPtr ptr); void sendSTTORRY(Signal*); diff --git 
a/ndb/src/kernel/blocks/backup/Backup.txt b/ndb/src/kernel/blocks/backup/Backup.txt index ee5e02bb549..73942c6ebdc 100644 --- a/ndb/src/kernel/blocks/backup/Backup.txt +++ b/ndb/src/kernel/blocks/backup/Backup.txt @@ -341,3 +341,28 @@ start backup (ERROR_INSERTED(10022))) { if (ERROR_INSERTED(10029)) { if(trigPtr.p->operation->noOfBytes > 123 && ERROR_INSERTED(10030)) { + +----- XXX --- + +DEFINE_BACKUP_REF -> + ABORT_BACKUP_ORD(no reply) when all DEFINE_BACKUP replies have arrived + +START_BACKUP_REF + ABORT_BACKUP_ORD(no reply) when all START_BACKUP_ replies have arrived + +BACKUP_FRAGMENT_REF + ABORT_BACKUP_ORD(reply) directly to all nodes running BACKUP_FRAGMENT + + When all nodes have replied BACKUP_FRAGMENT + ABORT_BACKUP_ORD(no reply) + +STOP_BACKUP_REF + ABORT_BACKUP_ORD(no reply) when all STOP_BACKUP_ replies have arrived + +NF_COMPLETE_REP + slave dies + master sends OUTSTANDING_REF to self + slave does nothing + + master dies + slave elects self as master and sets only itself as participant diff --git a/ndb/src/kernel/blocks/backup/BackupInit.cpp b/ndb/src/kernel/blocks/backup/BackupInit.cpp index 08fa089a9c0..eae72f43db5 100644 --- a/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -175,7 +175,7 @@ Backup::Backup(const Configuration & conf) : addRecSignal(GSN_START_BACKUP_CONF, &Backup::execSTART_BACKUP_CONF); addRecSignal(GSN_BACKUP_FRAGMENT_REQ, &Backup::execBACKUP_FRAGMENT_REQ); - //addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); + addRecSignal(GSN_BACKUP_FRAGMENT_REF, &Backup::execBACKUP_FRAGMENT_REF); addRecSignal(GSN_BACKUP_FRAGMENT_CONF, &Backup::execBACKUP_FRAGMENT_CONF); addRecSignal(GSN_STOP_BACKUP_REQ, &Backup::execSTOP_BACKUP_REQ); diff --git a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp index 9001491dd64..c313abc28eb 100644 --- a/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp +++ b/ndb/src/kernel/blocks/cmvmi/Cmvmi.cpp @@ -126,6 +126,7 @@ Cmvmi::Cmvmi(const Configuration & conf) : } setNodeInfo(getOwnNodeId()).m_connected = true; + setNodeInfo(getOwnNodeId()).m_version = ndbGetOwnVersion(); } Cmvmi::~Cmvmi() diff --git a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp index 6a65da5bb6a..a2d6fe4d64a 100644 --- a/ndb/src/kernel/blocks/dbacc/Dbacc.hpp +++ b/ndb/src/kernel/blocks/dbacc/Dbacc.hpp @@ -984,7 +984,7 @@ private: Uint32 placeReadInLockQueue(Signal* signal); void placeSerialQueueRead(Signal* signal); void checkOnlyReadEntry(Signal* signal); - void getNoParallelTransaction(Signal* signal); + Uint32 getNoParallelTransaction(const Operationrec*); void moveLastParallelQueue(Signal* signal); void moveLastParallelQueueWrite(Signal* signal); Uint32 placeWriteInLockQueue(Signal* signal); @@ -1045,6 +1045,8 @@ private: Uint32 executeNextOperation(Signal* signal); void releaselock(Signal* signal); void takeOutFragWaitQue(Signal* signal); + void check_lock_upgrade(Signal* signal, OperationrecPtr lock_owner, + OperationrecPtr release_op); void allocOverflowPage(Signal* signal); bool getrootfragmentrec(Signal* signal, RootfragmentrecPtr&, Uint32 fragId); void insertLockOwnersList(Signal* signal, const OperationrecPtr&); @@ -1206,7 +1208,6 @@ private: OperationrecPtr mlpqOperPtr; OperationrecPtr queOperPtr; OperationrecPtr readWriteOpPtr; - OperationrecPtr tgnptMainOpPtr; Uint32 cfreeopRec; Uint32 coprecsize; /* --------------------------------------------------------------------------------- */ @@ -1412,7 +1413,6 @@ private: Uint32 turlIndex; Uint32 
tlfrTmp1; Uint32 tlfrTmp2; - Uint32 tgnptNrTransaction; Uint32 tscanTrid1; Uint32 tscanTrid2; diff --git a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp index bf73e263501..0054b935cdb 100644 --- a/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp +++ b/ndb/src/kernel/blocks/dbacc/DbaccMain.cpp @@ -2033,9 +2033,7 @@ void Dbacc::insertelementLab(Signal* signal) /* --------------------------------------------------------------------------------- */ Uint32 Dbacc::placeReadInLockQueue(Signal* signal) { - tgnptMainOpPtr = queOperPtr; - getNoParallelTransaction(signal); - if (tgnptNrTransaction == 1) { + if (getNoParallelTransaction(queOperPtr.p) == 1) { if ((queOperPtr.p->transId1 == operationRecPtr.p->transId1) && (queOperPtr.p->transId2 == operationRecPtr.p->transId2)) { /* --------------------------------------------------------------------------------- */ @@ -2118,9 +2116,7 @@ void Dbacc::placeSerialQueueRead(Signal* signal) checkOnlyReadEntry(signal); return; }//if - tgnptMainOpPtr = readWriteOpPtr; - getNoParallelTransaction(signal); - if (tgnptNrTransaction == 1) { + if (getNoParallelTransaction(readWriteOpPtr.p) == 1) { jam(); /* --------------------------------------------------------------------------------- */ /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */ @@ -2201,24 +2197,23 @@ void Dbacc::checkOnlyReadEntry(Signal* signal) /* --------------------------------------------------------------------------------- */ /* GET_NO_PARALLEL_TRANSACTION */ /* --------------------------------------------------------------------------------- */ -void Dbacc::getNoParallelTransaction(Signal* signal) +Uint32 +Dbacc::getNoParallelTransaction(const Operationrec * op) { - OperationrecPtr tnptOpPtr; - - tgnptNrTransaction = 1; - tnptOpPtr.i = tgnptMainOpPtr.p->nextParallelQue; - while ((tnptOpPtr.i != RNIL) && - (tgnptNrTransaction == 1)) { + OperationrecPtr tmp; + + tmp.i= op->nextParallelQue; + Uint32 transId[2] = { op->transId1, op->transId2 }; + while (tmp.i != RNIL) + { jam(); - ptrCheckGuard(tnptOpPtr, coprecsize, operationrec); - if ((tnptOpPtr.p->transId1 == tgnptMainOpPtr.p->transId1) && - (tnptOpPtr.p->transId2 == tgnptMainOpPtr.p->transId2)) { - tnptOpPtr.i = tnptOpPtr.p->nextParallelQue; - } else { - jam(); - tgnptNrTransaction++; - }//if - }//while + ptrCheckGuard(tmp, coprecsize, operationrec); + if (tmp.p->transId1 == transId[0] && tmp.p->transId2 == transId[1]) + tmp.i = tmp.p->nextParallelQue; + else + return 2; + } + return 1; }//Dbacc::getNoParallelTransaction() void Dbacc::moveLastParallelQueue(Signal* signal) @@ -2259,9 +2254,7 @@ void Dbacc::moveLastParallelQueueWrite(Signal* signal) /* --------------------------------------------------------------------------------- */ Uint32 Dbacc::placeWriteInLockQueue(Signal* signal) { - tgnptMainOpPtr = queOperPtr; - getNoParallelTransaction(signal); - if (!((tgnptNrTransaction == 1) && + if (!((getNoParallelTransaction(queOperPtr.p) == 1) && (queOperPtr.p->transId1 == operationRecPtr.p->transId1) && (queOperPtr.p->transId2 == operationRecPtr.p->transId2))) { jam(); @@ -2312,9 +2305,7 @@ void Dbacc::placeSerialQueueWrite(Signal* signal) }//if readWriteOpPtr.i = readWriteOpPtr.p->nextSerialQue; ptrCheckGuard(readWriteOpPtr, coprecsize, operationrec); - tgnptMainOpPtr = readWriteOpPtr; - getNoParallelTransaction(signal); - if (tgnptNrTransaction == 1) { + if (getNoParallelTransaction(readWriteOpPtr.p) == 1) { /* 
--------------------------------------------------------------------------------- */ /* THERE WAS ONLY ONE TRANSACTION INVOLVED IN THE PARALLEL QUEUE. IF THIS IS OUR */ /* TRANSACTION WE CAN STILL GET HOLD OF THE LOCK. */ @@ -4449,9 +4440,153 @@ void Dbacc::commitOperation(Signal* signal) ptrCheckGuard(tolqTmpPtr, coprecsize, operationrec); tolqTmpPtr.p->prevParallelQue = operationRecPtr.p->prevParallelQue; }//if - }//if + + /** + * Check possible lock upgrade + * 1) Find lock owner + * 2) Count transactions in parallel queue + * 3) If count == 1 and TRANSID(next serial) == TRANSID(lock owner) + * upgrade next serial + */ + if(operationRecPtr.p->lockMode) + { + jam(); + /** + * Committing a non-shared operation can't lead to lock upgrade + */ + return; + } + + OperationrecPtr lock_owner; + lock_owner.i = operationRecPtr.p->prevParallelQue; + ptrCheckGuard(lock_owner, coprecsize, operationrec); + Uint32 transid[2] = { lock_owner.p->transId1, + lock_owner.p->transId2 }; + + + while(lock_owner.p->prevParallelQue != RNIL) + { + lock_owner.i = lock_owner.p->prevParallelQue; + ptrCheckGuard(lock_owner, coprecsize, operationrec); + + if(lock_owner.p->transId1 != transid[0] || + lock_owner.p->transId2 != transid[1]) + { + jam(); + /** + * If more than 1 trans in lock queue -> no lock upgrade + */ + return; + } + } + + check_lock_upgrade(signal, lock_owner, operationRecPtr); + } }//Dbacc::commitOperation() +void +Dbacc::check_lock_upgrade(Signal* signal, + OperationrecPtr lock_owner, + OperationrecPtr release_op) +{ + if((lock_owner.p->transId1 == release_op.p->transId1 && + lock_owner.p->transId2 == release_op.p->transId2) || + release_op.p->lockMode || + lock_owner.p->nextSerialQue == RNIL) + { + jam(); + /** + * No lock upgrade if same trans or lock owner has no serial queue + * or releasing non-shared op + */ + return; + } + + OperationrecPtr next; + next.i = lock_owner.p->nextSerialQue; + ptrCheckGuard(next, coprecsize, operationrec); + + if(lock_owner.p->transId1 != next.p->transId1 || + lock_owner.p->transId2 != next.p->transId2) + { + jam(); + /** + * No lock upgrade if !same trans in serial queue + */ + return; + } + + if (getNoParallelTransaction(lock_owner.p) > 1) + { + jam(); + /** + * No lock upgrade if more than 1 transaction in parallel queue + */ + return; + } + + if (getNoParallelTransaction(next.p) > 1) + { + jam(); + /** + * No lock upgrade if more than 1 transaction in next's parallel queue + */ + return; + } + + OperationrecPtr tmp; + tmp.i = lock_owner.p->nextSerialQue = next.p->nextSerialQue; + if(tmp.i != RNIL) + { + ptrCheckGuard(tmp, coprecsize, operationrec); + ndbassert(tmp.p->prevSerialQue == next.i); + tmp.p->prevSerialQue = lock_owner.i; + } + next.p->nextSerialQue = next.p->prevSerialQue = RNIL; + + // Find end of parallel queue + tmp = lock_owner; + Uint32 lockMode = next.p->lockMode > lock_owner.p->lockMode ? 
+ next.p->lockMode : lock_owner.p->lockMode; + while(tmp.p->nextParallelQue != RNIL) + { + jam(); + tmp.i = tmp.p->nextParallelQue; + tmp.p->lockMode = lockMode; + ptrCheckGuard(tmp, coprecsize, operationrec); + } + tmp.p->lockMode = lockMode; + + next.p->prevParallelQue = tmp.i; + tmp.p->nextParallelQue = next.i; + + OperationrecPtr save = operationRecPtr; + + Uint32 localdata[2]; + localdata[0] = lock_owner.p->localdata[0]; + localdata[1] = lock_owner.p->localdata[1]; + do { + next.p->localdata[0] = localdata[0]; + next.p->localdata[1] = localdata[1]; + next.p->lockMode = lockMode; + + operationRecPtr = next; + executeNextOperation(signal); + if (next.p->nextParallelQue != RNIL) + { + jam(); + next.i = next.p->nextParallelQue; + ptrCheckGuard(next, coprecsize, operationrec); + } else { + jam(); + break; + }//if + } while (1); + + operationRecPtr = save; + +} + /* ------------------------------------------------------------------------- */ /* RELEASELOCK */ /* RESETS LOCK OF AN ELEMENT. */ @@ -4488,6 +4623,8 @@ void Dbacc::releaselock(Signal* signal) ptrCheckGuard(trlTmpOperPtr, coprecsize, operationrec); trlTmpOperPtr.p->prevSerialQue = trlOperPtr.i; }//if + + check_lock_upgrade(signal, copyInOperPtr, operationRecPtr); /* --------------------------------------------------------------------------------- */ /* SINCE THERE ARE STILL ITEMS IN THE PARALLEL QUEUE WE NEED NOT WORRY ABOUT */ /* STARTING QUEUED OPERATIONS. THUS WE CAN END HERE. */ @@ -6952,7 +7089,7 @@ void Dbacc::checkSendLcpConfLab(Signal* signal) break; }//switch lcpConnectptr.p->noOfLcpConf++; - ndbrequire(lcpConnectptr.p->noOfLcpConf <= 2); + ndbrequire(lcpConnectptr.p->noOfLcpConf <= 4); fragrecptr.p->fragState = ACTIVEFRAG; rlpPageptr.i = fragrecptr.p->zeroPagePtr; ptrCheckGuard(rlpPageptr, cpagesize, page8); @@ -6970,7 +7107,7 @@ void Dbacc::checkSendLcpConfLab(Signal* signal) }//for signal->theData[0] = fragrecptr.p->lcpLqhPtr; sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_LCPCONF, signal, 1, JBB); - if (lcpConnectptr.p->noOfLcpConf == 2) { + if (lcpConnectptr.p->noOfLcpConf == 4) { jam(); releaseLcpConnectRec(signal); rootfragrecptr.i = fragrecptr.p->myroot; @@ -7001,6 +7138,13 @@ void Dbacc::execACC_CONTOPREQ(Signal* signal) /* LOCAL FRAG ID */ tresult = 0; ptrCheckGuard(lcpConnectptr, clcpConnectsize, lcpConnectrec); + if(ERROR_INSERTED(3002) && lcpConnectptr.p->noOfLcpConf < 2) + { + sendSignalWithDelay(cownBlockref, GSN_ACC_CONTOPREQ, signal, 300, + signal->getLength()); + return; + } + ndbrequire(lcpConnectptr.p->lcpstate == LCP_ACTIVE); rootfragrecptr.i = lcpConnectptr.p->rootrecptr; ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec); @@ -7034,6 +7178,15 @@ void Dbacc::execACC_CONTOPREQ(Signal* signal) }//while signal->theData[0] = fragrecptr.p->lcpLqhPtr; sendSignal(lcpConnectptr.p->lcpUserblockref, GSN_ACC_CONTOPCONF, signal, 1, JBA); + + lcpConnectptr.p->noOfLcpConf++; + if (lcpConnectptr.p->noOfLcpConf == 4) { + jam(); + releaseLcpConnectRec(signal); + rootfragrecptr.i = fragrecptr.p->myroot; + ptrCheckGuard(rootfragrecptr, crootfragmentsize, rootfragmentrec); + rootfragrecptr.p->rootState = ACTIVEROOT; + }//if return; /* ALL QUEUED OPERATION ARE RESTARTED IF NEEDED. 
*/ }//Dbacc::execACC_CONTOPREQ() diff --git a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 4bc5b127a8f..3d5340494ab 100644 --- a/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -9884,11 +9884,20 @@ Dbdict::execBUILDINDXREQ(Signal* signal) requestType == BuildIndxReq::RT_ALTER_INDEX || requestType == BuildIndxReq::RT_SYSTEMRESTART) { jam(); + + const bool isLocal = req->getRequestFlag() & RequestFlag::RF_LOCAL; + NdbNodeBitmask receiverNodes = c_aliveNodes; + if (isLocal) { + receiverNodes.clear(); + receiverNodes.set(getOwnNodeId()); + } + if (signal->getLength() == BuildIndxReq::SignalLength) { jam(); - if (getOwnNodeId() != c_masterNodeId) { + + if (!isLocal && getOwnNodeId() != c_masterNodeId) { jam(); - + releaseSections(signal); OpBuildIndex opBad; opPtr.p = &opBad; @@ -9901,9 +9910,9 @@ Dbdict::execBUILDINDXREQ(Signal* signal) } // forward initial request plus operation key to all req->setOpKey(++c_opRecordSequence); - NodeReceiverGroup rg(DBDICT, c_aliveNodes); + NodeReceiverGroup rg(DBDICT, receiverNodes); sendSignal(rg, GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength + 1, JBB); + signal, BuildIndxReq::SignalLength + 1, JBB); return; } // seize operation record @@ -9926,7 +9935,7 @@ Dbdict::execBUILDINDXREQ(Signal* signal) } c_opBuildIndex.add(opPtr); // master expects to hear from all - opPtr.p->m_signalCounter = c_aliveNodes; + opPtr.p->m_signalCounter = receiverNodes; buildIndex_sendReply(signal, opPtr, false); return; } @@ -10281,10 +10290,20 @@ Dbdict::buildIndex_sendSlaveReq(Signal* signal, OpBuildIndexPtr opPtr) req->setConnectionPtr(opPtr.p->key); req->setRequestType(opPtr.p->m_requestType); req->addRequestFlag(opPtr.p->m_requestFlag); - opPtr.p->m_signalCounter = c_aliveNodes; - NodeReceiverGroup rg(DBDICT, c_aliveNodes); - sendSignal(rg, GSN_BUILDINDXREQ, - signal, BuildIndxReq::SignalLength, JBB); + if(opPtr.p->m_requestFlag & RequestFlag::RF_LOCAL) + { + opPtr.p->m_signalCounter.clearWaitingFor(); + opPtr.p->m_signalCounter.setWaitingFor(getOwnNodeId()); + sendSignal(reference(), GSN_BUILDINDXREQ, + signal, BuildIndxReq::SignalLength, JBB); + } + else + { + opPtr.p->m_signalCounter = c_aliveNodes; + NodeReceiverGroup rg(DBDICT, c_aliveNodes); + sendSignal(rg, GSN_BUILDINDXREQ, + signal, BuildIndxReq::SignalLength, JBB); + } } void diff --git a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp index e7debe1f978..f5d2dbc0a6c 100644 --- a/ndb/src/kernel/blocks/dblqh/Dblqh.hpp +++ b/ndb/src/kernel/blocks/dblqh/Dblqh.hpp @@ -966,7 +966,6 @@ public: enum LcpState { LCP_IDLE = 0, - LCP_STARTED = 1, LCP_COMPLETED = 2, LCP_WAIT_FRAGID = 3, LCP_WAIT_TUP_PREPLCP = 4, @@ -2265,7 +2264,7 @@ private: void sendCopyActiveConf(Signal* signal,Uint32 tableId); void checkLcpCompleted(Signal* signal); void checkLcpHoldop(Signal* signal); - void checkLcpStarted(Signal* signal); + bool checkLcpStarted(Signal* signal); void checkLcpTupprep(Signal* signal); void getNextFragForLcp(Signal* signal); void initLcpLocAcc(Signal* signal, Uint32 fragId); diff --git a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index b0d44bf1bd9..50ee4c4b06e 100644 --- a/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -10426,8 +10426,8 @@ void Dblqh::execTUP_LCPSTARTED(Signal* signal) void Dblqh::lcpStartedLab(Signal* signal) { - checkLcpStarted(signal); - if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) { + if 
(checkLcpStarted(signal)) + { jam(); /* ---------------------------------------------------------------------- * THE LOCAL CHECKPOINT HAS BEEN STARTED. IT IS NOW TIME TO @@ -10507,26 +10507,7 @@ void Dblqh::execLQH_RESTART_OP(Signal* signal) lcpPtr.i = signal->theData[1]; ptrCheckGuard(lcpPtr, clcpFileSize, lcpRecord); ndbrequire(fragptr.p->fragStatus == Fragrecord::BLOCKED); - if (lcpPtr.p->lcpState == LcpRecord::LCP_STARTED) { - jam(); - /***********************************************************************/ - /* THIS SIGNAL CAN ONLY BE RECEIVED WHEN FRAGMENT IS BLOCKED AND - * THE LOCAL CHECKPOINT HAS BEEN STARTED. THE BLOCKING WILL BE - * REMOVED AS SOON AS ALL OPERATIONS HAVE BEEN STARTED. - ***********************************************************************/ - restartOperationsLab(signal); - } else if (lcpPtr.p->lcpState == LcpRecord::LCP_BLOCKED_COMP) { - jam(); - /*******************************************************************> - * THE CHECKPOINT IS COMPLETED BUT HAS NOT YET STARTED UP - * ALL OPERATIONS AGAIN. - * WE PERFORM THIS START-UP BEFORE CONTINUING WITH THE NEXT - * FRAGMENT OF THE LOCAL CHECKPOINT TO AVOID ANY STRANGE ERRORS. - *******************************************************************> */ - restartOperationsLab(signal); - } else { - ndbrequire(false); - } + restartOperationsLab(signal); }//Dblqh::execLQH_RESTART_OP() void Dblqh::restartOperationsLab(Signal* signal) @@ -11075,7 +11056,8 @@ void Dblqh::checkLcpHoldop(Signal* signal) * * SUBROUTINE SHORT NAME = CLS * ========================================================================== */ -void Dblqh::checkLcpStarted(Signal* signal) +bool +Dblqh::checkLcpStarted(Signal* signal) { LcpLocRecordPtr clsLcpLocptr; @@ -11085,7 +11067,7 @@ void Dblqh::checkLcpStarted(Signal* signal) do { ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord); if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_WAIT_STARTED){ - return; + return false; }//if clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc; i++; @@ -11096,12 +11078,13 @@ void Dblqh::checkLcpStarted(Signal* signal) do { ptrCheckGuard(clsLcpLocptr, clcpLocrecFileSize, lcpLocRecord); if (clsLcpLocptr.p->lcpLocstate == LcpLocRecord::TUP_WAIT_STARTED){ - return; + return false; }//if clsLcpLocptr.i = clsLcpLocptr.p->nextLcpLoc; i++; } while (clsLcpLocptr.i != RNIL); - lcpPtr.p->lcpState = LcpRecord::LCP_STARTED; + + return true; }//Dblqh::checkLcpStarted() /* ========================================================================== @@ -11262,20 +11245,12 @@ void Dblqh::sendAccContOp(Signal* signal) do { ptrCheckGuard(sacLcpLocptr, clcpLocrecFileSize, lcpLocRecord); sacLcpLocptr.p->accContCounter = 0; - if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_STARTED){ - /* ------------------------------------------------------------------- */ - /*SEND START OPERATIONS TO ACC AGAIN */ - /* ------------------------------------------------------------------- */ - signal->theData[0] = lcpPtr.p->lcpAccptr; - signal->theData[1] = sacLcpLocptr.p->locFragid; - sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA); - count++; - } else if(sacLcpLocptr.p->lcpLocstate == LcpLocRecord::ACC_COMPLETED){ - signal->theData[0] = sacLcpLocptr.i; - sendSignal(reference(), GSN_ACC_CONTOPCONF, signal, 1, JBB); - } else { - ndbrequire(false); - } + /* ------------------------------------------------------------------- */ + /*SEND START OPERATIONS TO ACC AGAIN */ + /* ------------------------------------------------------------------- */ + signal->theData[0] = 
lcpPtr.p->lcpAccptr; + signal->theData[1] = sacLcpLocptr.p->locFragid; + sendSignal(fragptr.p->accBlockref, GSN_ACC_CONTOPREQ, signal, 2, JBA); sacLcpLocptr.i = sacLcpLocptr.p->nextLcpLoc; } while (sacLcpLocptr.i != RNIL); @@ -11311,9 +11286,18 @@ void Dblqh::sendStartLcp(Signal* signal) signal->theData[0] = stlLcpLocptr.i; signal->theData[1] = cownref; signal->theData[2] = stlLcpLocptr.p->tupRef; - sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA); + if(ERROR_INSERTED(5077)) + sendSignalWithDelay(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, + signal, 5000, 3); + else + sendSignal(fragptr.p->tupBlockref, GSN_TUP_LCPREQ, signal, 3, JBA); stlLcpLocptr.i = stlLcpLocptr.p->nextLcpLoc; } while (stlLcpLocptr.i != RNIL); + + if(ERROR_INSERTED(5077)) + { + ndbout_c("Delayed TUP_LCPREQ with 5 sec"); + } }//Dblqh::sendStartLcp() /* ------------------------------------------------------------------------- */ diff --git a/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp b/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp index b944bb5485b..0fee687f1bc 100644 --- a/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp +++ b/ndb/src/kernel/blocks/ndbfs/OpenFiles.hpp @@ -82,8 +82,14 @@ inline bool OpenFiles::insert(AsyncFile* file, Uint16 id){ continue; if(strcmp(m_files[i].m_file->theFileName.c_str(), - file->theFileName.c_str()) == 0){ - ERROR_SET(fatal, AFS_ERROR_ALLREADY_OPEN,"","OpenFiles::insert()"); + file->theFileName.c_str()) == 0) + { + BaseString names; + names.assfmt("open: >%s< existing: >%s<", + file->theFileName.c_str(), + m_files[i].m_file->theFileName.c_str()); + ERROR_SET(fatal, AFS_ERROR_ALLREADY_OPEN, names.c_str(), + "OpenFiles::insert()"); } } diff --git a/ndb/src/mgmapi/mgmapi.cpp b/ndb/src/mgmapi/mgmapi.cpp index fe7909ba5a1..ab32de5b9ca 100644 --- a/ndb/src/mgmapi/mgmapi.cpp +++ b/ndb/src/mgmapi/mgmapi.cpp @@ -1615,9 +1615,9 @@ ndb_mgm_start_backup(NdbMgmHandle handle, int wait_completed, { // start backup can take some time, set timeout high Uint64 old_timeout= handle->read_timeout; if (wait_completed == 2) - handle->read_timeout= 30*60*1000; // 30 minutes + handle->read_timeout= 48*60*60*1000; // 48 hours else if (wait_completed == 1) - handle->read_timeout= 5*60*1000; // 5 minutes + handle->read_timeout= 10*60*1000; // 10 minutes reply = ndb_mgm_call(handle, start_backup_reply, "start backup", &args); handle->read_timeout= old_timeout; } diff --git a/ndb/src/mgmsrv/MgmtSrvr.cpp b/ndb/src/mgmsrv/MgmtSrvr.cpp index 86a5f3e8f4b..6b9bf7a0978 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.cpp +++ b/ndb/src/mgmsrv/MgmtSrvr.cpp @@ -810,7 +810,7 @@ MgmtSrvr::restartNode(int processId, bool nostart, result = sendSignal(processId, NO_WAIT, signal, true); } - if (result == -1) { + if (result == -1 && theWaitState != WAIT_NODEFAILURE) { m_stopRec.inUse = false; return SEND_OR_RECEIVE_FAILED; } @@ -1937,6 +1937,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) #ifdef VM_TRACE ndbout_c("I'm not master resending to %d", aNodeId); #endif + theWaitNode= aNodeId; NdbApiSignal aSignal(_ownReference); BackupReq* req = CAST_PTR(BackupReq, aSignal.getDataPtrSend()); aSignal.set(TestOrd::TraceAPI, BACKUP, GSN_BACKUP_REQ, @@ -1964,6 +1965,7 @@ MgmtSrvr::handleReceivedSignal(NdbApiSignal* signal) event.Event = BackupEvent::BackupAborted; event.Aborted.Reason = rep->reason; event.Aborted.BackupId = rep->backupId; + event.Aborted.ErrorCode = rep->reason; backupCallback(event); } break; @@ -2093,6 +2095,13 @@ MgmtSrvr::handleStatus(NodeId nodeId, bool alive, bool nfComplete) handleStopReply(nodeId, 0); DBUG_VOID_RETURN; 
} + + if(theWaitNode == nodeId && + theWaitState != NO_WAIT && theWaitState != WAIT_STOP) + { + theWaitState = WAIT_NODEFAILURE; + NdbCondition_Signal(theMgmtWaitForResponseCondPtr); + } } eventReport(_ownNodeId, theData); @@ -2448,7 +2457,7 @@ MgmtSrvr::startBackup(Uint32& backupId, int waitCompleted) int result; if (waitCompleted == 2) { result = sendRecSignal(nodeId, WAIT_BACKUP_COMPLETED, - signal, true, 30*60*1000 /*30 secs*/); + signal, true, 48*60*60*1000 /* 48 hours */); } else if (waitCompleted == 1) { result = sendRecSignal(nodeId, WAIT_BACKUP_STARTED, @@ -2477,22 +2486,6 @@ return -1; break; } - } else { - switch(m_lastBackupEvent.Event){ - case BackupEvent::BackupCompleted: - backupId = m_lastBackupEvent.Completed.BackupId; - break; - case BackupEvent::BackupStarted: - backupId = m_lastBackupEvent.Started.BackupId; - break; - case BackupEvent::BackupFailedToStart: - return m_lastBackupEvent.FailedToStart.ErrorCode; - case BackupEvent::BackupAborted: - return m_lastBackupEvent.Aborted.ErrorCode; - default: - return -1; - break; - } } return 0; diff --git a/ndb/src/mgmsrv/MgmtSrvr.hpp b/ndb/src/mgmsrv/MgmtSrvr.hpp index 637f54f74a8..7e13b41438f 100644 --- a/ndb/src/mgmsrv/MgmtSrvr.hpp +++ b/ndb/src/mgmsrv/MgmtSrvr.hpp @@ -622,7 +622,8 @@ private: WAIT_STOP, WAIT_BACKUP_STARTED, WAIT_BACKUP_COMPLETED, - WAIT_VERSION + WAIT_VERSION, + WAIT_NODEFAILURE }; /** @@ -706,6 +707,7 @@ private: NdbApiSignal* theSignalIdleList; // List of unused signals + Uint32 theWaitNode; WaitSignalType theWaitState; // State denoting a set of signals we accept to recieve. diff --git a/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp b/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp index 2126c9d358d..f93948abc75 100644 --- a/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp +++ b/ndb/src/mgmsrv/MgmtSrvrGeneralSignalHandling.cpp @@ -108,6 +108,7 @@ MgmtSrvr::sendRecSignal(Uint16 aNodeId, return -1; } theWaitState = aWaitState; + theWaitNode = aNodeId; return receiveOptimisedResponse(waitTime); } @@ -119,11 +120,12 @@ MgmtSrvr::receiveOptimisedResponse(int waitTime) theFacade->checkForceSend(_blockNumber); NDB_TICKS maxTime = NdbTick_CurrentMillisecond() + waitTime; - while (theWaitState != NO_WAIT && waitTime > 0) { + while (theWaitState != NO_WAIT && theWaitState != WAIT_NODEFAILURE + && waitTime > 0) { NdbCondition_WaitTimeout(theMgmtWaitForResponseCondPtr, theFacade->theMutexPtr, waitTime); - if(theWaitState == NO_WAIT) + if(theWaitState == NO_WAIT || theWaitState == WAIT_NODEFAILURE) break; waitTime = (maxTime - NdbTick_CurrentMillisecond()); }//while diff --git a/ndb/src/ndbapi/Ndbif.cpp b/ndb/src/ndbapi/Ndbif.cpp index 40aaa1e3daa..c550701229c 100644 --- a/ndb/src/ndbapi/Ndbif.cpp +++ b/ndb/src/ndbapi/Ndbif.cpp @@ -454,7 +454,7 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) tFirstDataPtr = int2void(tFirstData); if(tFirstDataPtr != 0){ tOp = void2rec_op(tFirstDataPtr); - if (tOp->checkMagicNumber() == 0) { + if (tOp->checkMagicNumber(false) == 0) { tCon = tOp->theNdbCon; if (tCon != NULL) { if ((tCon->theSendStatus == NdbTransaction::sendTC_OP) || @@ -467,11 +467,11 @@ Ndb::handleReceivedSignal(NdbApiSignal* aSignal, LinearSectionPtr ptr[3]) }//if }//if }//if - } else { -#ifdef VM_TRACE - ndbout_c("Recevied TCKEY_FAILREF wo/ operation"); -#endif } +#ifdef VM_TRACE + ndbout_c("Received TCKEY_FAILREF w/o operation"); +#endif + return; break; } case GSN_TCKEYREF: diff --git a/ndb/src/ndbapi/ndberror.c 
b/ndb/src/ndbapi/ndberror.c index ac9bda5a9f3..6bbd38c9bbb 100644 --- a/ndb/src/ndbapi/ndberror.c +++ b/ndb/src/ndbapi/ndberror.c @@ -144,10 +144,10 @@ ErrorBundle ErrorCodes[] = { { 4008, UR, "Receive from NDB failed" }, { 4009, UR, "Cluster Failure" }, { 4012, UR, - "Time-out, most likely caused by simple read or cluster failure" }, + "Request to ndbd timed out, maybe due to high load or communication problems"}, { 4024, UR, - "Time-out, most likely caused by simple read or cluster failure" }, - + "Time-out, most likely caused by simple read or cluster failure" }, + /** * TemporaryResourceError */ @@ -383,7 +383,7 @@ ErrorBundle ErrorCodes[] = { { 1325, IE, "File or scan error" }, { 1326, IE, "Backup abortet due to node failure" }, { 1327, IE, "1327" }, - + { 1340, IE, "Backup undefined error" }, { 1342, AE, "Backup failed to allocate buffers (check configuration)" }, { 1343, AE, "Backup failed to setup fs buffers (check configuration)" }, @@ -393,7 +393,8 @@ ErrorBundle ErrorCodes[] = { { 1347, AE, "Backup failed to allocate table memory (check configuration)" }, { 1348, AE, "Backup failed to allocate file record (check configuration)" }, { 1349, AE, "Backup failed to allocate attribute record (check configuration)" }, - + { 1329, AE, "Backup during software upgrade not supported" }, + /** * Still uncategorized */ diff --git a/ndb/test/ndbapi/testBackup.cpp b/ndb/test/ndbapi/testBackup.cpp index f3594514c5b..7a6f11a6bb9 100644 --- a/ndb/test/ndbapi/testBackup.cpp +++ b/ndb/test/ndbapi/testBackup.cpp @@ -74,20 +74,20 @@ int runAbort(NDBT_Context* ctx, NDBT_Step* step){ if (testMaster) { if (testSlave) { - if (backup.NFMasterAsSlave(restarter) == -1){ + if (backup.NFMasterAsSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } else { - if (backup.NFMaster(restarter) == -1){ + if (backup.NFMaster(restarter) != NDBT_OK){ return NDBT_FAILED; } } } else { - if (backup.NFSlave(restarter) == -1){ + if (backup.NFSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } - + return NDBT_OK; } @@ -108,16 +108,16 @@ int runFail(NDBT_Context* ctx, NDBT_Step* step){ if (testMaster) { if (testSlave) { - if (backup.FailMasterAsSlave(restarter) == -1){ + if (backup.FailMasterAsSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } else { - if (backup.FailMaster(restarter) == -1){ + if (backup.FailMaster(restarter) != NDBT_OK){ return NDBT_FAILED; } } } else { - if (backup.FailSlave(restarter) == -1){ + if (backup.FailSlave(restarter) != NDBT_OK){ return NDBT_FAILED; } } diff --git a/ndb/test/ndbapi/testIndex.cpp b/ndb/test/ndbapi/testIndex.cpp index 3526326b680..5785db232c4 100644 --- a/ndb/test/ndbapi/testIndex.cpp +++ b/ndb/test/ndbapi/testIndex.cpp @@ -1323,14 +1323,14 @@ TESTCASE("NFNR2_O", "Test that indexes are correctly maintained during node fail and node restart"){ TC_PROPERTY("OrderedIndex", 1); TC_PROPERTY("LoggedIndexes", (unsigned)0); - TC_PROPERTY("PauseThreads", 2); + TC_PROPERTY("PauseThreads", 1); INITIALIZER(runClearTable); INITIALIZER(createRandomIndex); INITIALIZER(createPkIndex); INITIALIZER(runLoadTable); STEP(runRestarts); STEP(runTransactions2); - STEP(runTransactions2); + //STEP(runTransactions2); FINALIZER(runVerifyIndex); FINALIZER(createRandomIndex_Drop); FINALIZER(createPkIndex_Drop); diff --git a/ndb/test/ndbapi/testOperations.cpp b/ndb/test/ndbapi/testOperations.cpp index 9f1d5ee1191..726f35b01fb 100644 --- a/ndb/test/ndbapi/testOperations.cpp +++ b/ndb/test/ndbapi/testOperations.cpp @@ -547,21 +547,64 @@ runLockUpgrade1(NDBT_Context* ctx, NDBT_Step* step){ do { 
CHECK(hugoOps.startTransaction(pNdb) == 0); - CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0); - CHECK(hugoOps.execute_NoCommit(pNdb) == 0); + if(ctx->getProperty("LOCK_UPGRADE", 1) == 1) + { + CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0); + CHECK(hugoOps.execute_NoCommit(pNdb) == 0); - ctx->setProperty("READ_DONE", 1); - ctx->broadcast(); - ndbout_c("wait 2"); - ctx->getPropertyWait("READ_DONE", 2); - ndbout_c("wait 2 - done"); + ctx->setProperty("READ_DONE", 1); + ctx->broadcast(); + ndbout_c("wait 2"); + ctx->getPropertyWait("READ_DONE", 2); + ndbout_c("wait 2 - done"); + } + else + { + ctx->setProperty("READ_DONE", 1); + ctx->broadcast(); + ctx->getPropertyWait("READ_DONE", 2); + ndbout_c("wait 2 - done"); + CHECK(hugoOps.pkReadRecord(pNdb, 0, 1, NdbOperation::LM_Read) == 0); + CHECK(hugoOps.execute_NoCommit(pNdb) == 0); + } + if(ctx->getProperty("LU_OP", o_INS) == o_INS) + { + CHECK(hugoOps.pkDeleteRecord(pNdb, 0, 1) == 0); + CHECK(hugoOps.pkInsertRecord(pNdb, 0, 1, 2) == 0); + } + else if(ctx->getProperty("LU_OP", o_UPD) == o_UPD) + { + CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, 2) == 0); + } + else + { + CHECK(hugoOps.pkDeleteRecord(pNdb, 0, 1) == 0); + } ctx->setProperty("READ_DONE", 3); ctx->broadcast(); ndbout_c("before update"); - CHECK(hugoOps.pkUpdateRecord(pNdb, 0, 1, 2) == 0); ndbout_c("wait update"); - CHECK(hugoOps.execute_NoCommit(pNdb) == 0); - CHECK(hugoOps.closeTransaction(pNdb)); + CHECK(hugoOps.execute_Commit(pNdb) == 0); + CHECK(hugoOps.closeTransaction(pNdb) == 0); + + CHECK(hugoOps.startTransaction(pNdb) == 0); + CHECK(hugoOps.pkReadRecord(pNdb, 0, 1) == 0); + int res= hugoOps.execute_Commit(pNdb); + if(ctx->getProperty("LU_OP", o_INS) == o_INS) + { + CHECK(res == 0); + CHECK(hugoOps.verifyUpdatesValue(2) == 0); + } + else if(ctx->getProperty("LU_OP", o_UPD) == o_UPD) + { + CHECK(res == 0); + CHECK(hugoOps.verifyUpdatesValue(2) == 0); + } + else + { + CHECK(res == 626); + } + } while(0); return result; @@ -592,10 +635,17 @@ runLockUpgrade2(NDBT_Context* ctx, NDBT_Step* step){ ndbout_c("wait 3 - done"); NdbSleep_MilliSleep(200); - CHECK(hugoOps.execute_Commit(pNdb) == 0); + if(ctx->getProperty("LU_COMMIT", (Uint32)0) == 0) + { + CHECK(hugoOps.execute_Commit(pNdb) == 0); + } + else + { + CHECK(hugoOps.execute_Rollback(pNdb) == 0); + } } while(0); - return NDBT_FAILED; + return result; } int @@ -607,11 +657,17 @@ main(int argc, const char** argv){ NDBT_TestSuite ts("testOperations"); + for(Uint32 i = 0; i < 12; i++) { BaseString name("bug_9749"); + name.appfmt("_%d", i); NDBT_TestCaseImpl1 *pt = new NDBT_TestCaseImpl1(&ts, name.c_str(), ""); + pt->setProperty("LOCK_UPGRADE", 1 + (i & 1)); + pt->setProperty("LU_OP", 1 + ((i >> 1) % 3)); + pt->setProperty("LU_COMMIT", i / 6); + pt->addInitializer(new NDBT_Initializer(pt, "runClearTable", runClearTable)); diff --git a/ndb/test/run-test/daily-basic-tests.txt b/ndb/test/run-test/daily-basic-tests.txt index 8528e709eb3..b2d809ef6be 100644 --- a/ndb/test/run-test/daily-basic-tests.txt +++ b/ndb/test/run-test/daily-basic-tests.txt @@ -2,6 +2,54 @@ max-time: 3600 cmd: atrt-mysql-test-run args: --force +max-time: 600 +cmd: atrt-testBackup +args: -n NFMaster T1 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n NFMasterAsSlave T1 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n NFSlave T1 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 600 +cmd: atrt-testBackup +args: 
-n FailMaster T1 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n FailMasterAsSlave T1 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + +max-time: 600 +cmd: atrt-testBackup +args: -n FailSlave T1 + +max-time: 600 +cmd: testBasic +args: -n PkRead T1 + max-time: 600 cmd: atrt-testBackup args: -n BackupOne T1 T6 T3 I3 diff --git a/ndb/test/src/NdbBackup.cpp b/ndb/test/src/NdbBackup.cpp index bc5baeae4b5..fe101b9c80b 100644 --- a/ndb/test/src/NdbBackup.cpp +++ b/ndb/test/src/NdbBackup.cpp @@ -244,7 +244,11 @@ NdbBackup::NFSlave(NdbRestarter& _restarter){ int NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz, bool onMaster){ + int nNodes = _restarter.getNumDbNodes(); { + if(nNodes == 1) + return NDBT_OK; + int nodeId = _restarter.getMasterNodeId(); CHECK(_restarter.restartOneDbNode(nodeId, false, true, true) == 0, @@ -255,15 +259,11 @@ NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz, CHECK(_restarter.startNodes(&nodeId, 1) == 0, "failed to start node"); - - NdbSleep_SecSleep(10); } - + CHECK(_restarter.waitClusterStarted() == 0, "waitClusterStarted failed"); - - int nNodes = _restarter.getNumDbNodes(); - + myRandom48Init(NdbTick_CurrentMillisecond()); for(int i = 0; i < sz; i++) diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc trx->mysql_log_file_name = log_file_name; trx->mysql_log_offset = (ib_longlong)end_offset; +#ifdef HAVE_REPLICATION + if (thd->variables.sync_replication) { + /* Let us store the binlog file name and the position, so that + we know how long to wait for the binlog to be replicated to + the slave in synchronous replication. */ + + if (trx->repl_wait_binlog_name == NULL) { + + trx->repl_wait_binlog_name = + (char*)mem_alloc_noninline(FN_REFLEN + 100); + } + + ut_a(strlen(log_file_name) < FN_REFLEN + 100); + + strcpy(trx->repl_wait_binlog_name, log_file_name); + + trx->repl_wait_binlog_pos = (ib_longlong)end_offset; + } +#endif /* HAVE_REPLICATION */ trx->flush_log_later = TRUE; innobase_commit(thd, trx_handle); @@ -1572,6 +1639,7 @@ innobase_report_binlog_offset_and_commit( return(0); } +#if 0 /*********************************************************************** This function stores the binlog offset and flushes logs. */ @@ -1602,7 +1670,6 @@ innobase_store_binlog_offset_and_flush_log( /* Syncronous flush of the log buffer to disk */ log_buffer_flush_to_disk(); } - #endif /********************************************************************* @@ -1631,9 +1698,215 @@ innobase_commit_complete( trx_commit_complete_for_mysql(trx); } +#ifdef HAVE_REPLICATION + if (thd->variables.sync_replication + && trx->repl_wait_binlog_name + && innobase_repl_state != 0) { + + struct timespec abstime; + int cmp; + int ret; + + /* In synchronous replication, let us wait until the MySQL + replication has sent the relevant binlog segment to the + replication slave. */ + + pthread_mutex_lock(&innobase_repl_cond_mutex); +try_again: + if (innobase_repl_state == 0) { + + pthread_mutex_unlock(&innobase_repl_cond_mutex); + + return(0); + } + + cmp = strcmp(innobase_repl_file_name, + trx->repl_wait_binlog_name); + if (cmp > 0 + || (cmp == 0 && innobase_repl_pos + >= (my_off_t)trx->repl_wait_binlog_pos)) { + /* We have already sent the relevant binlog to the + slave: no need to wait here */ + + pthread_mutex_unlock(&innobase_repl_cond_mutex); + +/* printf("Binlog now sent\n"); */ + + return(0); + } + + /* Let us update the info about the minimum binlog position + of waiting threads in the innobase_repl_... 
variables */ + + if (innobase_repl_wait_file_name_inited != 0) { + cmp = strcmp(trx->repl_wait_binlog_name, + innobase_repl_wait_file_name); + if (cmp < 0 + || (cmp == 0 && (my_off_t)trx->repl_wait_binlog_pos + <= innobase_repl_wait_pos)) { + /* This thd has an even lower position, let + us update the minimum info */ + + strcpy(innobase_repl_wait_file_name, + trx->repl_wait_binlog_name); + + innobase_repl_wait_pos = + trx->repl_wait_binlog_pos; + } + } else { + strcpy(innobase_repl_wait_file_name, + trx->repl_wait_binlog_name); + + innobase_repl_wait_pos = trx->repl_wait_binlog_pos; + + innobase_repl_wait_file_name_inited = 1; + } + set_timespec(abstime, thd->variables.sync_replication_timeout); + + /* Let us suspend this thread to wait on the condition; + when replication has progressed far enough, we will release + these waiting threads. The following call + pthread_cond_timedwait also atomically unlocks + innobase_repl_cond_mutex. */ + + innobase_repl_n_wait_threads++; + +/* printf("Waiting for binlog to be sent\n"); */ + + ret = pthread_cond_timedwait(&innobase_repl_cond, + &innobase_repl_cond_mutex, &abstime); + innobase_repl_n_wait_threads--; + + if (ret != 0) { + ut_print_timestamp(stderr); + + fprintf(stderr, +" InnoDB: Error: MySQL synchronous replication\n" +"InnoDB: was not able to send the binlog to the slave within the\n" +"InnoDB: timeout %lu. We assume that the slave has become inaccessible,\n" +"InnoDB: and switch off synchronous replication until the communication\n" +"InnoDB: to the slave works again.\n", + thd->variables.sync_replication_timeout); + fprintf(stderr, +"InnoDB: MySQL synchronous replication has sent binlog\n" +"InnoDB: to the slave up to file %s, position %lu\n", innobase_repl_file_name, + (ulong)innobase_repl_pos); + fprintf(stderr, +"InnoDB: This transaction needs it to be sent up to\n" +"InnoDB: file %s, position %lu\n", trx->repl_wait_binlog_name, + (ulong)trx->repl_wait_binlog_pos); + + innobase_repl_state = 0; + + pthread_mutex_unlock(&innobase_repl_cond_mutex); + + return(0); + } + + goto try_again; + } +#endif /* HAVE_REPLICATION */ return(0); } +#ifdef HAVE_REPLICATION +/********************************************************************* +In synchronous replication, reports to InnoDB up to which binlog position +we have sent the binlog to the slave. Note that replication is synchronous +for one slave only. For other slaves, we do nothing in this function. This +function is used in a replication master. 
*/ + +int +innobase_repl_report_sent_binlog( +/*=============================*/ + /* out: 0 */ + THD* thd, /* in: thread doing the binlog communication to + the slave */ + char* log_file_name, /* in: binlog file name */ + my_off_t end_offset) /* in: the offset in the binlog file up to + which we sent the contents to the slave */ +{ + int cmp; + ibool can_release_threads = 0; + + /* If synchronous replication is not switched on, or this thd is + sending binlog to a slave where we do not need synchronous replication, + then return immediately */ + + if (thd->server_id != thd->variables.sync_replication_slave_id) { + + /* Do nothing */ + + return(0); + } + + pthread_mutex_lock(&innobase_repl_cond_mutex); + + if (innobase_repl_state == 0) { + + ut_print_timestamp(stderr); + fprintf(stderr, +" InnoDB: Switching MySQL synchronous replication on again at\n" +"InnoDB: binlog file %s, position %lu\n", log_file_name, (ulong)end_offset); + + innobase_repl_state = 1; + } + + /* The position should increase monotonically, since just one thread + is sending the binlog to the slave for which we want synchronous + replication. Let us check this, and print an error to the .err log + if that is not the case. */ + + if (innobase_repl_file_name_inited) { + cmp = strcmp(log_file_name, innobase_repl_file_name); + + if (cmp < 0 + || (cmp == 0 && end_offset < innobase_repl_pos)) { + + ut_print_timestamp(stderr); + fprintf(stderr, +" InnoDB: Error: MySQL synchronous replication has sent binlog\n" +"InnoDB: to the slave up to file %s, position %lu\n", innobase_repl_file_name, + (ulong)innobase_repl_pos); + fprintf(stderr, +"InnoDB: but now MySQL reports that it sent the binlog only up to\n" +"InnoDB: file %s, position %lu\n", log_file_name, (ulong)end_offset); + + } + } + + strcpy(innobase_repl_file_name, log_file_name); + innobase_repl_pos = end_offset; + innobase_repl_file_name_inited = 1; + + if (innobase_repl_n_wait_threads > 0) { + /* Let us check if some of the waiting threads doing a trx + commit can now proceed */ + + cmp = strcmp(innobase_repl_file_name, + innobase_repl_wait_file_name); + if (cmp > 0 + || (cmp == 0 && innobase_repl_pos + >= innobase_repl_wait_pos)) { + + /* Yes, at least one waiting thread can now proceed: + let us release all waiting threads with a broadcast */ + + can_release_threads = 1; + + innobase_repl_wait_file_name_inited = 0; + } + } + + pthread_mutex_unlock(&innobase_repl_cond_mutex); + + if (can_release_threads) { + + pthread_cond_broadcast(&innobase_repl_cond); + } + + return(0); +} +#endif /* HAVE_REPLICATION */ + /********************************************************************* Rolls back a transaction or the latest SQL statement. 
*/ diff --git a/sql/ha_innodb.h b/sql/ha_innodb.h index 35f95ead757..6c412a889b2 100644 --- a/sql/ha_innodb.h +++ b/sql/ha_innodb.h @@ -321,3 +321,5 @@ int innobase_rollback_by_xid( int innobase_xa_end(THD *thd); +int innobase_repl_report_sent_binlog(THD *thd, char *log_file_name, + my_off_t end_offset); diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index ffa4ac55462..e2d954b1928 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2000-2003 MySQL AB + /* Copyright (C) 2000-2003 MySQL AB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -4479,6 +4479,7 @@ int ndbcluster_drop_database(const char *path) uint i; char *tabname; List<char> drop_list; + int ret= 0; ha_ndbcluster::set_dbname(path, (char *)&dbname); DBUG_PRINT("enter", ("db: %s", dbname)); @@ -4505,10 +4506,18 @@ int ndbcluster_drop_database(const char *path) ndb->setDatabaseName(dbname); List_iterator_fast<char> it(drop_list); while ((tabname=it++)) + { if (dict->dropTable(tabname)) - ERR_RETURN(dict->getNdbError()); - - DBUG_RETURN(0); + { + const NdbError err= dict->getNdbError(); + if (err.code != 709) + { + ERR_PRINT(err); + ret= ndb_to_mysql_error(&err); + } + } + } + DBUG_RETURN(ret); } diff --git a/sql/handler.cc b/sql/handler.cc index 542efaba2bf..3095aeb9476 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -2411,3 +2411,62 @@ TYPELIB *ha_known_exts(void) } return &known_extensions; } + +#ifdef HAVE_REPLICATION +/* + Reports to table handlers up to which position we have sent the binlog + to a slave in replication + + SYNOPSIS + ha_repl_report_sent_binlog() + + NOTES + Only works for InnoDB at the moment + + RETURN VALUE + Always 0 (= success) + + PARAMETERS + THD *thd in: thread doing the binlog communication to + the slave + char *log_file_name in: binlog file name + my_off_t end_offset in: the offset in the binlog file up to + which we sent the contents to the slave +*/ + +int ha_repl_report_sent_binlog(THD *thd, char *log_file_name, + my_off_t end_offset) +{ +#ifdef HAVE_INNOBASE_DB + return innobase_repl_report_sent_binlog(thd,log_file_name,end_offset); +#else + /* remove warnings about unused parameters */ + thd=thd; log_file_name=log_file_name; end_offset=end_offset; + return 0; +#endif +} + +/* + Reports to table handlers that we stop replication to a specific slave + + SYNOPSIS + ha_repl_report_replication_stop() + + NOTES + Does nothing at the moment + + RETURN VALUE + Always 0 (= success) + + PARAMETERS + THD *thd in: thread doing the binlog communication to + the slave +*/ + +int ha_repl_report_replication_stop(THD *thd) +{ + thd = thd; + + return 0; +} +#endif /* HAVE_REPLICATION */ diff --git a/sql/handler.h b/sql/handler.h index 4c06fe8299d..5e25f038c36 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -843,7 +843,7 @@ int ha_change_key_cache_param(KEY_CACHE *key_cache); int ha_change_key_cache(KEY_CACHE *old_key_cache, KEY_CACHE *new_key_cache); int ha_end_key_cache(KEY_CACHE *key_cache); -/* weird stuff */ +/* report to InnoDB that control passes to the client */ int ha_release_temporary_latches(THD *thd); /* transactions: interface to handlerton functions */ @@ -875,3 +875,7 @@ void trans_register_ha(THD *thd, bool all, handlerton *ht); #define trans_need_2pc(thd, all) ((total_ha_2pc > 1) && \ !((all ? 
&thd->transaction.all : &thd->transaction.stmt)->no_2pc)) +/* semi-synchronous replication */ +int ha_repl_report_sent_binlog(THD *thd, char *log_file_name, + my_off_t end_offset); +int ha_repl_report_replication_stop(THD *thd); diff --git a/sql/item_func.h b/sql/item_func.h index ba5a6101e4c..77b977fe778 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -1318,7 +1318,7 @@ public: longlong val_int() { if (execute(&result_field)) - return 0LL; + return (longlong) 0; return result_field->val_int(); } diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index cd4777d542b..7a9c7898856 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -814,14 +814,14 @@ static bool calc_time_diff(TIME *l_time1, TIME *l_time2, int l_sign, We should check it before calc_time_diff call. */ if (l_time1->time_type == MYSQL_TIMESTAMP_TIME) // Time value - days= l_time1->day - l_sign*l_time2->day; + days= (long)l_time1->day - l_sign * (long)l_time2->day; else { days= calc_daynr((uint) l_time1->year, (uint) l_time1->month, (uint) l_time1->day); if (l_time2->time_type == MYSQL_TIMESTAMP_TIME) - days-= l_sign*l_time2->day; + days-= l_sign * (long)l_time2->day; else days-= l_sign*calc_daynr((uint) l_time2->year, (uint) l_time2->month, diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 9c592d068ee..169c9e057b5 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2022,7 +2022,7 @@ static void init_signals(void) if (test_flags & TEST_CORE_ON_SIGNAL) { /* Change limits so that we will get a core file */ - struct rlimit rl; + STRUCT_RLIMIT rl; rl.rlim_cur = rl.rlim_max = RLIM_INFINITY; if (setrlimit(RLIMIT_CORE, &rl) && global_system_variables.log_warnings) sql_print_warning("setrlimit could not change the size of core files to 'infinity'; We may not be able to generate a core file on signals"); @@ -5495,7 +5495,7 @@ The minimum value for this variable is 4096.", {"sync-frm", OPT_SYNC_FRM, "Sync .frm to disk on create. Enabled by default.", (gptr*) &opt_sync_frm, (gptr*) &opt_sync_frm, 0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0}, -#ifdef DOES_NOTHING_YET +#ifdef HAVE_REPLICATION {"sync-replication", OPT_SYNC_REPLICATION, "Enable synchronous replication.", (gptr*) &global_system_variables.sync_replication, @@ -5511,7 +5511,7 @@ The minimum value for this variable is 4096.", (gptr*) &global_system_variables.sync_replication_timeout, (gptr*) &global_system_variables.sync_replication_timeout, 0, GET_ULONG, REQUIRED_ARG, 10, 0, ~0L, 0, 1, 0}, -#endif +#endif /* HAVE_REPLICATION */ {"table_cache", OPT_TABLE_CACHE, "The number of open tables for all threads.", (gptr*) &table_cache_size, (gptr*) &table_cache_size, 0, GET_ULONG, REQUIRED_ARG, 64, 1, 512*1024L, diff --git a/sql/opt_range.cc b/sql/opt_range.cc index ac25f15d460..95fe003770b 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -2257,8 +2257,7 @@ inline double get_index_only_read_time(const PARAM* param, ha_rows records, param->table->file->ref_length) + 1); read_time=((double) (records+keys_per_block-1)/ (double) keys_per_block); - /* Add 0.01 to avoid cost races between 'range' and 'index' */ - return read_time + 0.01; + return read_time; } @@ -3150,10 +3149,16 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, (param->table->file->index_flags(keynr, param->max_key_part,1) & HA_KEYREAD_ONLY) && !(pk_is_clustered && keynr == param->table->s->primary_key)) - /* We can resolve this by only reading through this key. */ + { + /* + We can resolve this by only reading through this key. 
+ 0.01 is added to avoid races between range and 'index' scan. + */ found_read_time= get_index_only_read_time(param,found_records,keynr) + - cpu_cost; + cpu_cost + 0.01; + } else + { /* cost(read_through_index) = cost(disk_io) + cost(row_in_range_checks) The row_in_range check is in QUICK_RANGE_SELECT::cmp_next function. @@ -3161,8 +3166,8 @@ static TRP_RANGE *get_key_scans_params(PARAM *param, SEL_TREE *tree, found_read_time= param->table->file->read_time(keynr, param->range_count, found_records) + - cpu_cost; - + cpu_cost + 0.01; + } DBUG_PRINT("info",("key %s: found_read_time: %g (cur. read_time: %g)", param->table->key_info[keynr].name, found_read_time, read_time)); diff --git a/sql/set_var.cc b/sql/set_var.cc index bb3db177936..0e6e36f63a2 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -956,11 +956,9 @@ struct show_var_st init_vars[]= { {"sql_warnings", (char*) &sys_sql_warnings, SHOW_BOOL}, #ifdef HAVE_REPLICATION {sys_sync_binlog_period.name,(char*) &sys_sync_binlog_period, SHOW_SYS}, -#ifdef DOES_NOTHING_YET {sys_sync_replication.name, (char*) &sys_sync_replication, SHOW_SYS}, {sys_sync_replication_slave_id.name, (char*) &sys_sync_replication_slave_id,SHOW_SYS}, {sys_sync_replication_timeout.name, (char*) &sys_sync_replication_timeout,SHOW_SYS}, -#endif #endif {sys_sync_frm.name, (char*) &sys_sync_frm, SHOW_SYS}, #ifdef HAVE_TZNAME diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 634b6ab0995..0dcfd985f88 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -385,6 +385,9 @@ impossible position"; goto err; } + if (thd->variables.sync_replication) + ha_repl_report_sent_binlog(thd, log_file_name, pos); + /* We need to start a packet with something other than 255 to distinguish it from error @@ -470,6 +473,10 @@ impossible position"; my_errno= ER_UNKNOWN_ERROR; goto err; } + + if (thd->variables.sync_replication) + ha_repl_report_sent_binlog(thd, log_file_name, my_b_tell(&log)); + /* No need to save this event. We are only doing simple reads (no real parsing of the events) so we don't need it. 
And so @@ -527,6 +534,10 @@ impossible position"; my_errno= ER_UNKNOWN_ERROR; goto err; } + + if (thd->variables.sync_replication) + ha_repl_report_sent_binlog(thd, log_file_name, my_b_tell(&log)); + DBUG_PRINT("info", ("log event code %d", (*packet)[LOG_EVENT_OFFSET+1] )); if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) @@ -640,6 +651,9 @@ impossible position"; goto err; } + if (thd->variables.sync_replication) + ha_repl_report_sent_binlog(thd, log_file_name, my_b_tell(&log)); + if ((*packet)[LOG_EVENT_OFFSET+1] == LOAD_EVENT) { if (send_file(thd)) @@ -704,12 +718,19 @@ impossible position"; my_errno= ER_MASTER_FATAL_ERROR_READING_BINLOG; goto err; } + + if (thd->variables.sync_replication) + ha_repl_report_sent_binlog(thd, log_file_name, 0); + packet->length(0); packet->append('\0'); } } end: + if (thd->variables.sync_replication) + ha_repl_report_replication_stop(thd); + end_io_cache(&log); (void)my_close(file, MYF(MY_WME)); @@ -721,6 +742,9 @@ end: DBUG_VOID_RETURN; err: + if (thd->variables.sync_replication) + ha_repl_report_replication_stop(thd); + thd->proc_info = "Waiting to finalize termination"; end_io_cache(&log); /* diff --git a/sql/stacktrace.c b/sql/stacktrace.c index 322d647e741..838f547dc02 100644 --- a/sql/stacktrace.c +++ b/sql/stacktrace.c @@ -43,7 +43,7 @@ void safe_print_str(const char* name, const char* val, int max_len) fputc('\n', stderr); } -#ifdef HAVE_LINUXTHREADS +#ifdef TARGET_OS_LINUX #define SIGRETURN_FRAME_COUNT 2 #if defined(__alpha__) && defined(__GNUC__) @@ -201,7 +201,7 @@ end: stack trace is much more helpful in diagnosing the problem, so please do \n\ resolve it\n"); } -#endif /* HAVE_LINUXTHREADS */ +#endif /* TARGET_OS_LINUX */ #endif /* HAVE_STACKTRACE */ /* Produce a core for the thread */ diff --git a/sql/stacktrace.h b/sql/stacktrace.h index 980e1ea07eb..d5d1e05ef0e 100644 --- a/sql/stacktrace.h +++ b/sql/stacktrace.h @@ -18,7 +18,7 @@ extern "C" { #endif -#ifdef HAVE_LINUXTHREADS +#ifdef TARGET_OS_LINUX #if defined(HAVE_STACKTRACE) || (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) #undef HAVE_STACKTRACE #define HAVE_STACKTRACE @@ -30,7 +30,7 @@ extern char* heap_start; void print_stacktrace(gptr stack_bottom, ulong thread_stack); void safe_print_str(const char* name, const char* val, int max_len); #endif /* (defined (__i386__) || (defined(__alpha__) && defined(__GNUC__))) */ -#endif /* HAVE_LINUXTHREADS */ +#endif /* TARGET_OS_LINUX */ /* Define empty prototypes for functions that are not implemented */ #ifndef HAVE_STACKTRACE diff --git a/support-files/Makefile.am b/support-files/Makefile.am index 0a6077f0efc..5f5a10fc1fc 100644 --- a/support-files/Makefile.am +++ b/support-files/Makefile.am @@ -90,7 +90,7 @@ SUFFIXES = .sh -e 's!@''FIND_PROC''@!@FIND_PROC@!' \ -e 's!@''MYSQLD_DEFAULT_SWITCHES''@!@MYSQLD_DEFAULT_SWITCHES@!' \ -e 's!@''MYSQL_UNIX_ADDR''@!@MYSQL_UNIX_ADDR@!' \ - -e 's!@''IS_LINUX''@!@IS_LINUX@!' \ + -e 's!@''TARGET_LINUX''@!@TARGET_LINUX@!' \ -e "s!@""CONF_COMMAND""@!@CONF_COMMAND@!" \ -e 's!@''MYSQLD_USER''@!@MYSQLD_USER@!' \ -e 's!@''sysconfdir''@!@sysconfdir@!' \ diff --git a/tools/mysqlmanager.c b/tools/mysqlmanager.c index 1ae8f908dc2..4a5c08be50a 100644 --- a/tools/mysqlmanager.c +++ b/tools/mysqlmanager.c @@ -101,7 +101,7 @@ static CHARSET_INFO *cs= &my_charset_latin1; set by the user */ -#if defined(__i386__) && defined(HAVE_LINUXTHREADS) +#if defined(__i386__) && defined(TARGET_OS_LINUX) #define DO_STACKTRACE 1 #endif
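
Reviewer's note on the semi-synchronous replication hunks above (ha_innodb.cc, handler.cc, sql_repl.cc): a committing thread blocks on a condition variable until the binlog dump thread reports that the slave has received the binlog up to the commit's position, and the dump thread wakes all waiters whenever it advances. The sketch below models just that handshake; it is illustrative only and not part of the patch. The names (repl_sent_pos, wait_for_slave, report_sent) are invented, a single integer stands in for the real (binlog file name, offset) pair, and the real code additionally tracks the lowest position any thread waits for and, on timeout, switches synchronous replication off rather than failing the commit.

/* build with: gcc -o sync_repl_sketch sync_repl_sketch.c -lpthread */
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_mutex_t repl_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  repl_cond  = PTHREAD_COND_INITIALIZER;
static unsigned long   repl_sent_pos = 0; /* binlog sent to the slave up to
                                             here; hypothetical stand-in for
                                             the file name + offset pair */

/* Commit path, cf. innobase_commit_complete(): block until the slave has
   the binlog up to want_pos, or give up after timeout_sec. */
static int wait_for_slave(unsigned long want_pos, unsigned timeout_sec)
{
  struct timespec abstime;
  clock_gettime(CLOCK_REALTIME, &abstime);
  abstime.tv_sec += timeout_sec;

  pthread_mutex_lock(&repl_mutex);
  while (repl_sent_pos < want_pos) {
    /* pthread_cond_timedwait() atomically releases the mutex while
       sleeping and reacquires it before returning, which is why the
       patch can inspect the innobase_repl_* state right after waking */
    if (pthread_cond_timedwait(&repl_cond, &repl_mutex, &abstime) != 0) {
      pthread_mutex_unlock(&repl_mutex);
      return -1; /* timed out: treat the slave as unreachable */
    }
  }
  pthread_mutex_unlock(&repl_mutex);
  return 0;
}

/* Binlog dump thread, cf. innobase_repl_report_sent_binlog(): record
   progress and wake every waiting committer. */
static void report_sent(unsigned long sent_pos)
{
  pthread_mutex_lock(&repl_mutex);
  if (sent_pos > repl_sent_pos)
    repl_sent_pos = sent_pos;
  pthread_mutex_unlock(&repl_mutex);
  /* broadcast, not signal: many transactions may be waiting, and more
     than one of them may be satisfied by the new position */
  pthread_cond_broadcast(&repl_cond);
}

int main(void)
{
  report_sent(100); /* pretend the dump thread already reached pos 100 */
  if (wait_for_slave(90, 1) == 0)
    printf("slave has the binlog up to position 90\n");
  return 0;
}

Pairing a broadcast with a predicate that is re-checked in a loop is what makes spurious wakeups and multiple waiters safe here; the patch uses the same pattern, with the extra bookkeeping needed to broadcast only once the minimum waited-for position has been passed.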