diff --git a/VC++Files/client/mysqlslap.vcproj b/VC++Files/client/mysqlslap.vcproj
index 0f0f2f7db45..581a852b170 100644
--- a/VC++Files/client/mysqlslap.vcproj
+++ b/VC++Files/client/mysqlslap.vcproj
@@ -3,6 +3,7 @@
 	ProjectType="Visual C++"
 	Version="7.10"
 	Name="mysqlslap"
+	ProjectGUID="{2E9332CF-072A-4381-BF37-17C5AB4F8583}"
 	SccProjectName=""
 	SccLocalPath="">
@@ -39,7 +40,7 @@
 			Name="VCCustomBuildTool"/>
	[remaining hunks of this file: added XML elements; markup lost in extraction]
diff --git a/VC++Files/sql/mysqld.vcproj b/VC++Files/sql/mysqld.vcproj
index 28f360a2fa2..6002fb5d815 100644
--- a/VC++Files/sql/mysqld.vcproj
+++ b/VC++Files/sql/mysqld.vcproj
@@ -87,7 +87,7 @@
 			InlineFunctionExpansion="1"
 			OptimizeForProcessor="2"
 			AdditionalIncludeDirectories="../storage/bdb/build_win32,../include,../regex,../extra/yassl/include,../zlib"
-			PreprocessorDefinitions="NDEBUG;DBUG_OFF;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN;WITH_INNOBASE_STORAGE_ENGINE;WITH_BERKELEY_STORAGE_ENGINE;WITH_ARCHIVE_STORAGE_ENGINE;WITH_BLACKHOLE_STORAGE_ENGINE;WITH_EXAMPLE_STORAGE_ENGINE;WITH_FEDERATED_STORAGE_ENGINE;WITH_PARTITION_STORAGE_ENGINE"
+			PreprocessorDefinitions="NDEBUG;DBUG_OFF;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN;WITH_INNOBASE_STORAGE_ENGINE;WITH_BERKELEY_STORAGE_ENGINE;WITH_ARCHIVE_STORAGE_ENGINE;WITH_BLACKHOLE_STORAGE_ENGINE;WITH_EXAMPLE_STORAGE_ENGINE;WITH_FEDERATED_STORAGE_ENGINE;WITH_PARTITION_STORAGE_ENGINE;HAVE_ROW_BASED_REPLICATION"
 			StringPooling="TRUE"
 			RuntimeLibrary="0"
 			EnableFunctionLevelLinking="TRUE"
@@ -148,7 +148,7 @@
 			InlineFunctionExpansion="1"
 			OptimizeForProcessor="2"
 			AdditionalIncludeDirectories="../storage/bdb/build_win32,../include,../regex,../extra/yassl/include,../zlib"
-			PreprocessorDefinitions="NDEBUG;__NT__;DBUG_OFF;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN;WITH_INNOBASE_STORAGE_ENGINE;WITH_BERKELEY_STORAGE_ENGINE;WITH_ARCHIVE_STORAGE_ENGINE;WITH_BLACKHOLE_STORAGE_ENGINE;WITH_EXAMPLE_STORAGE_ENGINE;WITH_FEDERATED_STORAGE_ENGINE;WITH_PARTITION_STORAGE_ENGINE"
+			PreprocessorDefinitions="NDEBUG;__NT__;DBUG_OFF;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN;WITH_INNOBASE_STORAGE_ENGINE;WITH_BERKELEY_STORAGE_ENGINE;WITH_ARCHIVE_STORAGE_ENGINE;WITH_BLACKHOLE_STORAGE_ENGINE;WITH_EXAMPLE_STORAGE_ENGINE;WITH_FEDERATED_STORAGE_ENGINE;WITH_PARTITION_STORAGE_ENGINE;HAVE_ROW_BASED_REPLICATION"
 			StringPooling="TRUE"
 			RuntimeLibrary="0"
 			EnableFunctionLevelLinking="TRUE"
@@ -336,7 +336,7 @@
 			Optimization="0"
 			OptimizeForProcessor="2"
 			AdditionalIncludeDirectories="../storage/bdb/build_win32,../include,../regex,../extra/yassl/include,../zlib"
-			PreprocessorDefinitions="_DEBUG;SAFEMALLOC;SAFE_MUTEX;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN;WITH_INNOBASE_STORAGE_ENGINE;WITH_BERKELEY_STORAGE_ENGINE;WITH_ARCHIVE_STORAGE_ENGINE;WITH_BLACKHOLE_STORAGE_ENGINE;WITH_EXAMPLE_STORAGE_ENGINE;WITH_FEDERATED_STORAGE_ENGINE;WITH_PARTITION_STORAGE_ENGINE"
+			PreprocessorDefinitions="_DEBUG;SAFEMALLOC;SAFE_MUTEX;HAVE_INNOBASE_DB;HAVE_BERKELEY_DB;HAVE_ARCHIVE_DB;HAVE_BLACKHOLE_DB;HAVE_EXAMPLE_DB;HAVE_FEDERATED_DB;MYSQL_SERVER;_WINDOWS;_CONSOLE;HAVE_DLOPEN;WITH_INNOBASE_STORAGE_ENGINE;WITH_BERKELEY_STORAGE_ENGINE;WITH_ARCHIVE_STORAGE_ENGINE;WITH_BLACKHOLE_STORAGE_ENGINE;WITH_EXAMPLE_STORAGE_ENGINE;WITH_FEDERATED_STORAGE_ENGINE;WITH_PARTITION_STORAGE_ENGINE;HAVE_ROW_BASED_REPLICATION"
 			RuntimeLibrary="1"
 			PrecompiledHeaderFile=".\debug/mysqld.pch"
 			AssemblerListingLocation=".\debug/"
@@ -3242,6 +3242,82 @@
 			PreprocessorDefinitions=""/>
	[76 added XML lines; markup lost in extraction]
diff --git a/VC++Files/storage/example/example.vcproj b/VC++Files/storage/example/example.vcproj
new file mode 100644
index 00000000000..3a2daa55676
--- /dev/null
+++ b/VC++Files/storage/example/example.vcproj
@@ -0,0 +1,257 @@
	[257 added XML lines; markup lost in extraction]
diff --git a/client/Makefile.am b/client/Makefile.am
index a22b929207c..849bd37eb57 100644
--- a/client/Makefile.am
+++ b/client/Makefile.am
@@ -48,7 +48,7 @@ mysqlbinlog_SOURCES = mysqlbinlog.cc $(top_srcdir)/mysys/mf_tempdir.c \
 			$(top_srcdir)/mysys/my_vle.c \
 			$(top_srcdir)/mysys/base64.c
 mysqlbinlog_LDADD = $(LDADD) $(CXXLDFLAGS)
-mysqlslap_LDADD = $(LDADD) $(CXXLDFLAGS) -lpthread
+mysqlslap_LDADD = $(LDADD) $(CXXLDFLAGS) $(CLIENT_THREAD_LIBS)
 mysqltestmanager_pwgen_SOURCES = mysqlmanager-pwgen.c
 mysqltestmanagerc_SOURCES= mysqlmanagerc.c $(yassl_dummy_link_fix)
 mysqlcheck_SOURCES= mysqlcheck.c $(yassl_dummy_link_fix)
diff --git a/client/mysqlslap.c b/client/mysqlslap.c
index 99965adcc04..c993b93e608 100644
--- a/client/mysqlslap.c
+++ b/client/mysqlslap.c
@@ -85,13 +85,25 @@ TODO:
 #include <...>
 #include <...>
 #include <...>
+#ifndef __WIN__
 #include <...>
+#endif
 #include <...>
 #include <...>
 
 #define MYSLAPLOCK "/myslaplock.lck"
 #define MYSLAPLOCK_DIR "/tmp"
 
+#ifdef __WIN__
+#define srandom  srand
+#define random   rand
+#define snprintf _snprintf
+#endif
+
+#ifdef HAVE_SMEM
+static char *shared_memory_base_name=0;
+#endif
+
 static char **defaults_argv;
 
 static char *host= NULL, *opt_password= NULL, *user= NULL,
@@ -213,6 +225,17 @@ static long int timedif(struct timeval a, struct timeval b)
   return s + us;
 }
 
+#ifdef __WIN__
+static int gettimeofday(struct timeval *tp, void *tzp)
+{
+  unsigned int ticks;
+  ticks= GetTickCount();
+  tp->tv_sec=  ticks/1000;
+  tp->tv_usec= (ticks%1000)*1000;
+
+  return 0;
+}
+#endif
 
 int main(int argc, char **argv)
 {
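The gettimeofday() shim above maps GetTickCount()'s millisecond counter onto the struct timeval interface that timedif() consumes, so timing resolution on Windows is limited to milliseconds and the counter wraps after roughly 49.7 days. A minimal standalone sketch of the tick-to-timeval conversion, with hypothetical names that are not part of the patch:

    /* Hypothetical check of the conversion: 1234 ms should split
       into 1 s + 234000 us. */
    #include <assert.h>

    struct tv { long tv_sec; long tv_usec; };

    static void split_ticks(unsigned int ticks, struct tv *t)
    {
      t->tv_sec=  ticks / 1000;            /* whole seconds         */
      t->tv_usec= (ticks % 1000) * 1000;   /* remaining ms, as usec */
    }

    int main(void)
    {
      struct tv t;
      split_ticks(1234, &t);
      assert(t.tv_sec == 1 && t.tv_usec == 234000);
      return 0;
    }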
@@ -222,7 +245,10 @@ int main(int argc, char **argv)
   unsigned long long client_limit;
   statement *eptr;
 
-  DBUG_ENTER("main");
+#ifdef __WIN__
+  opt_use_threads= 1;
+#endif
+  MY_INIT(argv[0]);
 
   /* Seed the random number generator if we will be using it. */
@@ -353,12 +379,14 @@ int main(int argc, char **argv)
 
   free_defaults(defaults_argv);
   my_end(0);
 
-  DBUG_RETURN(0); /* No compiler warnings */
+  return 0;
 }
 
 static struct my_option my_long_options[] =
 {
+  {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG,
+   0, 0, 0, 0, 0, 0},
   {"auto-generate-sql", 'a',
    "Generate SQL where not supplied by file or command line.",
    (gptr*) &auto_generate_sql, (gptr*) &auto_generate_sql,
@@ -375,7 +403,8 @@ static struct my_option my_long_options[] =
   {"create-schema", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
    (gptr*) &create_schema_string, (gptr*) &create_schema_string, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-  {"csv", OPT_CREATE_SLAP_SCHEMA, "Schema to run tests in.",
+  {"csv", OPT_CREATE_SLAP_SCHEMA,
+   "Generate CSV output to named file or to stdout if no file is named.",
    (gptr*) &opt_csv_str, (gptr*) &opt_csv_str, 0, GET_STR,
    OPT_ARG, 0, 0, 0, 0, 0, 0},
   {"debug", '#', "Output debug log. Often this is 'd:t:o,filename'.",
@@ -388,8 +417,6 @@ static struct my_option my_long_options[] =
   {"engine", 'e', "Storage engine to use for creating the table.",
    (gptr*) &default_engine, (gptr*) &default_engine, 0,
    GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-  {"help", '?', "Display this help and exit.", 0, 0, 0, GET_NO_ARG, NO_ARG,
-   0, 0, 0, 0, 0, 0},
   {"host", 'h', "Connect to host.", (gptr*) &host, (gptr*) &host, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   {"iterations", 'i', "Number of times to run the tests.", (gptr*) &iterations,
@@ -402,28 +429,28 @@ static struct my_option my_long_options[] =
    (gptr*) &num_char_cols, (gptr*) &num_char_cols, 0, GET_UINT, REQUIRED_ARG,
    1, 0, 0, 0, 0, 0},
   {"number-int-cols", 'y',
-   "Number of VARCHAR columns to create table with if specifying \
-   --sql-generate-sql.", (gptr*) &num_int_cols, (gptr*) &num_int_cols, 0,
+   "Number of VARCHAR columns to create table with if specifying "
+   "--sql-generate-sql.", (gptr*) &num_int_cols, (gptr*) &num_int_cols, 0,
    GET_UINT, REQUIRED_ARG, 1, 0, 0, 0, 0, 0},
-  {"number-of-query", OPT_MYSQL_NUMBER_OF_QUERY,
+  {"number-of-queries", OPT_MYSQL_NUMBER_OF_QUERY,
    "Limit each client to this number of queries (this is not exact).",
    (gptr*) &num_of_query, (gptr*) &num_of_query, 0,
    GET_ULL, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   {"only-print", OPT_MYSQL_ONLY_PRINT,
-   "This causes mysqlslap to not connect to the databases, but instead print \
-   out what it would have done instead.",
+   "This causes mysqlslap to not connect to the databases, but instead print "
+   "out what it would have done.",
    (gptr*) &opt_only_print, (gptr*) &opt_only_print, 0, GET_BOOL,  NO_ARG,
    0, 0, 0, 0, 0, 0},
   {"password", 'p',
-   "Password to use when connecting to server. If password is not given it's \
-   asked from the tty.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
-  {"port", 'P', "Port number to use for connection.", (gptr*) &opt_mysql_port,
-   (gptr*) &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0,
-   0},
+   "Password to use when connecting to server. If password is not given it's "
+   "asked from the tty.", 0, 0, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
 #ifdef __WIN__
   {"pipe", 'W', "Use named pipes to connect to server.", 0, 0, 0, GET_NO_ARG,
    NO_ARG, 0, 0, 0, 0, 0, 0},
 #endif
+  {"port", 'P', "Port number to use for connection.", (gptr*) &opt_mysql_port,
+   (gptr*) &opt_mysql_port, 0, GET_UINT, REQUIRED_ARG, MYSQL_PORT, 0, 0, 0, 0,
+   0},
   {"preserve-schema", OPT_MYSQL_PRESERVE_SCHEMA,
    "Preserve the schema from the mysqlslap run.",
    (gptr*) &opt_preserve, (gptr*) &opt_preserve, 0, GET_BOOL,
@@ -434,33 +461,33 @@ static struct my_option my_long_options[] =
   {"query", 'q', "Query to run or file containing query to run.",
    (gptr*) &user_supplied_query, (gptr*) &user_supplied_query,
    0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
-  {"silent", 's', "Run program in silent mode - no output.",
-   (gptr*) &opt_silent, (gptr*) &opt_silent, 0, GET_BOOL,  NO_ARG,
-   0, 0, 0, 0, 0, 0},
 #ifdef HAVE_SMEM
   {"shared-memory-base-name", OPT_SHARED_MEMORY_BASE_NAME,
    "Base name of shared memory.", (gptr*) &shared_memory_base_name,
    (gptr*) &shared_memory_base_name, 0, GET_STR_ALLOC, REQUIRED_ARG,
    0, 0, 0, 0, 0, 0},
 #endif
+  {"silent", 's', "Run program in silent mode - no output.",
+   (gptr*) &opt_silent, (gptr*) &opt_silent, 0, GET_BOOL,  NO_ARG,
+   0, 0, 0, 0, 0, 0},
   {"slave", OPT_MYSQL_SLAP_SLAVE, "Follow master locks for other slap clients",
    (gptr*) &opt_slave, (gptr*) &opt_slave, 0, GET_BOOL, NO_ARG,
    0, 0, 0, 0, 0, 0},
   {"socket", 'S', "Socket file to use for connection.",
    (gptr*) &opt_mysql_unix_port, (gptr*) &opt_mysql_unix_port, 0, GET_STR,
    REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+#include <...>
   {"use-threads", OPT_USE_THREADS,
    "Use pthread calls instead of fork() calls (default on Windows)",
    (gptr*) &opt_use_threads, (gptr*) &opt_use_threads, 0,
    GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
-#include <...>
 #ifndef DONT_ALLOW_USER_CHANGE
   {"user", 'u', "User for login if not current user.", (gptr*) &user,
    (gptr*) &user, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
 #endif
   {"verbose", 'v',
-   "More verbose output; You can use this multiple times to get even more \
-   verbose output.", (gptr*) &verbose, (gptr*) &verbose, 0,
+   "More verbose output; You can use this multiple times to get even more "
+   "verbose output.", (gptr*) &verbose, (gptr*) &verbose, 0,
    GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
   {"version", 'V', "Output version information and exit.", 0, 0, 0, GET_NO_ARG,
    NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -936,7 +963,6 @@ drop_schema(MYSQL *mysql, const char *db)
 
   DBUG_RETURN(0);
 }
 
-
 static int
 run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
 {
@@ -980,6 +1006,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
       }
     }
   }
+#ifndef __WIN__
   else
   {
     fflush(NULL);
@@ -1020,6 +1047,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
       }
     }
   }
+#endif
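These #ifndef __WIN__ guards compile the fork()-based dispatch out on Windows, where only the pthread path remains (main() forces opt_use_threads= 1 there). A reduced sketch of the same portability split, using hypothetical launch()/worker() names:

    /* Hypothetical sketch of the fork-vs-threads split in run_scheduler(). */
    #include <pthread.h>
    #ifndef _WIN32
    #include <unistd.h>
    #include <sys/wait.h>
    #endif

    static void *worker(void *arg)
    {
      (void) arg;                    /* one simulated client */
      return 0;
    }

    static void launch(int use_threads)
    {
      if (use_threads)
      {
        pthread_t th;
        pthread_create(&th, 0, worker, 0);
        pthread_join(th, 0);
      }
    #ifndef _WIN32
      else if (fork() == 0)          /* child runs one client  */
      {
        worker(0);
        _exit(0);
      }
      else
        wait(0);                     /* parent reaps the child */
    #endif
    }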
 
   /* Let's release some clients! */
   if (!opt_slave)
@@ -1041,6 +1069,7 @@ run_scheduler(stats *sptr, statement *stmts, uint concur, ulonglong limit)
     }
     my_lock(lock_file, F_UNLCK, 0, F_TO_EOF, MYF(0));
   }
+#ifndef __WIN__
   else
   {
 WAIT:
@@ -1051,6 +1080,7 @@ WAIT:
       DBUG_PRINT("info", ("Parent: child %d status %d", pid, status));
     }
   }
+#endif
   gettimeofday(&end_time, NULL);
 
   my_close(lock_file, MYF(0));
diff --git a/config/ac-macros/storage.m4 b/config/ac-macros/storage.m4
index 8564e8ef5cc..4148aed818d 100644
--- a/config/ac-macros/storage.m4
+++ b/config/ac-macros/storage.m4
@@ -39,7 +39,10 @@ then
     AC_DEFINE([$5])
     mysql_se_decls="${mysql_se_decls},$6"
     mysql_se_htons="${mysql_se_htons},&$6"
-    mysql_se_objs="$mysql_se_objs $8"
+    if test "$8" != "no"
+    then
+      mysql_se_objs="$mysql_se_objs $8"
+    fi
     mysql_se_dirs="$mysql_se_dirs $7"
     mysql_se_libs="$mysql_se_libs $9"
   else
diff --git a/configure.in b/configure.in
index e8256a5a526..ffc311a7857 100644
--- a/configure.in
+++ b/configure.in
@@ -1420,6 +1420,7 @@ AC_MSG_CHECKING("named thread libs:")
 if test "$with_named_thread" != "no"
 then
   LIBS="$with_named_thread $LIBS $with_named_thread"
+  CLIENT_THREAD_LIBS="$with_named_thread"
   with_posix_threads="yes"
   AC_MSG_RESULT("$with_named_thread")
 else
@@ -1436,6 +1437,7 @@ else
       AC_MSG_CHECKING("for pthread_create in -lpthread");
       ac_save_LIBS="$LIBS"
       LIBS="$LIBS -lpthread"
+      CLIENT_THREAD_LIBS="-lpthread"
       AC_TRY_LINK(
       [#include <pthread.h>],
       [ (void) pthread_create((pthread_t*) 0,(pthread_attr_t*) 0, 0, 0); ],
@@ -1444,6 +1446,7 @@ else
       if test "$with_posix_threads" = "no"
       then
         LIBS=" $ac_save_LIBS -lpthreads"
+        CLIENT_THREAD_LIBS="-lpthreads"
         AC_MSG_CHECKING("for pthread_create in -lpthreads");
         AC_TRY_LINK(
         [#include <pthread.h>],
@@ -1454,6 +1457,7 @@ else
         then
           # This is for FreeBSD
           LIBS="$ac_save_LIBS -pthread"
+          CLIENT_THREAD_LIBS="-pthread"
           AC_MSG_CHECKING("for pthread_create in -pthread");
           AC_TRY_LINK(
           [#include <pthread.h>],
@@ -2437,7 +2441,8 @@ MYSQL_STORAGE_ENGINE(archive,,,,,,storage/archive,,
                      \$(top_builddir)/storage/archive/libarchive.a, [
   AC_CONFIG_FILES(storage/archive/Makefile)
 ])
-MYSQL_STORAGE_ENGINE(csv,,,,,no,storage/csv,,,[
+MYSQL_STORAGE_ENGINE(csv,,,"yes",,tina_hton,storage/csv,no,
+                     \$(top_builddir)/storage/csv/libcsv.a,[
   AC_CONFIG_FILES(storage/csv/Makefile)
 ])
 MYSQL_STORAGE_ENGINE(blackhole)
@@ -2469,6 +2474,7 @@ fi
 
 CLIENT_LIBS="$NON_THREADED_LIBS $openssl_libs $ZLIB_LIBS $STATIC_NSS_FLAGS"
 AC_SUBST(CLIENT_LIBS)
+AC_SUBST(CLIENT_THREAD_LIBS)
 AC_SUBST(NON_THREADED_LIBS)
 AC_SUBST(STATIC_NSS_FLAGS)
 AC_SUBST(sql_client_dirs)
" + "This is free software,\n" + "and you are welcome to modify and redistribute it under the GPL license.\n" + "Usage:\n"); my_print_help(my_long_options); my_print_variables(my_long_options); DBUG_VOID_RETURN; diff --git a/include/config-win.h b/include/config-win.h index 2de75385d4d..4e952f8248d 100644 --- a/include/config-win.h +++ b/include/config-win.h @@ -353,6 +353,9 @@ inline double ulonglong2double(ulonglong value) #define HAVE_VIO_READ_BUFF #define HAVE_STRNLEN +#define strcasecmp stricmp +#define strncasecmp strnicmp + #ifndef __NT__ #undef FILE_SHARE_DELETE #define FILE_SHARE_DELETE 0 /* Not implemented on Win 98/ME */ diff --git a/include/m_ctype.h b/include/m_ctype.h index 1bdba5e3266..29fec006c7f 100644 --- a/include/m_ctype.h +++ b/include/m_ctype.h @@ -66,7 +66,8 @@ extern MY_UNICASE_INFO *my_unicase_turkish[256]; #define MY_CS_UNICODE 128 /* is a charset is full unicode */ #define MY_CS_READY 256 /* if a charset is initialized */ #define MY_CS_AVAILABLE 512 /* If either compiled-in or loaded*/ -#define MY_CS_CSSORT 1024 /* if case sensitive sort order */ +#define MY_CS_CSSORT 1024 /* if case sensitive sort order */ +#define MY_CS_HIDDEN 2048 /* don't display in SHOW */ #define MY_CHARSET_UNDEFINED 0 diff --git a/include/my_base.h b/include/my_base.h index cb82bfef531..5962775fb57 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -155,13 +155,18 @@ enum ha_extra_function { */ HA_EXTRA_KEYREAD_PRESERVE_FIELDS, HA_EXTRA_MMAP, - /* + /* Ignore if the a tuple is not found, continue processing the transaction and ignore that 'row'. Needed for idempotency handling on the slave */ HA_EXTRA_IGNORE_NO_KEY, - HA_EXTRA_NO_IGNORE_NO_KEY + HA_EXTRA_NO_IGNORE_NO_KEY, + /* + Mark the table as a log table. For some handlers (e.g. CSV) this results + in a special locking for the table. + */ + HA_EXTRA_MARK_AS_LOG_TABLE }; /* The following is parameter to ha_panic() */ diff --git a/include/my_sys.h b/include/my_sys.h index dc2a2c63379..89b0bd4fbec 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -806,6 +806,9 @@ extern void print_defaults(const char *conf_file, const char **groups); extern my_bool my_compress(byte *, ulong *, ulong *); extern my_bool my_uncompress(byte *, ulong *, ulong *); extern byte *my_compress_alloc(const byte *packet, ulong *len, ulong *complen); +extern int packfrm(const void *, uint, const void **, uint *); +extern int unpackfrm(const void **, uint *, const void *); + extern ha_checksum my_checksum(ha_checksum crc, const byte *mem, uint count); extern uint my_bit_log2(ulong value); extern uint my_count_bits(ulonglong v); diff --git a/include/mysql.h b/include/mysql.h index f3244d4ba36..303cb2af4f5 100644 --- a/include/mysql.h +++ b/include/mysql.h @@ -130,14 +130,14 @@ typedef MYSQL_ROWS *MYSQL_ROW_OFFSET; /* offset to current row */ #include "my_alloc.h" +typedef struct embedded_query_result EMBEDDED_QUERY_RESULT; typedef struct st_mysql_data { my_ulonglong rows; unsigned int fields; MYSQL_ROWS *data; MEM_ROOT alloc; -#if !defined(CHECK_EMBEDDED_DIFFERENCES) || defined(EMBEDDED_LIBRARY) - MYSQL_ROWS **prev_ptr; -#endif + /* extra info for embedded library */ + struct embedded_query_result *embedded_info; } MYSQL_DATA; enum mysql_option @@ -287,6 +287,8 @@ typedef struct st_mysql from mysql_stmt_close if close had to cancel result set of this object. 
diff --git a/include/mysql.h b/include/mysql.h
index f3244d4ba36..303cb2af4f5 100644
--- a/include/mysql.h
+++ b/include/mysql.h
@@ -130,14 +130,14 @@ typedef MYSQL_ROWS *MYSQL_ROW_OFFSET;  /* offset to current row */
 
 #include "my_alloc.h"
 
+typedef struct embedded_query_result EMBEDDED_QUERY_RESULT;
 typedef struct st_mysql_data {
   my_ulonglong rows;
   unsigned int fields;
   MYSQL_ROWS *data;
   MEM_ROOT alloc;
-#if !defined(CHECK_EMBEDDED_DIFFERENCES) || defined(EMBEDDED_LIBRARY)
-  MYSQL_ROWS **prev_ptr;
-#endif
+  /* extra info for embedded library */
+  struct embedded_query_result *embedded_info;
 } MYSQL_DATA;
 
 enum mysql_option
@@ -287,6 +287,8 @@ typedef struct st_mysql
     from mysql_stmt_close if close had to cancel result set of this object.
   */
   my_bool *unbuffered_fetch_owner;
+  /* needed for embedded server - no net buffer to store the 'info' */
+  char *info_buffer;
 } MYSQL;
 
 typedef struct st_mysql_res {
@@ -755,6 +757,7 @@ typedef struct st_mysql_methods
   const char *(*read_statistics)(MYSQL *mysql);
   my_bool (*next_result)(MYSQL *mysql);
   int (*read_change_user_result)(MYSQL *mysql, char *buff, const char *passwd);
+  int (*read_rows_from_cursor)(MYSQL_STMT *stmt);
 #endif
 } MYSQL_METHODS;
diff --git a/include/thr_lock.h b/include/thr_lock.h
index 251d8e7c9cf..c3a7909175f 100644
--- a/include/thr_lock.h
+++ b/include/thr_lock.h
@@ -143,10 +143,12 @@ void thr_unlock(THR_LOCK_DATA *data);
 enum enum_thr_lock_result thr_multi_lock(THR_LOCK_DATA **data,
                                          uint count, THR_LOCK_OWNER *owner);
 void thr_multi_unlock(THR_LOCK_DATA **data,uint count);
-void thr_abort_locks(THR_LOCK *lock);
+void thr_abort_locks(THR_LOCK *lock, bool upgrade_lock);
 my_bool thr_abort_locks_for_thread(THR_LOCK *lock, pthread_t thread);
 void    thr_print_locks(void);		/* For debugging */
 my_bool thr_upgrade_write_delay_lock(THR_LOCK_DATA *data);
+void    thr_downgrade_write_lock(THR_LOCK_DATA *data,
+                                 enum thr_lock_type new_lock_type);
 my_bool thr_reschedule_write_lock(THR_LOCK_DATA *data);
 #ifdef __cplusplus
 }
diff --git a/libmysql/libmysql.c b/libmysql/libmysql.c
index 11ee7284cbf..e2ee44efffb 100644
--- a/libmysql/libmysql.c
+++ b/libmysql/libmysql.c
@@ -2722,13 +2722,13 @@ stmt_read_row_from_cursor(MYSQL_STMT *stmt, unsigned char **row)
     /* Send row request to the server */
     int4store(buff, stmt->stmt_id);
     int4store(buff + 4, stmt->prefetch_rows); /* number of rows to fetch */
-    if (cli_advanced_command(mysql, COM_STMT_FETCH, buff, sizeof(buff),
-                             NullS, 0, 1))
+    if ((*mysql->methods->advanced_command)(mysql, COM_STMT_FETCH,
+                                            buff, sizeof(buff), NullS, 0, 1))
     {
       set_stmt_errmsg(stmt, net->last_error, net->last_errno, net->sqlstate);
       return 1;
     }
-    if (cli_read_binary_rows(stmt))
+    if ((*mysql->methods->read_rows_from_cursor)(stmt))
       return 1;
     stmt->server_status= mysql->server_status;
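The point of this hunk is that the cursor fetch no longer hard-codes the wire-protocol helpers cli_advanced_command() and cli_read_binary_rows(): it dispatches through the connection's MYSQL_METHODS table, so the embedded library can plug in its in-process implementations (emb_read_rows_from_cursor() later in this patch). A stripped-down sketch of that dispatch idiom, with types reduced to the bare minimum:

    /* Reduced sketch of the per-connection method-table dispatch. */
    struct conn;
    typedef struct methods
    {
      int (*advanced_command)(struct conn *c, int command);
      int (*read_rows_from_cursor)(struct conn *c);
    } methods;

    typedef struct conn { const methods *m; } conn;

    static int fetch_from_cursor(conn *c, int command)
    {
      /* remote build: network round trip; embedded build: direct call */
      if ((*c->m->advanced_command)(c, command))
        return 1;
      return (*c->m->read_rows_from_cursor)(c);
    }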
@@ -5101,9 +5101,9 @@ my_bool STDCALL mysql_autocommit(MYSQL * mysql, my_bool auto_mode)
   DBUG_ENTER("mysql_autocommit");
   DBUG_PRINT("enter", ("mode : %d", auto_mode));
 
-  if (auto_mode) /* set to true */
-    DBUG_RETURN((my_bool) mysql_real_query(mysql, "set autocommit=1", 16));
-  DBUG_RETURN((my_bool) mysql_real_query(mysql, "set autocommit=0", 16));
+  DBUG_RETURN((my_bool) mysql_real_query(mysql, auto_mode ?
+                                         "set autocommit=1":"set autocommit=0",
+                                         16));
 }
diff --git a/libmysqld/emb_qcache.cc b/libmysqld/emb_qcache.cc
index ecc45096165..078243a6d5e 100644
--- a/libmysqld/emb_qcache.cc
+++ b/libmysqld/emb_qcache.cc
@@ -18,6 +18,7 @@
 #ifdef HAVE_QUERY_CACHE
 #include <...>
 #include "emb_qcache.h"
+#include "embedded_priv.h"
 
 void Querycache_stream::store_char(char c)
 {
@@ -284,22 +285,25 @@ int Querycache_stream::load_column(MEM_ROOT *alloc, char** column)
 
 uint emb_count_querycache_size(THD *thd)
 {
-  uint result;
-  MYSQL *mysql= thd->mysql;
-  MYSQL_FIELD *field= mysql->fields;
-  MYSQL_FIELD *field_end= field + mysql->field_count;
-  MYSQL_ROWS *cur_row=NULL;
-  my_ulonglong n_rows=0;
+  uint result= 0;
+  MYSQL_FIELD *field;
+  MYSQL_FIELD *field_end;
+  MYSQL_ROWS *cur_row;
+  my_ulonglong n_rows;
+  MYSQL_DATA *data= thd->first_data;
+
+  while (data->embedded_info->next)
+    data= data->embedded_info->next;
+  field= data->embedded_info->fields_list;
+  field_end= field + data->fields;
 
   if (!field)
-    return 0;
-  if (thd->data)
-  {
-    *thd->data->prev_ptr= NULL; // this marks the last record
-    cur_row= thd->data->data;
-    n_rows= thd->data->rows;
-  }
-  result= (uint) (4+8 + (42 + 4*n_rows)*mysql->field_count);
+    return result;
+  *data->embedded_info->prev_ptr= NULL; // this marks the last record
+  cur_row= data->data;
+  n_rows= data->rows;
+  /* n_fields + n_rows + (field_info + strlen * n_rows) * n_fields */
+  result+= (uint) (4+8 + (42 + 4*n_rows)*data->fields);
 
   for(; field < field_end; field++)
   {
@@ -313,34 +317,38 @@ uint emb_count_querycache_size(THD *thd)
   for (; cur_row; cur_row=cur_row->next)
   {
     MYSQL_ROW col= cur_row->data;
-    MYSQL_ROW col_end= col + mysql->field_count;
+    MYSQL_ROW col_end= col + data->fields;
     for (; col < col_end; col++)
       if (*col)
-        result+= *(uint *)((*col) - sizeof(uint));
+        result+= *(uint *)((*col) - sizeof(uint));
   }
   return result;
 }
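The fixed part of the estimate above reserves 4 bytes for the field count, 8 for the row count, and per field roughly 42 bytes of metadata plus one 4-byte length slot per row; the loops that follow add the variable-length name strings and column payloads. For example, with 3 fields and 10 rows the fixed part is 4+8 + (42 + 4*10)*3 = 258 bytes before any string data.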
 
 void emb_store_querycache_result(Querycache_stream *dst, THD *thd)
 {
-  MYSQL *mysql= thd->mysql;
-  MYSQL_FIELD *field= mysql->fields;
-  MYSQL_FIELD *field_end= field + mysql->field_count;
-  MYSQL_ROWS *cur_row= NULL;
-  my_ulonglong n_rows= 0;
+  MYSQL_FIELD *field;
+  MYSQL_FIELD *field_end;
+  MYSQL_ROWS *cur_row;
+  my_ulonglong n_rows;
+  MYSQL_DATA *data= thd->first_data;
+
+  DBUG_ENTER("emb_store_querycache_result");
+
+  while (data->embedded_info->next)
+    data= data->embedded_info->next;
+  field= data->embedded_info->fields_list;
+  field_end= field + data->fields;
 
   if (!field)
-    return;
+    DBUG_VOID_RETURN;
 
-  if (thd->data)
-  {
-    *thd->data->prev_ptr= NULL; // this marks the last record
-    cur_row= thd->data->data;
-    n_rows= thd->data->rows;
-  }
+  *data->embedded_info->prev_ptr= NULL; // this marks the last record
+  cur_row= data->data;
+  n_rows= data->rows;
 
-  dst->store_int((uint)mysql->field_count);
-  dst->store_ll((uint)n_rows);
+  dst->store_int((uint)data->fields);
+  dst->store_ll((ulonglong)n_rows);
 
   for(; field < field_end; field++)
   {
@@ -356,14 +364,13 @@ void emb_store_querycache_result(Querycache_stream *dst, THD *thd)
     dst->store_str(field->org_table, field->org_table_length);
     dst->store_str(field->db, field->db_length);
     dst->store_str(field->catalog, field->catalog_length);
-
     dst->store_safe_str(field->def, field->def_length);
   }
 
   for (; cur_row; cur_row=cur_row->next)
   {
     MYSQL_ROW col= cur_row->data;
-    MYSQL_ROW col_end= col + mysql->field_count;
+    MYSQL_ROW col_end= col + data->fields;
     for (; col < col_end; col++)
     {
       uint len= *col ? *(uint *)((*col) - sizeof(uint)) : 0;
@@ -371,28 +378,34 @@ void emb_store_querycache_result(Querycache_stream *dst, THD *thd)
     }
   }
   DBUG_ASSERT(emb_count_querycache_size(thd) == dst->stored_size);
+  DBUG_VOID_RETURN;
 }
 
 int emb_load_querycache_result(THD *thd, Querycache_stream *src)
 {
-  MYSQL *mysql= thd->mysql;
-  MYSQL_DATA *data;
+  MYSQL_DATA *data= thd->alloc_new_dataset();
   MYSQL_FIELD *field;
   MYSQL_FIELD *field_end;
-  MEM_ROOT *f_alloc= &mysql->field_alloc;
+  MEM_ROOT *f_alloc;
   MYSQL_ROWS *row, *end_row;
   MYSQL_ROWS **prev_row;
   ulonglong rows;
   MYSQL_ROW columns;
+  DBUG_ENTER("emb_load_querycache_result");
 
-  mysql->field_count= src->load_int();
+  if (!data)
+    goto err;
+  init_alloc_root(&data->alloc, 8192,0);
+  f_alloc= &data->alloc;
+
+  data->fields= src->load_int();
   rows= src->load_ll();
 
   if (!(field= (MYSQL_FIELD *)
-        alloc_root(&mysql->field_alloc,mysql->field_count*sizeof(MYSQL_FIELD))))
+        alloc_root(f_alloc,data->fields*sizeof(MYSQL_FIELD))))
     goto err;
-  mysql->fields= field;
-  for(field_end= field+mysql->field_count; field < field_end; field++)
+  data->embedded_info->fields_list= field;
+  for(field_end= field+data->fields; field < field_end; field++)
   {
     field->length= src->load_int();
     field->max_length= (unsigned int)src->load_int();
@@ -402,47 +415,43 @@ int emb_load_querycache_result(THD *thd, Querycache_stream *src)
     field->decimals= (unsigned int)src->load_char();
 
     if (!(field->name= src->load_str(f_alloc, &field->name_length))          ||
-       !(field->table= src->load_str(f_alloc,&field->table_length))         ||
-       !(field->org_name= src->load_str(f_alloc, &field->org_name_length))  ||
-       !(field->org_table= src->load_str(f_alloc, &field->org_table_length))||
-       !(field->db= src->load_str(f_alloc, &field->db_length))              ||
-       !(field->catalog= src->load_str(f_alloc, &field->catalog_length))    ||
-       src->load_safe_str(f_alloc, &field->def, &field->def_length))
+        !(field->table= src->load_str(f_alloc,&field->table_length))         ||
+        !(field->org_name= src->load_str(f_alloc, &field->org_name_length))  ||
+        !(field->org_table= src->load_str(f_alloc, &field->org_table_length))||
+        !(field->db= src->load_str(f_alloc, &field->db_length))              ||
+        !(field->catalog= src->load_str(f_alloc, &field->catalog_length))    ||
+        src->load_safe_str(f_alloc, &field->def, &field->def_length))
       goto err;
   }
 
-  if (!rows)
-    return 0;
-  if (!(data= (MYSQL_DATA*)my_malloc(sizeof(MYSQL_DATA),
-                                     MYF(MY_WME | MY_ZEROFILL))))
-    goto err;
-  thd->data= data;
-  init_alloc_root(&data->alloc, 8192,0);
-  row= (MYSQL_ROWS *)alloc_root(&data->alloc, (uint) (rows * sizeof(MYSQL_ROWS) +
-                                rows * (mysql->field_count+1)*sizeof(char*)));
+  row= (MYSQL_ROWS *)alloc_root(&data->alloc,
+                                (uint) (rows * sizeof(MYSQL_ROWS) +
+                                        rows*(data->fields+1)*sizeof(char*)));
   end_row= row + rows;
   columns= (MYSQL_ROW)end_row;
 
   data->rows= rows;
-  data->fields= mysql->field_count;
   data->data= row;
+  if (!rows)
+    goto return_ok;
 
   for (prev_row= &row->next; row < end_row; prev_row= &row->next, row++)
   {
     *prev_row= row;
     row->data= columns;
-    MYSQL_ROW col_end= columns + mysql->field_count;
+    MYSQL_ROW col_end= columns + data->fields;
     for (; columns < col_end; columns++)
      src->load_column(&data->alloc, columns);
 
     *(columns++)= NULL;
   }
   *prev_row= NULL;
-  data->prev_ptr= prev_row;
-
-  return 0;
+  data->embedded_info->prev_ptr= prev_row;
+return_ok:
+  send_eof(thd);
+  DBUG_RETURN(0);
 err:
-  return 1;
+  DBUG_RETURN(1);
 }
 #endif /*HAVE_QUERY_CACHE*/
diff --git a/libmysqld/embedded_priv.h b/libmysqld/embedded_priv.h
index d4316dff63f..88015340e8c 100644
--- a/libmysqld/embedded_priv.h
+++ b/libmysqld/embedded_priv.h
@@ -16,18 +16,25 @@
 
 /* Prototypes for the embedded version of MySQL */
 
-#include <...>
-#include <...>
-#include <...>
-#include <...>
-#include <...>
-
 C_MODE_START
 void lib_connection_phase(NET *net, int phase);
 void init_embedded_mysql(MYSQL *mysql, int client_flag, char *db);
 void *create_embedded_thd(int client_flag, char *db);
 int check_embedded_connection(MYSQL *mysql);
 void free_old_query(MYSQL *mysql);
-void embedded_get_error(MYSQL *mysql);
 extern MYSQL_METHODS embedded_methods;
+
+/* This one is used by embedded library to gather returning data */
+typedef struct embedded_query_result
+{
+  MYSQL_ROWS **prev_ptr;
+  unsigned int warning_count, server_status;
+  struct st_mysql_data *next;
+  my_ulonglong affected_rows, insert_id;
+  char info[MYSQL_ERRMSG_SIZE];
+  MYSQL_FIELD *fields_list;
+  unsigned int last_errno;
+  char sqlstate[SQLSTATE_LENGTH+1];
+} EQR;
+
 C_MODE_END
diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc
index 8552b1c2b8a..c39f1b1f373 100644
--- a/libmysqld/lib_sql.cc
+++ b/libmysqld/lib_sql.cc
@@ -42,21 +42,48 @@ C_MODE_START
 #undef ER
 #include "errmsg.h"
 #include <...>
+#include "embedded_priv.h"
 
-void embedded_get_error(MYSQL *mysql)
+static my_bool emb_read_query_result(MYSQL *mysql);
+
+void THD::clear_data_list()
+{
+  while (first_data)
+  {
+    MYSQL_DATA *data= first_data;
+    first_data= data->embedded_info->next;
+    free_rows(data);
+  }
+  data_tail= &first_data;
+  free_rows(cur_data);
+  cur_data= 0;
+}
+
+
+/*
+  Reads error information from the MYSQL_DATA and puts
+  it into proper MYSQL members
+
+  SYNOPSIS
+    embedded_get_error()
+    mysql        connection handler
+    data         query result
+
+  NOTES
+    after that function error information will be accessible
+      with usual functions like mysql_error()
+    data is my_free-d in this function
+    most of the data is stored in data->embedded_info structure
+*/
+
+void embedded_get_error(MYSQL *mysql, MYSQL_DATA *data)
 {
-  THD *thd=(THD *) mysql->thd;
   NET *net= &mysql->net;
-  if ((net->last_errno= thd->net.last_errno))
-  {
-    memcpy(net->last_error, thd->net.last_error, sizeof(net->last_error));
-    memcpy(net->sqlstate, thd->net.sqlstate, sizeof(net->sqlstate));
-  }
-  else
-  {
-    net->last_error[0]= 0;
-    strmov(net->sqlstate, not_error_sqlstate);
-  }
+  struct embedded_query_result *ei= data->embedded_info;
+  net->last_errno= ei->last_errno;
+  strmake(net->last_error, ei->info, sizeof(net->last_error));
+  memcpy(net->sqlstate, ei->sqlstate, sizeof(net->sqlstate));
+  my_free((gptr) data, MYF(0));
 }
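With this change every queued result carries its own error state, and embedded_get_error() copies that state into the connection handle before freeing the dataset. A condensed sketch of the handoff; the member names follow embedded_query_result above, while the buffer sizes are illustrative:

    /* Condensed sketch of embedded_get_error()'s copy-out. */
    #include <string.h>

    typedef struct { unsigned int last_errno; char info[512]; char sqlstate[6]; } result_err;
    typedef struct { unsigned int last_errno; char last_error[512]; char sqlstate[6]; } net_err;

    static void take_error(net_err *net, const result_err *ei)
    {
      net->last_errno= ei->last_errno;
      strncpy(net->last_error, ei->info, sizeof(net->last_error) - 1);
      net->last_error[sizeof(net->last_error) - 1]= '\0';  /* strmake-style bound */
      memcpy(net->sqlstate, ei->sqlstate, sizeof(net->sqlstate));
    }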
 
 static my_bool
@@ -68,11 +95,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
   THD *thd=(THD *) mysql->thd;
   NET *net= &mysql->net;
 
-  if (thd->data)
-  {
-    free_rows(thd->data);
-    thd->data= 0;
-  }
+  thd->clear_data_list();
   /* Check that we are calling the client functions in right order */
   if (mysql->status != MYSQL_STATUS_READY)
   {
@@ -104,83 +127,101 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command,
     arg_length= header_length;
   }
 
+  thd->net.no_send_error= 0;
   result= dispatch_command(command, thd, (char *) arg, arg_length + 1);
+  thd->cur_data= 0;
 
   if (!skip_check)
     result= thd->net.last_errno ? -1 : 0;
 
-  /*
-    If mysql->field_count is set it means the parsing of the query was OK
-    and metadata was returned (see Protocol::send_fields).
-    In this case we postpone the error to be returned in mysql_stmt_store_result
-    (see emb_read_rows) to behave just as standalone server.
-  */
-  if (!mysql->field_count)
-    embedded_get_error(mysql);
-  mysql->server_status= thd->server_status;
-  mysql->warning_count= ((THD*)mysql->thd)->total_warn_count;
   return result;
 }
 
 static void emb_flush_use_result(MYSQL *mysql)
 {
-  MYSQL_DATA *data= ((THD*)(mysql->thd))->data;
-
-  if (data)
+  THD *thd= (THD*) mysql->thd;
+  if (thd->cur_data)
   {
+    free_rows(thd->cur_data);
+    thd->cur_data= 0;
+  }
+  else if (thd->first_data)
+  {
+    MYSQL_DATA *data= thd->first_data;
+    thd->first_data= data->embedded_info->next;
     free_rows(data);
-    ((THD*)(mysql->thd))->data= NULL;
   }
 }
 
+
+/*
+  reads dataset from the next query result
+
+  SYNOPSIS
+    emb_read_rows()
+    mysql        connection handle
+    other parameters are not used
+
+  NOTES
+    It just gets next MYSQL_DATA from the result's queue
+
+  RETURN
+    pointer to MYSQL_DATA with the coming recordset
+*/
+
 static MYSQL_DATA *
 emb_read_rows(MYSQL *mysql, MYSQL_FIELD *mysql_fields __attribute__((unused)),
 	      unsigned int fields __attribute__((unused)))
 {
-  MYSQL_DATA *result= ((THD*)mysql->thd)->data;
-  embedded_get_error(mysql);
-  if (mysql->net.last_errno)
-    return NULL;
-  if (!result)
+  MYSQL_DATA *result= ((THD*)mysql->thd)->cur_data;
+  ((THD*)mysql->thd)->cur_data= 0;
+  if (result->embedded_info->last_errno)
  {
-    if (!(result=(MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA),
-					 MYF(MY_WME | MY_ZEROFILL))))
-    {
-      NET *net = &mysql->net;
-      net->last_errno=CR_OUT_OF_MEMORY;
-      strmov(net->sqlstate, unknown_sqlstate);
-      strmov(net->last_error,ER(net->last_errno));
-      return NULL;
-    }
-    return result;
+    embedded_get_error(mysql, result);
+    return NULL;
   }
-  *result->prev_ptr= NULL;
-  ((THD*)mysql->thd)->data= NULL;
+  *result->embedded_info->prev_ptr= NULL;
  return result;
 }
 
+
 static MYSQL_FIELD *emb_list_fields(MYSQL *mysql)
 {
+  MYSQL_DATA *res;
+  if (emb_read_query_result(mysql))
+    return 0;
+  res= ((THD*) mysql->thd)->cur_data;
+  ((THD*) mysql->thd)->cur_data= 0;
+  mysql->field_alloc= res->alloc;
+  my_free((gptr) res,MYF(0));
+  mysql->status= MYSQL_STATUS_READY;
   return mysql->fields;
 }
 
 static my_bool emb_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt)
 {
-  THD *thd= (THD*)mysql->thd;
-  if (mysql->net.last_errno)
-    return 1;
+  THD *thd= (THD*) mysql->thd;
+  MYSQL_DATA *res;
+
   stmt->stmt_id= thd->client_stmt_id;
   stmt->param_count= thd->client_param_count;
-  stmt->field_count= mysql->field_count;
+  stmt->field_count= 0;
 
-  if (stmt->field_count != 0)
+  if (thd->first_data)
   {
+    if (emb_read_query_result(mysql))
+      return 1;
+    stmt->field_count= mysql->field_count;
+    mysql->status= MYSQL_STATUS_READY;
+    res= thd->cur_data;
+    thd->cur_data= NULL;
     if (!(mysql->server_status & SERVER_STATUS_AUTOCOMMIT))
      mysql->server_status|= SERVER_STATUS_IN_TRANS;
 
     stmt->fields= mysql->fields;
-    stmt->mem_root= mysql->field_alloc;
+    stmt->mem_root= res->alloc;
     mysql->fields= NULL;
+    my_free((gptr) res,MYF(0));
   }
 
   return 0;
 }
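emb_fetch_lengths() and the query-cache code both rely on the same storage trick: each column value is prefixed by a uint holding its length, placed immediately before the address the column pointer refers to, and read back via *(uint *)(column - sizeof(uint)). A small sketch of that layout; the buffer construction is illustrative:

    /* Length-prefixed column layout used by the embedded result rows. */
    #include <stdlib.h>
    #include <string.h>

    static char *make_column(const char *val, unsigned int len)
    {
      char *buf= malloc(sizeof(unsigned int) + len + 1);
      memcpy(buf, &len, sizeof(unsigned int));           /* hidden prefix  */
      memcpy(buf + sizeof(unsigned int), val, len + 1);  /* value + NUL    */
      return buf + sizeof(unsigned int);                 /* column pointer */
    }

    static unsigned int column_length(const char *column)
    {
      unsigned int len;
      memcpy(&len, column - sizeof(unsigned int), sizeof(unsigned int));
      return len;
    }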
 
@@ -201,13 +242,42 @@ static void emb_fetch_lengths(ulong *to, MYSQL_ROW column,
     *to= *column ? *(uint *)((*column) - sizeof(uint)) : 0;
 }
 
-static my_bool emb_mysql_read_query_result(MYSQL *mysql)
+static my_bool emb_read_query_result(MYSQL *mysql)
 {
-  if (mysql->net.last_errno)
-    return -1;
+  THD *thd= (THD*) mysql->thd;
+  MYSQL_DATA *res= thd->first_data;
+  DBUG_ASSERT(!thd->cur_data);
+  thd->first_data= res->embedded_info->next;
+  if (res->embedded_info->last_errno &&
+      !res->embedded_info->fields_list)
+  {
+    embedded_get_error(mysql, res);
+    return 1;
+  }
 
-  if (mysql->field_count)
+  mysql->warning_count= res->embedded_info->warning_count;
+  mysql->server_status= res->embedded_info->server_status;
+  mysql->field_count= res->fields;
+  mysql->fields= res->embedded_info->fields_list;
+  mysql->affected_rows= res->embedded_info->affected_rows;
+  mysql->insert_id= res->embedded_info->insert_id;
+  mysql->net.last_errno= 0;
+  mysql->net.last_error[0]= 0;
+  mysql->info= 0;
+
+  if (res->embedded_info->info[0])
+  {
+    strmake(mysql->info_buffer, res->embedded_info->info, MYSQL_ERRMSG_SIZE-1);
+    mysql->info= mysql->info_buffer;
+  }
+
+  if (res->embedded_info->fields_list)
+  {
     mysql->status=MYSQL_STATUS_GET_RESULT;
+    thd->cur_data= res;
+  }
+  else
+    my_free((gptr) res, MYF(0));
 
   return 0;
 }
 
@@ -215,14 +285,18 @@ static my_bool emb_mysql_read_query_result(MYSQL *mysql)
 static int emb_stmt_execute(MYSQL_STMT *stmt)
 {
   DBUG_ENTER("emb_stmt_execute");
-  char header[4];
+  char header[5];
+  MYSQL_DATA *res;
+  THD *thd;
+
   int4store(header, stmt->stmt_id);
-  THD *thd= (THD*)stmt->mysql->thd;
+  header[4]= stmt->flags;
+  thd= (THD*)stmt->mysql->thd;
   thd->client_param_count= stmt->param_count;
   thd->client_params= stmt->params;
   if (emb_advanced_command(stmt->mysql, COM_STMT_EXECUTE,0,0,
                            header, sizeof(header), 1) ||
-      emb_mysql_read_query_result(stmt->mysql))
+      emb_read_query_result(stmt->mysql))
   {
     NET *net= &stmt->mysql->net;
     set_stmt_errmsg(stmt, net->last_error, net->last_errno, net->sqlstate);
@@ -230,6 +304,8 @@ static int emb_stmt_execute(MYSQL_STMT *stmt)
   }
   stmt->affected_rows= stmt->mysql->affected_rows;
   stmt->insert_id= stmt->mysql->insert_id;
+  stmt->server_status= stmt->mysql->server_status;
+
   DBUG_RETURN(0);
 }
 
@@ -240,22 +316,53 @@ int emb_read_binary_rows(MYSQL_STMT *stmt)
     return 1;
   stmt->result= *data;
   my_free((char *) data, MYF(0));
+  set_stmt_errmsg(stmt, stmt->mysql->net.last_error,
+                  stmt->mysql->net.last_errno, stmt->mysql->net.sqlstate);
   return 0;
 }
 
+
+int emb_read_rows_from_cursor(MYSQL_STMT *stmt)
+{
+  MYSQL *mysql= stmt->mysql;
+  THD *thd= (THD*) mysql->thd;
+  MYSQL_DATA *res= thd->first_data;
+  DBUG_ASSERT(!thd->first_data->embedded_info->next);
+  thd->first_data= 0;
+  if (res->embedded_info->last_errno)
+  {
+    embedded_get_error(mysql, res);
+    set_stmt_errmsg(stmt, mysql->net.last_error,
+                    mysql->net.last_errno, mysql->net.sqlstate);
+    return 1;
+  }
+
+  thd->cur_data= res;
+  mysql->warning_count= res->embedded_info->warning_count;
+  mysql->server_status= res->embedded_info->server_status;
+  mysql->net.last_errno= 0;
+  mysql->net.last_error[0]= 0;
+
+  return emb_read_binary_rows(stmt);
+}
+
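emb_stmt_execute() above grows the COM_STMT_EXECUTE header from 4 to 5 bytes: the 4-byte little-endian statement id written by int4store() is now followed by a one-byte flags field taken from stmt->flags. A sketch of the layout:

    /* The 5-byte COM_STMT_EXECUTE header built above:
       bytes 0-3 little-endian statement id, byte 4 flags. */
    static void build_execute_header(unsigned char header[5],
                                     unsigned long stmt_id,
                                     unsigned char flags)
    {
      header[0]= (unsigned char) (stmt_id);        /* int4store equivalent */
      header[1]= (unsigned char) (stmt_id >> 8);
      header[2]= (unsigned char) (stmt_id >> 16);
      header[3]= (unsigned char) (stmt_id >> 24);
      header[4]= flags;
    }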
 int emb_unbuffered_fetch(MYSQL *mysql, char **row)
 {
-  MYSQL_DATA *data= ((THD*)mysql->thd)->data;
-  embedded_get_error(mysql);
-  if (mysql->net.last_errno)
-    return mysql->net.last_errno;
+  THD *thd= (THD*) mysql->thd;
+  MYSQL_DATA *data= thd->cur_data;
+  if (data && data->embedded_info->last_errno)
+  {
+    embedded_get_error(mysql, data);
+    thd->cur_data= 0;
+    return 1;
+  }
   if (!data || !data->data)
   {
     *row= NULL;
     if (data)
     {
+      thd->cur_data= thd->first_data;
+      thd->first_data= data->embedded_info->next;
       free_rows(data);
-      ((THD*)mysql->thd)->data= NULL;
     }
   }
   else
@@ -269,9 +376,9 @@ int emb_unbuffered_fetch(MYSQL *mysql, char **row)
 static void emb_free_embedded_thd(MYSQL *mysql)
 {
   THD *thd= (THD*)mysql->thd;
-  if (thd->data)
-    free_rows(thd->data);
+  thd->clear_data_list();
   thread_count--;
+  thd->store_globals();
   delete thd;
   mysql->thd=0;
 }
@@ -283,23 +390,11 @@ static const char * emb_read_statistics(MYSQL *mysql)
 }
 
 
-static MYSQL_RES * emb_mysql_store_result(MYSQL *mysql)
+static MYSQL_RES * emb_store_result(MYSQL *mysql)
 {
   return mysql_store_result(mysql);
 }
 
-my_bool emb_next_result(MYSQL *mysql)
-{
-  THD *thd= (THD*)mysql->thd;
-  DBUG_ENTER("emb_next_result");
-
-  if (emb_advanced_command(mysql, COM_QUERY,0,0,
-			   thd->query_rest.ptr(),thd->query_rest.length(),1) ||
-      emb_mysql_read_query_result(mysql))
-    DBUG_RETURN(1);
-
-  DBUG_RETURN(0);				/* No more results */
-}
 
 int emb_read_change_user_result(MYSQL *mysql,
 				char *buff __attribute__((unused)),
@@ -310,10 +405,10 @@ int emb_read_change_user_result(MYSQL *mysql,
 
 MYSQL_METHODS embedded_methods=
 {
-  emb_mysql_read_query_result,
+  emb_read_query_result,
   emb_advanced_command,
   emb_read_rows,
-  emb_mysql_store_result,
+  emb_store_result,
   emb_fetch_lengths,
   emb_flush_use_result,
   emb_list_fields,
@@ -323,8 +418,9 @@ MYSQL_METHODS embedded_methods=
   emb_unbuffered_fetch,
   emb_free_embedded_thd,
   emb_read_statistics,
-  emb_next_result,
-  emb_read_change_user_result
+  emb_read_query_result,
+  emb_read_change_user_result,
+  emb_read_rows_from_cursor
 };
 
 C_MODE_END
 
@@ -395,6 +491,12 @@ int init_embedded_server(int argc, char **argv, char **groups)
 
   my_progname= (char *)"mysql_embedded";
 
+  /*
+    Perform basic logger initialization. Should be called after MY_INIT,
+    as it initializes mutexes. Log tables are inited later.
+  */
+  logger.init_base();
+
   if (init_common_variables("my", *argcp, *argvp, (const char **)groups))
   {
     mysql_server_end();
@@ -483,6 +585,7 @@ void init_embedded_mysql(MYSQL *mysql, int client_flag, char *db)
   THD *thd = (THD *)mysql->thd;
   thd->mysql= mysql;
   mysql->server_version= server_version;
+  init_alloc_root(&mysql->field_alloc, 8192, 0);
 }
 
 void *create_embedded_thd(int client_flag, char *db)
@@ -490,6 +593,7 @@ void *create_embedded_thd(int client_flag, char *db)
   THD * thd= new THD;
   thd->thread_id= thread_id++;
 
+  thd->thread_stack= (char*) &thd;
   if (thd->store_globals())
   {
     fprintf(stderr,"store_globals failed.\n");
@@ -498,7 +602,6 @@ void *create_embedded_thd(int client_flag, char *db)
 
   thd->mysys_var= my_thread_var;
   thd->dbug_thread_id= my_thread_id();
-  thd->thread_stack= (char*) &thd;
 
   /* TODO - add init_connect command execution */
 
@@ -517,9 +620,10 @@ void *create_embedded_thd(int client_flag, char *db)
   thd->security_ctx->db_access= DB_ACLS;
   thd->security_ctx->master_access= ~NO_ACCESS;
 #endif
-  thd->net.query_cache_query= 0;
-
-  thd->data= 0;
+  thd->cur_data= 0;
+  thd->first_data= 0;
+  thd->data_tail= &thd->first_data;
+  bzero((char*) &thd->net, sizeof(thd->net));
 
   thread_count++;
   return thd;
@@ -531,11 +635,15 @@ err:
 #ifdef NO_EMBEDDED_ACCESS_CHECKS
 int check_embedded_connection(MYSQL *mysql)
 {
+  int result;
   THD *thd= (THD*)mysql->thd;
   Security_context *sctx= thd->security_ctx;
-  sctx->host_or_ip= sctx->host= (char*)my_localhost;
+  sctx->host_or_ip= sctx->host= (char*) my_localhost;
+  strmake(sctx->priv_host, (char*) my_localhost, MAX_HOSTNAME-1);
   sctx->priv_user= sctx->user= my_strdup(mysql->user, MYF(0));
-  return check_user(thd, COM_CONNECT, NULL, 0, thd->db, true);
+  result= check_user(thd, COM_CONNECT, NULL, 0, thd->db, true);
+  emb_read_query_result(mysql);
+  return result;
 }
 
 #else
@@ -616,26 +724,147 @@ static char *dup_str_aux(MEM_ROOT *root, const char *from, uint length,
 }
 
 
+/*
+  creates new result and hooks it to the list
+
+  SYNOPSIS
+    alloc_new_dataset()
+
+  NOTES
+    allocs the MYSQL_DATA + embedded_query_result couple
+    to store the next query result,
+    links these two and attaches them to the THD::data_tail
+
+  RETURN
+    pointer to the newly created query result
+*/
+
+MYSQL_DATA *THD::alloc_new_dataset()
+{
+  MYSQL_DATA *data;
+  struct embedded_query_result *emb_data;
+  if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL),
+                       &data, sizeof(*data),
+                       &emb_data, sizeof(*emb_data),
+                       NULL))
+    return NULL;
+
+  emb_data->prev_ptr= &data->data;
+  cur_data= data;
+  *data_tail= data;
+  data_tail= &emb_data->next;
+  data->embedded_info= emb_data;
+  return data;
+}
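alloc_new_dataset() appends each result to a singly linked queue in O(1): THD::data_tail always addresses the next-pointer slot of the last element. A reduced sketch of the idiom:

    /* Reduced sketch of the tail-pointer append in alloc_new_dataset(). */
    #include <stdlib.h>

    typedef struct dataset { struct dataset *next; } dataset;

    typedef struct
    {
      dataset  *first;
      dataset **tail;     /* always points at the last 'next' slot */
    } result_queue;

    static dataset *queue_append(result_queue *q)
    {
      dataset *d= calloc(1, sizeof(dataset));
      if (!d)
        return 0;
      *q->tail= d;        /* hook onto the list */
      q->tail= &d->next;  /* advance the tail   */
      return d;
    }

A queue starts out with first= NULL and tail= &first, which is exactly the shape create_embedded_thd() sets up with thd->data_tail= &thd->first_data.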
 
 
+/*
+  stores server_status and warning_count in the current
+  query result structures
+
+  SYNOPSIS
+    write_eof_packet()
+    thd    current thread
+
+  NOTES
+    should be called after we get the recordset-result
+*/
+
+static void write_eof_packet(THD *thd)
+{
+  /*
+    The following test should never be true, but it's better to do it
+    because if 'is_fatal_error' is set the server is not going to execute
+    other queries (see the if test in dispatch_command / COM_QUERY)
+  */
+  if (thd->is_fatal_error)
+    thd->server_status&= ~SERVER_MORE_RESULTS_EXISTS;
+  thd->cur_data->embedded_info->server_status= thd->server_status;
+  /*
+    Don't send warn count during SP execution, as the warn_list
+    is cleared between substatements, and mysqltest gets confused
+  */
+  thd->cur_data->embedded_info->warning_count=
+    (thd->spcont ? 0 : min(thd->total_warn_count, 65535));
+}
+
+
+/*
+  allocs new query result and initialises Protocol::alloc
+
+  SYNOPSIS
+    Protocol::begin_dataset()
+
+  RETURN
+    0 if success
+    1 if memory allocation failed
+*/
+
+int Protocol::begin_dataset()
+{
+  MYSQL_DATA *data= thd->alloc_new_dataset();
+  if (!data)
+    return 1;
+  alloc= &data->alloc;
+  init_alloc_root(alloc,8192,0);	/* Assume rowlength < 8192 */
+  alloc->min_malloc=sizeof(MYSQL_ROWS);
+  return 0;
+}
+
+
+/*
+  remove last row of current recordset
+
+  SYNOPSIS
+    Protocol_simple::remove_last_row()
+
+  NOTES
+    walks from the beginning of the current recordset to
+    the last record and cuts it off.
+    Not supposed to be frequently called.
+*/
+
+void Protocol_simple::remove_last_row()
+{
+  MYSQL_DATA *data= thd->cur_data;
+  MYSQL_ROWS **last_row_hook= &data->data;
+  uint count= data->rows;
+  DBUG_ENTER("Protocol_simple::remove_last_row");
+  while (--count)
+    last_row_hook= &(*last_row_hook)->next;
+
+  *last_row_hook= 0;
+  data->embedded_info->prev_ptr= last_row_hook;
+  data->rows--;
+
+  DBUG_VOID_RETURN;
+}
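Because MYSQL_ROWS is singly linked, remove_last_row() has to walk from the head to find the hook that points at the last row before cutting it off; the comment above notes the call is rare, so the linear walk is acceptable. A reduced sketch, assuming at least one row is present:

    /* Reduced sketch of the walk in Protocol_simple::remove_last_row(). */
    typedef struct row { struct row *next; } row;

    typedef struct
    {
      row *head;
      row **prev_ptr;       /* where the next row would be hooked on */
      unsigned int rows;
    } recordset;

    static void remove_last(recordset *r)
    {
      row **hook= &r->head;
      unsigned int count= r->rows;   /* must be >= 1 */
      while (--count)
        hook= &(*hook)->next;        /* stop at the last row's hook */
      *hook= 0;                      /* cut the last row off        */
      r->prev_ptr= hook;             /* future appends land here    */
      r->rows--;
    }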
 
 
 bool Protocol::send_fields(List<Item> *list, uint flags)
 {
   List_iterator_fast<Item> it(*list);
   Item *item;
   MYSQL_FIELD *client_field;
-  MYSQL *mysql= thd->mysql;
   MEM_ROOT *field_alloc;
   CHARSET_INFO *thd_cs= thd->variables.character_set_results;
   CHARSET_INFO *cs= system_charset_info;
-
+  MYSQL_DATA *data;
   DBUG_ENTER("send_fields");
 
-  if (!mysql)            // bootstrap file handling
+  if (!thd->mysql)            // bootstrap file handling
     DBUG_RETURN(0);
 
-  field_count= list->elements;
-  field_alloc= &mysql->field_alloc;
-  if (!(client_field= thd->mysql->fields=
-	(MYSQL_FIELD *)alloc_root(field_alloc,
-				  sizeof(MYSQL_FIELD) * field_count)))
+  if (begin_dataset())
+    goto err;
+
+  data= thd->cur_data;
+  data->fields= field_count= list->elements;
+  field_alloc= &data->alloc;
+
+  if (!(client_field= data->embedded_info->fields_list=
+	(MYSQL_FIELD*)alloc_root(field_alloc, sizeof(MYSQL_FIELD)*field_count)))
     goto err;
 
   while ((item= it++))
@@ -643,6 +872,10 @@ bool Protocol::send_fields(List<Item> *list, uint flags)
     Send_field server_field;
     item->make_field(&server_field);
 
+    /* Keep things compatible for old clients */
+    if (server_field.type == MYSQL_TYPE_VARCHAR)
+      server_field.type= MYSQL_TYPE_VAR_STRING;
+
     client_field->db= dup_str_aux(field_alloc, server_field.db_name,
                                   strlen(server_field.db_name), cs, thd_cs);
     client_field->table= dup_str_aux(field_alloc, server_field.table_name,
@@ -703,7 +936,9 @@ bool Protocol::send_fields(List<Item> *list, uint flags)
       client_field->max_length= 0;
     ++client_field;
   }
-  thd->mysql->field_count= field_count;
+
+  if (flags & SEND_EOF)
+    write_eof_packet(thd);
 
   DBUG_RETURN(prepare_for_send(list));
  err:
@@ -723,25 +958,11 @@ bool Protocol::write()
 bool Protocol_prep::write()
 {
   MYSQL_ROWS *cur;
-  MYSQL_DATA *data= thd->data;
-
-  if (!data)
-  {
-    if (!(data= (MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA),
-					MYF(MY_WME | MY_ZEROFILL))))
-      return true;
-
-    alloc= &data->alloc;
-    init_alloc_root(alloc,8192,0);	/* Assume rowlength < 8192 */
-    alloc->min_malloc=sizeof(MYSQL_ROWS);
-    data->rows=0;
-    data->fields=field_count;
-    data->prev_ptr= &data->data;
-    thd->data= data;
-  }
+  MYSQL_DATA *data= thd->cur_data;
 
   data->rows++;
-  if (!(cur= (MYSQL_ROWS *)alloc_root(alloc, sizeof(MYSQL_ROWS)+packet->length())))
+  if (!(cur= (MYSQL_ROWS *)alloc_root(alloc,
+                                      sizeof(MYSQL_ROWS)+packet->length())))
   {
     my_error(ER_OUT_OF_RESOURCES,MYF(0));
     return true;
@@ -750,8 +971,8 @@ bool Protocol_prep::write()
 
   memcpy(cur->data, packet->ptr()+1, packet->length()-1);
   cur->length= packet->length();       /* To allow us to do sanity checks */
 
-  *data->prev_ptr= cur;
-  data->prev_ptr= &cur->next;
+  *data->embedded_info->prev_ptr= cur;
+  data->embedded_info->prev_ptr= &cur->next;
   cur->next= 0;
 
   return false;
@@ -761,46 +982,52 @@ void send_ok(THD *thd,ha_rows affected_rows,ulonglong id,const char *message)
 {
   DBUG_ENTER("send_ok");
-  MYSQL *mysql= current_thd->mysql;
+  MYSQL_DATA *data;
+  MYSQL *mysql= thd->mysql;
+
   if (!mysql)            // bootstrap file handling
     DBUG_VOID_RETURN;
-  mysql->affected_rows= affected_rows;
-  mysql->insert_id= id;
+  if (thd->net.no_send_ok)	// hack for re-parsing queries
+    DBUG_VOID_RETURN;
+  if (!(data= thd->alloc_new_dataset()))
+    return;
+  data->embedded_info->affected_rows= affected_rows;
+  data->embedded_info->insert_id= id;
   if (message)
-  {
-    strmake(thd->net.last_error, message, sizeof(thd->net.last_error)-1);
-    mysql->info= thd->net.last_error;
-  }
+    strmake(data->embedded_info->info, message,
+            sizeof(data->embedded_info->info)-1);
+
+  write_eof_packet(thd);
+  thd->cur_data= 0;
   DBUG_VOID_RETURN;
 }
 
 void send_eof(THD *thd)
 {
+  write_eof_packet(thd);
+  thd->cur_data= 0;
 }
 
+
+void net_send_error_packet(THD *thd, uint sql_errno, const char *err)
+{
+  MYSQL_DATA *data= thd->cur_data ? thd->cur_data : thd->alloc_new_dataset();
+  struct embedded_query_result *ei= data->embedded_info;
+
+  ei->last_errno= sql_errno;
+  strmake(ei->info, err, sizeof(ei->info)-1);
+  strmov(ei->sqlstate, mysql_errno_to_sqlstate(sql_errno));
+  thd->cur_data= 0;
+}
+
+
 void Protocol_simple::prepare_for_resend()
 {
   MYSQL_ROWS *cur;
-  MYSQL_DATA *data= thd->data;
-
+  MYSQL_DATA *data= thd->cur_data;
   DBUG_ENTER("send_data");
 
-  if (!data)
-  {
-    if (!(data= (MYSQL_DATA*) my_malloc(sizeof(MYSQL_DATA),
-					MYF(MY_WME | MY_ZEROFILL))))
-      goto err;
-
-    alloc= &data->alloc;
-    init_alloc_root(alloc,8192,0);	/* Assume rowlength < 8192 */
-    alloc->min_malloc=sizeof(MYSQL_ROWS);
-    data->rows=0;
-    data->fields=field_count;
-    data->prev_ptr= &data->data;
-    thd->data= data;
-  }
-
   data->rows++;
   if (!(cur= (MYSQL_ROWS *)alloc_root(alloc,
                                       sizeof(MYSQL_ROWS)+(field_count + 1) * sizeof(char *))))
   {
@@ -809,10 +1036,10 @@ void Protocol_simple::prepare_for_resend()
   }
 
   cur->data= (MYSQL_ROW)(((char *)cur) + sizeof(MYSQL_ROWS));
-  *data->prev_ptr= cur;
-  data->prev_ptr= &cur->next;
+  *data->embedded_info->prev_ptr= cur;
+  data->embedded_info->prev_ptr= &cur->next;
   next_field=cur->data;
-  next_mysql_field= thd->mysql->fields;
+  next_mysql_field= data->embedded_info->fields_list;
 err:
   DBUG_VOID_RETURN;
 }
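send_ok() now materializes a dataset even for statements that return no rows, so the affected-row count, insert id and info message survive in the queue until emb_read_query_result() copies them into the MYSQL handle. A reduced sketch of that bookkeeping; the structure and buffer sizes are illustrative:

    /* Reduced sketch of send_ok()'s status parking. */
    #include <string.h>

    typedef struct
    {
      unsigned long long affected_rows, insert_id;
      char info[128];
    } ok_status;

    static void park_ok(ok_status *dst, unsigned long long affected,
                        unsigned long long id, const char *message)
    {
      dst->affected_rows= affected;
      dst->insert_id= id;
      if (message)
      {
        strncpy(dst->info, message, sizeof(dst->info) - 1);
        dst->info[sizeof(dst->info) - 1]= '\0';
      }
    }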
diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c
index 70074e44c6f..cad1bd4c47b 100644
--- a/libmysqld/libmysqld.c
+++ b/libmysqld/libmysqld.c
@@ -14,6 +14,11 @@
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA */
 
+#include <...>
+#include <...>
+#include <...>
+#include <...>
+#include <...>
 #include "embedded_priv.h"
 #include <...>
 #include <...>
@@ -193,7 +198,12 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
 
   if (!user)
     user= "";
-  mysql->user=my_strdup(user,MYF(0));
+  /*
+    We need to alloc some space for mysql->info but don't want to
+    put extra 'my_free's in mysql_close.
+    So we alloc it with the 'user' string to be freed at once.
+  */
+  mysql->user= my_strdup(user, MYF(0));
 
   port=0;
   unix_socket=0;
@@ -207,6 +217,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
   if (db)
     client_flag|=CLIENT_CONNECT_WITH_DB;
 
+  mysql->info_buffer= my_malloc(MYSQL_ERRMSG_SIZE, MYF(0));
   mysql->thd= create_embedded_thd(client_flag, db_name);
 
   init_embedded_mysql(mysql, client_flag, db_name);
@@ -243,7 +254,6 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user,
   DBUG_RETURN(mysql);
 
 error:
-  embedded_get_error(mysql);
   DBUG_PRINT("error",("message: %u (%s)", mysql->net.last_errno,
 		      mysql->net.last_error));
   {
diff --git a/mysql-test/include/im_check_os.inc b/mysql-test/include/im_check_os.inc
index 9465115feb5..33105f79d52 100644
--- a/mysql-test/include/im_check_os.inc
+++ b/mysql-test/include/im_check_os.inc
@@ -3,5 +3,12 @@
 
 --source include/not_windows.inc
 
+# Check that the CSV engine was compiled in, since the IM test suite uses
+# log-tables-specific options, which are not present if CSV (and therefore
+# the log tables) is not compiled in.
+# NOTE: In future we should remove this check and make the test suite
+# pass the correct options to IM depending on the CSV presence.
+--source include/have_csv.inc
+
 --connection default
 --disconnect dflt_server_con
diff --git a/mysql-test/include/partition_1.inc b/mysql-test/include/partition_1.inc
new file mode 100644
index 00000000000..9b829494a73
--- /dev/null
+++ b/mysql-test/include/partition_1.inc
@@ -0,0 +1,750 @@
+-- source include/have_partition.inc
+
+# include/partition_1.inc
+#
+# Partitioning tests
+#
+# Attention: The variable
+#     $engine  -- Storage engine to be tested.
+# must be set within the script sourcing this file.
+#
+--disable_abort_on_error
+SET AUTOCOMMIT= 1;
+
+##### Disabled testcases, because of open bugs #####
+--echo
+--echo #------------------------------------------------------------------------
+--echo # There are several testcases disabled because of the open bugs
+--echo # #15407 , #15408 , #15890 , #15961 , #13447 , #15966 , #15968, #16370
+--echo #------------------------------------------------------------------------
+# Bug#15407 Partitions: crash if subpartition
+let $fixed_bug15407= 0;
+# Bug#15408 Partitions: subpartition names are not unique
+let $fixed_bug15408= 0;
+# Bug#15890 Partitions: Strange interpretation of partition number
+let $fixed_bug15890= 0;
+# Bug#15961 Partitions: Creation of subpart. table without subpart. rule not rejected
+let $fixed_bug15961= 0;
+# Bug#13447 Partitions: crash with alter table
+let $fixed_bug13447= 0;
+# Bug#15966 Partitions: crash if session default engine <> engine used in create table
+let $fixed_bug15966= 0;
+# Bug#15968 Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
+let $fixed_bug15968= 0;
+# Bug#16370 Partitions: subpartitions names not mentioned in SHOW CREATE TABLE output
+let $fixed_bug16370= 0;
+
+##### Option, for displaying files #####
+#
+# Attention: Displaying the directory content via "ls var/master-data/test/t*"
+#            is probably not portable.
+#            let $ls= 0; disables the execution of "ls ....."
+let $ls= 0;
+
+################################################################################
+# Partitioning syntax
+#
+# CREATE TABLE .... (column-list ..)
+# PARTITION BY
+#     KEY '(' ( column-list ) ')'
+#   | RANGE '(' ( expr ) ')'
+#   | LIST '(' ( expr ) ')'
+#   | HASH '(' ( expr ) ')'
+# [PARTITIONS num ]
+# [SUBPARTITION BY
+#     KEY '(' ( column-list ) ')'
+#   | HASH '(' ( expr ) ')'
+#   [SUBPARTITIONS num ]
+# ]
+# [ '('
+#     ( PARTITION logical-name
+#       [ VALUES LESS THAN '(' ( expr | MAX_VALUE ) ')' ]
+#       [ VALUES IN '(' (expr)+ ')' ]
+#       [ TABLESPACE tablespace-name ]
+#       [ [ STORAGE ] ENGINE [ '=' ] storage-engine-name ]
+#       [ NODEGROUP nodegroup-id ]
+#       [ '('
+#           ( SUBPARTITION logical-name
+#             [ TABLESPACE tablespace-name ]
+#             [ STORAGE ENGINE = storage-engine-name ]
+#             [ NODEGROUP nodegroup-id ]
+#           )+
+#         ')'
+#       ]
+#     )+
+#   ')'
+# ]
+################################################################################
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 0. Setting of auxiliary variables + Creation of an auxiliary table
+--echo #    needed in all testcases
+--echo #------------------------------------------------------------------------
+let $max_row= `SELECT @max_row`;
+let $max_row_div2= `SELECT @max_row DIV 2`;
+let $max_row_div3= `SELECT @max_row DIV 3`;
+let $max_row_div4= `SELECT @max_row DIV 4`;
+let $max_int_4= 2147483647;
+--disable_warnings
+DROP TABLE IF EXISTS t0_template;
+--enable_warnings
+CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1))
+ENGINE = MEMORY;
+--echo # Logging of INSERTs into t0_template suppressed
+--disable_query_log
+let $num= $max_row;
+while ($num)
+{
+  eval INSERT INTO t0_template SET f1 = $num, f2 = '---$num---';
+
+  dec $num;
+}
+--enable_query_log
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 1. Some syntax checks
+--echo #------------------------------------------------------------------------
+--echo # 1.1 Subpartitioned table without subpartitioning rule must be rejected
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+if ($fixed_bug15961)
+{
+# Bug#15961 Partitions: Creation of subpart. table without subpart. rule not rejected
+--error 9999
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+( PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11));
+}
+--echo # FIXME Implement testcases, where it is checked that all create and
+--echo #       alter table statements
+--echo #       - with missing mandatory parameters are rejected
+--echo #       - with optional parameters are accepted
+--echo #       - with wrong combinations of optional parameters are rejected
+--echo #       - ............
+
+--echo
+--echo #------------------------------------------------------------------------
+--echo # 2. Checks where the engine is assigned on all supported (CREATE TABLE
+--echo #    statement) positions + basic operations on the tables
+--echo #    Storage engine mixups are currently (2005-12-23) not supported
+--echo #------------------------------------------------------------------------
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+
+--echo # 2.1 non partitioned table (for comparison)
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine;
+# MLML Full size (as check of check routine)
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.2 Assignment of storage engine just after column list only
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine
+     PARTITION BY HASH(f1) PARTITIONS 2;
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.3 Assignment of storage engine just after partition or subpartition
+--echo #     name only
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY HASH(f1)
+     ( PARTITION part1 STORAGE ENGINE = $engine,
+       PARTITION part2 STORAGE ENGINE = $engine
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY RANGE(f1)
+     SUBPARTITION BY HASH(f1)
+     ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+         (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+          SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+       PARTITION part2 VALUES LESS THAN ($max_int_4)
+         (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+          SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.4 Some but not all named partitions or subpartitions get a storage
+--echo #     engine assigned
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY HASH(f1)
+     ( PARTITION part1 STORAGE ENGINE = $engine,
+       PARTITION part2
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY HASH(f1)
+     ( PARTITION part1 ,
+       PARTITION part2 STORAGE ENGINE = $engine
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY RANGE(f1)
+     SUBPARTITION BY HASH(f1)
+     ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+         (SUBPARTITION subpart11,
+          SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+       PARTITION part2 VALUES LESS THAN ($max_int_4)
+         (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+          SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY RANGE(f1)
+     SUBPARTITION BY HASH(f1)
+     ( PARTITION part1 VALUES LESS THAN ($max_row_div2)
+         (SUBPARTITION subpart11 STORAGE ENGINE = $engine,
+          SUBPARTITION subpart12 STORAGE ENGINE = $engine),
+       PARTITION part2 VALUES LESS THAN ($max_int_4)
+         (SUBPARTITION subpart21,
+          SUBPARTITION subpart22 )
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+#
+--echo # 2.5 Storage engine assignment after partition name + after name of
+--echo #     subpartitions belonging to another partition
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+     PARTITION BY RANGE(f1)
+     SUBPARTITION BY HASH(f1)
+     ( PARTITION part1 VALUES LESS THAN ($max_row_div2) ENGINE = $engine
+         (SUBPARTITION subpart11,
+          SUBPARTITION subpart12),
+       PARTITION part2 VALUES LESS THAN ($max_int_4)
+         (SUBPARTITION subpart21 STORAGE ENGINE = $engine,
+          SUBPARTITION subpart22 STORAGE ENGINE = $engine)
+     );
+--source include/partition_10.inc
+DROP TABLE t1;
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
PARTITION BY RANGE(f1) + SUBPARTITION BY HASH(f1) + ( PARTITION part1 VALUES LESS THAN ($max_row_div2) + (SUBPARTITION subpart11 STORAGE ENGINE = $engine, + SUBPARTITION subpart12 STORAGE ENGINE = $engine), + PARTITION part2 VALUES LESS THAN ($max_int_4) ENGINE = $engine + (SUBPARTITION subpart21, + SUBPARTITION subpart22) + ); +--source include/partition_10.inc +DROP TABLE t1; +# +--echo # 2.6 Precedence of storage engine assignments +--echo # 2.6.1 Storage engine assignment after column list + after partition +--echo # or subpartition name +eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine + PARTITION BY HASH(f1) + ( PARTITION part1 STORAGE ENGINE = $engine, + PARTITION part2 STORAGE ENGINE = $engine + ); +--source include/partition_10.inc +DROP TABLE t1; +eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = $engine + PARTITION BY RANGE(f1) + SUBPARTITION BY HASH(f1) + ( PARTITION part1 VALUES LESS THAN ($max_row_div2) + (SUBPARTITION subpart11 STORAGE ENGINE = $engine, + SUBPARTITION subpart12 STORAGE ENGINE = $engine), + PARTITION part2 VALUES LESS THAN ($max_int_4) + (SUBPARTITION subpart21 STORAGE ENGINE = $engine, + SUBPARTITION subpart22 STORAGE ENGINE = $engine) + ); +--source include/partition_10.inc +DROP TABLE t1; +--echo # 2.6.2 Storage engine assignment after partition name + after +--echo # subpartition name +# in partition part + in sub partition part +eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) + PARTITION BY RANGE(f1) + SUBPARTITION BY HASH(f1) + ( PARTITION part1 VALUES LESS THAN ($max_row_div2) STORAGE ENGINE = $engine + (SUBPARTITION subpart11 STORAGE ENGINE = $engine, + SUBPARTITION subpart12 STORAGE ENGINE = $engine), + PARTITION part2 VALUES LESS THAN ($max_int_4) + (SUBPARTITION subpart21 STORAGE ENGINE = $engine, + SUBPARTITION subpart22 STORAGE ENGINE = $engine) + ); +--source include/partition_10.inc +DROP TABLE t1; + +--echo # 2.7 Session default engine differs from engine used within create table +eval SET SESSION storage_engine=$engine_other; +if ($fixed_bug15966) +{ +# Bug#15966 Partitions: crash if session default engine <> engine used in create table +eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) ( PARTITION part1 ENGINE = $engine); +--source include/partition_10.inc +DROP TABLE t1; +# Bug#15966 Partitions: crash if session default engine <> engine used in create table +eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) + PARTITION BY RANGE(f1) + SUBPARTITION BY HASH(f1) + ( PARTITION part1 VALUES LESS THAN (1000) + (SUBPARTITION subpart11 STORAGE ENGINE = $engine, + SUBPARTITION subpart12 STORAGE ENGINE = $engine)); +--source include/partition_10.inc +DROP TABLE t1; +} +eval SET SESSION storage_engine=$engine; + + +--echo +--echo #------------------------------------------------------------------------ +--echo # 3. 
Check assigning the number of partitions and subpartitions
+--echo #    with and without named partitions/subpartitions
+--echo #------------------------------------------------------------------------
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+--echo # 3.1 (positive) without partition/subpartition number assignment
+--echo # 3.1.1 no partition number, no named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1);
+--source include/partition_10.inc
+DROP TABLE t1;
+--echo # 3.1.2 no partition number, named partitions
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2);
+--source include/partition_10.inc
+DROP TABLE t1;
+# Attention: Several combinations are impossible
+# If subpartitioning exists
+# - the partitioning algorithm must be RANGE or LIST
+#   This implies the assignment of named partitions.
+# - the subpartitioning algorithm must be HASH or KEY
+--echo # 3.1.3 variations on no partition/subpartition number, named partitions,
+--echo #       different subpartitions are/are not named
+#
+# Partition name -- "properties"
+#    part1       -- first/non last
+#    part2       -- non first/non last
+#    part3       -- non first/ last
+#
+# Test pattern:
+#             named subpartitions in
+# Partition   part1 part2 part3
+#               N     N     N
+#               N     N     Y
+#               N     Y     N
+#               N     Y     Y
+#               Y     N     N
+#               Y     N     Y
+#               Y     Y     N
+#               Y     Y     Y
+--disable_query_log
+let $part0= CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1);
+#
+eval SET @aux = '(PARTITION part1 VALUES LESS THAN ($max_row_div2),';
+let $part1_N= `SELECT @AUX`;
+eval SET @aux = '(PARTITION part1 VALUES LESS THAN ($max_row_div2)
+(SUBPARTITION subpart11 , SUBPARTITION subpart12 ),';
+let $part1_Y= `SELECT @AUX`;
+#
+eval SET @aux = 'PARTITION part2 VALUES LESS THAN ($max_row),';
+let $part2_N= `SELECT @AUX`;
+eval SET @aux = 'PARTITION part2 VALUES LESS THAN ($max_row)
+(SUBPARTITION subpart21 , SUBPARTITION subpart22 ),';
+let $part2_Y= `SELECT @AUX`;
+#
+eval SET @aux = 'PARTITION part3 VALUES LESS THAN ($max_int_4))';
+let $part3_N= `SELECT @AUX`;
+eval SET @aux = 'PARTITION part3 VALUES LESS THAN ($max_int_4)
+(SUBPARTITION subpart31 , SUBPARTITION subpart32 ))';
+let $part3_Y= `SELECT @AUX`;
+--enable_query_log
+
+eval $part0 $part1_N $part2_N $part3_N ;
+DROP TABLE t1;
+# Bug#15407 Partitions: crash if subpartition
+if ($fixed_bug15407)
+{
+eval $part0 $part1_N $part2_N $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_N $part2_Y $part3_N ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_N $part2_Y $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_Y $part2_N $part3_N ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_Y $part2_N $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+eval $part0 $part1_Y $part2_Y $part3_N ;
+--source include/partition_10.inc
+DROP TABLE t1;
+}
+eval $part0 $part1_Y $part2_Y $part3_Y ;
+--source include/partition_10.inc
+DROP TABLE t1;
+
+--echo # 3.2 partition/subpartition numbers good and bad values and notations
+--disable_warnings
+DROP TABLE IF EXISTS t1;
+--enable_warnings
+--echo # 3.2.1 partition/subpartition numbers INTEGER notation
+# ML: "positive/negative" is my private judgement. It need not correspond
+#     with the server response.
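+# Illustration (a sketch, not executed): each case below sets $part_number
+# and sources include/partition_11.inc, which expands the value via eval;
+# for $part_number = 2 the first statement tried there becomes
+#   CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+#   PARTITION BY HASH(f1) PARTITIONS 2;
+# and only if that CREATE succeeds ($mysql_errno = 0) are the checks of
+# partition_10.inc run and the table dropped.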
+# (positive) number = 2
+let $part_number= 2;
+--source include/partition_11.inc
+# (positive) special case number = 1
+let $part_number= 1;
+--source include/partition_11.inc
+# (negative) 0 is nonsense
+let $part_number= 0;
+--source include/partition_11.inc
+# (negative) -1 is nonsense
+let $part_number= -1;
+--source include/partition_11.inc
+# (negative) 1000000 is too large
+let $part_number= 1000000;
+--source include/partition_11.inc
+
+if ($fixed_bug15890)
+{
+--echo # 3.2.2 partition/subpartition numbers DECIMAL notation
+# (positive) number = 2.0
+let $part_number= 2.0;
+--source include/partition_11.inc
+# (negative) -2.0 is nonsense
+let $part_number= -2.0;
+--source include/partition_11.inc
+# (negative) case number = 0.0 is nonsense
+let $part_number= 0.0;
+--source include/partition_11.inc
+# Bug#15890 Partitions: Strange interpretation of partition number
+# (negative) number = 1.5 is nonsense
+let $part_number= 1.5;
+--source include/partition_11.inc
+# (negative) number is too large
+let $part_number= 999999999999999999999999999999.999999999999999999999999999999;
+--source include/partition_11.inc
+# (negative) number is nearly zero
+let $part_number= 0.000000000000000000000000000001;
+--source include/partition_11.inc
+
+--echo # 3.2.3 partition/subpartition numbers FLOAT notation
+##### FLOAT notation
+# (positive) number = 2.0E+0
+let $part_number= 2.0E+0;
+--source include/partition_11.inc
+# Bug#15890 Partitions: Strange interpretation of partition number
+# (positive) number = 0.2E+1
+let $part_number= 0.2E+1;
+--source include/partition_11.inc
+# (negative) -2.0E+0 is nonsense
+let $part_number= -2.0E+0;
+--source include/partition_11.inc
+# (negative) 0.15E+1 is nonsense
+let $part_number= 0.15E+1;
+--source include/partition_11.inc
+# (negative) 0.0E+300 is zero
+let $part_number= 0.0E+300;
+--source include/partition_11.inc
+# Bug#15890 Partitions: Strange interpretation of partition number
+# (negative) 1E+300 is too large
+let $part_number= 1E+300;
+--source include/partition_11.inc
+# (negative) 1E-300 is nearly zero
+let $part_number= 1E-300;
+--source include/partition_11.inc
+}
+
+--echo # 3.2.4 partition/subpartition numbers STRING notation
+##### STRING notation
+# (negative?) case number = '2'
+let $part_number= '2';
+--source include/partition_11.inc
+# (negative?) case number = '2.0'
+let $part_number= '2.0';
+--source include/partition_11.inc
+# (negative?) case number = '0.2E+1'
+let $part_number= '0.2E+1';
+--source include/partition_11.inc
+# (negative) String starts with a digit, but 'A' follows
+let $part_number= '2A';
+--source include/partition_11.inc
+# (negative) String starts with 'A', but a digit follows
+let $part_number= 'A2';
+--source include/partition_11.inc
+# (negative) empty string
+let $part_number= '';
+--source include/partition_11.inc
+# (negative) string without any digits
+let $part_number= 'GARBAGE';
+--source include/partition_11.inc
+
+--echo # 3.2.5 partition/subpartition numbers other notations
+# (negative) String starts with a digit, but 'A' follows
+let $part_number= 2A;
+--source include/partition_11.inc
+# (negative) String starts with 'A', but a digit follows
+let $part_number= A2;
+--source include/partition_11.inc
+# (negative) string without any digits
+let $part_number= GARBAGE;
+--source include/partition_11.inc
+
+# (negative?) double quotes
+let $part_number= "2";
+--source include/partition_11.inc
+# (negative) String starts with a digit, but 'A' follows
+let $part_number= "2A";
+--source include/partition_11.inc
+# (negative) String starts with 'A', but a digit follows
+let $part_number= "A2";
+--source include/partition_11.inc
+# (negative) string without any digits
+let $part_number= "GARBAGE";
+--source include/partition_11.inc
+
+--echo # 3.3 Mixups of assigned partition/subpartition numbers and names
+--echo # 3.3.1 (positive) number of partition/subpartition
+--echo #       = number of named partition/subpartition
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ;
+SHOW CREATE TABLE t1;
+DROP TABLE t1;
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+--source include/partition_layout.inc
+DROP TABLE t1;
+--echo # 3.3.2 (positive) number of partition/subpartition ,
+--echo #       0 (= no) named partition/subpartition
+--echo #       already checked above
+--echo # 3.3.3 (negative) number of partitions/subpartitions
+--echo #       > number of named partitions/subpartitions
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ;
+# Wrong number of named subpartitions in first partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11 ),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+# Wrong number of named subpartitions in non first/non last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2000)
+ (SUBPARTITION subpart21 ),
+ PARTITION part3 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart31, SUBPARTITION subpart32)
+);
+# Wrong number of named subpartitions in last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) PARTITIONS 2
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 2
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21 )
+);
+--echo # 3.3.4 (negative) number of partitions < number of named partitions
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS 1 ( PARTITION part1, PARTITION part2 ) ;
+# Wrong number of named subpartitions in first partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11, SUBPARTITION subpart12),
+ PARTITION part2 VALUES LESS THAN (2147483647)
+ (SUBPARTITION subpart21, SUBPARTITION subpart22)
+);
+# Wrong number of named subpartitions in non first/non last partition
+--error 1064
+CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) SUBPARTITIONS 1
+( PARTITION part1 VALUES LESS THAN (1000)
+ (SUBPARTITION subpart11,
SUBPARTITION subpart12), + PARTITION part2 VALUES LESS THAN (2000) + (SUBPARTITION subpart21 ), + PARTITION part3 VALUES LESS THAN (2147483647) + (SUBPARTITION subpart31, SUBPARTITION subpart32) +); +# Wrong number of named subpartitions in last partition +--error 1064 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 1 +( PARTITION part1 VALUES LESS THAN (1000) + (SUBPARTITION subpart11, SUBPARTITION subpart12), + PARTITION part2 VALUES LESS THAN (2147483647) + (SUBPARTITION subpart21, SUBPARTITION subpart22) +); + + +--echo +--echo #------------------------------------------------------------------------ +--echo # 4. Checks of logical partition/subpartition name +--echo # file name clashes during CREATE TABLE +--echo #------------------------------------------------------------------------ +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings + +--echo # 4.1 (negative) A partition name used more than once +--error ER_SAME_NAME_PARTITION +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1); +# +if ($fixed_bug15408) +{ +# Bug#15408 Partitions: subpartition names are not unique +--error ER_SAME_NAME_PARTITION +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) + (SUBPARTITION subpart11, SUBPARTITION subpart11) +); +} +--echo # FIXME Implement testcases with filename problems +--echo # existing file of other table --- partition/subpartition file name +--echo # partition/subpartition file name --- file of the same table + +--echo +--echo #------------------------------------------------------------------------ +--echo # 5. Alter table experiments +--echo #------------------------------------------------------------------------ +--disable_warnings +DROP TABLE IF EXISTS t1; +--enable_warnings +--echo # 5.1 alter table add partition +--echo # 5.1.1 (negative) add partition to non partitioned table +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)); +--source include/partition_layout.inc +# MyISAM gets ER_PARTITION_MGMT_ON_NONPARTITIONED and NDB 1005 +# The error code of NDB differs, because all NDB tables are partitioned even +# if the CREATE TABLE does not contain a partitioning clause. 
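+# mysqltest note: the --error directive accepts a comma-separated list of
+# acceptable errors, so the combination below
+#   --error ER_PARTITION_MGMT_ON_NONPARTITIONED,1005
+#   ALTER TABLE t1 ADD PARTITION (PARTITION part1);
+# treats the ALTER as passing when it fails with either of the two errors.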
+--error ER_PARTITION_MGMT_ON_NONPARTITIONED,1005 +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +--source include/partition_layout.inc +DROP TABLE t1; + +--echo # 5.1.2 Add one partition to a table with one partition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1); +--source include/partition_layout.inc +eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1; +--disable_query_log +eval SELECT $engine = 'NDB' INTO @aux; +let $my_exit= `SELECT @aux`; +if ($my_exit) +{ + exit; +} +--enable_query_log +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +--source include/partition_12.inc +DROP TABLE t1; + +--echo # 5.1.3 Several times add one partition to a table with some partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3); +--source include/partition_layout.inc +eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1; +# Partition name before first existing partition name +ALTER TABLE t1 ADD PARTITION (PARTITION part0); +--source include/partition_12.inc +DELETE FROM t1; +eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1; +# Partition name between existing partition names +ALTER TABLE t1 ADD PARTITION (PARTITION part2); +--source include/partition_12.inc +DELETE FROM t1; +eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1; +if ($fixed_bug13447) +{ +# Partition name after all existing partition names +# Bug#13447 Partitions: crash with alter table +ALTER TABLE t1 ADD PARTITION (PARTITION part4); +} +--source include/partition_12.inc +DROP TABLE t1; + +--echo # 5.1.4 Add several partitions to a table with some partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3); +--source include/partition_layout.inc +eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1; +if ($fixed_bug13447) +{ +# Bug#13447 Partitions: crash with alter table +ALTER TABLE t1 ADD PARTITION (PARTITION part0, PARTITION part2, PARTITION part4); +} +--source include/partition_12.inc +DROP TABLE t1; + +--echo # 5.1.5 (negative) Add partitions to a table with some partitions +--echo # clash on new and already existing partition names +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2, PARTITION part3); +# Clash on first/non last partition name +--error ER_SAME_NAME_PARTITION +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +# Clash on non first/non last partition name +--error ER_SAME_NAME_PARTITION +ALTER TABLE t1 ADD PARTITION (PARTITION part2); +# Clash on non first/last partition name +--error ER_SAME_NAME_PARTITION +ALTER TABLE t1 ADD PARTITION (PARTITION part3); +# Clash on all partition names +--error ER_SAME_NAME_PARTITION +ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part2, PARTITION part3); +DROP TABLE t1; + +# FIXME Is there any way to add a subpartition to an already existing partition + +--echo # 5.2 alter table add subpartition +--echo # 5.2.1 Add one subpartition to a table with subpartitioning rule and +--echo # no explicit defined subpartitions +eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +(PARTITION part1 VALUES LESS THAN ($max_row_div2)); +if ($fixed_bug16370) +{ +--source include/partition_layout.inc +} +eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row_div2 - 1; +eval 
ALTER TABLE t1 ADD PARTITION (PARTITION part2 VALUES LESS THAN ($max_int_4)
+ (SUBPARTITION subpart21));
+if ($fixed_bug16370)
+{
+--source include/partition_12.inc
+}
+DROP TABLE t1;
+DROP TABLE if exists t0_template;
diff --git a/mysql-test/include/partition_10.inc b/mysql-test/include/partition_10.inc
new file mode 100644
index 00000000000..74b0fdf7f6a
--- /dev/null
+++ b/mysql-test/include/partition_10.inc
@@ -0,0 +1,73 @@
+# include/partition_10.inc
+#
+# Do some basic checks on a table.
+#
+# FIXME: Do not write the statements and results if the SQL return code is 0
+#        and the result set is as expected. Write a message that everything
+#        is as expected instead.
+#
+# All SELECTs are written so that we get my_value = 1 when everything
+# is as expected.
+#
+
+--source include/partition_layout.inc
+
+####### Variations with multiple records
+# Select on empty table
+SELECT COUNT(*) = 0 AS my_value FROM t1;
+# (mass) Insert of $max_row records
+eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND $max_row;
+# Select
+eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+# (mass) Update $max_row_div4 * 2 + 1 records
+eval UPDATE t1 SET f1 = f1 + $max_row
+WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 AND $max_row_div2 + $max_row_div4;
+# Select
+eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row_div2 + $max_row_div4 + $max_row )
+ AS my_value FROM t1;
+# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+# (mass) Delete $max_row_div4 * 2 + 1 records
+eval DELETE FROM t1
+WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 + $max_row AND $max_row_div2 + $max_row_div4 + $max_row;
+# Select
+eval SELECT (COUNT(*) = $max_row - $max_row_div4 - $max_row_div4 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+# DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+
+####### Variations with single records
+# Insert one record at beginning
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+# Select this record
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+# Insert one record at end
+eval INSERT INTO t1 SET f1 = $max_row + 1, f2 = '#######';
+# Select this record
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Update one record
+eval UPDATE t1 SET f1 = $max_row + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+# Select
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+# Bug #15968: Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
+eval UPDATE t1 SET f1 = 0 - 1, f2 = 'ZZZZZZZ'
+ WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Select
+SELECT COUNT(*) AS my_value FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Delete
+eval DELETE FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+DELETE FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Select
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+
+# Truncate
+TRUNCATE t1;
+# Select on empty table
+SELECT COUNT(*) = 0 AS my_value FROM t1;
diff --git a/mysql-test/include/partition_11.inc b/mysql-test/include/partition_11.inc
new file mode 100644
index 00000000000..7ed4d882aa0
--- /dev/null
+++ b/mysql-test/include/partition_11.inc
@@ -0,0 +1,34 @@
+# include/partition_11.inc
+#
+# Try to create a table with the given partition number
+#
+
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY HASH(f1) PARTITIONS $part_number;
+--disable_query_log
+eval SET @my_errno= $mysql_errno ;
+let $run= `SELECT @my_errno = 0`;
+--enable_query_log
+#
+# If this operation was successful, check + drop this table
+if ($run)
+{
+ --source include/partition_10.inc
+ eval DROP TABLE t1;
+}
+#### Try to create a table with the given subpartition number
+eval CREATE TABLE t1 ( f1 INTEGER, f2 char(20))
+PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1)
+SUBPARTITIONS $part_number
+(PARTITION part1 VALUES LESS THAN ($max_row_div2), PARTITION part2 VALUES LESS THAN ($max_int_4));
+--disable_query_log
+eval SET @my_errno= $mysql_errno ;
+let $run= `SELECT @my_errno = 0`;
+--enable_query_log
+#
+# If this operation was successful, check + drop this table
+if ($run)
+{
+ --source include/partition_10.inc
+ eval DROP TABLE t1;
+}
diff --git a/mysql-test/include/partition_12.inc b/mysql-test/include/partition_12.inc
new file mode 100644
index 00000000000..2a5610b82e1
--- /dev/null
+++ b/mysql-test/include/partition_12.inc
@@ -0,0 +1,65 @@
+# include/partition_12.inc
+#
+# Do some basic operations on a table if the SQL command executed just before
+# sourcing this file was successful.
+#
+
+--source include/partition_layout.inc
+
+####### Variations with multiple records
+ # (mass) Insert max_row_div2 + 1 records
+ eval INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN $max_row_div2 AND $max_row;
+ # Select
+ eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+ # DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+ # (mass) Update $max_row_div4 * 2 + 1 records
+ eval UPDATE t1 SET f1 = f1 + $max_row
+ WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 AND $max_row_div2 + $max_row_div4;
+ # Select
+ eval SELECT (COUNT(*) = $max_row) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row_div2 + $max_row_div4 + $max_row )
+ AS my_value FROM t1;
+ # DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+ # (mass) Delete $max_row_div4 * 2 + 1 records
+ eval DELETE FROM t1
+ WHERE f1 BETWEEN $max_row_div2 - $max_row_div4 + $max_row AND $max_row_div2 + $max_row_div4 + $max_row;
+ # Select
+ eval SELECT (COUNT(*) = $max_row - $max_row_div4 - $max_row_div4 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = $max_row)
+ AS my_value FROM t1;
+ # DEBUG SELECT COUNT(*),MIN(f1),MAX(f1) FROM t1;
+
+####### Variations with single records
+# Insert one record at beginning
+INSERT INTO t1 SET f1 = 0 , f2 = '#######';
+# Select this record
+SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######';
+# Insert one record at end
+eval INSERT INTO t1 SET f1 = $max_row + 1, f2 = '#######';
+# Select this record
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Update one record
+eval UPDATE t1 SET f1 = $max_row + 2, f2 = 'ZZZZZZZ'
+ WHERE f1 = 0 AND f2 = '#######';
+# Select
+eval SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+# Bug #15968: Partitions: crash when INSERT with f1 = -1 into PARTITION BY HASH(f1)
+eval UPDATE t1 SET f1 = 0 - 1, f2 = 'ZZZZZZZ'
+ WHERE f1 = $max_row + 1 AND f2 = '#######';
+# Select
+SELECT COUNT(*) AS my_value FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Delete
+eval DELETE FROM t1 WHERE f1 = $max_row + 2 AND f2 = 'ZZZZZZZ';
+if ($fixed_bug15968)
+{
+DELETE FROM t1 WHERE f1 = 0 - 1 AND f2 = 'ZZZZZZZ';
+}
+# Select
+SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ';
+
+# Truncate
+TRUNCATE t1;
+# Select on empty table
+SELECT COUNT(*) = 0 AS my_value FROM t1;
diff --git
a/mysql-test/include/partition_layout.inc b/mysql-test/include/partition_layout.inc
new file mode 100644
index 00000000000..0a59c23fafd
--- /dev/null
+++ b/mysql-test/include/partition_layout.inc
@@ -0,0 +1,13 @@
+# include/partition_layout.inc
+#
+# Print partitioning-related information about the table t1
+#
+
+eval SHOW CREATE TABLE t1;
+
+# Optional (most probably issues with separators and case sensitivity)
+# listing of files belonging to the table t1
+if ($ls)
+{
+ --exec ls var/master-data/test/t1*
+}
diff --git a/mysql-test/include/system_db_struct.inc b/mysql-test/include/system_db_struct.inc
index 9e8886377fc..4c80161bb85 100644
--- a/mysql-test/include/system_db_struct.inc
+++ b/mysql-test/include/system_db_struct.inc
@@ -13,3 +13,5 @@ show create table columns_priv;
 show create table procs_priv;
 show create table proc;
 show create table event;
+show create table general_log;
+show create table slow_log;
diff --git a/mysql-test/lib/init_db.sql b/mysql-test/lib/init_db.sql
index ef6383f6680..cef7933d808 100644
--- a/mysql-test/lib/init_db.sql
+++ b/mysql-test/lib/init_db.sql
@@ -570,6 +570,10 @@ CREATE TABLE proc (
 ) character set utf8 comment='Stored Procedures';
+CREATE PROCEDURE create_log_tables() BEGIN DECLARE is_csv_enabled int DEFAULT 0; SELECT @@have_csv = 'YES' INTO is_csv_enabled; IF (is_csv_enabled) THEN CREATE TABLE general_log (event_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT, thread_id INTEGER, server_id INTEGER, command_type VARCHAR(64), argument MEDIUMTEXT) engine=CSV CHARACTER SET utf8 comment='General log'; CREATE TABLE slow_log (start_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT NOT NULL, query_time TIME NOT NULL, lock_time TIME NOT NULL, rows_sent INTEGER NOT NULL, rows_examined INTEGER NOT NULL, db VARCHAR(512), last_insert_id INTEGER, insert_id INTEGER, server_id INTEGER, sql_text MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment='Slow log'; END IF; END;
+CALL create_log_tables();
+DROP PROCEDURE create_log_tables;
+
 CREATE TABLE event (
 db char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '',
 name char(64) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL default '',
diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl
index 792a495bc4f..e0bbeec7a87 100755
--- a/mysql-test/mysql-test-run.pl
+++ b/mysql-test/mysql-test-run.pl
@@ -517,6 +517,7 @@ sub command_line_setup () {
 $opt_master_myport= $ENV{'MTR_BUILD_THREAD'} * 40 + 8120;
 $opt_slave_myport= $opt_master_myport + 16;
 $opt_ndbcluster_port= $opt_master_myport + 24;
+ $opt_ndbcluster_port_slave= $opt_master_myport + 32;
 $im_port= $opt_master_myport + 10;
 $im_mysqld1_port= $opt_master_myport + 12;
 $im_mysqld2_port= $opt_master_myport + 14;
@@ -711,6 +712,8 @@ sub command_line_setup () {
 $glob_use_embedded_server= 1;
 push(@glob_test_mode, "embedded");
 $opt_skip_rpl= 1; # We never run replication with embedded
+ $opt_skip_ndbcluster= 1; # Avoid auto detection
+ $opt_skip_ssl= 1;
 if ( $opt_extern )
 {
@@ -909,6 +912,7 @@ sub command_line_setup () {
 path_datadir => "$opt_vardir/im_mysqld_1.data",
 path_sock => "$sockdir/mysqld_1.sock",
 path_pid => "$opt_vardir/run/mysqld_1.pid",
+ old_log_format => 1
 };
 $instance_manager->{'instances'}->[1]=
@@ -919,6 +923,7 @@
 path_datadir => "$opt_vardir/im_mysqld_2.data",
 path_sock => "$sockdir/mysqld_2.sock",
 path_pid => "$opt_vardir/run/mysqld_2.pid",
 nonguarded => 1,
+ old_log_format => 1
 };
 if ( $opt_extern )
@@ -1118,6 +1123,9 @@ sub environment_setup () {
 # $ENV{'MYSQL_TCP_PORT'}= '@MYSQL_TCP_PORT@'; # FIXME
 $ENV{'MYSQL_TCP_PORT'}= 3306;
+
$ENV{'NDBCLUSTER_PORT'}= $opt_ndbcluster_port; + $ENV{'NDBCLUSTER_PORT_SLAVE'}=$opt_ndbcluster_port_slave; + $ENV{'IM_PATH_PID'}= $instance_manager->{path_pid}; $ENV{'IM_MYSQLD1_SOCK'}= $instance_manager->{instances}->[0]->{path_sock}; @@ -1142,7 +1150,8 @@ sub environment_setup () { print "Using MASTER_MYPORT = $ENV{MASTER_MYPORT}\n"; print "Using MASTER_MYPORT1 = $ENV{MASTER_MYPORT1}\n"; print "Using SLAVE_MYPORT = $ENV{SLAVE_MYPORT}\n"; - print "Using NDBCLUSTER_PORT = $opt_ndbcluster_port\n"; + print "Using NDBCLUSTER_PORT = $ENV{NDBCLUSTER_PORT}\n"; + print "Using NDBCLUSTER_PORT_SLAVE = $ENV{NDBCLUSTER_PORT_SLAVE}\n"; print "Using IM_MYSQLD1_PORT = $ENV{'IM_MYSQLD1_PORT'}\n"; print "Using IM_MYSQLD2_PORT = $ENV{'IM_MYSQLD2_PORT'}\n"; } @@ -1364,7 +1373,8 @@ sub ndbcluster_install () { "--data-dir=$opt_vardir", "--verbose=2", $ndbcluster_opts, - "--initial"], + "--initial", + "--relative-config-data-dir"], "", "", "", "") ) { mtr_error("Error ndbcluster_install"); @@ -1456,7 +1466,8 @@ sub ndbcluster_install_slave () { "--verbose=2", "--small", "--ndbd-nodes=1", - "--initial"], + "--initial", + "--relative-config-data-dir"], "", "", "", "") ) { mtr_error("Error ndbcluster_install_slave"); @@ -1821,6 +1832,7 @@ EOF ; print OUT "nonguarded\n" if $instance->{'nonguarded'}; + print OUT "old-log-format\n" if $instance->{'old_log_format'}; print OUT "\n"; } diff --git a/mysql-test/mysql-test-run.sh b/mysql-test/mysql-test-run.sh index 5d583f506ba..7fc88d79e9a 100644 --- a/mysql-test/mysql-test-run.sh +++ b/mysql-test/mysql-test-run.sh @@ -268,9 +268,11 @@ USE_RUNNING_SERVER=0 USE_NDBCLUSTER=@USE_NDBCLUSTER@ USE_NDBCLUSTER_SLAVE=@USE_NDBCLUSTER@ USE_NDBCLUSTER_ALL=0 +USE_NDBCLUSTER_ONLY=0 USE_RUNNING_NDBCLUSTER="" USE_RUNNING_NDBCLUSTER_SLAVE="" NDB_EXTRA_TEST=0 +NDB_VERBOSE=0 NDBCLUSTER_EXTRA_OPTS="" USE_PURIFY="" PURIFY_LOGS="" @@ -297,6 +299,7 @@ TEST_MODE="" NDB_MGM_EXTRA_OPTS= NDB_MGMD_EXTRA_OPTS= NDBD_EXTRA_OPTS= +MASTER_MYSQLDBINLOG=1 SLAVE_MYSQLDBINLOG=1 DO_STRESS="" @@ -316,6 +319,8 @@ while test $# -gt 0; do USE_EMBEDDED_SERVER=1 USE_MANAGER=0 NO_SLAVE=1 USE_RUNNING_SERVER=0 + USE_NDBCLUSTER="" + USE_NDBCLUSTER_SLAVE="" TEST_MODE="$TEST_MODE embedded" ;; --purify) USE_PURIFY=1 @@ -341,6 +346,10 @@ while test $# -gt 0; do USE_NDBCLUSTER="--ndbcluster" USE_NDBCLUSTER_SLAVE="--ndbcluster" USE_NDBCLUSTER_ALL=1 ;; + --with-ndbcluster-only) + USE_NDBCLUSTER="--ndbcluster" + USE_NDBCLUSTER_SLAVE="--ndbcluster" + USE_NDBCLUSTER_ONLY=1 ;; --ndb-connectstring=*) USE_NDBCLUSTER="--ndbcluster" ; USE_RUNNING_NDBCLUSTER=`$ECHO "$1" | $SED -e "s;--ndb-connectstring=;;"` ;; @@ -351,6 +360,8 @@ while test $# -gt 0; do NDBCLUSTER_EXTRA_OPTS=" " NDB_EXTRA_TEST=1 ; ;; + --ndb-verbose) + NDB_VERBOSE=2 ;; --ndb_mgm-extra-opts=*) NDB_MGM_EXTRA_OPTS=`$ECHO "$1" | $SED -e "s;--ndb_mgm-extra-opts=;;"` ;; --ndb_mgmd-extra-opts=*) @@ -543,6 +554,7 @@ while test $# -gt 0; do EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --skip-ndbcluster" EXTRA_SLAVE_MYSQLD_OPT="$EXTRA_SLAVE_MYSQLD_OPT --skip-ndbcluster" ;; + --skip-master-binlog) MASTER_MYSQLDBINLOG=0 ;; --skip-slave-binlog) SLAVE_MYSQLDBINLOG=0 ;; --skip-*) EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT $1" @@ -1266,8 +1278,10 @@ start_ndbcluster() then NDBCLUSTER_EXTRA_OPTS="--small" fi - OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --verbose=2 --initial" - echo "Starting master ndbcluster " $OPTS + OPTS="$NDBCLUSTER_OPTS $NDBCLUSTER_EXTRA_OPTS --verbose=$NDB_VERBOSE --initial --relative-config-data-dir" + if [ "x$NDB_VERBOSE" != "x0" ] ; then + 
echo "Starting master ndbcluster " $OPTS + fi ./ndb/ndbcluster $OPTS || NDB_STATUS_OK=0 if [ x$NDB_STATUS_OK != x1 ] ; then if [ x$FORCE != x1 ] ; then @@ -1351,9 +1365,13 @@ start_master() then CURR_MASTER_MYSQLD_TRACE="$EXTRA_MASTER_MYSQLD_TRACE$1" fi + if [ x$MASTER_MYSQLDBINLOG = x1 ] + then + EXTRA_MASTER_MYSQLD_OPT="$EXTRA_MASTER_MYSQLD_OPT --log-bin=$MYSQL_TEST_DIR/var/log/master-bin$1" + fi if [ -z "$DO_BENCH" -a -z "$DO_STRESS" ] then - master_args="--no-defaults --log-bin=$MYSQL_TEST_DIR/var/log/master-bin$1 \ + master_args="--no-defaults \ --server-id=$id \ --basedir=$MY_BASEDIR \ --port=$this_master_myport \ @@ -1379,7 +1397,7 @@ start_master() $EXTRA_MASTER_MYSQLD_OPT $EXTRA_MASTER_OPT \ $NOT_FIRST_MASTER_EXTRA_OPTS $CURR_MASTER_MYSQLD_TRACE" else - master_args="--no-defaults --log-bin=$MYSQL_TEST_DIR/var/log/master-bin$1 \ + master_args="--no-defaults \ --server-id=$id --rpl-recovery-rank=1 \ --basedir=$MY_BASEDIR --init-rpl-role=master \ --port=$this_master_myport \ @@ -1493,8 +1511,10 @@ start_slave() NDBCLUSTER_EXTRA_OPTS="--small" fi - OPTS="$NDBCLUSTER_OPTS_SLAVE --initial $NDBCLUSTER_EXTRA_OPTS --ndbd-nodes=1 --verbose=2" - echo "Starting slave ndbcluster " $OPTS + OPTS="$NDBCLUSTER_OPTS_SLAVE --initial $NDBCLUSTER_EXTRA_OPTS --ndbd-nodes=1 --verbose=$NDB_VERBOSE --relative-config-data-dir" + if [ "x$NDB_VERBOSE" != "x0" ] ; then + echo "Starting slave ndbcluster " $OPTS + fi ./ndb/ndbcluster $OPTS \ || NDB_SLAVE_STATUS_OK=0 # > /dev/null 2>&1 || NDB_SLAVE_STATUS_OK=0 @@ -1750,6 +1770,10 @@ run_testcase () if [ "x$USE_NDBCLUSTER_ALL" = "x1" ] ; then NDBCLUSTER_TEST=1 fi + if [ "x$USE_NDBCLUSTER_ONLY" = "x1" -a "x$NDBCLUSTER_TEST" != "x1" ] ; then + skip_test $tname + return + fi if [ "$USE_MANAGER" = 1 ] ; then many_slaves=`$EXPR \( \( $tname : rpl_failsafe \) != 0 \) \| \( \( $tname : rpl_chain_temp_table \) != 0 \)` fi diff --git a/mysql-test/ndb/ndbcluster.sh b/mysql-test/ndb/ndbcluster.sh index bc5d1000fe5..fc1d12bac53 100644 --- a/mysql-test/ndb/ndbcluster.sh +++ b/mysql-test/ndb/ndbcluster.sh @@ -52,6 +52,7 @@ initial_ndb= status_ndb= ndb_diskless=0 ndbd_nodes=2 +relative_config_data_dir= ndb_no_ord=512 ndb_no_attr=2048 @@ -89,7 +90,7 @@ while test $# -gt 0; do --small) ndb_no_ord=32 ndb_con_op=5000 - ndb_dmem=10M + ndb_dmem=20M ndb_imem=1M ndb_pbmem=4M ;; @@ -99,6 +100,9 @@ while test $# -gt 0; do --data-dir=*) fsdir=`echo "$1" | sed -e "s;--data-dir=;;"` ;; + --relative-config-data-dir) + relative_config_data_dir=1 + ;; --port=*) port=`echo "$1" | sed -e "s;--port=;;"` ;; @@ -196,6 +200,11 @@ fi # Start management server as deamon # Edit file system path and ports in config file +if [ $relative_config_data_dir ] ; then + config_fs_ndb="." 
+else + config_fs_ndb=$fs_ndb +fi if [ $initial_ndb ] ; then rm -rf $fs_ndb/ndb_* 2>&1 | cat > /dev/null sed \ @@ -206,7 +215,7 @@ if [ $initial_ndb ] ; then -e s,"CHOOSE_IndexMemory","$ndb_imem",g \ -e s,"CHOOSE_Diskless","$ndb_diskless",g \ -e s,"CHOOSE_HOSTNAME_".*,"$ndb_host",g \ - -e s,"CHOOSE_FILESYSTEM","$fs_ndb",g \ + -e s,"CHOOSE_FILESYSTEM","$config_fs_ndb",g \ -e s,"CHOOSE_PORT_MGM","$ndb_mgmd_port",g \ -e s,"CHOOSE_DiskPageBufferMemory","$ndb_pbmem",g \ < "$config_ini" \ diff --git a/mysql-test/r/connect.result b/mysql-test/r/connect.result index ed017641aa9..862260346f5 100644 --- a/mysql-test/r/connect.result +++ b/mysql-test/r/connect.result @@ -5,6 +5,7 @@ columns_priv db event func +general_log help_category help_keyword help_relation @@ -13,6 +14,7 @@ host plugin proc procs_priv +slow_log tables_priv time_zone time_zone_leap_second @@ -34,6 +36,7 @@ columns_priv db event func +general_log help_category help_keyword help_relation @@ -42,6 +45,7 @@ host plugin proc procs_priv +slow_log tables_priv time_zone time_zone_leap_second @@ -71,6 +75,7 @@ columns_priv db event func +general_log help_category help_keyword help_relation @@ -79,6 +84,7 @@ host plugin proc procs_priv +slow_log tables_priv time_zone time_zone_leap_second diff --git a/mysql-test/r/csv.result b/mysql-test/r/csv.result index 3c87c1f4b92..6f58fdfe54a 100644 --- a/mysql-test/r/csv.result +++ b/mysql-test/r/csv.result @@ -4976,6 +4976,23 @@ c1 4 5 DROP TABLE bug14672; +CREATE TABLE test_concurrent_insert ( val integer ) ENGINE = CSV; +LOCK TABLES test_concurrent_insert READ LOCAL; +INSERT INTO test_concurrent_insert VALUES (1); +SELECT * FROM test_concurrent_insert; +val +1 +SELECT * FROM test_concurrent_insert; +val +UNLOCK TABLES; +LOCK TABLES test_concurrent_insert WRITE; +INSERT INTO test_concurrent_insert VALUES (2); +SELECT * FROM test_concurrent_insert; +val +1 +2 +UNLOCK TABLES; +DROP TABLE test_concurrent_insert; create table t1 (a int) engine=csv; insert t1 values (1); delete from t1; diff --git a/mysql-test/r/ctype_cp932_notembedded.result b/mysql-test/r/ctype_cp932_notembedded.result new file mode 100644 index 00000000000..913e8125619 --- /dev/null +++ b/mysql-test/r/ctype_cp932_notembedded.result @@ -0,0 +1,17 @@ +drop table if exists t1; +set names cp932; +set character_set_database = cp932; +RESET MASTER; +CREATE TABLE t1(f1 blob); +PREPARE stmt1 FROM 'INSERT INTO t1 VALUES(?)'; +SET @var1= x'8300'; +EXECUTE stmt1 USING @var1; +SHOW BINLOG EVENTS FROM 102; +Log_name Pos Event_type Server_id End_log_pos Info +master-bin.000001 # Query 1 # use `test`; CREATE TABLE t1(f1 blob) +master-bin.000001 # Table_map 1 # test.t1 +master-bin.000001 # Write_rows 1 # +SELECT HEX(f1) FROM t1; +HEX(f1) +8300 +DROP table t1; diff --git a/mysql-test/r/im_utils.result b/mysql-test/r/im_utils.result index fbfaeaebcac..e204affc8ec 100644 --- a/mysql-test/r/im_utils.result +++ b/mysql-test/r/im_utils.result @@ -21,6 +21,7 @@ skip-stack-trace VALUE skip-innodb VALUE skip-bdb VALUE skip-ndbcluster VALUE +old-log-format VALUE SHOW INSTANCE OPTIONS mysqld2; option_name value instance_name VALUE @@ -41,6 +42,7 @@ skip-stack-trace VALUE skip-innodb VALUE skip-bdb VALUE skip-ndbcluster VALUE +old-log-format VALUE START INSTANCE mysqld2; STOP INSTANCE mysqld2; SHOW mysqld1 LOG FILES; diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index 651dc3fdc96..8778ded244f 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -62,6 +62,7 @@ 
columns_priv db event func +general_log help_category help_keyword help_relation @@ -70,6 +71,7 @@ host plugin proc procs_priv +slow_log tables_priv time_zone time_zone_leap_second @@ -732,7 +734,7 @@ CREATE TABLE t_crashme ( f1 BIGINT); CREATE VIEW a1 (t_CRASHME) AS SELECT f1 FROM t_crashme GROUP BY f1; CREATE VIEW a2 AS SELECT t_CRASHME FROM a1; count(*) -107 +109 drop view a2, a1; drop table t_crashme; select table_schema,table_name, column_name from @@ -816,7 +818,7 @@ SELECT table_schema, count(*) FROM information_schema.TABLES GROUP BY TABLE_SCHE table_schema count(*) cluster_replication 1 information_schema 19 -mysql 19 +mysql 21 create table t1 (i int, j int); create trigger trg1 before insert on t1 for each row begin diff --git a/mysql-test/r/innodb.result b/mysql-test/r/innodb.result index ed4c14388ab..577e6a7192a 100644 --- a/mysql-test/r/innodb.result +++ b/mysql-test/r/innodb.result @@ -2635,25 +2635,6 @@ checksum table t1; Table Checksum test.t1 2050879373 drop table t1; -create table t1 (col1 integer primary key, col2 integer) engine=innodb; -insert t1 values (1,100); -create function f1 () returns integer begin -declare var1 int; -select col2 into var1 from t1 where col1=1 for update; -return var1; -end| -start transaction; -select f1(); -f1() -100 - update t1 set col2=0 where col1=1; -select * from t1; -col1 col2 -1 100 -rollback; -rollback; -drop table t1; -drop function f1; create table t1 ( a int, b char(10), c char(10), filler char(10), primary key(a, b(2)), unique key (a, c(2)) ) character set utf8 engine = innodb; diff --git a/mysql-test/r/innodb_notembedded.result b/mysql-test/r/innodb_notembedded.result new file mode 100644 index 00000000000..fc71468b97b --- /dev/null +++ b/mysql-test/r/innodb_notembedded.result @@ -0,0 +1,19 @@ +create table t1 (col1 integer primary key, col2 integer) engine=innodb; +insert t1 values (1,100); +create function f1 () returns integer begin +declare var1 int; +select col2 into var1 from t1 where col1=1 for update; +return var1; +end| +start transaction; +select f1(); +f1() +100 + update t1 set col2=0 where col1=1; +select * from t1; +col1 col2 +1 100 +rollback; +rollback; +drop table t1; +drop function f1; diff --git a/mysql-test/r/loaddata.result b/mysql-test/r/loaddata.result index a52952ffdb5..0246ca5f71d 100644 --- a/mysql-test/r/loaddata.result +++ b/mysql-test/r/loaddata.result @@ -139,3 +139,30 @@ a b c 10 NULL Ten 15 NULL Fifteen drop table t1, t2; +CREATE TABLE t1 (a int); +INSERT INTO t1 VALUES (1); +SET NAMES latin1; +SET character_set_filesystem=filename; +select @@character_set_filesystem; +@@character_set_filesystem +filename +SELECT * INTO OUTFILE 't-1' FROM t1; +DELETE FROM t1; +LOAD DATA INFILE 't-1' INTO TABLE t1; +SELECT * FROM t1; +a +1 +DELETE FROM t1; +SET character_set_filesystem=latin1; +select @@character_set_filesystem; +@@character_set_filesystem +latin1 +LOAD DATA INFILE 't@002d1' INTO TABLE t1; +SELECT * FROM t1; +a +1 +DROP TABLE t1; +SET character_set_filesystem=default; +select @@character_set_filesystem; +@@character_set_filesystem +binary diff --git a/mysql-test/r/log_tables.result b/mysql-test/r/log_tables.result new file mode 100644 index 00000000000..caaf0367bb7 --- /dev/null +++ b/mysql-test/r/log_tables.result @@ -0,0 +1,54 @@ +use mysql; +truncate table general_log; +select * from general_log; +event_time user_host thread_id server_id command_type argument +TIMESTAMP root[root] @ localhost [] 1 1 Query select * from general_log +truncate table slow_log; +select * from slow_log; +start_time 
user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text +truncate table general_log; +select * from general_log where argument like '%general_log%'; +event_time user_host thread_id server_id command_type argument +TIMESTAMP root[root] @ localhost [] 1 1 Query select * from general_log where argument like '%general_log%' +create table join_test (verbose_comment varchar (80), command_type varchar(64)); +insert into join_test values ("User performed a usual SQL query", "Query"); +insert into join_test values ("New DB connection was registered", "Connect"); +insert into join_test values ("Get the table info", "Field List"); +select verbose_comment, user_host, argument +from mysql.general_log join join_test +on (mysql.general_log.command_type = join_test.command_type); +verbose_comment user_host argument +User performed a usual SQL query root[root] @ localhost [] select * from general_log where argument like '%general_log%' +User performed a usual SQL query root[root] @ localhost [] create table join_test (verbose_comment varchar (80), command_type varchar(64)) +User performed a usual SQL query root[root] @ localhost [] insert into join_test values ("User performed a usual SQL query", "Query") +User performed a usual SQL query root[root] @ localhost [] insert into join_test values ("New DB connection was registered", "Connect") +User performed a usual SQL query root[root] @ localhost [] insert into join_test values ("Get the table info", "Field List") +User performed a usual SQL query root[root] @ localhost [] select verbose_comment, user_host, argument +from mysql.general_log join join_test +on (mysql.general_log.command_type = join_test.command_type) +drop table join_test; +flush logs; +lock tables mysql.general_log WRITE; +ERROR HY000: You can't write-lock a log table. Only read access is possible. +lock tables mysql.slow_log WRITE; +ERROR HY000: You can't write-lock a log table. Only read access is possible. +lock tables mysql.general_log READ; +ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead. +lock tables mysql.slow_log READ; +ERROR HY000: You can't use usual read lock with log tables. Try READ LOCAL instead. 
+lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL; +unlock tables; +lock tables mysql.general_log READ LOCAL; + flush logs; +unlock tables; +select "Mark that we woke up from flush logs in the test" + as "test passed"; +test passed +Mark that we woke up from flush logs in the test +lock tables mysql.general_log READ LOCAL; + truncate mysql.general_log; +unlock tables; +select "Mark that we woke up from TRUNCATE in the test" + as "test passed"; +test passed +Mark that we woke up from TRUNCATE in the test diff --git a/mysql-test/r/mysqlcheck.result b/mysql-test/r/mysqlcheck.result index e6251dd9422..bd4bbc15a4f 100644 --- a/mysql-test/r/mysqlcheck.result +++ b/mysql-test/r/mysqlcheck.result @@ -3,6 +3,8 @@ mysql.columns_priv OK mysql.db OK mysql.event OK mysql.func OK +mysql.general_log +note : The storage engine for the table doesn't support optimize mysql.help_category OK mysql.help_keyword OK mysql.help_relation OK @@ -11,6 +13,8 @@ mysql.host OK mysql.plugin OK mysql.proc OK mysql.procs_priv OK +mysql.slow_log +note : The storage engine for the table doesn't support optimize mysql.tables_priv OK mysql.time_zone OK mysql.time_zone_leap_second OK @@ -22,6 +26,8 @@ mysql.columns_priv OK mysql.db OK mysql.event OK mysql.func OK +mysql.general_log +note : The storage engine for the table doesn't support optimize mysql.help_category OK mysql.help_keyword OK mysql.help_relation OK @@ -30,6 +36,8 @@ mysql.host OK mysql.plugin OK mysql.proc OK mysql.procs_priv OK +mysql.slow_log +note : The storage engine for the table doesn't support optimize mysql.tables_priv OK mysql.time_zone OK mysql.time_zone_leap_second OK diff --git a/mysql-test/r/mysqltest.result b/mysql-test/r/mysqltest.result index ed474265b9b..3e1bf1946ff 100644 --- a/mysql-test/r/mysqltest.result +++ b/mysql-test/r/mysqltest.result @@ -1,6 +1,6 @@ -select -1 as "before_use_test" ; +select 0 as "before_use_test" ; before_use_test --1 +0 select otto from (select 1 as otto) as t1; otto 1 diff --git a/mysql-test/r/ndb_autodiscover.result b/mysql-test/r/ndb_autodiscover.result index 813e37e8892..cb85c4ac873 100644 --- a/mysql-test/r/ndb_autodiscover.result +++ b/mysql-test/r/ndb_autodiscover.result @@ -110,7 +110,7 @@ t3 CREATE TABLE `t3` ( `id` int(11) NOT NULL, `name` char(255) default NULL, PRIMARY KEY (`id`) -) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () select * from t3; id name 1 Explorer diff --git a/mysql-test/r/ndb_bitfield.result b/mysql-test/r/ndb_bitfield.result index 0b7d09dd9b9..b719a2c220e 100644 --- a/mysql-test/r/ndb_bitfield.result +++ b/mysql-test/r/ndb_bitfield.result @@ -9,7 +9,7 @@ t1 CREATE TABLE `t1` ( `pk1` int(11) NOT NULL, `b` bit(64) default NULL, PRIMARY KEY (`pk1`) -) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () insert into t1 values (0,b'1111111111111111111111111111111111111111111111111111111111111111'), (1,b'1000000000000000000000000000000000000000000000000000000000000000'), diff --git a/mysql-test/r/ndb_config.result b/mysql-test/r/ndb_config.result index 82d760c6cfc..d2a8a91828c 100644 --- a/mysql-test/r/ndb_config.result +++ b/mysql-test/r/ndb_config.result @@ -1,7 +1,7 @@ ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4, mysqld,5, mysqld,6, mysqld,7, mysqld,8, mysqld,9, mysqld,10, mysqld,11, -1,localhost,10485760,1048576 2,localhost,10485760,1048576 -1 localhost 10485760 1048576 -2 localhost 10485760 1048576 +1,localhost,20971520,1048576 
2,localhost,20971520,1048576 +1 localhost 20971520 1048576 +2 localhost 20971520 1048576 1 2 ndbd,1,localhost ndbd,2,localhost ndb_mgmd,3,localhost mysqld,4, mysqld,5, mysqld,6, mysqld,7, mysqld,8, mysqld,9, mysqld,10, mysqld,11, ndbd,1,localhost,52428800,26214400 ndbd,2,localhost,52428800,36700160 ndbd,3,localhost,52428800,52428800 ndbd,4,localhost,52428800,52428800 ndb_mgmd,5,localhost,, mysqld,6,localhost,, diff --git a/mysql-test/r/ndb_gis.result b/mysql-test/r/ndb_gis.result index f49572b893b..5f8eb299093 100644 --- a/mysql-test/r/ndb_gis.result +++ b/mysql-test/r/ndb_gis.result @@ -13,7 +13,7 @@ Table Create Table gis_point CREATE TABLE `gis_point` ( `fid` int(11) default NULL, `g` point default NULL -) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () SHOW FIELDS FROM gis_point; Field Type Null Key Default Extra fid int(11) YES NULL @@ -471,7 +471,7 @@ Table Create Table gis_point CREATE TABLE `gis_point` ( `fid` int(11) default NULL, `g` point default NULL -) ENGINE=NDBCLUSTER DEFAULT CHARSET=latin1 +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () SHOW FIELDS FROM gis_point; Field Type Null Key Default Extra fid int(11) YES NULL diff --git a/mysql-test/r/ndb_partition_key.result b/mysql-test/r/ndb_partition_key.result index 415b9d37b4d..3cfbca63c1f 100644 --- a/mysql-test/r/ndb_partition_key.result +++ b/mysql-test/r/ndb_partition_key.result @@ -80,3 +80,12 @@ t1 CREATE TABLE `t1` ( PRIMARY KEY USING HASH (`a`,`b`,`c`) ) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY (b) DROP TABLE t1; +CREATE TABLE t1 (a int not null primary key) +PARTITION BY KEY(a) +(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); +drop table t1; +CREATE TABLE t1 (a int not null primary key); +ALTER TABLE t1 +PARTITION BY KEY(a) +(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); +drop table t1; diff --git a/mysql-test/r/ndb_restore.result b/mysql-test/r/ndb_restore.result index d86d0bdb58b..b151eebde08 100644 --- a/mysql-test/r/ndb_restore.result +++ b/mysql-test/r/ndb_restore.result @@ -1,6 +1,14 @@ use test; drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +id type state logging database schema name +2 UserTable Online Yes cluster_replication def apply_status +1 SystemTable Online Yes sys def NDB$EVENTS_0 +3 UserTable Online Yes cluster_replication def schema +0 SystemTable Online Yes sys def SYSTAB_0 + +NDBT_ProgramExit: 0 - OK + CREATE TABLE `t1_c` ( `capgoaledatta` smallint(5) unsigned NOT NULL auto_increment, `goaledatta` char(2) NOT NULL default '', @@ -225,6 +233,224 @@ from (select * from t9 union select * from t9_c) a; count(*) 3 +ALTER TABLE t1_c +PARTITION BY RANGE (`capgoaledatta`) +(PARTITION p0 VALUES LESS THAN MAXVALUE); +ALTER TABLE t2_c +PARTITION BY LIST(`capgotod`) +(PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); +ALTER TABLE t3_c +PARTITION BY HASH (`CapGoaledatta`); +ALTER TABLE t5_c +PARTITION BY HASH (`capfa`) +PARTITIONS 4; +ALTER TABLE t6_c +PARTITION BY LINEAR HASH (`relatta`) +PARTITIONS 4; +ALTER TABLE t7_c +PARTITION BY LINEAR KEY (`dardtestard`); +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +select count(*) from t1; +count(*) +5 +select count(*) from t1_c; +count(*) +5 +select count(*) +from (select * from t1 union +select * from t1_c) a; +count(*) +5 +select count(*) from t2; +count(*) +6 +select count(*) from t2_c; +count(*) +6 +select count(*) +from (select * from t2 union +select * from t2_c) a; +count(*) +6 
+select count(*) from t3; +count(*) +4 +select count(*) from t3_c; +count(*) +4 +select count(*) +from (select * from t3 union +select * from t3_c) a; +count(*) +4 +select count(*) from t4; +count(*) +22 +select count(*) from t4_c; +count(*) +22 +select count(*) +from (select * from t4 union +select * from t4_c) a; +count(*) +22 +select count(*) from t5; +count(*) +3 +select count(*) from t5_c; +count(*) +3 +select count(*) +from (select * from t5 union +select * from t5_c) a; +count(*) +3 +select count(*) from t6; +count(*) +8 +select count(*) from t6_c; +count(*) +8 +select count(*) +from (select * from t6 union +select * from t6_c) a; +count(*) +8 +select count(*) from t7; +count(*) +5 +select count(*) from t7_c; +count(*) +5 +select count(*) +from (select * from t7 union +select * from t7_c) a; +count(*) +5 +select count(*) from t8; +count(*) +3 +select count(*) from t8_c; +count(*) +3 +select count(*) +from (select * from t8 union +select * from t8_c) a; +count(*) +3 +select count(*) from t9; +count(*) +3 +select count(*) from t9_c; +count(*) +3 +select count(*) +from (select * from t9 union +select * from t9_c) a; +count(*) +3 +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +select count(*) from t1; +count(*) +5 +select count(*) from t1_c; +count(*) +5 +select count(*) +from (select * from t1 union +select * from t1_c) a; +count(*) +5 +select count(*) from t2; +count(*) +6 +select count(*) from t2_c; +count(*) +6 +select count(*) +from (select * from t2 union +select * from t2_c) a; +count(*) +6 +select count(*) from t3; +count(*) +4 +select count(*) from t3_c; +count(*) +4 +select count(*) +from (select * from t3 union +select * from t3_c) a; +count(*) +4 +select count(*) from t4; +count(*) +22 +select count(*) from t4_c; +count(*) +22 +select count(*) +from (select * from t4 union +select * from t4_c) a; +count(*) +22 +select count(*) from t5; +count(*) +3 +select count(*) from t5_c; +count(*) +3 +select count(*) +from (select * from t5 union +select * from t5_c) a; +count(*) +3 +select count(*) from t6; +count(*) +8 +select count(*) from t6_c; +count(*) +8 +select count(*) +from (select * from t6 union +select * from t6_c) a; +count(*) +8 +select count(*) from t7; +count(*) +5 +select count(*) from t7_c; +count(*) +5 +select count(*) +from (select * from t7 union +select * from t7_c) a; +count(*) +5 +select count(*) from t8; +count(*) +3 +select count(*) from t8_c; +count(*) +3 +select count(*) +from (select * from t8 union +select * from t8_c) a; +count(*) +3 +select count(*) from t9; +count(*) +3 +select count(*) from t9_c; +count(*) +3 +select count(*) +from (select * from t9 union +select * from t9_c) a; +count(*) +3 +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +Create table test/def/t2_c failed: Translate frm error drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; -520093696,1 +520093696,2 diff --git a/mysql-test/r/partition.result b/mysql-test/r/partition.result index 58f02681682..7ceee9c17ff 100644 --- a/mysql-test/r/partition.result +++ b/mysql-test/r/partition.result @@ -65,6 +65,8 @@ partitions 3 (partition x1 tablespace ts1, partition x2 tablespace ts2, partition x3 tablespace ts3); +CREATE TABLE t2 LIKE t1; +drop table t2; drop table t1; CREATE TABLE t1 ( a int not null, @@ -108,6 +110,127 @@ insert into t1 values (3); insert into t1 values (4); UNLOCK TABLES; drop table t1; +CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE) +PARTITION BY RANGE (a) +(PARTITION p0 VALUES 
LESS THAN (3), +PARTITION p1 VALUES LESS THAN (7), +PARTITION p2 VALUES LESS THAN (9), +PARTITION p3 VALUES LESS THAN (11)); +INSERT INTO t1 VALUES +(1, 'desk organiser', '2003-10-15'), +(2, 'CD player', '1993-11-05'), +(3, 'TV set', '1996-03-10'), +(4, 'bookcase', '1982-01-10'), +(5, 'exercise bike', '2004-05-09'), +(6, 'sofa', '1987-06-05'), +(7, 'popcorn maker', '2001-11-22'), +(8, 'acquarium', '1992-08-04'), +(9, 'study desk', '1984-09-16'), +(10, 'lava lamp', '1998-12-25'); +SELECT * from t1 ORDER BY a; +a name purchased +1 desk organiser 2003-10-15 +2 CD player 1993-11-05 +3 TV set 1996-03-10 +4 bookcase 1982-01-10 +5 exercise bike 2004-05-09 +6 sofa 1987-06-05 +7 popcorn maker 2001-11-22 +8 acquarium 1992-08-04 +9 study desk 1984-09-16 +10 lava lamp 1998-12-25 +ALTER TABLE t1 DROP PARTITION p0; +SELECT * from t1 ORDER BY a; +a name purchased +3 TV set 1996-03-10 +4 bookcase 1982-01-10 +5 exercise bike 2004-05-09 +6 sofa 1987-06-05 +7 popcorn maker 2001-11-22 +8 acquarium 1992-08-04 +9 study desk 1984-09-16 +10 lava lamp 1998-12-25 +drop table t1; +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN (4,5,6)); +insert into t1 values (1),(2),(3),(4),(5),(6); +select * from t1; +a +1 +2 +3 +4 +5 +6 +truncate t1; +select * from t1; +a +truncate t1; +select * from t1; +a +drop table t1; +CREATE TABLE t1 (a int, b int, primary key(a,b)) +PARTITION BY KEY(b,a) PARTITIONS 4; +insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6); +select * from t1 where a = 4; +a b +4 4 +drop table t1; +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +PARTITIONS 1 +(PARTITION x1 VALUES IN (1) ENGINE=MEMORY); +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) default NULL +) ENGINE=MEMORY DEFAULT CHARSET=latin1 PARTITION BY LIST (a) (PARTITION x1 VALUES IN (1) ENGINE = MEMORY) +drop table t1; +CREATE TABLE t1 (a int, unique(a)) +PARTITION BY LIST (a) +(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20)); +REPLACE t1 SET a = 4; +ERROR HY000: Table has no partition for value 4 +drop table t1; +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3)); +insert into t1 values (2), (3); +insert into t1 values (4); +ERROR HY000: Table has no partition for value 4 +insert into t1 values (1); +ERROR HY000: Table has no partition for value 1 +drop table t1; +CREATE TABLE t1 (a int) +PARTITION BY HASH(a) +PARTITIONS 5; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` int(11) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (a) PARTITIONS 5 +drop table t1; +CREATE TABLE t1 (a int) +PARTITION BY RANGE (a) +(PARTITION x1 VALUES LESS THAN (2)); +insert into t1 values (1); +update t1 set a = 5; +ERROR HY000: Table has no partition for value 5 +drop table t1; +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20)); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status OK +drop table t1; +CREATE TABLE `t1` ( +`id` int(11) default NULL +) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ; +SELECT * FROM t1; +id +drop table t1; CREATE TABLE `t1` ( `id` int(11) default NULL ) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ; @@ -119,8 +242,8 @@ create table t1 partition by range (a) ( partition p0 values less than(10), partition p1 values less than (20), -partition p2 values less than maxvalue); -alter table t1 reorganise partition p2 into 
(partition p2 values less than (30)); +partition p2 values less than (25)); +alter table t1 reorganize partition p2 into (partition p2 values less than (30)); show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -139,7 +262,7 @@ PARTITION x6 VALUES LESS THAN (14), PARTITION x7 VALUES LESS THAN (16), PARTITION x8 VALUES LESS THAN (18), PARTITION x9 VALUES LESS THAN (20)); -ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO (PARTITION x1 VALUES LESS THAN (6)); show create table t1; Table Create Table diff --git a/mysql-test/r/partition_02myisam.result b/mysql-test/r/partition_02myisam.result new file mode 100644 index 00000000000..184f9ce17cc --- /dev/null +++ b/mysql-test/r/partition_02myisam.result @@ -0,0 +1,1725 @@ +SET SESSION storage_engine='MYISAM'; +SET @max_row = 200; +SET AUTOCOMMIT= 1; + +#------------------------------------------------------------------------ +# There are several testcases disabled because of the open bugs +# #15407 , #15408 , #15890 , #15961 , #13447 , #15966 , #15968, #16370 +#------------------------------------------------------------------------ + +#------------------------------------------------------------------------ +# 0. Setting of auxiliary variables + Creation of an auxiliary table +# needed in all testcases +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t0_template; +CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1)) +ENGINE = MEMORY; +# Logging of INSERTs into t0_template suppressed + +#------------------------------------------------------------------------ +# 1. Some syntax checks +#------------------------------------------------------------------------ +# 1.1 Subpartitioned table without subpartitioning rule must be rejected +DROP TABLE IF EXISTS t1; +# FIXME Implement testcases which check that all create and +# alter table statements +# - with missing mandatory parameters are rejected +# - with optional parameters are accepted +# - with wrong combinations of optional parameters are rejected +# - ............ + +#------------------------------------------------------------------------ +# 2.
Checks where the engine is assigned on all supported (CREATE TABLE +# statement) positions + basic operations on the tables +# Storage engine mixups are currently (2005-12-23) not supported +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t1; +# 2.1 non partitioned table (for comparison) +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.2 Assignment of storage engine just after column list only +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM' +PARTITION BY HASH(f1) PARTITIONS 2; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.3 Assignment of storage engine just after partition or subpartition +# name only +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) 
+PARTITION BY HASH(f1) +( PARTITION part1 STORAGE ENGINE = 'MYISAM', +PARTITION part2 STORAGE ENGINE = 'MYISAM' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) +(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM') +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 
'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.4 Some but not all named partitions or subpartitions get a storage +# engine assigned +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) +( PARTITION part1 STORAGE ENGINE = 'MYISAM', +PARTITION part2 +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) +( PARTITION part1 , +PARTITION part2 STORAGE ENGINE = 'MYISAM' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( 
f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) +(SUBPARTITION subpart11, +SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM') +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) +(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, +SUBPARTITION subpart22 ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 
AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.5 Storage engine assignment after partition name + after name of +# subpartitions belonging to another partition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) ENGINE = 'MYISAM' +(SUBPARTITION subpart11, +SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM') +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) +(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'), +PARTITION part2 VALUES LESS THAN (2147483647) ENGINE = 'MYISAM' +(SUBPARTITION subpart21, +SUBPARTITION subpart22) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION 
subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.6 Precedence of storage engine assignments +# 2.6.1 Storage engine assignment after column list + after partition +# or subpartition name +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM' +PARTITION BY HASH(f1) +( PARTITION part1 STORAGE ENGINE = 'MYISAM', +PARTITION part2 STORAGE ENGINE = 'MYISAM' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'MYISAM' +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) +(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM') +); +SHOW CREATE TABLE t1; +Table Create Table 
+t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.6.2 Storage engine assignment after partition name + after +# subpartition name +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (100) STORAGE ENGINE = 'MYISAM' +(SUBPARTITION subpart11 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart12 STORAGE ENGINE = 'MYISAM'), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 STORAGE ENGINE = 'MYISAM', +SUBPARTITION subpart22 STORAGE ENGINE = 'MYISAM') +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 
0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.7 Session default engine differs from engine used within create table +SET SESSION storage_engine='MEMORY'; +SET SESSION storage_engine='MYISAM'; + +#------------------------------------------------------------------------ +# 3. Check assigning the number of partitions and subpartitions +# with and without named partitions/subpartitions +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t1; +# 3.1 (positive) without partition/subpartition number assignment +# 3.1.1 no partition number, no named partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 3.1.2 no partition number, named partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 
= '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 3.1.3 variations on no partition/subpartition number, named partitions, +# different subpartitions are/are not named +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (200), PARTITION part3 VALUES LESS THAN (2147483647)) ; +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (100) +(SUBPARTITION subpart11 , SUBPARTITION subpart12 ), PARTITION part2 VALUES LESS THAN (200) +(SUBPARTITION subpart21 , SUBPARTITION subpart22 ), PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart31 , SUBPARTITION subpart32 )) ; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (100) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (200) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM), PARTITION part3 VALUES LESS THAN (2147483647) (SUBPARTITION subpart31 ENGINE = MyISAM, SUBPARTITION subpart32 ENGINE = MyISAM)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 3.2 partition/subpartition numbers good and bad values and notations +DROP TABLE IF EXISTS t1; +# 3.2.1 partition/subpartition numbers INTEGER notation +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value 
+1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 2 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 2 (PARTITION part1 VALUES LESS THAN (100) , PARTITION part2 VALUES LESS THAN (2147483647) ) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 1 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT 
(COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 1 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 1 (PARTITION part1 VALUES LESS THAN (100) , PARTITION part2 VALUES LESS THAN (2147483647) ) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 0; +ERROR HY000: Number of partitions = 0 is not an allowed value +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 0 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR HY000: Number of subpartitions = 0 is not an allowed value +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS -1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS -1 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN 
(2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 1000000; +ERROR HY000: Too many partitions were defined +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 1000000 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR HY000: Too many partitions were defined +# 3.2.4 partition/subpartition numbers STRING notation +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '2'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '2' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '2.0'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '2.0' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN ' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '0.2E+1'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '0.2E+1' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS TH' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '2A'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '2A' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) 
PARTITIONS 'A2'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 'A2' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS ''; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '''' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 'GARBAGE'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 'GARBAGE' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE' +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS T' at line 3 +# 3.2.5 partition/subpartition numbers other notations +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2A; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 2A +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS A2; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS A2 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2 +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (21' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS GARBAGE; +ERROR 
42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS GARBAGE +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THA' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "2"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "2" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "2A"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "2A" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "A2"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "A2" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "GARBAGE"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "GARBAGE" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE" +(PARTITION part1 VALUES LESS THAN (100), PARTITION part2 VALUES LESS T' at line 3 +# 3.3 Mixups of assigned partition/subpartition numbers and names +# 3.3.1 (positive) number of partition/subpartition +# = number of named 
partition/subpartition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) PARTITIONS 2 +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPARTITION subpart22) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = MyISAM, SUBPARTITION subpart12 ENGINE = MyISAM), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = MyISAM, SUBPARTITION subpart22 ENGINE = MyISAM)) +DROP TABLE t1; +# 3.3.2 (positive) number of partition/subpartition , +# 0 (= no) named partition/subpartition +# already checked above +# 3.3.3 (negative) number of partitions/subpartitions +# > number of named partitions/subpartitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ; +ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 ), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPARTITION subpart22) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPAR' at line 5 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 ), +PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart31, SUBPARTITION subpart32) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart31, SUBPAR' at line 7 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) PARTITIONS 2 +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 ) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ') +)' at line 7 +# 3.3.4 (negative) number of partitions < number of named partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 1 ( PARTITION part1, PARTITION part2 ) ; +ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 1 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), 
+PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPARTITION subpart22) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPAR' at line 5 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 1 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 ), +PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart31, SUBPARTITION subpart32) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 ' at line 5 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 1 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPARTITION subpart22) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPAR' at line 5 + +#------------------------------------------------------------------------ +# 4. Checks of logical partition/subpartition name +# file name clashes during CREATE TABLE +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t1; +# 4.1 (negative) A partition name used more than once +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1); +ERROR HY000: Duplicate partition name part1 +# FIXME Implement testcases with filename problems +# existing file of other table --- partition/subpartition file name +# partition/subpartition file name --- file of the same table + +#------------------------------------------------------------------------ +# 5. 
Alter table experiments +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t1; +# 5.1 alter table add partition +# 5.1.1 (negative) add partition to non-partitioned table +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +Got one of the listed errors +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; +# 5.1.2 Add one partition to a table with one partition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1; +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION p0 ENGINE = MyISAM, PARTITION part1 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 5.1.3 Add one partition several times to a table with some partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1; +ALTER TABLE t1 ADD PARTITION (PARTITION part0); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM, PARTITION part0 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS
my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DELETE FROM t1; +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1; +ALTER TABLE t1 ADD PARTITION (PARTITION part2); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM, PARTITION part0 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DELETE FROM t1; +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM, PARTITION part0 ENGINE = MyISAM, PARTITION part2 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND 
(MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 5.1.4 Add several partitions to a table with some partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part3); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = MyISAM, PARTITION part3 ENGINE = MyISAM) +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 100 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 5.1.5 (negative) Add partitions to a table with some partitions +# clash on new and already existing partition names +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2, PARTITION part3); +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +ERROR HY000: Duplicate partition name part1 +ALTER TABLE t1 ADD PARTITION (PARTITION part2); +ERROR HY000: Duplicate partition name part2 +ALTER TABLE t1 ADD PARTITION (PARTITION part3); +ERROR HY000: Duplicate partition name part3 +ALTER TABLE t1 ADD PARTITION (PARTITION part1, PARTITION part2, PARTITION part3); +ERROR HY000: Duplicate partition name part1 +DROP TABLE t1; +# 5.2 alter table add subpartition +# 5.2.1 Add one subpartition to a table with subpartitioning rule and +# no explicitly defined subpartitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1)
+SUBPARTITION BY HASH(f1) +(PARTITION part1 VALUES LESS THAN (100)); +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100 - 1; +ALTER TABLE t1 ADD PARTITION (PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21)); +DROP TABLE t1; +DROP TABLE if exists t0_template; diff --git a/mysql-test/r/partition_03ndb.result b/mysql-test/r/partition_03ndb.result new file mode 100644 index 00000000000..28339cc7435 --- /dev/null +++ b/mysql-test/r/partition_03ndb.result @@ -0,0 +1,1361 @@ +SET SESSION storage_engine='NDB' ; +SET @max_row = 200; +SET AUTOCOMMIT= 1; +#------------------------------------------------------------------------ +# 0. Creation of an auxiliary table needed in all testcases +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t0_template; +CREATE TABLE t0_template ( f1 INTEGER, f2 char(20), PRIMARY KEY(f1)) +ENGINE = MEMORY; +# Logging of 200 INSERTs into t0_template suppressed +#------------------------------------------------------------------------ +# 1. Some syntax checks +#------------------------------------------------------------------------ +# 1.1 Subpartitioned table without subpartitioning rule must be rejected +DROP TABLE IF EXISTS t1; +#------------------------------------------------------------------------ +# 2. Checks where the engine is set on all supported CREATE TABLE +# statement positions + basic operations on the tables +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t1; +# 2.1 table (non-partitioned) for comparison +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB' ; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY KEY () +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.2 table with engine setting just after column list +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB' +PARTITION BY HASH(f1) PARTITIONS 2; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE
f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.3 table with engine setting in the named partition part +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) +( PARTITION part1 STORAGE ENGINE = 'NDB' , +PARTITION part2 STORAGE ENGINE = 'NDB' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.4 table with engine setting in the named subpartition part +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 
VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.5 Ugly "incomplete" storage engine assignments +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) +( PARTITION part1 STORAGE ENGINE = 'NDB' , +PARTITION part2 +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) +( PARTITION part1 , +PARTITION part2 STORAGE ENGINE = 'NDB' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, 
PARTITION part2 ENGINE = ndbcluster) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, +SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' , 
+SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 , +SUBPARTITION subpart22 ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.6 Ugly "over determined" storage engine assignments +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB' +PARTITION BY HASH(f1) +( PARTITION part1 STORAGE ENGINE = 'NDB' , +PARTITION part2 STORAGE ENGINE = 'NDB' +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT 
COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) ENGINE = 'NDB' +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) STORAGE ENGINE = 'NDB' +(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT 
(COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.7 Ugly storage engine assignments mixups +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) ENGINE = 'NDB' +(SUBPARTITION subpart11 , +SUBPARTITION subpart12 ), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart22 STORAGE ENGINE = 'NDB' ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 STORAGE ENGINE = 'NDB' , +SUBPARTITION subpart12 STORAGE ENGINE = 'NDB' ), +PARTITION part2 VALUES LESS THAN (2000) ENGINE = 'NDB' +(SUBPARTITION subpart21 , +SUBPARTITION subpart22 ) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, 
SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 2.8 Session default engine differs from engine used within create table +SET SESSION storage_engine='MEMORY'; +SET SESSION storage_engine='NDB' ; +#------------------------------------------------------------------------ +# 3. Check number of partitions and subpartitions +#------------------------------------------------------------------------ +DROP TABLE IF EXISTS t1; +# 3.1 (positive) without partition/subpartition number assignment +# 3.1.1 no partition number, no named partitions, no subpartitions mentioned +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 3.1.2 no partition number, named partitions, no subpartitions mentioned +CREATE TABLE t1 ( f1 
INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part2); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 3.1.3 variations on no partition/subpartition number, named partitions, +# different subpartitions are/are not named +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2000), PARTITION part3 VALUES LESS THAN (2147483647)) ; +DROP TABLE t1; +# FIXME several subtestcases of 3.1.3 disabled because of server crashes +# Bug#15407 Partitions: crash if subpartition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) (PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 , SUBPARTITION subpart12 ), PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 , SUBPARTITION subpart22 ), PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 , SUBPARTITION subpart22 )) ; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2000) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster), PARTITION part3 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT 
(COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +# 3.2 partition/subpartition numbers good and bad values and notations +DROP TABLE IF EXISTS t1; +# 3.2.1 partition/subpartition numbers INTEGER notation +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 2 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 2 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 2 (PARTITION part1 VALUES LESS THAN (1000) , PARTITION part2 VALUES LESS THAN (2147483647) ) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; 
+my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) PARTITIONS 1 +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 1 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) SUBPARTITIONS 1 (PARTITION part1 VALUES LESS THAN (1000) , PARTITION part2 VALUES LESS THAN (2147483647) ) +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 200; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +UPDATE t1 SET f1 = f1 + 200 +WHERE f1 BETWEEN 100 - 50 AND 100 + 50; +SELECT (COUNT(*) = 200) AND (MIN(f1) = 1) AND (MAX(f1) = 100 + 50 + 200 ) +AS my_value FROM t1; +my_value +1 +DELETE FROM t1 +WHERE f1 BETWEEN 100 - 50 + 200 AND 100 + 50 + 200; +SELECT (COUNT(*) = 200 - 50 - 50 - 1) AND (MIN(f1) = 1) AND (MAX(f1) = 200) +AS my_value FROM t1; +my_value +1 +INSERT INTO t1 SET f1 = 0 , f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 0 AND f2 = '#######'; +my_value +1 +INSERT INTO t1 SET f1 = 200 + 1, f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 
200 + 1 AND f2 = '#######'; +my_value +1 +UPDATE t1 SET f1 = 200 + 2, f2 = 'ZZZZZZZ' + WHERE f1 = 0 AND f2 = '#######'; +SELECT COUNT(*) = 1 AS my_value FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +my_value +1 +DELETE FROM t1 WHERE f1 = 200 + 2 AND f2 = 'ZZZZZZZ'; +SELECT COUNT(*) = 0 AS my_value FROM t1 WHERE f2 = 'ZZZZZZZ'; +my_value +1 +TRUNCATE t1; +SELECT COUNT(*) = 0 AS my_value FROM t1; +my_value +1 +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 0; +ERROR HY000: Number of partitions = 0 is not an allowed value +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 0 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR HY000: Number of subpartitions = 0 is not an allowed value +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS -1; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS -1 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '-1 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 1000000; +ERROR HY000: Too many partitions were defined +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 1000000 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR HY000: Too many partitions were defined +# 3.2.4 partition/subpartition numbers STRING notation +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '2'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '2' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '2.0'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '2.0' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2.0' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '0.2E+1'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL 
server version for the right syntax to use near ''0.2E+1'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '0.2E+1' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''0.2E+1' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS T' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS '2A'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '2A' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''2A' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 'A2'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 'A2' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''A2' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS ''; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '''' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS '' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 'GARBAGE'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE'' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 'GARBAGE' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near ''GARBAGE' +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS ' at line 3 +# 3.2.5 partition/subpartition numbers other notations +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2A; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server 
version for the right syntax to use near '2A' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS 2A +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '2A +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS A2; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS A2 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'A2 +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS GARBAGE; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS GARBAGE +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near 'GARBAGE +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS TH' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "2"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "2" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "2A"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "2A" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"2A" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "A2"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2"' at line 2 +CREATE TABLE t1 ( f1 
INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "A2" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"A2" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN ' at line 3 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS "GARBAGE"; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE"' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) SUBPARTITION BY HASH(f1) +SUBPARTITIONS "GARBAGE" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS THAN (2147483647)); +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '"GARBAGE" +(PARTITION part1 VALUES LESS THAN (1000), PARTITION part2 VALUES LESS ' at line 3 +# 3.3 Mixups of number and names of partition/subpartition assigned +# 3.3.1 (positive) number of partition/subpartition = number of named partition/subpartition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1, PARTITION part2 ) ; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY HASH (f1) (PARTITION part1 ENGINE = ndbcluster, PARTITION part2 ENGINE = ndbcluster) +DROP TABLE t1; +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) PARTITIONS 2 +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPARTITION subpart22) +); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `f1` int(11) default NULL, + `f2` char(20) default NULL +) ENGINE=ndbcluster DEFAULT CHARSET=latin1 PARTITION BY RANGE (f1) SUBPARTITION BY HASH (f1) (PARTITION part1 VALUES LESS THAN (1000) (SUBPARTITION subpart11 ENGINE = ndbcluster, SUBPARTITION subpart12 ENGINE = ndbcluster), PARTITION part2 VALUES LESS THAN (2147483647) (SUBPARTITION subpart21 ENGINE = ndbcluster, SUBPARTITION subpart22 ENGINE = ndbcluster)) +DROP TABLE t1; +# 3.3.2 (positive) number of partition/subpartition , 0 (= no) named partition/subpartition +# already checked above +# 3.3.3 (negative) number of partitions > number of named partitions +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) PARTITIONS 2 ( PARTITION part1 ) ; +ERROR 42000: Wrong number of partitions defined, mismatch with previous setting near ')' at line 2 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11 ), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPARTITION subpart22) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21, SUBPAR' at line 5 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, 
SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2000) +(SUBPARTITION subpart21 ), +PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart31, SUBPARTITION subpart32) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near '), +PARTITION part3 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart31, SUBPAR' at line 7 +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY RANGE(f1) PARTITIONS 2 +SUBPARTITION BY HASH(f1) SUBPARTITIONS 2 +( PARTITION part1 VALUES LESS THAN (1000) +(SUBPARTITION subpart11, SUBPARTITION subpart12), +PARTITION part2 VALUES LESS THAN (2147483647) +(SUBPARTITION subpart21 ) +); +ERROR 42000: Wrong number of subpartitions defined, mismatch with previous setting near ') +)' at line 7 +#------------------------------------------------------------------------ +# 4. Checks of logical partition/subpartition name +# file name clashes during CREATE TABLE +#------------------------------------------------------------------------ +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1) (PARTITION part1, PARTITION part1); +ERROR HY000: Duplicate partition name part1 +#------------------------------------------------------------------------ +# 5. Alter table experiments +#------------------------------------------------------------------------ +# 5.1 alter table add partition +# 5.1.1 (negative) add partition to non partitioned table +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)); +# FIXME Why does the error numbers of MyISAM(1482) and NDB(1005) differ ? +ALTER TABLE t1 ADD PARTITION (PARTITION part1); +Got one of the listed errors +DROP TABLE t1; +# 5.1.2 Add one partition to a table with one partition +CREATE TABLE t1 ( f1 INTEGER, f2 char(20)) +PARTITION BY HASH(f1); +INSERT INTO t1 SELECT * FROM t0_template WHERE f1 BETWEEN 1 AND 100; diff --git a/mysql-test/r/partition_error.result b/mysql-test/r/partition_error.result index 90faa3b20b8..1a0b1dd9b3a 100644 --- a/mysql-test/r/partition_error.result +++ b/mysql-test/r/partition_error.result @@ -1,3 +1,4 @@ +drop table if exists t1; partition by list (a) partitions 3 (partition x1 values in (1,2,9,4) tablespace ts1, @@ -544,6 +545,10 @@ partitions 2 partition x2 values in (5)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MySQL server version for the right syntax to use near '4, partition x2 values in (5))' at line 8 +CREATE TABLE t1 (a int) +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (x1)); +ERROR 42S22: Unknown column 'x1' in 'partition function' CREATE TABLE t1(a int) PARTITION BY RANGE (a) (PARTITION p1 VALUES LESS THAN(5)); insert into t1 values (10); diff --git a/mysql-test/r/partition_mgm_err.result b/mysql-test/r/partition_mgm_err.result index 01709e726bd..3c2c50fc6f8 100644 --- a/mysql-test/r/partition_mgm_err.result +++ b/mysql-test/r/partition_mgm_err.result @@ -1,3 +1,4 @@ +drop table if exists t1; CREATE TABLE t1 (a int, b int) PARTITION BY RANGE (a) (PARTITION x0 VALUES LESS THAN (2), @@ -10,48 +11,52 @@ PARTITION x6 VALUES LESS THAN (14), PARTITION x7 VALUES LESS THAN (16), PARTITION x8 VALUES LESS THAN (18), PARTITION x9 VALUES LESS THAN (20)); -ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO (PARTITION x01 VALUES LESS THAN (2), PARTITION x11 VALUES LESS THAN (5)); -ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do +ERROR HY000: Reorganize of range partitions cannot change total ranges except for 
last partition where it can extend the range ALTER TABLE t1 DROP PARTITION x0, x1, x2, x3, x3; -ERROR HY000: Error in list of partitions to change +ERROR HY000: Error in list of partitions to DROP ALTER TABLE t1 DROP PARTITION x0, x1, x2, x10; -ERROR HY000: Error in list of partitions to change +ERROR HY000: Error in list of partitions to DROP ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1; -ERROR HY000: Error in list of partitions to change +ERROR HY000: Error in list of partitions to DROP ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3; -ERROR HY000: Error in list of partitions to change -ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO +ERROR HY000: Error in list of partitions to DROP +ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO (PARTITION x11 VALUES LESS THAN (22)); ERROR HY000: More partitions to reorganise than there are partitions -ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO (PARTITION x3 VALUES LESS THAN (6)); -ERROR HY000: All partitions must have unique names in the table -ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO +ERROR HY000: Duplicate partition name x3 +ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO (PARTITION x11 VALUES LESS THAN (2)); ERROR HY000: When reorganising a set of partitions they must be in consecutive order -ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO (PARTITION x11 VALUES LESS THAN (4)); -ERROR HY000: Error in list of partitions to change -ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO +ERROR HY000: Error in list of partitions to REORGANIZE +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO (PARTITION x01 VALUES LESS THAN (5)); -ERROR HY000: The new partitions cover a bigger range then the reorganised partitions do -ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO +ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO (PARTITION x01 VALUES LESS THAN (4), PARTITION x11 VALUES LESS THAN (2)); +ERROR HY000: Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO +(PARTITION x01 VALUES LESS THAN (6), +PARTITION x11 VALUES LESS THAN (4)); ERROR HY000: VALUES LESS THAN value must be strictly increasing for each partition DROP TABLE t1; CREATE TABLE t1 (a int) PARTITION BY KEY (a) PARTITIONS 2; ALTER TABLE t1 ADD PARTITION (PARTITION p1); -ERROR HY000: All partitions must have unique names in the table +ERROR HY000: Duplicate partition name p1 DROP TABLE t1; CREATE TABLE t1 (a int) PARTITION BY KEY (a) (PARTITION x0, PARTITION x1, PARTITION x2, PARTITION x3, PARTITION x3); -ERROR HY000: All partitions must have unique names in the table +ERROR HY000: Duplicate partition name x3 CREATE TABLE t1 (a int) PARTITION BY RANGE (a) SUBPARTITION BY KEY (a) @@ -100,7 +105,7 @@ PARTITION x1 VALUES LESS THAN (8)); ALTER TABLE t1 ADD PARTITION PARTITIONS 1; ERROR HY000: For RANGE partitions each partition must be defined ALTER TABLE t1 DROP PARTITION x2; -ERROR HY000: Error in list of partitions to change +ERROR HY000: Error in list of partitions to DROP ALTER TABLE t1 COALESCE PARTITION 1; ERROR HY000: COALESCE PARTITION can only be used on HASH/KEY partitions ALTER TABLE t1 DROP PARTITION x1; diff --git a/mysql-test/r/query_cache.result b/mysql-test/r/query_cache.result index 
e790c8589d0..8d76f8fc701 100644 --- a/mysql-test/r/query_cache.result +++ b/mysql-test/r/query_cache.result @@ -1105,56 +1105,20 @@ call f1(); s1 s1 s1 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 3 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 3 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 0 call f1(); s1 s1 s1 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 3 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 3 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 3 call f1(); s1 s1 s1 select sql_cache * from t1; s1 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 4 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 4 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 6 insert into t1 values (1); select sql_cache * from t1; s1 1 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 1 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 5 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 6 call f1(); s1 1 @@ -1172,15 +1136,6 @@ s1 select sql_cache * from t1; s1 1 -show status like "Qcache_queries_in_cache"; -Variable_name Value -Qcache_queries_in_cache 4 -show status like "Qcache_inserts"; -Variable_name Value -Qcache_inserts 8 -show status like "Qcache_hits"; -Variable_name Value -Qcache_hits 10 flush query cache; reset query cache; flush status; diff --git a/mysql-test/r/query_cache_notembedded.result b/mysql-test/r/query_cache_notembedded.result index e773a63525b..77fa198eb80 100644 --- a/mysql-test/r/query_cache_notembedded.result +++ b/mysql-test/r/query_cache_notembedded.result @@ -94,4 +94,224 @@ a SELECT * FROM t1; a drop table t1; +flush query cache; +reset query cache; +flush status; +create table t1 (s1 int)// +create procedure f1 () begin +select sql_cache * from t1; +select sql_cache * from t1; +select sql_cache * from t1; +end;// +create procedure f2 () begin +select sql_cache * from t1 where s1=1; +select sql_cache * from t1; +end;// +create procedure f3 () begin +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +end;// +create procedure f4 () begin +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +select sql_cache * from t1 where s1=1; +end;// +call f1(); +s1 +s1 +s1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 3 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 0 +call f1(); +s1 +s1 +s1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 3 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 3 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 3 +call f1(); +s1 +s1 +s1 +select sql_cache * from t1; +s1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 4 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 4 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 6 +insert into t1 values (1); +select sql_cache * from t1; +s1 +1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 1 +show status like "Qcache_inserts"; 
+Variable_name Value +Qcache_inserts 5 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 6 +call f1(); +s1 +1 +s1 +1 +s1 +1 +call f1(); +s1 +1 +s1 +1 +s1 +1 +select sql_cache * from t1; +s1 +1 +show status like "Qcache_queries_in_cache"; +Variable_name Value +Qcache_queries_in_cache 4 +show status like "Qcache_inserts"; +Variable_name Value +Qcache_inserts 8 +show status like "Qcache_hits"; +Variable_name Value +Qcache_hits 10 +flush query cache; +reset query cache; +flush status; +select sql_cache * from t1; +s1 +1 +select sql_cache * from t1 where s1=1; +s1 +1 +call f1(); +s1 +1 +s1 +1 +s1 +1 +call f2(); +s1 +1 +s1 +1 +call f3(); +s1 +1 +s1 +1 +call f4(); +s1 +1 +s1 +1 +s1 +1 +s1 +1 +s1 +1 +call f4(); +s1 +1 +s1 +1 +s1 +1 +s1 +1 +s1 +1 +call f3(); +s1 +1 +s1 +1 +call f2(); +s1 +1 +s1 +1 +select sql_cache * from t1 where s1=1; +s1 +1 +insert into t1 values (2); +call f1(); +s1 +1 +2 +s1 +1 +2 +s1 +1 +2 +select sql_cache * from t1 where s1=1; +s1 +1 +select sql_cache * from t1; +s1 +1 +2 +call f1(); +s1 +1 +2 +s1 +1 +2 +s1 +1 +2 +call f3(); +s1 +1 +2 +s1 +1 +call f3(); +s1 +1 +2 +s1 +1 +call f1(); +s1 +1 +2 +s1 +1 +2 +s1 +1 +2 +drop procedure f1; +drop procedure f2; +drop procedure f3; +drop procedure f4; +drop table t1; set GLOBAL query_cache_size=0; diff --git a/mysql-test/r/show_check.result b/mysql-test/r/show_check.result index a78230e63eb..960c389eee2 100644 --- a/mysql-test/r/show_check.result +++ b/mysql-test/r/show_check.result @@ -147,10 +147,14 @@ drop table t1; flush tables; show open tables; Database Table In_use Name_locked +mysql general_log 1 0 +mysql slow_log 1 0 create table t1(n int); insert into t1 values (1); show open tables; Database Table In_use Name_locked +mysql general_log 1 0 +mysql slow_log 1 0 test t1 0 0 drop table t1; create table t1 (a int not null, b VARCHAR(10), INDEX (b) ) AVG_ROW_LENGTH=10 CHECKSUM=1 COMMENT="test" ENGINE=MYISAM MIN_ROWS=10 MAX_ROWS=100 PACK_KEYS=1 DELAY_KEY_WRITE=1 ROW_FORMAT=fixed; @@ -564,20 +568,24 @@ SELECT 1 FROM mysql.db, mysql.proc, mysql.user, mysql.time_zone, mysql.time_zone 1 SHOW OPEN TABLES; Database Table In_use Name_locked -mysql db 0 0 +mysql proc 0 0 test urkunde 0 0 mysql time_zone 0 0 -mysql user 0 0 +mysql db 0 0 test txt1 0 0 -mysql proc 0 0 +mysql slow_log 1 0 test tyt2 0 0 +mysql general_log 1 0 +mysql user 0 0 mysql time_zone_name 0 0 SHOW OPEN TABLES FROM mysql; Database Table In_use Name_locked -mysql db 0 0 -mysql time_zone 0 0 -mysql user 0 0 mysql proc 0 0 +mysql time_zone 0 0 +mysql db 0 0 +mysql slow_log 1 0 +mysql general_log 1 0 +mysql user 0 0 mysql time_zone_name 0 0 SHOW OPEN TABLES FROM mysql LIKE 'u%'; Database Table In_use Name_locked @@ -590,12 +598,16 @@ test tyt2 0 0 mysql time_zone_name 0 0 SHOW OPEN TABLES LIKE '%o%'; Database Table In_use Name_locked -mysql time_zone 0 0 mysql proc 0 0 +mysql time_zone 0 0 +mysql slow_log 1 0 +mysql general_log 1 0 mysql time_zone_name 0 0 FLUSH TABLES; SHOW OPEN TABLES; Database Table In_use Name_locked +mysql general_log 1 0 +mysql slow_log 1 0 DROP TABLE txt1; DROP TABLE tyt2; DROP TABLE urkunde; diff --git a/mysql-test/r/sp-error.result b/mysql-test/r/sp-error.result index d7bed7e88a7..1241e05fa74 100644 --- a/mysql-test/r/sp-error.result +++ b/mysql-test/r/sp-error.result @@ -464,19 +464,6 @@ create table t5 (x int)| call bug3294()| ERROR 42S02: Unknown table 't5' drop procedure bug3294| -drop procedure if exists bug6807| -create procedure bug6807() -begin -declare id int; -set id = connection_id(); -kill query id; -select 'Not reached'; 
-end| -call bug6807()| -ERROR 70100: Query execution was interrupted -call bug6807()| -ERROR 70100: Query execution was interrupted -drop procedure bug6807| drop procedure if exists bug8776_1| drop procedure if exists bug8776_2| drop procedure if exists bug8776_3| diff --git a/mysql-test/r/sp.result b/mysql-test/r/sp.result index bf205eae0e0..acd32564f51 100644 --- a/mysql-test/r/sp.result +++ b/mysql-test/r/sp.result @@ -2142,7 +2142,6 @@ show create database test; show databases like 'foo'; show errors; show columns from t1; -show grants for 'root'@'localhost'; show keys from t1; show open tables like 'foo'; show privileges; @@ -2169,8 +2168,6 @@ Level Code Message Field Type Null Key Default Extra id char(16) NO data int(11) NO -Grants for root@localhost -GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Database Table In_use Name_locked Privilege Context Comment @@ -2224,8 +2221,6 @@ Level Code Message Field Type Null Key Default Extra id char(16) NO data int(11) NO -Grants for root@localhost -GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Database Table In_use Name_locked Privilege Context Comment @@ -2262,18 +2257,6 @@ Tables_in_test (foo) Variable_name Value Level Code Message drop procedure bug4902| -drop procedure if exists bug4902_2| -create procedure bug4902_2() -begin -show processlist; -end| -call bug4902_2()| -Id User Host db Command Time State Info -# root localhost test Query # NULL show processlist -call bug4902_2()| -Id User Host db Command Time State Info -# root localhost test Query # NULL show processlist -drop procedure bug4902_2| drop procedure if exists bug4904| create procedure bug4904() begin @@ -2416,52 +2399,6 @@ select @x| NULL delete from t1| drop procedure bug4941| -drop procedure if exists bug3583| -drop procedure if exists bug3583| -create procedure bug3583() -begin -declare c int; -select * from t1; -select count(*) into c from t1; -select c; -end| -insert into t1 values ("x", 3), ("y", 5)| -set @x = @@query_cache_size| -set global query_cache_size = 10*1024*1024| -flush status| -flush query cache| -show status like 'Qcache_hits'| -Variable_name Value -Qcache_hits 0 -call bug3583()| -id data -x 3 -y 5 -c -2 -show status like 'Qcache_hits'| -Variable_name Value -Qcache_hits 0 -call bug3583()| -id data -x 3 -y 5 -c -2 -call bug3583()| -id data -x 3 -y 5 -c -2 -show status like 'Qcache_hits'| -Variable_name Value -Qcache_hits 2 -set global query_cache_size = @x| -flush status| -flush query cache| -delete from t1| -drop procedure bug3583| drop procedure if exists bug4905| create table t3 (s1 int,primary key (s1))| drop procedure if exists bug4905| @@ -2677,17 +2614,6 @@ select id, bug5240() from t1| id bug5240() answer 42 drop function bug5240| -drop function if exists bug5278| -create function bug5278 () returns char -begin -SET PASSWORD FOR 'bob'@'%.loc.gov' = PASSWORD('newpass'); -return 'okay'; -end| -select bug5278()| -ERROR 42000: Can't find any matching row in the user table -select bug5278()| -ERROR 42000: Can't find any matching row in the user table -drop function bug5278| drop procedure if exists p1| create table t3(id int)| insert into t3 values(1)| @@ -4052,14 +3978,6 @@ select bug10100f(5)| ERROR HY000: Recursive stored functions and triggers are not allowed. 
call bug10100t(5)| ERROR HY000: Recursive limit 0 (as set by the max_sp_recursion_depth variable) was exceeded for routine bug10100p -set @@max_sp_recursion_depth=255| -set @var=1| -call bug10100p(255, @var)| -call bug10100pt(1,255)| -call bug10100pv(1,255)| -call bug10100pd(1,255)| -call bug10100pc(1,255)| -set @@max_sp_recursion_depth=0| deallocate prepare stmt2| drop function bug10100f| drop procedure bug10100p| diff --git a/mysql-test/r/sp_notembedded.result b/mysql-test/r/sp_notembedded.result new file mode 100644 index 00000000000..bdd32647f9d --- /dev/null +++ b/mysql-test/r/sp_notembedded.result @@ -0,0 +1,206 @@ +drop procedure if exists bug4902| +create procedure bug4902() +begin +show grants for 'root'@'localhost'; +end| +call bug4902()| +Grants for root@localhost +GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION +call bug4902()| +Grants for root@localhost +GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION +drop procedure bug4902| +drop procedure if exists bug4902_2| +create procedure bug4902_2() +begin +show processlist; +end| +call bug4902_2()| +Id User Host db Command Time State Info +# root localhost test Query # NULL show processlist +call bug4902_2()| +Id User Host db Command Time State Info +# root localhost test Query # NULL show processlist +drop procedure bug4902_2| +drop function if exists bug5278| +create function bug5278 () returns char +begin +SET PASSWORD FOR 'bob'@'%.loc.gov' = PASSWORD('newpass'); +return 'okay'; +end| +select bug5278()| +ERROR 42000: Can't find any matching row in the user table +select bug5278()| +ERROR 42000: Can't find any matching row in the user table +drop function bug5278| +drop table if exists t1| +create table t1 ( +id char(16) not null default '', +data int not null +)| +drop procedure if exists bug3583| +drop procedure if exists bug3583| +create procedure bug3583() +begin +declare c int; +select * from t1; +select count(*) into c from t1; +select c; +end| +insert into t1 values ("x", 3), ("y", 5)| +set @x = @@query_cache_size| +set global query_cache_size = 10*1024*1024| +flush status| +flush query cache| +show status like 'Qcache_hits'| +Variable_name Value +Qcache_hits 0 +call bug3583()| +id data +x 3 +y 5 +c +2 +show status like 'Qcache_hits'| +Variable_name Value +Qcache_hits 0 +call bug3583()| +id data +x 3 +y 5 +c +2 +call bug3583()| +id data +x 3 +y 5 +c +2 +show status like 'Qcache_hits'| +Variable_name Value +Qcache_hits 2 +set global query_cache_size = @x| +flush status| +flush query cache| +delete from t1| +drop procedure bug3583| +drop table t1; +#| +drop procedure if exists bug6807| +create procedure bug6807() +begin +declare id int; +set id = connection_id(); +kill query id; +select 'Not reached'; +end| +call bug6807()| +ERROR 70100: Query execution was interrupted +call bug6807()| +ERROR 70100: Query execution was interrupted +drop procedure bug6807| +drop function if exists bug10100f| +drop procedure if exists bug10100p| +drop procedure if exists bug10100t| +drop procedure if exists bug10100pt| +drop procedure if exists bug10100pv| +drop procedure if exists bug10100pd| +drop procedure if exists bug10100pc| +create function bug10100f(prm int) returns int +begin +if prm > 1 then +return prm * bug10100f(prm - 1); +end if; +return 1; +end| +create procedure bug10100p(prm int, inout res int) +begin +set res = res * prm; +if prm > 1 then +call bug10100p(prm - 1, res); +end if; +end| +create procedure bug10100t(prm int) +begin +declare res int; +set res = 1; +call bug10100p(prm, res); 
+select res; +end| +create table t3 (a int)| +insert into t3 values (0)| +create view v1 as select a from t3; +create procedure bug10100pt(level int, lim int) +begin +if level < lim then +update t3 set a=level; +FLUSH TABLES; +call bug10100pt(level+1, lim); +else +select * from t3; +end if; +end| +create procedure bug10100pv(level int, lim int) +begin +if level < lim then +update v1 set a=level; +FLUSH TABLES; +call bug10100pv(level+1, lim); +else +select * from v1; +end if; +end| +prepare stmt2 from "select * from t3;"; +create procedure bug10100pd(level int, lim int) +begin +if level < lim then +select level; +prepare stmt1 from "update t3 set a=a+2"; +execute stmt1; +FLUSH TABLES; +execute stmt1; +FLUSH TABLES; +execute stmt1; +FLUSH TABLES; +deallocate prepare stmt1; +execute stmt2; +select * from t3; +call bug10100pd(level+1, lim); +else +execute stmt2; +end if; +end| +create procedure bug10100pc(level int, lim int) +begin +declare lv int; +declare c cursor for select a from t3; +open c; +if level < lim then +select level; +fetch c into lv; +select lv; +update t3 set a=level+lv; +FLUSH TABLES; +call bug10100pc(level+1, lim); +else +select * from t3; +end if; +close c; +end| +set @@max_sp_recursion_depth=255| +set @var=1| +call bug10100p(255, @var)| +call bug10100pt(1,255)| +call bug10100pv(1,255)| +call bug10100pd(1,255)| +call bug10100pc(1,255)| +set @@max_sp_recursion_depth=0| +deallocate prepare stmt2| +drop function bug10100f| +drop procedure bug10100p| +drop procedure bug10100t| +drop procedure bug10100pt| +drop procedure bug10100pv| +drop procedure bug10100pd| +drop procedure bug10100pc| +drop view v1| diff --git a/mysql-test/r/subselect_notembedded.result b/mysql-test/r/subselect_notembedded.result new file mode 100644 index 00000000000..dd4b0701c32 --- /dev/null +++ b/mysql-test/r/subselect_notembedded.result @@ -0,0 +1 @@ +purge master logs before (select adddate(current_timestamp(), interval -4 day)); diff --git a/mysql-test/r/system_mysql_db.result b/mysql-test/r/system_mysql_db.result index 2e4e2dc10ea..e3abd7f5200 100644 --- a/mysql-test/r/system_mysql_db.result +++ b/mysql-test/r/system_mysql_db.result @@ -5,6 +5,7 @@ columns_priv db event func +general_log help_category help_keyword help_relation @@ -13,6 +14,7 @@ host plugin proc procs_priv +slow_log tables_priv time_zone time_zone_leap_second @@ -184,6 +186,31 @@ proc CREATE TABLE `proc` ( `comment` char(64) character set utf8 collate utf8_bin NOT NULL default '', PRIMARY KEY (`db`,`name`,`type`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Stored Procedures' +show create table general_log; +Table Create Table +general_log CREATE TABLE `general_log` ( + `event_time` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP, + `user_host` mediumtext, + `thread_id` int(11) default NULL, + `server_id` int(11) default NULL, + `command_type` varchar(64) default NULL, + `argument` mediumtext +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log' +show create table slow_log; +Table Create Table +slow_log CREATE TABLE `slow_log` ( + `start_time` timestamp NOT NULL default CURRENT_TIMESTAMP on update CURRENT_TIMESTAMP, + `user_host` mediumtext NOT NULL, + `query_time` time NOT NULL, + `lock_time` time NOT NULL, + `rows_sent` int(11) NOT NULL, + `rows_examined` int(11) NOT NULL, + `db` varchar(512) default NULL, + `last_insert_id` int(11) default NULL, + `insert_id` int(11) default NULL, + `server_id` int(11) default NULL, + `sql_text` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log' 
show create table event; Table Create Table event CREATE TABLE `event` ( diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index b10d4302776..fea725922ab 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -2213,15 +2213,6 @@ r_object_id users_names 120001a080000542 guser02 drop view v1, v2; drop table t1, t2; -create definer=some_user@`` sql security invoker view v1 as select 1; -ERROR HY000: Definer is not fully qualified -create definer=some_user@localhost sql security invoker view v1 as select 1; -Warnings: -Note 1449 There is no 'some_user'@'localhost' registered -show create view v1; -View Create View -v1 CREATE ALGORITHM=UNDEFINED DEFINER=`some_user`@`localhost` SQL SECURITY INVOKER VIEW `v1` AS select 1 AS `1` -drop view v1; create table t1 (s1 int); create view abc as select * from t1 as abc; drop table t1; diff --git a/mysql-test/r/view_grant.result b/mysql-test/r/view_grant.result index cb1ac88ea8e..daf34362b5d 100644 --- a/mysql-test/r/view_grant.result +++ b/mysql-test/r/view_grant.result @@ -520,3 +520,12 @@ use test; use test; drop user mysqltest_1@localhost; drop database mysqltest; +create definer=some_user@`` sql security invoker view v1 as select 1; +ERROR HY000: Definer is not fully qualified +create definer=some_user@localhost sql security invoker view v1 as select 1; +Warnings: +Note 1449 There is no 'some_user'@'localhost' registered +show create view v1; +View Create View +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`some_user`@`localhost` SQL SECURITY INVOKER VIEW `v1` AS select 1 AS `1` +drop view v1; diff --git a/mysql-test/t/backup.test b/mysql-test/t/backup.test index 7ab3433c7ed..64de3a426b2 100644 --- a/mysql-test/t/backup.test +++ b/mysql-test/t/backup.test @@ -1,3 +1,6 @@ +# This test should work in embedded server after we fix mysqltest +-- source include/not_embedded.inc + # # This test is a bit tricky as we can't use backup table to overwrite an old # table diff --git a/mysql-test/t/binlog_stm_binlog.test b/mysql-test/t/binlog_stm_binlog.test index da93d86c7ce..6d3dce9a323 100644 --- a/mysql-test/t/binlog_stm_binlog.test +++ b/mysql-test/t/binlog_stm_binlog.test @@ -1,5 +1,6 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] +-- source include/not_embedded.inc -- source include/have_binlog_format_statement.inc -- source extra/binlog_tests/binlog.test diff --git a/mysql-test/t/binlog_stm_blackhole.test b/mysql-test/t/binlog_stm_blackhole.test index 3c0096fa3f6..6047d8ca2fc 100644 --- a/mysql-test/t/binlog_stm_blackhole.test +++ b/mysql-test/t/binlog_stm_blackhole.test @@ -1,5 +1,6 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] +-- source include/not_embedded.inc -- source include/have_binlog_format_statement.inc -- source extra/binlog_tests/blackhole.test diff --git a/mysql-test/t/binlog_stm_ctype_cp932.test b/mysql-test/t/binlog_stm_ctype_cp932.test index cef179e0028..436f95a2453 100644 --- a/mysql-test/t/binlog_stm_ctype_cp932.test +++ b/mysql-test/t/binlog_stm_ctype_cp932.test @@ -1,5 +1,6 @@ # This is a wrapper for binlog.test so that the same test case can be used # For both statement and row based bin logs 9/19/2005 [jbm] +-- source include/not_embedded.inc -- source include/have_binlog_format_statement.inc -- source extra/binlog_tests/ctype_cp932.test diff --git a/mysql-test/t/compress.test b/mysql-test/t/compress.test index 
46244edd2a8..3f1892b5dec 100644 --- a/mysql-test/t/compress.test +++ b/mysql-test/t/compress.test @@ -1,6 +1,9 @@ # Turn on compression between the client and server # and run a number of tests +# Can't test with embedded server +-- source include/not_embedded.inc + -- source include/have_compress.inc connect (comp_con,localhost,root,,,,,COMPRESS); diff --git a/mysql-test/t/connect.test b/mysql-test/t/connect.test index fef9d4552e6..5ee3d64e56f 100644 --- a/mysql-test/t/connect.test +++ b/mysql-test/t/connect.test @@ -5,6 +5,10 @@ # This test makes no sense with the embedded server --source include/not_embedded.inc +# check that CSV engine was compiled in, as the test relies on the presence +# of the log tables (which are CSV-based); verify via: connect mysql; show tables; +--source include/have_csv.inc + --disable_warnings drop table if exists t1,t2; --enable_warnings diff --git a/mysql-test/t/csv.test b/mysql-test/t/csv.test index a028f6ced6d..6f0f42f109c 100644 --- a/mysql-test/t/csv.test +++ b/mysql-test/t/csv.test @@ -2,7 +2,7 @@ # Test for the CSV engine # --- source include/have_csv.inc +--source include/have_csv.inc # # Simple select test # @@ -1353,6 +1353,40 @@ DROP TABLE bug14672; # End of 4.1 tests +# +# Test CONCURRENT INSERT (5.1) +# + +CREATE TABLE test_concurrent_insert ( val integer ) ENGINE = CSV; + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; +# obtain TL_READ lock on the table +LOCK TABLES test_concurrent_insert READ LOCAL; + +connection con2; +# should pass despite the lock +INSERT INTO test_concurrent_insert VALUES (1); +SELECT * FROM test_concurrent_insert; + +connection con1; +# first connection should not notice the changes +SELECT * FROM test_concurrent_insert; + +UNLOCK TABLES; + +# Now check that we see our own changes + +LOCK TABLES test_concurrent_insert WRITE; +INSERT INTO test_concurrent_insert VALUES (2); +SELECT * FROM test_concurrent_insert; +UNLOCK TABLES; + +# cleanup +DROP TABLE test_concurrent_insert; + # # BUG#13406 - incorrect amount of "records deleted" # diff --git a/mysql-test/t/delayed.test b/mysql-test/t/delayed.test index ca34cc020f3..5ae757b1fde 100644 --- a/mysql-test/t/delayed.test +++ b/mysql-test/t/delayed.test @@ -3,6 +3,9 @@ # (Can't be tested with purify :( ) # +# This test is not performed with embedded server +-- source include/not_embedded.inc + --disable_warnings drop table if exists t1; --enable_warnings diff --git a/mysql-test/t/disabled.def b/mysql-test/t/disabled.def index b2973e02189..6f1d928d429 100644 --- a/mysql-test/t/disabled.def +++ b/mysql-test/t/disabled.def @@ -19,6 +19,7 @@ innodb_concurrent : Results are not deterministic, Elliot will fix (BUG#3300) subselect : Bug#15706 ps_7ndb : dbug assert in RBR mode when executing test suite rpl_ddl : Bug#15963 SBR does not show "Definer" correctly +partition_03ndb : Bug#16385 events : Affects flush test case.
A table lock not released somewhere ndb_binlog_basic : Results are not deterministic, Tomas will fix rpl_ndb_basic : Bug#16228 @@ -26,3 +27,5 @@ rpl_sp : Bug #16456 #ndb_dd_disk2memory : Bug #16466 ndb_autodiscover : Needs to be fixed w.r.t binlog ndb_autodiscover2 : Needs to be fixed w.r.t binlog +system_mysql_db : Needs fixing +system_mysql_db_fix : Needs fixing diff --git a/mysql-test/t/federated.test b/mysql-test/t/federated.test index b6b3b90c083..a31fcf59074 100644 --- a/mysql-test/t/federated.test +++ b/mysql-test/t/federated.test @@ -1,3 +1,5 @@ +# should work with embedded server after mysqltest is fixed +-- source include/not_embedded.inc source include/federated.inc; connection slave; diff --git a/mysql-test/t/federated_archive.test b/mysql-test/t/federated_archive.test index 6d80664fef7..f8df5f1c51a 100644 --- a/mysql-test/t/federated_archive.test +++ b/mysql-test/t/federated_archive.test @@ -1,3 +1,5 @@ +# should work with embedded server after mysqltest is fixed +-- source include/not_embedded.inc source include/have_archive.inc; source include/federated.inc; diff --git a/mysql-test/t/federated_bug_13118.test b/mysql-test/t/federated_bug_13118.test index deec79becd2..e429a660489 100644 --- a/mysql-test/t/federated_bug_13118.test +++ b/mysql-test/t/federated_bug_13118.test @@ -1,3 +1,5 @@ +# should work with embedded server after mysqltest is fixed +-- source include/not_embedded.inc source include/federated.inc; diff --git a/mysql-test/t/federated_transactions.test b/mysql-test/t/federated_transactions.test index 1a5b14ca8b4..5095c8ce9c3 100644 --- a/mysql-test/t/federated_transactions.test +++ b/mysql-test/t/federated_transactions.test @@ -1,3 +1,5 @@ +# should work with embedded server after mysqltest is fixed +-- source include/not_embedded.inc source include/have_bdb.inc; source include/federated.inc; diff --git a/mysql-test/t/flush_table.test b/mysql-test/t/flush_table.test index 0ea0ac0840a..50e7e91419a 100644 --- a/mysql-test/t/flush_table.test +++ b/mysql-test/t/flush_table.test @@ -4,6 +4,8 @@ # Test of flush table # +# Should work in embedded server after mysqltest is fixed +-- source include/not_embedded.inc --disable_warnings drop table if exists t1,t2; --enable_warnings diff --git a/mysql-test/t/handler.test b/mysql-test/t/handler.test index 3fb09df5f2f..a7f1eeaa2cc 100644 --- a/mysql-test/t/handler.test +++ b/mysql-test/t/handler.test @@ -2,6 +2,9 @@ # test of HANDLER ... # +# should work in embedded server after mysqltest is fixed +-- source include/not_embedded.inc + --disable_warnings drop table if exists t1,t3,t4,t5; --enable_warnings diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index 2f91f27ac79..7f61b4f080a 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -1,6 +1,10 @@ # This test uses grants, which can't get tested for embedded server -- source include/not_embedded.inc +# check that CSV engine was compiled in, as the result of the test +# depends on the presence of the log tables (which are CSV-based). 
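+# (a note on the guard, stated as an assumption rather than a quote of the include file: have_*.inc scripts typically probe the server, e.g. SELECT support FROM information_schema.engines WHERE engine = 'CSV', and skip the test when no usable row comes back)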
+--source include/have_csv.inc + # Test for information_schema.schemata & # show databases diff --git a/mysql-test/t/init_connect.test b/mysql-test/t/init_connect.test index 2e3c67a7d38..0ee6387d985 100644 --- a/mysql-test/t/init_connect.test +++ b/mysql-test/t/init_connect.test @@ -2,6 +2,8 @@ # Test of init_connect variable # +# should work with embedded server after mysqltest is fixed +-- source include/not_embedded.inc connect (con0,localhost,root,,); connection con0; select hex(@a); diff --git a/mysql-test/t/innodb.test b/mysql-test/t/innodb.test index 71b92c419ca..10cb1dcf08b 100644 --- a/mysql-test/t/innodb.test +++ b/mysql-test/t/innodb.test @@ -1576,33 +1576,7 @@ connection a; checksum table t1; drop table t1; -# -# BUG#11238 - in prelocking mode SELECT .. FOR UPDATE is changed to -# non-blocking SELECT -# -create table t1 (col1 integer primary key, col2 integer) engine=innodb; -insert t1 values (1,100); -delimiter |; -create function f1 () returns integer begin -declare var1 int; -select col2 into var1 from t1 where col1=1 for update; -return var1; -end| -delimiter ;| -start transaction; -select f1(); -connection b; -send update t1 set col2=0 where col1=1; connection default; -select * from t1; -connection a; -rollback; -connection b; -reap; -rollback; -connection default; -drop table t1; -drop function f1; disconnect a; disconnect b; diff --git a/mysql-test/t/innodb_notembedded.test b/mysql-test/t/innodb_notembedded.test new file mode 100644 index 00000000000..1d8a590be78 --- /dev/null +++ b/mysql-test/t/innodb_notembedded.test @@ -0,0 +1,36 @@ +-- source include/not_embedded.inc +-- source include/have_innodb.inc + +connect (a,localhost,root,,); +connect (b,localhost,root,,); + + +# +# BUG#11238 - in prelocking mode SELECT .. FOR UPDATE is changed to +# non-blocking SELECT +# +create table t1 (col1 integer primary key, col2 integer) engine=innodb; +insert t1 values (1,100); +delimiter |; +create function f1 () returns integer begin +declare var1 int; +select col2 into var1 from t1 where col1=1 for update; +return var1; +end| +delimiter ;| +start transaction; +select f1(); +connection b; +send update t1 set col2=0 where col1=1; +connection default; +select * from t1; +connection a; +rollback; +connection b; +reap; +rollback; +connection default; +drop table t1; +drop function f1; +disconnect a; +disconnect b; diff --git a/mysql-test/t/loaddata.test b/mysql-test/t/loaddata.test index 09d97a42714..014d09da746 100644 --- a/mysql-test/t/loaddata.test +++ b/mysql-test/t/loaddata.test @@ -114,3 +114,27 @@ select * from t1; drop table t1, t2; # End of 5.0 tests + + +# +# Bug#12448 LOAD DATA / SELECT INTO OUTFILE +# doesn't work with multibyte path name +# +CREATE TABLE t1 (a int); +INSERT INTO t1 VALUES (1); +SET NAMES latin1; +SET character_set_filesystem=filename; +select @@character_set_filesystem; +SELECT * INTO OUTFILE 't-1' FROM t1; +DELETE FROM t1; +LOAD DATA INFILE 't-1' INTO TABLE t1; +SELECT * FROM t1; +DELETE FROM t1; +SET character_set_filesystem=latin1; +select @@character_set_filesystem; +LOAD DATA INFILE 't@002d1' INTO TABLE t1; +SELECT * FROM t1; +DROP TABLE t1; +--exec rm $MYSQL_TEST_DIR/var/master-data/test/t@002d1 +SET character_set_filesystem=default; +select @@character_set_filesystem; diff --git a/mysql-test/t/log_tables.test b/mysql-test/t/log_tables.test new file mode 100644 index 00000000000..8c53fe8f028 --- /dev/null +++ b/mysql-test/t/log_tables.test @@ -0,0 +1,148 @@ +# this test needs multithreaded mysqltest +-- source include/not_embedded.inc +# +# Basic 
log tables test +# + +# check that CSV engine was compiled in +--source include/have_csv.inc + +use mysql; + +# +# Check that log tables work and we can do basic selects. This also +# tests truncate, which works in a special mode with the log tables +# + +truncate table general_log; +--replace_column 1 TIMESTAMP +select * from general_log; +truncate table slow_log; +--replace_column 1 TIMESTAMP +select * from slow_log; + +# +# We want to check that a record newly written to a log table shows up for +# the query: since log tables use concurrent insert machinery and log tables +# are always locked by an artificial THD, this feature requires an additional +# check in ha_tina::write_row. This simple test should prove that the +# log table flag in the table handler is triggered and working. +# + +truncate table general_log; +--replace_column 1 TIMESTAMP +select * from general_log where argument like '%general_log%'; + + +# +# Check some basic queries interfering with the log tables. +# In our test we'll use a table with verbose comments for the short +# command type names used in the tables +# + +create table join_test (verbose_comment varchar (80), command_type varchar(64)); + +insert into join_test values ("User performed a usual SQL query", "Query"); +insert into join_test values ("New DB connection was registered", "Connect"); +insert into join_test values ("Get the table info", "Field List"); + +select verbose_comment, user_host, argument + from mysql.general_log join join_test + on (mysql.general_log.command_type = join_test.command_type); + +drop table join_test; + +# +# check that flush of the log tables works fine +# + +flush logs; + +# +# check locking of the log tables +# + +--error 1532 +lock tables mysql.general_log WRITE; + +--error 1532 +lock tables mysql.slow_log WRITE; + +# +# This attempts to get TL_READ_NO_INSERT lock, which is incompatible with +# TL_WRITE_CONCURRENT_INSERT. This should fail. We issue this error as log +# tables are always opened and locked by the logger. +# + +--error 1533 +lock tables mysql.general_log READ; + +--error 1533 +lock tables mysql.slow_log READ; + +# +# This call should result in TL_READ lock on the log table. This is ok and +# should pass.
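+# (sketch of the reasoning from the comments above: READ LOCAL maps to a plain TL_READ lock that tolerates concurrent inserts, so it does not collide with the TL_WRITE_CONCURRENT_INSERT lock the logger holds on these tables, whereas the plain READ above asked for TL_READ_NO_INSERT and was refused)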
+# + +lock tables mysql.slow_log READ LOCAL, mysql.general_log READ LOCAL; + +unlock tables; + +# +# check that FLUSH LOGS waits for all readers of the log table to vanish +# + +connect (con1,localhost,root,,); +connect (con2,localhost,root,,); + +connection con1; + +lock tables mysql.general_log READ LOCAL; + +connection con2; + +# this should wait for log tables to unlock +send flush logs; + +connection con1; + +unlock tables; + +# this connection should be alive by this time +connection con2; + +reap; + +select "Mark that we woke up from flush logs in the test" + as "test passed"; + +# +# perform the same check for TRUNCATE: it should also wait for readers +# to disappear +# + +connection con1; + +lock tables mysql.general_log READ LOCAL; + +connection con2; + +# this should wait for log tables to unlock +send truncate mysql.general_log; + +connection con1; + +unlock tables; + +# this connection should be alive by this time +connection con2; + +reap; + +select "Mark that we woke up from TRUNCATE in the test" + as "test passed"; + +disconnect con2; +disconnect con1; + diff --git a/mysql-test/t/mysql.test b/mysql-test/t/mysql.test index 0783c043ef6..7871ec3690e 100644 --- a/mysql-test/t/mysql.test +++ b/mysql-test/t/mysql.test @@ -1,3 +1,5 @@ +# This test should work in embedded server after we fix mysqltest +-- source include/not_embedded.inc # # Testing the MySQL command line client(mysql) # diff --git a/mysql-test/t/mysql_client_test.test b/mysql-test/t/mysql_client_test.test index 9cacb008d09..797d667b0a3 100644 --- a/mysql-test/t/mysql_client_test.test +++ b/mysql-test/t/mysql_client_test.test @@ -1,3 +1,6 @@ +# This test should work in embedded server after we fix mysqltest +-- source include/not_embedded.inc + # We run with different binaries for normal and --embedded-server # # If this test fails with "command "$MYSQL_CLIENT_TEST" failed", diff --git a/mysql-test/t/mysqlcheck.test b/mysql-test/t/mysqlcheck.test index bc88be001ab..167ef435bee 100644 --- a/mysql-test/t/mysqlcheck.test +++ b/mysql-test/t/mysqlcheck.test @@ -1,6 +1,10 @@ # Embedded server doesn't support external clients --source include/not_embedded.inc +# check that CSV engine was compiled in, as the result of the test +# depends on the presence of the log tables (which are CSV-based).
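+# (assumption, for context: mysqlcheck enumerates every table of each database it processes, so once CSV is compiled in the general_log and slow_log tables show up in its output, which is why the expected result needs this guard)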
+--source include/have_csv.inc + # # Bug #13783 mysqlcheck tries to optimize and analyze information_schema # diff --git a/mysql-test/t/mysqltest.test b/mysql-test/t/mysqltest.test index a7da53994f0..55f08c28719 100644 --- a/mysql-test/t/mysqltest.test +++ b/mysql-test/t/mysqltest.test @@ -1,3 +1,5 @@ +# This test should work in embedded server after mysqltest is fixed +-- source include/not_embedded.inc # ============================================================================ # diff --git a/mysql-test/t/ndb_partition_key.test b/mysql-test/t/ndb_partition_key.test index 76c36924618..7f6120fe094 100644 --- a/mysql-test/t/ndb_partition_key.test +++ b/mysql-test/t/ndb_partition_key.test @@ -63,3 +63,19 @@ insert into t1 values (1,"a",1,1),(2,"a",1,1),(3,"a",1,1); show create table t1; DROP TABLE t1; + +# +# Bug #13155: Problem in Create Table using SHOW CREATE TABLE syntax +# +CREATE TABLE t1 (a int not null primary key) +PARTITION BY KEY(a) +(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); + +drop table t1; + +CREATE TABLE t1 (a int not null primary key); +ALTER TABLE t1 +PARTITION BY KEY(a) +(PARTITION p0 ENGINE = NDB, PARTITION p1 ENGINE = NDB); + +drop table t1; diff --git a/mysql-test/t/ndb_restore.test b/mysql-test/t/ndb_restore.test index 6075c7369ad..5616dea04eb 100644 --- a/mysql-test/t/ndb_restore.test +++ b/mysql-test/t/ndb_restore.test @@ -8,6 +8,9 @@ drop table if exists t1,t2,t3,t4,t5,t6,t7,t8,t9; drop table if exists t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; --enable_warnings +--exec $NDB_TOOLS_DIR/ndb_show_tables +--exec $NDB_MGM --no-defaults -e "all dump 1000" >> $NDB_TOOLS_OUTPUT + CREATE TABLE `t1_c` ( `capgoaledatta` smallint(5) unsigned NOT NULL auto_increment, `goaledatta` char(2) NOT NULL default '', @@ -205,6 +208,151 @@ select count(*) from (select * from t9 union select * from t9_c) a; +# +# Try Partitioned tables as well +# +ALTER TABLE t1_c +PARTITION BY RANGE (`capgoaledatta`) +(PARTITION p0 VALUES LESS THAN MAXVALUE); + +ALTER TABLE t2_c +PARTITION BY LIST(`capgotod`) +(PARTITION p0 VALUES IN (0,1,2,3,4,5,6)); + +ALTER TABLE t3_c +PARTITION BY HASH (`CapGoaledatta`); + +ALTER TABLE t5_c +PARTITION BY HASH (`capfa`) +PARTITIONS 4; + +ALTER TABLE t6_c +PARTITION BY LINEAR HASH (`relatta`) +PARTITIONS 4; + +ALTER TABLE t7_c +PARTITION BY LINEAR KEY (`dardtestard`); + +--exec $NDB_MGM --no-defaults -e "start backup" >> $NDB_TOOLS_OUTPUT +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT + +select count(*) from t1; +select count(*) from t1_c; +select count(*) + from (select * from t1 union + select * from t1_c) a; + +select count(*) from t2; +select count(*) from t2_c; +select count(*) + from (select * from t2 union + select * from t2_c) a; + +select count(*) from t3; +select count(*) from t3_c; +select count(*) + from (select * from t3 union + select * from t3_c) a; + +select count(*) from t4; +select count(*) from t4_c; +select count(*) + from (select * from t4 union + select * from t4_c) a; + +select count(*) from t5; +select count(*) from t5_c; +select count(*) + from (select * from t5 union + select * from t5_c) a; + +select count(*) from t6; +select count(*) from t6_c; +select count(*) + from (select * from t6 union + select * from t6_c) a; + +select count(*) from t7; 
+select count(*) from t7_c; +select count(*) + from (select * from t7 union + select * from t7_c) a; + +select count(*) from t8; +select count(*) from t8_c; +select count(*) + from (select * from t8 union + select * from t8_c) a; + +select count(*) from t9; +select count(*) from t9_c; +select count(*) + from (select * from t9 union + select * from t9_c) a; + +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,0)' --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 2 -r --print --print_meta $NDB_BACKUP_DIR/BACKUP/BACKUP-2 >> $NDB_TOOLS_OUTPUT + +select count(*) from t1; +select count(*) from t1_c; +select count(*) + from (select * from t1 union + select * from t1_c) a; + +select count(*) from t2; +select count(*) from t2_c; +select count(*) + from (select * from t2 union + select * from t2_c) a; + +select count(*) from t3; +select count(*) from t3_c; +select count(*) + from (select * from t3 union + select * from t3_c) a; + +select count(*) from t4; +select count(*) from t4_c; +select count(*) + from (select * from t4 union + select * from t4_c) a; + +select count(*) from t5; +select count(*) from t5_c; +select count(*) + from (select * from t5 union + select * from t5_c) a; + +select count(*) from t6; +select count(*) from t6_c; +select count(*) + from (select * from t6 union + select * from t6_c) a; + +select count(*) from t7; +select count(*) from t7_c; +select count(*) + from (select * from t7 union + select * from t7_c) a; + +select count(*) from t8; +select count(*) from t8_c; +select count(*) + from (select * from t8 union + select * from t8_c) a; + +select count(*) from t9; +select count(*) from t9_c; +select count(*) + from (select * from t9 union + select * from t9_c) a; + +drop table t1_c,t2_c,t3_c,t4_c,t5_c,t6_c,t7_c,t8_c,t9_c; +--exec $NDB_TOOLS_DIR/ndb_restore --no-defaults -b 2 -n 1 -m -r --ndb-nodegroup_map '(0,1)' $NDB_BACKUP_DIR/BACKUP/BACKUP-2 2>&1 | grep Translate || true + # # Cleanup # diff --git a/mysql-test/t/partition.test b/mysql-test/t/partition.test index 8b1c3f58071..deb95b7fb5c 100644 --- a/mysql-test/t/partition.test +++ b/mysql-test/t/partition.test @@ -8,6 +8,7 @@ --disable_warnings drop table if exists t1; --enable_warnings + # # Partition by key no partition defined => OK # @@ -97,6 +98,9 @@ partitions 3 partition x2 tablespace ts2, partition x3 tablespace ts3); +CREATE TABLE t2 LIKE t1; + +drop table t2; drop table t1; # @@ -162,6 +166,141 @@ UNLOCK TABLES; drop table t1; +# +# Bug #13644 DROP PARTITION NULL's DATE column +# +CREATE TABLE t1 (a int, name VARCHAR(50), purchased DATE) +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (3), + PARTITION p1 VALUES LESS THAN (7), + PARTITION p2 VALUES LESS THAN (9), + PARTITION p3 VALUES LESS THAN (11)); +INSERT INTO t1 VALUES +(1, 'desk organiser', '2003-10-15'), +(2, 'CD player', '1993-11-05'), +(3, 'TV set', '1996-03-10'), +(4, 'bookcase', '1982-01-10'), +(5, 'exercise bike', '2004-05-09'), +(6, 'sofa', '1987-06-05'), +(7, 'popcorn maker', '2001-11-22'), +(8, 'acquarium', '1992-08-04'), +(9, 'study desk', '1984-09-16'), +(10, 'lava lamp', '1998-12-25'); + +SELECT * from t1 ORDER BY a; +ALTER TABLE t1 DROP PARTITION p0; +SELECT * from t1 ORDER BY a; + +drop table t1; + +# +# Bug #13442; Truncate Partitioned table doesn't work +# + +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +(PARTITION p0 VALUES IN (1,2,3), PARTITION p1 VALUES IN 
(4,5,6)); + +insert into t1 values (1),(2),(3),(4),(5),(6); +select * from t1; +truncate t1; +select * from t1; +truncate t1; +select * from t1; +drop table t1; + +# +# Bug #13445 Partition by KEY method crashes server +# +CREATE TABLE t1 (a int, b int, primary key(a,b)) +PARTITION BY KEY(b,a) PARTITIONS 4; + +insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6); +select * from t1 where a = 4; + +drop table t1; + +# +# Bug #13438: Engine clause in PARTITION clause causes crash +# +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +PARTITIONS 1 +(PARTITION x1 VALUES IN (1) ENGINE=MEMORY); + +show create table t1; +drop table t1; + +# +# Bug #13440: REPLACE causes crash in partitioned table +# +CREATE TABLE t1 (a int, unique(a)) +PARTITION BY LIST (a) +(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20)); + +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +REPLACE t1 SET a = 4; +drop table t1; + +# +# Bug #14365: Crash if value too small in list partitioned table +# +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +(PARTITION x1 VALUES IN (2), PARTITION x2 VALUES IN (3)); + +insert into t1 values (2), (3); +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +insert into t1 values (4); +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +insert into t1 values (1); +drop table t1; + +# +# Bug 14327: PARTITIONS clause gets lost in SHOW CREATE TABLE +# +CREATE TABLE t1 (a int) +PARTITION BY HASH(a) +PARTITIONS 5; + +SHOW CREATE TABLE t1; + +drop table t1; + +# +# Bug #13446: Update to value outside of list values doesn't give error +# +CREATE TABLE t1 (a int) +PARTITION BY RANGE (a) +(PARTITION x1 VALUES LESS THAN (2)); + +insert into t1 values (1); +--error ER_NO_PARTITION_FOR_GIVEN_VALUE +update t1 set a = 5; + +drop table t1; + +# +# Bug #13441: Analyze on partitioned table didn't work +# +CREATE TABLE t1 (a int) +PARTITION BY LIST (a) +(PARTITION x1 VALUES IN (10), PARTITION x2 VALUES IN (20)); + +analyze table t1; + +drop table t1; + +# +# BUG 14524 +# +CREATE TABLE `t1` ( + `id` int(11) default NULL +) ENGINE=BLACKHOLE DEFAULT CHARSET=latin1 PARTITION BY HASH (id) ; +SELECT * FROM t1; + +drop table t1; + # # BUG 14524 # @@ -180,9 +319,9 @@ create table t1 partition by range (a) ( partition p0 values less than(10), partition p1 values less than (20), - partition p2 values less than maxvalue); + partition p2 values less than (25)); -alter table t1 reorganise partition p2 into (partition p2 values less than (30)); +alter table t1 reorganize partition p2 into (partition p2 values less than (30)); show create table t1; drop table t1; @@ -199,7 +338,8 @@ PARTITION BY RANGE (a) PARTITION x8 VALUES LESS THAN (18), PARTITION x9 VALUES LESS THAN (20)); -ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO (PARTITION x1 VALUES LESS THAN (6)); show create table t1; drop table t1; + diff --git a/mysql-test/t/partition_02myisam.test b/mysql-test/t/partition_02myisam.test new file mode 100644 index 00000000000..107d0b89cea --- /dev/null +++ b/mysql-test/t/partition_02myisam.test @@ -0,0 +1,25 @@ +############################################### +# # +# Partition tests MyISAM tables # +# # +############################################### + +# +# NOTE: PLEASE ADD ONLY MYISAM SPECIFIC TESTCASES HERE ! +# NON STORAGE SPECIFIC TESTCASES SHOULD BE ADDED TO +# THE SOURCED FILES ONLY.
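+# (design note on the pattern below: the engine under test is injected through mysqltest variables, i.e. let $engine= 'MYISAM'; followed by eval SET SESSION storage_engine=$engine;, and the engine-neutral cases are then pulled in via -- source include/partition_1.inc)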
+# + +# Storage engine to be tested +let $engine= 'MYISAM'; +eval SET SESSION storage_engine=$engine; + + +# Other storage engine <> storage engine to be tested +let $engine_other= 'MEMORY'; +# number of rows for the INSERT/UPDATE/DELETE/SELECT experiments +# on partitioned tables +# Attention: At the moment the result files fit @max_row = 200 only +SET @max_row = 200; + +-- source include/partition_1.inc diff --git a/mysql-test/t/partition_03ndb.test b/mysql-test/t/partition_03ndb.test new file mode 100644 index 00000000000..3190ab9dfc7 --- /dev/null +++ b/mysql-test/t/partition_03ndb.test @@ -0,0 +1,26 @@ +############################################### +# # +# Partition tests NDB tables # +# # +############################################### + +# +# NOTE: PLEASE ADD ONLY NDB SPECIFIC TESTCASES HERE ! +# NON STORAGE SPECIFIC TESTCASES SHOULD BE ADDED TO +# THE SOURCED FILES ONLY. +# + +# Storage engine to be tested +let $engine= 'NDB'; +-- source include/have_ndb.inc +eval SET SESSION storage_engine=$engine; + + +# Other storage engine <> storage engine to be tested +let $engine_other= 'MEMORY'; +# number of rows for the INSERT/UPDATE/DELETE/SELECT experiments +# on partitioned tables +# Attention: At the moment the result files fit @max_row = 200 only +SET @max_row = 200; + +-- source include/partition_1.inc diff --git a/mysql-test/t/partition_error.test b/mysql-test/t/partition_error.test index ea12bbc5207..03a2ab41807 100644 --- a/mysql-test/t/partition_error.test +++ b/mysql-test/t/partition_error.test @@ -4,6 +4,10 @@ # -- source include/have_partition.inc +--disable_warnings +drop table if exists t1; +--enable_warnings + # # Partition by key stand-alone error # @@ -727,6 +731,14 @@ partitions 2 (partition x1 values in 4, partition x2 values in (5)); + +# +# Bug #13439: Crash when LESS THAN (non-literal) +# +--error 1054 +CREATE TABLE t1 (a int) +PARTITION BY RANGE (a) +(PARTITION p0 VALUES LESS THAN (x1)); + # # No partition for the given value # diff --git a/mysql-test/t/partition_mgm_err.test b/mysql-test/t/partition_mgm_err.test index 92848fc135e..c12f1c05c05 100644 --- a/mysql-test/t/partition_mgm_err.test +++ b/mysql-test/t/partition_mgm_err.test @@ -4,6 +4,10 @@ # -- source include/have_partition.inc +--disable_warnings +drop table if exists t1; +--enable_warnings + # # Try faulty DROP PARTITION and COALESCE PARTITION # @@ -21,7 +25,7 @@ PARTITION BY RANGE (a) PARTITION x9 VALUES LESS THAN (20)); --error ER_REORG_OUTSIDE_RANGE -ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO (PARTITION x01 VALUES LESS THAN (2), PARTITION x11 VALUES LESS THAN (5)); @@ -38,30 +42,35 @@ ALTER TABLE t1 DROP PARTITION x10, x1, x2, x1; ALTER TABLE t1 DROP PARTITION x10, x1, x2, x3; --error ER_REORG_PARTITION_NOT_EXIST -ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2,x3,x4,x5,x6,x7,x8,x9,x10 INTO (PARTITION x11 VALUES LESS THAN (22)); --error ER_SAME_NAME_PARTITION -ALTER TABLE t1 REORGANISE PARTITION x0,x1,x2 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1,x2 INTO (PARTITION x3 VALUES LESS THAN (6)); --error ER_CONSECUTIVE_REORG_PARTITIONS -ALTER TABLE t1 REORGANISE PARTITION x0, x2 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0, x2 INTO (PARTITION x11 VALUES LESS THAN (2)); --error ER_DROP_PARTITION_NON_EXISTENT -ALTER TABLE t1 REORGANISE PARTITION x0, x1, x1 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0, x1, x1 INTO (PARTITION x11 VALUES LESS THAN (4)); --error
ER_REORG_OUTSIDE_RANGE -ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO (PARTITION x01 VALUES LESS THAN (5)); ---error ER_RANGE_NOT_INCREASING_ERROR -ALTER TABLE t1 REORGANISE PARTITION x0,x1 INTO +--error ER_REORG_OUTSIDE_RANGE +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO (PARTITION x01 VALUES LESS THAN (4), PARTITION x11 VALUES LESS THAN (2)); +--error ER_RANGE_NOT_INCREASING_ERROR +ALTER TABLE t1 REORGANIZE PARTITION x0,x1 INTO +(PARTITION x01 VALUES LESS THAN (6), + PARTITION x11 VALUES LESS THAN (4)); + DROP TABLE t1; CREATE TABLE t1 (a int) diff --git a/mysql-test/t/query_cache.test b/mysql-test/t/query_cache.test index 9b4a64872d9..a99d802e5a5 100644 --- a/mysql-test/t/query_cache.test +++ b/mysql-test/t/query_cache.test @@ -822,29 +822,14 @@ select sql_cache * from t1 where s1=1; end;// delimiter ;// call f1(); -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; call f1(); -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; call f1(); select sql_cache * from t1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; insert into t1 values (1); select sql_cache * from t1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; call f1(); call f1(); select sql_cache * from t1; -show status like "Qcache_queries_in_cache"; -show status like "Qcache_inserts"; -show status like "Qcache_hits"; flush query cache; reset query cache; flush status; diff --git a/mysql-test/t/query_cache_notembedded.test b/mysql-test/t/query_cache_notembedded.test index fd4785ffe95..5e1ab7051e5 100644 --- a/mysql-test/t/query_cache_notembedded.test +++ b/mysql-test/t/query_cache_notembedded.test @@ -97,4 +97,88 @@ connection root; SELECT * FROM t1; drop table t1; +# +# query in QC from normal execution and SP (BUG#6897) +# improved to also test BUG#3583 and BUG#12990 +# +flush query cache; +reset query cache; +flush status; +delimiter //; +create table t1 (s1 int)// +create procedure f1 () begin +select sql_cache * from t1; +select sql_cache * from t1; +select sql_cache * from t1; +end;// +create procedure f2 () begin +select sql_cache * from t1 where s1=1; +select sql_cache * from t1; +end;// +create procedure f3 () begin +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +end;// +create procedure f4 () begin +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +select sql_cache * from t1 where s1=1; +end;// +delimiter ;// +call f1(); +--replace_result 1 3 +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +call f1(); +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +call f1(); +select sql_cache * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +insert into t1 values (1); +select sql_cache * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +call f1(); +call f1(); +select sql_cache * from t1; +show status like "Qcache_queries_in_cache"; +show status like "Qcache_inserts"; +show status like "Qcache_hits"; +flush query cache; +reset query cache; +flush 
status; +select sql_cache * from t1; +select sql_cache * from t1 where s1=1; +call f1(); +call f2(); +call f3(); +call f4(); +call f4(); +call f3(); +call f2(); +select sql_cache * from t1 where s1=1; +insert into t1 values (2); +call f1(); +select sql_cache * from t1 where s1=1; +select sql_cache * from t1; +call f1(); +call f3(); +call f3(); +call f1(); + +drop procedure f1; +drop procedure f2; +drop procedure f3; +drop procedure f4; +drop table t1; + + set GLOBAL query_cache_size=0; diff --git a/mysql-test/t/read_only.test b/mysql-test/t/read_only.test index 0861951a6a1..1e92e438122 100644 --- a/mysql-test/t/read_only.test +++ b/mysql-test/t/read_only.test @@ -1,6 +1,9 @@ # Test of the READ_ONLY global variable: # check that it blocks updates unless they are only on temporary tables. +# should work with embedded server after mysqltest is fixed +-- source include/not_embedded.inc + --disable_warnings DROP TABLE IF EXISTS t1,t2,t3; --enable_warnings diff --git a/mysql-test/t/rpl_ndb_bank.test b/mysql-test/t/rpl_ndb_bank.test index c79c85558fe..1b900236963 100644 --- a/mysql-test/t/rpl_ndb_bank.test +++ b/mysql-test/t/rpl_ndb_bank.test @@ -114,10 +114,10 @@ CREATE DATABASE BANK; # start by taking a backup on master --connection master RESET MASTER; ---exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT +--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT # there is no neat way to find the backupid, this is a hack to find it... ---exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat +--exec $NDB_TOOLS_DIR/ndb_select_all --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > var/tmp.dat CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT) ENGINE = HEAP; DELETE FROM cluster_replication.backup_info; LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ','; diff --git a/mysql-test/t/rpl_ndb_sync.test b/mysql-test/t/rpl_ndb_sync.test index b449735e1be..2e1e96d87de 100644 --- a/mysql-test/t/rpl_ndb_sync.test +++ b/mysql-test/t/rpl_ndb_sync.test @@ -25,8 +25,8 @@ SELECT hex(c1),hex(c2),c3 FROM t1 ORDER BY c3; SELECT hex(c2),hex(c3),c1 FROM t2 ORDER BY c1; # take a backup on master ---exec $NDB_MGM --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -e "start backup" >> $NDB_TOOLS_OUTPUT ---exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring=localhost:$NDBCLUSTER_PORT -d sys -D , SYSTAB_0 | grep 520093696 > var/tmp.dat +--exec $NDB_MGM --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -e "start backup" >> $NDB_TOOLS_OUTPUT +--exec $NDB_TOOLS_DIR/ndb_select_all --no-defaults --ndb-connectstring="localhost:$NDBCLUSTER_PORT" -d sys --delimiter=',' SYSTAB_0 | grep 520093696 > var/tmp.dat CREATE TABLE IF NOT EXISTS cluster_replication.backup_info (id INT, backup_id INT); DELETE FROM cluster_replication.backup_info; LOAD DATA INFILE '../../var/tmp.dat' INTO TABLE cluster_replication.backup_info FIELDS TERMINATED BY ','; diff --git a/mysql-test/t/show_check.test b/mysql-test/t/show_check.test index 32cd9d7db39..7ffafe1374a 100644 --- a/mysql-test/t/show_check.test +++ b/mysql-test/t/show_check.test @@ -2,6 +2,10 @@ # embedded server testing -- source include/not_embedded.inc +# check that CSV engine was compiled in, as the result of the test 
+# depends on the presence of the log tables (which are CSV-based).
+--source include/have_csv.inc
+
#
# Test of some show commands
#
diff --git a/mysql-test/t/skip_grants.test b/mysql-test/t/skip_grants.test
index 16b0fbc4d25..a0164cb0759 100644
--- a/mysql-test/t/skip_grants.test
+++ b/mysql-test/t/skip_grants.test
@@ -1,3 +1,6 @@
+# This test is not performed with the embedded server
+-- source include/not_embedded.inc
+
--disable_warnings
drop table if exists t1,v1;
drop view if exists t1,v1;
diff --git a/mysql-test/t/sp-destruct.test b/mysql-test/t/sp-destruct.test
index a2a66090866..bb61f3383bc 100644
--- a/mysql-test/t/sp-destruct.test
+++ b/mysql-test/t/sp-destruct.test
@@ -7,6 +7,11 @@
# In the case of trouble you might want to skip this.
#

+# The embedded server returns different paths in error messages
+# in lines like 'call bug14233();'
+# mysqltest should be fixed to allow REPLACE_RESULT in error messages
+-- source include/not_embedded.inc
+
# We're using --system things that probably don't work on Windows.
--source include/not_windows.inc

@@ -35,10 +40,13 @@ create trigger t1_ai after insert on t1 for each row call bug14233();

# Unsupported tampering with the mysql.proc definition
alter table mysql.proc drop type;
+--replace_result $MYSQL_TEST_DIR .
--error ER_SP_PROC_TABLE_CORRUPT
call bug14233();
+--replace_result $MYSQL_TEST_DIR .
--error ER_SP_PROC_TABLE_CORRUPT
create view v1 as select bug14233_f();
+--replace_result $MYSQL_TEST_DIR .
--error ER_SP_PROC_TABLE_CORRUPT
insert into t1 values (0);
diff --git a/mysql-test/t/sp-error.test b/mysql-test/t/sp-error.test
index cf8f8dfc79c..cabd00fe5f9 100644
--- a/mysql-test/t/sp-error.test
+++ b/mysql-test/t/sp-error.test
@@ -647,28 +647,6 @@ create table t5 (x int)|
call bug3294()|
drop procedure bug3294|

-#
-# BUG#6807: Stored procedure crash if CREATE PROCEDURE ... KILL QUERY
-#
---disable_warnings
-drop procedure if exists bug6807|
---enable_warnings
-create procedure bug6807()
-begin
-  declare id int;
-
-  set id = connection_id();
-  kill query id;
-  select 'Not reached';
-end|
-
---error 1317
-call bug6807()|
---error 1317
-call bug6807()|
-
-drop procedure bug6807|
-
#
# BUG#876: Stored Procedures: Invalid SQLSTATE is allowed in
# a DECLARE ? HANDLER FOR stmt.
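Several of the test and bootstrap changes in this patch guard on the CSV engine, since the new 5.1 log tables are CSV-based. As a hedged sketch (not part of the patch), the guard the scripts express with `SELECT @@have_csv = 'YES'` looks like this from a C-API client; `con` is assumed to be an already-connected handle:

  #include <mysql.h>
  #include <string.h>

  /* Return 1 if the server was built with the CSV engine, else 0. */
  static int csv_enabled(MYSQL *con)
  {
    MYSQL_RES *res;
    MYSQL_ROW row;
    int enabled= 0;

    if (mysql_query(con, "SELECT @@have_csv"))
      return 0;
    if ((res= mysql_store_result(con)))
    {
      if ((row= mysql_fetch_row(res)) && row[0])
        enabled= (strcmp(row[0], "YES") == 0);
      mysql_free_result(res);
    }
    return enabled;
  }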
diff --git a/mysql-test/t/sp-threads.test b/mysql-test/t/sp-threads.test index 70c1efb1f0b..d8a8ce5dae7 100644 --- a/mysql-test/t/sp-threads.test +++ b/mysql-test/t/sp-threads.test @@ -1,3 +1,5 @@ +# This test should work in embedded server after mysqltest is fixed +-- source include/not_embedded.inc # # Testing stored procedures with multiple connections, # except security/privilege tests, they go to sp-security.test diff --git a/mysql-test/t/sp.test b/mysql-test/t/sp.test index add7cf22a1e..62a90f36398 100644 --- a/mysql-test/t/sp.test +++ b/mysql-test/t/sp.test @@ -1444,11 +1444,11 @@ end| call ifac(20)| select * from fac| drop table fac| ---replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' +--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' show function status like '%f%'| drop procedure ifac| drop function fac| ---replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' +--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' show function status like '%f%'| @@ -1531,7 +1531,7 @@ begin end while; end| show create procedure opp| ---replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' +--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' show procedure status like '%p%'| # This isn't the fastest way in the world to compute prime numbers, so @@ -1549,7 +1549,7 @@ select * from primes where i=45 or i=100 or i=199| drop table primes| drop procedure opp| drop procedure ip| ---replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' +--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' show procedure status like '%p%'| @@ -1617,13 +1617,13 @@ drop procedure if exists bar| create procedure bar(x char(16), y int) comment "111111111111" sql security invoker insert into test.t1 values (x, y)| ---replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' +--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' show procedure status like 'bar'| alter procedure bar comment "2222222222" sql security definer| alter procedure bar comment "3333333333"| alter procedure bar| show create procedure bar| ---replace_column 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' +--replace_column 4 'root@localhost' 5 '0000-00-00 00:00:00' 6 '0000-00-00 00:00:00' show procedure status like 'bar'| drop procedure bar| @@ -2573,7 +2573,6 @@ begin show databases like 'foo'; show errors; show columns from t1; - show grants for 'root'@'localhost'; show keys from t1; show open tables like 'foo'; show privileges; @@ -2595,20 +2594,6 @@ call bug4902()| drop procedure bug4902| -# We need separate SP for SHOW PROCESSLIST since we want use replace_column ---disable_warnings -drop procedure if exists bug4902_2| ---enable_warnings -create procedure bug4902_2() -begin - show processlist; -end| ---replace_column 1 # 6 # 3 localhost -call bug4902_2()| ---replace_column 1 # 6 # 3 localhost -call bug4902_2()| -drop procedure bug4902_2| - # # BUG#4904 # @@ -2823,44 +2808,6 @@ select @x| delete from t1| drop procedure bug4941| - -# -# BUG#3583: query cache doesn't work for stored procedures -# ---disable_warnings -drop procedure if exists bug3583| ---enable_warnings ---disable_warnings -drop procedure if exists bug3583| ---enable_warnings -create procedure bug3583() -begin - declare c int; - - select * from t1; - select count(*) into c from t1; - select c; -end| - -insert into t1 values ("x", 3), ("y", 5)| -set @x = @@query_cache_size| -set global 
query_cache_size = 10*1024*1024| - -flush status| -flush query cache| -show status like 'Qcache_hits'| -call bug3583()| -show status like 'Qcache_hits'| -call bug3583()| -call bug3583()| -show status like 'Qcache_hits'| - -set global query_cache_size = @x| -flush status| -flush query cache| -delete from t1| -drop procedure bug3583| - # # BUG#4905: Stored procedure doesn't clear for "Rows affected" # @@ -3169,24 +3116,6 @@ insert into t1 values ("answer", 42)| select id, bug5240() from t1| drop function bug5240| -# -# BUG#5278: Stored procedure packets out of order if SET PASSWORD. -# ---disable_warnings -drop function if exists bug5278| ---enable_warnings -create function bug5278 () returns char -begin - SET PASSWORD FOR 'bob'@'%.loc.gov' = PASSWORD('newpass'); - return 'okay'; -end| - ---error 1133 -select bug5278()| ---error 1133 -select bug5278()| -drop function bug5278| - # # BUG#7992: rolling back temporary Item tree changes in SP # @@ -4809,24 +4738,6 @@ select bug10100f(5)| call bug10100t(5)| #end of the stack checking -set @@max_sp_recursion_depth=255| -set @var=1| -#disable log because error about stack overrun contains numbers which -#depend on a system --- disable_result_log --- error ER_STACK_OVERRUN_NEED_MORE -call bug10100p(255, @var)| --- error ER_STACK_OVERRUN_NEED_MORE -call bug10100pt(1,255)| --- error ER_STACK_OVERRUN_NEED_MORE -call bug10100pv(1,255)| --- error ER_STACK_OVERRUN_NEED_MORE -call bug10100pd(1,255)| --- error ER_STACK_OVERRUN_NEED_MORE -call bug10100pc(1,255)| --- enable_result_log -set @@max_sp_recursion_depth=0| - deallocate prepare stmt2| drop function bug10100f| diff --git a/mysql-test/t/sp_notembedded.test b/mysql-test/t/sp_notembedded.test new file mode 100644 index 00000000000..f7ba9803fda --- /dev/null +++ b/mysql-test/t/sp_notembedded.test @@ -0,0 +1,261 @@ +# Can't test with embedded server +-- source include/not_embedded.inc + +delimiter |; + +# +# BUG#4902: Stored procedure with SHOW WARNINGS leads to packet error +# +# Added tests for show grants command +--disable_warnings +drop procedure if exists bug4902| +--enable_warnings +create procedure bug4902() +begin + show grants for 'root'@'localhost'; +end| +--disable_parsing +show binlog events; +show storage engines; +show master status; +show slave hosts; +show slave status; +--enable_parsing + +call bug4902()| +call bug4902()| + +drop procedure bug4902| + +# We need separate SP for SHOW PROCESSLIST since we want use replace_column +--disable_warnings +drop procedure if exists bug4902_2| +--enable_warnings +create procedure bug4902_2() +begin + show processlist; +end| +--replace_column 1 # 6 # 3 localhost +call bug4902_2()| +--replace_column 1 # 6 # 3 localhost +call bug4902_2()| +drop procedure bug4902_2| + + +# +# BUG#5278: Stored procedure packets out of order if SET PASSWORD. 
+# +--disable_warnings +drop function if exists bug5278| +--enable_warnings +create function bug5278 () returns char +begin + SET PASSWORD FOR 'bob'@'%.loc.gov' = PASSWORD('newpass'); + return 'okay'; +end| + +--error 1133 +select bug5278()| +--error 1133 +select bug5278()| +drop function bug5278| + + +--disable_warnings +drop table if exists t1| +--enable_warnings +create table t1 ( + id char(16) not null default '', + data int not null +)| +# +# BUG#3583: query cache doesn't work for stored procedures +# +--disable_warnings +drop procedure if exists bug3583| +--enable_warnings +--disable_warnings +drop procedure if exists bug3583| +--enable_warnings +create procedure bug3583() +begin + declare c int; + + select * from t1; + select count(*) into c from t1; + select c; +end| + +insert into t1 values ("x", 3), ("y", 5)| +set @x = @@query_cache_size| +set global query_cache_size = 10*1024*1024| + +flush status| +flush query cache| +show status like 'Qcache_hits'| +call bug3583()| +show status like 'Qcache_hits'| +call bug3583()| +call bug3583()| +show status like 'Qcache_hits'| + +set global query_cache_size = @x| +flush status| +flush query cache| +delete from t1| +drop procedure bug3583| +drop table t1; + +# +# BUG#6807: Stored procedure crash if CREATE PROCEDURE ... KILL QUERY +# +--disable_warnings +drop procedure if exists bug6807| +--enable_warnings +create procedure bug6807() +begin + declare id int; + + set id = connection_id(); + kill query id; + select 'Not reached'; +end| + +--error 1317 +call bug6807()| +--error 1317 +call bug6807()| + +drop procedure bug6807| + +# +# BUG#10100: function (and stored procedure?) recursivity problem +# +--disable_warnings +drop function if exists bug10100f| +drop procedure if exists bug10100p| +drop procedure if exists bug10100t| +drop procedure if exists bug10100pt| +drop procedure if exists bug10100pv| +drop procedure if exists bug10100pd| +drop procedure if exists bug10100pc| +--enable_warnings +# routines with simple recursion +create function bug10100f(prm int) returns int +begin + if prm > 1 then + return prm * bug10100f(prm - 1); + end if; + return 1; +end| +create procedure bug10100p(prm int, inout res int) +begin + set res = res * prm; + if prm > 1 then + call bug10100p(prm - 1, res); + end if; +end| +create procedure bug10100t(prm int) +begin + declare res int; + set res = 1; + call bug10100p(prm, res); + select res; +end| + +# a procedure which use tables and recursion +create table t3 (a int)| +insert into t3 values (0)| +create view v1 as select a from t3; +create procedure bug10100pt(level int, lim int) +begin + if level < lim then + update t3 set a=level; + FLUSH TABLES; + call bug10100pt(level+1, lim); + else + select * from t3; + end if; +end| +# view & recursion +create procedure bug10100pv(level int, lim int) +begin + if level < lim then + update v1 set a=level; + FLUSH TABLES; + call bug10100pv(level+1, lim); + else + select * from v1; + end if; +end| +# dynamic sql & recursion +prepare stmt2 from "select * from t3;"; +create procedure bug10100pd(level int, lim int) +begin + if level < lim then + select level; + prepare stmt1 from "update t3 set a=a+2"; + execute stmt1; + FLUSH TABLES; + execute stmt1; + FLUSH TABLES; + execute stmt1; + FLUSH TABLES; + deallocate prepare stmt1; + execute stmt2; + select * from t3; + call bug10100pd(level+1, lim); + else + execute stmt2; + end if; +end| +# cursor & recursion +create procedure bug10100pc(level int, lim int) +begin + declare lv int; + declare c cursor for select a from t3; + open 
c; + if level < lim then + select level; + fetch c into lv; + select lv; + update t3 set a=level+lv; + FLUSH TABLES; + call bug10100pc(level+1, lim); + else + select * from t3; + end if; + close c; +end| + +#end of the stack checking +set @@max_sp_recursion_depth=255| +set @var=1| +#disable log because error about stack overrun contains numbers which +#depend on a system +-- disable_result_log +-- error ER_STACK_OVERRUN_NEED_MORE +call bug10100p(255, @var)| +-- error ER_STACK_OVERRUN_NEED_MORE +call bug10100pt(1,255)| +-- error ER_STACK_OVERRUN_NEED_MORE +call bug10100pv(1,255)| +-- error ER_STACK_OVERRUN_NEED_MORE +call bug10100pd(1,255)| +-- error ER_STACK_OVERRUN_NEED_MORE +call bug10100pc(1,255)| +-- enable_result_log +set @@max_sp_recursion_depth=0| + +deallocate prepare stmt2| + +drop function bug10100f| +drop procedure bug10100p| +drop procedure bug10100t| +drop procedure bug10100pt| +drop procedure bug10100pv| +drop procedure bug10100pd| +drop procedure bug10100pc| +drop view v1| + +delimiter ;| diff --git a/mysql-test/t/subselect_notembedded.test b/mysql-test/t/subselect_notembedded.test new file mode 100644 index 00000000000..c5b23f6dac8 --- /dev/null +++ b/mysql-test/t/subselect_notembedded.test @@ -0,0 +1,8 @@ +-- source include/not_embedded.inc + +# +# BUG #10308: purge log with subselect +# + +purge master logs before (select adddate(current_timestamp(), interval -4 day)); + diff --git a/mysql-test/t/system_mysql_db.test b/mysql-test/t/system_mysql_db.test index 27c17da2731..e3d58ab7139 100644 --- a/mysql-test/t/system_mysql_db.test +++ b/mysql-test/t/system_mysql_db.test @@ -2,6 +2,10 @@ # This test must examine integrity of system database "mysql" # +# check that CSV engine was compiled in, as the result of the test +# depends on the presence of the log tables (which are CSV-based). 
+--source include/have_csv.inc + # First delete some tables maybe left over from previous tests --disable_warnings drop table if exists t1,t1aa,t2aa; diff --git a/mysql-test/t/system_mysql_db_fix.test b/mysql-test/t/system_mysql_db_fix.test index 7974b2fd62d..11ed48011d7 100644 --- a/mysql-test/t/system_mysql_db_fix.test +++ b/mysql-test/t/system_mysql_db_fix.test @@ -1,6 +1,10 @@ # Embedded server doesn't support external clients --source include/not_embedded.inc +# check that CSV engine was compiled in, as the test relies on the presence +# of the log tables (which are CSV-based) +--source include/have_csv.inc + # # This is the test for mysql_fix_privilege_tables # @@ -85,7 +89,10 @@ INSERT INTO user VALUES ('localhost','', '','N','N','N','N','N','N','N','N',' -- disable_query_log -DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, procs_priv, help_category, help_keyword, help_relation, help_topic, proc, time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, time_zone_transition_type, event; +DROP TABLE db, host, user, func, plugin, tables_priv, columns_priv, +procs_priv, help_category, help_keyword, help_relation, help_topic, proc, +time_zone, time_zone_leap_second, time_zone_name, time_zone_transition, +time_zone_transition_type, general_log, slow_log, event; -- enable_query_log diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 3fed763e3af..33741b9bf46 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -2079,15 +2079,6 @@ order by users_names; drop view v1, v2; drop table t1, t2; -# -# DEFINER information check -# --- error ER_MALFORMED_DEFINER -create definer=some_user@`` sql security invoker view v1 as select 1; -create definer=some_user@localhost sql security invoker view v1 as select 1; -show create view v1; -drop view v1; - # # Bug #6808 - Views: CREATE VIEW v ... 
FROM t AS v fails # diff --git a/mysql-test/t/view_grant.test b/mysql-test/t/view_grant.test index 559b5247406..26a4a7e1ec5 100644 --- a/mysql-test/t/view_grant.test +++ b/mysql-test/t/view_grant.test @@ -706,3 +706,12 @@ drop database mysqltest; disconnect user1; disconnect root; connection default; + +# +# DEFINER information check +# +-- error ER_MALFORMED_DEFINER +create definer=some_user@`` sql security invoker view v1 as select 1; +create definer=some_user@localhost sql security invoker view v1 as select 1; +show create view v1; +drop view v1; diff --git a/mysql-test/t/wait_timeout.test b/mysql-test/t/wait_timeout.test index 26f91569868..4c1aeee5c04 100644 --- a/mysql-test/t/wait_timeout.test +++ b/mysql-test/t/wait_timeout.test @@ -1,3 +1,6 @@ +# This tests not performed with embedded server +-- source include/not_embedded.inc + # # Bug #8731: wait_timeout does not work on Mac OS X # diff --git a/mysys/charset-def.c b/mysys/charset-def.c index 0559fe59d06..835ce064b2c 100644 --- a/mysys/charset-def.c +++ b/mysys/charset-def.c @@ -78,6 +78,7 @@ my_bool init_compiled_charsets(myf flags __attribute__((unused))) CHARSET_INFO *cs; add_compiled_collation(&my_charset_bin); + add_compiled_collation(&my_charset_filename); add_compiled_collation(&my_charset_latin1); add_compiled_collation(&my_charset_latin1_bin); diff --git a/mysys/my_compress.c b/mysys/my_compress.c index 0e37d2fef9b..2643d4d16ac 100644 --- a/mysys/my_compress.c +++ b/mysys/my_compress.c @@ -95,4 +95,132 @@ my_bool my_uncompress (byte *packet, ulong *len, ulong *complen) } DBUG_RETURN(0); } + +/* + Internal representation of the frm blob +*/ + +struct frm_blob_header +{ + uint ver; /* Version of header */ + uint orglen; /* Original length of compressed data */ + uint complen; /* Compressed length of data, 0=uncompressed */ +}; + +struct frm_blob_struct +{ + struct frm_blob_header head; + char data[1]; +}; + +/* + packfrm is a method used to compress the frm file for storage in a + handler. This method was developed for the NDB handler and has been moved + here to serve also other uses. + + SYNOPSIS + packfrm() + data Data reference to frm file data + len Length of frm file data + out:pack_data Reference to the pointer to the packed frm data + out:pack_len Length of packed frm file data + + RETURN VALUES + 0 Success + >0 Failure +*/ + +int packfrm(const void *data, uint len, + const void **pack_data, uint *pack_len) +{ + int error; + ulong org_len, comp_len; + uint blob_len; + struct frm_blob_struct *blob; + DBUG_ENTER("packfrm"); + DBUG_PRINT("enter", ("data: %x, len: %d", data, len)); + + error= 1; + org_len= len; + if (my_compress((byte*)data, &org_len, &comp_len)) + goto err; + + DBUG_PRINT("info", ("org_len: %d, comp_len: %d", org_len, comp_len)); + DBUG_DUMP("compressed", (char*)data, org_len); + + error= 2; + blob_len= sizeof(struct frm_blob_header)+org_len; + if (!(blob= (struct frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME)))) + goto err; + + /* Store compressed blob in machine independent format */ + int4store((char*)(&blob->head.ver), 1); + int4store((char*)(&blob->head.orglen), comp_len); + int4store((char*)(&blob->head.complen), org_len); + + /* Copy frm data into blob, already in machine independent format */ + memcpy(blob->data, data, org_len); + + *pack_data= blob; + *pack_len= blob_len; + error= 0; + + DBUG_PRINT("exit", ("pack_data: %x, pack_len: %d", *pack_data, *pack_len)); +err: + DBUG_RETURN(error); + +} + +/* + unpackfrm is a method used to decompress the frm file received from a + handler. 
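+
+  A minimal round-trip sketch for the packfrm()/unpackfrm() pair (a
+  hypothetical caller, assuming orig_frm/orig_len hold raw frm data;
+  error handling elided):
+
+    const void *pack_data, *frm;
+    uint pack_len, frm_len;
+    if (!packfrm(orig_frm, orig_len, &pack_data, &pack_len) &&
+        !unpackfrm(&frm, &frm_len, pack_data))
+    {
+      ..frm/frm_len now hold a copy of orig_frm/orig_len..
+      my_free((char*)frm, MYF(0));
+    }
+    my_free((char*)pack_data, MYF(0));
+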
Like packfrm() above, it was developed for the NDB handler and has been
+  moved here so that it can also serve other clustered storage engines.
+
+  SYNOPSIS
+    unpackfrm()
+    pack_data                Data reference to packed frm file data
+    out:unpack_data          Reference to the pointer to the unpacked frm data
+    out:unpack_len           Length of unpacked frm file data
+
+  RETURN VALUES
+    0                        Success
+    >0                       Failure
+*/
+
+int unpackfrm(const void **unpack_data, uint *unpack_len,
+              const void *pack_data)
+{
+  const struct frm_blob_struct *blob= (struct frm_blob_struct*)pack_data;
+  byte *data;
+  ulong complen, orglen, ver;
+  DBUG_ENTER("unpackfrm");
+  DBUG_PRINT("enter", ("pack_data: %x", pack_data));
+
+  complen= uint4korr((char*)&blob->head.complen);
+  orglen= uint4korr((char*)&blob->head.orglen);
+  ver= uint4korr((char*)&blob->head.ver);
+
+  DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d",
+                     ver,complen,orglen));
+  DBUG_DUMP("blob->data", (char*) blob->data, complen);
+
+  if (ver != 1)
+    DBUG_RETURN(1);
+  if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME))))
+    DBUG_RETURN(2);
+  memcpy(data, blob->data, complen);
+
+  if (my_uncompress(data, &complen, &orglen))
+  {
+    my_free((char*)data, MYF(0));
+    DBUG_RETURN(3);
+  }
+
+  *unpack_data= data;
+  *unpack_len= complen;
+
+  DBUG_PRINT("exit", ("frmdata: %x, len: %d", *unpack_data, *unpack_len));
+  DBUG_RETURN(0);
+}
 #endif /* HAVE_COMPRESS */
diff --git a/mysys/thr_lock.c b/mysys/thr_lock.c
index f5a8b618949..4b3e03750c8 100644
--- a/mysys/thr_lock.c
+++ b/mysys/thr_lock.c
@@ -1009,7 +1009,7 @@ void thr_multi_unlock(THR_LOCK_DATA **data,uint count)
  TL_WRITE_ONLY to abort any new accesses to the lock
*/

-void thr_abort_locks(THR_LOCK *lock)
+void thr_abort_locks(THR_LOCK *lock, bool upgrade_lock)
{
  THR_LOCK_DATA *data;
  DBUG_ENTER("thr_abort_locks");
@@ -1031,7 +1031,7 @@ void thr_abort_locks(THR_LOCK *lock)
  lock->read_wait.last= &lock->read_wait.data;
  lock->write_wait.last= &lock->write_wait.data;
  lock->read_wait.data=lock->write_wait.data=0;
-  if (lock->write.data)
+  if (upgrade_lock && lock->write.data)
    lock->write.data->type=TL_WRITE_ONLY;
  pthread_mutex_unlock(&lock->mutex);
  DBUG_VOID_RETURN;
@@ -1089,6 +1089,213 @@ my_bool thr_abort_locks_for_thread(THR_LOCK *lock, pthread_t thread)
}


+/*
+  Downgrade a WRITE_* lock to a lower WRITE level
+  SYNOPSIS
+    thr_downgrade_write_lock()
+    in_data                   Lock data of thread downgrading its lock
+    new_lock_type             New write lock type
+  RETURN VALUE
+    NONE
+  DESCRIPTION
+    This can be used to downgrade a lock already owned. When the downgrade
+    occurs, other waiters, both readers and writers, can be allowed to
+    start.
+    The previous lock is often TL_WRITE_ONLY but can also be
+    TL_WRITE and TL_WRITE_ALLOW_READ. The normal downgrade variants are
+    TL_WRITE_ONLY => TL_WRITE_ALLOW_READ after a short exclusive lock
+    TL_WRITE_ALLOW_READ => TL_WRITE_ALLOW_WRITE after discovering that the
+    operation didn't need such a high lock.
+    TL_WRITE_ONLY => TL_WRITE after a short exclusive lock while holding a
+    write table lock
+    TL_WRITE_ONLY => TL_WRITE_ALLOW_WRITE after a short exclusive lock when
+    the lock had already been downgraded to TL_WRITE_ALLOW_WRITE earlier
+    The implementation is conservative: rather than start waiters along
+    untried paths it prefers not to start them at all, so only the common
+    cases are handled.
+
+  NOTE:
+    In its current implementation it is only allowed to downgrade from
+    TL_WRITE_ONLY. In this case there are no waiters. Thus no wake-up
+    logic is required.
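+
+  EXAMPLE
+    A hypothetical caller that has held a short exclusive lock and now
+    lets other operations in again (data is the caller's own
+    THR_LOCK_DATA, currently of type TL_WRITE_ONLY):
+
+      thr_downgrade_write_lock(data, TL_WRITE_ALLOW_WRITE);
+
+    With the current TL_WRITE_ONLY-only implementation there are no
+    waiters to wake, so the call merely changes the lock type under the
+    lock's mutex.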
+*/ + +void thr_downgrade_write_lock(THR_LOCK_DATA *in_data, + enum thr_lock_type new_lock_type) +{ + THR_LOCK *lock=in_data->lock; + THR_LOCK_DATA *data, *next; + enum thr_lock_type old_lock_type= in_data->type; + bool start_writers= FALSE; + bool start_readers= FALSE; + DBUG_ENTER("thr_downgrade_write_only_lock"); + + pthread_mutex_lock(&lock->mutex); + DBUG_ASSERT(old_lock_type == TL_WRITE_ONLY); + DBUG_ASSERT(old_lock_type > new_lock_type); + in_data->type= new_lock_type; + check_locks(lock,"after downgrading lock",0); +#if 0 + switch (old_lock_type) + { + case TL_WRITE_ONLY: + case TL_WRITE: + case TL_WRITE_LOW_PRIORITY: + /* + Previous lock was exclusive we are now ready to start up most waiting + threads. + */ + switch (new_lock_type) + { + case TL_WRITE_ALLOW_READ: + /* Still cannot start WRITE operations. Can only start readers. */ + start_readers= TRUE; + break; + case TL_WRITE: + case TL_WRITE_LOW_PRIORITY: + /* + Still cannot start anything, but new requests are no longer + aborted. + */ + break; + case TL_WRITE_ALLOW_WRITE: + /* + We can start both writers and readers. + */ + start_writers= TRUE; + start_readers= TRUE; + break; + case TL_WRITE_CONCURRENT_INSERT: + case TL_WRITE_DELAYED: + /* + This routine is not designed for those. Lock will be downgraded + but no start of waiters will occur. This is not the optimal but + should be a correct behaviour. + */ + break; + default: + DBUG_ASSERT(0); + } + break; + case TL_WRITE_DELAYED: + case TL_WRITE_CONCURRENT_INSERT: + /* + This routine is not designed for those. Lock will be downgraded + but no start of waiters will occur. This is not the optimal but + should be a correct behaviour. + */ + break; + case TL_WRITE_ALLOW_READ: + DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE); + /* + Previously writers were not allowed to start, now it is ok to + start them again. Readers are already allowed so no reason to + handle them. + */ + start_writers= TRUE; + break; + default: + DBUG_ASSERT(0); + break; + } + if (start_writers) + { + /* + At this time the only active writer can be ourselves. Thus we need + not worry about that there are other concurrent write operations + active on the table. Thus we only need to worry about starting + waiting operations. + We also only come here with TL_WRITE_ALLOW_WRITE as the new + lock type, thus we can start other writers also of the same type. + If we find a lock at exclusive level >= TL_WRITE_LOW_PRIORITY we + don't start any more operations that would be mean those operations + will have to wait for things started afterwards. + */ + DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE); + for (data=lock->write_wait.data; data ; data= next) + { + /* + All WRITE requests compatible with new lock type are also + started + */ + next= data->next; + if (start_writers && data->type == new_lock_type) + { + pthread_cond_t *cond= data->cond; + /* + It is ok to start this waiter. + Move from being first in wait queue to be last in write queue. + */ + if (((*data->prev)= data->next)) + data->next->prev= data->prev; + else + lock->write_wait.last= data->prev; + data->prev= lock->write.last; + lock->write.last= &data->next; + data->next= 0; + check_locks(lock, "Started write lock after downgrade",0); + data->cond= 0; + pthread_cond_signal(cond); + } + else + { + /* + We found an incompatible lock, we won't start any more write + requests to avoid letting writers pass other writers in the + queue. 
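+          (This keeps the write queue FIFO: once one waiter is left
+          waiting, no waiter behind it may overtake it.)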
+ */ + start_writers= FALSE; + if (data->type >= TL_WRITE_LOW_PRIORITY) + { + /* + We have an exclusive writer in the queue so we won't start + readers either. + */ + start_readers= FALSE; + } + } + } + } + if (start_readers) + { + DBUG_ASSERT(new_lock_type == TL_WRITE_ALLOW_WRITE || + new_lock_type == TL_WRITE_ALLOW_READ); + /* + When we come here we know that the write locks are + TL_WRITE_ALLOW_WRITE or TL_WRITE_ALLOW_READ. This means that reads + are ok + */ + for (data=lock->read_wait.data; data ; data=next) + { + next= data->next; + /* + All reads are ok to start now except TL_READ_NO_INSERT when + write lock is TL_WRITE_ALLOW_READ. + */ + if (new_lock_type != TL_WRITE_ALLOW_READ || + data->type != TL_READ_NO_INSERT) + { + pthread_cond_t *cond= data->cond; + if (((*data->prev)= data->next)) + data->next->prev= data->prev; + else + lock->read_wait.last= data->prev; + data->prev= lock->read.last; + lock->read.last= &data->next; + data->next= 0; + + if (data->type == TL_READ_NO_INSERT) + lock->read_no_write_count++; + check_locks(lock, "Started read lock after downgrade",0); + data->cond= 0; + pthread_cond_signal(cond); + } + } + } + check_locks(lock,"after starting waiters after downgrading lock",0); +#endif + pthread_mutex_unlock(&lock->mutex); + DBUG_VOID_RETURN; +} /* Upgrade a WRITE_DELAY lock to a WRITE_LOCK */ diff --git a/scripts/make_win_src_distribution.sh b/scripts/make_win_src_distribution.sh index 065c7d06d24..2f2d6bb393e 100644 --- a/scripts/make_win_src_distribution.sh +++ b/scripts/make_win_src_distribution.sh @@ -249,7 +249,7 @@ copy_dir_dirs() { # Input directories to be copied # -for i in client dbug extra storage/heap include \ +for i in client dbug extra storage/heap include storage/archive \ libmysql libmysqld storage/myisam \ storage/myisammrg mysys regex sql strings sql-common sql/examples \ tools vio zlib diff --git a/scripts/mysql_create_system_tables.sh b/scripts/mysql_create_system_tables.sh index a90dfacabbd..eeb3e30e19a 100644 --- a/scripts/mysql_create_system_tables.sh +++ b/scripts/mysql_create_system_tables.sh @@ -42,6 +42,7 @@ i_ht="" c_tzn="" c_tz="" c_tzt="" c_tztt="" c_tzls="" c_pl="" i_tzn="" i_tz="" i_tzt="" i_tztt="" i_tzls="" i_pl="" c_p="" c_pp="" +c_gl="" c_sl="" # Check for old tables if test ! -f $mdata/db.frm @@ -354,6 +355,7 @@ then c_hr="$c_hr comment='keyword-topic relation';" fi + if test ! -f $mdata/time_zone_name.frm then if test "$1" = "verbose" ; then @@ -744,6 +746,27 @@ then fi +if test ! -f $mdata/general_log.frm +then + if test "$1" = "verbose" ; then + echo "Preparing general_log table" 1>&2; + fi + c_gl="$c_gl CREATE PROCEDURE create_general_log_table() BEGIN DECLARE is_csv_enabled int DEFAULT 0; SELECT @@have_csv = 'YES' INTO is_csv_enabled; IF (is_csv_enabled) THEN CREATE TABLE general_log (event_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT, thread_id INTEGER, server_id INTEGER, command_type VARCHAR(64), argument MEDIUMTEXT) engine=CSV CHARACTER SET utf8 comment='General log'; END IF; END; +CALL create_general_log_table(); +DROP PROCEDURE create_general_log_table;" +fi + + +if test ! 
-f $mdata/slow_log.frm +then + if test "$1" = "verbose" ; then + echo "Preparing slow_log table" 1>&2; + fi + c_sl="$c_sl CREATE PROCEDURE create_slow_log_table() BEGIN DECLARE is_csv_enabled int DEFAULT 0; SELECT @@have_csv = 'YES' INTO is_csv_enabled; IF (is_csv_enabled) THEN CREATE TABLE slow_log (start_time TIMESTAMP NOT NULL, user_host MEDIUMTEXT NOT NULL, query_time TIME NOT NULL, lock_time TIME NOT NULL, rows_sent INTEGER NOT NULL, rows_examined INTEGER NOT NULL, db VARCHAR(512), last_insert_id INTEGER, insert_id INTEGER, server_id INTEGER, sql_text MEDIUMTEXT NOT NULL) engine=CSV CHARACTER SET utf8 comment='Slow log'; END IF; END; +CALL create_slow_log_table(); +DROP PROCEDURE create_slow_log_table;" +fi + if test ! -f $mdata/event.frm then c_ev="$c_ev CREATE TABLE event (" @@ -812,6 +835,8 @@ $i_tzls $c_p $c_pp +$c_gl +$c_sl $c_ev CREATE DATABASE IF NOT EXISTS cluster_replication; CREATE TABLE IF NOT EXISTS cluster_replication.binlog_index (Position BIGINT UNSIGNED NOT NULL, File VARCHAR(255) NOT NULL, epoch BIGINT UNSIGNED NOT NULL, inserts BIGINT UNSIGNED NOT NULL, updates BIGINT UNSIGNED NOT NULL, deletes BIGINT UNSIGNED NOT NULL, schemaops BIGINT UNSIGNED NOT NULL, PRIMARY KEY(epoch)) ENGINE=MYISAM; diff --git a/scripts/mysql_fix_privilege_tables.sql b/scripts/mysql_fix_privilege_tables.sql index af995836c0c..d33339e4128 100644 --- a/scripts/mysql_fix_privilege_tables.sql +++ b/scripts/mysql_fix_privilege_tables.sql @@ -527,6 +527,42 @@ ALTER TABLE proc MODIFY db MODIFY comment char(64) collate utf8_bin DEFAULT '' NOT NULL; +-- +-- Create missing log tables (5.1) +-- + +delimiter // +CREATE PROCEDURE create_log_tables() +BEGIN + DECLARE is_csv_enabled int DEFAULT 0; + SELECT @@have_csv = 'YES' INTO is_csv_enabled; + IF (is_csv_enabled) THEN + CREATE TABLE IF NOT EXISTS general_log ( + event_time TIMESTAMP NOT NULL, + user_host MEDIUMTEXT, + thread_id INTEGER, + server_id INTEGER, + command_type VARCHAR(64), + argument MEDIUMTEXT + ) engine=CSV CHARACTER SET utf8 comment='General log'; + CREATE TABLE IF NOT EXISTS slow_log ( + start_time TIMESTAMP NOT NULL, + user_host MEDIUMTEXT NOT NULL, + query_time TIME NOT NULL, + lock_time TIME NOT NULL, + rows_sent INTEGER NOT NULL, + rows_examined INTEGER NOT NULL, + db VARCHAR(512), + last_insert_id INTEGER, + insert_id INTEGER, + server_id INTEGER, + sql_text MEDIUMTEXT NOT NULL + ) engine=CSV CHARACTER SET utf8 comment='Slow log'; + END IF; +END// +delimiter ; +CALL create_log_tables(); +DROP PROCEDURE create_log_tables; # # EVENT table # diff --git a/scripts/mysqld_multi.sh b/scripts/mysqld_multi.sh index b2b85018d7a..2dcc8dc7bc4 100644 --- a/scripts/mysqld_multi.sh +++ b/scripts/mysqld_multi.sh @@ -763,9 +763,6 @@ sub usage print <user,MYF(MY_ALLOW_ZERO_PTR)); my_free(mysql->passwd,MYF(MY_ALLOW_ZERO_PTR)); my_free(mysql->db,MYF(MY_ALLOW_ZERO_PTR)); + my_free(mysql->info_buffer,MYF(MY_ALLOW_ZERO_PTR)); /* Clear pointers for better safety */ - mysql->host_info=mysql->user=mysql->passwd=mysql->db=0; + mysql->info_buffer=mysql->host_info=mysql->user=mysql->passwd=mysql->db=0; } @@ -2476,8 +2478,7 @@ get_info: if (!(mysql->server_status & SERVER_STATUS_AUTOCOMMIT)) mysql->server_status|= SERVER_STATUS_IN_TRANS; - if (!(fields=(*mysql->methods->read_rows)(mysql,(MYSQL_FIELD*)0, - protocol_41(mysql) ? 7 : 5))) + if (!(fields=cli_read_rows(mysql,(MYSQL_FIELD*)0, protocol_41(mysql) ? 
7:5))) DBUG_RETURN(1); if (!(mysql->fields=unpack_fields(fields,&mysql->field_alloc, (uint) field_count,0, diff --git a/sql/ha_archive.cc b/sql/ha_archive.cc index fea4005d131..06130f31504 100644 --- a/sql/ha_archive.cc +++ b/sql/ha_archive.cc @@ -170,6 +170,8 @@ handlerton archive_hton = { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter interface */ HTON_NO_FLAGS }; diff --git a/sql/ha_berkeley.cc b/sql/ha_berkeley.cc index fb9ed2de117..e9168487cf4 100644 --- a/sql/ha_berkeley.cc +++ b/sql/ha_berkeley.cc @@ -149,6 +149,8 @@ handlerton berkeley_hton = { NULL, /* Start Consistent Snapshot */ berkeley_flush_logs, /* Flush logs */ berkeley_show_status, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_CLOSE_CURSORS_AT_COMMIT | HTON_FLUSH_AFTER_RENAME }; diff --git a/sql/ha_blackhole.cc b/sql/ha_blackhole.cc index 38e03d4d1f7..71b4ef3c9dc 100644 --- a/sql/ha_blackhole.cc +++ b/sql/ha_blackhole.cc @@ -57,6 +57,8 @@ handlerton blackhole_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_CAN_RECREATE }; diff --git a/sql/ha_federated.cc b/sql/ha_federated.cc index b218b52bfd9..0dbbf8e1175 100644 --- a/sql/ha_federated.cc +++ b/sql/ha_federated.cc @@ -394,6 +394,8 @@ handlerton federated_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_ALTER_NOT_SUPPORTED }; diff --git a/sql/ha_heap.cc b/sql/ha_heap.cc index bcb0bf07774..2fe4bc7aeb5 100644 --- a/sql/ha_heap.cc +++ b/sql/ha_heap.cc @@ -54,6 +54,8 @@ handlerton heap_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_CAN_RECREATE }; diff --git a/sql/ha_innodb.cc b/sql/ha_innodb.cc index 75c1c380a42..9aee8a63508 100644 --- a/sql/ha_innodb.cc +++ b/sql/ha_innodb.cc @@ -235,6 +235,8 @@ handlerton innobase_hton = { innobase_start_trx_and_assign_read_view, /* Start Consistent Snapshot */ innobase_flush_logs, /* Flush logs */ innobase_show_status, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ HTON_NO_FLAGS }; diff --git a/sql/ha_myisam.cc b/sql/ha_myisam.cc index 87bc2148b03..06a9acf9761 100644 --- a/sql/ha_myisam.cc +++ b/sql/ha_myisam.cc @@ -86,6 +86,8 @@ handlerton myisam_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_CAN_RECREATE }; @@ -293,6 +295,28 @@ err: } #endif /* HAVE_REPLICATION */ + +bool ha_myisam::check_if_locking_is_allowed(uint sql_command, + ulong type, TABLE *table, + uint count, + bool called_by_logger_thread) +{ + /* + To be able to open and lock for reading system tables like 'mysql.proc', + when we already have some tables opened and locked, and avoid deadlocks + we have to disallow write-locking of these tables with any other tables. 
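+
+    For example, a statement that attempts
+      LOCK TABLES mysql.proc WRITE, t1 WRITE;
+    is refused here with ER_WRONG_LOCK_OF_SYSTEM_TABLE, while
+    write-locking the system table alone (count == 1) is still allowed.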
+ */ + if (table->s->system_table && + table->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE && + count != 1) + { + my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table->s->db.str, + table->s->table_name.str); + return FALSE; + } + return TRUE; +} + /* Name is here without an extension */ int ha_myisam::open(const char *name, int mode, uint test_if_locked) diff --git a/sql/ha_myisam.h b/sql/ha_myisam.h index eb3ac9db7e4..86efed27478 100644 --- a/sql/ha_myisam.h +++ b/sql/ha_myisam.h @@ -60,6 +60,10 @@ class ha_myisam: public handler uint max_supported_key_part_length() const { return MI_MAX_KEY_LENGTH; } uint checksum() const; + virtual bool check_if_locking_is_allowed(uint sql_command, + ulong type, TABLE *table, + uint count, + bool called_by_logger_thread); int open(const char *name, int mode, uint test_if_locked); int close(void); int write_row(byte * buf); diff --git a/sql/ha_myisammrg.cc b/sql/ha_myisammrg.cc index 601fe94bf11..36de3dc64e0 100644 --- a/sql/ha_myisammrg.cc +++ b/sql/ha_myisammrg.cc @@ -64,6 +64,8 @@ handlerton myisammrg_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_CAN_RECREATE }; diff --git a/sql/ha_ndbcluster.cc b/sql/ha_ndbcluster.cc index a9c4ea9da9e..3d44c731b80 100644 --- a/sql/ha_ndbcluster.cc +++ b/sql/ha_ndbcluster.cc @@ -52,6 +52,8 @@ static const int parallelism= 0; // createable against NDB from this handler static const int max_transactions= 3; // should really be 2 but there is a transaction to much allocated when loch table is used +static uint ndbcluster_partition_flags(); +static uint ndbcluster_alter_table_flags(uint flags); static bool ndbcluster_init(void); static int ndbcluster_end(ha_panic_function flag); static bool ndbcluster_show_status(THD*,stat_print_fn *,enum ha_stat_type); @@ -72,6 +74,23 @@ static handler *ndbcluster_create_handler(TABLE_SHARE *table) return new ha_ndbcluster(table); } +static uint ndbcluster_partition_flags() +{ + return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY | + HA_CAN_PARTITION_UNIQUE | HA_USE_AUTO_PARTITION); +} + +static uint ndbcluster_alter_table_flags(uint flags) +{ + if (flags & ALTER_DROP_PARTITION) + return 0; + else + return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX | + HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX | + HA_PARTITION_FUNCTION_SUPPORTED); + +} + #define NDB_HIDDEN_PRIMARY_KEY_LENGTH 8 #define NDB_FAILED_AUTO_INCREMENT ~(Uint64)0 @@ -117,10 +136,6 @@ static int rename_share(NDB_SHARE *share, const char *new_key); #endif static void ndb_set_fragmentation(NDBTAB &tab, TABLE *table, uint pk_len); -static int packfrm(const void *data, uint len, const void **pack_data, uint *pack_len); -static int unpackfrm(const void **data, uint *len, - const void* pack_data); - static int ndb_get_table_statistics(Ndb*, const char *, struct Ndb_statistics *); @@ -348,7 +363,7 @@ struct Ndb_local_table_statistics { void ha_ndbcluster::set_rec_per_key() { DBUG_ENTER("ha_ndbcluster::get_status_const"); - for (uint i=0 ; i < table->s->keys ; i++) + for (uint i=0 ; i < table_share->keys ; i++) { table->key_info[i].rec_per_key[table->key_info[i].key_parts-1]= 1; } @@ -447,7 +462,7 @@ void ha_ndbcluster::no_uncommitted_rows_reset(THD *thd) */ void -ha_ndbcluster::invalidate_dictionary_cache(TABLE *table, Ndb *ndb, +ha_ndbcluster::invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb, const char *tabname, bool global) { NDBDICT *dict= ndb->getDictionary(); @@ 
-470,16 +485,16 @@ ha_ndbcluster::invalidate_dictionary_cache(TABLE *table, Ndb *ndb, } else dict->removeCachedTable(tabname); - table->s->version=0L; /* Free when thread is ready */ + share->version=0L; /* Free when thread is ready */ DBUG_VOID_RETURN; } void ha_ndbcluster::invalidate_dictionary_cache(bool global) { NDBDICT *dict= get_ndb()->getDictionary(); - invalidate_dictionary_cache(table, get_ndb(), m_tabname, global); + invalidate_dictionary_cache(table_share, get_ndb(), m_tabname, global); /* Invalidate indexes */ - for (uint i= 0; i < table->s->keys; i++) + for (uint i= 0; i < table_share->keys; i++) { NDBINDEX *index = (NDBINDEX *) m_index[i].index; NDBINDEX *unique_index = (NDBINDEX *) m_index[i].unique_index; @@ -549,7 +564,7 @@ int ha_ndbcluster::ndb_err(NdbTransaction *trans) if (res == HA_ERR_FOUND_DUPP_KEY) { if (m_rows_to_insert == 1) - m_dupkey= table->s->primary_key; + m_dupkey= table_share->primary_key; else { /* We are batching inserts, offending key is not available */ @@ -788,7 +803,7 @@ int ha_ndbcluster::get_ndb_blobs_value(NdbBlob *last_ndb_blob) for (int loop= 0; loop <= 1; loop++) { uint32 offset= 0; - for (uint i= 0; i < table->s->fields; i++) + for (uint i= 0; i < table_share->fields; i++) { Field *field= table->field[i]; NdbValue value= m_value[i]; @@ -892,10 +907,10 @@ int ha_ndbcluster::get_ndb_value(NdbOperation *ndb_op, Field *field, */ bool ha_ndbcluster::uses_blob_value() { - if (table->s->blob_fields == 0) + if (table_share->blob_fields == 0) return FALSE; { - uint no_fields= table->s->fields; + uint no_fields= table_share->fields; int i; // They always put blobs at the end.. for (i= no_fields - 1; i >= 0; i--) @@ -972,7 +987,7 @@ int ha_ndbcluster::get_metadata(const char *path) DBUG_RETURN(1); } - if (cmp_frm(tab, pack_data, pack_length)) + if (m_share->state != NSS_ALTERED && cmp_frm(tab, pack_data, pack_length)) { if (!invalidating_ndb_table) { @@ -1044,6 +1059,36 @@ static int fix_unique_index_attr_order(NDB_INDEX_DATA &data, DBUG_RETURN(0); } +int ha_ndbcluster::table_changed(const void *pack_frm_data, uint pack_frm_len) +{ + Ndb *ndb; + NDBDICT *dict; + const NDBTAB *orig_tab; + NdbDictionary::Table new_tab; + int result; + DBUG_ENTER("ha_ndbcluster::table_changed"); + DBUG_PRINT("info", ("Modifying frm for table %s", m_tabname)); + if (check_ndb_connection()) + DBUG_RETURN(my_errno= HA_ERR_NO_CONNECTION); + + ndb= get_ndb(); + dict= ndb->getDictionary(); + if (!(orig_tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + // Check if thread has stale local cache + if (orig_tab->getObjectStatus() == NdbDictionary::Object::Invalid) + { + dict->removeCachedTable(m_tabname); + if (!(orig_tab= dict->getTable(m_tabname))) + ERR_RETURN(dict->getNdbError()); + } + new_tab= *orig_tab; + new_tab.setFrm(pack_frm_data, pack_frm_len); + if (dict->alterTable(new_tab) != 0) + ERR_RETURN(dict->getNdbError()); + DBUG_RETURN(0); +} + /* Create all the indexes for a table. 
If any index should fail to be created, @@ -1423,7 +1468,7 @@ static void shrink_varchar(Field* field, const byte* & ptr, char* buf) int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) { - KEY* key_info= table->key_info + table->s->primary_key; + KEY* key_info= table->key_info + table_share->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; DBUG_ENTER("set_primary_key"); @@ -1445,7 +1490,7 @@ int ha_ndbcluster::set_primary_key(NdbOperation *op, const byte *key) int ha_ndbcluster::set_primary_key_from_record(NdbOperation *op, const byte *record) { - KEY* key_info= table->key_info + table->s->primary_key; + KEY* key_info= table->key_info + table_share->primary_key; KEY_PART_INFO* key_part= key_info->key_part; KEY_PART_INFO* end= key_part+key_info->key_parts; DBUG_ENTER("set_primary_key_from_record"); @@ -1490,7 +1535,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) DBUG_ENTER("define_read_attrs"); // Define attributes to read - for (i= 0; i < table->s->fields; i++) + for (i= 0; i < table_share->fields; i++) { Field *field= table->field[i]; if (ha_get_bit_in_read_set(i+1) || @@ -1505,11 +1550,11 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) } } - if (table->s->primary_key == MAX_KEY) + if (table_share->primary_key == MAX_KEY) { DBUG_PRINT("info", ("Getting hidden key")); // Scanning table with no primary key - int hidden_no= table->s->fields; + int hidden_no= table_share->fields; #ifndef DBUG_OFF const NDBTAB *tab= (const NDBTAB *) m_table; if (!tab->getColumn(hidden_no)) @@ -1529,7 +1574,7 @@ int ha_ndbcluster::define_read_attrs(byte* buf, NdbOperation* op) int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id) { - uint no_fields= table->s->fields; + uint no_fields= table_share->fields; NdbConnection *trans= m_active_trans; NdbOperation *op; @@ -1547,7 +1592,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, if (m_use_partition_function) op->setPartitionId(part_id); - if (table->s->primary_key == MAX_KEY) + if (table_share->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); @@ -1587,7 +1632,7 @@ int ha_ndbcluster::pk_read(const byte *key, uint key_len, byte *buf, int ha_ndbcluster::complemented_pk_read(const byte *old_data, byte *new_data, uint32 old_part_id) { - uint no_fields= table->s->fields, i; + uint no_fields= table_share->fields, i; NdbTransaction *trans= m_active_trans; NdbOperation *op; DBUG_ENTER("complemented_pk_read"); @@ -1673,7 +1718,9 @@ int ha_ndbcluster::peek_row(const byte *record) { uint32 part_id; int error; - if ((error= m_part_info->get_partition_id(m_part_info, &part_id))) + longlong func_value; + if ((error= m_part_info->get_partition_id(m_part_info, &part_id, + &func_value))) { DBUG_RETURN(error); } @@ -2131,17 +2178,17 @@ int ha_ndbcluster::write_row(byte *record) NdbOperation *op; int res; THD *thd= current_thd; + longlong func_value= 0; + DBUG_ENTER("ha_ndbcluster::write_row"); + m_write_op= TRUE; - - DBUG_ENTER("write_row"); - - if (!m_use_write && m_ignore_dup_key && table->s->primary_key != MAX_KEY) + if (!m_use_write && m_ignore_dup_key && table_share->primary_key != MAX_KEY) { int peek_res= peek_row(record); if (!peek_res) { - m_dupkey= table->s->primary_key; + m_dupkey= table_share->primary_key; DBUG_RETURN(HA_ERR_FOUND_DUPP_KEY); } if (peek_res != HA_ERR_KEY_NOT_FOUND) @@ -2164,14 +2211,15 @@ int 
ha_ndbcluster::write_row(byte *record) { uint32 part_id; int error; - if ((error= m_part_info->get_partition_id(m_part_info, &part_id))) + if ((error= m_part_info->get_partition_id(m_part_info, &part_id, + &func_value))) { DBUG_RETURN(error); } op->setPartitionId(part_id); } - if (table->s->primary_key == MAX_KEY) + if (table_share->primary_key == MAX_KEY) { // Table has hidden primary key Ndb *ndb= get_ndb(); @@ -2184,7 +2232,7 @@ int ha_ndbcluster::write_row(byte *record) ndb->getNdbError().status == NdbError::TemporaryError); if (auto_value == NDB_FAILED_AUTO_INCREMENT) ERR_RETURN(ndb->getNdbError()); - if (set_hidden_key(op, table->s->fields, (const byte*)&auto_value)) + if (set_hidden_key(op, table_share->fields, (const byte*)&auto_value)) ERR_RETURN(op->getNdbError()); } else @@ -2208,7 +2256,7 @@ int ha_ndbcluster::write_row(byte *record) // Set non-key attribute(s) bool set_blob_value= FALSE; - for (i= 0; i < table->s->fields; i++) + for (i= 0; i < table_share->fields; i++) { Field *field= table->field[i]; if (!(field->flags & PRI_KEY_FLAG) && @@ -2220,6 +2268,22 @@ int ha_ndbcluster::write_row(byte *record) } } + if (m_use_partition_function) + { + /* + We need to set the value of the partition function value in + NDB since the NDB kernel doesn't have easy access to the function + to calculate the value. + */ + if (func_value >= INT_MAX32) + func_value= INT_MAX32; + uint32 part_func_value= (uint32)func_value; + uint no_fields= table_share->fields; + if (table_share->primary_key == MAX_KEY) + no_fields++; + op->setValue(no_fields, part_func_value); + } + m_rows_changed++; /* @@ -2331,6 +2395,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) uint i; uint32 old_part_id= 0, new_part_id= 0; int error; + longlong func_value; DBUG_ENTER("update_row"); m_write_op= TRUE; @@ -2343,14 +2408,15 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) if (m_use_partition_function && (error= get_parts_for_update(old_data, new_data, table->record[0], - m_part_info, &old_part_id, &new_part_id))) + m_part_info, &old_part_id, &new_part_id, + &func_value))) { DBUG_RETURN(error); } /* Check for update of primary key for special handling */ - if ((table->s->primary_key != MAX_KEY) && - (key_cmp(table->s->primary_key, old_data, new_data)) || + if ((table_share->primary_key != MAX_KEY) && + (key_cmp(table_share->primary_key, old_data, new_data)) || (old_part_id != new_part_id)) { int read_res, insert_res, delete_res, undo_res; @@ -2424,14 +2490,14 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) if (m_use_partition_function) op->setPartitionId(new_part_id); - if (table->s->primary_key == MAX_KEY) + if (table_share->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); // Require that the PK for this record has previously been // read into m_value - uint no_fields= table->s->fields; + uint no_fields= table_share->fields; const NdbRecAttr* rec= m_value[no_fields].rec; DBUG_ASSERT(rec); DBUG_DUMP("key", (char*)rec->aRef(), NDB_HIDDEN_PRIMARY_KEY_LENGTH); @@ -2450,7 +2516,7 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) m_rows_changed++; // Set non-key attribute(s) - for (i= 0; i < table->s->fields; i++) + for (i= 0; i < table_share->fields; i++) { Field *field= table->field[i]; if (ha_get_bit_in_write_set(i+1) && @@ -2459,6 +2525,16 @@ int ha_ndbcluster::update_row(const byte *old_data, byte *new_data) ERR_RETURN(op->getNdbError()); } + if 
(m_use_partition_function) + { + if (func_value >= INT_MAX32) + func_value= INT_MAX32; + uint32 part_func_value= (uint32)func_value; + uint no_fields= table_share->fields; + if (table_share->primary_key == MAX_KEY) + no_fields++; + op->setValue(no_fields, part_func_value); + } // Execute update operation if (!cursor && execute_no_commit(this,trans) != 0) { no_uncommitted_rows_execute_failure(); @@ -2529,11 +2605,11 @@ int ha_ndbcluster::delete_row(const byte *record) no_uncommitted_rows_update(-1); - if (table->s->primary_key == MAX_KEY) + if (table_share->primary_key == MAX_KEY) { // This table has no primary key, use "hidden" primary key DBUG_PRINT("info", ("Using hidden key")); - uint no_fields= table->s->fields; + uint no_fields= table_share->fields; const NdbRecAttr* rec= m_value[no_fields].rec; DBUG_ASSERT(rec != NULL); @@ -2656,10 +2732,10 @@ void ha_ndbcluster::unpack_record(byte *buf) ndb_unpack_record(table, m_value, 0, buf); #ifndef DBUG_OFF // Read and print all values that was fetched - if (table->s->primary_key == MAX_KEY) + if (table_share->primary_key == MAX_KEY) { // Table with hidden primary key - int hidden_no= table->s->fields; + int hidden_no= table_share->fields; const NDBTAB *tab= (const NDBTAB *) m_table; const NDBCOL *hidden_col= tab->getColumn(hidden_no); const NdbRecAttr* rec= m_value[hidden_no].rec; @@ -2686,7 +2762,7 @@ void ha_ndbcluster::print_results() char buf_type[MAX_FIELD_WIDTH], buf_val[MAX_FIELD_WIDTH]; String type(buf_type, sizeof(buf_type), &my_charset_bin); String val(buf_val, sizeof(buf_val), &my_charset_bin); - for (uint f= 0; f < table->s->fields; f++) + for (uint f= 0; f < table_share->fields; f++) { /* Use DBUG_PRINT since DBUG_FILE cannot be filtered out */ char buf[2000]; @@ -2953,7 +3029,7 @@ int ha_ndbcluster::rnd_init(bool scan) DBUG_RETURN(-1); } } - index_init(table->s->primary_key, 0); + index_init(table_share->primary_key, 0); DBUG_RETURN(0); } @@ -3051,9 +3127,9 @@ void ha_ndbcluster::position(const byte *record) byte *buff; DBUG_ENTER("position"); - if (table->s->primary_key != MAX_KEY) + if (table_share->primary_key != MAX_KEY) { - key_info= table->key_info + table->s->primary_key; + key_info= table->key_info + table_share->primary_key; key_part= key_info->key_part; end= key_part + key_info->key_parts; buff= ref; @@ -3095,7 +3171,7 @@ void ha_ndbcluster::position(const byte *record) { // No primary key, get hidden key DBUG_PRINT("info", ("Getting hidden key")); - int hidden_no= table->s->fields; + int hidden_no= table_share->fields; const NdbRecAttr* rec= m_value[hidden_no].rec; memcpy(ref, (const void*)rec->aRef(), ref_length); #ifndef DBUG_OFF @@ -4057,7 +4133,7 @@ int ha_ndbcluster::create(const char *name, caller. 
Do Ndb specific stuff, such as create a .ndb file */ - if ((my_errno= write_ndb_file())) + if ((my_errno= write_ndb_file(name))) DBUG_RETURN(my_errno); #ifdef HAVE_NDB_BINLOG if (ndb_binlog_thread_running > 0) @@ -4128,7 +4204,7 @@ int ha_ndbcluster::create(const char *name, tab.addColumn(col); pk_length += 2; } - + // Make sure that blob tables don't have to big part size for (i= 0; i < form->s->fields; i++) { @@ -4164,20 +4240,10 @@ int ha_ndbcluster::create(const char *name, // Check partition info partition_info *part_info= form->part_info; - if (part_info) + if ((my_errno= set_up_partition_info(part_info, form, (void*)&tab))) { - int error; - if ((error= set_up_partition_info(part_info, form, (void*)&tab))) - { - DBUG_RETURN(error); - } + DBUG_RETURN(my_errno); } - else - { - ndb_set_fragmentation(tab, form, pk_length); - } - - if ((my_errno= check_ndb_connection())) DBUG_RETURN(my_errno); @@ -4199,7 +4265,7 @@ int ha_ndbcluster::create(const char *name, my_errno= create_indexes(ndb, form); if (!my_errno) - my_errno= write_ndb_file(); + my_errno= write_ndb_file(name); else { /* @@ -4280,6 +4346,47 @@ int ha_ndbcluster::create(const char *name, DBUG_RETURN(my_errno); } +int ha_ndbcluster::create_handler_files(const char *file) +{ + const char *name; + Ndb* ndb; + const NDBTAB *tab; + const void *data, *pack_data; + uint length, pack_length; + int error= 0; + + DBUG_ENTER("create_handler_files"); + + if (!(ndb= get_ndb())) + DBUG_RETURN(HA_ERR_NO_CONNECTION); + + NDBDICT *dict= ndb->getDictionary(); + if (!(tab= dict->getTable(m_tabname))) + DBUG_RETURN(0); // Must be a create, ignore since frm is saved in create + + name= table->s->normalized_path.str; + DBUG_PRINT("enter", ("m_tabname: %s, path: %s", m_tabname, name)); + if (readfrm(name, &data, &length) || + packfrm(data, length, &pack_data, &pack_length)) + { + DBUG_PRINT("info", ("Missing frm for %s", m_tabname)); + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(1); + } + if (cmp_frm(tab, pack_data, pack_length)) + { + DBUG_PRINT("info", ("Table %s has changed, altering frm in ndb", + m_tabname)); + error= table_changed(pack_data, pack_length); + m_share->state= NSS_INITIAL; + } + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*)pack_data, MYF(MY_ALLOW_ZERO_PTR)); + + DBUG_RETURN(error); +} + int ha_ndbcluster::create_index(const char *name, KEY *key_info, NDB_INDEX_TYPE idx_type, uint idx_no) { @@ -4407,7 +4514,7 @@ int ha_ndbcluster::add_index(TABLE *table_arg, if((error= create_index(key_info[idx].name, key, idx_type, idx))) break; } - + m_share->state= NSS_ALTERED; DBUG_RETURN(error); } @@ -4442,6 +4549,7 @@ int ha_ndbcluster::prepare_drop_index(TABLE *table_arg, THD *thd= current_thd; Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; + m_share->state= NSS_ALTERED; DBUG_RETURN(renumber_indexes(ndb, table_arg)); } @@ -4452,14 +4560,11 @@ int ha_ndbcluster::final_drop_index(TABLE *table_arg) { DBUG_ENTER("ha_ndbcluster::final_drop_index"); DBUG_PRINT("info", ("ha_ndbcluster::final_drop_index")); - int error= 0; // Really drop indexes THD *thd= current_thd; Thd_ndb *thd_ndb= get_thd_ndb(thd); Ndb *ndb= thd_ndb->ndb; - error= drop_indexes(ndb, table_arg); - - DBUG_RETURN(error); + DBUG_RETURN(drop_indexes(ndb, table_arg)); } /* @@ -4921,9 +5026,9 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) primary key to be written in the ref variable */ - if (table->s->primary_key != MAX_KEY) + if (table_share->primary_key != 
MAX_KEY) { - key= table->key_info+table->s->primary_key; + key= table->key_info+table_share->primary_key; ref_length= key->key_length; DBUG_PRINT("info", (" ref_length: %d", ref_length)); } @@ -4945,10 +5050,23 @@ int ha_ndbcluster::open(const char *name, int mode, uint test_if_locked) if (!res) info(HA_STATUS_VARIABLE | HA_STATUS_CONST); - DBUG_RETURN(res); } +/* + Set partition info + + SYNOPSIS + set_part_info() + part_info + + RETURN VALUE + NONE + + DESCRIPTION + Set up partition info when handler object created +*/ + void ha_ndbcluster::set_part_info(partition_info *part_info) { m_part_info= part_info; @@ -5282,9 +5400,15 @@ int ndbcluster_find_all_files(THD *thd) } else if (cmp_frm(ndbtab, pack_data, pack_length)) { - discover= 1; - sql_print_information("NDB: mismatch in frm for %s.%s, discovering...", - elmt.database, elmt.name); + NDB_SHARE *share= get_share(key, 0, false); + if (!share || share->state != NSS_ALTERED) + { + discover= 1; + sql_print_information("NDB: mismatch in frm for %s.%s, discovering...", + elmt.database, elmt.name); + } + if (share) + free_share(&share); } my_free((char*) data, MYF(MY_ALLOW_ZERO_PTR)); my_free((char*) pack_data, MYF(MY_ALLOW_ZERO_PTR)); @@ -5307,7 +5431,7 @@ int ndbcluster_find_all_files(THD *thd) pthread_mutex_lock(&ndbcluster_mutex); if (((share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables, (byte*) key, strlen(key))) - && share->op == 0 && share->op_old == 0) + && share->op == 0 && share->op_old == 0 && ! (share->flags & NSF_NO_BINLOG)) || share == 0) { /* @@ -5451,7 +5575,7 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, end= strxnmov(end1, sizeof(name) - (end1 - name), file_name, NullS); if ((share= (NDB_SHARE*)hash_search(&ndbcluster_open_tables, (byte*)name, end - name)) - && share->op == 0 && share->op_old == 0) + && share->op == 0 && share->op_old == 0 && ! 
(share->flags & NSF_NO_BINLOG)) { /* there is no binlog creation setup for this table @@ -5464,6 +5588,8 @@ int ndbcluster_find_files(THD *thd,const char *db,const char *path, pthread_mutex_unlock(&LOCK_open); pthread_mutex_lock(&ndbcluster_mutex); } + /* Table existed in the mysqld so there should be a share */ + DBUG_ASSERT(share != NULL); } pthread_mutex_unlock(&ndbcluster_mutex); } @@ -5570,6 +5696,8 @@ static bool ndbcluster_init() h.panic= ndbcluster_end; /* Panic call */ h.show_status= ndbcluster_show_status; /* Show status */ h.alter_tablespace= ndbcluster_alter_tablespace; /* Show status */ + h.partition_flags= ndbcluster_partition_flags; /* Partition flags */ + h.alter_table_flags=ndbcluster_alter_table_flags; /* Alter table flags */ #ifdef HAVE_NDB_BINLOG ndbcluster_binlog_init_handlerton(); #endif @@ -5721,6 +5849,20 @@ static int ndbcluster_end(ha_panic_function type) DBUG_RETURN(0); } +void ha_ndbcluster::print_error(int error, myf errflag) +{ + DBUG_ENTER("ha_ndbcluster::print_error"); + DBUG_PRINT("enter", ("error = %d", error)); + + if (error == HA_ERR_NO_PARTITION_FOUND) + my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), + (int)m_part_info->part_expr->val_int()); + else + handler::print_error(error, errflag); + DBUG_VOID_RETURN; +} + + /* Static error print function called from static handler method ndbcluster_commit @@ -5747,8 +5889,10 @@ void ndbcluster_print_error(int error, const NdbOperation *error_op) */ void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) { - char *end, *ptr; - + char *end, *ptr, *tmp_name; + char tmp_buff[FN_REFLEN]; + + tmp_name= tmp_buff; /* Scan name from the end */ ptr= strend(path_name)-1; while (ptr >= path_name && *ptr != '\\' && *ptr != '/') { @@ -5760,18 +5904,19 @@ void ha_ndbcluster::set_dbname(const char *path_name, char *dbname) ptr--; } uint name_len= end - ptr; - memcpy(dbname, ptr + 1, name_len); - dbname[name_len]= '\0'; + memcpy(tmp_name, ptr + 1, name_len); + tmp_name[name_len]= '\0'; #ifdef __WIN__ /* Put to lower case */ - ptr= dbname; + ptr= tmp_name; while (*ptr != '\0') { *ptr= tolower(*ptr); ptr++; } #endif + filename_to_tablename(tmp_name, dbname, FN_REFLEN); } /* @@ -5790,8 +5935,10 @@ void ha_ndbcluster::set_dbname(const char *path_name) void ha_ndbcluster::set_tabname(const char *path_name, char * tabname) { - char *end, *ptr; - + char *end, *ptr, *tmp_name; + char tmp_buff[FN_REFLEN]; + + tmp_name= tmp_buff; /* Scan name from the end */ end= strend(path_name)-1; ptr= end; @@ -5799,17 +5946,18 @@ ha_ndbcluster::set_tabname(const char *path_name, char * tabname) ptr--; } uint name_len= end - ptr; - memcpy(tabname, ptr + 1, end - ptr); - tabname[name_len]= '\0'; + memcpy(tmp_name, ptr + 1, end - ptr); + tmp_name[name_len]= '\0'; #ifdef __WIN__ /* Put to lower case */ - ptr= tabname; + ptr= tmp_name; while (*ptr != '\0') { *ptr= tolower(*ptr); ptr++; } #endif + filename_to_tablename(tmp_name, tabname, FN_REFLEN); } /* @@ -6276,6 +6424,11 @@ int handle_trailing_share(NDB_SHARE *share) share->key, share->use_count); dbug_print_open_tables(); + /* + Ndb share has not been released as it should + */ + DBUG_ASSERT(FALSE); + /* This is probably an error. 
We can however save the situation at the cost of a possible mem leak, by "renaming" the share @@ -6451,7 +6604,7 @@ NDB_SHARE *ndbcluster_get_share(const char *key, TABLE *table, MEM_ROOT *old_root= *root_ptr; init_sql_alloc(&share->mem_root, 1024, 0); *root_ptr= &share->mem_root; // remember to reset before return - + share->state= NSS_INITIAL; /* enough space for key, db, and table_name */ share->key= alloc_root(*root_ptr, 2 * (length + 1)); share->key_length= length; @@ -6576,104 +6729,6 @@ void ndbcluster_free_share(NDB_SHARE **share, bool have_lock) } -/* - Internal representation of the frm blob - -*/ - -struct frm_blob_struct -{ - struct frm_blob_header - { - uint ver; // Version of header - uint orglen; // Original length of compressed data - uint complen; // Compressed length of data, 0=uncompressed - } head; - char data[1]; -}; - - - -static int packfrm(const void *data, uint len, - const void **pack_data, uint *pack_len) -{ - int error; - ulong org_len, comp_len; - uint blob_len; - frm_blob_struct* blob; - DBUG_ENTER("packfrm"); - DBUG_PRINT("enter", ("data: 0x%lx len: %d", data, len)); - - error= 1; - org_len= len; - if (my_compress((byte*)data, &org_len, &comp_len)) - goto err; - - DBUG_PRINT("info", ("org_len: %d comp_len: %d", org_len, comp_len)); - DBUG_DUMP("compressed", (char*)data, org_len); - - error= 2; - blob_len= sizeof(frm_blob_struct::frm_blob_header)+org_len; - if (!(blob= (frm_blob_struct*) my_malloc(blob_len,MYF(MY_WME)))) - goto err; - - // Store compressed blob in machine independent format - int4store((char*)(&blob->head.ver), 1); - int4store((char*)(&blob->head.orglen), comp_len); - int4store((char*)(&blob->head.complen), org_len); - - // Copy frm data into blob, already in machine independent format - memcpy(blob->data, data, org_len); - - *pack_data= blob; - *pack_len= blob_len; - error= 0; - - DBUG_PRINT("exit", - ("pack_data: 0x%lx pack_len: %d", *pack_data, *pack_len)); -err: - DBUG_RETURN(error); - -} - - -static int unpackfrm(const void **unpack_data, uint *unpack_len, - const void *pack_data) -{ - const frm_blob_struct *blob= (frm_blob_struct*)pack_data; - byte *data; - ulong complen, orglen, ver; - DBUG_ENTER("unpackfrm"); - DBUG_PRINT("enter", ("pack_data: 0x%lx", pack_data)); - - complen= uint4korr((char*)&blob->head.complen); - orglen= uint4korr((char*)&blob->head.orglen); - ver= uint4korr((char*)&blob->head.ver); - - DBUG_PRINT("blob",("ver: %d complen: %d orglen: %d", - ver,complen,orglen)); - DBUG_DUMP("blob->data", (char*) blob->data, complen); - - if (ver != 1) - DBUG_RETURN(1); - if (!(data= my_malloc(max(orglen, complen), MYF(MY_WME)))) - DBUG_RETURN(2); - memcpy(data, blob->data, complen); - - if (my_uncompress(data, &complen, &orglen)) - { - my_free((char*)data, MYF(0)); - DBUG_RETURN(3); - } - - *unpack_data= data; - *unpack_len= complen; - - DBUG_PRINT("exit", ("frmdata: 0x%lx, len: %d", *unpack_data, *unpack_len)); - - DBUG_RETURN(0); -} - static int ndb_get_table_statistics(Ndb* ndb, const char * table, @@ -6756,17 +6811,17 @@ ndb_get_table_statistics(Ndb* ndb, const char * table, that the table with this name is a ndb table */ -int ha_ndbcluster::write_ndb_file() +int ha_ndbcluster::write_ndb_file(const char *name) { File file; bool error=1; char path[FN_REFLEN]; DBUG_ENTER("write_ndb_file"); - DBUG_PRINT("enter", ("db: %s, name: %s", m_dbname, m_tabname)); + DBUG_PRINT("enter", ("name: %s", name)); (void)strxnmov(path, FN_REFLEN-1, - mysql_data_home,"/",m_dbname,"/",m_tabname,ha_ndb_ext,NullS); + 
mysql_data_home,"/",name,ha_ndb_ext,NullS); if ((file=my_create(path, CREATE_MODE,O_RDWR | O_TRUNC,MYF(MY_WME))) >= 0) { @@ -6790,7 +6845,7 @@ ha_ndbcluster::read_multi_range_first(KEY_MULTI_RANGE **found_range_p, int res; KEY* key_info= table->key_info + active_index; NDB_INDEX_TYPE index_type= get_index_type(active_index); - ulong reclength= table->s->reclength; + ulong reclength= table_share->reclength; NdbOperation* op; if (uses_blob_value()) @@ -6997,7 +7052,7 @@ ha_ndbcluster::read_multi_range_next(KEY_MULTI_RANGE ** multi_range_found_p) int res; int range_no; - ulong reclength= table->s->reclength; + ulong reclength= table_share->reclength; const NdbOperation* op= m_current_multi_operation; for (;multi_range_curr < m_multi_range_defined; multi_range_curr++) { @@ -7146,7 +7201,7 @@ ha_ndbcluster::setup_recattr(const NdbRecAttr* curr) Field **field, **end; NdbValue *value= m_value; - end= table->field + table->s->fields; + end= table->field + table_share->fields; for (field= table->field; field < end; field++, value++) { @@ -8867,11 +8922,121 @@ int ha_ndbcluster::get_default_no_partitions(ulonglong max_rows) uint reported_frags; uint no_fragments= get_no_fragments(max_rows); uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); - adjusted_frag_count(no_fragments, no_nodes, reported_frags); + if (adjusted_frag_count(no_fragments, no_nodes, reported_frags)) + { + push_warning(current_thd, + MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, + "Ndb might have problems storing the max amount of rows specified"); + } return (int)reported_frags; } +/* + Set-up auto-partitioning for NDB Cluster + + SYNOPSIS + set_auto_partitions() + part_info Partition info struct to set-up + + RETURN VALUE + NONE + + DESCRIPTION + Set-up auto partitioning scheme for tables that didn't define any + partitioning. We'll use PARTITION BY KEY() in this case which + translates into partition by primary key if a primary key exists + and partition by hidden key otherwise. 
+*/ + +void ha_ndbcluster::set_auto_partitions(partition_info *part_info) +{ + DBUG_ENTER("ha_ndbcluster::set_auto_partitions"); + part_info->list_of_part_fields= TRUE; + part_info->part_type= HASH_PARTITION; + switch (opt_ndb_distribution_id) + { + case ND_KEYHASH: + part_info->linear_hash_ind= FALSE; + break; + case ND_LINHASH: + part_info->linear_hash_ind= TRUE; + break; + } + DBUG_VOID_RETURN; +} + + +int ha_ndbcluster::set_range_data(void *tab_ref, partition_info *part_info) +{ + NDBTAB *tab= (NDBTAB*)tab_ref; + int32 *range_data= (int32*)my_malloc(part_info->no_parts*sizeof(int32), + MYF(0)); + uint i; + int error= 0; + DBUG_ENTER("set_range_data"); + + if (!range_data) + { + mem_alloc_error(part_info->no_parts*sizeof(int32)); + DBUG_RETURN(1); + } + for (i= 0; i < part_info->no_parts; i++) + { + longlong range_val= part_info->range_int_array[i]; + if (range_val < INT_MIN32 || range_val >= INT_MAX32) + { + if ((i != part_info->no_parts - 1) || + (range_val != LONGLONG_MAX)) + { + my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); + error= 1; + goto error; + } + range_val= INT_MAX32; + } + range_data[i]= (int32)range_val; + } + tab->setRangeListData(range_data, sizeof(int32)*part_info->no_parts); +error: + my_free((char*)range_data, MYF(0)); + DBUG_RETURN(error); +} + +int ha_ndbcluster::set_list_data(void *tab_ref, partition_info *part_info) +{ + NDBTAB *tab= (NDBTAB*)tab_ref; + int32 *list_data= (int32*)my_malloc(part_info->no_list_values * 2 + * sizeof(int32), MYF(0)); + uint32 *part_id, i; + int error= 0; + DBUG_ENTER("set_list_data"); + + if (!list_data) + { + mem_alloc_error(part_info->no_list_values*2*sizeof(int32)); + DBUG_RETURN(1); + } + for (i= 0; i < part_info->no_list_values; i++) + { + LIST_PART_ENTRY *list_entry= &part_info->list_array[i]; + longlong list_val= list_entry->list_value; + if (list_val < INT_MIN32 || list_val > INT_MAX32) + { + my_error(ER_LIMITED_PART_RANGE, MYF(0), "NDB"); + error= 1; + goto error; + } + list_data[2*i]= (int32)list_val; + part_id= (uint32*)&list_data[2*i+1]; + *part_id= list_entry->partition_id; + } + tab->setRangeListData(list_data, 2*sizeof(int32)*part_info->no_list_values); +error: + my_free((char*)list_data, MYF(0)); + DBUG_RETURN(error); +} + /* User defined partitioning set-up. We need to check how many fragments the user wants defined and which node groups to put those into. Later we also @@ -8889,12 +9054,18 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, TABLE *table, void *tab_par) { - DBUG_ENTER("ha_ndbcluster::set_up_partition_info"); - ushort node_group[MAX_PARTITIONS]; - ulong ng_index= 0, i, j; + uint16 frag_data[MAX_PARTITIONS]; + char *ts_names[MAX_PARTITIONS]; + ulong ts_index= 0, fd_index= 0, i, j; NDBTAB *tab= (NDBTAB*)tab_par; NDBTAB::FragmentType ftype= NDBTAB::UserDefined; partition_element *part_elem; + bool first= TRUE; + uint ts_id, ts_version, part_count= 0, tot_ts_name_len; + List_iterator part_it(part_info->partitions); + int error; + char *name_ptr; + DBUG_ENTER("ha_ndbcluster::set_up_partition_info"); if (part_info->part_type == HASH_PARTITION && part_info->list_of_part_fields == TRUE) @@ -8913,103 +9084,84 @@ uint ha_ndbcluster::set_up_partition_info(partition_info *part_info, col->setPartitionKey(TRUE); } } - List_iterator part_it(part_info->partitions); - for (i= 0; i < part_info->no_parts; i++) + else { + /* + Create a shadow field for those tables that have user defined + partitioning. 
This field stores the value of the partition + function such that NDB can handle reorganisations of the data + even when the MySQL Server isn't available to assist with + calculation of the partition function value. + */ + NDBCOL col; + DBUG_PRINT("info", ("Generating partition func value field")); + col.setName("$PART_FUNC_VALUE"); + col.setType(NdbDictionary::Column::Int); + col.setLength(1); + col.setNullable(FALSE); + col.setPrimaryKey(FALSE); + col.setAutoIncrement(FALSE); + tab->addColumn(col); + if (part_info->part_type == RANGE_PARTITION) + { + if ((error= set_range_data((void*)tab, part_info))) + { + DBUG_RETURN(error); + } + } + else if (part_info->part_type == LIST_PARTITION) + { + if ((error= set_list_data((void*)tab, part_info))) + { + DBUG_RETURN(error); + } + } + } + tab->setFragmentType(ftype); + i= 0; + tot_ts_name_len= 0; + do + { + uint ng; part_elem= part_it++; if (!is_sub_partitioned(part_info)) { - node_group[ng_index++]= part_elem->nodegroup_id; - //Here we should insert tablespace id based on tablespace name + ng= part_elem->nodegroup_id; + if (first && ng == UNDEF_NODEGROUP) + ng= 0; + ts_names[fd_index]= part_elem->tablespace_name; + frag_data[fd_index++]= ng; } else { List_iterator sub_it(part_elem->subpartitions); - for (j= 0; j < part_info->no_subparts; j++) + j= 0; + do { part_elem= sub_it++; - node_group[ng_index++]= part_elem->nodegroup_id; - //Here we should insert tablespace id based on tablespace name - } + ng= part_elem->nodegroup_id; + if (first && ng == UNDEF_NODEGROUP) + ng= 0; + ts_names[fd_index]= part_elem->tablespace_name; + frag_data[fd_index++]= ng; + } while (++j < part_info->no_subparts); } - } - { - uint no_nodes= g_ndb_cluster_connection->no_db_nodes(); - if (ng_index > 4 * no_nodes) - { - DBUG_RETURN(1300); - } - } - tab->setNodeGroupIds(&node_group, ng_index); - tab->setFragmentType(ftype); + first= FALSE; + } while (++i < part_info->no_parts); + tab->setDefaultNoPartitionsFlag(part_info->use_default_no_partitions); + tab->setLinearFlag(part_info->linear_hash_ind); + tab->setMaxRows(table->s->max_rows); + tab->setTablespaceNames(ts_names, fd_index*sizeof(char*)); + tab->setFragmentCount(fd_index); + tab->setFragmentData(&frag_data, fd_index*2); DBUG_RETURN(0); } -/* - This routine is used to set-up fragmentation when the user has only specified - ENGINE = NDB and no user defined partitioning what so ever. Thus all values - will be based on default values. We will choose Linear Hash or Hash with - perfect spread dependent on a session variable defined in MySQL. -*/ - -static void ndb_set_fragmentation(NDBTAB &tab, TABLE *form, uint pk_length) -{ - NDBTAB::FragmentType ftype= NDBTAB::DistrKeyHash; - ushort node_group[MAX_PARTITIONS]; - uint no_nodes= g_ndb_cluster_connection->no_db_nodes(), no_fragments, i; - DBUG_ENTER("ndb_set_fragmentation"); - - if (form->s->max_rows == (ha_rows) 0) - { - no_fragments= no_nodes; - } - else - { - /* - Ensure that we get enough fragments to handle all rows and ensure that - the table is fully distributed by keeping the number of fragments a - multiple of the number of nodes. 
- */ - uint fragments= get_no_fragments(form->s->max_rows); - if (adjusted_frag_count(fragments, no_nodes, no_fragments)) - { - push_warning(current_thd, - MYSQL_ERROR::WARN_LEVEL_WARN, ER_UNKNOWN_ERROR, - "Ndb might have problems storing the max amount of rows specified"); - } - } - /* - Always start with node group 0 and continue with next node group from - there - */ - node_group[0]= 0; - for (i= 1; i < no_fragments; i++) - node_group[i]= UNDEF_NODEGROUP; - switch (opt_ndb_distribution_id) - { - case ND_KEYHASH: - ftype= NDBTAB::DistrKeyHash; - break; - case ND_LINHASH: - ftype= NDBTAB::DistrKeyLin; - break; - } - tab.setFragmentType(ftype); - tab.setNodeGroupIds(&node_group, no_fragments); - DBUG_VOID_RETURN; -} - bool ha_ndbcluster::check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes) { - /* - TODO: Remove the dummy return below, when cluster gets - signal from alter table when only .frm is changed. Cluster - needs it to manage the copies. - */ - return COMPATIBLE_DATA_NO; - + return COMPATIBLE_DATA_NO; // Disable fast add/drop index if (table_changes != IS_EQUAL_YES) return COMPATIBLE_DATA_NO; @@ -9252,3 +9404,41 @@ ndberror: DBUG_RETURN(1); } + +bool ha_ndbcluster::get_no_parts(const char *name, uint *no_parts) +{ + Ndb *ndb; + NDBDICT *dict; + const NDBTAB *tab; + int err; + DBUG_ENTER("ha_ndbcluster::get_no_parts"); + + set_dbname(name); + set_tabname(name); + do + { + if (check_ndb_connection()) + { + err= HA_ERR_NO_CONNECTION; + break; + } + ndb= get_ndb(); + dict= ndb->getDictionary(); + if (!(tab= dict->getTable(m_tabname))) + ERR_BREAK(dict->getNdbError(), err); + // Check if thread has stale local cache + if (tab->getObjectStatus() == NdbDictionary::Object::Invalid) + { + invalidate_dictionary_cache(FALSE); + if (!(tab= dict->getTable(m_tabname))) + ERR_BREAK(dict->getNdbError(), err); + } + *no_parts= tab->getFragmentCount(); + DBUG_RETURN(FALSE); + } while (1); + +end: + print_error(err, MYF(0)); + DBUG_RETURN(TRUE); +} + diff --git a/sql/ha_ndbcluster.h b/sql/ha_ndbcluster.h index ea35af908d8..73b1b27ede2 100644 --- a/sql/ha_ndbcluster.h +++ b/sql/ha_ndbcluster.h @@ -80,10 +80,12 @@ typedef union { const NdbRecAttr *rec; NdbBlob *blob; void *ptr; } NdbValue; typedef enum { NSS_INITIAL= 0, - NSS_DROPPED + NSS_DROPPED, + NSS_ALTERED } NDB_SHARE_STATE; typedef struct st_ndbcluster_share { + NDB_SHARE_STATE state; MEM_ROOT mem_root; THR_LOCK lock; pthread_mutex_t mutex; @@ -97,7 +99,6 @@ typedef struct st_ndbcluster_share { char *table_name; #ifdef HAVE_NDB_BINLOG uint32 flags; - NDB_SHARE_STATE state; NdbEventOperation *op; NdbEventOperation *op_old; // for rename table char *old_names; // for rename table @@ -113,6 +114,7 @@ typedef struct st_ndbcluster_share { #ifdef HAVE_NDB_BINLOG /* NDB_SHARE.flags */ #define NSF_HIDDEN_PK 1 /* table has hidden primary key */ +#define NSF_NO_BINLOG 4 /* table should not be binlogged */ #endif typedef enum ndb_item_type { @@ -561,22 +563,13 @@ class ha_ndbcluster: public handler int extra_opt(enum ha_extra_function operation, ulong cache_size); int external_lock(THD *thd, int lock_type); int start_stmt(THD *thd, thr_lock_type lock_type); + void print_error(int error, myf errflag); const char * table_type() const; const char ** bas_ext() const; ulong table_flags(void) const; - ulong alter_table_flags(void) const - { - return (HA_ONLINE_ADD_INDEX | HA_ONLINE_DROP_INDEX | - HA_ONLINE_ADD_UNIQUE_INDEX | HA_ONLINE_DROP_UNIQUE_INDEX); - } int add_index(TABLE *table_arg, KEY *key_info, uint num_of_keys); int 
prepare_drop_index(TABLE *table_arg, uint *key_num, uint num_of_keys); int final_drop_index(TABLE *table_arg); - ulong partition_flags(void) const - { - return (HA_CAN_PARTITION | HA_CAN_UPDATE_PARTITION_KEY | - HA_CAN_PARTITION_UNIQUE); - } void set_part_info(partition_info *part_info); ulong index_flags(uint idx, uint part, bool all_parts) const; uint max_supported_record_length() const; @@ -587,7 +580,11 @@ class ha_ndbcluster: public handler int rename_table(const char *from, const char *to); int delete_table(const char *name); int create(const char *name, TABLE *form, HA_CREATE_INFO *info); + int create_handler_files(const char *file); int get_default_no_partitions(ulonglong max_rows); + bool get_no_parts(const char *name, uint *no_parts); + void set_auto_partitions(partition_info *part_info); + THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); @@ -657,7 +654,7 @@ static void set_tabname(const char *pathname, char *tabname); bool check_if_incompatible_data(HA_CREATE_INFO *info, uint table_changes); - static void invalidate_dictionary_cache(TABLE *table, Ndb *ndb, + static void invalidate_dictionary_cache(TABLE_SHARE *share, Ndb *ndb, const char *tabname, bool global); private: @@ -674,6 +671,7 @@ private: int create_index(const char *name, KEY *key_info, NDB_INDEX_TYPE idx_type, uint idx_no); int drop_ndb_index(const char *name); + int table_changed(const void *pack_frm_data, uint pack_frm_len); // Index list management int create_indexes(Ndb *ndb, TABLE *tab); void clear_index(int i); @@ -694,6 +692,8 @@ private: uint set_up_partition_info(partition_info *part_info, TABLE *table, void *tab); + int set_range_data(void *tab, partition_info* part_info); + int set_list_data(void *tab, partition_info* part_info); int complemented_pk_read(const byte *old_data, byte *new_data, uint32 old_part_id); int pk_read(const byte *key, uint key_len, byte *buf, uint32 part_id); @@ -743,7 +743,7 @@ private: char *update_table_comment(const char * comment); - int write_ndb_file(); + int write_ndb_file(const char *name); int check_ndb_connection(THD* thd= current_thd); diff --git a/sql/ha_ndbcluster_binlog.cc b/sql/ha_ndbcluster_binlog.cc index c80b2b27d8d..f1da21a3ad5 100644 --- a/sql/ha_ndbcluster_binlog.cc +++ b/sql/ha_ndbcluster_binlog.cc @@ -260,7 +260,7 @@ void ndbcluster_binlog_init_share(NDB_SHARE *share, TABLE *_table) break; } if ((error= open_table_from_share(thd, table_share, "", 0, - (uint) READ_ALL, 0, table))) + (uint) READ_ALL, 0, table, FALSE))) { sql_print_error("Unable to open table for %s, error=%d(%d)", share->key, error, my_errno); @@ -1219,7 +1219,7 @@ ndb_handle_schema_change(THD *thd, Ndb *ndb, NdbEventOperation *pOp, pOp->getReqNodeId() != g_ndb_cluster_connection->node_id()) { ndb->setDatabaseName(share->table->s->db.str); - ha_ndbcluster::invalidate_dictionary_cache(share->table, + ha_ndbcluster::invalidate_dictionary_cache(share->table->s, ndb, share->table->s->table_name.str, TRUE); @@ -1714,7 +1714,16 @@ ndbcluster_create_event(Ndb *ndb, const NDBTAB *ndbtab, { DBUG_ENTER("ndbcluster_create_event"); if (!share) + { + DBUG_PRINT("info", ("share == NULL")); DBUG_RETURN(0); + } + if (share->flags & NSF_NO_BINLOG) + { + DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x %d", share->flags, share->flags & NSF_NO_BINLOG)); + DBUG_RETURN(0); + } + NDBDICT *dict= ndb->getDictionary(); NDBEVENT my_event(event_name); my_event.setTable(*ndbtab); @@ -1831,6 +1840,12 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab, 
DBUG_ASSERT(share != 0);
+  if (share->flags & NSF_NO_BINLOG)
+  {
+    DBUG_PRINT("info", ("share->flags & NSF_NO_BINLOG, flags: %x", share->flags));
+    DBUG_RETURN(0);
+  }
+
   if (share->op)
   {
     assert(share->op->getCustomData() == (void *) share);
@@ -1854,6 +1869,7 @@ ndbcluster_create_event_ops(NDB_SHARE *share, const NDBTAB *ndbtab,
     {
       sql_print_error("NDB Binlog: logging of blob table %s "
                       "is not supported", share->key);
+      share->flags|= NSF_NO_BINLOG;
       DBUG_RETURN(0);
     }
   }
diff --git a/sql/ha_ndbcluster_binlog.h b/sql/ha_ndbcluster_binlog.h
index 5334120b43f..e8582ab659a 100644
--- a/sql/ha_ndbcluster_binlog.h
+++ b/sql/ha_ndbcluster_binlog.h
@@ -27,8 +27,6 @@ typedef NdbDictionary::Event NDBEVENT;
 extern ulong ndb_extra_logging;
-#ifdef HAVE_NDB_BINLOG
-
 #define INJECTOR_EVENT_LEN 200
 enum SCHEMA_OP_TYPE
@@ -45,13 +43,14 @@ enum SCHEMA_OP_TYPE
 const uint max_ndb_nodes= 64; /* multiple of 32 */
+static const char *ha_ndb_ext=".ndb";
+static const char share_prefix[]= "./";
+
+#ifdef HAVE_NDB_BINLOG
 extern pthread_t ndb_binlog_thread;
 extern pthread_mutex_t injector_mutex;
 extern pthread_cond_t injector_cond;
-static const char *ha_ndb_ext=".ndb";
-static const char share_prefix[]= "./";
-
 extern unsigned char g_node_id_map[max_ndb_nodes];
 extern handlerton ndbcluster_hton;
 extern pthread_t ndb_util_thread;
@@ -114,6 +113,8 @@ ndbcluster_show_status_binlog(THD* thd, stat_print_fn *stat_print,
   the ndb binlog code
 */
 int ndbcluster_find_all_files(THD *thd);
+#endif /* HAVE_NDB_BINLOG */
+
 void ndb_unpack_record(TABLE *table, NdbValue *value,
                        MY_BITMAP *defined, byte *buf);
@@ -157,6 +158,3 @@ void set_thd_ndb(THD *thd, Thd_ndb *thd_ndb)
 { thd->ha_data[ndbcluster_hton.slot]= thd_ndb; }
 Ndb* check_ndb_in_thd(THD* thd);
-
-
-#endif /* HAVE_NDB_BINLOG */
diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc
index d7549c1a95b..61ffa49def2 100644
--- a/sql/ha_partition.cc
+++ b/sql/ha_partition.cc
@@ -67,6 +67,8 @@ static PARTITION_SHARE *get_share(const char *table_name, TABLE * table);
 ****************************************************************************/
 static handler *partition_create_handler(TABLE_SHARE *share);
+static uint partition_flags();
+static uint alter_table_flags(uint flags);
 handlerton partition_hton = {
   MYSQL_HANDLERTON_INTERFACE_VERSION,
@@ -96,15 +98,68 @@ handlerton partition_hton = {
   NULL, /* Start Consistent Snapshot */
   NULL, /* Flush logs */
   NULL, /* Show status */
+  partition_flags, /* Partition flags */
+  alter_table_flags, /* Alter table flags */
   NULL, /* Alter Tablespace */
   HTON_NOT_USER_SELECTABLE | HTON_HIDDEN
 };
+/*
+  Create new partition handler
+
+  SYNOPSIS
+    partition_create_handler()
+    table                       Table object
+
+  RETURN VALUE
+    New partition object
+*/
+
 static handler *partition_create_handler(TABLE_SHARE *share)
 {
   return new ha_partition(share);
 }
+/*
+  HA_CAN_PARTITION:
+  Used by storage engines that can handle partitioning without this
+  partition handler
+  (Partition, NDB)
+
+  HA_CAN_UPDATE_PARTITION_KEY:
+  Set if the handler can update fields that are part of the partition
+  function.
+
+  HA_CAN_PARTITION_UNIQUE:
+  Set if the handler can handle unique indexes where the fields of the
+  unique key are not part of the fields of the partition function. Thus
+  a unique key can be set on all fields.
+
+  HA_USE_AUTO_PARTITION:
+  Set if the handler sets all tables to be partitioned by default.
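+
+  As an example, the partition handler itself only returns HA_CAN_PARTITION
+  below, whereas an engine such as NDB that implements partitioning natively
+  can also report HA_CAN_UPDATE_PARTITION_KEY and HA_CAN_PARTITION_UNIQUE
+  through its own partition_flags function.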
+*/ + +static uint partition_flags() +{ + return HA_CAN_PARTITION; +} + +static uint alter_table_flags(uint flags __attribute__((unused))) +{ + return (HA_PARTITION_FUNCTION_SUPPORTED | + HA_FAST_CHANGE_PARTITION); +} + +/* + Constructor method + + SYNOPSIS + ha_partition() + table Table object + + RETURN VALUE + NONE +*/ ha_partition::ha_partition(TABLE_SHARE *share) :handler(&partition_hton, share), m_part_info(NULL), m_create_handler(FALSE), @@ -116,6 +171,17 @@ ha_partition::ha_partition(TABLE_SHARE *share) } +/* + Constructor method + + SYNOPSIS + ha_partition() + part_info Partition info + + RETURN VALUE + NONE +*/ + ha_partition::ha_partition(partition_info *part_info) :handler(&partition_hton, NULL), m_part_info(part_info), m_create_handler(TRUE), @@ -129,13 +195,28 @@ ha_partition::ha_partition(partition_info *part_info) } +/* + Initialise handler object + + SYNOPSIS + init_handler_variables() + + RETURN VALUE + NONE +*/ + void ha_partition::init_handler_variables() { active_index= MAX_KEY; + m_mode= 0; + m_open_test_lock= 0; m_file_buffer= NULL; m_name_buffer_ptr= NULL; m_engine_array= NULL; m_file= NULL; + m_reorged_file= NULL; + m_reorged_parts= 0; + m_added_file= NULL; m_tot_parts= 0; m_has_transactions= 0; m_pkey_is_clustered= 0; @@ -172,6 +253,16 @@ void ha_partition::init_handler_variables() } +/* + Destructor method + + SYNOPSIS + ~ha_partition() + + RETURN VALUE + NONE +*/ + ha_partition::~ha_partition() { DBUG_ENTER("ha_partition::~ha_partition()"); @@ -189,6 +280,17 @@ ha_partition::~ha_partition() /* + Initialise partition handler object + + SYNOPSIS + ha_initialise() + + RETURN VALUE + 1 Error + 0 Success + + DESCRIPTION + The partition handler is only a layer on top of other engines. Thus it can't really perform anything without the underlying handlers. Thus we add this method as part of the allocation of a handler object. @@ -218,6 +320,7 @@ ha_partition::~ha_partition() sort will be performed using the underlying handlers. 5) primary_key_is_clustered, has_transactions and low_byte_first is calculated here. + */ int ha_partition::ha_initialise() @@ -244,7 +347,7 @@ int ha_partition::ha_initialise() } else if (get_from_handler_file(table_share->normalized_path.str)) { - my_error(ER_OUTOFMEMORY, MYF(0), 129); //Temporary fix TODO print_error + mem_alloc_error(2); DBUG_RETURN(1); } /* @@ -289,47 +392,119 @@ int ha_partition::ha_initialise() MODULE meta data changes ****************************************************************************/ /* - This method is used to calculate the partition name, service routine to - the del_ren_cre_table method. + Create partition names + + SYNOPSIS + create_partition_name() + out:out Created partition name string + in1 First part + in2 Second part + name_variant Normal, temporary or renamed partition name + + RETURN VALUE + NONE + + DESCRIPTION + This method is used to calculate the partition name, service routine to + the del_ren_cre_table method. 
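+
+  For example (paths and names here are illustrative only): with in1 set to
+  "./test/t1" and in2 set to "p0", the variants below produce
+  "./test/t1#P#p0" (normal), "./test/t1#P#p0#TMP#" (temporary) and
+  "./test/t1#P#p0#REN#" (renamed).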
*/
-static void create_partition_name(char *out, const char *in1, const char *in2)
+#define NORMAL_PART_NAME 0
+#define TEMP_PART_NAME 1
+#define RENAMED_PART_NAME 2
+static void create_partition_name(char *out, const char *in1,
+                                  const char *in2, uint name_variant,
+                                  bool translate)
 {
-  strxmov(out, in1, "_", in2, NullS);
+  char transl_part_name[FN_REFLEN];
+  const char *transl_part;
+
+  if (translate)
+  {
+    tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+    transl_part= transl_part_name;
+  }
+  else
+    transl_part= in2;
+  if (name_variant == NORMAL_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, NullS);
+  else if (name_variant == TEMP_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, "#TMP#", NullS);
+  else if (name_variant == RENAMED_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part, "#REN#", NullS);
 }
 /*
-  This method is used to calculate the partition name, service routine to
+  Create subpartition name
+
+  SYNOPSIS
+    create_subpartition_name()
+    out:out                   Created partition name string
+    in1                       First part
+    in2                       Second part
+    in3                       Third part
+    name_variant              Normal, temporary or renamed partition name
+
+  RETURN VALUE
+    NONE
+
+  DESCRIPTION
+    This method is used to calculate the subpartition name, service routine to
   the del_ren_cre_table method.
 */
 static void create_subpartition_name(char *out, const char *in1,
-                                     const char *in2, const char *in3)
+                                     const char *in2, const char *in3,
+                                     uint name_variant)
 {
-  strxmov(out, in1, "_", in2, "_", in3, NullS);
+  char transl_part_name[FN_REFLEN], transl_subpart_name[FN_REFLEN];
+
+  tablename_to_filename(in2, transl_part_name, FN_REFLEN);
+  tablename_to_filename(in3, transl_subpart_name, FN_REFLEN);
+  if (name_variant == NORMAL_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, NullS);
+  else if (name_variant == TEMP_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, "#TMP#", NullS);
+  else if (name_variant == RENAMED_PART_NAME)
+    strxmov(out, in1, "#P#", transl_part_name,
+            "#SP#", transl_subpart_name, "#REN#", NullS);
 }
 /*
-  Used to delete a table. By the time delete_table() has been called all
-  opened references to this table will have been closed (and your globally
-  shared references released. The variable name will just be the name of
-  the table. You will need to remove any files you have created at this
-  point.
+  Delete a table
-  If you do not implement this, the default delete_table() is called from
-  handler.cc and it will delete all files with the file extentions returned
-  by bas_ext().
+  SYNOPSIS
+    delete_table()
+    name                    Full path of table name
-  Called from handler.cc by delete_table and ha_create_table(). Only used
-  during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
-  the storage engine.
+  RETURN VALUE
+    >0                        Error
+    0                         Success
+
+  DESCRIPTION
+    Used to delete a table. By the time delete_table() has been called all
+    opened references to this table will have been closed (and your globally
+    shared references released). The variable name will just be the name of
+    the table. You will need to remove any files you have created at this
+    point.
+
+    If you do not implement this, the default delete_table() is called from
+    handler.cc and it will delete all files with the file extensions returned
+    by bas_ext().
+
+    Called from handler.cc by delete_table and ha_create_table(). Only used
+    during create if the table_flag HA_DROP_BEFORE_CREATE was specified for
+    the storage engine.
*/ int ha_partition::delete_table(const char *name) { int error; DBUG_ENTER("ha_partition::delete_table"); + if ((error= del_ren_cre_table(name, NULL, NULL, NULL))) DBUG_RETURN(error); DBUG_RETURN(handler::delete_table(name)); @@ -337,19 +512,32 @@ int ha_partition::delete_table(const char *name) /* - Renames a table from one name to another from alter table call. + Rename a table - If you do not implement this, the default rename_table() is called from - handler.cc and it will delete all files with the file extentions returned - by bas_ext(). + SYNOPSIS + rename_table() + from Full path of old table name + to Full path of new table name - Called from sql_table.cc by mysql_rename_table(). + RETURN VALUE + >0 Error + 0 Success + + DESCRIPTION + Renames a table from one name to another from alter table call. + + If you do not implement this, the default rename_table() is called from + handler.cc and it will rename all files with the file extentions returned + by bas_ext(). + + Called from sql_table.cc by mysql_rename_table(). */ int ha_partition::rename_table(const char *from, const char *to) { int error; DBUG_ENTER("ha_partition::rename_table"); + if ((error= del_ren_cre_table(from, to, NULL, NULL))) DBUG_RETURN(error); DBUG_RETURN(handler::rename_table(from, to)); @@ -357,11 +545,22 @@ int ha_partition::rename_table(const char *from, const char *to) /* - create_handler_files is called to create any handler specific files - before opening the file with openfrm to later call ::create on the - file object. - In the partition handler this is used to store the names of partitions - and types of engines in the partitions. + Create the handler file (.par-file) + + SYNOPSIS + create_handler_files() + name Full path of table name + + RETURN VALUE + >0 Error + 0 Success + + DESCRIPTION + create_handler_files is called to create any handler specific files + before opening the file with openfrm to later call ::create on the + file object. + In the partition handler this is used to store the names of partitions + and types of engines in the partitions. */ int ha_partition::create_handler_files(const char *name) @@ -372,7 +571,6 @@ int ha_partition::create_handler_files(const char *name) We need to update total number of parts since we might write the handler file as part of a partition management command */ - m_tot_parts= get_tot_partitions(m_part_info); if (create_handler_file(name)) { my_error(ER_CANT_CREATE_HANDLER_FILE, MYF(0)); @@ -383,14 +581,27 @@ int ha_partition::create_handler_files(const char *name) /* - create() is called to create a table. The variable name will have the name - of the table. When create() is called you do not need to worry about - opening the table. Also, the FRM file will have already been created so - adjusting create_info will not do you any good. You can overwrite the frm - file at this point if you wish to change the table definition, but there - are no methods currently provided for doing that. + Create a partitioned table - Called from handle.cc by ha_create_table(). + SYNOPSIS + create() + name Full path of table name + table_arg Table object + create_info Create info generated for CREATE TABLE + + RETURN VALUE + >0 Error + 0 Success + + DESCRIPTION + create() is called to create a table. The variable name will have the name + of the table. When create() is called you do not need to worry about + opening the table. Also, the FRM file will have already been created so + adjusting create_info will not do you any good. 
You can overwrite the frm + file at this point if you wish to change the table definition, but there + are no methods currently provided for doing that. + + Called from handler.cc by ha_create_table(). */ int ha_partition::create(const char *name, TABLE *table_arg, @@ -410,23 +621,838 @@ int ha_partition::create(const char *name, TABLE *table_arg, DBUG_RETURN(0); } + +/* + Drop partitions as part of ALTER TABLE of partitions + + SYNOPSIS + drop_partitions() + path Complete path of db and table name + + RETURN VALUE + >0 Failure + 0 Success + + DESCRIPTION + Use part_info object on handler object to deduce which partitions to + drop (each partition has a state attached to it) +*/ + int ha_partition::drop_partitions(const char *path) { List_iterator part_it(m_part_info->partitions); + List_iterator temp_it(m_part_info->temp_partitions); char part_name_buff[FN_REFLEN]; - uint no_parts= m_part_info->no_parts; - uint no_subparts= m_part_info->no_subparts, i= 0; + uint no_parts= m_part_info->partitions.elements; + uint part_count= 0; + uint no_subparts= m_part_info->no_subparts; + uint i= 0; + uint name_variant; + int error= 1; + bool reorged_parts= (m_reorged_parts > 0); + bool temp_partitions= (m_part_info->temp_partitions.elements > 0); + DBUG_ENTER("ha_partition::drop_partitions"); + + if (temp_partitions) + no_parts= m_part_info->temp_partitions.elements; + do + { + partition_element *part_elem; + if (temp_partitions) + { + /* + We need to remove the reorganised partitions that were put in the + temp_partitions-list. + */ + part_elem= temp_it++; + DBUG_ASSERT(part_elem->part_state == PART_TO_BE_DROPPED); + } + else + part_elem= part_it++; + if (part_elem->part_state == PART_TO_BE_DROPPED || + part_elem->part_state == PART_IS_CHANGED) + { + handler *file; + /* + This part is to be dropped, meaning the part or all its subparts. + */ + name_variant= NORMAL_PART_NAME; + if (part_elem->part_state == PART_IS_CHANGED || + (part_elem->part_state == PART_TO_BE_DROPPED && temp_partitions)) + name_variant= RENAMED_PART_NAME; + if (m_is_sub_partitioned) + { + List_iterator sub_it(part_elem->subpartitions); + uint j= 0, part; + do + { + partition_element *sub_elem= sub_it++; + part= i * no_subparts + j; + create_subpartition_name(part_name_buff, path, + part_elem->partition_name, + sub_elem->partition_name, name_variant); + if (reorged_parts) + file= m_reorged_file[part_count++]; + else + file= m_file[part]; + DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff)); + error= file->delete_table((const char *) part_name_buff); + } while (++j < no_subparts); + } + else + { + create_partition_name(part_name_buff, path, + part_elem->partition_name, name_variant, + TRUE); + if (reorged_parts) + file= m_reorged_file[part_count++]; + else + file= m_file[i]; + DBUG_PRINT("info", ("Drop partition %s", part_name_buff)); + error= file->delete_table((const char *) part_name_buff); + } + if (part_elem->part_state == PART_IS_CHANGED) + part_elem->part_state= PART_NORMAL; + else + part_elem->part_state= PART_IS_DROPPED; + } + } while (++i < no_parts); + DBUG_RETURN(error); +} + + +/* + Rename partitions as part of ALTER TABLE of partitions + + SYNOPSIS + rename_partitions() + path Complete path of db and table name + + RETURN VALUE + TRUE Failure + FALSE Success + + DESCRIPTION + When reorganising partitions, adding hash partitions and coalescing + partitions it can be necessary to rename partitions while holding + an exclusive lock on the table. 
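+    As an illustration (names assumed): a reorganised partition p0 of table
+    t1 is first moved aside from t1#P#p0 to t1#P#p0#REN#, after which its
+    replacement is renamed from t1#P#p0#TMP# to t1#P#p0.
+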
+ Which partitions to rename is given by state of partitions found by the + partition info struct referenced from the handler object +*/ + +int ha_partition::rename_partitions(const char *path) +{ + List_iterator part_it(m_part_info->partitions); + List_iterator temp_it(m_part_info->temp_partitions); + char part_name_buff[FN_REFLEN]; + char norm_name_buff[FN_REFLEN]; + uint no_parts= m_part_info->partitions.elements; + uint part_count= 0; + uint no_subparts= m_part_info->no_subparts; + uint i= 0; + uint j= 0; int error= 1; - DBUG_ENTER("ha_partition::drop_partitions()"); + uint temp_partitions= m_part_info->temp_partitions.elements; + handler *file; + partition_element *part_elem, *sub_elem; + DBUG_ENTER("ha_partition::rename_partitions"); + + if (temp_partitions) + { + do + { + part_elem= temp_it++; + if (m_is_sub_partitioned) + { + List_iterator sub_it(part_elem->subpartitions); + do + { + sub_elem= sub_it++; + file= m_reorged_file[part_count++]; + create_subpartition_name(part_name_buff, path, + part_elem->partition_name, + sub_elem->partition_name, + RENAMED_PART_NAME); + create_subpartition_name(norm_name_buff, path, + part_elem->partition_name, + sub_elem->partition_name, + NORMAL_PART_NAME); + DBUG_PRINT("info", ("Rename subpartition from %s to %s", + norm_name_buff, part_name_buff)); + error= file->rename_table((const char *) norm_name_buff, + (const char *) part_name_buff); + } while (++j < no_subparts); + } + else + { + file= m_reorged_file[part_count++]; + create_partition_name(part_name_buff, path, + part_elem->partition_name, RENAMED_PART_NAME, + TRUE); + create_partition_name(norm_name_buff, path, + part_elem->partition_name, NORMAL_PART_NAME, + TRUE); + DBUG_PRINT("info", ("Rename partition from %s to %s", + norm_name_buff, part_name_buff)); + error= file->rename_table((const char *) norm_name_buff, + (const char *) part_name_buff); + } + } while (++i < temp_partitions); + } + i= 0; + do + { + part_elem= part_it++; + if (part_elem->part_state == PART_IS_CHANGED || + (part_elem->part_state == PART_IS_ADDED && temp_partitions)) + { + if (m_is_sub_partitioned) + { + List_iterator sub_it(part_elem->subpartitions); + uint part; + + j= 0; + do + { + sub_elem= sub_it++; + part= i * no_subparts + j; + create_subpartition_name(norm_name_buff, path, + part_elem->partition_name, + sub_elem->partition_name, + NORMAL_PART_NAME); + if (part_elem->part_state == PART_IS_CHANGED) + { + file= m_reorged_file[part_count++]; + create_subpartition_name(part_name_buff, path, + part_elem->partition_name, + sub_elem->partition_name, + RENAMED_PART_NAME); + DBUG_PRINT("info", ("Rename subpartition from %s to %s", + norm_name_buff, part_name_buff)); + error= file->rename_table((const char *) norm_name_buff, + (const char *) part_name_buff); + } + file= m_new_file[part]; + create_subpartition_name(part_name_buff, path, + part_elem->partition_name, + sub_elem->partition_name, + TEMP_PART_NAME); + DBUG_PRINT("info", ("Rename subpartition from %s to %s", + part_name_buff, norm_name_buff)); + error= file->rename_table((const char *) part_name_buff, + (const char *) norm_name_buff); + } while (++j < no_subparts); + } + else + { + create_partition_name(norm_name_buff, path, + part_elem->partition_name, NORMAL_PART_NAME, + TRUE); + if (part_elem->part_state == PART_IS_CHANGED) + { + file= m_reorged_file[part_count++]; + create_partition_name(part_name_buff, path, + part_elem->partition_name, RENAMED_PART_NAME, + TRUE); + DBUG_PRINT("info", ("Rename partition from %s to %s", + norm_name_buff, part_name_buff)); 
+ error= file->rename_table((const char *) norm_name_buff, + (const char *) part_name_buff); + } + file= m_new_file[i]; + create_partition_name(part_name_buff, path, + part_elem->partition_name, TEMP_PART_NAME, + TRUE); + DBUG_PRINT("info", ("Rename partition from %s to %s", + part_name_buff, norm_name_buff)); + error= file->rename_table((const char *) part_name_buff, + (const char *) norm_name_buff); + } + } + } while (++i < no_parts); + DBUG_RETURN(error); +} + + +#define OPTIMIZE_PARTS 1 +#define ANALYZE_PARTS 2 +#define CHECK_PARTS 3 +#define REPAIR_PARTS 4 + +/* + Optimize table + + SYNOPSIS + optimize() + thd Thread object + check_opt Check/analyze/repair/optimize options + + RETURN VALUES + >0 Error + 0 Success +*/ + +int ha_partition::optimize(THD *thd, HA_CHECK_OPT *check_opt) +{ + DBUG_ENTER("ha_partition::optimize"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + OPTIMIZE_PARTS, TRUE)); +} + + +/* + Analyze table + + SYNOPSIS + analyze() + thd Thread object + check_opt Check/analyze/repair/optimize options + + RETURN VALUES + >0 Error + 0 Success +*/ + +int ha_partition::analyze(THD *thd, HA_CHECK_OPT *check_opt) +{ + DBUG_ENTER("ha_partition::analyze"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + ANALYZE_PARTS, TRUE)); +} + + +/* + Check table + + SYNOPSIS + check() + thd Thread object + check_opt Check/analyze/repair/optimize options + + RETURN VALUES + >0 Error + 0 Success +*/ + +int ha_partition::check(THD *thd, HA_CHECK_OPT *check_opt) +{ + DBUG_ENTER("ha_partition::check"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + CHECK_PARTS, TRUE)); +} + + +/* + Repair table + + SYNOPSIS + repair() + thd Thread object + check_opt Check/analyze/repair/optimize options + + RETURN VALUES + >0 Error + 0 Success +*/ + +int ha_partition::repair(THD *thd, HA_CHECK_OPT *check_opt) +{ + DBUG_ENTER("ha_partition::repair"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + REPAIR_PARTS, TRUE)); +} + +/* + Optimize partitions + + SYNOPSIS + optimize_partitions() + thd Thread object + RETURN VALUE + >0 Failure + 0 Success + DESCRIPTION + Call optimize on each partition marked with partition state PART_CHANGED +*/ + +int ha_partition::optimize_partitions(THD *thd) +{ + DBUG_ENTER("ha_partition::optimize_partitions"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + OPTIMIZE_PARTS, FALSE)); +} + +/* + Analyze partitions + + SYNOPSIS + analyze_partitions() + thd Thread object + RETURN VALUE + >0 Failure + 0 Success + DESCRIPTION + Call analyze on each partition marked with partition state PART_CHANGED +*/ + +int ha_partition::analyze_partitions(THD *thd) +{ + DBUG_ENTER("ha_partition::analyze_partitions"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + ANALYZE_PARTS, FALSE)); +} + +/* + Check partitions + + SYNOPSIS + check_partitions() + thd Thread object + RETURN VALUE + >0 Failure + 0 Success + DESCRIPTION + Call check on each partition marked with partition state PART_CHANGED +*/ + +int ha_partition::check_partitions(THD *thd) +{ + DBUG_ENTER("ha_partition::check_partitions"); + + DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + CHECK_PARTS, FALSE)); +} + +/* + Repair partitions + + SYNOPSIS + repair_partitions() + thd Thread object + RETURN VALUE + >0 Failure + 0 Success + DESCRIPTION + Call repair on each partition marked with partition state PART_CHANGED +*/ + +int ha_partition::repair_partitions(THD *thd) +{ + DBUG_ENTER("ha_partition::repair_partitions"); + + 
DBUG_RETURN(handle_opt_partitions(thd, &thd->lex->check_opt, + REPAIR_PARTS, FALSE)); +} + + +/* + Handle optimize/analyze/check/repair of one partition + + SYNOPSIS + handle_opt_part() + thd Thread object + check_opt Options + file Handler object of partition + flag Optimize/Analyze/Check/Repair flag + + RETURN VALUE + >0 Failure + 0 Success +*/ + +static int handle_opt_part(THD *thd, HA_CHECK_OPT *check_opt, + handler *file, uint flag) +{ + int error; + DBUG_ENTER("handle_opt_part"); + DBUG_PRINT("enter", ("flag = %u", flag)); + + if (flag == OPTIMIZE_PARTS) + error= file->optimize(thd, check_opt); + else if (flag == ANALYZE_PARTS) + error= file->analyze(thd, check_opt); + else if (flag == CHECK_PARTS) + error= file->check(thd, check_opt); + else if (flag == REPAIR_PARTS) + error= file->repair(thd, check_opt); + else + { + DBUG_ASSERT(FALSE); + error= 1; + } + if (error == HA_ADMIN_ALREADY_DONE) + error= 0; + DBUG_RETURN(error); +} + + +/* + Handle optimize/analyze/check/repair of partitions + + SYNOPSIS + handle_opt_partitions() + thd Thread object + check_opt Options + flag Optimize/Analyze/Check/Repair flag + all_parts All partitions or only a subset + + RETURN VALUE + >0 Failure + 0 Success +*/ + +int ha_partition::handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, + uint flag, bool all_parts) +{ + List_iterator part_it(m_part_info->partitions); + uint no_parts= m_part_info->no_parts; + uint no_subparts= m_part_info->no_subparts; + uint i= 0; + LEX *lex= thd->lex; + int error; + DBUG_ENTER("ha_partition::handle_opt_partitions"); + DBUG_PRINT("enter", ("all_parts %u, flag= %u", all_parts, flag)); do { partition_element *part_elem= part_it++; - if (part_elem->part_state == PART_IS_DROPPED) + if (all_parts || part_elem->part_state == PART_CHANGED) + { + handler *file; + if (m_is_sub_partitioned) + { + List_iterator sub_it(part_elem->subpartitions); + uint j= 0, part; + do + { + partition_element *sub_elem= sub_it++; + part= i * no_subparts + j; + DBUG_PRINT("info", ("Optimize subpartition %u", + part)); + if ((error= handle_opt_part(thd, check_opt, m_file[part], flag))) + { + my_error(ER_GET_ERRNO, MYF(0), error); + DBUG_RETURN(TRUE); + } + } while (++j < no_subparts); + } + else + { + DBUG_PRINT("info", ("Optimize partition %u", i)); + if ((error= handle_opt_part(thd, check_opt, m_file[i], flag))) + { + my_error(ER_GET_ERRNO, MYF(0), error); + DBUG_RETURN(TRUE); + } + } + } + } while (++i < no_parts); + DBUG_RETURN(FALSE); +} + +/* + Prepare by creating a new partition + + SYNOPSIS + prepare_new_partition() + table Table object + create_info Create info from CREATE TABLE + file Handler object of new partition + part_name partition name + + RETURN VALUE + >0 Error + 0 Success +*/ + +int ha_partition::prepare_new_partition(TABLE *table, + HA_CREATE_INFO *create_info, + handler *file, const char *part_name) +{ + int error; + bool create_flag= FALSE; + bool open_flag= FALSE; + DBUG_ENTER("prepare_new_partition"); + + if ((error= file->create(part_name, table, create_info))) + goto error; + create_flag= TRUE; + if ((error= file->ha_open(table, part_name, m_mode, m_open_test_lock))) + goto error; + if ((error= file->external_lock(current_thd, m_lock_type))) + goto error; + + DBUG_RETURN(0); +error: + if (create_flag) + VOID(file->delete_table(part_name)); + print_error(error, MYF(0)); + DBUG_RETURN(error); +} + + +/* + Cleanup by removing all created partitions after error + + SYNOPSIS + cleanup_new_partition() + part_count Number of partitions to remove + + RETURN VALUE + NONE + + 
DESCRIPTION + TODO: + We must ensure that in the case that we get an error during the process + that we call external_lock with F_UNLCK, close the table and delete the + table in the case where we have been successful with prepare_handler. + We solve this by keeping an array of successful calls to prepare_handler + which can then be used to undo the call. +*/ + +void ha_partition::cleanup_new_partition(uint part_count) +{ + handler **save_m_file= m_file; + DBUG_ENTER("ha_partition::cleanup_new_partition"); + + if (m_added_file && m_added_file[0]) + { + m_file= m_added_file; + m_added_file= NULL; + + external_lock(current_thd, F_UNLCK); + /* delete_table also needed, a bit more complex */ + close(); + + m_added_file= m_file; + m_file= save_m_file; + } + DBUG_VOID_RETURN; +} + +/* + Implement the partition changes defined by ALTER TABLE of partitions + + SYNOPSIS + change_partitions() + create_info HA_CREATE_INFO object describing all + fields and indexes in table + path Complete path of db and table name + out: copied Output parameter where number of copied + records are added + out: deleted Output parameter where number of deleted + records are added + pack_frm_data Reference to packed frm file + pack_frm_len Length of packed frm file + + RETURN VALUE + >0 Failure + 0 Success + + DESCRIPTION + Add and copy if needed a number of partitions, during this operation + no other operation is ongoing in the server. This is used by + ADD PARTITION all types as well as by REORGANIZE PARTITION. For + one-phased implementations it is used also by DROP and COALESCE + PARTITIONs. + One-phased implementation needs the new frm file, other handlers will + get zero length and a NULL reference here. +*/ + +int ha_partition::change_partitions(HA_CREATE_INFO *create_info, + const char *path, + ulonglong *copied, + ulonglong *deleted, + const void *pack_frm_data + __attribute__((unused)), + uint pack_frm_len + __attribute__((unused))) +{ + List_iterator part_it(m_part_info->partitions); + List_iterator t_it(m_part_info->temp_partitions); + char part_name_buff[FN_REFLEN]; + uint no_parts= m_part_info->partitions.elements; + uint no_subparts= m_part_info->no_subparts; + uint i= 0; + uint no_remain_partitions, part_count; + handler **new_file_array; + int error= 1; + bool first; + bool copy_parts= FALSE; + uint temp_partitions= m_part_info->temp_partitions.elements; + THD *thd= current_thd; + DBUG_ENTER("ha_partition::change_partitions"); + + m_reorged_parts= 0; + if (!is_sub_partitioned(m_part_info)) + no_subparts= 1; + + /* + Step 1: + Calculate number of reorganised partitions and allocate space for + their handler references. + */ + if (temp_partitions) + { + m_reorged_parts= temp_partitions * no_subparts; + } + else + { + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_CHANGED || + part_elem->part_state == PART_REORGED_DROPPED) + { + m_reorged_parts+= no_subparts; + } + } while (++i < no_parts); + } + if (m_reorged_parts && + !(m_reorged_file= (handler**)sql_calloc(sizeof(partition_element*)* + (m_reorged_parts + 1)))) + { + mem_alloc_error(sizeof(partition_element*)*(m_reorged_parts+1)); + DBUG_RETURN(TRUE); + } + + /* + Step 2: + Calculate number of partitions after change and allocate space for + their handler references. 
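+
+      For instance (an assumed layout): a table with four partitions and two
+      subpartitions per partition that gets two new partitions added ends up
+      with four PART_NORMAL and two PART_TO_BE_ADDED elements, so the loop
+      below computes no_remain_partitions= (4 + 2) * 2 = 12.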
+ */ + no_remain_partitions= 0; + if (temp_partitions) + { + no_remain_partitions= no_parts * no_subparts; + } + else + { + part_it.rewind(); + i= 0; + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_NORMAL || + part_elem->part_state == PART_TO_BE_ADDED || + part_elem->part_state == PART_CHANGED) + { + no_remain_partitions+= no_subparts; + } + } while (++i < no_parts); + } + if (!(new_file_array= (handler**)sql_calloc(sizeof(handler*)* + (2*(no_remain_partitions + 1))))) + { + mem_alloc_error(sizeof(handler*)*2*(no_remain_partitions+1)); + DBUG_RETURN(TRUE); + } + m_added_file= &new_file_array[no_remain_partitions + 1]; + + /* + Step 3: + Fill m_reorged_file with handler references and NULL at the end + */ + if (m_reorged_parts) + { + i= 0; + part_count= 0; + first= TRUE; + part_it.rewind(); + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_CHANGED || + part_elem->part_state == PART_REORGED_DROPPED) + { + memcpy((void*)&m_reorged_file[part_count], + (void*)&m_file[i*no_subparts], + sizeof(handler*)*no_subparts); + part_count+= no_subparts; + } + else if (first && temp_partitions && + part_elem->part_state == PART_TO_BE_ADDED) + { + /* + When doing an ALTER TABLE REORGANIZE PARTITION a number of + partitions is to be reorganised into a set of new partitions. + The reorganised partitions are in this case in the temp_partitions + list. We copy all of them in one batch and thus we only do this + until we find the first partition with state PART_TO_BE_ADDED + since this is where the new partitions go in and where the old + ones used to be. + */ + first= FALSE; + memcpy((void*)m_reorged_file, &m_file[i*no_subparts], + sizeof(handler*)*m_reorged_parts*no_subparts); + } + } while (++i < no_parts); + } + + /* + Step 4: + Fill new_file_array with handler references. Create the handlers if + needed. + */ + i= 0; + part_count= 0; + part_it.rewind(); + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_NORMAL) + { + memcpy((void*)&new_file_array[part_count], (void*)&m_file[i], + sizeof(handler*)*no_subparts); + part_count+= no_subparts; + } + else if (part_elem->part_state == PART_CHANGED || + part_elem->part_state == PART_TO_BE_ADDED) + { + uint j= 0; + do + { + if (!(new_file_array[part_count++]= get_new_handler(table->s, + thd->mem_root, + part_elem->engine_type))) + { + mem_alloc_error(sizeof(handler)); + DBUG_RETURN(TRUE); + } + } while (++j < no_subparts); + } + } while (++i < no_parts); + + /* + Step 5: + Create the new partitions and also open, lock and call external_lock + on them to prepare them for copy phase and also for later close + calls + */ + i= 0; + part_count= 0; + part_it.rewind(); + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_TO_BE_ADDED || + part_elem->part_state == PART_CHANGED) { /* - This part is to be dropped, meaning the part or all its subparts. + A new partition needs to be created. PART_TO_BE_ADDED means an + entirely new partition, and PART_CHANGED means a changed partition + that will still exist with either more or less data in it.
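  To see where these states are headed, the bookkeeping in Step 6 below promotes each pending state to its completed counterpart. A minimal standalone sketch of that transition table (plain C++ with invented helper names; the real states live on partition_element):

    #include <cassert>
    enum part_state_t { PART_NORMAL, PART_TO_BE_ADDED, PART_IS_ADDED,
                        PART_CHANGED, PART_IS_CHANGED,
                        PART_REORGED_DROPPED, PART_TO_BE_DROPPED,
                        PART_TO_BE_REORGED };
    // Mirrors Step 6: mark added/changed partitions done, queue reorganised
    // ones for the later drop phase.
    static part_state_t advance_state(part_state_t s)
    {
      switch (s)
      {
      case PART_TO_BE_ADDED:     return PART_IS_ADDED;
      case PART_CHANGED:         return PART_IS_CHANGED;
      case PART_REORGED_DROPPED: return PART_TO_BE_DROPPED;
      case PART_TO_BE_REORGED:   return PART_TO_BE_DROPPED; // temp_partitions list
      default:                   return s;                  // untouched states
      }
    }
    int main()
    {
      assert(advance_state(PART_TO_BE_ADDED) == PART_IS_ADDED);
      assert(advance_state(PART_NORMAL) == PART_NORMAL);
      return 0;
    }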
*/ + uint name_variant= NORMAL_PART_NAME; + if (part_elem->part_state == PART_CHANGED || + (part_elem->part_state == PART_TO_BE_ADDED && temp_partitions)) + name_variant= TEMP_PART_NAME; if (is_sub_partitioned(m_part_info)) { List_iterator sub_it(part_elem->subpartitions); @@ -436,44 +1462,204 @@ int ha_partition::drop_partitions(const char *path) partition_element *sub_elem= sub_it++; create_subpartition_name(part_name_buff, path, part_elem->partition_name, - sub_elem->partition_name); + sub_elem->partition_name, + name_variant); part= i * no_subparts + j; - DBUG_PRINT("info", ("Drop subpartition %s", part_name_buff)); - error= m_file[part]->delete_table((const char *) part_name_buff); + DBUG_PRINT("info", ("Add subpartition %s", part_name_buff)); + if ((error= prepare_new_partition(table, create_info, + new_file_array[part], + (const char *)part_name_buff))) + { + cleanup_new_partition(part_count); + DBUG_RETURN(TRUE); + } + m_added_file[part_count++]= new_file_array[part]; } while (++j < no_subparts); } else { create_partition_name(part_name_buff, path, - part_elem->partition_name); - DBUG_PRINT("info", ("Drop partition %s", part_name_buff)); - error= m_file[i]->delete_table((const char *) part_name_buff); + part_elem->partition_name, name_variant, + TRUE); + DBUG_PRINT("info", ("Add partition %s", part_name_buff)); + if ((error= prepare_new_partition(table, create_info, + new_file_array[i], + (const char *)part_name_buff))) + { + cleanup_new_partition(part_count); + DBUG_RETURN(TRUE); + } + m_added_file[part_count++]= new_file_array[i]; } } } while (++i < no_parts); - DBUG_RETURN(error); + + /* + Step 6: + State update to prepare for next write of the frm file. + */ + i= 0; + part_it.rewind(); + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_TO_BE_ADDED) + part_elem->part_state= PART_IS_ADDED; + else if (part_elem->part_state == PART_CHANGED) + part_elem->part_state= PART_IS_CHANGED; + else if (part_elem->part_state == PART_REORGED_DROPPED) + part_elem->part_state= PART_TO_BE_DROPPED; + } while (++i < no_parts); + for (i= 0; i < temp_partitions; i++) + { + partition_element *part_elem= t_it++; + DBUG_ASSERT(part_elem->part_state == PART_TO_BE_REORGED); + part_elem->part_state= PART_TO_BE_DROPPED; + } + m_new_file= new_file_array; + DBUG_RETURN(copy_partitions(copied, deleted)); } + +/* + Copy partitions as part of ALTER TABLE of partitions + + SYNOPSIS + copy_partitions() + out:copied Number of records copied + out:deleted Number of records deleted + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + change_partitions has done all the preparations, now it is time to + actually copy the data from the reorganised partitions to the new + partitions. +*/ + +int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted) +{ + uint reorg_part= 0; + int result= 0; + longlong func_value; + DBUG_ENTER("ha_partition::copy_partitions"); + + while (reorg_part < m_reorged_parts) + { + handler *file= m_reorged_file[reorg_part]; + uint32 new_part; + + late_extra_cache(reorg_part); + if ((result= file->ha_rnd_init(1))) + goto error; + while (TRUE) + { + if ((result= file->rnd_next(m_rec0))) + { + if (result == HA_ERR_RECORD_DELETED) + continue; //Probably MyISAM + if (result != HA_ERR_END_OF_FILE) + goto error; + /* + End-of-file reached, break out to continue with next partition or + end the copy process. 
+ */ + break; + } + /* Found record to insert into new handler */ + if (m_part_info->get_partition_id(m_part_info, &new_part, + &func_value)) + { + /* + This record is in the original table but will not be in the new + table since it doesn't fit into any partition any longer due to + changed partitioning ranges or list values. + */ + (*deleted)++; + } + else + { + /* Copy record to new handler */ + (*copied)++; + if ((result= m_new_file[new_part]->write_row(m_rec0))) + goto error; + } + } + late_extra_no_cache(reorg_part); + file->rnd_end(); + reorg_part++; + } + DBUG_RETURN(FALSE); +error: + print_error(result, MYF(0)); + DBUG_RETURN(TRUE); +} + + +/* + Update create info as part of ALTER TABLE + + SYNOPSIS + update_create_info() + create_info Create info from ALTER TABLE + + RETURN VALUE + NONE + + DESCRIPTION + Method empty so far +*/ + void ha_partition::update_create_info(HA_CREATE_INFO *create_info) { return; } +/* + Change comments specific to handler + + SYNOPSIS + update_table_comment() + comment Original comment + + RETURN VALUE + new comment + + DESCRIPTION + No comment changes so far +*/ + char *ha_partition::update_table_comment(const char *comment) { - return (char*) comment; // Nothing to change + return (char*) comment; /* Nothing to change */ } /* - Common routine to handle delete_table and rename_table. - The routine uses the partition handler file to get the - names of the partition instances. Both these routines - are called after creating the handler without table - object and thus the file is needed to discover the - names of the partitions and the underlying storage engines. + Handle delete, rename and create table + + SYNOPSIS + del_ren_cre_table() + from Full path of old table + to Full path of new table + table_arg Table object + create_info Create info + + RETURN VALUE + >0 Error + 0 Success + + DESCRIPTION + Common routine to handle delete_table and rename_table. + The routine uses the partition handler file to get the + names of the partition instances. Both these routines + are called after creating the handler without table + object and thus the file is needed to discover the + names of the partitions and the underlying storage engines.
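  The shape of the loop is worth seeing in isolation: visit every per-partition name, delegate the operation, and keep going on failure so that all partitions are attempted. A rough standalone sketch (invented names, and the usual "#P#" name layout assumed for the sample data; this is not the handler API):

    #include <cstdio>
    #include <string>
    #include <vector>
    // Stand-in for the per-partition delete/rename/create delegation.
    static int op_on_partition(const std::string &name)
    { std::printf("operating on %s\n", name.c_str()); return 0; }
    static int del_ren_cre_all(const std::vector<std::string> &part_names)
    {
      int save_error= 0;
      for (const std::string &name : part_names)
      {
        int error= op_on_partition(name);  // one underlying handler at a time
        if (error)
          save_error= error;               // remember the error, keep visiting
      }
      return save_error;                   // 0 only if every partition succeeded
    }
    int main()
    { return del_ren_cre_all({"./test/t1#P#p0", "./test/t1#P#p1"}); }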
*/ uint ha_partition::del_ren_cre_table(const char *from, @@ -481,7 +1667,8 @@ uint ha_partition::del_ren_cre_table(const char *from, TABLE *table_arg, HA_CREATE_INFO *create_info) { - int save_error= 0, error; + int save_error= 0; + int error; char from_buff[FN_REFLEN], to_buff[FN_REFLEN]; char *name_buffer_ptr; uint i; @@ -496,10 +1683,12 @@ uint ha_partition::del_ren_cre_table(const char *from, i= 0; do { - create_partition_name(from_buff, from, name_buffer_ptr); + create_partition_name(from_buff, from, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); if (to != NULL) { // Rename branch - create_partition_name(to_buff, to, name_buffer_ptr); + create_partition_name(to_buff, to, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); error= (*file)->rename_table((const char*) from_buff, (const char*) to_buff); } @@ -518,12 +1707,23 @@ uint ha_partition::del_ren_cre_table(const char *from, DBUG_RETURN(save_error); } +/* + Find partition based on partition id + + SYNOPSIS + find_partition_element() + part_id Partition id of partition looked for + + RETURN VALUE + >0 Reference to partition_element + 0 Partition not found +*/ partition_element *ha_partition::find_partition_element(uint part_id) { uint i; uint curr_part_id= 0; - List_iterator_fast < partition_element > part_it(m_part_info->partitions); + List_iterator_fast<partition_element> part_it(m_part_info->partitions); for (i= 0; i < m_part_info->no_parts; i++) { @@ -549,18 +1749,32 @@ partition_element *ha_partition::find_partition_element(uint part_id) } +/* + Set up table share object before calling create on underlying handler + + SYNOPSIS + set_up_table_before_create() + table Table object + info Create info + part_id Partition id of partition to set-up + + RETURN VALUE + NONE + + DESCRIPTION + Set up + 1) Comment on partition + 2) MAX_ROWS, MIN_ROWS on partition + 3) Index file name on partition + 4) Data file name on partition +*/ + void ha_partition::set_up_table_before_create(TABLE *table, HA_CREATE_INFO *info, uint part_id) { - /* - Set up - 1) Comment on partition - 2) MAX_ROWS, MIN_ROWS on partition - 3) Index file name on partition - 4) Data file name on partition - */ partition_element *part_elem= find_partition_element(part_id); + if (!part_elem) return; // Fatal error table->s->max_rows= part_elem->part_max_rows; @@ -571,53 +1785,95 @@ void ha_partition::set_up_table_before_create(TABLE *table, /* - Routine used to add two names with '_' in between then. Service routine - to create_handler_file - Include the NULL in the count of characters since it is needed as separator - between the partition names. + Add two names together + + SYNOPSIS + name_add() + out:dest Destination string + first_name First name + sec_name Second name + + RETURN VALUE + >0 Error + 0 Success + + DESCRIPTION + Routine used to add two names with "#SP#" in between them. Service routine + to create_handler_file + Include the NULL in the count of characters since it is needed as separator + between the partition names. */ static uint name_add(char *dest, const char *first_name, const char *sec_name) { - return (uint) (strxmov(dest, first_name, "_", sec_name, NullS) -dest) + 1; + return (uint) (strxmov(dest, first_name, "#SP#", sec_name, NullS) -dest) + 1; } /* - Method used to create handler file with names of partitions, their - engine types and the number of partitions.
+ Create the special .par file + + SYNOPSIS + create_handler_file() + name Full path of table name + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + Method used to create handler file with names of partitions, their + engine types and the number of partitions. */ bool ha_partition::create_handler_file(const char *name) { partition_element *part_elem, *subpart_elem; uint i, j, part_name_len, subpart_name_len; - uint tot_partition_words, tot_name_len; + uint tot_partition_words, tot_name_len, no_parts; + uint tot_parts= 0; uint tot_len_words, tot_len_byte, chksum, tot_name_words; char *name_buffer_ptr; uchar *file_buffer, *engine_array; bool result= TRUE; char file_name[FN_REFLEN]; + char part_name[FN_REFLEN]; + char subpart_name[FN_REFLEN]; File file; - List_iterator_fast < partition_element > part_it(m_part_info->partitions); + List_iterator_fast part_it(m_part_info->partitions); DBUG_ENTER("create_handler_file"); - DBUG_PRINT("info", ("table name = %s", name)); + no_parts= m_part_info->partitions.elements; + DBUG_PRINT("info", ("table name = %s, no_parts = %u", name, + no_parts)); tot_name_len= 0; - for (i= 0; i < m_part_info->no_parts; i++) + for (i= 0; i < no_parts; i++) { part_elem= part_it++; - part_name_len= strlen(part_elem->partition_name); + if (part_elem->part_state != PART_NORMAL && + part_elem->part_state != PART_IS_ADDED && + part_elem->part_state != PART_IS_CHANGED) + continue; + tablename_to_filename(part_elem->partition_name, part_name, + FN_REFLEN); + part_name_len= strlen(part_name); if (!m_is_sub_partitioned) + { tot_name_len+= part_name_len + 1; + tot_parts++; + } else { - List_iterator_fast sub_it(part_elem->subpartitions); + List_iterator_fast sub_it(part_elem->subpartitions); for (j= 0; j < m_part_info->no_subparts; j++) { subpart_elem= sub_it++; - subpart_name_len= strlen(subpart_elem->partition_name); - tot_name_len+= part_name_len + subpart_name_len + 2; + tablename_to_filename(subpart_elem->partition_name, + subpart_name, + FN_REFLEN); + subpart_name_len= strlen(subpart_name); + tot_name_len+= part_name_len + subpart_name_len + 5; + tot_parts++; } } } @@ -634,7 +1890,7 @@ bool ha_partition::create_handler_file(const char *name) All padding bytes are zeroed */ - tot_partition_words= (m_tot_parts + 3) / 4; + tot_partition_words= (tot_parts + 3) / 4; tot_name_words= (tot_name_len + 3) / 4; tot_len_words= 4 + tot_partition_words + tot_name_words; tot_len_byte= 4 * tot_len_words; @@ -643,25 +1899,34 @@ bool ha_partition::create_handler_file(const char *name) engine_array= (file_buffer + 12); name_buffer_ptr= (char*) (file_buffer + ((4 + tot_partition_words) * 4)); part_it.rewind(); - for (i= 0; i < m_part_info->no_parts; i++) + for (i= 0; i < no_parts; i++) { part_elem= part_it++; + if (part_elem->part_state != PART_NORMAL && + part_elem->part_state != PART_IS_ADDED && + part_elem->part_state != PART_IS_CHANGED) + continue; if (!m_is_sub_partitioned) { - name_buffer_ptr= strmov(name_buffer_ptr, part_elem->partition_name)+1; + tablename_to_filename(part_elem->partition_name, part_name, FN_REFLEN); + name_buffer_ptr= strmov(name_buffer_ptr, part_name)+1; *engine_array= (uchar) ha_legacy_type(part_elem->engine_type); DBUG_PRINT("info", ("engine: %u", *engine_array)); engine_array++; } else { - List_iterator_fast sub_it(part_elem->subpartitions); + List_iterator_fast sub_it(part_elem->subpartitions); for (j= 0; j < m_part_info->no_subparts; j++) { subpart_elem= sub_it++; + tablename_to_filename(part_elem->partition_name, part_name, + FN_REFLEN); + 
tablename_to_filename(subpart_elem->partition_name, subpart_name, + FN_REFLEN); name_buffer_ptr+= name_add(name_buffer_ptr, - part_elem->partition_name, - subpart_elem->partition_name); + part_name, + subpart_name); *engine_array= (uchar) ha_legacy_type(part_elem->engine_type); engine_array++; } @@ -669,7 +1934,7 @@ bool ha_partition::create_handler_file(const char *name) } chksum= 0; int4store(file_buffer, tot_len_words); - int4store(file_buffer + 8, m_tot_parts); + int4store(file_buffer + 8, tot_parts); int4store(file_buffer + 12 + (tot_partition_words * 4), tot_name_len); for (i= 0; i < tot_len_words; i++) chksum^= uint4korr(file_buffer + 4 * i); @@ -693,6 +1958,15 @@ bool ha_partition::create_handler_file(const char *name) DBUG_RETURN(result); } +/* + Clear handler variables and free some memory + + SYNOPSIS + clear_handler_file() + + RETURN VALUE + NONE +*/ void ha_partition::clear_handler_file() { @@ -703,6 +1977,16 @@ void ha_partition::clear_handler_file() m_engine_array= NULL; } +/* + Create underlying handler objects + + SYNOPSIS + create_handlers() + + RETURN VALUE + TRUE Error + FALSE Success +*/ bool ha_partition::create_handlers() { @@ -736,10 +2020,20 @@ bool ha_partition::create_handlers() DBUG_RETURN(FALSE); } +/* + Create underlying handler objects from partition info + + SYNOPSIS + new_handlers_from_part_info() + + RETURN VALUE + TRUE Error + FALSE Success +*/ bool ha_partition::new_handlers_from_part_info() { - uint i, j; + uint i, j, part_count; partition_element *part_elem; uint alloc_len= (m_tot_parts + 1) * sizeof(handler*); List_iterator_fast part_it(m_part_info->partitions); @@ -747,23 +2041,22 @@ bool ha_partition::new_handlers_from_part_info() DBUG_ENTER("ha_partition::new_handlers_from_part_info"); if (!(m_file= (handler **) sql_alloc(alloc_len))) - goto error; + { + mem_alloc_error(alloc_len); + goto error_end; + } bzero(m_file, alloc_len); DBUG_ASSERT(m_part_info->no_parts > 0); i= 0; + part_count= 0; /* Don't know the size of the underlying storage engine, invent a number of bytes allocated for error message if allocation fails */ - alloc_len= 128; do { part_elem= part_it++; - if (!(m_file[i]= get_new_handler(table_share, thd->mem_root, - part_elem->engine_type))) - goto error; - DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type))); if (m_is_sub_partitioned) { for (j= 0; j < m_part_info->no_subparts; j++) @@ -771,9 +2064,18 @@ bool ha_partition::new_handlers_from_part_info() if (!(m_file[i]= get_new_handler(table_share, thd->mem_root, part_elem->engine_type))) goto error; - DBUG_PRINT("info", ("engine_type: %u", (uint) ha_legacy_type(part_elem->engine_type))); + DBUG_PRINT("info", ("engine_type: %u", + (uint) ha_legacy_type(part_elem->engine_type))); } } + else + { + if (!(m_file[part_count++]= get_new_handler(table_share, thd->mem_root, + part_elem->engine_type))) + goto error; + DBUG_PRINT("info", ("engine_type: %u", + (uint) ha_legacy_type(part_elem->engine_type))); + } } while (++i < m_part_info->no_parts); if (part_elem->engine_type == &myisam_hton) { @@ -782,14 +2084,26 @@ bool ha_partition::new_handlers_from_part_info() } DBUG_RETURN(FALSE); error: - my_error(ER_OUTOFMEMORY, MYF(0), alloc_len); + mem_alloc_error(sizeof(handler)); +error_end: DBUG_RETURN(TRUE); } /* - Open handler file to get partition names, engine types and number of - partitions. 
+ Get info about partition engines and their names from the .par file + + SYNOPSIS + get_from_handler_file() + name Full path of table name + + RETURN VALUE + TRUE Error + FALSE Success + + DESCRIPTION + Open handler file to get partition names, engine types and number of + partitions. */ bool ha_partition::get_from_handler_file(const char *name) @@ -825,6 +2139,7 @@ bool ha_partition::get_from_handler_file(const char *name) if (chksum) goto err2; m_tot_parts= uint4korr((file_buffer) + 8); + DBUG_PRINT("info", ("No of parts = %u", m_tot_parts)); tot_partition_words= (m_tot_parts + 3) / 4; if (!(engine_array= (handlerton **) my_malloc(m_tot_parts * sizeof(handlerton*),MYF(0)))) goto err2; @@ -854,17 +2169,31 @@ err1: DBUG_RETURN(TRUE); } + /**************************************************************************** MODULE open/close object ****************************************************************************/ /* - Used for opening tables. The name will be the name of the file. - A table is opened when it needs to be opened. For instance - when a request comes in for a select on the table (tables are not - open and closed for each request, they are cached). + Open handler object - Called from handler.cc by handler::ha_open(). The server opens all tables - by calling ha_open() which then calls the handler specific open(). + SYNOPSIS + open() + name Full path of table name + mode Open mode flags + test_if_locked ? + + RETURN VALUE + >0 Error + 0 Success + + DESCRIPTION + Used for opening tables. The name will be the name of the file. + A table is opened when it needs to be opened. For instance + when a request comes in for a select on the table (tables are not + open and closed for each request, they are cached). + + Called from handler.cc by handler::ha_open(). The server opens all tables + by calling ha_open() which then calls the handler specific open(). */ int ha_partition::open(const char *name, int mode, uint test_if_locked) @@ -877,6 +2206,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) DBUG_ENTER("ha_partition::open"); ref_length= 0; + m_mode= mode; + m_open_test_lock= test_if_locked; m_part_field_array= m_part_info->full_part_field_array; if (get_from_handler_file(name)) DBUG_RETURN(1); @@ -912,7 +2243,8 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) file= m_file; do { - create_partition_name(name_buff, name, name_buffer_ptr); + create_partition_name(name_buff, name, name_buffer_ptr, NORMAL_PART_NAME, + FALSE); if ((error= (*file)->ha_open(table, (const char*) name_buff, mode, test_if_locked))) goto err_handler; @@ -934,7 +2266,7 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) /* Initialise priority queue, initialised to reading forward. */ - if ((error= init_queue(&queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS, + if ((error= init_queue(&m_queue, m_tot_parts, (uint) PARTITION_BYTES_IN_POS, 0, key_rec_cmp, (void*)this))) goto err_handler; /* @@ -952,28 +2284,45 @@ err_handler: DBUG_RETURN(error); } -/* - Closes a table. We call the free_share() function to free any resources - that we have allocated in the "shared" structure. - Called from sql_base.cc, sql_select.cc, and table.cc. - In sql_select.cc it is only used to close up temporary tables or during - the process where a temporary table is converted over to being a - myisam table. - For sql_base.cc look at close_data_tables(). 
+/* + Close handler object + + SYNOPSIS + close() + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + Called from sql_base.cc, sql_select.cc, and table.cc. + In sql_select.cc it is only used to close up temporary tables or during + the process where a temporary table is converted over to being a + myisam table. + For sql_base.cc look at close_data_tables(). */ int ha_partition::close(void) { handler **file; + bool first= TRUE; DBUG_ENTER("ha_partition::close"); - delete_queue(&queue); + delete_queue(&m_queue); file= m_file; + +repeat: do { (*file)->close(); } while (*(++file)); + if (first && m_added_file && m_added_file[0]) + { + file= m_added_file; + first= FALSE; + goto repeat; + } DBUG_RETURN(0); } @@ -988,30 +2337,47 @@ int ha_partition::close(void) */ /* - First you should go read the section "locking functions for mysql" in - lock.cc to understand this. - This create a lock on the table. If you are implementing a storage engine - that can handle transactions look at ha_berkely.cc to see how you will - want to goo about doing this. Otherwise you should consider calling - flock() here. - Originally this method was used to set locks on file level to enable - several MySQL Servers to work on the same data. For transactional - engines it has been "abused" to also mean start and end of statements - to enable proper rollback of statements and transactions. When LOCK - TABLES has been issued the start_stmt method takes over the role of - indicating start of statement but in this case there is no end of - statement indicator(?). + Set external locks on table - Called from lock.cc by lock_external() and unlock_external(). Also called - from sql_table.cc by copy_data_between_tables(). + SYNOPSIS + external_lock() + thd Thread object + lock_type Type of external lock + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + First you should go read the section "locking functions for mysql" in + lock.cc to understand this. + This creates a lock on the table. If you are implementing a storage engine + that can handle transactions look at ha_berkeley.cc to see how you will + want to go about doing this. Otherwise you should consider calling + flock() here. + Originally this method was used to set locks on file level to enable + several MySQL Servers to work on the same data. For transactional + engines it has been "abused" to also mean start and end of statements + to enable proper rollback of statements and transactions. When LOCK + TABLES has been issued the start_stmt method takes over the role of + indicating start of statement but in this case there is no end of + statement indicator(?). + + Called from lock.cc by lock_external() and unlock_external(). Also called + from sql_table.cc by copy_data_between_tables(). */ int ha_partition::external_lock(THD *thd, int lock_type) { uint error; handler **file; + bool first= TRUE; DBUG_ENTER("ha_partition::external_lock"); + file= m_file; + m_lock_type= lock_type; + +repeat: do { if ((error= (*file)->external_lock(thd, lock_type))) @@ -1020,7 +2386,13 @@ goto err_handler; } } while (*(++file)); - m_lock_type= lock_type; // For the future (2009?)
+ if (first && m_added_file && m_added_file[0]) + { + DBUG_ASSERT(lock_type == F_UNLCK); + file= m_added_file; + first= FALSE; + goto repeat; + } DBUG_RETURN(0); err_handler: @@ -1031,36 +2403,49 @@ err_handler: /* - The idea with handler::store_lock() is the following: + Get the lock(s) for the table and perform conversion of locks if needed - The statement decided which locks we should need for the table - for updates/deletes/inserts we get WRITE locks, for SELECT... we get - read locks. + SYNOPSIS + store_lock() + thd Thread object + to Lock object array + lock_type Table lock type - Before adding the lock into the table lock handler (see thr_lock.c) - mysqld calls store lock with the requested locks. Store lock can now - modify a write lock to a read lock (or some other lock), ignore the - lock (if we don't want to use MySQL table locks at all) or add locks - for many tables (like we do when we are using a MERGE handler). + RETURN VALUE + >0 Error code + 0 Success - Berkeley DB for partition changes all WRITE locks to TL_WRITE_ALLOW_WRITE - (which signals that we are doing WRITES, but we are still allowing other - reader's and writer's. + DESCRIPTION + The idea with handler::store_lock() is the following: - When releasing locks, store_lock() are also called. In this case one - usually doesn't have to do anything. + The statement decided which locks we should need for the table + for updates/deletes/inserts we get WRITE locks, for SELECT... we get + read locks. - store_lock is called when holding a global mutex to ensure that only - one thread at a time changes the locking information of tables. + Before adding the lock into the table lock handler (see thr_lock.c) + mysqld calls store lock with the requested locks. Store lock can now + modify a write lock to a read lock (or some other lock), ignore the + lock (if we don't want to use MySQL table locks at all) or add locks + for many tables (like we do when we are using a MERGE handler). - In some exceptional cases MySQL may send a request for a TL_IGNORE; - This means that we are requesting the same lock as last time and this - should also be ignored. (This may happen when someone does a flush - table when we have opened a part of the tables, in which case mysqld - closes and reopens the tables and tries to get the same locks at last - time). In the future we will probably try to remove this. + Berkeley DB for partition changes all WRITE locks to TL_WRITE_ALLOW_WRITE + (which signals that we are doing WRITES, but we are still allowing other + readers and writers). - Called from lock.cc by get_lock_data(). + When releasing locks, store_lock() is also called. In this case one + usually doesn't have to do anything. + + store_lock is called when holding a global mutex to ensure that only + one thread at a time changes the locking information of tables. + + In some exceptional cases MySQL may send a request for a TL_IGNORE; + This means that we are requesting the same lock as last time and this + should also be ignored. (This may happen when someone does a flush + table when we have opened a part of the tables, in which case mysqld + closes and reopens the tables and tries to get the same locks as last + time). In the future we will probably try to remove this. + + Called from lock.cc by get_lock_data().
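  For the partition handler the practical consequence is simple chaining: each underlying handler appends (or converts) its lock requests and returns the advanced array pointer, which is fed to the next one. A standalone sketch of that chaining (invented types; the real code passes THR_LOCK_DATA):

    #include <vector>
    struct LockReq { int type; };
    struct ChildHandler
    {
      LockReq req;
      // Like handler::store_lock: append our request, return the new array end.
      LockReq **store_lock(LockReq **to, int lock_type)
      { req.type= lock_type; *to= &req; return to + 1; }
    };
    static LockReq **store_lock_all(std::vector<ChildHandler> &parts,
                                    LockReq **to, int lock_type)
    {
      for (ChildHandler &c : parts)
        to= c.store_lock(to, lock_type);  // output of one feeds the next
      return to;
    }
    int main()
    {
      std::vector<ChildHandler> parts(4);
      LockReq *buf[4];
      LockReq **end= store_lock_all(parts, buf, 1);
      return (end - buf) == 4 ? 0 : 1;    // one lock request per partition
    }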
*/ THR_LOCK_DATA **ha_partition::store_lock(THD *thd, @@ -1069,6 +2454,7 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd, { handler **file; DBUG_ENTER("ha_partition::store_lock"); + file= m_file; do { @@ -1077,12 +2463,29 @@ THR_LOCK_DATA **ha_partition::store_lock(THD *thd, DBUG_RETURN(to); } +/* + Start a statement when table is locked + + SYNOPSIS + start_stmt() + thd Thread object + lock_type Type of external lock + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + This method is called instead of external lock when the table is locked + before the statement is executed. +*/ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) { int error= 0; handler **file; DBUG_ENTER("ha_partition::start_stmt"); + file= m_file; do { @@ -1094,22 +2497,41 @@ int ha_partition::start_stmt(THD *thd, thr_lock_type lock_type) /* - Returns the number of store locks needed in call to store lock. - We return number of partitions since we call store_lock on each - underlying handler. Assists the above functions in allocating - sufficient space for lock structures. + Get number of lock objects returned in store_lock + + SYNOPSIS + lock_count() + + RETURN VALUE + Number of locks returned in call to store_lock + + DESCRIPTION + Returns the number of store locks needed in call to store lock. + We return number of partitions since we call store_lock on each + underlying handler. Assists the above functions in allocating + sufficient space for lock structures. */ uint ha_partition::lock_count() const { DBUG_ENTER("ha_partition::lock_count"); + DBUG_RETURN(m_no_locks); } /* - Record currently processed was not in the result set of the statement - and is thus unlocked. Used for UPDATE and DELETE queries. + Unlock last accessed row + + SYNOPSIS + unlock_row() + + RETURN VALUE + NONE + + DESCRIPTION + Record currently processed was not in the result set of the statement + and is thus unlocked. Used for UPDATE and DELETE queries. */ void ha_partition::unlock_row() @@ -1124,43 +2546,56 @@ void ha_partition::unlock_row() ****************************************************************************/ /* - write_row() inserts a row. buf() is a byte array of data, normally record[0]. + Insert a row to the table - You can use the field information to extract the data from the native byte - array type. + SYNOPSIS + write_row() + buf The row in MySQL Row Format - Example of this would be: - for (Field **field=table->field ; *field ; field++) - { - ... - } + RETURN VALUE + >0 Error code + 0 Success - See ha_tina.cc for an partition of extracting all of the data as strings. - ha_berekly.cc has an partition of how to store it intact by "packing" it - for ha_berkeley's own native storage type. + DESCRIPTION + write_row() inserts a row. buf() is a byte array of data, normally + record[0]. - See the note for update_row() on auto_increments and timestamps. This - case also applied to write_row(). + You can use the field information to extract the data from the native byte + array type. - Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, - sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. + Example of this would be: + for (Field **field=table->field ; *field ; field++) + { + ... + } - ADDITIONAL INFO: + See ha_tina.cc for a variant of extracting all of the data as strings. + ha_berkeley.cc has a variant of how to store it intact by "packing" it + for ha_berkeley's own native storage type. - Most handlers set timestamp when calling write row if any such fields - exists. 
Since we are calling an underlying handler we assume the - underlying handler will assume this responsibility. + See the note for update_row() on auto_increments and timestamps. This + case also applies to write_row(). - Underlying handlers will also call update_auto_increment to calculate - the new auto increment value. We will catch the call to - get_auto_increment and ensure this increment value is maintained by - only one of the underlying handlers. + Called from item_sum.cc, item_sum.cc, sql_acl.cc, sql_insert.cc, + sql_insert.cc, sql_select.cc, sql_table.cc, sql_udf.cc, and sql_update.cc. + + ADDITIONAL INFO: + + Most handlers set timestamp when calling write row if any such fields + exist. Since we are calling an underlying handler we assume the + underlying handler will assume this responsibility. + + Underlying handlers will also call update_auto_increment to calculate + the new auto increment value. We will catch the call to + get_auto_increment and ensure this increment value is maintained by + only one of the underlying handlers. */ int ha_partition::write_row(byte * buf) { uint32 part_id; int error; + longlong func_value; #ifdef NOT_NEEDED byte *rec0= m_rec0; #endif @@ -1170,17 +2605,19 @@ #ifdef NOT_NEEDED if (likely(buf == rec0)) #endif - error= m_part_info->get_partition_id(m_part_info, &part_id); + error= m_part_info->get_partition_id(m_part_info, &part_id, + &func_value); #ifdef NOT_NEEDED else { set_field_ptr(m_part_field_array, buf, rec0); - error= m_part_info->get_partition_id(m_part_info, &part_id); + error= m_part_info->get_partition_id(m_part_info, &part_id, + &func_value); set_field_ptr(m_part_field_array, rec0, buf); } #endif if (unlikely(error)) - DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); + DBUG_RETURN(error); m_last_part= part_id; DBUG_PRINT("info", ("Insert in partition %d", part_id)); DBUG_RETURN(m_file[part_id]->write_row(buf)); @@ -1188,33 +2625,46 @@ /* - Yes, update_row() does what you expect, it updates a row. old_data will - have the previous row record in it, while new_data will have the newest - data in it. - Keep in mind that the server can do updates based on ordering if an - ORDER BY clause was used. Consecutive ordering is not guarenteed. + Update an existing row - Currently new_data will not have an updated auto_increament record, or - and updated timestamp field. You can do these for partition by doing these: - if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) - table->timestamp_field->set_time(); - if (table->next_number_field && record == table->record[0]) - update_auto_increment(); + SYNOPSIS + update_row() + old_data Old record in MySQL Row Format + new_data New record in MySQL Row Format - Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. - new_data is always record[0] - old_data is normally record[1] but may be anything + RETURN VALUE + >0 Error code + 0 Success + DESCRIPTION + Yes, update_row() does what you expect, it updates a row. old_data will + have the previous row record in it, while new_data will have the newest + data in it. + Keep in mind that the server can do updates based on ordering if an + ORDER BY clause was used. Consecutive ordering is not guaranteed. + + Currently new_data will not have an updated auto_increment record, or + an updated timestamp field.
You can do this for the partition handler by doing: + if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) + table->timestamp_field->set_time(); + if (table->next_number_field && record == table->record[0]) + update_auto_increment(); + + Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc. + new_data is always record[0] + old_data is normally record[1] but may be anything */ int ha_partition::update_row(const byte *old_data, byte *new_data) { uint32 new_part_id, old_part_id; int error; + longlong func_value; DBUG_ENTER("ha_partition::update_row"); if ((error= get_parts_for_update(old_data, new_data, table->record[0], - m_part_info, &old_part_id, &new_part_id))) + m_part_info, &old_part_id, &new_part_id, + &func_value))) { DBUG_RETURN(error); } @@ -1249,21 +2699,31 @@ /* - This will delete a row. buf will contain a copy of the row to be deleted. - The server will call this right after the current row has been read - (from either a previous rnd_xxx() or index_xxx() call). - If you keep a pointer to the last row or can access a primary key it will - make doing the deletion quite a bit easier. - Keep in mind that the server does no guarentee consecutive deletions. - ORDER BY clauses can be used. + Remove an existing row + + SYNOPSIS + delete_row() + buf Deleted row in MySQL Row Format - Called in sql_acl.cc and sql_udf.cc to manage internal table information. - Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select - it is used for removing duplicates while in insert it is used for REPLACE - calls. + RETURN VALUE + >0 Error Code + 0 Success - buf is either record[0] or record[1] + DESCRIPTION + This will delete a row. buf will contain a copy of the row to be deleted. + The server will call this right after the current row has been read + (from either a previous rnd_xxx() or index_xxx() call). + If you keep a pointer to the last row or can access a primary key it will + make doing the deletion quite a bit easier. + Keep in mind that the server does not guarantee consecutive deletions. + ORDER BY clauses can be used. + + Called in sql_acl.cc and sql_udf.cc to manage internal table information. + Called in sql_delete.cc, sql_insert.cc, and sql_select.cc. In sql_select + it is used for removing duplicates while in insert it is used for REPLACE + calls. + + buf is either record[0] or record[1] */ int ha_partition::delete_row(const byte *buf) @@ -1282,15 +2742,25 @@ /* - Used to delete all rows in a table. Both for cases of truncate and - for cases where the optimizer realizes that all rows will be - removed as a result of a SQL statement. + Delete all rows in a table + + SYNOPSIS + delete_all_rows() + + RETURN VALUE + >0 Error Code + 0 Success + + DESCRIPTION + Used to delete all rows in a table. Both for cases of truncate and + for cases where the optimizer realizes that all rows will be + removed as a result of a SQL statement. + + Called from item_sum.cc by Item_func_group_concat::clear(), + Item_sum_count_distinct::clear(), and Item_func_group_concat::clear(). + Called from sql_delete.cc by mysql_delete(). + Called from sql_select.cc by JOIN::reinit().
+ Called from sql_union.cc by st_select_lex_unit::exec(). */ int ha_partition::delete_all_rows() @@ -1298,6 +2768,7 @@ int error; handler **file; DBUG_ENTER("ha_partition::delete_all_rows"); + file= m_file; do { @@ -1307,14 +2778,26 @@ DBUG_RETURN(0); } + /* - rows == 0 means we will probably insert many rows + Start a large batch of insert rows + + SYNOPSIS + start_bulk_insert() + rows Number of rows to insert + + RETURN VALUE + NONE + + DESCRIPTION + rows == 0 means we will probably insert many rows */ void ha_partition::start_bulk_insert(ha_rows rows) { handler **file; DBUG_ENTER("ha_partition::start_bulk_insert"); + if (!rows) { /* Avoid allocating big caches in all underlying handlers */ @@ -1330,6 +2813,17 @@ } +/* + Finish a large batch of insert rows + + SYNOPSIS + end_bulk_insert() + + RETURN VALUE + >0 Error code + 0 Success +*/ + int ha_partition::end_bulk_insert() { int error= 0; @@ -1347,6 +2841,7 @@ DBUG_RETURN(error); } + /**************************************************************************** MODULE full table scan ****************************************************************************/ /* rnd_init() scan 0 Initialize for random reads through rnd_pos() 1 Initialize for random scan through rnd_next() - NOTES - rnd_init() is called when the server wants the storage engine to do a - table scan or when the server wants to access data through rnd_pos. + RETURN VALUE + >0 Error code + 0 Success - When scan is used we will scan one handler partition at a time. - When preparing for rnd_pos we will init all handler partitions. - No extra cache handling is needed when scannning is not performed. + DESCRIPTION + rnd_init() is called when the server wants the storage engine to do a + table scan or when the server wants to access data through rnd_pos. - Before initialising we will call rnd_end to ensure that we clean up from - any previous incarnation of a table scan. - Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, - sql_table.cc, and sql_update.cc. + When scan is used we will scan one handler partition at a time. + When preparing for rnd_pos we will init all handler partitions. + No extra cache handling is needed when scanning is not performed. + + Before initialising we will call rnd_end to ensure that we clean up from + any previous incarnation of a table scan. + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc.
When you run out of records + you should return HA_ERR_END_OF_FILE. + The Field structure for the table is the key to getting data into buf + in a manner that will allow the server to understand it. + + Called from filesort.cc, records.cc, sql_handler.cc, sql_select.cc, + sql_table.cc, and sql_update.cc. */ int ha_partition::rnd_next(byte *buf) { - DBUG_ASSERT(m_scan_value); uint part_id= m_part_spec.start_part; // Cache of this variable handler *file= m_file[part_id]; int result= HA_ERR_END_OF_FILE; @@ -1528,37 +3043,38 @@ end: } -inline void store_part_id_in_pos(byte *pos, uint part_id) -{ - int2store(pos, part_id); -} - -inline uint get_part_id_from_pos(const byte *pos) -{ - return uint2korr(pos); -} - /* - position() is called after each call to rnd_next() if the data needs - to be ordered. You can do something like the following to store - the position: - ha_store_ptr(ref, ref_length, current_position); + Save position of current row - The server uses ref to store data. ref_length in the above case is - the size needed to store current_position. ref is just a byte array - that the server will maintain. If you are using offsets to mark rows, then - current_position should be the offset. If it is a primary key like in - BDB, then it needs to be a primary key. + SYNOPSIS + position() + record Current record in MySQL Row Format - Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. + RETURN VALUE + NONE + + DESCRIPTION + position() is called after each call to rnd_next() if the data needs + to be ordered. You can do something like the following to store + the position: + ha_store_ptr(ref, ref_length, current_position); + + The server uses ref to store data. ref_length in the above case is + the size needed to store current_position. ref is just a byte array + that the server will maintain. If you are using offsets to mark rows, then + current_position should be the offset. If it is a primary key like in + BDB, then it needs to be a primary key. + + Called from filesort.cc, sql_select.cc, sql_delete.cc and sql_update.cc. */ void ha_partition::position(const byte *record) { handler *file= m_file[m_last_part]; DBUG_ENTER("ha_partition::position"); + file->position(record); - store_part_id_in_pos(ref, m_last_part); + int2store(ref, m_last_part); memcpy((ref + PARTITION_BYTES_IN_POS), file->ref, (ref_length - PARTITION_BYTES_IN_POS)); @@ -1571,12 +3087,24 @@ void ha_partition::position(const byte *record) } /* - This is like rnd_next, but you are given a position to use - to determine the row. The position will be of the type that you stored in - ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key - or position you saved when position() was called. - Called from filesort.cc records.cc sql_insert.cc sql_select.cc - sql_update.cc. + Read row using position + + SYNOPSIS + rnd_pos() + out:buf Row read in MySQL Row Format + position Position of read row + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + This is like rnd_next, but you are given a position to use + to determine the row. The position will be of the type that you stored in + ref. You can use ha_get_ptr(pos,ref_length) to retrieve whatever key + or position you saved when position() was called. + Called from filesort.cc records.cc sql_insert.cc sql_select.cc + sql_update.cc. 
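  Concretely, the position stored here is a small composite: two leading bytes with the partition id (low byte first, as the int2store/uint2korr pair used above does) followed by whatever the underlying handler stored as its own ref. A self-contained sketch of that layout (invented names):

    #include <cassert>
    #include <cstddef>
    #include <cstring>
    typedef unsigned char uchar_t;
    static const size_t PART_BYTES= 2;           // cf. PARTITION_BYTES_IN_POS
    static void store2(uchar_t *p, unsigned v)   // little-endian, like int2store
    { p[0]= (uchar_t)(v & 0xFF); p[1]= (uchar_t)(v >> 8); }
    static unsigned korr2(const uchar_t *p)      // like uint2korr
    { return p[0] | ((unsigned)p[1] << 8); }
    static void save_position(uchar_t *ref, unsigned part_id,
                              const uchar_t *child_ref, size_t child_len)
    {
      store2(ref, part_id);                          // which partition the row is in
      memcpy(ref + PART_BYTES, child_ref, child_len); // child handler's position
    }
    int main()
    {
      uchar_t child_ref[4]= {1, 2, 3, 4};
      uchar_t ref[2 + sizeof(child_ref)];
      save_position(ref, 3, child_ref, sizeof(child_ref));
      assert(korr2(ref) == 3);   // rnd_pos recovers the partition id first
      return 0;
    }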
*/ int ha_partition::rnd_pos(byte * buf, byte *pos) @@ -1585,7 +3113,7 @@ handler *file; DBUG_ENTER("ha_partition::rnd_pos"); - part_id= get_part_id_from_pos((const byte *) pos); + part_id= uint2korr((const byte *) pos); DBUG_ASSERT(part_id < m_tot_parts); file= m_file[part_id]; m_last_part= part_id; @@ -1613,8 +3141,20 @@ */ /* - index_init is always called before starting index scans (except when - starting through index_read_idx and using read_range variants). + Initialise handler before start of index scan + + SYNOPSIS + index_init() + inx Index number + sorted Whether rows are to be returned in sorted order + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + index_init is always called before starting index scans (except when + starting through index_read_idx and using read_range variants). */ int ha_partition::index_init(uint inx, bool sorted) @@ -1645,8 +3185,18 @@ /* - index_end is called at the end of an index scan to clean up any - things needed to clean up. + End of index scan + + SYNOPSIS + index_end() + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + index_end is called at the end of an index scan to clean up anything + that needs cleaning up. */ int ha_partition::index_end() @@ -1671,25 +3221,49 @@ /* - index_read starts a new index scan using a start key. The MySQL Server - will check the end key on its own. Thus to function properly the - partitioned handler need to ensure that it delivers records in the sort - order of the MySQL Server. - index_read can be restarted without calling index_end on the previous - index scan and without calling index_init. In this case the index_read - is on the same index as the previous index_scan. This is particularly - used in conjuntion with multi read ranges. + Read one record in an index scan and start an index scan + + SYNOPSIS + index_read() + buf Read row in MySQL Row Format + key Key parts in consecutive order + key_len Total length of key parts + find_flag What type of key condition is used + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + index_read starts a new index scan using a start key. The MySQL Server + will check the end key on its own. Thus to function properly the + partitioned handler needs to ensure that it delivers records in the sort + order of the MySQL Server. + index_read can be restarted without calling index_end on the previous + index scan and without calling index_init. In this case the index_read + is on the same index as the previous index_scan. This is particularly + used in conjunction with multi read ranges. */ int ha_partition::index_read(byte * buf, const byte * key, uint key_len, enum ha_rkey_function find_flag) { DBUG_ENTER("ha_partition::index_read"); + end_range= 0; DBUG_RETURN(common_index_read(buf, key, key_len, find_flag)); } +/* + Common routine for a number of index_read variants + + SYNOPSIS + common_index_read() + + see index_read for rest +*/ + int ha_partition::common_index_read(byte *buf, const byte *key, uint key_len, enum ha_rkey_function find_flag) { @@ -1738,18 +3312,30 @@ /* - index_first() asks for the first key in the index. - This is similar to index_read except that there is no start key since - the scan starts from the leftmost entry and proceeds forward with - index_next.
+ Start an index scan from leftmost record and return first record + + SYNOPSIS + index_first() + buf Read row in MySQL Row Format + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + index_first() asks for the first key in the index. + This is similar to index_read except that there is no start key since + the scan starts from the leftmost entry and proceeds forward with + index_next. + + Called from opt_range.cc, opt_sum.cc, sql_handler.cc, + and sql_select.cc. */ int ha_partition::index_first(byte * buf) { DBUG_ENTER("ha_partition::index_first"); + end_range= 0; m_index_scan_type= partition_index_first; DBUG_RETURN(common_first_last(buf)); } /* - index_last() asks for the last key in the index. - This is similar to index_read except that there is no start key since - the scan starts from the rightmost entry and proceeds forward with - index_prev. + Start an index scan from rightmost record and return first record + + SYNOPSIS + index_last() + buf Read row in MySQL Row Format - Called from opt_range.cc, opt_sum.cc, sql_handler.cc, - and sql_select.cc. + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + index_last() asks for the last key in the index. + This is similar to index_read except that there is no start key since + the scan starts from the rightmost entry and proceeds backwards with + index_prev. + + Called from opt_range.cc, opt_sum.cc, sql_handler.cc, + and sql_select.cc. */ int ha_partition::index_last(byte * buf) { DBUG_ENTER("ha_partition::index_last"); + m_index_scan_type= partition_index_last; DBUG_RETURN(common_first_last(buf)); } +/* + Common routine for index_first/index_last + + SYNOPSIS + common_first_last() + + see index_first for rest +*/ + int ha_partition::common_first_last(byte *buf) { int error; + if ((error= partition_scan_set_up(buf, FALSE))) return error; if (!m_ordered_scan_ongoing) @@ -1783,10 +3391,18 @@ return handle_ordered_index_scan(buf); } + /* - Positions an index cursor to the index specified in key. Fetches the - row if any. This is only used to read whole keys. - TODO: Optimise this code to avoid index_init and index_end + Perform index read using index where always only one row is returned + + SYNOPSIS + index_read_idx() + see index_read for rest of parameters and return values + + DESCRIPTION + Positions an index cursor to the index specified in key. Fetches the + row if any. This is only used to read whole keys. + TODO: Optimise this code to avoid index_init and index_end */ int ha_partition::index_read_idx(byte * buf, uint index, const byte * key, @@ -1795,32 +3411,60 @@ { int res; DBUG_ENTER("ha_partition::index_read_idx"); + index_init(index, 0); res= index_read(buf, key, key_len, find_flag); index_end(); DBUG_RETURN(res); } + /* - This is used in join_read_last_key to optimise away an ORDER BY. - Can only be used on indexes supporting HA_READ_ORDER + Read last using key + + SYNOPSIS + index_read_last() + buf Read row in MySQL Row Format + key Key + keylen Length of key + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + This is used in join_read_last_key to optimise away an ORDER BY.
+ Can only be used on indexes supporting HA_READ_ORDER */ int ha_partition::index_read_last(byte *buf, const byte *key, uint keylen) { DBUG_ENTER("ha_partition::index_read_last"); + m_ordered= TRUE; // Safety measure DBUG_RETURN(index_read(buf, key, keylen, HA_READ_PREFIX_LAST)); } /* - Used to read forward through the index. + Read next record in a forward index scan + + SYNOPSIS + index_next() + buf Read row in MySQL Row Format + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + Used to read forward through the index. */ int ha_partition::index_next(byte * buf) { DBUG_ENTER("ha_partition::index_next"); + /* TODO(low priority): If we want partition to work with the HANDLER commands, we @@ -1836,13 +3480,27 @@ /* - This routine is used to read the next but only if the key is the same - as supplied in the call. + Read next record, but only if the key matches + + SYNOPSIS + index_next_same() + buf Read row in MySQL Row Format + key Key + keylen Length of key + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + This routine is used to read the next record, but only if the key is + the same as supplied in the call. */ int ha_partition::index_next_same(byte *buf, const byte *key, uint keylen) { DBUG_ENTER("ha_partition::index_next_same"); + DBUG_ASSERT(keylen == m_start_key.length); DBUG_ASSERT(m_index_scan_type != partition_index_last); if (!m_ordered_scan_ongoing) @@ -1850,13 +3508,26 @@ DBUG_RETURN(handle_ordered_next(buf, TRUE)); } + /* - Used to read backwards through the index. + Read next record when performing index scan backwards + + SYNOPSIS + index_prev() + buf Read row in MySQL Row Format + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + Used to read backwards through the index. */ int ha_partition::index_prev(byte * buf) { DBUG_ENTER("ha_partition::index_prev"); + /* TODO: read comment in index_next */ DBUG_ASSERT(m_index_scan_type != partition_index_first); DBUG_RETURN(handle_ordered_prev(buf)); @@ -1864,10 +3535,24 @@ /* - We reimplement read_range_first since we don't want the compare_key - check at the end. This is already performed in the partition handler. - read_range_next is very much different due to that we need to scan - all underlying handlers. + Start a read of one range with start and end key + + SYNOPSIS + read_range_first() + start_key Specification of start key + end_key Specification of end key + eq_range_arg Is it equal range + sorted Should records be returned in sorted order + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + We reimplement read_range_first since we don't want the compare_key + check at the end. This is already performed in the partition handler. + read_range_next is very different since we need to scan + all underlying handlers.
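  What the skipped check does: the default implementation compares every returned row against the end key and reports end-of-range once the row passes it; here that single test is made once, above the merged stream, instead of once per underlying handler. A tiny standalone sketch of the test itself (invented names; integer keys stand in for real key parts):

    #include <cassert>
    // Plays the role of compare_key(): <0 before the end key, 0 on it, >0 past it.
    static int compare_end_key(int row_key, int end_key)
    { return row_key < end_key ? -1 : (row_key > end_key ? 1 : 0); }
    // End-of-range test applied once to the merged row stream.
    static bool within_range(int row_key, int end_key, bool inclusive)
    {
      int cmp= compare_end_key(row_key, end_key);
      return inclusive ? cmp <= 0 : cmp < 0;
    }
    int main()
    {
      assert(within_range(5, 7, true));
      assert(within_range(7, 7, true) && !within_range(7, 7, false));
      assert(!within_range(8, 7, true));
      return 0;
    }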
*/ int ha_partition::read_range_first(const key_range *start_key, @@ -1876,6 +3561,7 @@ { int error; DBUG_ENTER("ha_partition::read_range_first"); + m_ordered= sorted; eq_range= eq_range_arg; end_range= 0; @@ -1904,9 +3590,21 @@ } +/* + Read next record in read of a range with start and end key + + SYNOPSIS + read_range_next() + + RETURN VALUE + >0 Error code + 0 Success +*/ + int ha_partition::read_range_next() { DBUG_ENTER("ha_partition::read_range_next"); + if (m_ordered) { DBUG_RETURN(handler::read_range_next()); @@ -1915,6 +3613,22 @@ } +/* + Common routine to set up scans + + SYNOPSIS + partition_scan_set_up() + buf Buffer to later return record in + idx_read_flag Is it index scan + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + This is where we check which partitions to actually scan if not all + of them +*/ + int ha_partition::partition_scan_set_up(byte * buf, bool idx_read_flag) { DBUG_ENTER("ha_partition::partition_scan_set_up"); @@ -1959,16 +3673,29 @@ Unordered Index Scan Routines ****************************************************************************/ /* - These routines are used to scan partitions without considering order. - This is performed in two situations. - 1) In read_multi_range this is the normal case - 2) When performing any type of index_read, index_first, index_last where - all fields in the partition function is bound. In this case the index - scan is performed on only one partition and thus it isn't necessary to - perform any sort. + Common routine to handle index_next with unordered results + + SYNOPSIS + handle_unordered_next() + out:buf Read row in MySQL Row Format + is_next_same Called from index_next_same + + RETURN VALUE + HA_ERR_END_OF_FILE End of scan + 0 Success + other Error code + + DESCRIPTION + These routines are used to scan partitions without considering order. + This is performed in two situations. + 1) In read_multi_range this is the normal case + 2) When performing any type of index_read, index_first, index_last where + all fields in the partition function are bound. In this case the index + scan is performed on only one partition and thus it isn't necessary to + perform any sort. */ -int ha_partition::handle_unordered_next(byte *buf, bool next_same) +int ha_partition::handle_unordered_next(byte *buf, bool is_next_same) { handler *file= m_file[m_part_spec.start_part]; int error; @@ -1978,7 +3705,7 @@ We should consider if this should be split into two functions as is_next_same is always a local constant */ - if (next_same) + if (is_next_same) { if (!(error= file->index_next_same(buf, m_start_key.key, m_start_key.length))) @@ -2007,8 +3734,20 @@ /* - This routine is used to start the index scan on the next partition. - Both initial start and after completing scan on one partition. + Handle index_next when changing to new partition + + SYNOPSIS + handle_unordered_scan_next_partition() + buf Read row in MySQL Row Format + + RETURN VALUE + HA_ERR_END_OF_FILE End of scan + 0 Success + other Error code + + DESCRIPTION + This routine is used to start the index scan on the next partition. + Both initial start and after completing scan on one partition.
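  The control flow reduces to: read from the current partition; on end-of-file step to the next partition in the scan range and restart there; stop when a row is found or the range is exhausted. A standalone sketch (invented names; -1 plays the role of HA_ERR_END_OF_FILE):

    #include <cstdio>
    #include <vector>
    static const int NO_MORE_ROWS= -1;
    struct PartScan
    {
      std::vector<int> rows;
      size_t pos;
      int next() { return pos < rows.size() ? rows[pos++] : NO_MORE_ROWS; }
    };
    static int unordered_next(std::vector<PartScan> &parts,
                              size_t &cur, size_t end_part)
    {
      while (cur <= end_part)
      {
        int row= parts[cur].next();
        if (row != NO_MORE_ROWS)
          return row;       // current partition still has rows
        cur++;              // start scan on the next partition
      }
      return NO_MORE_ROWS;  // whole start_part..end_part range done
    }
    int main()
    {
      std::vector<PartScan> parts= {{{1, 2}, 0}, {{}, 0}, {{7}, 0}};
      size_t cur= 0;
      for (int r; (r= unordered_next(parts, cur, 2)) != NO_MORE_ROWS; )
        std::printf("%d\n", r);
      return 0;
    }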
*/ int ha_partition::handle_unordered_scan_next_partition(byte * buf) @@ -2056,30 +3795,43 @@ int ha_partition::handle_unordered_scan_next_partition(byte * buf) /* - This part contains the logic to handle index scans that require ordered - output. This includes all except those started by read_range_first with - the flag ordered set to FALSE. Thus most direct index_read and all - index_first and index_last. + Common routine to start index scan with ordered results - We implement ordering by keeping one record plus a key buffer for each - partition. Every time a new entry is requested we will fetch a new - entry from the partition that is currently not filled with an entry. - Then the entry is put into its proper sort position. + SYNOPSIS + handle_ordered_index_scan() + out:buf Read row in MySQL Row Format - Returning a record is done by getting the top record, copying the - record to the request buffer and setting the partition as empty on - entries. + RETURN VALUE + HA_ERR_END_OF_FILE End of scan + 0 Success + other Error code + + DESCRIPTION + This part contains the logic to handle index scans that require ordered + output. This includes all except those started by read_range_first with + the flag ordered set to FALSE. Thus most direct index_read and all + index_first and index_last. + + We implement ordering by keeping one record plus a key buffer for each + partition. Every time a new entry is requested we will fetch a new + entry from the partition that is currently not filled with an entry. + Then the entry is put into its proper sort position. + + Returning a record is done by getting the top record, copying the + record to the request buffer and setting the partition as empty on + entries. */ int ha_partition::handle_ordered_index_scan(byte *buf) { - uint i, j= 0; + uint i; + uint j= 0; bool found= FALSE; bool reverse_order= FALSE; DBUG_ENTER("ha_partition::handle_ordered_index_scan"); m_top_entry= NO_CURRENT_PART_ID; - queue_remove_all(&queue); + queue_remove_all(&m_queue); for (i= m_part_spec.start_part; i <= m_part_spec.end_part; i++) { int error; @@ -2112,7 +3864,7 @@ int ha_partition::handle_ordered_index_scan(byte *buf) /* Initialise queue without order first, simply insert */ - queue_element(&queue, j++)= (byte*)queue_buf(i); + queue_element(&m_queue, j++)= (byte*)queue_buf(i); } else if (error != HA_ERR_KEY_NOT_FOUND && error != HA_ERR_END_OF_FILE) { @@ -2125,10 +3877,10 @@ int ha_partition::handle_ordered_index_scan(byte *buf) We found at least one partition with data, now sort all entries and after that read the first entry and copy it to the buffer to return in. 
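
    The merge just described is a classic k-way merge. A standalone C++
    sketch of the same idea, with std::priority_queue standing in for the
    mysys QUEUE that ha_partition uses (illustrative only):

      #include <cstdio>
      #include <functional>
      #include <queue>
      #include <utility>
      #include <vector>

      int main()
      {
        // Each partition already delivers its rows in index order.
        std::vector<std::vector<int> > parts= {{1, 4, 9}, {2, 3}, {5}};
        std::vector<size_t> next_row(parts.size(), 0);

        typedef std::pair<int, size_t> Entry;  // (key, partition id)
        std::priority_queue<Entry, std::vector<Entry>,
                            std::greater<Entry> > q;

        for (size_t p= 0; p < parts.size(); p++)  // prime: one row per partition
          if (!parts[p].empty())
            q.push(Entry(parts[p][next_row[p]++], p));

        while (!q.empty())                     // handle_ordered_next, in spirit
        {
          Entry top= q.top();
          q.pop();
          std::printf("key %d from partition %zu\n", top.first, top.second);
          size_t p= top.second;
          if (next_row[p] < parts[p].size())   // refill from the same partition
            q.push(Entry(parts[p][next_row[p]++], p));
        }
        return 0;
      }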
*/ - queue_set_max_at_top(&queue, reverse_order); - queue_set_cmp_arg(&queue, (void*)m_curr_key_info); - queue.elements= j; - queue_fix(&queue); + queue_set_max_at_top(&m_queue, reverse_order); + queue_set_cmp_arg(&m_queue, (void*)m_curr_key_info); + m_queue.elements= j; + queue_fix(&m_queue); return_top_record(buf); DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry)); DBUG_RETURN(0); @@ -2137,11 +3889,23 @@ int ha_partition::handle_ordered_index_scan(byte *buf) } +/* + Return the top record in sort order + + SYNOPSIS + return_top_record() + out:buf Row returned in MySQL Row Format + + RETURN VALUE + NONE +*/ + void ha_partition::return_top_record(byte *buf) { uint part_id; - byte *key_buffer= queue_top(&queue); + byte *key_buffer= queue_top(&m_queue); byte *rec_buffer= key_buffer + PARTITION_BYTES_IN_POS; + part_id= uint2korr(key_buffer); memcpy(buf, rec_buffer, m_rec_length); m_last_part= part_id; @@ -2149,14 +3913,28 @@ void ha_partition::return_top_record(byte *buf) } -int ha_partition::handle_ordered_next(byte *buf, bool next_same) +/* + Common routine to handle index_next with ordered results + + SYNOPSIS + handle_ordered_next() + out:buf Read row in MySQL Row Format + next_same Called from index_next_same + + RETURN VALUE + HA_ERR_END_OF_FILE End of scan + 0 Success + other Error code +*/ + +int ha_partition::handle_ordered_next(byte *buf, bool is_next_same) { int error; uint part_id= m_top_entry; handler *file= m_file[part_id]; DBUG_ENTER("ha_partition::handle_ordered_next"); - if (!next_same) + if (!is_next_same) error= file->index_next(rec_buf(part_id)); else error= file->index_next_same(rec_buf(part_id), m_start_key.key, @@ -2166,8 +3944,8 @@ int ha_partition::handle_ordered_next(byte *buf, bool next_same) if (error == HA_ERR_END_OF_FILE) { /* Return next buffered row */ - queue_remove(&queue, (uint) 0); - if (queue.elements) + queue_remove(&m_queue, (uint) 0); + if (m_queue.elements) { DBUG_PRINT("info", ("Record returned from partition %u (2)", m_top_entry)); @@ -2177,25 +3955,39 @@ int ha_partition::handle_ordered_next(byte *buf, bool next_same) } DBUG_RETURN(error); } - queue_replaced(&queue); + queue_replaced(&m_queue); return_top_record(buf); DBUG_PRINT("info", ("Record returned from partition %u", m_top_entry)); DBUG_RETURN(0); } +/* + Common routine to handle index_prev with ordered results + + SYNOPSIS + handle_ordered_prev() + out:buf Read row in MySQL Row Format + + RETURN VALUE + HA_ERR_END_OF_FILE End of scan + 0 Success + other Error code +*/ + int ha_partition::handle_ordered_prev(byte *buf) { int error; uint part_id= m_top_entry; handler *file= m_file[part_id]; DBUG_ENTER("ha_partition::handle_ordered_prev"); + if ((error= file->index_prev(rec_buf(part_id)))) { if (error == HA_ERR_END_OF_FILE) { - queue_remove(&queue, (uint) 0); - if (queue.elements) + queue_remove(&m_queue, (uint) 0); + if (m_queue.elements) { return_top_record(buf); DBUG_PRINT("info", ("Record returned from partition %d (2)", @@ -2205,17 +3997,34 @@ int ha_partition::handle_ordered_prev(byte *buf) } DBUG_RETURN(error); } - queue_replaced(&queue); + queue_replaced(&m_queue); return_top_record(buf); DBUG_PRINT("info", ("Record returned from partition %d", m_top_entry)); DBUG_RETURN(0); } +/* + Set fields in partition functions in read set for underlying handlers + + SYNOPSIS + include_partition_fields_in_used_fields() + + RETURN VALUE + NONE + + DESCRIPTION + Some handlers only read fields as specified by the bitmap for the + read set. 
For partitioned handlers we always require that the + fields of the partition functions are read such that we can + calculate the partition id to place updated and deleted records. +*/ + void ha_partition::include_partition_fields_in_used_fields() { - DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields"); Field **ptr= m_part_field_array; + DBUG_ENTER("ha_partition::include_partition_fields_in_used_fields"); + do { ha_set_bit_in_read_set((*ptr)->fieldnr); @@ -2234,57 +4043,68 @@ void ha_partition::include_partition_fields_in_used_fields() */ /* - ::info() is used to return information to the optimizer. - Currently this table handler doesn't implement most of the fields - really needed. SHOW also makes use of this data - Another note, if your handler doesn't proved exact record count, - you will probably want to have the following in your code: - if (records < 2) - records = 2; - The reason is that the server will optimize for cases of only a single - record. If in a table scan you don't know the number of records - it will probably be better to set records to two so you can return - as many records as you need. + General method to gather info from handler - Along with records a few more variables you may wish to set are: - records - deleted - data_file_length - index_file_length - delete_length - check_time - Take a look at the public variables in handler.h for more information. + SYNOPSIS + info() + flag Specifies what info is requested - Called in: - filesort.cc - ha_heap.cc - item_sum.cc - opt_sum.cc - sql_delete.cc - sql_delete.cc - sql_derived.cc - sql_select.cc - sql_select.cc - sql_select.cc - sql_select.cc - sql_select.cc - sql_show.cc - sql_show.cc - sql_show.cc - sql_show.cc - sql_table.cc - sql_union.cc - sql_update.cc + RETURN VALUE + NONE - Some flags that are not implemented - HA_STATUS_POS: - This parameter is never used from the MySQL Server. It is checked in a - place in MyISAM so could potentially be used by MyISAM specific programs. - HA_STATUS_NO_LOCK: - This is declared and often used. It's only used by MyISAM. - It means that MySQL doesn't need the absolute latest statistics - information. This may save the handler from doing internal locks while - retrieving statistics data. + DESCRIPTION + ::info() is used to return information to the optimizer. + Currently this table handler doesn't implement most of the fields + really needed. SHOW also makes use of this data + Another note, if your handler doesn't proved exact record count, + you will probably want to have the following in your code: + if (records < 2) + records = 2; + The reason is that the server will optimize for cases of only a single + record. If in a table scan you don't know the number of records + it will probably be better to set records to two so you can return + as many records as you need. + + Along with records a few more variables you may wish to set are: + records + deleted + data_file_length + index_file_length + delete_length + check_time + Take a look at the public variables in handler.h for more information. + + Called in: + filesort.cc + ha_heap.cc + item_sum.cc + opt_sum.cc + sql_delete.cc + sql_delete.cc + sql_derived.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_select.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_show.cc + sql_table.cc + sql_union.cc + sql_update.cc + + Some flags that are not implemented + HA_STATUS_POS: + This parameter is never used from the MySQL Server. 
It is checked in a + place in MyISAM so could potentially be used by MyISAM specific + programs. + HA_STATUS_NO_LOCK: + This is declared and often used. It's only used by MyISAM. + It means that MySQL doesn't need the absolute latest statistics + information. This may save the handler from doing internal locks while + retrieving statistics data. */ void ha_partition::info(uint flag) @@ -2469,6 +4289,17 @@ void ha_partition::get_dynamic_partition_info(PARTITION_INFO *stat_info, /* + General function to prepare handler for certain behavior + + SYNOPSIS + extra() + operation Operation type for extra call + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION extra() is called whenever the server wishes to send a hint to the storage engine. The MyISAM engine implements the most hints. @@ -2814,8 +4645,18 @@ int ha_partition::extra(enum ha_extra_function operation) /* - This will in the future be called instead of extra(HA_EXTRA_RESET) as this - is such a common call + Special extra call to reset extra parameters + + SYNOPSIS + reset() + + RETURN VALUE + >0 Error code + 0 Success + + DESCRIPTION + This will in the future be called instead of extra(HA_EXTRA_RESET) as this + is such a common call */ int ha_partition::reset(void) @@ -2823,6 +4664,7 @@ int ha_partition::reset(void) int result= 0, tmp; handler **file; DBUG_ENTER("ha_partition::reset"); + file= m_file; if (m_part_info) bitmap_clear_all(&m_part_info->used_partitions); @@ -2835,15 +4677,40 @@ int ha_partition::reset(void) } +/* + Special extra method for HA_EXTRA_CACHE with cachesize as extra parameter + + SYNOPSIS + extra_opt() + operation Must be HA_EXTRA_CACHE + cachesize Size of cache in full table scan + + RETURN VALUE + >0 Error code + 0 Success +*/ + int ha_partition::extra_opt(enum ha_extra_function operation, ulong cachesize) { DBUG_ENTER("ha_partition::extra_opt()"); + DBUG_ASSERT(HA_EXTRA_CACHE == operation); prepare_extra_cache(cachesize); DBUG_RETURN(0); } +/* + Call extra on handler with HA_EXTRA_CACHE and cachesize + + SYNOPSIS + prepare_extra_cache() + cachesize Size of cache for full table scan + + RETURN VALUE + NONE +*/ + void ha_partition::prepare_extra_cache(uint cachesize) { DBUG_ENTER("ha_partition::prepare_extra_cache()"); @@ -2859,11 +4726,24 @@ void ha_partition::prepare_extra_cache(uint cachesize) } +/* + Call extra on all partitions + + SYNOPSIS + loop_extra() + operation extra operation type + + RETURN VALUE + >0 Error code + 0 Success +*/ + int ha_partition::loop_extra(enum ha_extra_function operation) { int result= 0, tmp; handler **file; DBUG_ENTER("ha_partition::loop_extra()"); + for (file= m_file; *file; file++) { if ((tmp= (*file)->extra(operation))) @@ -2873,10 +4753,22 @@ int ha_partition::loop_extra(enum ha_extra_function operation) } +/* + Call extra(HA_EXTRA_CACHE) on next partition_id + + SYNOPSIS + late_extra_cache() + partition_id Partition id to call extra on + + RETURN VALUE + NONE +*/ + void ha_partition::late_extra_cache(uint partition_id) { handler *file; DBUG_ENTER("ha_partition::late_extra_cache"); + if (!m_extra_cache) DBUG_VOID_RETURN; file= m_file[partition_id]; @@ -2888,10 +4780,22 @@ void ha_partition::late_extra_cache(uint partition_id) } +/* + Call extra(HA_EXTRA_NO_CACHE) on next partition_id + + SYNOPSIS + late_extra_no_cache() + partition_id Partition id to call extra on + + RETURN VALUE + NONE +*/ + void ha_partition::late_extra_no_cache(uint partition_id) { handler *file; DBUG_ENTER("ha_partition::late_extra_no_cache"); + if (!m_extra_cache) DBUG_VOID_RETURN; file= 
m_file[partition_id];
@@ -2904,12 +4808,34 @@ void ha_partition::late_extra_no_cache(uint partition_id)
   MODULE optimiser support
 ****************************************************************************/
 
+/*
+  Get keys to use for scanning
+
+  SYNOPSIS
+    keys_to_use_for_scanning()
+
+  RETURN VALUE
+    key_map of keys usable for scanning
+*/
+
 const key_map *ha_partition::keys_to_use_for_scanning()
 {
   DBUG_ENTER("ha_partition::keys_to_use_for_scanning");
+
   DBUG_RETURN(m_file[0]->keys_to_use_for_scanning());
 }
 
+
+/*
+  Return time for a scan of the table
+
+  SYNOPSIS
+    scan_time()
+
+  RETURN VALUE
+    time for scan
+*/
+
 double ha_partition::scan_time()
 {
   double scan_time= 0;
@@ -2923,28 +4849,53 @@ double ha_partition::scan_time()
 
 
 /*
-  This will be optimised later to include whether or not the index can
-  be used with partitioning. To achieve we need to add another parameter
-  that specifies how many of the index fields that are bound in the ranges.
-  Possibly added as a new call to handlers.
+  Get time to read
+
+  SYNOPSIS
+    read_time()
+    index                Index number used
+    ranges               Number of ranges
+    rows                 Number of rows
+
+  RETURN VALUE
+    time for read
+
+  DESCRIPTION
+    This will be optimised later to include whether or not the index can
+    be used with partitioning. To achieve this we need to add another
+    parameter that specifies how many of the index fields are bound in
+    the ranges. Possibly added as a new call to handlers.
 */
 
 double ha_partition::read_time(uint index, uint ranges, ha_rows rows)
 {
   DBUG_ENTER("ha_partition::read_time");
+
   DBUG_RETURN(m_file[0]->read_time(index, ranges, rows));
 }
 
 /*
-  Given a starting key, and an ending key estimate the number of rows that
-  will exist between the two. end_key may be empty which in case determine
-  if start_key matches any rows.
+  Find number of records in a range
 
-  Called from opt_range.cc by check_quick_keys().
+  SYNOPSIS
+    records_in_range()
+    inx                  Index number
+    min_key              Start of range
+    max_key              End of range
 
-  monty: MUST be called for each range and added.
-        Note that MySQL will assume that if this returns 0 there is no
-        matching rows for the range!
+  RETURN VALUE
+    Number of rows in range
+
+  DESCRIPTION
+    Given a starting key and an ending key, estimate the number of rows
+    that will exist between the two. end_key may be empty, in which case
+    we determine whether start_key matches any rows.
+
+    Called from opt_range.cc by check_quick_keys().
+
+    monty: MUST be called for each range and added.
+          Note that MySQL will assume that if this returns 0 there are no
+          matching rows for the range!
*/
 
 ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
@@ -2963,6 +4914,16 @@ ha_rows ha_partition::records_in_range(uint inx, key_range *min_key,
 }
 
 
+/*
+  Estimate upper bound of number of rows
+
+  SYNOPSIS
+    estimate_rows_upper_bound()
+
+  RETURN VALUE
+    Number of rows
+*/
+
 ha_rows ha_partition::estimate_rows_upper_bound()
 {
   ha_rows rows, tot_rows= 0;
@@ -2981,9 +4942,48 @@ ha_rows ha_partition::estimate_rows_upper_bound()
 }
 
 
+/*
+  Is it ok to switch to a new engine for this table
+
+  SYNOPSIS
+    can_switch_engines()
+
+  RETURN VALUE
+    TRUE                 Ok
+    FALSE                Not ok
+
+  DESCRIPTION
+    Used to ensure that tables with foreign key constraints are not moved
+    to engines without foreign key support.
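
    Stepping back to the statistics methods above: scan_time,
    records_in_range and estimate_rows_upper_bound all fold one
    per-partition figure over the handler array, and per the note above a
    total of 0 promises the optimizer that no partition holds a matching
    row. A standalone sketch of that aggregation pattern (illustrative
    only, not the method bodies elided by the hunks above):

      #include <cstdio>
      #include <vector>

      struct PartStats { double scan_time; unsigned long long rows; };

      // Scan cost and the row-count upper bound both add up per partition.
      static double total_scan_time(const std::vector<PartStats> &parts)
      {
        double t= 0;
        for (size_t i= 0; i < parts.size(); i++)
          t+= parts[i].scan_time;
        return t;
      }

      static unsigned long long
      rows_upper_bound(const std::vector<PartStats> &parts)
      {
        unsigned long long r= 0;
        for (size_t i= 0; i < parts.size(); i++)
          r+= parts[i].rows;                   // 0 only if every partition says 0
        return r;
      }

      int main()
      {
        std::vector<PartStats> parts= {{1.5, 100}, {0.5, 40}, {2.0, 0}};
        std::printf("scan time %.1f, at most %llu rows\n",
                    total_scan_time(parts), rows_upper_bound(parts));
        return 0;
      }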
+*/ + +bool ha_partition::can_switch_engines() +{ + handler **file; + DBUG_ENTER("ha_partition::can_switch_engines"); + + file= m_file; + do + { + if (!(*file)->can_switch_engines()) + DBUG_RETURN(FALSE); + } while (*(++file)); + DBUG_RETURN(TRUE); +} + + +/* + Is table cache supported + + SYNOPSIS + table_cache_type() + +*/ + uint8 ha_partition::table_cache_type() { DBUG_ENTER("ha_partition::table_cache_type"); + DBUG_RETURN(m_file[0]->table_cache_type()); } @@ -2995,6 +4995,7 @@ uint8 ha_partition::table_cache_type() const char *ha_partition::index_type(uint inx) { DBUG_ENTER("ha_partition::index_type"); + DBUG_RETURN(m_file[0]->index_type(inx)); } @@ -3002,11 +5003,17 @@ const char *ha_partition::index_type(uint inx) void ha_partition::print_error(int error, myf errflag) { DBUG_ENTER("ha_partition::print_error"); + /* Should probably look for my own errors first */ /* monty: needs to be called for the last used partition ! */ + DBUG_PRINT("enter", ("error = %d", error)); + if (error == HA_ERR_NO_PARTITION_FOUND) + { + char buf[100]; my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, MYF(0), - m_part_info->part_expr->val_int()); + llstr(m_part_info->part_expr->val_int(), buf)); + } else m_file[0]->print_error(error, errflag); DBUG_VOID_RETURN; @@ -3016,6 +5023,7 @@ void ha_partition::print_error(int error, myf errflag) bool ha_partition::get_error_message(int error, String *buf) { DBUG_ENTER("ha_partition::get_error_message"); + /* Should probably look for my own errors first */ /* monty: needs to be called for the last used partition ! */ DBUG_RETURN(m_file[0]->get_error_message(error, buf)); @@ -3040,7 +5048,8 @@ const char **ha_partition::bas_ext() const { return ha_partition_ext; } -uint ha_partition::min_of_the_max_uint(uint (handler::*operator_func)(void) const) const +uint ha_partition::min_of_the_max_uint( + uint (handler::*operator_func)(void) const) const { handler **file; uint min_of_the_max= ((*m_file)->*operator_func)(); @@ -3088,6 +5097,7 @@ uint ha_partition::extra_rec_buf_length() const { handler **file; uint max= (*m_file)->extra_rec_buf_length(); + for (file= m_file, file++; *file; file++) if (max < (*file)->extra_rec_buf_length()) max= (*file)->extra_rec_buf_length(); @@ -3099,6 +5109,7 @@ uint ha_partition::min_record_length(uint options) const { handler **file; uint max= (*m_file)->min_record_length(options); + for (file= m_file, file++; *file; file++) if (max < (*file)->min_record_length(options)) max= (*file)->min_record_length(options); @@ -3110,10 +5121,23 @@ uint ha_partition::min_record_length(uint options) const MODULE compare records ****************************************************************************/ /* - We get two references and need to check if those records are the same. - If they belong to different partitions we decide that they are not - the same record. Otherwise we use the particular handler to decide if - they are the same. Sort in partition id order if not equal. + Compare two positions + + SYNOPSIS + cmp_ref() + ref1 First position + ref2 Second position + + RETURN VALUE + <0 ref1 < ref2 + 0 Equal + >0 ref1 > ref2 + + DESCRIPTION + We get two references and need to check if those records are the same. + If they belong to different partitions we decide that they are not + the same record. Otherwise we use the particular handler to decide if + they are the same. Sort in partition id order if not equal. 
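
    The reference format this comparison works on can be shown
    byte-for-byte: a 2-byte little-endian partition id
    (PARTITION_BYTES_IN_POS) followed by the underlying engine's own
    reference. A standalone sketch, with uint2korr written out by hand and
    the per-engine comparison simplified to memcmp (the real code
    delegates to that partition's handler):

      #include <cstdio>
      #include <cstring>

      typedef unsigned char byte_t;

      static unsigned part_id_of(const byte_t *ref)  // what uint2korr computes
      { return (unsigned) ref[0] | ((unsigned) ref[1] << 8); }

      static int cmp_ref_sketch(const byte_t *ref1, const byte_t *ref2,
                                size_t engine_ref_len)
      {
        unsigned p1= part_id_of(ref1), p2= part_id_of(ref2);
        if (p1 != p2)                          // different partitions:
          return p1 < p2 ? -1 : 1;             // sort by partition id
        return std::memcmp(ref1 + 2, ref2 + 2, engine_ref_len);
      }

      int main()
      {
        byte_t a[6]= {1, 0, 0, 0, 0, 7};       // partition 1 + engine ref
        byte_t b[6]= {2, 0, 0, 0, 0, 1};       // partition 2 + engine ref
        std::printf("%d\n", cmp_ref_sketch(a, b, 4));  // prints -1
        return 0;
      }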
*/ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2) @@ -3122,9 +5146,10 @@ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2) my_ptrdiff_t diff1, diff2; handler *file; DBUG_ENTER("ha_partition::cmp_ref"); + if ((ref1[0] == ref2[0]) && (ref1[1] == ref2[1])) { - part_id= get_part_id_from_pos(ref1); + part_id= uint2korr(ref1); file= m_file[part_id]; DBUG_ASSERT(part_id < m_tot_parts); DBUG_RETURN(file->cmp_ref((ref1 + PARTITION_BYTES_IN_POS), @@ -3155,6 +5180,7 @@ int ha_partition::cmp_ref(const byte *ref1, const byte *ref2) void ha_partition::restore_auto_increment() { DBUG_ENTER("ha_partition::restore_auto_increment"); + DBUG_VOID_RETURN; } @@ -3169,6 +5195,7 @@ void ha_partition::restore_auto_increment() ulonglong ha_partition::get_auto_increment() { DBUG_ENTER("ha_partition::get_auto_increment"); + DBUG_RETURN(m_file[0]->get_auto_increment()); } @@ -3204,6 +5231,7 @@ static int partition_init= 0; /* Function we use in the creation of our hash to get key. */ + static byte *partition_get_key(PARTITION_SHARE *share, uint *length, my_bool not_used __attribute__ ((unused))) { @@ -3218,7 +5246,6 @@ static byte *partition_get_key(PARTITION_SHARE *share, uint *length, function. */ - static PARTITION_SHARE *get_share(const char *table_name, TABLE *table) { PARTITION_SHARE *share; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 03acf217419..60f6a5ca15b 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -49,10 +49,15 @@ private: partition_no_index_scan= 3 }; /* Data for the partition handler */ + int m_mode; // Open mode + uint m_open_test_lock; // Open test_if_locked char *m_file_buffer; // Buffer with names char *m_name_buffer_ptr; // Pointer to first partition name handlerton **m_engine_array; // Array of types of the handlers handler **m_file; // Array of references to handler inst. + handler **m_new_file; // Array of references to new handlers + handler **m_reorged_file; // Reorganised partitions + handler **m_added_file; // Added parts kept for errors partition_info *m_part_info; // local reference to partition byte *m_start_key_ref; // Reference of start key in current // index scan info @@ -60,7 +65,7 @@ private: byte *m_ordered_rec_buffer; // Row and key buffer for ord. 
idx scan KEY *m_curr_key_info; // Current index byte *m_rec0; // table->record[0] - QUEUE queue; // Prio queue used by sorted read + QUEUE m_queue; // Prio queue used by sorted read /* Since the partition handler is a handler on top of other handlers, it is necessary to keep information about what the underlying handler @@ -71,6 +76,7 @@ private: u_long m_table_flags; u_long m_low_byte_first; + uint m_reorged_parts; // Number of reorganised parts uint m_tot_parts; // Total number of partitions; uint m_no_locks; // For engines like ha_blackhole, which needs no locks uint m_last_part; // Last file that we update,write @@ -172,21 +178,38 @@ public: */ virtual int delete_table(const char *from); virtual int rename_table(const char *from, const char *to); - virtual int create(const char *name, TABLE * form, - HA_CREATE_INFO * create_info); + virtual int create(const char *name, TABLE *form, + HA_CREATE_INFO *create_info); virtual int create_handler_files(const char *name); - virtual void update_create_info(HA_CREATE_INFO * create_info); + virtual void update_create_info(HA_CREATE_INFO *create_info); virtual char *update_table_comment(const char *comment); + virtual int change_partitions(HA_CREATE_INFO *create_info, + const char *path, + ulonglong *copied, + ulonglong *deleted, + const void *pack_frm_data, + uint pack_frm_len); virtual int drop_partitions(const char *path); + virtual int rename_partitions(const char *path); + bool get_no_parts(const char *name, uint *no_parts) + { + DBUG_ENTER("ha_partition::get_no_parts"); + *no_parts= m_tot_parts; + DBUG_RETURN(0); + } private: + int copy_partitions(ulonglong *copied, ulonglong *deleted); + void cleanup_new_partition(uint part_count); + int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info, + handler *file, const char *part_name); /* delete_table, rename_table and create uses very similar logic which is packed into this routine. */ uint del_ren_cre_table(const char *from, const char *to= NULL, - TABLE * table_arg= NULL, - HA_CREATE_INFO * create_info= NULL); + TABLE *table_arg= NULL, + HA_CREATE_INFO *create_info= NULL); /* One method to create the table_name.par file containing the names of the underlying partitions, their engine and the number of partitions. @@ -647,30 +670,8 @@ public: index scan module. (NDB) */ - virtual ulong alter_table_flags(void) const - { - //return HA_ONLINE_ADD_EMPTY_PARTITION + HA_ONLINE_DROP_PARTITION; - return HA_ONLINE_DROP_PARTITION; - } virtual ulong table_flags() const { return m_table_flags; } - /* - HA_CAN_PARTITION: - Used by storage engines that can handle partitioning without this - partition handler - (Partition, NDB) - - HA_CAN_UPDATE_PARTITION_KEY: - Set if the handler can update fields that are part of the partition - function. - - HA_CAN_PARTITION_UNIQUE: - Set if the handler can handle unique indexes where the fields of the - unique key are not part of the fields of the partition function. Thus - a unique key can be set on all fields. - */ - virtual ulong partition_flags() const - { return HA_CAN_PARTITION; } /* This is a bitmap of flags that says how the storage engine @@ -834,6 +835,8 @@ public: description of how the CREATE TABLE part to define FOREIGN KEY's is done. free_foreign_key_create_info is used to free the memory area that provided this description. + can_switch_engines checks if it is ok to switch to a new engine based on + the foreign key info in the table. 
------------------------------------------------------------------------- virtual char* get_foreign_key_create_info() @@ -843,7 +846,7 @@ public: List *f_key_list) virtual uint referenced_by_foreign_key() */ - + virtual bool can_switch_engines(); /* ------------------------------------------------------------------------- MODULE fulltext index @@ -892,16 +895,35 @@ public: ------------------------------------------------------------------------- MODULE admin MyISAM ------------------------------------------------------------------------- + + ------------------------------------------------------------------------- + OPTIMIZE TABLE, CHECK TABLE, ANALYZE TABLE and REPAIR TABLE are + mapped to a routine that handles looping over a given set of + partitions and those routines send a flag indicating to execute on + all partitions. + ------------------------------------------------------------------------- + */ + virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt); + virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt); + virtual int check(THD* thd, HA_CHECK_OPT *check_opt); + virtual int repair(THD* thd, HA_CHECK_OPT *check_opt); + virtual int optimize_partitions(THD *thd); + virtual int analyze_partitions(THD *thd); + virtual int check_partitions(THD *thd); + virtual int repair_partitions(THD *thd); + + private: + int handle_opt_partitions(THD *thd, HA_CHECK_OPT *check_opt, + uint flags, bool all_parts); + public: + /* + ------------------------------------------------------------------------- Admin commands not supported currently (almost purely MyISAM routines) This means that the following methods are not implemented: ------------------------------------------------------------------------- - virtual int check(THD* thd, HA_CHECK_OPT *check_opt); virtual int backup(TD* thd, HA_CHECK_OPT *check_opt); virtual int restore(THD* thd, HA_CHECK_OPT *check_opt); - virtual int repair(THD* thd, HA_CHECK_OPT *check_opt); - virtual int optimize(THD* thd, HA_CHECK_OPT *check_opt); - virtual int analyze(THD* thd, HA_CHECK_OPT *check_opt); virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt); virtual int preload_keys(THD *thd, HA_CHECK_OPT *check_opt); virtual bool check_and_repair(THD *thd); diff --git a/sql/handler.cc b/sql/handler.cc index 5fd27c87ead..b40a40684fe 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -63,7 +63,7 @@ const handlerton default_hton = NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, create_default, - NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, HTON_NO_FLAGS }; @@ -1425,6 +1425,12 @@ int ha_delete_table(THD *thd, handlerton *table_type, const char *path, ** General handler functions ****************************************************************************/ + +void handler::ha_statistic_increment(ulong SSV::*offset) const +{ + statistic_increment(table->in_use->status_var.*offset, &LOCK_status); +} + /* Open database-handler. 
@@ -2160,7 +2166,8 @@ int ha_create_table(THD *thd, const char *path, init_tmp_table_share(&share, db, 0, table_name, path); if (open_table_def(thd, &share, 0) || - open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table)) + open_table_from_share(thd, &share, "", 0, (uint) READ_ALL, 0, &table, + TRUE)) goto err; if (update_create_info) @@ -2237,7 +2244,7 @@ int ha_create_table_from_engine(THD* thd, const char *db, const char *name) { DBUG_RETURN(3); } - if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table)) + if (open_table_from_share(thd, &share, "" ,0, 0, 0, &table, FALSE)) { free_table_share(&share); DBUG_RETURN(3); diff --git a/sql/handler.h b/sql/handler.h index e766797133d..f43a6514086 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -99,6 +99,7 @@ #define HA_CAN_PARTITION (1 << 0) /* Partition support */ #define HA_CAN_UPDATE_PARTITION_KEY (1 << 1) #define HA_CAN_PARTITION_UNIQUE (1 << 2) +#define HA_USE_AUTO_PARTITION (1 << 3) /* bits in index_flags(index_number) for what you can do with index */ @@ -109,30 +110,58 @@ #define HA_ONLY_WHOLE_INDEX 16 /* Can't use part key searches */ #define HA_KEYREAD_ONLY 64 /* Support HA_EXTRA_KEYREAD */ -/* bits in alter_table_flags */ -#define HA_ONLINE_ADD_EMPTY_PARTITION 0x00000001 -#define HA_ONLINE_DROP_PARTITION 0x00000002 +/* + bits in alter_table_flags: +*/ /* These bits are set if different kinds of indexes can be created off-line without re-create of the table (but with a table lock). */ -#define HA_ONLINE_ADD_INDEX_NO_WRITES 0x00000004 /*add index w/lock*/ -#define HA_ONLINE_DROP_INDEX_NO_WRITES 0x00000008 /*drop index w/lock*/ -#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES 0x00000010 /*add unique w/lock*/ -#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES 0x00000020 /*drop uniq. w/lock*/ -#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES 0x00000040 /*add prim. w/lock*/ -#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES 0x00000080 /*drop prim. w/lock*/ +#define HA_ONLINE_ADD_INDEX_NO_WRITES (1L << 0) /*add index w/lock*/ +#define HA_ONLINE_DROP_INDEX_NO_WRITES (1L << 1) /*drop index w/lock*/ +#define HA_ONLINE_ADD_UNIQUE_INDEX_NO_WRITES (1L << 2) /*add unique w/lock*/ +#define HA_ONLINE_DROP_UNIQUE_INDEX_NO_WRITES (1L << 3) /*drop uniq. w/lock*/ +#define HA_ONLINE_ADD_PK_INDEX_NO_WRITES (1L << 4) /*add prim. w/lock*/ +#define HA_ONLINE_DROP_PK_INDEX_NO_WRITES (1L << 5) /*drop prim. w/lock*/ /* These are set if different kinds of indexes can be created on-line (without a table lock). If a handler is capable of one or more of these, it should also set the corresponding *_NO_WRITES bit(s). */ -#define HA_ONLINE_ADD_INDEX 0x00000100 /*add index online*/ -#define HA_ONLINE_DROP_INDEX 0x00000200 /*drop index online*/ -#define HA_ONLINE_ADD_UNIQUE_INDEX 0x00000400 /*add unique online*/ -#define HA_ONLINE_DROP_UNIQUE_INDEX 0x00000800 /*drop uniq. online*/ -#define HA_ONLINE_ADD_PK_INDEX 0x00001000 /*add prim. online*/ -#define HA_ONLINE_DROP_PK_INDEX 0x00002000 /*drop prim. online*/ +#define HA_ONLINE_ADD_INDEX (1L << 6) /*add index online*/ +#define HA_ONLINE_DROP_INDEX (1L << 7) /*drop index online*/ +#define HA_ONLINE_ADD_UNIQUE_INDEX (1L << 8) /*add unique online*/ +#define HA_ONLINE_DROP_UNIQUE_INDEX (1L << 9) /*drop uniq. online*/ +#define HA_ONLINE_ADD_PK_INDEX (1L << 10)/*add prim. online*/ +#define HA_ONLINE_DROP_PK_INDEX (1L << 11)/*drop prim. online*/ +/* + HA_PARTITION_FUNCTION_SUPPORTED indicates that the function is + supported at all. 
+  HA_FAST_CHANGE_PARTITION means that optimised variants of the changes
+  exist, but they are not necessarily done online.
+
+  HA_ONLINE_DOUBLE_WRITE means that the handler supports writing to both
+  the new partition and to the old partitions when updating through the
+  old partitioning schema while performing a change of the partitioning.
+  This means that we can support updating of the table while performing
+  the copy phase of the change. For no lock at all, a double write from
+  new to old must also exist; that is not required when only this flag is
+  set.
+  This flag was actually removed even before it was first introduced.
+  The new idea is that handlers will handle the lock level already in
+  store_lock for ALTER TABLE partitions.
+
+  HA_PARTITION_ONE_PHASE is a flag that can be set by handlers that take
+  care of changing the partitions online and in one phase. Thus all phases
+  needed to handle the change are implemented inside the storage engine.
+  The storage engine must also support auto-discovery since the frm file
+  is changed as part of the change and this change must be controlled by
+  the storage engine. A typical engine to support this is NDB (through
+  WL #2498).
+*/
+#define HA_PARTITION_FUNCTION_SUPPORTED         (1L << 12)
+#define HA_FAST_CHANGE_PARTITION                (1L << 13)
+#define HA_PARTITION_ONE_PHASE                  (1L << 14)
 
 /*
   Index scan will not return records in rowid order. Not guaranteed to be
@@ -140,7 +169,6 @@
 */
 #define HA_KEY_SCAN_NOT_ROR 128
 
-
 /* operations for disable/enable indexes */
 #define HA_KEY_SWITCH_NONUNIQ      0
 #define HA_KEY_SWITCH_ALL          1
@@ -540,6 +568,8 @@ typedef struct
   int (*start_consistent_snapshot)(THD *thd);
   bool (*flush_logs)();
   bool (*show_status)(THD *thd, stat_print_fn *print, enum ha_stat_type stat);
+  uint (*partition_flags)();
+  uint (*alter_table_flags)(uint flags);
   int (*alter_tablespace)(THD *thd, st_alter_tablespace *ts_info);
   uint32 flags;                                /* global handler flags */
   /*
@@ -604,10 +634,12 @@ enum partition_state {
   PART_NORMAL= 0,
   PART_IS_DROPPED= 1,
   PART_TO_BE_DROPPED= 2,
-  PART_DROPPING= 3,
-  PART_IS_ADDED= 4,
-  PART_ADDING= 5,
-  PART_ADDED= 6
+  PART_TO_BE_ADDED= 3,
+  PART_TO_BE_REORGED= 4,
+  PART_REORGED_DROPPED= 5,
+  PART_CHANGED= 6,
+  PART_IS_CHANGED= 7,
+  PART_IS_ADDED= 8
 };
 
 typedef struct {
@@ -657,13 +689,14 @@ public:
 typedef struct {
   longlong list_value;
-  uint partition_id;
+  uint32 partition_id;
 } LIST_PART_ENTRY;
 
 class partition_info;
 
-typedef bool (*get_part_id_func)(partition_info *part_info,
-                                 uint32 *part_id);
+typedef int (*get_part_id_func)(partition_info *part_info,
+                                uint32 *part_id,
+                                longlong *func_value);
 typedef uint32 (*get_subpart_id_func)(partition_info *part_info);
 
 class partition_info :public Sql_alloc {
@@ -732,6 +765,8 @@ public:
   char *part_func_string;
   char *subpart_func_string;
 
+  uchar *part_state;
+
   partition_element *curr_part_elem;
   partition_element *current_partition;
   /*
@@ -748,12 +783,12 @@ public:
   partition_type subpart_type;
 
   uint part_info_len;
+  uint part_state_len;
   uint part_func_len;
   uint subpart_func_len;
 
   uint no_parts;
   uint no_subparts;
-  uint count_curr_parts;
   uint count_curr_subparts;
 
   uint part_error_code;
@@ -764,14 +799,24 @@ public:
   uint no_subpart_fields;
   uint no_full_part_fields;
 
+  /*
+    This variable is used to calculate the partition id when using
+    LINEAR KEY/HASH. This functionality is kept in the MySQL Server
+    but mainly of use to handlers supporting partitioning.
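
    As a sketch of how a mask like this gets used (standalone and
    illustrative; it follows the documented LINEAR HASH rule of masking to
    the nearest power of two and folding once when the slot lies beyond
    the real partition count):

      #include <cstdio>

      // mask is 2^ceil(log2(no_parts)) - 1, what linear_hash_mask caches.
      static unsigned linear_hash_part_id(unsigned long hash_value,
                                          unsigned no_parts, unsigned mask)
      {
        unsigned part_id= (unsigned) (hash_value & mask);
        if (part_id >= no_parts)      // slot not split off yet:
          part_id&= mask >> 1;        // fold into the lower half; one fold
        return part_id;               // suffices as mask + 1 < 2 * no_parts
      }

      int main()
      {
        for (unsigned long h= 0; h < 8; h++)   // 6 partitions => mask is 7
          std::printf("hash %lu -> partition %u\n",
                      h, linear_hash_part_id(h, 6, 7));
        return 0;
      }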
+ */ uint16 linear_hash_mask; bool use_default_partitions; + bool use_default_no_partitions; bool use_default_subpartitions; + bool use_default_no_subpartitions; + bool default_partitions_setup; bool defined_max_value; bool list_of_part_fields; bool list_of_subpart_fields; bool linear_hash_ind; + bool fixed; + bool from_openfrm; partition_info() : get_partition_id(NULL), get_part_partition_id(NULL), @@ -782,19 +827,27 @@ public: list_array(NULL), part_info_string(NULL), part_func_string(NULL), subpart_func_string(NULL), + part_state(NULL), curr_part_elem(NULL), current_partition(NULL), default_engine_type(NULL), part_result_type(INT_RESULT), part_type(NOT_A_PARTITION), subpart_type(NOT_A_PARTITION), - part_info_len(0), part_func_len(0), subpart_func_len(0), + part_info_len(0), part_state_len(0), + part_func_len(0), subpart_func_len(0), no_parts(0), no_subparts(0), - count_curr_parts(0), count_curr_subparts(0), part_error_code(0), + count_curr_subparts(0), part_error_code(0), no_list_values(0), no_part_fields(0), no_subpart_fields(0), no_full_part_fields(0), linear_hash_mask(0), use_default_partitions(TRUE), - use_default_subpartitions(TRUE), defined_max_value(FALSE), + use_default_no_partitions(TRUE), + use_default_subpartitions(TRUE), + use_default_no_subpartitions(TRUE), + default_partitions_setup(FALSE), + defined_max_value(FALSE), list_of_part_fields(FALSE), list_of_subpart_fields(FALSE), - linear_hash_ind(FALSE) + linear_hash_ind(FALSE), + fixed(FALSE), + from_openfrm(FALSE) { all_fields_in_PF.clear_all(); all_fields_in_PPF.clear_all(); @@ -842,6 +895,8 @@ uint get_tot_partitions(partition_info *part_info) return part_info->no_parts * (is_sub_partitioned(part_info) ? part_info->no_subparts : 1); } + + #endif typedef struct st_ha_create_information @@ -891,8 +946,8 @@ typedef struct st_ha_check_opt #ifdef WITH_PARTITION_STORAGE_ENGINE bool is_partition_in_list(char *part_name, List list_part_names); -bool is_partitions_in_table(partition_info *new_part_info, - partition_info *old_part_info); +char *are_partitions_in_table(partition_info *new_part_info, + partition_info *old_part_info); bool check_reorganise_list(partition_info *new_part_info, partition_info *old_part_info, List list_part_names); @@ -903,15 +958,17 @@ bool set_up_defaults_for_partitioning(partition_info *part_info, handler *get_ha_partition(partition_info *part_info); int get_parts_for_update(const byte *old_data, byte *new_data, const byte *rec0, partition_info *part_info, - uint32 *old_part_id, uint32 *new_part_id); + uint32 *old_part_id, uint32 *new_part_id, + longlong *func_value); int get_part_for_delete(const byte *buf, const byte *rec0, partition_info *part_info, uint32 *part_id); -bool check_partition_info(partition_info *part_info,handlerton *eng_type, +bool check_partition_info(partition_info *part_info,handlerton **eng_type, handler *file, ulonglong max_rows); -bool fix_partition_func(THD *thd, const char *name, TABLE *table); +bool fix_partition_func(THD *thd, const char *name, TABLE *table, + bool create_table_ind); char *generate_partition_syntax(partition_info *part_info, uint *buf_length, bool use_sql_alloc, - bool add_default_info); + bool write_all); bool partition_key_modified(TABLE *table, List &fields); void get_partition_set(const TABLE *table, byte *buf, const uint index, const key_range *key_spec, @@ -921,7 +978,9 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf, const key_range *key_spec, part_id_range *part_spec); bool mysql_unpack_partition(THD *thd, const uchar *part_buf, - 
uint part_info_len, TABLE *table, + uint part_info_len, + uchar *part_state, uint part_state_len, + TABLE *table, bool is_create_table_ind, handlerton *default_db_type); void make_used_partitions_str(partition_info *part_info, String *parts_str); uint32 get_list_array_idx_for_endpoint(partition_info *part_info, @@ -947,6 +1006,8 @@ typedef struct st_handler_buffer byte *end_of_used_area; /* End of area that was used by handler */ } HANDLER_BUFFER; +typedef struct system_status_var SSV; + class handler :public Sql_alloc { #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -968,6 +1029,9 @@ class handler :public Sql_alloc virtual int rnd_init(bool scan) =0; virtual int rnd_end() { return 0; } + void ha_statistic_increment(ulong SSV::*offset) const; + + private: virtual int reset() { return extra(HA_EXTRA_RESET); } public: @@ -1030,6 +1094,33 @@ public: { /* TODO: DBUG_ASSERT(inited == NONE); */ } + /* + Check whether a handler allows to lock the table. + + SYNOPSIS + check_if_locking_is_allowed() + thd Handler of the thread, trying to lock the table + table Table handler to check + count Number of locks already granted to the table + + DESCRIPTION + Check whether a handler allows to lock the table. For instance, + MyISAM does not allow to lock mysql.proc along with other tables. + This limitation stems from the fact that MyISAM does not support + row-level locking and we have to add this limitation to avoid + deadlocks. + + RETURN + TRUE Locking is allowed + FALSE Locking is not allowed. The error was thrown. + */ + virtual bool check_if_locking_is_allowed(uint sql_command, + ulong type, TABLE *table, + uint count, + bool called_by_logger_thread) + { + return TRUE; + } virtual int ha_initialise(); int ha_open(TABLE *table, const char *name, int mode, int test_if_locked); bool update_auto_increment(); @@ -1480,11 +1571,16 @@ public: virtual const char *table_type() const =0; virtual const char **bas_ext() const =0; virtual ulong table_flags(void) const =0; - virtual ulong alter_table_flags(void) const { return 0; } #ifdef WITH_PARTITION_STORAGE_ENGINE - virtual ulong partition_flags(void) const { return 0;} virtual int get_default_no_partitions(ulonglong max_rows) { return 1;} - virtual void set_part_info(partition_info *part_info) { return; } + virtual void set_auto_partitions(partition_info *part_info) { return; } + virtual bool get_no_parts(const char *name, + uint *no_parts) + { + *no_parts= 0; + return 0; + } + virtual void set_part_info(partition_info *part_info) {return;} #endif virtual ulong index_flags(uint idx, uint part, bool all_parts) const =0; @@ -1530,19 +1626,26 @@ public: virtual int create(const char *name, TABLE *form, HA_CREATE_INFO *info)=0; virtual int create_handler_files(const char *name) { return FALSE;} - /* - SYNOPSIS - drop_partitions() - path Complete path of db and table name - RETURN VALUE - TRUE Failure - FALSE Success - DESCRIPTION - Drop a partition, during this operation no other activity is ongoing - in this server on the table. 
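
    Going by the copy_partitions(copied, deleted) declaration earlier in
    this patch, the copy phase of change_partitions() has to rescan every
    reorganised partition, recompute each row's partition id under the new
    scheme, and tally what was copied versus dropped. A hedged standalone
    sketch of that flow; every name below is illustrative, not the real
    implementation:

      #include <vector>

      struct Row { int key; };

      static const unsigned NO_PART= ~0U;      // row has no home in new scheme

      // Hypothetical new partitioning function: keys below 100 survive.
      static unsigned new_part_id(const Row &row)
      { return row.key < 100 ? (unsigned) (row.key % 2) : NO_PART; }

      static void
      copy_partitions_sketch(const std::vector<std::vector<Row> > &reorged,
                             std::vector<std::vector<Row> > &added,
                             unsigned long long *copied,
                             unsigned long long *deleted)
      {
        *copied= *deleted= 0;
        for (size_t p= 0; p < reorged.size(); p++)  // rescan old partitions
          for (size_t r= 0; r < reorged[p].size(); r++)
          {
            unsigned dest= new_part_id(reorged[p][r]);
            if (dest == NO_PART)               // e.g. a range being dropped
              (*deleted)++;
            else
            {
              added[dest].push_back(reorged[p][r]);  // write to new partition
              (*copied)++;
            }
          }
      }

      int main()
      {
        std::vector<std::vector<Row> > old_parts= {{{1}, {150}}, {{2}}};
        std::vector<std::vector<Row> > new_parts(2);
        unsigned long long copied, deleted;
        copy_partitions_sketch(old_parts, new_parts, &copied, &deleted);
        return (int) deleted;                  // 1 row dropped, 2 copied
      }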
- */ + virtual int change_partitions(HA_CREATE_INFO *create_info, + const char *path, + ulonglong *copied, + ulonglong *deleted, + const void *pack_frm_data, + uint pack_frm_len) + { return HA_ERR_WRONG_COMMAND; } virtual int drop_partitions(const char *path) { return HA_ERR_WRONG_COMMAND; } + virtual int rename_partitions(const char *path) + { return HA_ERR_WRONG_COMMAND; } + virtual int optimize_partitions(THD *thd) + { return HA_ERR_WRONG_COMMAND; } + virtual int analyze_partitions(THD *thd) + { return HA_ERR_WRONG_COMMAND; } + virtual int check_partitions(THD *thd) + { return HA_ERR_WRONG_COMMAND; } + virtual int repair_partitions(THD *thd) + { return HA_ERR_WRONG_COMMAND; } + /* lock_count() can be more than one if the table is a MERGE */ virtual uint lock_count(void) const { return 1; } virtual THR_LOCK_DATA **store_lock(THD *thd, diff --git a/sql/item.h b/sql/item.h index 5de69013605..e8f1697f09d 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1932,7 +1932,7 @@ public: virtual Item *real_item() { return ref; } }; - +#ifdef MYSQL_SERVER #include "gstream.h" #include "spatial.h" #include "item_sum.h" @@ -1945,6 +1945,7 @@ public: #include "item_uniq.h" #include "item_subselect.h" #include "item_xmlfunc.h" +#endif class Item_copy_string :public Item { diff --git a/sql/lex.h b/sql/lex.h index 29c693c2c74..e0b4855abc3 100644 --- a/sql/lex.h +++ b/sql/lex.h @@ -422,6 +422,7 @@ static SYMBOL symbols[] = { { "READ_WRITE", SYM(READ_WRITE_SYM)}, { "READS", SYM(READS_SYM)}, { "REAL", SYM(REAL)}, + { "REBUILD", SYM(REBUILD_SYM)}, { "RECOVER", SYM(RECOVER_SYM)}, { "REDO_BUFFER_SIZE", SYM(REDO_BUFFER_SIZE_SYM)}, { "REDOFILE", SYM(REDOFILE_SYM)}, @@ -434,7 +435,7 @@ static SYMBOL symbols[] = { { "RELEASE", SYM(RELEASE_SYM)}, { "RELOAD", SYM(RELOAD)}, { "RENAME", SYM(RENAME)}, - { "REORGANISE", SYM(REORGANISE_SYM)}, + { "REORGANIZE", SYM(REORGANIZE_SYM)}, { "REPAIR", SYM(REPAIR)}, { "REPEATABLE", SYM(REPEATABLE_SYM)}, { "REPLACE", SYM(REPLACE)}, diff --git a/sql/lock.cc b/sql/lock.cc index 8e24c56799d..5f1141cc841 100644 --- a/sql/lock.cc +++ b/sql/lock.cc @@ -351,9 +351,25 @@ void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table) } } +/* Downgrade all locks on a table to new WRITE level from WRITE_ONLY */ + +void mysql_lock_downgrade_write(THD *thd, TABLE *table, + thr_lock_type new_lock_type) +{ + MYSQL_LOCK *locked; + TABLE *write_lock_used; + if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used))) + { + for (uint i=0; i < locked->lock_count; i++) + thr_downgrade_write_lock(locked->locks[i], new_lock_type); + my_free((gptr) locked,MYF(0)); + } +} + + /* abort all other threads waiting to get lock in table */ -void mysql_lock_abort(THD *thd, TABLE *table) +void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock) { MYSQL_LOCK *locked; TABLE *write_lock_used; @@ -362,7 +378,7 @@ void mysql_lock_abort(THD *thd, TABLE *table) if ((locked = get_lock_data(thd,&table,1,1,&write_lock_used))) { for (uint i=0; i < locked->lock_count; i++) - thr_abort_locks(locked->locks[i]->lock); + thr_abort_locks(locked->locks[i]->lock, upgrade_lock); my_free((gptr) locked,MYF(0)); } DBUG_VOID_RETURN; @@ -598,18 +614,15 @@ static MYSQL_LOCK *get_lock_data(THD *thd, TABLE **table_ptr, uint count, lock_count++; } /* - To be able to open and lock for reading system tables like 'mysql.proc', - when we already have some tables opened and locked, and avoid deadlocks - we have to disallow write-locking of these tables with any other tables. + Check if we can lock the table. 
For some tables we cannot do that + beacause of handler-specific locking issues. */ - if (table_ptr[i]->s->system_table && - table_ptr[i]->reginfo.lock_type >= TL_WRITE_ALLOW_WRITE && - count != 1) - { - my_error(ER_WRONG_LOCK_OF_SYSTEM_TABLE, MYF(0), table_ptr[i]->s->db.str, - table_ptr[i]->s->table_name.str); - DBUG_RETURN(0); - } + if (!table_ptr[i]-> file-> + check_if_locking_is_allowed(thd->lex->sql_command, thd->lex->type, + table_ptr[i], count, + (thd == logger.get_general_log_thd()) || + (thd == logger.get_slow_log_thd()))) + return 0; } if (!(sql_lock= (MYSQL_LOCK*) diff --git a/sql/log.cc b/sql/log.cc index 7232d3a24dd..b2f7eb582a7 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -34,7 +34,17 @@ #include "message.h" #endif -MYSQL_LOG mysql_log, mysql_slow_log, mysql_bin_log; +/* max size of the log message */ +#define MAX_LOG_BUFFER_SIZE 1024 +#define MAX_USER_HOST_SIZE 512 +#define MAX_TIME_SIZE 32 + +/* we need this for log files intialization */ +extern char *opt_logname, *opt_slow_logname; + +LOGGER logger; + +MYSQL_LOG mysql_bin_log; ulong sync_binlog_counter= 0; static bool test_if_number(const char *str, @@ -88,11 +98,893 @@ handlerton binlog_hton = { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_NOT_USER_SELECTABLE | HTON_HIDDEN }; + +/* + Open log table of a given type (general or slow log) + + SYNOPSIS + open_log_table() + + log_type type of the log table to open: QUERY_LOG_GENERAL + or QUERY_LOG_SLOW + + DESCRIPTION + + The function opens a log table and marks it as such. Log tables are open + during the whole time, while server is running. Except for the moments + when they have to be reopened: during FLUSH LOGS and TRUNCATE. This + function is invoked directly only once during startup. All subsequent + calls happen through reopen_log_table(), which performs additional check. + + RETURN + FALSE - OK + TRUE - error occured +*/ + +bool Log_to_csv_event_handler::open_log_table(uint log_type) +{ + THD *log_thd, *curr= current_thd; + TABLE_LIST *table; + bool error= FALSE; + DBUG_ENTER("open_log_table"); + + switch (log_type) { + case QUERY_LOG_GENERAL: + log_thd= general_log_thd; + table= &general_log; + /* clean up table before reuse/initial usage */ + bzero((char*) table, sizeof(TABLE_LIST)); + table->alias= table->table_name= (char*) "general_log"; + table->table_name_length= 11; + break; + case QUERY_LOG_SLOW: + log_thd= slow_log_thd; + table= &slow_log; + bzero((char*) table, sizeof(TABLE_LIST)); + table->alias= table->table_name= (char*) "slow_log"; + table->table_name_length= 8; + break; + default: + DBUG_ASSERT(0); + } + + /* + This way we check that appropriate log thd was created ok during + initialization. We cannot check "is_log_tables_initialized" var, as + the very initialization is not finished until this function is + completed in the very first time. + */ + if (!log_thd) + { + DBUG_PRINT("error",("Cannot initialize log tables")); + DBUG_RETURN(TRUE); + } + + /* + Set THD's thread_stack. This is needed to perform stack overrun + check, which is done by some routines (e.g. open_table()). + In the case we are called by thread, which already has this parameter + set, we use this value. Otherwise we do a wild guess. This won't help + to correctly track the stack overrun in these exceptional cases (which + could probably happen only during startup and shutdown) but at least + lets us to pass asserts. 
+ The problem stems from the fact that logger THDs are not real threads. + */ + if (curr) + log_thd->thread_stack= curr->thread_stack; + else + log_thd->thread_stack= (char*) &log_thd; + + log_thd->store_globals(); + + table->lock_type= TL_WRITE_CONCURRENT_INSERT; + table->db= log_thd->db; + table->db_length= log_thd->db_length; + + if (simple_open_n_lock_tables(log_thd, table) || + table->table->file->extra(HA_EXTRA_MARK_AS_LOG_TABLE) || + table->table->file->ha_rnd_init(0)) + error= TRUE; + else + table->table->locked_by_logger= TRUE; + + /* restore thread settings */ + if (curr) + curr->store_globals(); + else + { + my_pthread_setspecific_ptr(THR_THD, 0); + my_pthread_setspecific_ptr(THR_MALLOC, 0); + } + + DBUG_RETURN(error); +} + + +Log_to_csv_event_handler::Log_to_csv_event_handler() +{ + /* init artificial THD's */ + general_log_thd= new THD; + /* logger thread always works with mysql database */ + general_log_thd->db= my_strdup("mysql", MYF(0)); + general_log_thd->db_length= 5; + + slow_log_thd= new THD; + /* logger thread always works with mysql database */ + slow_log_thd->db= my_strdup("mysql", MYF(0));; + slow_log_thd->db_length= 5; +} + + +Log_to_csv_event_handler::~Log_to_csv_event_handler() +{ + /* now cleanup the tables */ + if (general_log_thd) + { + delete general_log_thd; + general_log_thd= NULL; + } + + if (slow_log_thd) + { + delete slow_log_thd; + slow_log_thd= NULL; + } +} + + +/* + Reopen log table of a given type + + SYNOPSIS + reopen_log_table() + + log_type type of the log table to open: QUERY_LOG_GENERAL + or QUERY_LOG_SLOW + + DESCRIPTION + + The function is a wrapper around open_log_table(). It is used during + FLUSH LOGS and TRUNCATE of the log tables (i.e. when we need to close + and reopen them). The difference is in the check of the + logger.is_log_tables_initialized var, which can't be done in + open_log_table(), as it makes no sense during startup. + + NOTE: this code assumes that we have logger mutex locked + + RETURN + FALSE - ok + TRUE - open_log_table() returned an error +*/ + +bool Log_to_csv_event_handler::reopen_log_table(uint log_type) +{ + /* don't open the log table, if it wasn't enabled during startup */ + if (!logger.is_log_tables_initialized) + return FALSE; + return open_log_table(log_type); +} + +void Log_to_csv_event_handler::cleanup() +{ + close_log_table(QUERY_LOG_GENERAL, FALSE); + close_log_table(QUERY_LOG_SLOW, FALSE); + logger.is_log_tables_initialized= FALSE; +} + +/* log event handlers */ + +/* + Log command to the general log table + + SYNOPSIS + log_general_to_csv() + + event_time command start timestamp + user_host the pointer to the string with user@host info + user_host_len length of the user_host string. 
this is computed once + and passed to all general log event handlers + thread_id Id of the thread, issued a query + command_type the type of the command being logged + command_type_len the length of the string above + sql_text the very text of the query being executed + sql_text_len the length of sql_text string + + DESCRIPTION + + Log given command to the general log table + + RETURN + FALSE - OK + TRUE - error occured +*/ + +bool Log_to_csv_event_handler:: + log_general(time_t event_time, const char *user_host, + uint user_host_len, int thread_id, + const char *command_type, uint command_type_len, + const char *sql_text, uint sql_text_len) +{ + TABLE *table= general_log.table; + + /* below should never happen */ + if (unlikely(!logger.is_log_tables_initialized)) + return FALSE; + + /* log table entries are not replicated at the moment */ + tmp_disable_binlog(current_thd); + + general_log_thd->start_time= event_time; + /* set default value (which is CURRENT_TIMESTAMP) */ + table->field[0]->set_null(); + + table->field[1]->store(user_host, user_host_len, &my_charset_latin1); + table->field[2]->store((longlong) thread_id); + table->field[3]->store((longlong) server_id); + table->field[4]->store(command_type, command_type_len, &my_charset_latin1); + table->field[5]->store(sql_text, sql_text_len, &my_charset_latin1); + table->file->ha_write_row(table->record[0]); + + reenable_binlog(current_thd); + + return FALSE; +} + + +/* + Log a query to the slow log table + + SYNOPSIS + log_slow_to_csv() + thd THD of the query + current_time current timestamp + query_start_arg command start timestamp + user_host the pointer to the string with user@host info + user_host_len length of the user_host string. this is computed once + and passed to all general log event handlers + query_time Amount of time the query took to execute (in seconds) + lock_time Amount of time the query was locked (in seconds) + is_command The flag, which determines, whether the sql_text is a + query or an administrator command (these are treated + differently by the old logging routines) + sql_text the very text of the query or administrator command + processed + sql_text_len the length of sql_text string + + DESCRIPTION + + Log a query to the slow log table + + RETURN + FALSE - OK + TRUE - error occured +*/ + +bool Log_to_csv_event_handler:: + log_slow(THD *thd, time_t current_time, time_t query_start_arg, + const char *user_host, uint user_host_len, + longlong query_time, longlong lock_time, bool is_command, + const char *sql_text, uint sql_text_len) +{ + /* table variables */ + TABLE *table= slow_log.table; + + DBUG_ENTER("log_slow_to_csv"); + + /* below should never happen */ + if (unlikely(!logger.is_log_tables_initialized)) + return FALSE; + + /* log table entries are not replicated at the moment */ + tmp_disable_binlog(current_thd); + + /* + Set start time for CURRENT_TIMESTAMP to the start of the query. 
+ This will be default value for the field + */ + slow_log_thd->start_time= query_start_arg; + + /* set default value (which is CURRENT_TIMESTAMP) */ + table->field[0]->set_null(); + + /* store the value */ + table->field[1]->store(user_host, user_host_len, &my_charset_latin1); + + if (query_start_arg) + { + /* fill in query_time field */ + table->field[2]->store(query_time); + /* lock_time */ + table->field[3]->store(lock_time); + /* rows_sent */ + table->field[4]->store((longlong) thd->sent_row_count); + /* rows_examined */ + table->field[5]->store((longlong) thd->examined_row_count); + } + else + { + table->field[2]->set_null(); + table->field[3]->set_null(); + table->field[4]->set_null(); + table->field[5]->set_null(); + } + + if (thd->db) + /* fill database field */ + table->field[6]->store(thd->db, thd->db_length, &my_charset_latin1); + else + table->field[6]->set_null(); + + if (thd->last_insert_id_used) + table->field[7]->store((longlong) thd->current_insert_id); + else + table->field[7]->set_null(); + + /* set value if we do an insert on autoincrement column */ + if (thd->insert_id_used) + table->field[8]->store((longlong) thd->last_insert_id); + else + table->field[8]->set_null(); + + table->field[9]->store((longlong) server_id); + + /* sql_text */ + table->field[10]->store(sql_text,sql_text_len, + &my_charset_latin1); + + /* write the row */ + table->file->ha_write_row(table->record[0]); + + reenable_binlog(current_thd); + + DBUG_RETURN(0); +} + +bool Log_to_csv_event_handler:: + log_error(enum loglevel level, const char *format, va_list args) +{ + /* No log table is implemented */ + DBUG_ASSERT(0); + return FALSE; +} + +bool Log_to_file_event_handler:: + log_error(enum loglevel level, const char *format, + va_list args) +{ + return vprint_msg_to_log(level, format, args); +} + +void Log_to_file_event_handler::init_pthread_objects() +{ + mysql_log.init_pthread_objects(); + mysql_slow_log.init_pthread_objects(); +} + + +/* Wrapper around MYSQL_LOG::write() for slow log */ + +bool Log_to_file_event_handler:: + log_slow(THD *thd, time_t current_time, time_t query_start_arg, + const char *user_host, uint user_host_len, + longlong query_time, longlong lock_time, bool is_command, + const char *sql_text, uint sql_text_len) +{ + return mysql_slow_log.write(thd, current_time, query_start_arg, + user_host, user_host_len, + query_time, lock_time, is_command, + sql_text, sql_text_len); +} + + +/* + Wrapper around MYSQL_LOG::write() for general log. We need it since we + want all log event handlers to have the same signature. 
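
  The uniform signature is what lets LOGGER fan one event out to every
  active handler. A standalone sketch of that dispatch (illustrative
  classes, mirroring the NULL-terminated handler lists and the error
  OR-ing used by error_log_print below):

    #include <cstdio>

    struct Log_handler
    {
      virtual bool log_general(const char *user_host, const char *query)= 0;
      virtual ~Log_handler() {}
    };

    struct File_handler : Log_handler
    {
      bool log_general(const char *user_host, const char *query)
      { return std::fprintf(stderr, "%s: %s\n", user_host, query) < 0; }
    };

    static bool dispatch_general(Log_handler **list,
                                 const char *user_host, const char *query)
    {
      bool error= false;
      while (*list)                            // NULL terminates the list
        error= (*list++)->log_general(user_host, query) || error;
      return error;
    }

    int main()
    {
      File_handler file;
      Log_handler *handlers[]= {&file, 0};     // file-based handler only
      return dispatch_general(handlers, "root[root] @ localhost", "SELECT 1");
    }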
+*/
+
+bool Log_to_file_event_handler::
+  log_general(time_t event_time, const char *user_host,
+              uint user_host_len, int thread_id,
+              const char *command_type, uint command_type_len,
+              const char *sql_text, uint sql_text_len)
+{
+  return mysql_log.write(event_time, user_host, user_host_len,
+                         thread_id, command_type, command_type_len,
+                         sql_text, sql_text_len);
+}
+
+
+bool Log_to_file_event_handler::init()
+{
+  if (!is_initialized)
+  {
+    if (opt_slow_log)
+      mysql_slow_log.open_slow_log(opt_slow_logname);
+
+    if (opt_log)
+      mysql_log.open_query_log(opt_logname);
+
+    is_initialized= TRUE;
+  }
+
+  return FALSE;
+}
+
+
+void Log_to_file_event_handler::cleanup()
+{
+  mysql_log.cleanup();
+  mysql_slow_log.cleanup();
+}
+
+void Log_to_file_event_handler::flush()
+{
+  /* reopen log files */
+  mysql_log.new_file(1);
+  mysql_slow_log.new_file(1);
+}
+
+/*
+  Log an error with all enabled log event handlers
+
+  SYNOPSIS
+    error_log_print()
+
+    level             The level of the error significance: NOTE,
+                      WARNING or ERROR.
+    format            format string for the error message
+    args              list of arguments for the format string
+
+  RETURN
+    FALSE - OK
+    TRUE  - error occurred
+*/
+
+bool LOGGER::error_log_print(enum loglevel level, const char *format,
+                             va_list args)
+{
+  bool error= FALSE;
+  Log_event_handler **current_handler= error_log_handler_list;
+
+  /* currently we don't need locking here as there is no error_log table */
+  while (*current_handler)
+    error= (*current_handler++)->log_error(level, format, args) || error;
+
+  return error;
+}
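
error_log_print() above shows the dispatch idiom used by every LOGGER entry point: walk a NULL-terminated handler array and OR each handler's result into the error flag instead of short-circuiting, so one failing sink never silences the others. A stand-alone C++ reduction of the pattern (the handler classes here are illustrative, not the server's):

#include <cstdio>

// Illustration of the NULL-terminated handler-list pattern used by
// LOGGER: enabling/disabling a target just rewrites the array, and
// dispatch never needs to know how many handlers are installed.
struct Handler
{
  virtual bool log(const char *msg)= 0;
  virtual ~Handler() {}
};

struct File_handler : Handler
{
  bool log(const char *msg) { return std::printf("file: %s\n", msg) < 0; }
};

struct Table_handler : Handler
{
  bool log(const char *msg) { return std::printf("table: %s\n", msg) < 0; }
};

static File_handler file_h;
static Table_handler table_h;
static Handler *handlers[3]= { &file_h, &table_h, 0 }; // "LEGACY_AND_CSV"

int main()
{
  bool error= false;
  for (Handler **h= handlers; *h; h++)
    error= (*h)->log("SELECT 1") || error;  // accumulate, don't short-circuit
  return error;
}
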
+
+
+void LOGGER::cleanup()
+{
+  DBUG_ASSERT(inited == 1);
+  (void) pthread_mutex_destroy(&LOCK_logger);
+  if (table_log_handler)
+    table_log_handler->cleanup();
+  if (file_log_handler)
+    file_log_handler->cleanup();
+}
+
+
+void LOGGER::close_log_table(uint log_type, bool lock_in_use)
+{
+  table_log_handler->close_log_table(log_type, lock_in_use);
+}
+
+
+/*
+  Perform basic log initialization: create the file-based log handler and
+  init the error log.
+*/
+void LOGGER::init_base()
+{
+  DBUG_ASSERT(inited == 0);
+  inited= 1;
+
+  /*
+    Here we create the file log handler. We don't do it for the table log
+    handler here as it cannot be created so early. The reason is THD
+    initialization, which depends on the system variables (parsed later).
+  */
+  if (!file_log_handler)
+    file_log_handler= new Log_to_file_event_handler;
+
+  /* by default we use the traditional error log */
+  init_error_log(LEGACY);
+
+  file_log_handler->init_pthread_objects();
+  (void) pthread_mutex_init(&LOCK_logger, MY_MUTEX_INIT_SLOW);
+}
+
+
+void LOGGER::init_log_tables()
+{
+  if (!table_log_handler)
+    table_log_handler= new Log_to_csv_event_handler;
+
+  if (!is_log_tables_initialized &&
+      !table_log_handler->init() && !file_log_handler->init())
+    is_log_tables_initialized= TRUE;
+}
+
+
+bool LOGGER::reopen_log_table(uint log_type)
+{
+  return table_log_handler->reopen_log_table(log_type);
+}
+
+
+bool LOGGER::flush_logs(THD *thd)
+{
+  TABLE_LIST close_slow_log, close_general_log;
+
+  /* reopen log tables */
+  bzero((char*) &close_slow_log, sizeof(TABLE_LIST));
+  close_slow_log.alias= close_slow_log.table_name=(char*) "slow_log";
+  close_slow_log.table_name_length= 8;
+  close_slow_log.db= (char*) "mysql";
+  close_slow_log.db_length= 5;
+
+  bzero((char*) &close_general_log, sizeof(TABLE_LIST));
+  close_general_log.alias= close_general_log.table_name=(char*) "general_log";
+  close_general_log.table_name_length= 11;
+  close_general_log.db= (char*) "mysql";
+  close_general_log.db_length= 5;
+
+  /* reopen log files */
+  file_log_handler->flush();
+
+  /*
+    This will lock and wait for all but the logger thread to release the
+    tables. Then we can reopen the log tables and release the name locks.
+  */
+  lock_and_wait_for_table_name(thd, &close_slow_log);
+  lock_and_wait_for_table_name(thd, &close_general_log);
+
+  /*
+    prevent others from logging to the general and slow logs while the
+    tables are reopened
+  */
+  logger.lock();
+
+  table_log_handler->flush(thd, &close_slow_log, &close_general_log);
+
+  /* end of log tables flush */
+  logger.unlock();
+  return FALSE;
+}
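
flush_logs() above is a "quiesce, swap, resume" protocol: name locks drain the non-logger users of the tables, and LOCK_logger (taken by every log write through lock()/unlock()) is held while the tables are swapped, so no handler can observe a half-reopened log. A minimal stand-alone sketch of the same shape in plain pthreads and stdio; all names here are hypothetical, not server APIs:

#include <pthread.h>
#include <stdio.h>

/* Writers and the flusher share one mutex, so the swap of the
   underlying resource can never be seen half-done. */
static pthread_mutex_t logger_lock= PTHREAD_MUTEX_INITIALIZER;
static FILE *log_target;

static void log_write(const char *msg)
{
  pthread_mutex_lock(&logger_lock);
  if (log_target)
    fputs(msg, log_target);          /* always a fully (re)opened target */
  pthread_mutex_unlock(&logger_lock);
}

static void flush_logs(void)
{
  pthread_mutex_lock(&logger_lock);  /* deny writers during the swap */
  if (log_target)
    fclose(log_target);
  log_target= fopen("general.log", "a");  /* error handling elided */
  pthread_mutex_unlock(&logger_lock);
}

int main(void)
{
  flush_logs();                      /* initial open */
  log_write("before flush\n");
  flush_logs();
  log_write("after flush\n");
  if (log_target)
    fclose(log_target);
  return 0;
}
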
+
+
+/*
+  Log a slow query with all enabled log event handlers
+
+  SYNOPSIS
+    slow_log_print()
+
+    thd               THD of the query being logged
+    query             The query being logged
+    query_length      The length of the query string
+    query_start_arg   Query start timestamp
+
+  RETURN
+    FALSE - OK
+    TRUE  - error occurred
+*/
+
+bool LOGGER::slow_log_print(THD *thd, const char *query, uint query_length,
+                            time_t query_start_arg)
+{
+  bool error= FALSE;
+  Log_event_handler **current_handler= slow_log_handler_list;
+  bool is_command= FALSE;
+
+  char message_buff[MAX_LOG_BUFFER_SIZE];
+  char user_host_buff[MAX_USER_HOST_SIZE];
+
+  my_time_t current_time;
+  Security_context *sctx= thd->security_ctx;
+  uint message_buff_len= 0, user_host_len= 0;
+  longlong query_time= 0, lock_time= 0;
+  longlong last_insert_id= 0, insert_id= 0;
+
+  /*
+    Print the message to the buffer if we have the slow log enabled
+  */
+
+  if (*slow_log_handler_list)
+  {
+    current_time= time(NULL);
+
+    if (!(thd->options & OPTION_UPDATE_LOG))
+      return 0;
+
+    lock();
+
+    /* fill in user_host value: the format is "%s[%s] @ %s [%s]" */
+    user_host_len= strxnmov(user_host_buff, MAX_USER_HOST_SIZE,
+                            sctx->priv_user ? sctx->priv_user : "", "[",
+                            sctx->user ? sctx->user : "", "] @ ",
+                            sctx->host ? sctx->host : "", " [",
+                            sctx->ip ? sctx->ip : "", "]", NullS) -
+                   user_host_buff;
+
+    if (query_start_arg)
+    {
+      query_time= (longlong) (current_time - query_start_arg);
+      lock_time= (longlong) (thd->time_after_lock - query_start_arg);
+    }
+
+    if (thd->last_insert_id_used)
+      last_insert_id= (longlong) thd->current_insert_id;
+
+    /* set value if we do an insert on autoincrement column */
+    if (thd->insert_id_used)
+      insert_id= (longlong) thd->last_insert_id;
+
+    if (!query)
+    {
+      is_command= TRUE;
+      query= command_name[thd->command].str;
+      query_length= command_name[thd->command].length;
+    }
+
+    while (*current_handler)
+      error= (*current_handler++)->log_slow(thd, current_time, query_start_arg,
+                                            user_host_buff, user_host_len,
+                                            query_time, lock_time, is_command,
+                                            query, query_length) || error;
+
+    unlock();
+  }
+  return error;
+}
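
The strxnmov() call above fixes the user@host layout shared by the file and table logs: "%s[%s] @ %s [%s]", i.e. privileged user, login user, host, IP, with empty strings for any missing part. A stand-alone equivalent using standard snprintf() in place of the server's string helpers:

#include <stdio.h>

/* Illustration of the user_host layout built with strxnmov() above:
   priv_user[user] @ host [ip]. Plain snprintf() keeps the example
   self-contained. */
static int format_user_host(char *buf, size_t len,
                            const char *priv_user, const char *user,
                            const char *host, const char *ip)
{
  return snprintf(buf, len, "%s[%s] @ %s [%s]",
                  priv_user ? priv_user : "",
                  user ? user : "",
                  host ? host : "",
                  ip ? ip : "");
}

int main(void)
{
  char buf[128];
  int n= format_user_host(buf, sizeof(buf), "root", "root", "localhost", "");
  printf("%d bytes: %s\n", n, buf);  /* "root[root] @ localhost []" */
  return 0;
}
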
+
+bool LOGGER::general_log_print(THD *thd, enum enum_server_command command,
+                               const char *format, va_list args)
+{
+  bool error= FALSE;
+  Log_event_handler **current_handler= general_log_handler_list;
+
+  /*
+    Print the message to the buffer if we have at least one log event handler
+    enabled and want to log this kind of command
+  */
+  if (*general_log_handler_list && (what_to_log & (1L << (uint) command)))
+  {
+    char message_buff[MAX_LOG_BUFFER_SIZE];
+    char user_host_buff[MAX_USER_HOST_SIZE];
+    Security_context *sctx= thd->security_ctx;
+    ulong id;
+    uint message_buff_len= 0, user_host_len= 0;
+
+    if (thd)
+    {                                           /* Normal thread */
+      if ((thd->options & OPTION_LOG_OFF)
+#ifndef NO_EMBEDDED_ACCESS_CHECKS
+          && (sctx->master_access & SUPER_ACL)
+#endif
+         )
+      {
+        return 0;                               /* No logging */
+      }
+      id= thd->thread_id;
+    }
+    else
+      id=0;                                     /* Log from connect handler */
+
+    lock();
+    time_t current_time= time(NULL);
+
+    user_host_len= strxnmov(user_host_buff, MAX_USER_HOST_SIZE,
+                            sctx->priv_user ? sctx->priv_user : "", "[",
+                            sctx->user ? sctx->user : "", "] @ ",
+                            sctx->host ? sctx->host : "", " [",
+                            sctx->ip ? sctx->ip : "", "]", NullS) -
+                   user_host_buff;
+
+    /* prepare message */
+    if (format)
+      message_buff_len= my_vsnprintf(message_buff,
+                                     sizeof(message_buff), format, args);
+    else
+      message_buff[0]= '\0';
+
+    while (*current_handler)
+      error= (*current_handler++)->
+        log_general(current_time, user_host_buff,
+                    user_host_len, id,
+                    command_name[(uint) command].str,
+                    command_name[(uint) command].length,
+                    message_buff, message_buff_len) || error;
+    unlock();
+  }
+  return error;
+}
+
+void LOGGER::init_error_log(enum enum_printer error_log_printer)
+{
+  switch (error_log_printer) {
+  case NONE:
+    error_log_handler_list[0]= 0;
+    break;
+  case LEGACY:
+    error_log_handler_list[0]= file_log_handler;
+    error_log_handler_list[1]= 0;
+    break;
+    /* these two are disabled for now */
+  case CSV:
+    DBUG_ASSERT(0);
+    break;
+  case LEGACY_AND_CSV:
+    DBUG_ASSERT(0);
+    break;
+  }
+}
+
+void LOGGER::init_slow_log(enum enum_printer slow_log_printer)
+{
+  switch (slow_log_printer) {
+  case NONE:
+    slow_log_handler_list[0]= 0;
+    break;
+  case LEGACY:
+    slow_log_handler_list[0]= file_log_handler;
+    slow_log_handler_list[1]= 0;
+    break;
+  case CSV:
+    slow_log_handler_list[0]= table_log_handler;
+    slow_log_handler_list[1]= 0;
+    break;
+  case LEGACY_AND_CSV:
+    slow_log_handler_list[0]= file_log_handler;
+    slow_log_handler_list[1]= table_log_handler;
+    slow_log_handler_list[2]= 0;
+    break;
+  }
+}
+
+void LOGGER::init_general_log(enum enum_printer general_log_printer)
+{
+  switch (general_log_printer) {
+  case NONE:
+    general_log_handler_list[0]= 0;
+    break;
+  case LEGACY:
+    general_log_handler_list[0]= file_log_handler;
+    general_log_handler_list[1]= 0;
+    break;
+  case CSV:
+    general_log_handler_list[0]= table_log_handler;
+    general_log_handler_list[1]= 0;
+    break;
+  case LEGACY_AND_CSV:
+    general_log_handler_list[0]= file_log_handler;
+    general_log_handler_list[1]= table_log_handler;
+    general_log_handler_list[2]= 0;
+    break;
+  }
+}
+
+
+bool Log_to_csv_event_handler::flush(THD *thd, TABLE_LIST *close_slow_log,
+                                     TABLE_LIST *close_general_log)
+{
+  VOID(pthread_mutex_lock(&LOCK_open));
+  close_log_table(QUERY_LOG_GENERAL, TRUE);
+  close_log_table(QUERY_LOG_SLOW, TRUE);
+  close_general_log->next_local= close_slow_log;
+  query_cache_invalidate3(thd, close_general_log, 0);
+  unlock_table_name(thd, close_slow_log);
+  unlock_table_name(thd, close_general_log);
+  VOID(pthread_mutex_unlock(&LOCK_open));
+  return reopen_log_table(QUERY_LOG_SLOW) ||
+         reopen_log_table(QUERY_LOG_GENERAL);
+}
+
+/* the parameters are unused for the log tables */
+bool Log_to_csv_event_handler::init()
+{
+  /* we always open the log tables, even if logging is disabled */
+  return (open_log_table(QUERY_LOG_GENERAL) || open_log_table(QUERY_LOG_SLOW));
+}
+
+int LOGGER::set_handlers(enum enum_printer error_log_printer,
+                         enum enum_printer slow_log_printer,
+                         enum enum_printer general_log_printer)
+{
+  /* error log table is not supported yet */
+  DBUG_ASSERT(error_log_printer < CSV);
+
+  lock();
+
+  if ((slow_log_printer >= CSV || general_log_printer >= CSV) &&
+      !is_log_tables_initialized)
+  {
+    slow_log_printer= LEGACY;
+    general_log_printer= LEGACY;
+
+    sql_print_error("Failed to initialize log tables. "
+                    "Falling back to the old-fashioned logs");
+  }
+
+  init_error_log(error_log_printer);
+  init_slow_log(slow_log_printer);
+  init_general_log(general_log_printer);
+
+  unlock();
+
+  return 0;
+}
+
+
+/*
+  Close a log table of the given type (general or slow log)
+
+  SYNOPSIS
+    close_log_table()
+
+    log_type          type of the log table to close: QUERY_LOG_GENERAL
+                      or QUERY_LOG_SLOW
+    lock_in_use       Set to TRUE if the caller owns LOCK_open. FALSE otherwise.
+
+  DESCRIPTION
+
+    The function closes a log table. It is invoked (1) when we need to reopen
+    the log tables (e.g. FLUSH LOGS or TRUNCATE on a log table is being
+    executed) or (2) during shutdown.
+*/
+
+void Log_to_csv_event_handler::
+  close_log_table(uint log_type, bool lock_in_use)
+{
+  THD *log_thd, *curr= current_thd;
+  TABLE_LIST *table;
+
+  if (!logger.is_log_tables_initialized)
+    return;                                     /* do nothing */
+
+  switch (log_type) {
+  case QUERY_LOG_GENERAL:
+    log_thd= general_log_thd;
+    table= &general_log;
+    break;
+  case QUERY_LOG_SLOW:
+    log_thd= slow_log_thd;
+    table= &slow_log;
+    break;
+  default:
+    DBUG_ASSERT(0);
+  }
+
+  /*
+    Set thread stack start for the logger thread. See comment in
+    open_log_table() for details.
+  */
+  if (curr)
+    log_thd->thread_stack= curr->thread_stack;
+  else
+    log_thd->thread_stack= (char*) &log_thd;
+
+  /* close the table */
+  log_thd->store_globals();
+  table->table->file->ha_rnd_end();
+  /* discard logger mark before unlock */
+  table->table->locked_by_logger= FALSE;
+  close_thread_tables(log_thd, lock_in_use);
+
+  if (curr)
+    curr->store_globals();
+  else
+  {
+    my_pthread_setspecific_ptr(THR_THD, 0);
+    my_pthread_setspecific_ptr(THR_MALLOC, 0);
+  }
+}
+
+
 /*
   this function is mostly a placeholder.
   conceptually, binlog initialization (now mostly done in MYSQL_LOG::open)
@@ -1525,95 +2417,97 @@ err:
 
 /*
-  Write to normal (not rotable) log
-  This is the format for the 'normal' log.
+  Write a command to the traditional general log file
+
+  SYNOPSIS
+    write()
+
+    event_time        command start timestamp
+    user_host         the pointer to the string with user@host info
+    user_host_len     length of the user_host string. this is computed once
+                      and passed to all general log event handlers
+    thread_id         Id of the thread that issued the query
+    command_type      the type of the command being logged
+    command_type_len  the length of the string above
+    sql_text          the text of the query being executed
+    sql_text_len      the length of the sql_text string
+
+  DESCRIPTION
+
+   Log the given command to the normal (not rotatable) log file
+
+  RETURN
+    FALSE - OK
+    TRUE  - error occurred
 */
 
-bool MYSQL_LOG::write(THD *thd,enum enum_server_command command,
-                      const char *format,...)
+bool MYSQL_LOG::write(time_t event_time, const char *user_host,
+                      uint user_host_len, int thread_id,
+                      const char *command_type, uint command_type_len,
+                      const char *sql_text, uint sql_text_len)
 {
-  if (is_open() && (what_to_log & (1L << (uint) command)))
+  char buff[32];
+  uint length= 0;
+  char time_buff[MAX_TIME_SIZE];
+  struct tm start;
+  uint time_buff_len= 0;
+
+  /* Test if someone closed between the is_open test and lock */
+  if (is_open())
   {
-    uint length;
-    int error= 0;
-    VOID(pthread_mutex_lock(&LOCK_log));
+    /* Note that my_b_write() assumes it knows the length for this */
+    if (event_time != last_time)
+    {
+      last_time= event_time;
 
-    /* Test if someone closed between the is_open test and lock */
-    if (is_open())
-    {
-      time_t skr;
-      ulong id;
-      va_list args;
-      va_start(args,format);
-      char buff[32];
+      localtime_r(&event_time, &start);
 
-      if (thd)
-      {                                         // Normal thread
-        if ((thd->options & OPTION_LOG_OFF)
-#ifndef NO_EMBEDDED_ACCESS_CHECKS
-            && (thd->security_ctx->master_access & SUPER_ACL)
-#endif
-)
-        {
-          VOID(pthread_mutex_unlock(&LOCK_log));
-          return 0;                             // No logging
-        }
-        id=thd->thread_id;
-        if (thd->user_time || !(skr=thd->query_start()))
-          skr=time(NULL);                       // Connected
+      time_buff_len= my_snprintf(time_buff, MAX_TIME_SIZE,
+                                 "%02d%02d%02d %2d:%02d:%02d",
+                                 start.tm_year % 100, start.tm_mon + 1,
+                                 start.tm_mday, start.tm_hour,
+                                 start.tm_min, start.tm_sec);
+
+      if (my_b_write(&log_file, (byte*) &time_buff, time_buff_len))
+        goto err;
     }
     else
-    {                                           // Log from connect handler
-      skr=time(NULL);
-      id=0;
-    }
-    if (skr != last_time)
-    {
-      last_time=skr;
-      struct tm tm_tmp;
-      struct tm *start;
-      localtime_r(&skr,&tm_tmp);
-      start=&tm_tmp;
-      /* Note that my_b_write() assumes it knows the length for this */
-      sprintf(buff,"%02d%02d%02d %2d:%02d:%02d\t",
-              start->tm_year % 100,
-              start->tm_mon+1,
-              start->tm_mday,
-              start->tm_hour,
-              start->tm_min,
-              start->tm_sec);
-      if (my_b_write(&log_file, (byte*) buff,16))
-        error=errno;
-    }
-    else if (my_b_write(&log_file, (byte*) "\t\t",2) < 0)
-      error=errno;
-    length=my_sprintf(buff,
-                      (buff, "%7ld %-11.11s", id,
-                       command_name[(uint) command]));
-    if (my_b_write(&log_file, (byte*) buff,length))
-      error=errno;
-    if (format)
-    {
-      if (my_b_write(&log_file, (byte*) " ",1) ||
-          my_b_vprintf(&log_file,format,args) == (uint) -1)
-        error=errno;
-    }
-    if (my_b_write(&log_file, (byte*) "\n",1) ||
-        flush_io_cache(&log_file))
-      error=errno;
-    if (error && ! write_error)
-    {
-      write_error=1;
-      sql_print_error(ER(ER_ERROR_ON_WRITE),name,error);
-    }
-    va_end(args);
-  }
-  VOID(pthread_mutex_unlock(&LOCK_log));
-  return error != 0;
+      if (my_b_write(&log_file, (byte*) "\t\t" ,2) < 0)
+        goto err;
+
+    /* command_type, thread_id */
+    length= my_snprintf(buff, 32, "%5ld ", thread_id);
+
+    if (my_b_write(&log_file, (byte*) buff, length))
+      goto err;
+
+    if (my_b_write(&log_file, (byte*) command_type, command_type_len))
+      goto err;
+
+    if (my_b_write(&log_file, (byte*) "\t", 1))
+      goto err;
+
+    /* sql_text */
+    if (my_b_write(&log_file, (byte*) sql_text, sql_text_len))
+      goto err;
+
+    if (my_b_write(&log_file, (byte*) "\n", 1) ||
+        flush_io_cache(&log_file))
+      goto err;
   }
-  return 0;
+
+  return FALSE;
+err:
+
+  if (!write_error)
+  {
+    write_error= 1;
+    sql_print_error(ER(ER_ERROR_ON_WRITE), name, errno);
+  }
+  return TRUE;
 }
 
+
 bool MYSQL_LOG::flush_and_sync()
 {
   int err=0, fd=log_file.file;
@@ -2001,6 +2895,34 @@ err:
   DBUG_RETURN(error);
 }
 
+
+int error_log_print(enum loglevel level, const char *format,
+                    va_list args)
+{
+  return logger.error_log_print(level, format, args);
+}
+
+
+bool slow_log_print(THD *thd, const char *query, uint query_length,
+                    time_t query_start_arg)
+{
+  return logger.slow_log_print(thd, query, query_length, query_start_arg);
+}
+
+
+bool general_log_print(THD *thd, enum enum_server_command command,
+                       const char *format, ...)
+{
+  va_list args;
+  uint error= 0;
+
+  va_start(args, format);
+  error= logger.general_log_print(thd, command, format, args);
+  va_end(args);
+
+  return error;
+}
+
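
These three free functions are the new public face of the logging subsystem: callers no longer touch MYSQL_LOG objects directly. general_log_print() is the classic variadic facade, capturing its arguments once with va_start() and handing a reusable va_list to the worker. The shape, reduced to a stand-alone C program with illustrative names:

#include <stdarg.h>
#include <stdio.h>

/* Minimal version of the facade pattern used by general_log_print():
   the public entry point is variadic; the worker takes a va_list so
   several entry points (or handlers) can share it. */
static int log_vprint(const char *fmt, va_list args)
{
  return vfprintf(stderr, fmt, args) < 0;
}

static int log_print(const char *fmt, ...)
{
  va_list args;
  int error;

  va_start(args, fmt);
  error= log_vprint(fmt, args);
  va_end(args);
  return error;
}

int main(void)
{
  return log_print("thread %d ran: %s\n", 1, "SELECT 1");
}
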
 void MYSQL_LOG::rotate_and_purge(uint flags)
 {
   if (!(flags & RP_LOCK_LOG_IS_ALREADY_LOCKED))
@@ -2145,71 +3067,86 @@ err:
 
 /*
-  Write to the slow query log.
+  Log a query to the traditional slow log file
+
+  SYNOPSIS
+    write()
+
+    thd               THD of the query
+    current_time      current timestamp
+    query_start_arg   command start timestamp
+    user_host         the pointer to the string with user@host info
+    user_host_len     length of the user_host string. this is computed once
+                      and passed to all slow log event handlers
+    query_time        Amount of time the query took to execute (in seconds)
+    lock_time         Amount of time the query was locked (in seconds)
+    is_command        Flag that determines whether the sql_text is a
+                      query or an administrator command.
+    sql_text          the text of the query or administrator command
+                      processed
+    sql_text_len      the length of the sql_text string
+
+  DESCRIPTION
+
+   Log a query to the slow log file.
+
+  RETURN
+    FALSE - OK
+    TRUE  - error occurred
 */
 
-bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
-                      time_t query_start_arg)
+bool MYSQL_LOG::write(THD *thd, time_t current_time, time_t query_start_arg,
+                      const char *user_host, uint user_host_len,
+                      longlong query_time, longlong lock_time, bool is_command,
+                      const char *sql_text, uint sql_text_len)
 {
-  bool error=0;
-  time_t current_time;
-  if (!is_open())
-    return 0;
+  bool error= 0;
   DBUG_ENTER("MYSQL_LOG::write");
 
-  VOID(pthread_mutex_lock(&LOCK_log));
+  if (!is_open())
+    DBUG_RETURN(0);
+
   if (is_open())
   {                                             // Safety agains reopen
-    int tmp_errno=0;
-    char buff[80],*end;
-    end=buff;
-    if (!(thd->options & OPTION_UPDATE_LOG))
-    {
-      VOID(pthread_mutex_unlock(&LOCK_log));
-      DBUG_RETURN(0);
-    }
-    if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT) || query_start_arg)
+    int tmp_errno= 0;
+    char buff[80], *end;
+    uint buff_len;
+    end= buff;
+
+    if (!(specialflag & SPECIAL_SHORT_LOG_FORMAT))
     {
       Security_context *sctx= thd->security_ctx;
-      current_time=time(NULL);
       if (current_time != last_time)
       {
-        last_time=current_time;
-        struct tm tm_tmp;
-        struct tm *start;
-        localtime_r(&current_time,&tm_tmp);
-        start=&tm_tmp;
+        last_time= current_time;
+        struct tm start;
+        localtime_r(&current_time, &start);
+
+        buff_len= my_snprintf(buff, sizeof buff,
+                              "# Time: %02d%02d%02d %2d:%02d:%02d\n",
+                              start.tm_year % 100, start.tm_mon + 1,
+                              start.tm_mday, start.tm_hour,
+                              start.tm_min, start.tm_sec);
+
         /* Note that my_b_write() assumes it knows the length for this */
-        sprintf(buff,"# Time: %02d%02d%02d %2d:%02d:%02d\n",
-                start->tm_year % 100,
-                start->tm_mon+1,
-                start->tm_mday,
-                start->tm_hour,
-                start->tm_min,
-                start->tm_sec);
-        if (my_b_write(&log_file, (byte*) buff,24))
+        if (my_b_write(&log_file, (byte*) buff, buff_len))
           tmp_errno=errno;
       }
-      if (my_b_printf(&log_file, "# User@Host: %s[%s] @ %s [%s]\n",
-                      sctx->priv_user ?
-                      sctx->priv_user : "",
-                      sctx->user ? sctx->user : "",
-                      sctx->host ? sctx->host : "",
-                      sctx->ip ? sctx->ip : "") ==
-          (uint) -1)
-        tmp_errno=errno;
-    }
-    if (query_start_arg)
-    {
-      /* For slow query log */
-      if (my_b_printf(&log_file,
-                      "# Query_time: %lu Lock_time: %lu Rows_sent: %lu Rows_examined: %lu\n",
-                      (ulong) (current_time - query_start_arg),
-                      (ulong) (thd->time_after_lock - query_start_arg),
-                      (ulong) thd->sent_row_count,
-                      (ulong) thd->examined_row_count) == (uint) -1)
+      if (my_b_write(&log_file, (byte*) "# User@Host: ",
+                     sizeof("# User@Host: ") - 1))
+        tmp_errno=errno;
+      if (my_b_write(&log_file, (byte*) user_host, user_host_len))
+        tmp_errno=errno;
+      if (my_b_write(&log_file, (byte*) "\n", 1))
        tmp_errno=errno;
     }
+    /* For slow query log */
+    if (my_b_printf(&log_file,
+                    "# Query_time: %lu Lock_time: %lu"
+                    " Rows_sent: %lu Rows_examined: %lu\n",
+                    (ulong) query_time, (ulong) lock_time,
+                    (ulong) thd->sent_row_count,
+                    (ulong) thd->examined_row_count) == (uint) -1)
+      tmp_errno=errno;
     if (thd->db && strcmp(thd->db,db))
     {                                           // Database changed
      if (my_b_printf(&log_file,"use %s;\n",thd->db) == (uint) -1)
@@ -2230,15 +3167,15 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
       end=longlong10_to_str((longlong) thd->last_insert_id,end,-10);
     }
   }
-  if (thd->query_start_used)
-  {
-    if (query_start_arg != thd->query_start())
-    {
-      query_start_arg=thd->query_start();
-      end=strmov(end,",timestamp=");
-      end=int10_to_str((long) query_start_arg,end,10);
-    }
-  }
+
+  /*
+    This info used to show up randomly, depending on whether the query
+    checked the query start time or not. Now we always write the current
+    timestamp to the slow log
+  */
+  end= strmov(end, ",timestamp=");
+  end= int10_to_str((long) current_time, end, 10);
+
   if (end != buff)
   {
     *end++=';';
@@ -2247,14 +3184,13 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
        my_b_write(&log_file, (byte*) buff+1,(uint) (end-buff)))
      tmp_errno=errno;
   }
-    if (!query)
+    if (is_command)
     {
-      end=strxmov(buff, "# administrator command: ",
-                  command_name[thd->command], NullS);
-      query_length=(ulong) (end-buff);
-      query=buff;
+      end= strxmov(buff, "# administrator command: ", NullS);
+      buff_len= (ulong) (end - buff);
+      my_b_write(&log_file, (byte*) buff, buff_len);
     }
-    if (my_b_write(&log_file, (byte*) query,query_length) ||
+    if (my_b_write(&log_file, (byte*) sql_text, sql_text_len) ||
         my_b_write(&log_file, (byte*) ";\n",2) ||
         flush_io_cache(&log_file))
       tmp_errno=errno;
@@ -2268,7 +3204,6 @@ bool MYSQL_LOG::write(THD *thd,const char *query, uint query_length,
     }
   }
-  VOID(pthread_mutex_unlock(&LOCK_log));
   DBUG_RETURN(error);
 }
 
@@ -2461,6 +3396,7 @@ void print_buffer_to_file(enum loglevel level, const char *buffer)
   skr=time(NULL);
   localtime_r(&skr, &tm_tmp);
   start=&tm_tmp;
+
   fprintf(stderr, "%02d%02d%02d %2d:%02d:%02d [%s] %s\n",
           start->tm_year % 100,
           start->tm_mon+1,
@@ -2647,23 +3583,26 @@ void print_buffer_to_nt_eventlog(enum loglevel level, char *buff,
   to other functions to write that message to other logging sources.
 
   RETURN VALUES
-    void
+    The function always returns 0. The return value is present in the
+    signature to be compatible with other logging routines, which could
+    return an error (e.g. logging to the log tables)
 */
 
-void vprint_msg_to_log(enum loglevel level, const char *format, va_list args)
+int vprint_msg_to_log(enum loglevel level, const char *format, va_list args)
 {
   char buff[1024];
   uint length;
   DBUG_ENTER("vprint_msg_to_log");
 
-  length= my_vsnprintf(buff, sizeof(buff)-5, format, args);
+  /* "- 5" is because of print_buffer_to_nt_eventlog() */
+  length= my_vsnprintf(buff, sizeof(buff) - 5, format, args);
   print_buffer_to_file(level, buff);
 
 #ifdef __NT__
   print_buffer_to_nt_eventlog(level, buff, length, sizeof(buff));
 #endif
 
-  DBUG_VOID_RETURN;
+  DBUG_RETURN(0);
 }
 
 
@@ -2673,7 +3612,7 @@ void sql_print_error(const char *format, ...)
   DBUG_ENTER("sql_print_error");
 
   va_start(args, format);
-  vprint_msg_to_log(ERROR_LEVEL, format, args);
+  error_log_print(ERROR_LEVEL, format, args);
   va_end(args);
 
   DBUG_VOID_RETURN;
@@ -2686,7 +3625,7 @@ void sql_print_warning(const char *format, ...)
   DBUG_ENTER("sql_print_warning");
 
   va_start(args, format);
-  vprint_msg_to_log(WARNING_LEVEL, format, args);
+  error_log_print(WARNING_LEVEL, format, args);
   va_end(args);
 
   DBUG_VOID_RETURN;
@@ -2699,7 +3638,7 @@ void sql_print_information(const char *format, ...)
   DBUG_ENTER("sql_print_information");
 
   va_start(args, format);
-  vprint_msg_to_log(INFORMATION_LEVEL, format, args);
+  error_log_print(INFORMATION_LEVEL, format, args);
   va_end(args);
 
   DBUG_VOID_RETURN;
diff --git a/sql/log.h b/sql/log.h
index ea2946c2d86..d709a73a391 100644
--- a/sql/log.h
+++ b/sql/log.h
@@ -132,6 +132,21 @@ typedef struct st_log_info
   ~st_log_info() { pthread_mutex_destroy(&lock);}
 } LOG_INFO;
 
+/*
+  Currently we have only 3 kinds of logging functions: old-fashioned
+  logs, stdout and csv logging routines.
+*/
+#define MAX_LOG_HANDLERS_NUM 3
+
+enum enum_printer
+{
+  NONE,
+  LEGACY,
+  CSV,
+  LEGACY_AND_CSV
+};
+
+
 class Log_event;
 class Rows_log_event;
 
@@ -276,10 +291,18 @@ public:
   bool open_index_file(const char *index_file_name_arg,
                        const char *log_name);
   void new_file(bool need_lock);
-  bool write(THD *thd, enum enum_server_command command,
-             const char *format,...);
-  bool write(THD *thd, const char *query, uint query_length,
-             time_t query_start=0);
+  /* log a command to the old-fashioned general log */
+  bool write(time_t event_time, const char *user_host,
+             uint user_host_len, int thread_id,
+             const char *command_type, uint command_type_len,
+             const char *sql_text, uint sql_text_len);
+
+  /* log a query to the old-fashioned slow query log */
+  bool write(THD *thd, time_t current_time, time_t query_start_arg,
+             const char *user_host, uint user_host_len,
+             longlong query_time, longlong lock_time, bool is_command,
+             const char *sql_text, uint sql_text_len);
+
   bool write(Log_event* event_info); // binary log write
   bool write(THD *thd, IO_CACHE *cache, Log_event *commit_event);
 
@@ -329,4 +352,151 @@ public:
   inline uint32 get_open_count() { return open_count; }
 };
 
+class Log_event_handler
+{
+public:
+  virtual bool init()= 0;
+  virtual void cleanup()= 0;
+
+  virtual bool log_slow(THD *thd, time_t current_time,
+                        time_t query_start_arg, const char *user_host,
+                        uint user_host_len, longlong query_time,
+                        longlong lock_time, bool is_command,
+                        const char *sql_text, uint sql_text_len)= 0;
+  virtual bool log_error(enum loglevel level, const char *format,
+                         va_list args)= 0;
+  virtual bool log_general(time_t event_time, const char *user_host,
+                           uint user_host_len, int thread_id,
+                           const char *command_type, uint command_type_len,
+                           const char *sql_text, uint sql_text_len)= 0;
+  virtual ~Log_event_handler() {}
+};
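
Log_event_handler is the extension point: any sink that can consume slow, general and error events can be placed in LOGGER's handler arrays. A hypothetical stderr sink, sketched with stubbed types so it compiles outside the server tree; it is not part of this patch and only illustrates what the three pure-virtual methods oblige an implementor to provide:

#include <cstdarg>
#include <cstdio>
#include <ctime>

// Hypothetical handler mirroring the interface above. THD and loglevel
// are stubs here; in a real build they come from the server headers.
class THD;
enum loglevel { ERROR_LEVEL, WARNING_LEVEL, INFORMATION_LEVEL };

class Stderr_event_handler
{
public:
  bool init() { return false; }
  void cleanup() {}

  bool log_general(time_t event_time, const char *user_host,
                   unsigned user_host_len, int thread_id,
                   const char *command_type, unsigned command_type_len,
                   const char *sql_text, unsigned sql_text_len)
  {
    return std::fprintf(stderr, "%ld thd=%d %.*s %.*s: %.*s\n",
                        (long) event_time, thread_id,
                        (int) user_host_len, user_host,
                        (int) command_type_len, command_type,
                        (int) sql_text_len, sql_text) < 0;
  }

  bool log_slow(THD *, time_t, time_t, const char *user_host,
                unsigned user_host_len, long long query_time,
                long long lock_time, bool /* is_command */,
                const char *sql_text, unsigned sql_text_len)
  {
    return std::fprintf(stderr, "slow (%llds, locked %llds) %.*s: %.*s\n",
                        query_time, lock_time,
                        (int) user_host_len, user_host,
                        (int) sql_text_len, sql_text) < 0;
  }

  bool log_error(loglevel, const char *format, va_list args)
  {
    return std::vfprintf(stderr, format, args) < 0;
  }
};

int main()
{
  Stderr_event_handler h;
  h.log_general(std::time(0), "root[root] @ localhost []", 25,
                1, "Query", 5, "SELECT 1", 8);
  return 0;
}
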
+
+
+class Log_to_csv_event_handler: public Log_event_handler
+{
+  /*
+    We create an artificial THD for each of the logs. This is to avoid
+    locking issues: we don't want locks on the log tables to reside in the
+    THDs of the queries being logged. The reason is the locking order and
+    duration.
+  */
+  THD *general_log_thd, *slow_log_thd;
+  friend class LOGGER;
+  TABLE_LIST general_log, slow_log;
+
+private:
+  bool open_log_table(uint log_type);
+
+public:
+  Log_to_csv_event_handler();
+  ~Log_to_csv_event_handler();
+  virtual bool init();
+  virtual void cleanup();
+
+  virtual bool log_slow(THD *thd, time_t current_time,
+                        time_t query_start_arg, const char *user_host,
+                        uint user_host_len, longlong query_time,
+                        longlong lock_time, bool is_command,
+                        const char *sql_text, uint sql_text_len);
+  virtual bool log_error(enum loglevel level, const char *format,
+                         va_list args);
+  virtual bool log_general(time_t event_time, const char *user_host,
+                           uint user_host_len, int thread_id,
+                           const char *command_type, uint command_type_len,
+                           const char *sql_text, uint sql_text_len);
+  bool flush(THD *thd, TABLE_LIST *close_slow_log,
+             TABLE_LIST *close_general_log);
+  void close_log_table(uint log_type, bool lock_in_use);
+  bool reopen_log_table(uint log_type);
+};
+
+
+class Log_to_file_event_handler: public Log_event_handler
+{
+  MYSQL_LOG mysql_log, mysql_slow_log;
+  bool is_initialized;
+public:
+  Log_to_file_event_handler(): is_initialized(FALSE)
+  {}
+  virtual bool init();
+  virtual void cleanup();
+
+  virtual bool log_slow(THD *thd, time_t current_time,
+                        time_t query_start_arg, const char *user_host,
+                        uint user_host_len, longlong query_time,
+                        longlong lock_time, bool is_command,
+                        const char *sql_text, uint sql_text_len);
+  virtual bool log_error(enum loglevel level, const char *format,
+                         va_list args);
+  virtual bool log_general(time_t event_time, const char *user_host,
+                           uint user_host_len, int thread_id,
+                           const char *command_type, uint command_type_len,
+                           const char *sql_text, uint sql_text_len);
+  void flush();
+  void init_pthread_objects();
+};
+
+
+/* Class which manages slow, general and error log event handlers */
+class LOGGER
+{
+  pthread_mutex_t LOCK_logger;
+  /* flag to check whether logger mutex is initialized */
+  uint inited;
+
+  /* available log handlers */
+  Log_to_csv_event_handler *table_log_handler;
+  Log_to_file_event_handler *file_log_handler;
+
+  /* NULL-terminated arrays of log handlers */
+  Log_event_handler *error_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
+  Log_event_handler *slow_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
+  Log_event_handler *general_log_handler_list[MAX_LOG_HANDLERS_NUM + 1];
+
+public:
+
+  bool is_log_tables_initialized;
+
+  LOGGER() : inited(0), table_log_handler(NULL),
+             file_log_handler(NULL), is_log_tables_initialized(FALSE)
+  {}
+  void lock() { (void) pthread_mutex_lock(&LOCK_logger); }
+  void unlock() { (void) pthread_mutex_unlock(&LOCK_logger); }
+  /*
+    We want to initialize all log mutexes as soon as possible,
+    but we cannot do it in the constructor, as safe_mutex relies on
+    initialization performed by MY_INIT(). That is why it is done in
+    this function.
+ */ + void init_base(); + void init_log_tables(); + bool flush_logs(THD *thd); + THD *get_general_log_thd() + { + return (THD *) table_log_handler->general_log_thd; + } + THD *get_slow_log_thd() + { + return (THD *) table_log_handler->slow_log_thd; + } + void cleanup(); + bool error_log_print(enum loglevel level, const char *format, + va_list args); + bool slow_log_print(THD *thd, const char *query, uint query_length, + time_t query_start_arg); + bool general_log_print(THD *thd,enum enum_server_command command, + const char *format, va_list args); + + void close_log_table(uint log_type, bool lock_in_use); + bool reopen_log_table(uint log_type); + + /* we use this function to setup all enabled log event handlers */ + int set_handlers(enum enum_printer error_log_printer, + enum enum_printer slow_log_printer, + enum enum_printer general_log_printer); + void init_error_log(enum enum_printer error_log_printer); + void init_slow_log(enum enum_printer slow_log_printer); + void init_general_log(enum enum_printer general_log_printer); + }; + #endif /* LOG_H */ diff --git a/sql/log_event.cc b/sql/log_event.cc index 0c95e0ec1ef..f63c454b5ed 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1795,7 +1795,7 @@ START SLAVE; . Query: '%s'", expected_error, thd->query); /* If the query was not ignored, it is printed to the general log */ if (thd->net.last_errno != ER_SLAVE_IGNORED_TABLE) - mysql_log.write(thd,COM_QUERY,"%s",thd->query); + general_log_print(thd, COM_QUERY, "%s", thd->query); compare_errors: @@ -3513,7 +3513,8 @@ void Xid_log_event::print(FILE* file, PRINT_EVENT_INFO* print_event_info) int Xid_log_event::exec_event(struct st_relay_log_info* rli) { /* For a slave Xid_log_event is COMMIT */ - mysql_log.write(thd,COM_QUERY,"COMMIT /* implicit, from Xid_log_event */"); + general_log_print(thd, COM_QUERY, + "COMMIT /* implicit, from Xid_log_event */"); return end_trans(thd, COMMIT) || Log_event::exec_event(rli); } #endif /* !MYSQL_CLIENT */ @@ -6171,7 +6172,7 @@ char const *Write_rows_log_event::do_prepare_row(THD *thd, TABLE *table, */ DBUG_ASSERT(table->s->fields >= m_width); DBUG_ASSERT(ptr); - ptr= unpack_row(table, table->record[0], ptr, &m_cols); + ptr= unpack_row(table, (byte*)table->record[0], ptr, &m_cols); return ptr; } diff --git a/sql/log_event.h b/sql/log_event.h index 8b665755aab..a7c532d4c24 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -1784,7 +1784,7 @@ public: /* Special constants representing sets of flags */ enum { - NO_FLAGS = 0U + RLE_NO_FLAGS = 0U }; virtual ~Rows_log_event(); diff --git a/sql/mysql_priv.h b/sql/mysql_priv.h index 026234caf34..71f64d7632a 100644 --- a/sql/mysql_priv.h +++ b/sql/mysql_priv.h @@ -477,6 +477,11 @@ inline THD *_current_thd(void) } #define current_thd _current_thd() +/* below functions are required for plugins as THD class is opaque */ +my_bool thd_in_lock_tables(const THD *thd); +my_bool thd_tablespace_op(const THD *thd); +const char *thd_proc_info(THD *thd, const char *info); + /* External variables */ @@ -507,7 +512,9 @@ enum enum_var_type class sys_var; #include "item.h" extern my_decimal decimal_zero; +#ifdef MYSQL_SERVER typedef Comp_creator* (*chooser_compare_func_creator)(bool invert); +#endif /* sql_parse.cc */ void free_items(Item *item); void cleanup_items(Item *item); @@ -545,6 +552,7 @@ Item *negate_expression(THD *thd, Item *expr); #include "sql_class.h" #include "sql_acl.h" #include "tztime.h" +#ifdef MYSQL_SERVER #include "opt_range.h" #ifdef HAVE_QUERY_CACHE @@ -595,6 +603,11 @@ struct Query_cache_query_flags 
#define query_cache_invalidate_by_MyISAM_filename_ref NULL #endif /*HAVE_QUERY_CACHE*/ +uint build_table_path(char *buff, size_t bufflen, const char *db, + const char *table, const char *ext); +void write_bin_log(THD *thd, bool clear_error, + char const *query, ulong query_length); + bool mysql_create_db(THD *thd, char *db, HA_CREATE_INFO *create, bool silent); bool mysql_alter_db(THD *thd, const char *db, HA_CREATE_INFO *create); bool mysql_rm_db(THD *thd,char *db,bool if_exists, bool silent); @@ -836,6 +849,8 @@ find_field_in_table(THD *thd, TABLE *table, const char *name, uint length, Field * find_field_in_table_sef(TABLE *table, const char *name); +#endif /* MYSQL_SERVER */ + #ifdef HAVE_OPENSSL #include struct st_des_keyblock @@ -853,6 +868,7 @@ extern pthread_mutex_t LOCK_des_key_file; bool load_des_key_file(const char *file_name); #endif /* HAVE_OPENSSL */ +#ifdef MYSQL_SERVER /* sql_do.cc */ bool mysql_do(THD *thd, List &values); @@ -892,7 +908,7 @@ void free_status_vars(); /* information schema */ extern LEX_STRING information_schema_name; -const extern LEX_STRING partition_keywords[]; +extern const LEX_STRING partition_keywords[]; LEX_STRING *make_lex_string(THD *thd, LEX_STRING *lex_str, const char* str, uint length, bool allocate_lex_string); @@ -1035,6 +1051,22 @@ void remove_db_from_cache(const char *db); void flush_tables(); bool is_equal(const LEX_STRING *a, const LEX_STRING *b); +#ifdef WITH_PARTITION_STORAGE_ENGINE +uint fast_alter_partition_table(THD *thd, TABLE *table, + ALTER_INFO *alter_info, + HA_CREATE_INFO *create_info, + TABLE_LIST *table_list, + List *create_list, + List *key_list, const char *db, + const char *table_name, + uint fast_alter_partition); +uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info, + HA_CREATE_INFO *create_info, + handlerton *old_db_type, + bool *partition_changed, + uint *fast_alter_partition); +#endif + /* bits for last argument to remove_table_from_cache() */ #define RTFC_NO_FLAG 0x0000 #define RTFC_OWNED_BY_THD_FLAG 0x0001 @@ -1043,6 +1075,36 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b); bool remove_table_from_cache(THD *thd, const char *db, const char *table, uint flags); +typedef struct st_lock_param_type +{ + ulonglong copied; + ulonglong deleted; + THD *thd; + HA_CREATE_INFO *create_info; + List *create_list; + List new_create_list; + List *key_list; + List new_key_list; + TABLE *table; + KEY *key_info_buffer; + const char *db; + const char *table_name; + const void *pack_frm_data; + enum thr_lock_type old_lock_type; + uint key_count; + uint db_options; + uint pack_frm_len; +} ALTER_PARTITION_PARAM_TYPE; + +void mem_alloc_error(size_t size); +#define WFRM_INITIAL_WRITE 1 +#define WFRM_CREATE_HANDLER_FILES 2 +#define WFRM_PACK_FRM 4 +bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags); +bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt); +void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt); +void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table); + bool close_cached_tables(THD *thd, bool wait_for_refresh, TABLE_LIST *tables, bool have_lock = FALSE); void copy_field_from_tmp_record(Field *field,int offset); bool fill_record(THD *thd, Field **field, List &values, @@ -1118,17 +1180,30 @@ int key_cmp(KEY_PART_INFO *key_part, const byte *key, uint key_length); int key_rec_cmp(void *key_info, byte *a, byte *b); bool init_errmessage(void); +#endif /* MYSQL_SERVER */ void sql_perror(const char *message); -void vprint_msg_to_log(enum 
loglevel level, const char *format, va_list args); +int vprint_msg_to_log(enum loglevel level, const char *format, va_list args); void sql_print_error(const char *format, ...); void sql_print_warning(const char *format, ...); void sql_print_information(const char *format, ...); +/* type of the log table */ +#define QUERY_LOG_SLOW 1 +#define QUERY_LOG_GENERAL 2 +int error_log_print(enum loglevel level, const char *format, + va_list args); + +bool slow_log_print(THD *thd, const char *query, uint query_length, + time_t query_start_arg); + +bool general_log_print(THD *thd, enum enum_server_command command, + const char *format,...); bool fn_format_relative_to_data_home(my_string to, const char *name, const char *dir, const char *extension); +#ifdef MYSQL_SERVER File open_binlog(IO_CACHE *log, const char *log_file_name, const char **errmsg); @@ -1166,7 +1241,7 @@ extern char *mysql_data_home,server_version[SERVER_VERSION_LENGTH], def_ft_boolean_syntax[sizeof(ft_boolean_syntax)]; #define mysql_tmpdir (my_tmpdir(&mysql_tmpdir_list)) extern MY_TMPDIR mysql_tmpdir_list; -extern const char *command_name[]; +extern LEX_STRING command_name[]; extern const char *first_keyword, *my_localhost, *delayed_user, *binary_keyword; extern const char **errmesg; /* Error messages */ extern const char *myisam_recover_options_str; @@ -1228,6 +1303,7 @@ extern my_bool locked_in_memory; extern bool opt_using_transactions, mysqld_embedded; extern bool using_update_log, opt_large_files, server_id_supplied; extern bool opt_log, opt_update_log, opt_bin_log, opt_slow_log, opt_error_log; +extern bool opt_old_log_format; extern bool opt_disable_networking, opt_skip_show_db; extern my_bool opt_character_set_client_handshake; extern bool volatile abort_loop, shutdown_in_progress, grant_option; @@ -1249,7 +1325,9 @@ extern char *default_tz_name; extern my_bool opt_large_pages; extern uint opt_large_page_size; -extern MYSQL_LOG mysql_log,mysql_slow_log,mysql_bin_log; +extern MYSQL_LOG mysql_bin_log; +extern LOGGER logger; +extern TABLE_LIST general_log, slow_log; extern FILE *bootstrap_file; extern int bootstrap_error; extern FILE *stderror_file; @@ -1379,7 +1457,9 @@ void mysql_unlock_tables(THD *thd, MYSQL_LOCK *sql_lock); void mysql_unlock_read_tables(THD *thd, MYSQL_LOCK *sql_lock); void mysql_unlock_some_tables(THD *thd, TABLE **table,uint count); void mysql_lock_remove(THD *thd, MYSQL_LOCK *locked,TABLE *table); -void mysql_lock_abort(THD *thd, TABLE *table); +void mysql_lock_abort(THD *thd, TABLE *table, bool upgrade_lock); +void mysql_lock_downgrade_write(THD *thd, TABLE *table, + thr_lock_type new_lock_type); bool mysql_lock_abort_for_thread(THD *thd, TABLE *table); MYSQL_LOCK *mysql_lock_merge(MYSQL_LOCK *a,MYSQL_LOCK *b); TABLE_LIST *mysql_lock_have_duplicate(THD *thd, TABLE_LIST *needle, @@ -1431,9 +1511,7 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags); void open_table_error(TABLE_SHARE *share, int error, int db_errno, int errarg); int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, uint db_stat, uint prgflag, uint ha_open_flags, - TABLE *outparam); -int openfrm(THD *thd, const char *name,const char *alias,uint filestat, - uint prgflag, uint ha_open_flags, TABLE *outparam); + TABLE *outparam, bool is_create_table); int readfrm(const char *name, const void** data, uint* length); int writefrm(const char* name, const void* data, uint len); int closefrm(TABLE *table, bool free_share); @@ -1674,4 +1752,5 @@ inline void kill_delayed_threads(void) {} #define check_stack_overrun(A, 
B, C) 0 #endif +#endif /* MYSQL_SERVER */ #endif /* MYSQL_CLIENT */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 8bc005705c9..0712a448f06 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -315,6 +315,7 @@ static const char *sql_mode_str= "OFF"; static char *mysqld_user, *mysqld_chroot, *log_error_file_ptr; static char *opt_init_slave, *language_ptr, *opt_init_connect; static char *default_character_set_name; +static char *character_set_filesystem_name; static char *my_bind_addr_str; static char *default_collation_name; static char mysql_data_home_buff[2]; @@ -331,6 +332,9 @@ static my_bool opt_sync_bdb_logs; bool opt_log, opt_update_log, opt_bin_log, opt_slow_log; bool opt_error_log= IF_WIN(1,0); +#ifdef WITH_CSV_STORAGE_ENGINE +bool opt_old_log_format, opt_both_log_formats; +#endif bool opt_disable_networking=0, opt_skip_show_db=0; my_bool opt_character_set_client_handshake= 1; bool server_id_supplied = 0; @@ -408,6 +412,7 @@ extern char *berkeley_home, *berkeley_tmpdir, *berkeley_logdir; extern long berkeley_lock_scan_time; extern TYPELIB berkeley_lock_typelib; #endif + #ifdef WITH_NDBCLUSTER_STORAGE_ENGINE const char *opt_ndbcluster_connectstring= 0; const char *opt_ndb_connectstring= 0; @@ -561,6 +566,7 @@ MY_BITMAP temp_pool; CHARSET_INFO *system_charset_info, *files_charset_info ; CHARSET_INFO *national_charset_info, *table_alias_charset; +CHARSET_INFO *character_set_filesystem; SHOW_COMP_OPTION have_row_based_replication; SHOW_COMP_OPTION have_raid, have_openssl, have_symlink, have_query_cache; @@ -602,6 +608,7 @@ char *opt_relay_logname = 0, *opt_relaylog_index_name=0; my_bool master_ssl; char *master_ssl_key, *master_ssl_cert; char *master_ssl_ca, *master_ssl_capath, *master_ssl_cipher; +char *opt_logname, *opt_slow_logname; /* Static variables */ @@ -609,8 +616,8 @@ static bool kill_in_progress, segfaulted; static my_bool opt_do_pstack, opt_bootstrap, opt_myisam_log; static int cleanup_done; static ulong opt_specialflag, opt_myisam_block_size; -static char *opt_logname, *opt_update_logname, *opt_binlog_index_name; -static char *opt_slow_logname, *opt_tc_heuristic_recover; +static char *opt_update_logname, *opt_binlog_index_name; +static char *opt_tc_heuristic_recover; static char *mysql_home_ptr, *pidfile_name_ptr; static char **defaults_argv; static char *opt_bin_logname; @@ -1137,8 +1144,7 @@ void clean_up(bool print_message) if (cleanup_done++) return; /* purecov: inspected */ - mysql_log.cleanup(); - mysql_slow_log.cleanup(); + logger.cleanup(); /* make sure that handlers finish up what they have that is dependent on the binlog @@ -2388,6 +2394,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) #ifdef EXTRA_DEBUG sql_print_information("Got signal %d to shutdown mysqld",sig); #endif + /* switch to the old log message processing */ + logger.set_handlers(LEGACY, opt_slow_log ? LEGACY:NONE, + opt_log ? LEGACY:NONE); DBUG_PRINT("info",("Got signal: %d abort_loop: %d",sig,abort_loop)); if (!abort_loop) { @@ -2415,6 +2424,9 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) REFRESH_THREADS | REFRESH_HOSTS), (TABLE_LIST*) 0, ¬_used); // Flush logs } + /* reenable logs after the options were reloaded */ + logger.set_handlers(LEGACY, opt_slow_log ? CSV:NONE, + opt_log ? 
CSV:NONE); break; #ifdef USE_ONE_SIGNAL_HAND case THR_SERVER_ALARM: @@ -2485,9 +2497,7 @@ static int my_message_sql(uint error, const char *str, myf MyFlags) { NET *net= &thd->net; net->report_error= 1; -#ifndef EMBEDDED_LIBRARY /* TODO query cache in embedded library*/ query_cache_abort(net); -#endif if (!net->last_error[0]) // Return only first message { strmake(net->last_error, str, sizeof(net->last_error)-1); @@ -2679,8 +2689,6 @@ static int init_common_variables(const char *conf_file_name, int argc, global MYSQL_LOGs in their constructors, because then they would be inited before MY_INIT(). So we do it here. */ - mysql_log.init_pthread_objects(); - mysql_slow_log.init_pthread_objects(); mysql_bin_log.init_pthread_objects(); if (gethostname(glob_hostname,sizeof(glob_hostname)-4) < 0) @@ -2798,6 +2806,12 @@ static int init_common_variables(const char *conf_file_name, int argc, global_system_variables.character_set_client= default_charset_info; global_system_variables.collation_connection= default_charset_info; + if (!(character_set_filesystem= + get_charset_by_csname(character_set_filesystem_name, + MY_CS_PRIMARY, MYF(MY_WME)))) + return 1; + global_system_variables.character_set_filesystem= character_set_filesystem; + sys_init_connect.value_length= 0; if ((sys_init_connect.value= opt_init_connect)) sys_init_connect.value_length= strlen(opt_init_connect); @@ -3046,9 +3060,48 @@ static int init_server_components() #ifdef HAVE_REPLICATION init_slave_list(); #endif - /* Setup log files */ - if (opt_log) - mysql_log.open_query_log(opt_logname); + /* Setup logs */ + + /* enable old-fashioned error log */ + if (opt_error_log) + { + if (!log_error_file_ptr[0]) + fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", + MY_REPLACE_EXT); /* replace '.' by '.err', bug#4997 */ + else + fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err", + MY_UNPACK_FILENAME | MY_SAFE_PATH); + if (!log_error_file[0]) + opt_error_log= 1; // Too long file name + else + { +#ifndef EMBEDDED_LIBRARY + if (freopen(log_error_file, "a+", stdout)) +#endif + freopen(log_error_file, "a+", stderr); + } + } + +#ifdef WITH_CSV_STORAGE_ENGINE + logger.init_log_tables(); + + if (opt_old_log_format || (have_csv_db != SHOW_OPTION_YES)) + logger.set_handlers(LEGACY, opt_slow_log ? LEGACY:NONE, + opt_log ? LEGACY:NONE); + else + if (opt_both_log_formats) + logger.set_handlers(LEGACY, + opt_slow_log ? LEGACY_AND_CSV:NONE, + opt_log ? LEGACY_AND_CSV:NONE); + else + /* the default is CSV log tables */ + logger.set_handlers(LEGACY, opt_slow_log ? CSV:NONE, + opt_log ? CSV:NONE); +#else + logger.set_handlers(LEGACY, opt_slow_log ? LEGACY:NONE, + opt_log ? LEGACY:NONE); +#endif + if (opt_update_log) { /* @@ -3117,7 +3170,7 @@ with --log-bin instead."); if (opt_binlog_format_id == BF_UNSPECIFIED) { #ifdef HAVE_NDB_BINLOG - if (have_ndbcluster == SHOW_OPTION_YES) + if (opt_bin_log && have_ndbcluster == SHOW_OPTION_YES) opt_binlog_format_id= BF_ROW; else #endif @@ -3145,9 +3198,6 @@ with --log-bin instead."); array_elements(binlog_format_names)-1); opt_binlog_format= binlog_format_names[opt_binlog_format_id]; - if (opt_slow_log) - mysql_slow_log.open_slow_log(opt_slow_logname); - #ifdef HAVE_REPLICATION if (opt_log_slave_updates && replicate_same_server_id) { @@ -3159,25 +3209,6 @@ server."); } #endif - if (opt_error_log) - { - if (!log_error_file_ptr[0]) - fn_format(log_error_file, glob_hostname, mysql_data_home, ".err", - MY_REPLACE_EXT); /* replace '.' 
by '.err', bug#4997 */
-    else
-      fn_format(log_error_file, log_error_file_ptr, mysql_data_home, ".err",
-                MY_UNPACK_FILENAME | MY_SAFE_PATH);
-    if (!log_error_file[0])
-      opt_error_log= 1;                         // Too long file name
-    else
-    {
-#ifndef EMBEDDED_LIBRARY
-      if (freopen(log_error_file, "a+", stdout))
-#endif
-        stderror_file= freopen(log_error_file, "a+", stderr);
-    }
-  }
-
   if (opt_bin_log)
   {
     char buf[FN_REFLEN];
@@ -3431,6 +3462,12 @@ int main(int argc, char **argv)
 
   MY_INIT(argv[0]);             // init my_sys library & pthreads
 
+  /*
+    Perform basic logger initialization. Should be called after
+    MY_INIT, as it initializes mutexes. Log tables are inited later.
+  */
+  logger.init_base();
+
 #ifdef _CUSTOMSTARTUPCONFIG_
   if (_cust_check_startup())
   {
@@ -3576,6 +3613,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
   */
   error_handler_hook= my_message_sql;
   start_signal_handler();                       // Creates pidfile
+
   if (acl_init(opt_noacl) ||
       my_tz_init((THD *)0, default_tz_name, opt_bootstrap))
   {
@@ -3700,7 +3738,7 @@ we force server id to 2, but this MySQL server will not act as a slave.");
   clean_up_mutexes();
   shutdown_events();
   my_end(opt_endinfo ? MY_CHECK_ERROR | MY_GIVE_INFO : 0);
-
+  exit(0);
   return(0);                                    /* purecov: deadcode */
 }
 
@@ -4638,7 +4676,7 @@ enum options_mysqld
   OPT_REPLICATE_IGNORE_TABLE, OPT_REPLICATE_WILD_DO_TABLE,
   OPT_REPLICATE_WILD_IGNORE_TABLE, OPT_REPLICATE_SAME_SERVER_ID,
   OPT_DISCONNECT_SLAVE_EVENT_COUNT, OPT_TC_HEURISTIC_RECOVER,
-  OPT_ABORT_SLAVE_EVENT_COUNT,
+  OPT_ABORT_SLAVE_EVENT_COUNT, OPT_OLD_LOG_FORMAT, OPT_BOTH_LOG_FORMATS,
   OPT_INNODB_DATA_HOME_DIR,
   OPT_INNODB_DATA_FILE_PATH,
   OPT_INNODB_LOG_GROUP_HOME_DIR,
@@ -4760,6 +4798,7 @@ enum options_mysqld
   OPT_GROUP_CONCAT_MAX_LEN,
   OPT_DEFAULT_COLLATION,
   OPT_CHARACTER_SET_CLIENT_HANDSHAKE,
+  OPT_CHARACTER_SET_FILESYSTEM,
   OPT_INIT_CONNECT,
   OPT_INIT_SLAVE,
   OPT_SECURE_AUTH,
@@ -4913,6 +4952,11 @@ Disable with --skip-bdb (will save memory).",
   (gptr*) &opt_character_set_client_handshake,
   (gptr*) &opt_character_set_client_handshake,
    0, GET_BOOL, NO_ARG, 1, 0, 0, 0, 0, 0},
+  {"character-set-filesystem", OPT_CHARACTER_SET_FILESYSTEM,
+   "Set the filesystem character set.",
+   (gptr*) &character_set_filesystem_name,
+   (gptr*) &character_set_filesystem_name,
+   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
   {"character-set-server", 'C', "Set the default character set.",
   (gptr*) &default_character_set_name, (gptr*) &default_character_set_name,
   0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 },
@@ -5194,6 +5238,16 @@ Disable with --skip-innodb-doublewrite.", (gptr*) &innobase_use_doublewrite,
   "Log slow queries to this log file. Defaults logging to hostname-slow.log file. Must be enabled to activate other slow log options.",
   (gptr*) &opt_slow_logname, (gptr*) &opt_slow_logname, 0, GET_STR, OPT_ARG,
   0, 0, 0, 0, 0, 0},
+#ifdef WITH_CSV_STORAGE_ENGINE
+  {"old-log-format", OPT_OLD_LOG_FORMAT,
+   "Enable old log file format. 
(No SELECT * FROM logs)", + (gptr*) &opt_old_log_format, 0, 0, GET_BOOL, OPT_ARG, + 0, 0, 0, 0, 0, 0}, + {"both-log-formats", OPT_BOTH_LOG_FORMATS, + "Enable old log file format along with log tables", + (gptr*) &opt_both_log_formats, 0, 0, GET_BOOL, OPT_ARG, + 0, 0, 0, 0, 0, 0}, +#endif {"log-tc", OPT_LOG_TC, "Path to transaction coordinator log (used for transactions that affect " "more than one storage engine, when binary log is disabled)", @@ -6885,6 +6939,10 @@ static void mysql_init_variables(void) opt_skip_slave_start= opt_reckless_slave = 0; mysql_home[0]= pidfile_name[0]= log_error_file[0]= 0; opt_log= opt_update_log= opt_slow_log= 0; +#ifdef WITH_CSV_STORAGE_ENGINE + opt_old_log_format= 0; + opt_both_log_formats= 0; +#endif opt_bin_log= 0; opt_disable_networking= opt_skip_show_db=0; opt_logname= opt_update_logname= opt_binlog_index_name= opt_slow_logname= 0; @@ -6923,6 +6981,7 @@ static void mysql_init_variables(void) files_charset_info= &my_charset_utf8_general_ci; national_charset_info= &my_charset_utf8_general_ci; table_alias_charset= &my_charset_bin; + character_set_filesystem= &my_charset_bin; opt_date_time_formats[0]= opt_date_time_formats[1]= opt_date_time_formats[2]= 0; @@ -6976,6 +7035,7 @@ static void mysql_init_variables(void) default_character_set_name= (char*) MYSQL_DEFAULT_CHARSET_NAME; default_collation_name= (char*) MYSQL_DEFAULT_COLLATION_NAME; sys_charset_system.value= (char*) system_charset_info->csname; + character_set_filesystem_name= (char*) "binary"; /* Set default values for some option variables */ @@ -7293,8 +7353,16 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)), } #endif /* HAVE_REPLICATION */ case (int) OPT_SLOW_QUERY_LOG: - opt_slow_log=1; + opt_slow_log= 1; break; +#ifdef WITH_CSV_STORAGE_ENGINE + case (int) OPT_OLD_LOG_FORMAT: + opt_old_log_format= 1; + break; + case (int) OPT_BOTH_LOG_FORMATS: + opt_both_log_formats= 1; + break; +#endif case (int) OPT_SKIP_NEW: opt_specialflag|= SPECIAL_NO_NEW_FUNC; delay_key_write_options= (uint) DELAY_KEY_WRITE_NONE; diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7dd694f3411..7942edd935d 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -2698,8 +2698,10 @@ int find_used_partitions(PART_PRUNE_PARAM *ppar, SEL_ARG *key_tree) DBUG_EXECUTE("info", dbug_print_onepoint_range(ppar->arg_stack, ppar->part_fields);); uint32 part_id; + longlong func_value; /* then find in which partition the {const1, ...,constN} tuple goes */ - if (ppar->get_top_partition_id_func(ppar->part_info, &part_id)) + if (ppar->get_top_partition_id_func(ppar->part_info, &part_id, + &func_value)) { res= 0; /* No satisfying partitions */ goto pop_and_go_right; diff --git a/sql/protocol.cc b/sql/protocol.cc index 0a1b42f5236..98cded56871 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -29,6 +29,7 @@ static const unsigned int PACKET_BUFFER_EXTRA_ALLOC= 1024; static void write_eof_packet(THD *thd, NET *net); +void net_send_error_packet(THD *thd, uint sql_errno, const char *err); #ifndef EMBEDDED_LIBRARY bool Protocol::net_store_data(const char *from, uint length) @@ -56,10 +57,6 @@ bool Protocol_prep::net_store_data(const char *from, uint length) void net_send_error(THD *thd, uint sql_errno, const char *err) { -#ifndef EMBEDDED_LIBRARY - uint length; - char buff[MYSQL_ERRMSG_SIZE+2], *pos; -#endif NET *net= &thd->net; bool generate_warning= thd->killed != THD::KILL_CONNECTION; DBUG_ENTER("net_send_error"); @@ -106,42 +103,8 @@ void net_send_error(THD *thd, uint sql_errno, const char *err) 
push_warning(thd, MYSQL_ERROR::WARN_LEVEL_ERROR, sql_errno, err); } -#ifdef EMBEDDED_LIBRARY - net->last_errno= sql_errno; - strmake(net->last_error, err, sizeof(net->last_error)-1); - strmov(net->sqlstate, mysql_errno_to_sqlstate(sql_errno)); -#else + net_send_error_packet(thd, sql_errno, err); - if (net->vio == 0) - { - if (thd->bootstrap) - { - /* In bootstrap it's ok to print on stderr */ - fprintf(stderr,"ERROR: %d %s\n",sql_errno,err); - } - DBUG_VOID_RETURN; - } - - if (net->return_errno) - { // new client code; Add errno before message - int2store(buff,sql_errno); - pos= buff+2; - if (thd->client_capabilities & CLIENT_PROTOCOL_41) - { - /* The first # is to make the protocol backward compatible */ - buff[2]= '#'; - pos= strmov(buff+3, mysql_errno_to_sqlstate(sql_errno)); - } - length= (uint) (strmake(pos, err, MYSQL_ERRMSG_SIZE-1) - buff); - err=buff; - } - else - { - length=(uint) strlen(err); - set_if_smaller(length,MYSQL_ERRMSG_SIZE-1); - } - VOID(net_write_command(net,(uchar) 255, "", 0, (char*) err,length)); -#endif /* EMBEDDED_LIBRARY*/ thd->is_fatal_error=0; // Error message is given thd->net.report_error= 0; @@ -430,6 +393,47 @@ bool send_old_password_request(THD *thd) return my_net_write(net, eof_buff, 1) || net_flush(net); } + +void net_send_error_packet(THD *thd, uint sql_errno, const char *err) +{ + NET *net= &thd->net; + uint length; + char buff[MYSQL_ERRMSG_SIZE+2], *pos; + + DBUG_ENTER("send_error_packet"); + + if (net->vio == 0) + { + if (thd->bootstrap) + { + /* In bootstrap it's ok to print on stderr */ + fprintf(stderr,"ERROR: %d %s\n",sql_errno,err); + } + DBUG_VOID_RETURN; + } + + if (net->return_errno) + { // new client code; Add errno before message + int2store(buff,sql_errno); + pos= buff+2; + if (thd->client_capabilities & CLIENT_PROTOCOL_41) + { + /* The first # is to make the protocol backward compatible */ + buff[2]= '#'; + pos= strmov(buff+3, mysql_errno_to_sqlstate(sql_errno)); + } + length= (uint) (strmake(pos, err, MYSQL_ERRMSG_SIZE-1) - buff); + err=buff; + } + else + { + length=(uint) strlen(err); + set_if_smaller(length,MYSQL_ERRMSG_SIZE-1); + } + VOID(net_write_command(net,(uchar) 255, "", 0, (char*) err,length)); + DBUG_VOID_RETURN; +} + #endif /* EMBEDDED_LIBRARY */ /* diff --git a/sql/protocol.h b/sql/protocol.h index 8d9da5774b2..85c22724b74 100644 --- a/sql/protocol.h +++ b/sql/protocol.h @@ -91,6 +91,12 @@ public: virtual bool store_date(TIME *time)=0; virtual bool store_time(TIME *time)=0; virtual bool store(Field *field)=0; +#ifdef EMBEDDED_LIBRARY + int begin_dataset(); + virtual void remove_last_row() {} +#else + void remove_last_row() {} +#endif }; @@ -117,6 +123,9 @@ public: virtual bool store(float nr, uint32 decimals, String *buffer); virtual bool store(double from, uint32 decimals, String *buffer); virtual bool store(Field *field); +#ifdef EMBEDDED_LIBRARY + void remove_last_row(); +#endif }; diff --git a/sql/set_var.cc b/sql/set_var.cc index 1ccd590171f..4f31c6ad1b4 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -193,6 +193,7 @@ sys_var_character_set_database sys_character_set_database("character_set_databas sys_var_character_set_client sys_character_set_client("character_set_client"); sys_var_character_set_connection sys_character_set_connection("character_set_connection"); sys_var_character_set_results sys_character_set_results("character_set_results"); +sys_var_character_set_filesystem sys_character_set_filesystem("character_set_filesystem"); sys_var_thd_ulong sys_completion_type("completion_type", &SV::completion_type, 
check_completion_type, @@ -706,6 +707,7 @@ SHOW_VAR init_vars[]= { {sys_character_set_client.name,(char*) &sys_character_set_client, SHOW_SYS}, {sys_character_set_connection.name,(char*) &sys_character_set_connection,SHOW_SYS}, {sys_character_set_database.name, (char*) &sys_character_set_database,SHOW_SYS}, + {sys_character_set_filesystem.name,(char*) &sys_character_set_filesystem, SHOW_SYS}, {sys_character_set_results.name,(char*) &sys_character_set_results, SHOW_SYS}, {sys_character_set_server.name, (char*) &sys_character_set_server,SHOW_SYS}, {sys_charset_system.name, (char*) &sys_charset_system, SHOW_SYS}, @@ -2021,6 +2023,32 @@ void sys_var_character_set_client::set_default(THD *thd, enum_var_type type) } +CHARSET_INFO ** +sys_var_character_set_filesystem::ci_ptr(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + return &global_system_variables.character_set_filesystem; + else + return &thd->variables.character_set_filesystem; +} + + +extern CHARSET_INFO *character_set_filesystem; + +void +sys_var_character_set_filesystem::set_default(THD *thd, enum_var_type type) +{ + if (type == OPT_GLOBAL) + global_system_variables.character_set_filesystem= character_set_filesystem; + else + { + thd->variables.character_set_filesystem= (global_system_variables. + character_set_filesystem); + thd->update_charset(); + } +} + + CHARSET_INFO ** sys_var_character_set_results::ci_ptr(THD *thd, enum_var_type type) { diff --git a/sql/set_var.h b/sql/set_var.h index 2dde7f44a55..01705d5ff4d 100644 --- a/sql/set_var.h +++ b/sql/set_var.h @@ -549,6 +549,15 @@ public: virtual CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type)= 0; }; +class sys_var_character_set_filesystem :public sys_var_character_set +{ +public: + sys_var_character_set_filesystem(const char *name_arg) : + sys_var_character_set(name_arg) {} + void set_default(THD *thd, enum_var_type type); + CHARSET_INFO **ci_ptr(THD *thd, enum_var_type type); +}; + class sys_var_character_set_client :public sys_var_character_set { public: diff --git a/sql/share/errmsg.txt b/sql/share/errmsg.txt index e735a87bab4..81bff69b22c 100644 --- a/sql/share/errmsg.txt +++ b/sql/share/errmsg.txt @@ -5601,13 +5601,13 @@ ER_SP_RECURSION_LIMIT eng "Recursive limit %d (as set by the max_sp_recursion_depth variable) was exceeded for routine %.64s" ger "Rekursionsgrenze %d (durch Variable max_sp_recursion_depth gegeben) wurde für Routine %.64s überschritten" ER_SP_PROC_TABLE_CORRUPT - eng "Failed to load routine %s. The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" + eng "Failed to load routine %-.64s. 
The table mysql.proc is missing, corrupt, or contains bad data (internal code %d)" ER_PARTITION_REQUIRES_VALUES_ERROR - eng "%s PARTITIONING requires definition of VALUES %s for each partition" - swe "%s PARTITIONering kräver definition av VALUES %s för varje partition" + eng "%-.64s PARTITIONING requires definition of VALUES %-.64s for each partition" + swe "%-.64s PARTITIONering kräver definition av VALUES %-.64s för varje partition" ER_PARTITION_WRONG_VALUES_ERROR - eng "Only %s PARTITIONING can use VALUES %s in partition definition" - swe "Endast %s partitionering kan använda VALUES %s i definition av partitionen" + eng "Only %-.64s PARTITIONING can use VALUES %-.64s in partition definition" + swe "Endast %-.64s partitionering kan använda VALUES %-.64s i definition av partitionen" ER_PARTITION_MAXVALUE_ERROR eng "MAXVALUE can only be used in last partition definition" swe "MAXVALUE kan bara användas i definitionen av den sista partitionen" @@ -5636,11 +5636,11 @@ ER_INCONSISTENT_PARTITION_INFO_ERROR eng "The partition info in the frm file is not consistent with what can be written into the frm file" swe "Partitioneringsinformationen i frm-filen är inte konsistent med vad som kan skrivas i frm-filen" ER_PARTITION_FUNC_NOT_ALLOWED_ERROR - eng "The %s function returns the wrong type" - swe "%s-funktionen returnerar felaktig typ" + eng "The %-.64s function returns the wrong type" + swe "%-.64s-funktionen returnerar felaktig typ" ER_PARTITIONS_MUST_BE_DEFINED_ERROR - eng "For %s partitions each partition must be defined" - swe "För %s partitionering så måste varje partition definieras" + eng "For %-.64s partitions each partition must be defined" + swe "För %-.64s partitionering så måste varje partition definieras" ER_RANGE_NOT_INCREASING_ERROR eng "VALUES LESS THAN value must be strictly increasing for each partition" swe "Värden i VALUES LESS THAN måste vara strikt växande för varje partition" @@ -5657,8 +5657,8 @@ ER_MIX_HANDLER_ERROR eng "The mix of handlers in the partitions is not allowed in this version of MySQL" swe "Denna mix av lagringsmotorer är inte tillåten i denna version av MySQL" ER_PARTITION_NOT_DEFINED_ERROR - eng "For the partitioned engine it is necessary to define all %s" - swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %s" + eng "For the partitioned engine it is necessary to define all %-.64s" + swe "För partitioneringsmotorn så är det nödvändigt att definiera alla %-.64s" ER_TOO_MANY_PARTITIONS_ERROR eng "Too many partitions were defined" swe "För många partitioner definierades" @@ -5671,30 +5671,36 @@ ER_CANT_CREATE_HANDLER_FILE ER_BLOB_FIELD_IN_PART_FUNC_ERROR eng "A BLOB field is not allowed in partition function" swe "Ett BLOB-fält är inte tillåtet i partitioneringsfunktioner" -ER_CHAR_SET_IN_PART_FIELD_ERROR - eng "VARCHAR only allowed if binary collation for partition functions" - swe "VARCHAR endast tillåten med binär collation för partitioneringsfunktion" ER_UNIQUE_KEY_NEED_ALL_FIELDS_IN_PF - eng "A %s need to include all fields in the partition function" - swe "En %s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor" + eng "A %-.64s need to include all fields in the partition function" + swe "En %-.64s behöver inkludera alla fält i partitioneringsfunktionen för denna lagringsmotor" ER_NO_PARTS_ERROR - eng "Number of %s = 0 is not an allowed value" - swe "Antal %s = 0 är inte ett tillåten värde" + eng "Number of %-.64s = 0 is not an allowed value" + swe "Antal %-.64s = 0 är inte ett tillåten värde" 
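A note on the recurring errmsg.txt edit above and below: unbounded "%s" format specifiers are being replaced with width-limited ones such as "%-.64s", so a user-supplied name can contribute at most 64 bytes to the formatted error text. A minimal standalone C sketch of the effect (the oversized name here is purely illustrative, not taken from the patch):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
      char long_name[300];                       /* hypothetical routine name */
      memset(long_name, 'x', sizeof(long_name) - 1);
      long_name[sizeof(long_name) - 1]= '\0';

      /* "%s" would copy all 299 bytes into the message; "%-.64s" prints
         at most 64 bytes (the '-' merely left-justifies), which keeps the
         result inside a fixed-size error buffer. */
      printf("Failed to load routine %-.64s.\n", long_name);
      return 0;
    }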
ER_PARTITION_MGMT_ON_NONPARTITIONED eng "Partition management on a not partitioned table is not possible" swe "Partitioneringskommando på en opartitionerad tabell är inte möjligt" +ER_FOREIGN_KEY_ON_PARTITIONED + eng "Foreign key condition is not yet supported in conjunction with partitioning" + swe "Foreign key villkor är inte ännu implementerad i kombination med partitionering" ER_DROP_PARTITION_NON_EXISTENT - eng "Error in list of partitions to change" - swe "Fel i listan av partitioner att förändra" + eng "Error in list of partitions to %-.64s" + swe "Fel i listan av partitioner att %-.64s" ER_DROP_LAST_PARTITION eng "Cannot remove all partitions, use DROP TABLE instead" swe "Det är inte tillåtet att ta bort alla partitioner, använd DROP TABLE istället" ER_COALESCE_ONLY_ON_HASH_PARTITION eng "COALESCE PARTITION can only be used on HASH/KEY partitions" swe "COALESCE PARTITION kan bara användas på HASH/KEY partitioner" +ER_REORG_HASH_ONLY_ON_SAME_NO + eng "REORGANISE PARTITION can only be used to reorganise partitions not to change their numbers" + swe "REORGANISE PARTITION kan bara användas för att omorganisera partitioner, inte för att ändra deras antal" +ER_REORG_NO_PARAM_ERROR + eng "REORGANISE PARTITION without parameters can only be used on auto-partitioned tables using HASH PARTITIONs" + swe "REORGANISE PARTITION utan parametrar kan bara användas på auto-partitionerade tabeller som använder HASH partitionering" ER_ONLY_ON_RANGE_LIST_PARTITION - eng "%s PARTITION can only be used on RANGE/LIST partitions" - swe "%s PARTITION kan bara användas på RANGE/LIST-partitioner" + eng "%-.64s PARTITION can only be used on RANGE/LIST partitions" + swe "%-.64s PARTITION kan bara användas på RANGE/LIST-partitioner" ER_ADD_PARTITION_SUBPART_ERROR eng "Trying to Add partition(s) with wrong number of subpartitions" swe "ADD PARTITION med fel antal subpartitioner" @@ -5708,25 +5714,31 @@ ER_REORG_PARTITION_NOT_EXIST eng "More partitions to reorganise than there are partitions" swe "Fler partitioner att reorganisera än det finns partitioner" ER_SAME_NAME_PARTITION - eng "All partitions must have unique names in the table" - swe "Alla partitioner i tabellen måste ha unika namn" + eng "Duplicate partition name %-.64s" + swe "Duplicerat partitionsnamn %-.64s" +ER_NO_BINLOG_ERROR + eng "It is not allowed to shut off binlog on this command" + swe "Det är inte tillåtet att stänga av binlog på detta kommando" ER_CONSECUTIVE_REORG_PARTITIONS eng "When reorganising a set of partitions they must be in consecutive order" swe "När ett antal partitioner omorganiseras måste de vara i konsekutiv ordning" ER_REORG_OUTSIDE_RANGE - eng "The new partitions cover a bigger range then the reorganised partitions do" - swe "De nya partitionerna täcker ett större intervall än de omorganiserade partitionerna" -ER_DROP_PARTITION_FAILURE - eng "Drop partition not supported in this version for this handler" -ER_DROP_PARTITION_WHEN_FK_DEFINED - eng "Cannot drop a partition when a foreign key constraint is defined on the table" - swe "Kan inte ta bort en partition när en främmande nyckel är definierad på tabellen" + eng "Reorganize of range partitions cannot change total ranges except for last partition where it can extend the range" + swe "Reorganisering av rangepartitioner kan inte ändra den totala intervallet utom för den sista partitionen där intervallet kan utökas" +ER_PARTITION_FUNCTION_FAILURE + eng "Partition function not supported in this version for this handler" +ER_PART_STATE_ERROR + eng "Partition state cannot be 
defined from CREATE/ALTER TABLE" + swe "Partition state kan inte definieras från CREATE/ALTER TABLE" +ER_LIMITED_PART_RANGE + eng "The %-.64s handler only supports 32 bit integers in VALUES" + swe "%-.64s stödjer endast 32 bitar i integers i VALUES" ER_PLUGIN_IS_NOT_LOADED eng "Plugin '%-.64s' is not loaded" ER_WRONG_VALUE eng "Incorrect %-.32s value: '%-.128s'" ER_NO_PARTITION_FOR_GIVEN_VALUE - eng "Table has no partition for value %ld" + eng "Table has no partition for value %-.64s" ER_TABLESPACE_OPTION_ONLY_ONCE eng "It is not allowed to specify %s more than once" ER_CREATE_TABLESPACE_FAILED @@ -5782,3 +5794,7 @@ ER_EVENT_DATA_TOO_LONG ER_DROP_INDEX_FK eng "Cannot drop index '%-.64s': needed in a foreign key constraint" ger "Kann Index '%-.64s' nicht löschen: wird für einen einen Fremdschlüssel benötigt" +ER_CANT_WRITE_LOCK_LOG_TABLE + eng "You can't write-lock a log table. Only read access is possible." +ER_CANT_READ_LOCK_LOG_TABLE + eng "You can't use usual read lock with log tables. Try READ LOCAL instead." diff --git a/sql/slave.cc b/sql/slave.cc index 41a13f2f5c5..edca614159a 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -4354,8 +4354,8 @@ replication resumed in log '%s' at position %s", mi->user, else { change_rpl_status(RPL_IDLE_SLAVE,RPL_ACTIVE_SLAVE); - mysql_log.write(thd, COM_CONNECT_OUT, "%s@%s:%d", - mi->user, mi->host, mi->port); + general_log_print(thd, COM_CONNECT_OUT, "%s@%s:%d", + mi->user, mi->host, mi->port); } #ifdef SIGNAL_WITH_VIO_CLOSE thd->set_active_vio(mysql->net.vio); diff --git a/sql/sql_base.cc b/sql/sql_base.cc index d502562ec7c..44b3a22ec52 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -835,7 +835,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, bool found=0; for (TABLE_LIST *table= tables; table; table= table->next_local) { - if (remove_table_from_cache(thd, table->db, table->table_name, + if ((!table->table || !table->table->s->log_table) && + remove_table_from_cache(thd, table->db, table->table_name, RTFC_OWNED_BY_THD_FLAG)) found=1; } @@ -869,7 +870,8 @@ bool close_cached_tables(THD *thd, bool if_wait_for_refresh, for (uint idx=0 ; idx < open_cache.records ; idx++) { TABLE *table=(TABLE*) hash_element(&open_cache,idx); - if ((table->s->version) < refresh_version && table->db_stat) + if (!table->s->log_table && + ((table->s->version) < refresh_version && table->db_stat)) { found=1; pthread_cond_wait(&COND_refresh,&LOCK_open); @@ -1852,7 +1854,7 @@ TABLE *open_table(THD *thd, TABLE_LIST *table_list, MEM_ROOT *mem_root, if (!thd->open_tables) thd->version=refresh_version; else if ((thd->version != refresh_version) && - ! (flags & MYSQL_LOCK_IGNORE_FLUSH)) + ! 
(flags & MYSQL_LOCK_IGNORE_FLUSH) && !table->s->log_table) { /* Someone did a refresh while thread was opening tables */ if (refresh) @@ -1873,7 +1875,11 @@ { if (table->s->version != refresh_version) { - if (flags & MYSQL_LOCK_IGNORE_FLUSH) + /* + Don't close tables if we are working with a log table or were + asked not to close the table explicitly + */ + if (flags & MYSQL_LOCK_IGNORE_FLUSH || table->s->log_table) { /* Force close at once after usage */ thd->version= table->s->version; @@ -2218,7 +2224,7 @@ void close_old_data_files(THD *thd, TABLE *table, bool abort_locks, { if (abort_locks) { - mysql_lock_abort(thd,table); // Close waiting threads + mysql_lock_abort(thd,table, TRUE); // Close waiting threads mysql_lock_remove(thd, thd->locked_tables,table); table->locked_by_flush=1; // Will be reopened with locks } @@ -2236,6 +2242,10 @@ Wait until all threads has closed the tables in the list We have also to wait if there is thread that has a lock on this table even if the table is closed + NOTE: log tables are handled differently by the logging routines. + E.g. general_log is always opened and locked by the logger + and the table handler used by the logger will be skipped by + this check. */ bool table_is_used(TABLE *table, bool wait_for_name_lock) @@ -2254,9 +2264,10 @@ search= (TABLE*) hash_next(&open_cache, (byte*) key, key_length, &state)) { - DBUG_PRINT("info", ("share: 0x%lx locked_by_flush: %d " - "locked_by_name: %d db_stat: %u version: %u", - (ulong) search->s, + DBUG_PRINT("info", ("share: 0x%lx locked_by_logger: %d " + "locked_by_flush: %d locked_by_name: %d " + "db_stat: %u version: %u", + (ulong) search->s, search->locked_by_logger, search->locked_by_flush, search->locked_by_name, search->db_stat, search->s->version)); @@ -2267,12 +2278,15 @@ - There is an name lock on it (Table is to be deleted or altered) - If we are in flush table and we didn't execute the flush - If the table engine is open and it's an old version - (We must wait until all engines are shut down to use the table) + (We must wait until all engines are shut down to use the table) + However we do not wait if we encounter a table locked by the logger. + Log tables are managed separately by logging routines. */ - if (search->locked_by_name && wait_for_name_lock || - search->locked_by_flush || - (search->db_stat && search->s->version < refresh_version)) - return 1; + if (!search->locked_by_logger && + (search->locked_by_name && wait_for_name_lock || + search->locked_by_flush || + (search->db_stat && search->s->version < refresh_version))) + return 1; } } while ((table=table->next)); DBUG_RETURN(0); @@ -2361,7 +2375,7 @@ void abort_locked_tables(THD *thd,const char *db, const char *table_name) if (!strcmp(table->s->table_name.str, table_name) && !strcmp(table->s->db.str, db)) { - mysql_lock_abort(thd,table); + mysql_lock_abort(thd,table, TRUE); break; } } @@ -2473,7 +2487,7 @@ retry: HA_TRY_READ_ONLY), (READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD), - thd->open_options, entry))) + thd->open_options, entry, FALSE))) { if (error == 7) // Table def changed { @@ -2537,7 +2551,7 @@ retry: HA_TRY_READ_ONLY), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options | HA_OPEN_FOR_REPAIR, - entry, FALSE) || !
entry->file || (entry->file->is_crashed() && entry->file->check_and_repair(thd))) { /* Give right error message */ @@ -3366,7 +3380,7 @@ TABLE *open_temporary_table(THD *thd, const char *path, const char *db, HA_GET_INDEX), READ_KEYINFO | COMPUTE_TYPES | EXTRA_RECORD, ha_open_options, - tmp_table)) + tmp_table, FALSE)) { /* No need to lock share->mutex as this is not needed for tmp tables */ free_table_share(share); @@ -5867,6 +5881,7 @@ bool remove_table_from_cache(THD *thd, const char *db, const char *table_name, &state)) { THD *in_use; + table->s->version=0L; /* Free when thread is ready */ if (!(in_use=table->in_use)) { @@ -6069,3 +6084,155 @@ bool is_equal(const LEX_STRING *a, const LEX_STRING *b) { return a->length == b->length && !strncmp(a->str, b->str, a->length); } + + +/* + SYNOPSIS + abort_and_upgrade_lock() + lpt Parameter passing struct + All parameters passed through the ALTER_PARTITION_PARAM_TYPE object + RETURN VALUES + TRUE Failure + FALSE Success + DESCRIPTION + Remember the old lock level (for a possible downgrade later on), abort + all waiting threads and ensure that all currently held locks are + completed, such that we own the lock exclusively and no other interaction + is ongoing. + + thd Thread object + table Table object + db Database name + table_name Table name + old_lock_level Old lock level +*/ + +bool abort_and_upgrade_lock(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG; + int error= FALSE; + DBUG_ENTER("abort_and_upgrade_lock"); + + lpt->old_lock_type= lpt->table->reginfo.lock_type; + VOID(pthread_mutex_lock(&LOCK_open)); + mysql_lock_abort(lpt->thd, lpt->table, TRUE); + VOID(remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, flags)); + if (lpt->thd->killed) + { + lpt->thd->no_warnings_for_error= 0; + error= TRUE; + } + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(error); } + + +/* + SYNOPSIS + close_open_tables_and_downgrade() + RESULT VALUES + NONE + DESCRIPTION + We need to ensure that any thread that has managed to open the table + but not yet encountered our lock on the table is also thrown out, to + ensure that no thread sees our frm changes before the final version + is in place. The intermediate versions are only meant for use after a + crash and later REPAIR TABLE. + We also downgrade locks after the upgrade to WRITE_ONLY. +*/ + +void close_open_tables_and_downgrade(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + VOID(pthread_mutex_lock(&LOCK_open)); + remove_table_from_cache(lpt->thd, lpt->db, lpt->table_name, + RTFC_WAIT_OTHER_THREAD_FLAG); + VOID(pthread_mutex_unlock(&LOCK_open)); + mysql_lock_downgrade_write(lpt->thd, lpt->table, lpt->old_lock_type); +} + + +/* + SYNOPSIS + mysql_wait_completed_table() + lpt Parameter passing struct + my_table My table object + All parameters passed through the ALTER_PARTITION_PARAM object + RETURN VALUES + TRUE Failure + FALSE Success + DESCRIPTION + We have changed the frm file and now we want to wait for all users of + the old frm to complete before proceeding to ensure that no one + remains that uses the old frm definition. + Start by ensuring that all users of the table will be removed from cache + once they are done. Then abort all threads that have stumbled on locks + and haven't yet been started.
+ + thd Thread object + table Table object + db Database name + table_name Table name +*/ + +void mysql_wait_completed_table(ALTER_PARTITION_PARAM_TYPE *lpt, TABLE *my_table) +{ + char key[MAX_DBKEY_LENGTH]; + uint key_length; + TABLE *table; + DBUG_ENTER("mysql_wait_completed_table"); + + key_length=(uint) (strmov(strmov(key,lpt->db)+1,lpt->table_name)-key)+1; + VOID(pthread_mutex_lock(&LOCK_open)); + HASH_SEARCH_STATE state; + for (table= (TABLE*) hash_first(&open_cache,(byte*) key,key_length, + &state) ; + table; + table= (TABLE*) hash_next(&open_cache,(byte*) key,key_length, + &state)) + { + THD *in_use= table->in_use; + table->s->version= 0L; + if (!in_use) + { + relink_unused(table); + } + else + { + /* Kill delayed insert threads */ + if ((in_use->system_thread & SYSTEM_THREAD_DELAYED_INSERT) && + ! in_use->killed) + { + in_use->killed= THD::KILL_CONNECTION; + pthread_mutex_lock(&in_use->mysys_var->mutex); + if (in_use->mysys_var->current_cond) + { + pthread_mutex_lock(in_use->mysys_var->current_mutex); + pthread_cond_broadcast(in_use->mysys_var->current_cond); + pthread_mutex_unlock(in_use->mysys_var->current_mutex); + } + pthread_mutex_unlock(&in_use->mysys_var->mutex); + } + /* + Now we must abort all table locks used by this thread, + as the thread may be waiting to get a lock for another table + */ + for (TABLE *thd_table= in_use->open_tables; + thd_table ; + thd_table= thd_table->next) + { + if (thd_table->db_stat) // If table is open + mysql_lock_abort_for_thread(lpt->thd, thd_table); + } + } + } + /* + We start by removing all unused objects from the cache and marking + those in use for removal after completion. Now we also need to abort + all threads that are blocked and not progressing because they are + waiting on our lock. We don't upgrade our lock here.
+ */ + mysql_lock_abort(lpt->thd, my_table, FALSE); + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_VOID_RETURN; +} + diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 3d5c9ac79b6..195afd5023d 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -167,6 +167,25 @@ Open_tables_state::Open_tables_state(ulong version_arg) reset_open_tables_state(); } +my_bool thd_in_lock_tables(const THD *thd) +{ + return thd->in_lock_tables; +} + + +my_bool thd_tablespace_op(const THD *thd) +{ + return thd->tablespace_op; +} + + +const char *thd_proc_info(THD *thd, const char *info) +{ + const char *old_info= thd->proc_info; + thd->proc_info= info; + return old_info; +} + /* Pass nominal parameters to Statement constructor only to ensure that @@ -658,6 +677,9 @@ void THD::update_charset() charset_is_collation_connection= !String::needs_conversion(0,charset(),variables.collation_connection, ¬_used); + charset_is_character_set_filesystem= + !String::needs_conversion(0, charset(), + variables.character_set_filesystem, ¬_used); } @@ -963,7 +985,9 @@ bool select_send::send_data(List &items) thd->sent_row_count++; if (!thd->vio_ok()) DBUG_RETURN(0); - if (!thd->net.report_error) + if (thd->net.report_error) + protocol->remove_last_row(); + else DBUG_RETURN(protocol->write()); DBUG_RETURN(1); } @@ -2014,10 +2038,8 @@ void THD::reset_sub_statement_state(Sub_statement_state *backup, cuted_fields= 0; transaction.savepoints= 0; -#ifndef EMBEDDED_LIBRARY /* Surpress OK packets in case if we will execute statements */ net.no_send_ok= TRUE; -#endif } diff --git a/sql/sql_class.h b/sql/sql_class.h index cb8c2818a19..00440449be8 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -171,6 +171,7 @@ public: class delayed_insert; class select_result; +class Time_zone; #define THD_SENTRY_MAGIC 0xfeedd1ff #define THD_SENTRY_GONE 0xdeadbeef @@ -258,6 +259,7 @@ struct system_variables my_bool old_passwords; /* Only charset part of these variables is sensible */ + CHARSET_INFO *character_set_filesystem; CHARSET_INFO *character_set_client; CHARSET_INFO *character_set_results; @@ -343,6 +345,8 @@ typedef struct system_status_var #define last_system_status_var com_stmt_close +#ifdef MYSQL_SERVER + void free_tmp_table(THD *thd, TABLE *entry); @@ -353,7 +357,6 @@ void free_tmp_table(THD *thd, TABLE *entry); #define INIT_ARENA_DBUG_INFO #endif - class Query_arena { public: @@ -801,13 +804,16 @@ public: #ifdef EMBEDDED_LIBRARY struct st_mysql *mysql; - struct st_mysql_data *data; unsigned long client_stmt_id; unsigned long client_param_count; struct st_mysql_bind *client_params; char *extra_data; ulong extra_length; - String query_rest; + struct st_mysql_data *cur_data; + struct st_mysql_data *first_data; + struct st_mysql_data **data_tail; + void clear_data_list(); + struct st_mysql_data *alloc_new_dataset(); #endif NET net; // client connection descriptor MEM_ROOT warn_root; // For warnings and errors @@ -1126,6 +1132,7 @@ public: bool query_error, bootstrap, cleanup_done; bool tmp_table_used; bool charset_is_system_charset, charset_is_collation_connection; + bool charset_is_character_set_filesystem; bool enable_slow_log; /* enable slow log for current statement */ bool no_trans_update, abort_on_warning; bool got_warning; /* Set on call to push_warning() */ @@ -1443,6 +1450,11 @@ public: */ virtual void cleanup(); void set_thd(THD *thd_arg) { thd= thd_arg; } +#ifdef EMBEDDED_LIBRARY + virtual void begin_dataset() {} +#else + void begin_dataset() {} +#endif }; @@ -1903,3 +1915,5 @@ public: /* Functions in sql_class.cc */ void 
add_to_status(STATUS_VAR *to_var, STATUS_VAR *from_var); + +#endif /* MYSQL_SERVER */ diff --git a/sql/sql_cursor.cc b/sql/sql_cursor.cc index 89c160cd70a..33ad27b9d14 100644 --- a/sql/sql_cursor.cc +++ b/sql/sql_cursor.cc @@ -603,6 +603,7 @@ void Materialized_cursor::fetch(ulong num_rows) THD *thd= table->in_use; int res= 0; + result->begin_dataset(); for (fetch_limit+= num_rows; fetch_count < fetch_limit; fetch_count++) { if ((res= table->file->rnd_next(table->record[0]))) diff --git a/sql/sql_db.cc b/sql/sql_db.cc index fa01f98d723..5ffa4fd76ed 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -1158,8 +1158,8 @@ bool mysql_change_db(THD *thd, const char *name, bool no_access_check) sctx->priv_user, sctx->priv_host, dbname); - mysql_log.write(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), - sctx->priv_user, sctx->priv_host, dbname); + general_log_print(thd, COM_INIT_DB, ER(ER_DBACCESS_DENIED_ERROR), + sctx->priv_user, sctx->priv_host, dbname); my_free(dbname,MYF(0)); DBUG_RETURN(1); } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 7b30b2d4a3d..7d8f8f12383 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -857,6 +857,8 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) char path[FN_REFLEN]; TABLE *table; bool error; + uint closed_log_tables= 0, lock_logger= 0; + TABLE_LIST *tmp_table_list; uint path_length; DBUG_ENTER("mysql_truncate"); @@ -905,13 +907,36 @@ bool mysql_truncate(THD *thd, TABLE_LIST *table_list, bool dont_send_ok) HTON_CAN_RECREATE) || thd->lex->sphead) goto trunc_by_del; + if (lock_and_wait_for_table_name(thd, table_list)) DBUG_RETURN(TRUE); } - // Remove the .frm extension - // AIX 5.2 64-bit compiler bug (BUG#16155): this crashes, replacement works. - // *(path + path_length - reg_ext_length)= '\0'; + /* close log tables in use */ + if (!my_strcasecmp(system_charset_info, table_list->db, "mysql")) + { + if (!my_strcasecmp(system_charset_info, table_list->table_name, + "general_log")) + { + lock_logger= 1; + logger.lock(); + logger.close_log_table(QUERY_LOG_GENERAL, FALSE); + closed_log_tables= closed_log_tables | QUERY_LOG_GENERAL; + } + else + if (!my_strcasecmp(system_charset_info, table_list->table_name, + "slow_log")) + { + lock_logger= 1; + logger.lock(); + logger.close_log_table(QUERY_LOG_SLOW, FALSE); + closed_log_tables= closed_log_tables | QUERY_LOG_SLOW; + } + } + + // Remove the .frm extension. + // AIX 5.2 64-bit compiler bug (BUG#16155): writing + // *(path + path_length - reg_ext_length)= '\0' crashes; the plain + // array assignment below works as a replacement.
path[path_length - reg_ext_length] = 0; error= ha_create_table(thd, path, table_list->db, table_list->table_name, &create_info, 1); @@ -937,6 +962,14 @@ end: VOID(pthread_mutex_lock(&LOCK_open)); unlock_table_name(thd, table_list); VOID(pthread_mutex_unlock(&LOCK_open)); + + if (closed_log_tables & QUERY_LOG_SLOW) + logger.reopen_log_table(QUERY_LOG_SLOW); + + if (closed_log_tables & QUERY_LOG_GENERAL) + logger.reopen_log_table(QUERY_LOG_GENERAL); + if (lock_logger) + logger.unlock(); } else if (error) { diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 0c4e08abe26..a9050ddf277 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -179,6 +179,7 @@ void lex_start(THD *thd, const uchar *buf, uint length) lex->query_tables_own_last= 0; lex->escape_used= lex->et_compile_phase= FALSE; + lex->name= 0; lex->et= NULL; if (lex->sroutines.records) diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 669cb7f8d47..b0eba863235 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -29,6 +29,7 @@ class st_alter_tablespace; class partition_info; class event_timed; +#ifdef MYSQL_SERVER /* The following hack is needed because mysql_yacc.cc does not define YYSTYPE before including this file @@ -43,6 +44,7 @@ class event_timed; #include "sql_yacc.h" #define LEX_YYSTYPE YYSTYPE * #endif +#endif /* When a command is added here, be sure it's also added in mysqld.cc @@ -115,6 +117,8 @@ enum enum_sql_command { */ #define DESCRIBE_PARTITIONS 4 +#ifdef MYSQL_SERVER + enum enum_sp_suid_behaviour { SP_IS_DEFAULT_SUID= 0, @@ -665,23 +669,31 @@ public: }; typedef class st_select_lex SELECT_LEX; -#define ALTER_ADD_COLUMN 1 -#define ALTER_DROP_COLUMN 2 -#define ALTER_CHANGE_COLUMN 4 -#define ALTER_ADD_INDEX 8 -#define ALTER_DROP_INDEX 16 -#define ALTER_RENAME 32 -#define ALTER_ORDER 64 -#define ALTER_OPTIONS 128 -#define ALTER_CHANGE_COLUMN_DEFAULT 256 -#define ALTER_KEYS_ONOFF 512 -#define ALTER_CONVERT 1024 -#define ALTER_FORCE 2048 -#define ALTER_RECREATE 4096 -#define ALTER_ADD_PARTITION 8192 -#define ALTER_DROP_PARTITION 16384 -#define ALTER_COALESCE_PARTITION 32768 -#define ALTER_REORGANISE_PARTITION 65536 +#define ALTER_ADD_COLUMN (1L << 0) +#define ALTER_DROP_COLUMN (1L << 1) +#define ALTER_CHANGE_COLUMN (1L << 2) +#define ALTER_ADD_INDEX (1L << 3) +#define ALTER_DROP_INDEX (1L << 4) +#define ALTER_RENAME (1L << 5) +#define ALTER_ORDER (1L << 6) +#define ALTER_OPTIONS (1L << 7) +#define ALTER_CHANGE_COLUMN_DEFAULT (1L << 8) +#define ALTER_KEYS_ONOFF (1L << 9) +#define ALTER_CONVERT (1L << 10) +#define ALTER_FORCE (1L << 11) +#define ALTER_RECREATE (1L << 12) +#define ALTER_ADD_PARTITION (1L << 13) +#define ALTER_DROP_PARTITION (1L << 14) +#define ALTER_COALESCE_PARTITION (1L << 15) +#define ALTER_REORGANIZE_PARTITION (1L << 16) +#define ALTER_PARTITION (1L << 17) +#define ALTER_OPTIMIZE_PARTITION (1L << 18) +#define ALTER_TABLE_REORG (1L << 19) +#define ALTER_REBUILD_PARTITION (1L << 20) +#define ALTER_ALL_PARTITION (1L << 21) +#define ALTER_ANALYZE_PARTITION (1L << 22) +#define ALTER_CHECK_PARTITION (1L << 23) +#define ALTER_REPAIR_PARTITION (1L << 24) typedef struct st_alter_info { @@ -1101,3 +1113,5 @@ extern int yylex(void *arg, void *yythd); extern pthread_key(LEX*,THR_LEX); #define current_lex (current_thd->lex) + +#endif diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index ecde4d01ae1..fff7a32b16b 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -73,14 +73,38 @@ static bool append_file_to_dir(THD *thd, const char **filename_ptr, const char *any_db="*any*";
// Special symbol for check_access -const char *command_name[]={ - "Sleep", "Quit", "Init DB", "Query", "Field List", "Create DB", - "Drop DB", "Refresh", "Shutdown", "Statistics", "Processlist", - "Connect","Kill","Debug","Ping","Time","Delayed insert","Change user", - "Binlog Dump","Table Dump", "Connect Out", "Register Slave", - "Prepare", "Execute", "Long Data", "Close stmt", - "Reset stmt", "Set option", "Fetch", "Daemon", - "Error" // Last command number +LEX_STRING command_name[]={ + (char *)STRING_WITH_LEN("Sleep"), + (char *)STRING_WITH_LEN("Quit"), + (char *)STRING_WITH_LEN("Init DB"), + (char *)STRING_WITH_LEN("Query"), + (char *)STRING_WITH_LEN("Field List"), + (char *)STRING_WITH_LEN("Create DB"), + (char *)STRING_WITH_LEN("Drop DB"), + (char *)STRING_WITH_LEN("Refresh"), + (char *)STRING_WITH_LEN("Shutdown"), + (char *)STRING_WITH_LEN("Statistics"), + (char *)STRING_WITH_LEN("Processlist"), + (char *)STRING_WITH_LEN("Connect"), + (char *)STRING_WITH_LEN("Kill"), + (char *)STRING_WITH_LEN("Debug"), + (char *)STRING_WITH_LEN("Ping"), + (char *)STRING_WITH_LEN("Time"), + (char *)STRING_WITH_LEN("Delayed insert"), + (char *)STRING_WITH_LEN("Change user"), + (char *)STRING_WITH_LEN("Binlog Dump"), + (char *)STRING_WITH_LEN("Table Dump"), + (char *)STRING_WITH_LEN("Connect Out"), + (char *)STRING_WITH_LEN("Register Slave"), + (char *)STRING_WITH_LEN("Prepare"), + (char *)STRING_WITH_LEN("Execute"), + (char *)STRING_WITH_LEN("Long Data"), + (char *)STRING_WITH_LEN("Close stmt"), + (char *)STRING_WITH_LEN("Reset stmt"), + (char *)STRING_WITH_LEN("Set option"), + (char *)STRING_WITH_LEN("Fetch"), + (char *)STRING_WITH_LEN("Daemon"), + (char *)STRING_WITH_LEN("Error") // Last command number }; const char *xa_state_names[]={ @@ -322,7 +346,7 @@ int check_user(THD *thd, enum enum_server_command command, if (opt_secure_auth_local && passwd_len == SCRAMBLE_LENGTH_323) { net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); - mysql_log.write(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); + general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN(-1); } if (passwd_len != 0 && @@ -356,9 +380,9 @@ int check_user(THD *thd, enum enum_server_command command, net_printf_error(thd, ER_SERVER_IS_IN_SECURE_AUTH_MODE, thd->main_security_ctx.user, thd->main_security_ctx.host_or_ip); - mysql_log.write(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip); + general_log_print(thd, COM_CONNECT, ER(ER_SERVER_IS_IN_SECURE_AUTH_MODE), + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip); DBUG_RETURN(-1); } /* We have to read very specific packet size */ @@ -406,14 +430,14 @@ int check_user(THD *thd, enum enum_server_command command, } /* Why logging is performed before all checks've passed? */ - mysql_log.write(thd, command, - (thd->main_security_ctx.priv_user == - thd->main_security_ctx.user ? - (char*) "%s@%s on %s" : - (char*) "%s@%s as anonymous on %s"), - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip, - db ? db : (char*) ""); + general_log_print(thd, command, + (thd->main_security_ctx.priv_user == + thd->main_security_ctx.user ? + (char*) "%s@%s on %s" : + (char*) "%s@%s as anonymous on %s"), + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, + db ? db : (char*) ""); /* This is the default access rights for the current database. 
It's @@ -460,17 +484,17 @@ int check_user(THD *thd, enum enum_server_command command, else if (res == 2) // client gave short hash, server has long hash { net_printf_error(thd, ER_NOT_SUPPORTED_AUTH_MODE); - mysql_log.write(thd,COM_CONNECT,ER(ER_NOT_SUPPORTED_AUTH_MODE)); + general_log_print(thd, COM_CONNECT, ER(ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN(-1); } net_printf_error(thd, ER_ACCESS_DENIED_ERROR, thd->main_security_ctx.user, thd->main_security_ctx.host_or_ip, passwd_len ? ER(ER_YES) : ER(ER_NO)); - mysql_log.write(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), - thd->main_security_ctx.user, - thd->main_security_ctx.host_or_ip, - passwd_len ? ER(ER_YES) : ER(ER_NO)); + general_log_print(thd, COM_CONNECT, ER(ER_ACCESS_DENIED_ERROR), + thd->main_security_ctx.user, + thd->main_security_ctx.host_or_ip, + passwd_len ? ER(ER_YES) : ER(ER_NO)); DBUG_RETURN(-1); #endif /* NO_EMBEDDED_ACCESS_CHECKS */ } @@ -1570,7 +1594,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, packet, strlen(packet), thd->charset()); if (!mysql_change_db(thd, tmp.str, FALSE)) { - mysql_log.write(thd,command,"%s",thd->db); + general_log_print(thd, command, "%s",thd->db); send_ok(thd); } break; @@ -1703,7 +1727,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (alloc_query(thd, packet, packet_length)) break; // fatal error is set char *packet_end= thd->query + thd->query_length; - mysql_log.write(thd,command,"%s",thd->query); + general_log_print(thd, command, "%s", thd->query); DBUG_PRINT("query",("%-.4096s",thd->query)); if (!(specialflag & SPECIAL_NO_PRIOR)) @@ -1717,13 +1741,10 @@ bool dispatch_command(enum enum_server_command command, THD *thd, net->no_send_error= 0; /* Multiple queries exits, execute them individually - in embedded server - just store them to be executed later */ -#ifndef EMBEDDED_LIBRARY if (thd->lock || thd->open_tables || thd->derived_tables || thd->prelocked_mode) close_thread_tables(thd); -#endif ulong length= (ulong)(packet_end-packet); log_slow_statement(thd); @@ -1741,25 +1762,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->set_time(); /* Reset the query start time. 
*/ /* TODO: set thd->lex->sql_command to SQLCOM_END here */ VOID(pthread_mutex_unlock(&LOCK_thread_count)); -#ifndef EMBEDDED_LIBRARY mysql_parse(thd, packet, length); -#else - /* - 'packet' can point inside the query_rest's buffer - so we have to do memmove here - */ - if (thd->query_rest.length() > length) - { - memmove(thd->query_rest.c_ptr(), packet, length); - thd->query_rest.length(length); - } - else - thd->query_rest.copy(packet, length, thd->query_rest.charset()); - - thd->server_status&= ~ (SERVER_QUERY_NO_INDEX_USED | - SERVER_QUERY_NO_GOOD_INDEX_USED); - break; -#endif /*EMBEDDED_LIBRARY*/ } if (!(specialflag & SPECIAL_NO_PRIOR)) @@ -1812,7 +1815,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, thd->query_length= strlen(packet); // for simplicity: don't optimize if (!(thd->query=fields=thd->memdup(packet,thd->query_length+1))) break; - mysql_log.write(thd,command,"%s %s",table_list.table_name, fields); + general_log_print(thd, command, "%s %s", table_list.table_name, fields); if (lower_case_table_names) my_casedn_str(files_charset_info, table_list.table_name); remove_escape(table_list.table_name); // This can't have wildcards @@ -1841,7 +1844,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_QUIT: /* We don't calculate statistics for this command */ - mysql_log.write(thd,command,NullS); + general_log_print(thd, command, NullS); net->error=0; // Don't give 'abort' message error=TRUE; // End server break; @@ -1861,7 +1864,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, } if (check_access(thd,CREATE_ACL,db,0,1,0,is_schema_db(db))) break; - mysql_log.write(thd,command,packet); + general_log_print(thd, command, packet); bzero(&create_info, sizeof(create_info)); mysql_create_db(thd, (lower_case_table_names == 2 ? 
alias : db), &create_info, 0); @@ -1886,7 +1889,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ER(ER_LOCK_OR_ACTIVE_TRANSACTION), MYF(0)); break; } - mysql_log.write(thd,command,db); + general_log_print(thd, command, db); mysql_rm_db(thd, db, 0, 0); break; } @@ -1910,7 +1913,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, kill_zombie_dump_threads(slave_server_id); thd->server_id = slave_server_id; - mysql_log.write(thd, command, "Log: '%s' Pos: %ld", packet+10, + general_log_print(thd, command, "Log: '%s' Pos: %ld", packet+10, (long) pos); mysql_binlog_send(thd, thd->strdup(packet + 10), (my_off_t) pos, flags); unregister_slave(thd,1,1); @@ -1928,7 +1931,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, ulong options= (ulong) (uchar) packet[0]; if (check_global_access(thd,RELOAD_ACL)) break; - mysql_log.write(thd,command,NullS); + general_log_print(thd, command, NullS); if (!reload_acl_and_cache(thd, options, (TABLE_LIST*) 0, ¬_used)) send_ok(thd); break; @@ -1956,7 +1959,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; } DBUG_PRINT("quit",("Got shutdown command for level %u", level)); - mysql_log.write(thd,command,NullS); + general_log_print(thd, command, NullS); send_eof(thd); #ifdef __WIN__ sleep(1); // must wait after eof() @@ -1973,7 +1976,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, #endif case COM_STATISTICS: { - mysql_log.write(thd,command,NullS); + general_log_print(thd, command, NullS); statistic_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS], &LOCK_status); #ifndef EMBEDDED_LIBRARY @@ -2013,7 +2016,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (!thd->security_ctx->priv_user[0] && check_global_access(thd, PROCESS_ACL)) break; - mysql_log.write(thd,command,NullS); + general_log_print(thd, command, NullS); mysqld_list_processes(thd, thd->security_ctx->master_access & PROCESS_ACL ? NullS : thd->security_ctx->priv_user, 0); @@ -2050,7 +2053,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, if (check_global_access(thd, SUPER_ACL)) break; /* purecov: inspected */ mysql_print_status(); - mysql_log.write(thd,command,NullS); + general_log_print(thd, command, NullS); send_eof(thd); break; case COM_SLEEP: @@ -2132,7 +2135,7 @@ void log_slow_statement(THD *thd) (specialflag & SPECIAL_LOG_QUERIES_NOT_USING_INDEXES))) { thd->status_var.long_query_count++; - mysql_slow_log.write(thd, thd->query, thd->query_length, start_of_query); + slow_log_print(thd, thd->query, thd->query_length, start_of_query); } } } @@ -4301,10 +4304,8 @@ end_with_restore_list: goto error; } -#ifndef EMBEDDED_LIBRARY my_bool nsok= thd->net.no_send_ok; thd->net.no_send_ok= TRUE; -#endif if (sp->m_flags & sp_head::MULTI_RESULTS) { if (! 
(thd->client_capabilities & CLIENT_MULTI_RESULTS)) @@ -4314,9 +4315,7 @@ end_with_restore_list: back */ my_error(ER_SP_BADSELECT, MYF(0), sp->m_qname.str); -#ifndef EMBEDDED_LIBRARY thd->net.no_send_ok= nsok; -#endif goto error; } /* @@ -4333,18 +4332,14 @@ end_with_restore_list: sp->m_db.str, sp->m_name.str, TRUE, 0) || sp_change_security_context(thd, sp, &save_ctx)) { -#ifndef EMBEDDED_LIBRARY thd->net.no_send_ok= nsok; -#endif goto error; } if (save_ctx && check_routine_access(thd, EXECUTE_ACL, sp->m_db.str, sp->m_name.str, TRUE, 0)) { -#ifndef EMBEDDED_LIBRARY thd->net.no_send_ok= nsok; -#endif sp_restore_security_context(thd, save_ctx); goto error; } @@ -4376,9 +4371,7 @@ end_with_restore_list: sp_restore_security_context(thd, save_ctx); #endif -#ifndef EMBEDDED_LIBRARY thd->net.no_send_ok= nsok; -#endif thd->server_status&= ~bits_to_be_cleared; if (!res) @@ -6541,7 +6534,8 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, { /* Flush the normal query log, the update log, the binary log, - the slow query log, and the relay log (if it exists). + the slow query log, the relay log (if it exists) and the log + tables. */ /* @@ -6551,14 +6545,16 @@ bool reload_acl_and_cache(THD *thd, ulong options, TABLE_LIST *tables, than it would help them) */ tmp_write_to_binlog= 0; - mysql_log.new_file(1); - mysql_slow_log.new_file(1); mysql_bin_log.rotate_and_purge(RP_FORCE_ROTATE); #ifdef HAVE_REPLICATION pthread_mutex_lock(&LOCK_active_mi); rotate_relay_log(active_mi); pthread_mutex_unlock(&LOCK_active_mi); #endif + + /* flush slow and general logs */ + logger.flush_logs(thd); + if (ha_flush_logs(NULL)) result=1; if (flush_error_log()) diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 20d14f5f196..2d1854d9520 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -62,34 +62,48 @@ static const char *begin_paren_str= "("; static const char *comma_str= ","; static char buff[22]; -bool get_partition_id_list(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_range(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_hash_nosub(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_key_nosub(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_linear_hash_nosub(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_linear_key_nosub(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_range_sub_hash(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_range_sub_key(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_range_sub_linear_hash(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_range_sub_linear_key(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_list_sub_hash(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_list_sub_key(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_list_sub_linear_hash(partition_info *part_info, - uint32 *part_id); -bool get_partition_id_list_sub_linear_key(partition_info *part_info, - uint32 *part_id); +int get_partition_id_list(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_range(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_hash_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_key_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int 
get_partition_id_linear_hash_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_linear_key_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_range_sub_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_range_sub_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_range_sub_linear_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_range_sub_linear_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_list_sub_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_list_sub_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_list_sub_linear_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value); +int get_partition_id_list_sub_linear_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value); uint32 get_partition_id_hash_sub(partition_info *part_info); uint32 get_partition_id_key_sub(partition_info *part_info); uint32 get_partition_id_linear_hash_sub(partition_info *part_info); @@ -100,12 +114,15 @@ uint32 get_partition_id_linear_key_sub(partition_info *part_info); /* A routine used by the parser to decide whether we are specifying a full partitioning or if only partitions to add or to split. + SYNOPSIS is_partition_management() lex Reference to the lex object + RETURN VALUE TRUE Yes, it is part of a management partition command FALSE No, not a management partition command + DESCRIPTION This needs to be outside of WITH_PARTITION_STORAGE_ENGINE since it is used from the sql parser that doesn't have any #ifdef's @@ -115,31 +132,34 @@ my_bool is_partition_management(LEX *lex) { return (lex->sql_command == SQLCOM_ALTER_TABLE && (lex->alter_info.flags == ALTER_ADD_PARTITION || - lex->alter_info.flags == ALTER_REORGANISE_PARTITION)); + lex->alter_info.flags == ALTER_REORGANIZE_PARTITION)); } #ifdef WITH_PARTITION_STORAGE_ENGINE /* - A support function to check if a partition name is in a list of strings + A support function to check if a name is in a list of strings + SYNOPSIS - is_partition_in_list() - part_name String searched for - list_part_names A list of names searched in + is_name_in_list() + name String searched for + list_names A list of names searched in + RETURN VALUES TRUE String found FALSE String not found */ -bool is_partition_in_list(char *part_name, - List list_part_names) +bool is_name_in_list(char *name, + List list_names) { - List_iterator part_names_it(list_part_names); - uint no_names= list_part_names.elements; + List_iterator names_it(list_names); + uint no_names= list_names.elements; uint i= 0; + do { - char *list_name= part_names_it++; - if (!(my_strcasecmp(system_charset_info, part_name, list_name))) + char *list_name= names_it++; + if (!(my_strcasecmp(system_charset_info, name, list_name))) return TRUE; } while (++i < no_names); return FALSE; @@ -149,47 +169,99 @@ bool is_partition_in_list(char *part_name, /* A support function to check partition names for duplication in a partitioned table + SYNOPSIS - is_partitions_in_table() + are_partitions_in_table() new_part_info New partition info old_part_info Old partition info + RETURN VALUES TRUE Duplicate names found FALSE Duplicate names not found + DESCRIPTION Can handle that the new and old parts are the same in which case it 
checks that the list of names in the partitions doesn't contain any duplicated names. */ -bool is_partitions_in_table(partition_info *new_part_info, - partition_info *old_part_info) +char *are_partitions_in_table(partition_info *new_part_info, + partition_info *old_part_info) { - uint no_new_parts= new_part_info->partitions.elements, new_count; - uint no_old_parts= old_part_info->partitions.elements, old_count; + uint no_new_parts= new_part_info->partitions.elements; + uint no_old_parts= old_part_info->partitions.elements; + uint new_count, old_count; List_iterator new_parts_it(new_part_info->partitions); - bool same_part_info= (new_part_info == old_part_info); - DBUG_ENTER("is_partitions_in_table"); + bool is_same_part_info= (new_part_info == old_part_info); + DBUG_ENTER("are_partitions_in_table"); + DBUG_PRINT("enter", ("%u", no_new_parts)); new_count= 0; do { List_iterator old_parts_it(old_part_info->partitions); char *new_name= (new_parts_it++)->partition_name; + DBUG_PRINT("info", ("%s", new_name)); new_count++; old_count= 0; do { char *old_name= (old_parts_it++)->partition_name; old_count++; - if (same_part_info && old_count == new_count) + if (is_same_part_info && old_count == new_count) break; if (!(my_strcasecmp(system_charset_info, old_name, new_name))) { - DBUG_RETURN(TRUE); + DBUG_PRINT("info", ("old_name = %s, not ok", old_name)); + DBUG_RETURN(old_name); } } while (old_count < no_old_parts); } while (new_count < no_new_parts); + DBUG_RETURN(NULL); +} + +/* + Set-up defaults for partitions. + + SYNOPSIS + partition_default_handling() + table Table object + table_name Table name to use when getting no_parts + db_name Database name to use when getting no_parts + part_info Partition info to set up + + RETURN VALUES + TRUE Error + FALSE Success +*/ + +bool partition_default_handling(TABLE *table, partition_info *part_info) +{ + DBUG_ENTER("partition_default_handling"); + + if (part_info->use_default_no_partitions) + { + if (table->file->get_no_parts(table->s->normalized_path.str, + &part_info->no_parts)) + { + DBUG_RETURN(TRUE); + } + } + else if (is_sub_partitioned(part_info) && + part_info->use_default_no_subpartitions) + { + uint no_parts; + if (table->file->get_no_parts(table->s->normalized_path.str, + &no_parts)) + { + DBUG_RETURN(TRUE); + } + DBUG_ASSERT(part_info->no_parts > 0); + part_info->no_subparts= no_parts / part_info->no_parts; + DBUG_ASSERT((no_parts % part_info->no_parts) == 0); + } + set_up_defaults_for_partitioning(part_info, table->file, + (ulonglong)0, (uint)0); DBUG_RETURN(FALSE); } @@ -240,7 +312,7 @@ bool check_reorganise_list(partition_info *new_part_info, break; if (!(my_strcasecmp(system_charset_info, old_name, new_name))) { - if (!is_partition_in_list(old_name, list_part_names)) + if (!is_name_in_list(old_name, list_part_names)) DBUG_RETURN(TRUE); } } while (old_count < no_old_parts); @@ -252,36 +324,35 @@ bool check_reorganise_list(partition_info *new_part_info, /* A useful routine used by update_row for partition handlers to calculate the partition ids of the old and the new record. 
+ SYNOPSIS get_part_for_update() old_data Buffer of old record new_data Buffer of new record rec0 Reference to table->record[0] part_info Reference to partition information - part_field_array A NULL-terminated array of fields for partition - function - old_part_id The returned partition id of old record - new_part_id The returned partition id of new record + out:old_part_id The returned partition id of old record + out:new_part_id The returned partition id of new record + RETURN VALUE 0 Success > 0 Error code - DESCRIPTION - Dependent on whether buf is not record[0] we need to prepare the - fields. Then we call the function pointer get_partition_id to - calculate the partition ids. */ int get_parts_for_update(const byte *old_data, byte *new_data, const byte *rec0, partition_info *part_info, - uint32 *old_part_id, uint32 *new_part_id) + uint32 *old_part_id, uint32 *new_part_id, + longlong *new_func_value) { Field **part_field_array= part_info->full_part_field_array; int error; + longlong old_func_value; DBUG_ENTER("get_parts_for_update"); - DBUG_ASSERT(new_data == rec0); + DBUG_ASSERT(new_data == rec0); set_field_ptr(part_field_array, old_data, rec0); - error= part_info->get_partition_id(part_info, old_part_id); + error= part_info->get_partition_id(part_info, old_part_id, + &old_func_value); set_field_ptr(part_field_array, rec0, old_data); if (unlikely(error)) // Should never happen { @@ -292,7 +363,9 @@ int get_parts_for_update(const byte *old_data, byte *new_data, if (new_data == rec0) #endif { - if (unlikely(error= part_info->get_partition_id(part_info,new_part_id))) + if (unlikely(error= part_info->get_partition_id(part_info, + new_part_id, + new_func_value))) { DBUG_RETURN(error); } @@ -306,7 +379,8 @@ int get_parts_for_update(const byte *old_data, byte *new_data, condition is false in one test situation before pushing the code. */ set_field_ptr(part_field_array, new_data, rec0); - error= part_info->get_partition_id(part_info, new_part_id); + error= part_info->get_partition_id(part_info, new_part_id, + new_func_value); set_field_ptr(part_field_array, rec0, new_data); if (unlikely(error)) { @@ -321,17 +395,18 @@ int get_parts_for_update(const byte *old_data, byte *new_data, /* A useful routine used by delete_row for partition handlers to calculate the partition id. + SYNOPSIS get_part_for_delete() buf Buffer of old record rec0 Reference to table->record[0] part_info Reference to partition information - part_field_array A NULL-terminated array of fields for partition - function - part_id The returned partition id to delete from + out:part_id The returned partition id to delete from + RETURN VALUE 0 Success > 0 Error code + DESCRIPTION Dependent on whether buf is not record[0] we need to prepare the fields. 
Then we call the function pointer get_partition_id to @@ -342,11 +417,13 @@ int get_part_for_delete(const byte *buf, const byte *rec0, partition_info *part_info, uint32 *part_id) { int error; + longlong func_value; DBUG_ENTER("get_part_for_delete"); if (likely(buf == rec0)) { - if (unlikely((error= part_info->get_partition_id(part_info, part_id)))) + if (unlikely((error= part_info->get_partition_id(part_info, part_id, + &func_value)))) { DBUG_RETURN(error); } @@ -356,7 +433,7 @@ int get_part_for_delete(const byte *buf, const byte *rec0, { Field **part_field_array= part_info->full_part_field_array; set_field_ptr(part_field_array, buf, rec0); - error= part_info->get_partition_id(part_info, part_id); + error= part_info->get_partition_id(part_info, part_id, &func_value); set_field_ptr(part_field_array, rec0, buf); if (unlikely(error)) { @@ -373,12 +450,15 @@ int get_part_for_delete(const byte *buf, const byte *rec0, check what partition a certain value belongs to. At the same time it does also check that the range constants are defined in increasing order and that the expressions are constant integer expressions. + SYNOPSIS check_range_constants() - part_info + part_info Partition info + RETURN VALUE TRUE An error occurred during creation of range constants FALSE Successful creation of range constant mapping + DESCRIPTION This routine is called from check_partition_info to get a quick error before we came too far into the CREATE TABLE process. It is also called @@ -389,8 +469,10 @@ int get_part_for_delete(const byte *buf, const byte *rec0, static bool check_range_constants(partition_info *part_info) { partition_element* part_def; - longlong current_largest_int= LONGLONG_MIN, part_range_value_int; - uint no_parts= part_info->no_parts, i; + longlong current_largest_int= LONGLONG_MIN; + longlong part_range_value_int; + uint no_parts= part_info->no_parts; + uint i; List_iterator it(part_info->partitions); bool result= TRUE; DBUG_ENTER("check_range_constants"); @@ -401,7 +483,7 @@ static bool check_range_constants(partition_info *part_info) (longlong*)sql_alloc(no_parts * sizeof(longlong)); if (unlikely(part_info->range_int_array == NULL)) { - my_error(ER_OUTOFMEMORY, MYF(0), no_parts*sizeof(longlong)); + mem_alloc_error(no_parts * sizeof(longlong)); goto end; } i= 0; @@ -432,10 +514,12 @@ end: /* A support routine for check_list_constants used by qsort to sort the constant list expressions. + SYNOPSIS list_part_cmp() a First list constant to compare with b Second list constant to compare with + RETURN VALUE +1 a > b 0 a == b @@ -444,9 +528,8 @@ end: static int list_part_cmp(const void* a, const void* b) { - longlong a1, b1; - a1= ((LIST_PART_ENTRY*)a)->list_value; - b1= ((LIST_PART_ENTRY*)b)->list_value; + longlong a1= ((LIST_PART_ENTRY*)a)->list_value; + longlong b1= ((LIST_PART_ENTRY*)b)->list_value; if (a1 < b1) return -1; else if (a1 > b1) @@ -461,12 +544,15 @@ static int list_part_cmp(const void* a, const void* b) check what partition a certain value belongs to. At the same time it does also check that there are no duplicates among the list constants and that that the list expressions are constant integer expressions. + SYNOPSIS check_list_constants() - part_info + part_info Partition info + RETURN VALUE TRUE An error occurred during creation of list constants FALSE Successful creation of list constant mapping + DESCRIPTION This routine is called from check_partition_info to get a quick error before we came too far into the CREATE TABLE process. 
It is also called @@ -476,9 +562,12 @@ static int list_part_cmp(const void* a, const void* b) static bool check_list_constants(partition_info *part_info) { - uint i, no_list_values= 0, no_parts, list_index= 0; + uint i, no_parts; + uint no_list_values= 0; + uint list_index= 0; longlong *list_value; - bool not_first, result= TRUE; + bool not_first; + bool result= TRUE; longlong curr_value, prev_value; partition_element* part_def; List_iterator list_func_it(part_info->partitions); @@ -516,7 +605,7 @@ static bool check_list_constants(partition_info *part_info) (LIST_PART_ENTRY*)sql_alloc(no_list_values*sizeof(LIST_PART_ENTRY)); if (unlikely(part_info->list_array == NULL)) { - my_error(ER_OUTOFMEMORY, MYF(0), no_list_values*sizeof(LIST_PART_ENTRY)); + mem_alloc_error(no_list_values * sizeof(LIST_PART_ENTRY)); goto end; } @@ -560,12 +649,16 @@ end: /* Create a memory area where default partition names are stored and fill it up with the names. + SYNOPSIS create_default_partition_names() no_parts Number of partitions + start_no Starting partition number subpart Is it subpartitions + RETURN VALUE A pointer to the memory area of the default partition names + DESCRIPTION A support routine for the partition code where default values are generated. @@ -575,17 +668,18 @@ end: #define MAX_PART_NAME_SIZE 8 static char *create_default_partition_names(uint no_parts, uint start_no, - bool subpart) + bool is_subpart) { char *ptr= sql_calloc(no_parts*MAX_PART_NAME_SIZE); char *move_ptr= ptr; uint i= 0; DBUG_ENTER("create_default_partition_names"); + if (likely(ptr != 0)) { do { - if (subpart) + if (is_subpart) my_sprintf(move_ptr, (move_ptr,"sp%u", (start_no + i))); else my_sprintf(move_ptr, (move_ptr,"p%u", (start_no + i))); @@ -594,7 +688,7 @@ static char *create_default_partition_names(uint no_parts, uint start_no, } else { - my_error(ER_OUTOFMEMORY, MYF(0), no_parts*MAX_PART_NAME_SIZE); + mem_alloc_error(no_parts*MAX_PART_NAME_SIZE); } DBUG_RETURN(ptr); } @@ -604,14 +698,18 @@ static char *create_default_partition_names(uint no_parts, uint start_no, Set up all the default partitions not set-up by the user in the SQL statement. Also perform a number of checks that the user hasn't tried to use default values where no defaults exists. + SYNOPSIS set_up_default_partitions() part_info The reference to all partition information file A reference to a handler of the table max_rows Maximum number of rows stored in the table + start_no Starting partition number + RETURN VALUE TRUE Error, attempted default values not possible FALSE Ok, default partitions set-up + DESCRIPTION The routine uses the underlying handler of the partitioning to define the default number of partitions. 
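check_list_constants() above leans on qsort() and the list_part_cmp() comparator: once the (value, partition) pairs are sorted, duplicate list values are adjacent and one pass rejects them, and the sorted array then doubles as the binary-search structure for get_partition_id_list(). A standalone sketch with an illustrative ListEntry in place of LIST_PART_ENTRY:

#include <cstdio>
#include <cstdlib>

struct ListEntry { long long value; unsigned part_id; };

static int cmp_entry(const void *a, const void *b)   /* cf. list_part_cmp() */
{
  long long a1= ((const ListEntry*) a)->value;
  long long b1= ((const ListEntry*) b)->value;
  return a1 < b1 ? -1 : (a1 > b1 ? 1 : 0);
}

int main()
{
  /* PARTITION p0 VALUES IN (4, 1), PARTITION p1 VALUES IN (3) */
  ListEntry arr[]= { {4, 0}, {1, 0}, {3, 1} };
  const unsigned n= 3;
  qsort(arr, n, sizeof(ListEntry), cmp_entry);
  for (unsigned i= 1; i < n; i++)
    if (arr[i].value == arr[i - 1].value)
    {
      fprintf(stderr, "duplicate list value\n"); /* reject the definition */
      return 1;
    }
  for (unsigned i= 0; i < n; i++)                /* now binary-searchable */
    printf("%lld -> p%u\n", arr[i].value, arr[i].part_id);
  return 0;
}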
For some handlers this requires @@ -643,7 +741,6 @@ static bool set_up_default_partitions(partition_info *part_info, if (part_info->no_parts == 0) part_info->no_parts= file->get_default_no_partitions(max_rows); no_parts= part_info->no_parts; - part_info->use_default_partitions= FALSE; if (unlikely(no_parts > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -657,16 +754,16 @@ static bool set_up_default_partitions(partition_info *part_info, do { partition_element *part_elem= new partition_element(); - if (likely(part_elem != 0)) + if (likely(part_elem != 0 && + (!part_info->partitions.push_back(part_elem)))) { - part_elem->engine_type= NULL; + part_elem->engine_type= part_info->default_engine_type; part_elem->partition_name= default_name; default_name+=MAX_PART_NAME_SIZE; - part_info->partitions.push_back(part_elem); } else { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element)); + mem_alloc_error(sizeof(partition_element)); goto end; } } while (++i < no_parts); @@ -680,14 +777,17 @@ end: Set up all the default subpartitions not set-up by the user in the SQL statement. Also perform a number of checks that the default partitioning becomes an allowed partitioning scheme. + SYNOPSIS set_up_default_subpartitions() part_info The reference to all partition information file A reference to a handler of the table max_rows Maximum number of rows stored in the table + RETURN VALUE TRUE Error, attempted default values not possible FALSE Ok, default partitions set-up + DESCRIPTION The routine uses the underlying handler of the partitioning to define the default number of partitions. For some handlers this requires @@ -711,7 +811,6 @@ static bool set_up_default_subpartitions(partition_info *part_info, part_info->no_subparts= file->get_default_no_partitions(max_rows); no_parts= part_info->no_parts; no_subparts= part_info->no_subparts; - part_info->use_default_subpartitions= FALSE; if (unlikely((no_parts * no_subparts) > MAX_PARTITIONS)) { my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); @@ -729,16 +828,16 @@ static bool set_up_default_subpartitions(partition_info *part_info, do { partition_element *subpart_elem= new partition_element(); - if (likely(subpart_elem != 0)) + if (likely(subpart_elem != 0 && + (!part_elem->subpartitions.push_back(subpart_elem)))) { - subpart_elem->engine_type= NULL; + subpart_elem->engine_type= part_info->default_engine_type; subpart_elem->partition_name= name_ptr; name_ptr+= MAX_PART_NAME_SIZE; - part_elem->subpartitions.push_back(subpart_elem); } else { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element)); + mem_alloc_error(sizeof(partition_element)); goto end; } } while (++j < no_subparts); @@ -750,18 +849,22 @@ end: /* - Set up defaults for partition or subpartition (cannot set-up for both, - this will return an error. + Support routine for check_partition_info + SYNOPSIS set_up_defaults_for_partitioning() part_info The reference to all partition information file A reference to a handler of the table max_rows Maximum number of rows stored in the table + start_no Starting partition number + RETURN VALUE TRUE Error, attempted default values not possible FALSE Ok, default partitions set-up + DESCRIPTION - Support routine for check_partition_info + Set up defaults for partition or subpartition (cannot set-up for both, + this will return an error. 
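create_default_partition_names() above carves a single calloc'ed block into fixed slots of MAX_PART_NAME_SIZE (8) bytes and prints "p0", "p1", ... (or "sp0", ... for subpartitions) into them. A minimal sketch of the same scheme; default_names() is a made-up name:

#include <cstdio>
#include <cstdlib>

static const unsigned SLOT= 8;                   /* cf. MAX_PART_NAME_SIZE */

static char *default_names(unsigned no_parts, unsigned start_no, bool subpart)
{
  char *buf= (char*) calloc(no_parts, SLOT);
  if (!buf)
    return nullptr;                              /* caller reports the OOM */
  for (unsigned i= 0; i < no_parts; i++)
    snprintf(buf + i * SLOT, SLOT, subpart ? "sp%u" : "p%u", start_no + i);
  return buf;
}

int main()
{
  char *names= default_names(4, 0, false);
  if (!names)
    return 1;
  for (unsigned i= 0; i < 4; i++)
    printf("%s\n", names + i * SLOT);            /* p0 p1 p2 p3 */
  free(names);
  return 0;
}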
*/ bool set_up_defaults_for_partitioning(partition_info *part_info, @@ -770,11 +873,15 @@ bool set_up_defaults_for_partitioning(partition_info *part_info, { DBUG_ENTER("set_up_defaults_for_partitioning"); - if (part_info->use_default_partitions) - DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows, - start_no)); - if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions) - DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows)); + if (!part_info->default_partitions_setup) + { + part_info->default_partitions_setup= TRUE; + if (part_info->use_default_partitions) + DBUG_RETURN(set_up_default_partitions(part_info, file, max_rows, + start_no)); + if (is_sub_partitioned(part_info) && part_info->use_default_subpartitions) + DBUG_RETURN(set_up_default_subpartitions(part_info, file, max_rows)); + } DBUG_RETURN(FALSE); } @@ -782,21 +889,22 @@ bool set_up_defaults_for_partitioning(partition_info *part_info, /* Check that all partitions use the same storage engine. This is currently a limitation in this version. + SYNOPSIS check_engine_mix() engine_array An array of engine identifiers no_parts Total number of partitions + RETURN VALUE TRUE Error, mixed engines FALSE Ok, no mixed engines + DESCRIPTION + Current check verifies only that all handlers are the same. + Later this check will be more sophisticated. */ static bool check_engine_mix(handlerton **engine_array, uint no_parts) { - /* - Current check verifies only that all handlers are the same. - Later this check will be more sophisticated. - */ uint i= 0; bool result= FALSE; DBUG_ENTER("check_engine_mix"); @@ -814,31 +922,35 @@ static bool check_engine_mix(handlerton **engine_array, uint no_parts) /* - We will check that the partition info requested is possible to set-up in - this version. This routine is an extension of the parser one could say. - If defaults were used we will generate default data structures for all - partitions. + This code is used early in the CREATE TABLE and ALTER TABLE process. + SYNOPSIS check_partition_info() part_info The reference to all partition information - db_type Default storage engine if no engine specified per - partition. file A reference to a handler of the table max_rows Maximum number of rows stored in the table + engine_type Return value for used engine in partitions + RETURN VALUE TRUE Error, something went wrong FALSE Ok, full partition data structures are now generated + DESCRIPTION - This code is used early in the CREATE TABLE and ALTER TABLE process. + We will check that the partition info requested is possible to set-up in + this version. This routine is an extension of the parser one could say. + If defaults were used we will generate default data structures for all + partitions. 
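check_engine_mix() above is, as its description says, just a uniformity scan over the handlertons collected from every partition and subpartition. A trivial standalone rendering, with an opaque pointer standing in for handlerton*:

#include <cstdio>

static bool engine_mix(void *const *engines, unsigned no_parts)
{
  for (unsigned i= 1; i < no_parts; i++)
    if (engines[i] != engines[0])
      return true;                               /* mixed engines: error */
  return false;
}

int main()
{
  int innodb, myisam;                            /* dummy engine identities */
  void *uniform[]= { &innodb, &innodb };
  void *mixed[]=   { &innodb, &myisam };
  printf("%d %d\n", (int) engine_mix(uniform, 2),
                    (int) engine_mix(mixed, 2)); /* prints: 0 1 */
  return 0;
}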
+ */ -bool check_partition_info(partition_info *part_info,handlerton *eng_type, +bool check_partition_info(partition_info *part_info,handlerton **eng_type, handler *file, ulonglong max_rows) { handlerton **engine_array= NULL; - uint part_count= 0, i, no_parts, tot_partitions; + uint part_count= 0; + uint i, no_parts, tot_partitions; bool result= TRUE; - List_iterator part_it(part_info->partitions); + char *same_name; DBUG_ENTER("check_partition_info"); if (unlikely(is_sub_partitioned(part_info) && @@ -858,9 +970,10 @@ bool check_partition_info(partition_info *part_info,handlerton *eng_type, my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); goto end; } - if (unlikely(is_partitions_in_table(part_info, part_info))) + if (((same_name= are_partitions_in_table(part_info, + part_info)))) { - my_error(ER_SAME_NAME_PARTITION, MYF(0)); + my_error(ER_SAME_NAME_PARTITION, MYF(0), same_name); goto end; } engine_array= (handlerton**)my_malloc(tot_partitions * sizeof(handlerton *), @@ -869,36 +982,44 @@ bool check_partition_info(partition_info *part_info,handlerton *eng_type, goto end; i= 0; no_parts= part_info->no_parts; - do { - partition_element *part_elem= part_it++; - if (!is_sub_partitioned(part_info)) + List_iterator part_it(part_info->partitions); + do { - if (part_elem->engine_type == NULL) - part_elem->engine_type= eng_type; - DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name)); - engine_array[part_count++]= part_elem->engine_type; - } - else - { - uint j= 0, no_subparts= part_info->no_subparts;; - List_iterator sub_it(part_elem->subpartitions); - do + partition_element *part_elem= part_it++; + if (!is_sub_partitioned(part_info)) { - part_elem= sub_it++; if (part_elem->engine_type == NULL) - part_elem->engine_type= eng_type; - DBUG_PRINT("info", ("engine = %s", part_elem->engine_type->name)); + part_elem->engine_type= part_info->default_engine_type; + DBUG_PRINT("info", ("engine = %d", + ha_legacy_type(part_elem->engine_type))); engine_array[part_count++]= part_elem->engine_type; - } while (++j < no_subparts); - } - } while (++i < part_info->no_parts); + } + else + { + uint j= 0, no_subparts= part_info->no_subparts;; + List_iterator sub_it(part_elem->subpartitions); + do + { + part_elem= sub_it++; + if (part_elem->engine_type == NULL) + part_elem->engine_type= part_info->default_engine_type; + DBUG_PRINT("info", ("engine = %u", + ha_legacy_type(part_elem->engine_type))); + engine_array[part_count++]= part_elem->engine_type; + } while (++j < no_subparts); + } + } while (++i < part_info->no_parts); + } if (unlikely(check_engine_mix(engine_array, part_count))) { my_error(ER_MIX_HANDLER_ERROR, MYF(0)); goto end; } + if (eng_type) + *eng_type= (handlerton*)engine_array[0]; + /* We need to check all constant expressions that they are of the correct type and that they are increasing for ranges and not overlapping for @@ -918,51 +1039,54 @@ end: /* - A great number of functions below here is part of the fix_partition_func - method. It is used to set up the partition structures for execution from - openfrm. It is called at the end of the openfrm when the table struct has - been set-up apart from the partition information. - It involves: - 1) Setting arrays of fields for the partition functions. 
- 2) Setting up binary search array for LIST partitioning - 3) Setting up array for binary search for RANGE partitioning - 4) Setting up key_map's to assist in quick evaluation whether one - can deduce anything from a given index of what partition to use - 5) Checking whether a set of partitions can be derived from a range on - a field in the partition function. - As part of doing this there is also a great number of error controls. - This is actually the place where most of the things are checked for - partition information when creating a table. - Things that are checked includes - 1) No NULLable fields in partition function - 2) All fields of partition function in Primary keys and unique indexes - (if not supported) - 3) No fields in partition function that are BLOB's or VARCHAR with a - collation other than the binary collation. + This method is used to set-up both partition and subpartitioning + field array and used for all types of partitioning. + It is part of the logic around fix_partition_func. - - - Create an array of partition fields (NULL terminated). Before this method - is called fix_fields or find_table_in_sef has been called to set - GET_FIXED_FIELDS_FLAG on all fields that are part of the partition - function. SYNOPSIS set_up_field_array() table TABLE object for which partition fields are set-up sub_part Is the table subpartitioned as well + RETURN VALUE TRUE Error, some field didn't meet requirements FALSE Ok, partition field array set-up + DESCRIPTION - This method is used to set-up both partition and subpartitioning - field array and used for all types of partitioning. - It is part of the logic around fix_partition_func. + + A great number of functions below here is part of the fix_partition_func + method. It is used to set up the partition structures for execution from + openfrm. It is called at the end of the openfrm when the table struct has + been set-up apart from the partition information. + It involves: + 1) Setting arrays of fields for the partition functions. + 2) Setting up binary search array for LIST partitioning + 3) Setting up array for binary search for RANGE partitioning + 4) Setting up key_map's to assist in quick evaluation whether one + can deduce anything from a given index of what partition to use + 5) Checking whether a set of partitions can be derived from a range on + a field in the partition function. + As part of doing this there is also a great number of error controls. + This is actually the place where most of the things are checked for + partition information when creating a table. + Things that are checked includes + 1) All fields of partition function in Primary keys and unique indexes + (if not supported) + + + Create an array of partition fields (NULL terminated). Before this method + is called fix_fields or find_table_in_sef has been called to set + GET_FIXED_FIELDS_FLAG on all fields that are part of the partition + function. 
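set_up_field_array(), whose body follows, uses a two-pass pattern: count the fields tagged with GET_FIXED_FIELDS_FLAG, then collect them into a NULL-terminated array so downstream code needs no separate length. A sketch with illustrative types:

#include <cstdio>
#include <cstdlib>

struct MockField { unsigned flags; const char *name; };
enum { FIXED_FLAG= 1 };                          /* cf. GET_FIXED_FIELDS_FLAG */

static MockField **collect_flagged(MockField **all)
{
  unsigned n= 0;
  for (MockField **p= all; *p; p++)
    if ((*p)->flags & FIXED_FLAG) n++;
  MockField **out= (MockField**) malloc((n + 1) * sizeof(MockField*));
  if (!out)
    return nullptr;
  unsigned i= 0;
  for (MockField **p= all; *p; p++)
    if ((*p)->flags & FIXED_FLAG) out[i++]= *p;
  out[n]= nullptr;                               /* terminator, as in the server */
  return out;
}

int main()
{
  MockField a= {FIXED_FLAG, "a"}, b= {0, "b"}, c= {FIXED_FLAG, "c"};
  MockField *all[]= { &a, &b, &c, nullptr };
  MockField **parts= collect_flagged(all);
  if (!parts)
    return 1;
  for (MockField **p= parts; *p; p++)
    printf("%s\n", (*p)->name);                  /* a c */
  free(parts);
  return 0;
}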
*/ + static bool set_up_field_array(TABLE *table, - bool sub_part) + bool is_sub_part) { Field **ptr, *field, **field_array; - uint no_fields= 0, size_field_array, i= 0; + uint no_fields= 0; + uint size_field_array; + uint i= 0; partition_info *part_info= table->part_info; int result= FALSE; DBUG_ENTER("set_up_field_array"); @@ -973,11 +1097,19 @@ static bool set_up_field_array(TABLE *table, if (field->flags & GET_FIXED_FIELDS_FLAG) no_fields++; } + if (no_fields == 0) + { + /* + We are using hidden key as partitioning field + */ + DBUG_ASSERT(!is_sub_part); + DBUG_RETURN(result); + } size_field_array= (no_fields+1)*sizeof(Field*); field_array= (Field**)sql_alloc(size_field_array); if (unlikely(!field_array)) { - my_error(ER_OUTOFMEMORY, MYF(0), size_field_array); + mem_alloc_error(size_field_array); result= TRUE; } ptr= table->field; @@ -997,11 +1129,6 @@ static bool set_up_field_array(TABLE *table, 1) Not be a BLOB of any type A BLOB takes too long time to evaluate so we don't want it for performance reasons. - 2) Not be a VARCHAR other than VARCHAR with a binary collation - A VARCHAR with character sets can have several values being - equal with different number of spaces or NULL's. This is not a - good ground for a safe and exact partition function. Thus it is - not allowed in partition functions. */ if (unlikely(field->flags & BLOB_FLAG)) @@ -1009,17 +1136,11 @@ static bool set_up_field_array(TABLE *table, my_error(ER_BLOB_FIELD_IN_PART_FUNC_ERROR, MYF(0)); result= TRUE; } - else if (unlikely((!field->flags & BINARY_FLAG) && - field->real_type() == MYSQL_TYPE_VARCHAR)) - { - my_error(ER_CHAR_SET_IN_PART_FIELD_ERROR, MYF(0)); - result= TRUE; - } } } } field_array[no_fields]= 0; - if (!sub_part) + if (!is_sub_part) { part_info->part_field_array= field_array; part_info->no_part_fields= no_fields; @@ -1036,13 +1157,16 @@ static bool set_up_field_array(TABLE *table, /* Create a field array including all fields of both the partitioning and the subpartitioning functions. + SYNOPSIS create_full_part_field_array() table TABLE object for which partition fields are set-up part_info Reference to partitioning data structure + RETURN VALUE TRUE Memory allocation of field array failed FALSE Ok + DESCRIPTION If there is no subpartitioning then the same array is used as for the partitioning. Otherwise a new array is built up using the flag @@ -1075,7 +1199,7 @@ static bool create_full_part_field_array(TABLE *table, field_array= (Field**)sql_alloc(size_field_array); if (unlikely(!field_array)) { - my_error(ER_OUTOFMEMORY, MYF(0), size_field_array); + mem_alloc_error(size_field_array); result= TRUE; goto end; } @@ -1096,21 +1220,25 @@ end: /* - These support routines is used to set/reset an indicator of all fields - in a certain key. It is used in conjunction with another support routine - that traverse all fields in the PF to find if all or some fields in the - PF is part of the key. This is used to check primary keys and unique - keys involve all fields in PF (unless supported) and to derive the - key_map's used to quickly decide whether the index can be used to - derive which partitions are needed to scan. - - Clear flag GET_FIXED_FIELDS_FLAG in all fields of a key previously set by set_indicator_in_key_fields (always used in pairs). + SYNOPSIS clear_indicator_in_key_fields() key_info Reference to find the key fields + + RETURN VALUE + NONE + + DESCRIPTION + These support routines is used to set/reset an indicator of all fields + in a certain key. 
It is used in conjunction with another support routine + that traverse all fields in the PF to find if all or some fields in the + PF is part of the key. This is used to check primary keys and unique + keys involve all fields in PF (unless supported) and to derive the + key_map's used to quickly decide whether the index can be used to + derive which partitions are needed to scan. */ static void clear_indicator_in_key_fields(KEY *key_info) @@ -1124,9 +1252,13 @@ static void clear_indicator_in_key_fields(KEY *key_info) /* Set flag GET_FIXED_FIELDS_FLAG in all fields of a key. + SYNOPSIS set_indicator_in_key_fields key_info Reference to find the key fields + + RETURN VALUE + NONE */ static void set_indicator_in_key_fields(KEY *key_info) @@ -1141,11 +1273,13 @@ static void set_indicator_in_key_fields(KEY *key_info) /* Check if all or some fields in partition field array is part of a key previously used to tag key fields. + SYNOPSIS check_fields_in_PF() ptr Partition field array - all_fields Is all fields of partition field array used in key - some_fields Is some fields of partition field array used in key + out:all_fields Is all fields of partition field array used in key + out:some_fields Is some fields of partition field array used in key + RETURN VALUE all_fields, some_fields */ @@ -1154,8 +1288,14 @@ static void check_fields_in_PF(Field **ptr, bool *all_fields, bool *some_fields) { DBUG_ENTER("check_fields_in_PF"); + *all_fields= TRUE; *some_fields= FALSE; + if ((!ptr) || !(*ptr)) + { + *all_fields= FALSE; + DBUG_VOID_RETURN; + } do { /* Check if the field of the PF is part of the current key investigated */ @@ -1171,9 +1311,13 @@ static void check_fields_in_PF(Field **ptr, bool *all_fields, /* Clear flag GET_FIXED_FIELDS_FLAG in all fields of the table. This routine is used for error handling purposes. + SYNOPSIS clear_field_flag() table TABLE object for which partition fields are set-up + + RETURN VALUE + NONE */ static void clear_field_flag(TABLE *table) @@ -1188,35 +1332,42 @@ static void clear_field_flag(TABLE *table) /* - This routine sets-up the partition field array for KEY partitioning, it - also verifies that all fields in the list of fields is actually a part of - the table. + find_field_in_table_sef finds the field given its name. All fields get + GET_FIXED_FIELDS_FLAG set. + SYNOPSIS handle_list_of_fields() it A list of field names for the partition function table TABLE object for which partition fields are set-up part_info Reference to partitioning data structure sub_part Is the table subpartitioned as well + RETURN VALUE TRUE Fields in list of fields not part of table FALSE All fields ok and array created + DESCRIPTION - find_field_in_table_sef finds the field given its name. All fields get - GET_FIXED_FIELDS_FLAG set. + This routine sets-up the partition field array for KEY partitioning, it + also verifies that all fields in the list of fields is actually a part of + the table. 
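The set/clear indicator pair brackets check_fields_in_PF(): key fields are flagged, the partition-function (PF) field array is scanned for the flag, and the flag is cleared again; the patch also adds a guard for an empty PF array (the hidden-key case). A standalone sketch of the scan, with illustrative types:

#include <cstdio>

struct MockField { unsigned flags; };
enum { KEY_FLAG= 1 };

static void check_pf(MockField **pf, bool *all, bool *some)
{
  *all= true; *some= false;
  if (!pf || !*pf)                   /* no PF fields: hidden-key partitioning */
  {
    *all= false;
    return;
  }
  for (; *pf; pf++)
  {
    if ((*pf)->flags & KEY_FLAG) *some= true;
    else                         *all= false;
  }
}

int main()
{
  MockField a= {KEY_FLAG}, b= {0};
  MockField *pf[]= { &a, &b, nullptr };
  bool all, some;
  check_pf(pf, &all, &some);
  printf("all=%d some=%d\n", (int) all, (int) some);  /* all=0 some=1 */
  return 0;
}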
+ */ + static bool handle_list_of_fields(List_iterator it, TABLE *table, partition_info *part_info, - bool sub_part) + bool is_sub_part) { Field *field; bool result; char *field_name; + bool is_list_empty= TRUE; DBUG_ENTER("handle_list_of_fields"); while ((field_name= it++)) { + is_list_empty= FALSE; field= find_field_in_table_sef(table, field_name); if (likely(field != 0)) field->flags|= GET_FIXED_FIELDS_FLAG; @@ -1228,19 +1379,54 @@ static bool handle_list_of_fields(List_iterator it, goto end; } } - result= set_up_field_array(table, sub_part); + if (is_list_empty) + { + uint primary_key= table->s->primary_key; + if (primary_key != MAX_KEY) + { + uint no_key_parts= table->key_info[primary_key].key_parts, i; + /* + In the case of an empty list we use primary key as partition key. + */ + for (i= 0; i < no_key_parts; i++) + { + Field *field= table->key_info[primary_key].key_part[i].field; + field->flags|= GET_FIXED_FIELDS_FLAG; + } + } + else + { + if (table->s->db_type->partition_flags && + (table->s->db_type->partition_flags() & HA_USE_AUTO_PARTITION) && + (table->s->db_type->partition_flags() & HA_CAN_PARTITION)) + { + /* + This engine can handle automatic partitioning and there is no + primary key. In this case we rely on that the engine handles + partitioning based on a hidden key. Thus we allocate no + array for partitioning fields. + */ + DBUG_RETURN(FALSE); + } + else + { + my_error(ER_FIELD_NOT_FOUND_PART_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + } + } + result= set_up_field_array(table, is_sub_part); end: DBUG_RETURN(result); } /* - This function is used to build an array of partition fields for the - partitioning function and subpartitioning function. The partitioning - function is an item tree that must reference at least one field in the - table. This is checked first in the parser that the function doesn't - contain non-cacheable parts (like a random function) and by checking - here that the function isn't a constant function. + The function uses a new feature in fix_fields where the flag + GET_FIXED_FIELDS_FLAG is set for all fields in the item tree. + This field must always be reset before returning from the function + since it is used for other purposes as well. + SYNOPSIS fix_fields_part_func() thd The thread object @@ -1248,35 +1434,38 @@ end: func_expr The item tree reference of the partition function part_info Reference to partitioning data structure sub_part Is the table subpartitioned as well + RETURN VALUE TRUE An error occurred, something was wrong with the partition function. FALSE Ok, a partition field array was created - DESCRIPTION - The function uses a new feature in fix_fields where the flag - GET_FIXED_FIELDS_FLAG is set for all fields in the item tree. - This field must always be reset before returning from the function - since it is used for other purposes as well. -*/ -static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables, - Item* func_expr, partition_info *part_info, - bool sub_part) -{ - /* + DESCRIPTION + This function is used to build an array of partition fields for the + partitioning function and subpartitioning function. The partitioning + function is an item tree that must reference at least one field in the + table. This is checked first in the parser that the function doesn't + contain non-cacheable parts (like a random function) and by checking + here that the function isn't a constant function. + Calculate the number of fields in the partition function. Use it allocate memory for array of Field pointers. 
Initialise array of field pointers. Use information set when calling fix_fields and reset it immediately after. The get_fields_in_item_tree activates setting of bit in flags on the field object. - */ +*/ +static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables, + Item* func_expr, partition_info *part_info, + bool is_sub_part) +{ bool result= TRUE; TABLE *table= tables->table; TABLE_LIST *save_table_list, *save_first_table, *save_last_table; int error; Name_resolution_context *context; + const char *save_where; DBUG_ENTER("fix_fields_part_func"); context= thd->lex->current_context(); @@ -1289,6 +1478,7 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables, context->first_name_resolution_table= tables; context->last_name_resolution_table= NULL; func_expr->walk(&Item::change_context_processor, (byte*) context); + save_where= thd->where; thd->where= "partition function"; error= func_expr->fix_fields(thd, (Item**)0); context->table_list= save_table_list; @@ -1300,13 +1490,14 @@ static bool fix_fields_part_func(THD *thd, TABLE_LIST *tables, clear_field_flag(table); goto end; } + thd->where= save_where; if (unlikely(func_expr->const_item())) { my_error(ER_CONST_EXPR_IN_PARTITION_FUNC_ERROR, MYF(0)); clear_field_flag(table); goto end; } - result= set_up_field_array(table, sub_part); + result= set_up_field_array(table, is_sub_part); end: table->get_fields_in_item_tree= FALSE; table->map= 0; //Restore old value @@ -1315,24 +1506,30 @@ end: /* - This function verifies that if there is a primary key that it contains - all the fields of the partition function. - This is a temporary limitation that will hopefully be removed after a - while. + Check that the primary key contains all partition fields if defined + SYNOPSIS check_primary_key() table TABLE object for which partition fields are set-up + RETURN VALUES TRUE Not all fields in partitioning function was part of primary key FALSE Ok, all fields of partitioning function were part of primary key + + DESCRIPTION + This function verifies that if there is a primary key that it contains + all the fields of the partition function. + This is a temporary limitation that will hopefully be removed after a + while. */ static bool check_primary_key(TABLE *table) { uint primary_key= table->s->primary_key; - bool all_fields, some_fields, result= FALSE; + bool all_fields, some_fields; + bool result= FALSE; DBUG_ENTER("check_primary_key"); if (primary_key < MAX_KEY) @@ -1352,25 +1549,33 @@ static bool check_primary_key(TABLE *table) /* - This function verifies that if there is a unique index that it contains - all the fields of the partition function. - This is a temporary limitation that will hopefully be removed after a - while. + Check that unique keys contains all partition fields + SYNOPSIS check_unique_keys() table TABLE object for which partition fields are set-up + RETURN VALUES TRUE Not all fields in partitioning function was part of all unique keys FALSE Ok, all fields of partitioning function were part of unique keys + + DESCRIPTION + This function verifies that if there is a unique index that it contains + all the fields of the partition function. + This is a temporary limitation that will hopefully be removed after a + while. 
*/ static bool check_unique_keys(TABLE *table) { - bool all_fields, some_fields, result= FALSE; - uint keys= table->s->keys, i; + bool all_fields, some_fields; + bool result= FALSE; + uint keys= table->s->keys; + uint i; DBUG_ENTER("check_unique_keys"); + for (i= 0; i < keys; i++) { if (table->key_info[i].flags & HA_NOSAME) //Unique index @@ -1434,9 +1639,11 @@ static bool check_unique_keys(TABLE *table) indicating this to notify that we can use also ranges on the field of the PF to deduce a set of partitions if the fields of the PF were not all fully bound. + SYNOPSIS check_range_capable_PF() table TABLE object for which partition fields are set-up + DESCRIPTION Support for this is not implemented yet. */ @@ -1444,35 +1651,76 @@ static bool check_unique_keys(TABLE *table) void check_range_capable_PF(TABLE *table) { DBUG_ENTER("check_range_capable_PF"); + DBUG_VOID_RETURN; } +/* + Set up partition bitmap + + SYNOPSIS + set_up_partition_bitmap() + thd Thread object + part_info Reference to partitioning data structure + + RETURN VALUE + TRUE Memory allocation failure + FALSE Success + + DESCRIPTION + Allocate memory for bitmap of the partitioned table + and initialise it. +*/ + +static bool set_up_partition_bitmap(THD *thd, partition_info *part_info) +{ + uint32 *bitmap_buf; + uint bitmap_bits= part_info->no_subparts? + (part_info->no_subparts* part_info->no_parts): + part_info->no_parts; + uint bitmap_bytes= bitmap_buffer_size(bitmap_bits); + DBUG_ENTER("set_up_partition_bitmap"); + + if (!(bitmap_buf= (uint32*)thd->alloc(bitmap_bytes))) + { + mem_alloc_error(bitmap_bytes); + DBUG_RETURN(TRUE); + } + bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE); + DBUG_RETURN(FALSE); +} + + /* Set up partition key maps + SYNOPSIS set_up_partition_key_maps() table TABLE object for which partition fields are set-up part_info Reference to partitioning data structure + RETURN VALUES None + DESCRIPTION - This function sets up a couple of key maps to be able to quickly check - if an index ever can be used to deduce the partition fields or even - a part of the fields of the partition function. - We set up the following key_map's. - PF = Partition Function - 1) All fields of the PF is set even by equal on the first fields in the - key - 2) All fields of the PF is set if all fields of the key is set - 3) At least one field in the PF is set if all fields is set - 4) At least one field in the PF is part of the key + This function sets up a couple of key maps to be able to quickly check + if an index ever can be used to deduce the partition fields or even + a part of the fields of the partition function. + We set up the following key_map's. + PF = Partition Function + 1) All fields of the PF is set even by equal on the first fields in the + key + 2) All fields of the PF is set if all fields of the key is set + 3) At least one field in the PF is set if all fields is set + 4) At least one field in the PF is part of the key */ static void set_up_partition_key_maps(TABLE *table, partition_info *part_info) { - uint keys= table->s->keys, i; + uint keys= table->s->keys; + uint i; bool all_fields, some_fields; DBUG_ENTER("set_up_partition_key_maps"); @@ -1507,17 +1755,26 @@ static void set_up_partition_key_maps(TABLE *table, /* - Set-up all function pointers for calculation of partition id, - subpartition id and the upper part in subpartitioning. This is to speed up - execution of get_partition_id which is executed once every record to be - written and deleted and twice for updates. 
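set_up_partition_func_pointers(), continued below, trades a per-row switch on partitioning type for a single indirect call: the decision is made once at open time and get_partition_id is bound to the matching routine, which matters because it runs once for every written or deleted row and twice per update. A minimal analogue of the dispatch:

#include <cstdio>

struct PartInfo;
typedef int (*get_part_id_fn)(const PartInfo*, long long v, unsigned *part_id);

struct PartInfo { unsigned no_parts; get_part_id_fn get_partition_id; };

static int id_hash(const PartInfo *p, long long v, unsigned *out)
{
  *out= (unsigned)(((v % p->no_parts) + p->no_parts) % p->no_parts);
  return 0;
}

static int id_range_stub(const PartInfo*, long long, unsigned*)
{
  return 1;                          /* e.g. no partition found */
}

static void set_up_pointers(PartInfo *p, bool is_range)
{
  p->get_partition_id= is_range ? id_range_stub : id_hash;
}

int main()
{
  PartInfo p= { 4, nullptr };
  set_up_pointers(&p, false);        /* decided once, not per row */
  unsigned part;
  if (!p.get_partition_id(&p, -7, &part))   /* called for every row */
    printf("row -> p%u\n", part);    /* p1 */
  return 0;
}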
+ Set up function pointers for partition function + SYNOPSIS - set_up_partition_function_pointers() + set_up_partition_func_pointers() part_info Reference to partitioning data structure + + RETURN VALUE + NONE + + DESCRIPTION + Set-up all function pointers for calculation of partition id, + subpartition id and the upper part in subpartitioning. This is to speed up + execution of get_partition_id which is executed once every record to be + written and deleted and twice for updates. */ static void set_up_partition_func_pointers(partition_info *part_info) { + DBUG_ENTER("set_up_partition_func_pointers"); + if (is_sub_partitioned(part_info)) { if (part_info->part_type == RANGE_PARTITION) @@ -1550,7 +1807,7 @@ static void set_up_partition_func_pointers(partition_info *part_info) } } } - else //LIST Partitioning + else /* LIST Partitioning */ { part_info->get_part_partition_id= get_partition_id_list; if (part_info->list_of_subpart_fields) @@ -1581,7 +1838,7 @@ static void set_up_partition_func_pointers(partition_info *part_info) } } } - else //No subpartitioning + else /* No subpartitioning */ { part_info->get_part_partition_id= NULL; part_info->get_subpartition_id= NULL; @@ -1589,7 +1846,7 @@ static void set_up_partition_func_pointers(partition_info *part_info) part_info->get_partition_id= get_partition_id_range; else if (part_info->part_type == LIST_PARTITION) part_info->get_partition_id= get_partition_id_list; - else //HASH partitioning + else /* HASH partitioning */ { if (part_info->list_of_part_fields) { @@ -1607,21 +1864,27 @@ static void set_up_partition_func_pointers(partition_info *part_info) } } } + DBUG_VOID_RETURN; } /* For linear hashing we need a mask which is on the form 2**n - 1 where 2**n >= no_parts. Thus if no_parts is 6 then mask is 2**3 - 1 = 8 - 1 = 7. + SYNOPSIS set_linear_hash_mask() part_info Reference to partitioning data structure no_parts Number of parts in linear hash partitioning + + RETURN VALUE + NONE */ static void set_linear_hash_mask(partition_info *part_info, uint no_parts) { uint mask; + for (mask= 1; mask < no_parts; mask<<=1) ; part_info->linear_hash_mask= mask - 1; @@ -1631,13 +1894,16 @@ static void set_linear_hash_mask(partition_info *part_info, uint no_parts) /* This function calculates the partition id provided the result of the hash function using linear hashing parameters, mask and number of partitions. + SYNOPSIS get_part_id_from_linear_hash() hash_value Hash value calculated by HASH function or KEY function mask Mask calculated previously by set_linear_hash_mask no_parts Number of partitions in HASH partitioned part + RETURN VALUE part_id The calculated partition identity (starting at 0) + DESCRIPTION The partition is calculated according to the theory of linear hashing. See e.g. 
Linear hashing: a new tool for file and table addressing, @@ -1649,10 +1915,11 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask, uint no_parts) { uint32 part_id= (uint32)(hash_value & mask); + if (part_id >= no_parts) { uint new_mask= ((mask + 1) >> 1) - 1; - part_id= hash_value & new_mask; + part_id= (uint32)(hash_value & new_mask); } return part_id; } @@ -1665,10 +1932,12 @@ static uint32 get_part_id_from_linear_hash(longlong hash_value, uint mask, thd The thread object name The name of the partitioned table table TABLE object for which partition fields are set-up + create_table_ind Indicator of whether openfrm was called as part of + CREATE or ALTER TABLE RETURN VALUE - TRUE - FALSE + TRUE Error + FALSE Success DESCRIPTION The name parameter contains the full table name and is used to get the @@ -1683,7 +1952,8 @@ NOTES of an error that is not discovered until here. */ -bool fix_partition_func(THD *thd, const char *name, TABLE *table) +bool fix_partition_func(THD *thd, const char* name, TABLE *table, + bool is_create_table_ind) { bool result= TRUE; uint dir_length, home_dir_length; @@ -1695,6 +1965,10 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table) ulong save_set_query_id= thd->set_query_id; DBUG_ENTER("fix_partition_func"); + if (part_info->fixed) + { + DBUG_RETURN(FALSE); + } thd->set_query_id= 0; /* Set-up the TABLE_LIST object to be a list with a single table @@ -1714,6 +1988,13 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table) db_name= &db_name_string[home_dir_length]; tables.db= db_name; + if (!is_create_table_ind) + { + if (partition_default_handling(table, part_info)) + { + DBUG_RETURN(TRUE); + } + } if (is_sub_partitioned(part_info)) { DBUG_ASSERT(part_info->subpart_type == HASH_PARTITION); @@ -1810,12 +2091,16 @@ bool fix_partition_func(THD *thd, const char *name, TABLE *table) goto end; if (unlikely(check_primary_key(table))) goto end; - if (unlikely((!table->file->partition_flags() & HA_CAN_PARTITION_UNIQUE) && + if (unlikely((!(table->s->db_type->partition_flags && + (table->s->db_type->partition_flags() & HA_CAN_PARTITION_UNIQUE))) && check_unique_keys(table))) goto end; + if (unlikely(set_up_partition_bitmap(thd, part_info))) + goto end; check_range_capable_PF(table); set_up_partition_key_maps(table, part_info); set_up_partition_func_pointers(part_info); + part_info->fixed= TRUE; result= FALSE; end: thd->set_query_id= save_set_query_id; @@ -1834,6 +2119,7 @@ end: static int add_write(File fptr, const char *buf, uint len) { uint len_written= my_write(fptr, (const byte*)buf, len, MYF(0)); + if (likely(len == len_written)) return 0; else @@ -1878,6 +2164,7 @@ static int add_begin_parenthesis(File fptr) static int add_part_key_word(File fptr, const char *key_string) { int err= add_string(fptr, key_string); + err+= add_space(fptr); return err + add_begin_parenthesis(fptr); } @@ -1896,6 +2183,7 @@ static int add_partition(File fptr) static int add_subpartition(File fptr) { int err= add_string(fptr, sub_str); + return err + add_partition(fptr); } @@ -1908,6 +2196,7 @@ static int add_partition_by(File fptr) static int add_subpartition_by(File fptr) { int err= add_string(fptr, sub_str); + return err + add_partition_by(fptr); } @@ -1915,17 +2204,19 @@ static int add_key_partition(File fptr, List field_list) { uint i, no_fields; int err; + List_iterator part_it(field_list); err= add_part_key_word(fptr, partition_keywords[PKW_KEY].str); no_fields= field_list.elements; i= 0; - do + while (i < no_fields) { const char 
*field_str= part_it++; err+= add_string(fptr, field_str); if (i != (no_fields-1)) err+= add_comma(fptr); - } while (++i < no_fields); + i++; + } return err; } @@ -1939,6 +2230,7 @@ static int add_keyword_string(File fptr, const char *keyword, const char *keystr) { int err= add_string(fptr, keyword); + err+= add_space(fptr); err+= add_equal(fptr); err+= add_space(fptr); @@ -1949,6 +2241,7 @@ static int add_keyword_string(File fptr, const char *keyword, static int add_keyword_int(File fptr, const char *keyword, longlong num) { int err= add_string(fptr, keyword); + err+= add_space(fptr); err+= add_equal(fptr); err+= add_space(fptr); @@ -1959,14 +2252,15 @@ static int add_keyword_int(File fptr, const char *keyword, longlong num) static int add_engine(File fptr, handlerton *engine_type) { const char *engine_str= engine_type->name; + DBUG_PRINT("info", ("ENGINE = %s", engine_str)); int err= add_string(fptr, "ENGINE = "); return err + add_string(fptr, engine_str); - return err; } static int add_partition_options(File fptr, partition_element *p_elem) { int err= 0; + if (p_elem->tablespace_name) err+= add_keyword_string(fptr,"TABLESPACE",p_elem->tablespace_name); if (p_elem->nodegroup_id != UNDEF_NODEGROUP) @@ -1988,6 +2282,7 @@ static int add_partition_values(File fptr, partition_info *part_info, partition_element *p_elem) { int err= 0; + if (part_info->part_type == RANGE_PARTITION) { err+= add_string(fptr, "VALUES LESS THAN "); @@ -2024,16 +2319,19 @@ static int add_partition_values(File fptr, partition_info *part_info, Generate the partition syntax from the partition data structure. Useful for support of generating defaults, SHOW CREATE TABLES and easy partition management. + SYNOPSIS generate_partition_syntax() part_info The partitioning data structure buf_length A pointer to the returned buffer length use_sql_alloc Allocate buffer from sql_alloc if true otherwise use my_malloc - add_default_info Add info generated by default + write_all Write everything, also default values + RETURN VALUES NULL error buf, buf_length Buffer and its length + DESCRIPTION Here we will generate the full syntax for the given command where all defaults have been expanded. 
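The add_* writers above all follow one convention: return 0 on success, accumulate failures into err, and test once at the end rather than after every write. A compact analogue over stdio; the server writes to the temp file rather than stdout, and the sample clause is only indicative of the output shape:

#include <cstdio>

static int add_str(FILE *f, const char *s)  /* 0 on success, as add_write() */
{
  return fputs(s, f) == EOF ? 1 : 0;
}

int main()
{
  FILE *f= stdout;
  int err= 0;
  err+= add_str(f, " PARTITION BY HASH (a)");
  err+= add_str(f, " PARTITIONS 4");        /* cf. the new PARTITIONS clause */
  err+= add_str(f, "\n");
  return err ? 1 : 0;                       /* one check covers all writes */
}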
By so doing the it is also possible to @@ -2057,39 +2355,37 @@ static int add_partition_values(File fptr, partition_info *part_info, char *generate_partition_syntax(partition_info *part_info, uint *buf_length, bool use_sql_alloc, - bool add_default_info) + bool write_all) { - uint i,j, no_parts, no_subparts; + uint i,j, tot_no_parts, no_subparts, no_parts; partition_element *part_elem; + partition_element *save_part_elem= NULL; ulonglong buffer_length; char path[FN_REFLEN]; int err= 0; - DBUG_ENTER("generate_partition_syntax"); + List_iterator part_it(part_info->partitions); + List_iterator temp_it(part_info->temp_partitions); File fptr; char *buf= NULL; //Return buffer - const char *file_name; + uint use_temp= 0; + uint no_temp_parts= part_info->temp_partitions.elements; + bool write_part_state; + DBUG_ENTER("generate_partition_syntax"); - sprintf(path, "%s_%lx_%lx", "part_syntax", current_pid, - current_thd->thread_id); - fn_format(path,path,mysql_tmpdir,".psy", MY_REPLACE_EXT); - file_name= &path[0]; - DBUG_PRINT("info", ("File name = %s", file_name)); - if (unlikely(((fptr= my_open(file_name,O_CREAT|O_RDWR, MYF(MY_WME))) == -1))) + write_part_state= (part_info->part_state && !part_info->part_state_len); + if (unlikely(((fptr= create_temp_file(path,mysql_tmpdir,"psy", 0,0))) < 0)) DBUG_RETURN(NULL); -#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2) -#else - my_delete(file_name, MYF(0)); +#ifndef __WIN__ + unlink(path); #endif err+= add_space(fptr); err+= add_partition_by(fptr); switch (part_info->part_type) { case RANGE_PARTITION: - add_default_info= TRUE; err+= add_part_key_word(fptr, partition_keywords[PKW_RANGE].str); break; case LIST_PARTITION: - add_default_info= TRUE; err+= add_part_key_word(fptr, partition_keywords[PKW_LIST].str); break; case HASH_PARTITION: @@ -2111,6 +2407,13 @@ char *generate_partition_syntax(partition_info *part_info, part_info->part_func_len); err+= add_end_parenthesis(fptr); err+= add_space(fptr); + if ((!part_info->use_default_no_partitions) && + part_info->use_default_partitions) + { + err+= add_string(fptr, "PARTITIONS "); + err+= add_int(fptr, part_info->no_parts); + err+= add_space(fptr); + } if (is_sub_partitioned(part_info)) { err+= add_subpartition_by(fptr); @@ -2124,53 +2427,114 @@ char *generate_partition_syntax(partition_info *part_info, part_info->subpart_func_len); err+= add_end_parenthesis(fptr); err+= add_space(fptr); - } - if (add_default_info) - { - err+= add_begin_parenthesis(fptr); - List_iterator part_it(part_info->partitions); - no_parts= part_info->no_parts; - no_subparts= part_info->no_subparts; - i= 0; - do - { - part_elem= part_it++; - err+= add_partition(fptr); - err+= add_string(fptr, part_elem->partition_name); - err+= add_space(fptr); - err+= add_partition_values(fptr, part_info, part_elem); - if (!is_sub_partitioned(part_info)) - err+= add_partition_options(fptr, part_elem); - if (is_sub_partitioned(part_info)) + if ((!part_info->use_default_no_subpartitions) && + part_info->use_default_subpartitions) { + err+= add_string(fptr, "SUBPARTITIONS "); + err+= add_int(fptr, part_info->no_subparts); err+= add_space(fptr); - err+= add_begin_parenthesis(fptr); - List_iterator sub_it(part_elem->subpartitions); - j= 0; - do + } + } + no_parts= part_info->no_parts; + tot_no_parts= no_parts + no_temp_parts; + no_subparts= part_info->no_subparts; + + if (write_all || (!part_info->use_default_partitions)) + { + err+= add_begin_parenthesis(fptr); + i= 0; + do + { + /* + We need to do some clever list manipulation here 
since we have two + different needs for our list processing and here we take some of the + cost of using a simpler list processing for the other parts of the + code. + + ALTER TABLE REORGANIZE PARTITIONS has the list of partitions to be + the final list as the main list and the reorganised partitions is in + the temporary partition list. Thus when finding the first part added + we insert the temporary list if there is such a list. If there is no + temporary list we are performing an ADD PARTITION. + */ + if (use_temp && use_temp <= no_temp_parts) { - part_elem= sub_it++; - err+= add_subpartition(fptr); + part_elem= temp_it++; + DBUG_ASSERT(no_temp_parts); + no_temp_parts--; + } + else if (use_temp) + { + DBUG_ASSERT(no_parts); + part_elem= save_part_elem; + use_temp= 0; + no_parts--; + } + else + { + part_elem= part_it++; + if ((part_elem->part_state == PART_TO_BE_ADDED || + part_elem->part_state == PART_IS_ADDED) && no_temp_parts) + { + save_part_elem= part_elem; + part_elem= temp_it++; + no_temp_parts--; + use_temp= 1; + } + else + { + DBUG_ASSERT(no_parts); + no_parts--; + } + } + + if (part_elem->part_state != PART_IS_DROPPED) + { + if (write_part_state) + { + uint32 part_state_id= part_info->part_state_len; + part_info->part_state[part_state_id]= (uchar)part_elem->part_state; + part_info->part_state_len= part_state_id+1; + } + err+= add_partition(fptr); err+= add_string(fptr, part_elem->partition_name); err+= add_space(fptr); - err+= add_partition_options(fptr, part_elem); - if (j != (no_subparts-1)) + err+= add_partition_values(fptr, part_info, part_elem); + if (!is_sub_partitioned(part_info)) + err+= add_partition_options(fptr, part_elem); + if (is_sub_partitioned(part_info) && + (write_all || (!part_info->use_default_subpartitions))) + { + err+= add_space(fptr); + err+= add_begin_parenthesis(fptr); + List_iterator sub_it(part_elem->subpartitions); + j= 0; + do + { + part_elem= sub_it++; + err+= add_subpartition(fptr); + err+= add_string(fptr, part_elem->partition_name); + err+= add_space(fptr); + err+= add_partition_options(fptr, part_elem); + if (j != (no_subparts-1)) + { + err+= add_comma(fptr); + err+= add_space(fptr); + } + else + err+= add_end_parenthesis(fptr); + } while (++j < no_subparts); + } + if (i != (tot_no_parts-1)) { err+= add_comma(fptr); err+= add_space(fptr); } - else - err+= add_end_parenthesis(fptr); - } while (++j < no_subparts); - } - if (i != (no_parts-1)) - { - err+= add_comma(fptr); - err+= add_space(fptr); - } - else - err+= add_end_parenthesis(fptr); - } while (++i < no_parts); + } + if (i == (tot_no_parts-1)) + err+= add_end_parenthesis(fptr); + } while (++i < tot_no_parts); + DBUG_ASSERT(!no_parts && !no_temp_parts); } if (err) goto close_file; @@ -2198,19 +2562,7 @@ char *generate_partition_syntax(partition_info *part_info, buf[*buf_length]= 0; close_file: - /* - Delete the file before closing to ensure the file doesn't get synched - to disk unnecessary. We only used the file system as a dynamic array - implementation so we are not really interested in getting the file - present on disk. - This is not possible on Windows so here it has to be done after closing - the file. Also on Unix we delete immediately after opening to ensure no - other process can read the information written into the file. 
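The switch to create_temp_file() plus an immediate unlink() on non-Windows platforms replaces the removed open/delete dance described above: once unlinked, the file is invisible to other processes and its storage vanishes at close, so it can serve as a throwaway append-only buffer. A POSIX-only sketch of the idiom, with mkstemp standing in for create_temp_file:

#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <unistd.h>

int main()
{
  char path[]= "/tmp/psyXXXXXX";
  int fd= mkstemp(path);
  if (fd < 0)
    return 1;
  unlink(path);                        /* private now; freed on close */

  const char *syntax= " PARTITION BY HASH (a) PARTITIONS 4";
  if (write(fd, syntax, strlen(syntax)) < 0)     /* append-only "array" */
    return 1;

  off_t len= lseek(fd, 0, SEEK_END);   /* then slurp it back into memory */
  char *buf= (char*) malloc(len + 1);
  lseek(fd, 0, SEEK_SET);
  if (read(fd, buf, len) != len)
    return 1;
  buf[len]= 0;
  printf("%s\n", buf);
  free(buf);
  close(fd);                           /* no my_delete() needed anywhere */
  return 0;
}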
- */ my_close(fptr, MYF(0)); -#if defined(MSDOS) || defined(__WIN__) || defined(__EMX__) || defined(OS2) - my_delete(file_name, MYF(0)); -#endif DBUG_RETURN(buf); } @@ -2218,10 +2570,12 @@ close_file: /* Check if partition key fields are modified and if it can be handled by the underlying storage engine. + SYNOPSIS partition_key_modified table TABLE object for which partition fields are set-up fields A list of the fields to be modified + RETURN VALUES TRUE Need special handling of UPDATE FALSE Normal UPDATE handling is ok */ bool partition_key_modified(TABLE *table, List<Item> &fields) { List_iterator_fast<Item> f(fields); partition_info *part_info= table->part_info; Item_field *item_field; DBUG_ENTER("partition_key_modified"); + if (!part_info) DBUG_RETURN(FALSE); - if (table->file->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY) + if (table->s->db_type->partition_flags && + (table->s->db_type->partition_flags() & HA_CAN_UPDATE_PARTITION_KEY)) DBUG_RETURN(FALSE); f.rewind(); while ((item_field=(Item_field*) f++)) @@ -2265,11 +2621,14 @@ bool partition_key_modified(TABLE *table, List<Item> &fields) /* Calculate hash value for KEY partitioning using an array of fields. + SYNOPSIS calculate_key_value() field_array An array of the fields in KEY partitioning + RETURN VALUE hash_value calculated + DESCRIPTION Uses the hash function on the character set of the field. Integer and floating point fields use the binary character set by default. @@ -2279,6 +2638,7 @@ static uint32 calculate_key_value(Field **field_array) { uint32 hashnr= 0; ulong nr2= 4; + do { Field *field= *field_array; @@ -2302,6 +2662,7 @@ /* A simple support function to calculate part_id given local part and sub part. + SYNOPSIS get_part_id_for_sub() loc_part_id Local partition id sub_part_id Subpartition id no_subparts Number of subpartitions @@ -2319,32 +2680,40 @@ /* Calculate part_id for (SUB)PARTITION BY HASH + SYNOPSIS get_part_id_hash() no_parts Number of hash partitions part_expr Item tree of hash function + out:func_value Value of hash function + RETURN VALUE Calculated partition id */ inline static uint32 get_part_id_hash(uint no_parts, - Item *part_expr) + Item *part_expr, + longlong *func_value) { DBUG_ENTER("get_part_id_hash"); - longlong int_hash_id= part_expr->val_int() % no_parts; + *func_value= part_expr->val_int(); + longlong int_hash_id= *func_value % no_parts; DBUG_RETURN(int_hash_id < 0 ?
-int_hash_id : int_hash_id); } /* Calculate part_id for (SUB)PARTITION BY LINEAR HASH + SYNOPSIS get_part_id_linear_hash() part_info A reference to the partition_info struct where all the desired information is given no_parts Number of hash partitions part_expr Item tree of hash function + out:func_value Value of hash function + RETURN VALUE Calculated partition id */ @@ -2352,10 +2721,13 @@ static uint32 get_part_id_hash(uint no_parts, inline static uint32 get_part_id_linear_hash(partition_info *part_info, uint no_parts, - Item *part_expr) + Item *part_expr, + longlong *func_value) { DBUG_ENTER("get_part_id_linear_hash"); - DBUG_RETURN(get_part_id_from_linear_hash(part_expr->val_int(), + + *func_value= part_expr->val_int(); + DBUG_RETURN(get_part_id_from_linear_hash(*func_value, part_info->linear_hash_mask, no_parts)); } @@ -2363,31 +2735,37 @@ static uint32 get_part_id_linear_hash(partition_info *part_info, /* Calculate part_id for (SUB)PARTITION BY KEY + SYNOPSIS get_part_id_key() field_array Array of fields for PARTTION KEY no_parts Number of KEY partitions + RETURN VALUE Calculated partition id */ inline static uint32 get_part_id_key(Field **field_array, - uint no_parts) + uint no_parts, + longlong *func_value) { DBUG_ENTER("get_part_id_key"); - DBUG_RETURN(calculate_key_value(field_array) % no_parts); + *func_value= calculate_key_value(field_array); + DBUG_RETURN(*func_value % no_parts); } /* Calculate part_id for (SUB)PARTITION BY LINEAR KEY + SYNOPSIS get_part_id_linear_key() part_info A reference to the partition_info struct where all the desired information is given field_array Array of fields for PARTTION KEY no_parts Number of KEY partitions + RETURN VALUE Calculated partition id */ @@ -2395,10 +2773,13 @@ static uint32 get_part_id_key(Field **field_array, inline static uint32 get_part_id_linear_key(partition_info *part_info, Field **field_array, - uint no_parts) + uint no_parts, + longlong *func_value) { DBUG_ENTER("get_partition_id_linear_key"); - DBUG_RETURN(get_part_id_from_linear_hash(calculate_key_value(field_array), + + *func_value= calculate_key_value(field_array); + DBUG_RETURN(get_part_id_from_linear_hash(*func_value, part_info->linear_hash_mask, no_parts)); } @@ -2407,15 +2788,18 @@ static uint32 get_part_id_linear_key(partition_info *part_info, This function is used to calculate the partition id where all partition fields have been prepared to point to a record where the partition field values are bound. + SYNOPSIS get_partition_id() part_info A reference to the partition_info struct where all the desired information is given - part_id The partition id is returned through this pointer + out:part_id The partition id is returned through this pointer + RETURN VALUE part_id return TRUE means that the fields of the partition function didn't fit into any partition and thus the values of the PF-fields are not allowed. + DESCRIPTION A routine used from write_row, update_row and delete_row from any handler supporting partitioning. It is also a support routine for @@ -2445,15 +2829,18 @@ static uint32 get_part_id_linear_key(partition_info *part_info, This function is used to calculate the main partition to use in the case of subpartitioning and we don't know enough to get the partition identity in total. 
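Putting set_linear_hash_mask() and get_part_id_from_linear_hash() together: the mask is the smallest 2**n - 1 with 2**n >= no_parts (so no_parts = 6 gives 7), and any id that lands beyond the last real partition is folded back with the half-sized mask. A worked standalone version:

#include <cstdio>

static unsigned linear_hash_mask(unsigned no_parts)
{
  unsigned mask;
  for (mask= 1; mask < no_parts; mask<<= 1)
    ;
  return mask - 1;                   /* 6 partitions -> 8 - 1 = 7 */
}

static unsigned linear_hash_id(long long hash, unsigned mask,
                               unsigned no_parts)
{
  unsigned part_id= (unsigned)(hash & mask);
  if (part_id >= no_parts)           /* past the last real partition: fold */
    part_id= (unsigned)(hash & (((mask + 1) >> 1) - 1));
  return part_id;
}

int main()
{
  unsigned mask= linear_hash_mask(6);
  printf("mask=%u\n", mask);         /* 7 */
  for (long long h= 0; h < 8; h++)   /* hashes 6 and 7 fold to p2 and p3 */
    printf("hash %lld -> p%u\n", h, linear_hash_id(h, mask, 6));
  return 0;
}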
+ SYNOPSIS get_part_partition_id() part_info A reference to the partition_info struct where all the desired information is given - part_id The partition id is returned through this pointer + out:part_id The partition id is returned through this pointer + RETURN VALUE part_id return TRUE means that the fields of the partition function didn't fit into any partition and thus the values of the PF-fields are not allowed. + DESCRIPTION It is actually 6 different variants of this function which are called @@ -2468,15 +2855,19 @@ static uint32 get_part_id_linear_key(partition_info *part_info, */ -bool get_partition_id_list(partition_info *part_info, - uint32 *part_id) +int get_partition_id_list(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { - DBUG_ENTER("get_partition_id_list"); LIST_PART_ENTRY *list_array= part_info->list_array; - uint list_index; + int list_index; longlong list_value; - uint min_list_index= 0, max_list_index= part_info->no_list_values - 1; + int min_list_index= 0; + int max_list_index= part_info->no_list_values - 1; longlong part_func_value= part_info->part_expr->val_int(); + DBUG_ENTER("get_partition_id_list"); + + *func_value= part_func_value; while (max_list_index >= min_list_index) { list_index= (max_list_index + min_list_index) >> 1; @@ -2492,12 +2883,12 @@ bool get_partition_id_list(partition_info *part_info, else { *part_id= (uint32)list_array[list_index].partition_id; - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } } notfound: *part_id= 0; - DBUG_RETURN(TRUE); + DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); } @@ -2574,14 +2965,18 @@ notfound: } -bool get_partition_id_range(partition_info *part_info, - uint32 *part_id) +int get_partition_id_range(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { - DBUG_ENTER("get_partition_id_int_range"); longlong *range_array= part_info->range_int_array; uint max_partition= part_info->no_parts - 1; - uint min_part_id= 0, max_part_id= max_partition, loc_part_id; + uint min_part_id= 0; + uint max_part_id= max_partition; + uint loc_part_id; longlong part_func_value= part_info->part_expr->val_int(); + DBUG_ENTER("get_partition_id_int_range"); + while (max_part_id > min_part_id) { loc_part_id= (max_part_id + min_part_id + 1) >> 1; @@ -2595,11 +2990,12 @@ bool get_partition_id_range(partition_info *part_info, if (loc_part_id != max_partition) loc_part_id++; *part_id= (uint32)loc_part_id; + *func_value= part_func_value; if (loc_part_id == max_partition) if (range_array[loc_part_id] != LONGLONG_MAX) if (part_func_value >= range_array[loc_part_id]) - DBUG_RETURN(TRUE); - DBUG_RETURN(FALSE); + DBUG_RETURN(HA_ERR_NO_PARTITION_FOUND); + DBUG_RETURN(0); } @@ -2685,191 +3081,246 @@ uint32 get_partition_id_range_for_endpoint(partition_info *part_info, } -bool get_partition_id_hash_nosub(partition_info *part_info, - uint32 *part_id) +int get_partition_id_hash_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { - *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr); - return FALSE; + *part_id= get_part_id_hash(part_info->no_parts, part_info->part_expr, + func_value); + return 0; } -bool get_partition_id_linear_hash_nosub(partition_info *part_info, - uint32 *part_id) +int get_partition_id_linear_hash_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { *part_id= get_part_id_linear_hash(part_info, part_info->no_parts, - part_info->part_expr); - return FALSE; + part_info->part_expr, func_value); + return 0; } -bool get_partition_id_key_nosub(partition_info 
*part_info, - uint32 *part_id) +int get_partition_id_key_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { - *part_id= get_part_id_key(part_info->part_field_array, part_info->no_parts); - return FALSE; + *part_id= get_part_id_key(part_info->part_field_array, + part_info->no_parts, func_value); + return 0; } -bool get_partition_id_linear_key_nosub(partition_info *part_info, - uint32 *part_id) +int get_partition_id_linear_key_nosub(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { *part_id= get_part_id_linear_key(part_info, part_info->part_field_array, - part_info->no_parts); - return FALSE; + part_info->no_parts, func_value); + return 0; } -bool get_partition_id_range_sub_hash(partition_info *part_info, - uint32 *part_id) +int get_partition_id_range_sub_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_range_sub_hash"); - if (unlikely(get_partition_id_range(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr); + sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr, + &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_range_sub_linear_hash(partition_info *part_info, - uint32 *part_id) +int get_partition_id_range_sub_linear_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_range_sub_linear_hash"); - if (unlikely(get_partition_id_range(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; sub_part_id= get_part_id_linear_hash(part_info, no_subparts, - part_info->subpart_expr); + part_info->subpart_expr, + &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_range_sub_key(partition_info *part_info, - uint32 *part_id) +int get_partition_id_range_sub_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_range_sub_key"); - if (unlikely(get_partition_id_range(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts); + sub_part_id= get_part_id_key(part_info->subpart_field_array, + no_subparts, &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_range_sub_linear_key(partition_info *part_info, - uint32 *part_id) +int get_partition_id_range_sub_linear_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; 
DBUG_ENTER("get_partition_id_range_sub_linear_key"); - if (unlikely(get_partition_id_range(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_range(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; sub_part_id= get_part_id_linear_key(part_info, part_info->subpart_field_array, - no_subparts); + no_subparts, &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_list_sub_hash(partition_info *part_info, - uint32 *part_id) +int get_partition_id_list_sub_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_list_sub_hash"); - if (unlikely(get_partition_id_list(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_list(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr); + sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr, + &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_list_sub_linear_hash(partition_info *part_info, - uint32 *part_id) +int get_partition_id_list_sub_linear_hash(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_list_sub_linear_hash"); - if (unlikely(get_partition_id_list(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_list(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_hash(no_subparts, part_info->subpart_expr); + sub_part_id= get_part_id_linear_hash(part_info, no_subparts, + part_info->subpart_expr, + &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_list_sub_key(partition_info *part_info, - uint32 *part_id) +int get_partition_id_list_sub_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_range_sub_key"); - if (unlikely(get_partition_id_list(part_info, &loc_part_id))) + + if (unlikely((error= get_partition_id_list(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; - sub_part_id= get_part_id_key(part_info->subpart_field_array, no_subparts); + sub_part_id= get_part_id_key(part_info->subpart_field_array, + no_subparts, &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } -bool get_partition_id_list_sub_linear_key(partition_info *part_info, - uint32 *part_id) +int get_partition_id_list_sub_linear_key(partition_info *part_info, + uint32 *part_id, + longlong *func_value) { uint32 loc_part_id, sub_part_id; uint no_subparts; + longlong local_func_value; + int error; DBUG_ENTER("get_partition_id_list_sub_linear_key"); - if (unlikely(get_partition_id_list(part_info, &loc_part_id))) + + if (unlikely((error= 
get_partition_id_list(part_info, &loc_part_id, + func_value)))) { - DBUG_RETURN(TRUE); + DBUG_RETURN(error); } no_subparts= part_info->no_subparts; sub_part_id= get_part_id_linear_key(part_info, part_info->subpart_field_array, - no_subparts); + no_subparts, &local_func_value); *part_id= get_part_id_for_sub(loc_part_id, sub_part_id, no_subparts); - DBUG_RETURN(FALSE); + DBUG_RETURN(0); } /* This function is used to calculate the subpartition id + SYNOPSIS get_subpartition_id() part_info A reference to the partition_info struct where all the desired information is given + RETURN VALUE - part_id - The subpartition identity + part_id The subpartition identity + DESCRIPTION A routine used in some SELECT's when only partial knowledge of the partitions is known. @@ -2885,38 +3336,45 @@ bool get_partition_id_list_sub_linear_key(partition_info *part_info, uint32 get_partition_id_hash_sub(partition_info *part_info) { - return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr); + longlong func_value; + return get_part_id_hash(part_info->no_subparts, part_info->subpart_expr, + &func_value); } uint32 get_partition_id_linear_hash_sub(partition_info *part_info) { + longlong func_value; return get_part_id_linear_hash(part_info, part_info->no_subparts, - part_info->subpart_expr); + part_info->subpart_expr, &func_value); } uint32 get_partition_id_key_sub(partition_info *part_info) { + longlong func_value; return get_part_id_key(part_info->subpart_field_array, - part_info->no_subparts); + part_info->no_subparts, &func_value); } uint32 get_partition_id_linear_key_sub(partition_info *part_info) { + longlong func_value; return get_part_id_linear_key(part_info, part_info->subpart_field_array, - part_info->no_subparts); + part_info->no_subparts, &func_value); } /* - Set an indicator on all partition fields that are set by the key + Set an indicator on all partition fields that are set by the key + SYNOPSIS set_PF_fields_in_key() key_info Information about the index key_length Length of key + RETURN VALUE TRUE Found partition field set by key FALSE No partition field set by key @@ -2957,9 +3415,11 @@ static bool set_PF_fields_in_key(KEY *key_info, uint key_length) /* We have found that at least one partition field was set by a key, now check if a partition function has all its fields bound or not. + SYNOPSIS check_part_func_bound() ptr Array of fields NULL terminated (partition fields) + RETURN VALUE TRUE All fields in partition function are set FALSE Not all fields in partition function are set @@ -2985,14 +3445,17 @@ static bool check_part_func_bound(Field **ptr) /* Get the id of the subpartitioning part by using the key buffer of the index scan. + SYNOPSIS get_sub_part_id_from_key() table The table object buf A buffer that can be used to evaluate the partition function key_info The index object key_spec A key_range containing key and key length + RETURN VALUES part_id Subpartition id to use + DESCRIPTION Use key buffer to set-up record in buf, move field pointers and get the partition identity and restore field pointers afterwards. @@ -3023,36 +3486,43 @@ static uint32 get_sub_part_id_from_key(const TABLE *table,byte *buf, /* Get the id of the partitioning part by using the key buffer of the index scan. 
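A side note on the bound-fields test used by set_PF_fields_in_key() and check_part_func_bound() above: it reduces to a walk over a NULL-terminated field array. A minimal standalone sketch, with a mocked field type standing in for the server's Field class and its flag:

#include <stdbool.h>
#include <stdio.h>

struct mock_field { bool set_by_key; };  /* stand-in for Field + its flag */

static bool all_fields_bound(struct mock_field **ptr)
{
  /* partition field arrays are NULL-terminated */
  for (; *ptr; ptr++)
    if (!(*ptr)->set_by_key)
      return false;     /* one unbound field: partition id not computable */
  return true;
}

int main(void)
{
  struct mock_field a= { true }, b= { false };
  struct mock_field *part_fields[]= { &a, &b, 0 };
  printf("bound: %d\n", all_fields_bound(part_fields));  /* prints 0 */
  return 0;
}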
+ SYNOPSIS get_part_id_from_key() table The table object buf A buffer that can be used to evaluate the partition function key_info The index object key_spec A key_range containing key and key length - part_id Partition to use + out:part_id Partition to use + RETURN VALUES TRUE Partition to use not found FALSE Ok, part_id indicates partition to use + DESCRIPTION Use key buffer to set-up record in buf, move field pointers and get the partition identity and restore field pointers afterwards. */ + bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info, const key_range *key_spec, uint32 *part_id) { bool result; byte *rec0= table->record[0]; partition_info *part_info= table->part_info; + longlong func_value; DBUG_ENTER("get_part_id_from_key"); key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length); if (likely(rec0 == buf)) - result= part_info->get_part_partition_id(part_info, part_id); + result= part_info->get_part_partition_id(part_info, part_id, + &func_value); else { Field **part_field_array= part_info->part_field_array; set_field_ptr(part_field_array, buf, rec0); - result= part_info->get_part_partition_id(part_info, part_id); + result= part_info->get_part_partition_id(part_info, part_id, + &func_value); set_field_ptr(part_field_array, rec0, buf); } DBUG_RETURN(result); @@ -3061,16 +3531,19 @@ bool get_part_id_from_key(const TABLE *table, byte *buf, KEY *key_info, /* Get the partitioning id of the full PF by using the key buffer of the index scan. + SYNOPSIS get_full_part_id_from_key() table The table object buf A buffer that is used to evaluate the partition function key_info The index object key_spec A key_range containing key and key length - part_spec A partition id containing start part and end part + out:part_spec A partition id containing start part and end part + RETURN VALUES part_spec No partitions to scan is indicated by end_part > start_part when returning + DESCRIPTION Use key buffer to set-up record in buf, move field pointers if needed and get the partition identity and restore field pointers afterwards. @@ -3084,16 +3557,19 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf, bool result; partition_info *part_info= table->part_info; byte *rec0= table->record[0]; + longlong func_value; DBUG_ENTER("get_full_part_id_from_key"); key_restore(buf, (byte*)key_spec->key, key_info, key_spec->length); if (likely(rec0 == buf)) - result= part_info->get_partition_id(part_info, &part_spec->start_part); + result= part_info->get_partition_id(part_info, &part_spec->start_part, + &func_value); else { Field **part_field_array= part_info->full_part_field_array; set_field_ptr(part_field_array, buf, rec0); - result= part_info->get_partition_id(part_info, &part_spec->start_part); + result= part_info->get_partition_id(part_info, &part_spec->start_part, + &func_value); set_field_ptr(part_field_array, rec0, buf); } part_spec->end_part= part_spec->start_part; @@ -3104,14 +3580,16 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf, /* Get the set of partitions to use in query. 
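The buffer juggling in get_part_id_from_key() and get_full_part_id_from_key() above rests on one idea: when the key was unpacked into a buffer other than record[0], every field pointer is shifted by the distance between the two buffers, the partition function is evaluated, and the pointers are shifted back. A rough standalone illustration (mocked buffers instead of Field objects; the cross-buffer pointer arithmetic is formally out of bounds in ISO C but shows the mechanism):

#include <stddef.h>
#include <stdio.h>

static void move_ptrs(char **ptrs, ptrdiff_t diff)
{
  for (; *ptrs; ptrs++)
    *ptrs+= diff;            /* retarget each pointer by the buffer distance */
}

int main(void)
{
  char rec0[16]= "record0", buf[16]= "keybuf";
  char *fields[]= { rec0, 0 };   /* one "field" pointing into record[0] */

  move_ptrs(fields, buf - rec0); /* point into buf, evaluate the function... */
  printf("%s\n", fields[0]);     /* prints "keybuf" */
  move_ptrs(fields, rec0 - buf); /* ...then restore */
  printf("%s\n", fields[0]);     /* prints "record0" */
  return 0;
}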
+ SYNOPSIS get_partition_set() table The table object buf A buffer that can be used to evaluate the partition function index The index of the key used, if MAX_KEY no index used key_spec A key_range containing key and key length - part_spec Contains start part, end part and indicator if bitmap is + out:part_spec Contains start part, end part and indicator if bitmap is used for which partitions to scan + DESCRIPTION This function is called to discover which partitions to use in an index scan or a full table scan. @@ -3121,6 +3599,7 @@ void get_full_part_id_from_key(const TABLE *table, byte *buf, If start_part > end_part at return it means no partition needs to be scanned. If start_part == end_part it always means a single partition needs to be scanned. + RETURN VALUE part_spec */ @@ -3128,7 +3607,8 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, const key_range *key_spec, part_id_range *part_spec) { partition_info *part_info= table->part_info; - uint no_parts= get_tot_partitions(part_info), i, part_id; + uint no_parts= get_tot_partitions(part_info); + uint i, part_id; uint sub_part= no_parts; uint32 part_part= no_parts; KEY *key_info= NULL; @@ -3170,7 +3650,8 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec); else if (part_info->all_fields_in_PPF.is_set(index)) { - if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part)) + if (get_part_id_from_key(table,buf,key_info, + key_spec,(uint32*)&part_part)) { /* The value of the RANGE or LIST partitioning was outside of @@ -3205,15 +3686,18 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, clear_indicator_in_key_fields(key_info); DBUG_VOID_RETURN; } - else if (check_part_func_bound(part_info->part_field_array)) - sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec); - else if (check_part_func_bound(part_info->subpart_field_array)) + else if (is_sub_partitioned(part_info)) { - if (get_part_id_from_key(table,buf,key_info,key_spec,(uint32*)&part_part)) + if (check_part_func_bound(part_info->subpart_field_array)) + sub_part= get_sub_part_id_from_key(table, buf, key_info, key_spec); + else if (check_part_func_bound(part_info->part_field_array)) { - part_spec->start_part= no_parts; - clear_indicator_in_key_fields(key_info); - DBUG_VOID_RETURN; + if (get_part_id_from_key(table,buf,key_info,key_spec,&part_part)) + { + part_spec->start_part= no_parts; + clear_indicator_in_key_fields(key_info); + DBUG_VOID_RETURN; + } } } } @@ -3282,10 +3766,10 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, | Forminfo 288 bytes | ------------------------------- | Screen buffer, to make | - | field names readable | + | field names readable | ------------------------------- | Packed field info | - | 17 + 1 + strlen(field_name) | + | 17 + 1 + strlen(field_name) | | + 1 end of file character | ------------------------------- | Partition info | @@ -3294,15 +3778,20 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, Read the partition syntax from the frm file and parse it to get the data structures of the partitioning. 
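The part_spec contract of get_partition_set() described above fits in a few lines: start_part > end_part means nothing to scan, equality means exactly one partition. A hedged sketch with a trivial prune() standing in for the real key/field-binding analysis:

#include <stdio.h>

typedef struct { unsigned start_part, end_part; } part_id_range;

static void prune(part_id_range *r, unsigned no_parts,
                  int fields_bound, unsigned part_id)
{
  if (fields_bound)
    r->start_part= r->end_part= part_id;  /* exactly one partition */
  else
  {
    r->start_part= 0;                     /* no knowledge: scan them all */
    r->end_part= no_parts - 1;
  }
}

int main(void)
{
  part_id_range r;
  prune(&r, 4, 1, 2);
  printf("%u..%u\n", r.start_part, r.end_part);  /* 2..2 */
  prune(&r, 4, 0, 0);
  printf("%u..%u\n", r.start_part, r.end_part);  /* 0..3 */
  return 0;
}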
+ SYNOPSIS mysql_unpack_partition() - file File reference of frm file thd Thread object + part_buf Partition info from frm file part_info_len Length of partition syntax table Table object of partitioned table + create_table_ind Is it called from CREATE TABLE + default_db_type What is the default engine of the table + RETURN VALUE TRUE Error FALSE Sucess + DESCRIPTION Read the partition syntax from the current position in the frm file. Initiate a LEX object, save the list of item tree objects to free after @@ -3315,13 +3804,16 @@ void get_partition_set(const TABLE *table, byte *buf, const uint index, */ bool mysql_unpack_partition(THD *thd, const uchar *part_buf, - uint part_info_len, TABLE* table, + uint part_info_len, + uchar *part_state, uint part_state_len, + TABLE* table, bool is_create_table_ind, handlerton *default_db_type) { Item *thd_free_list= thd->free_list; bool result= TRUE; partition_info *part_info; - LEX *old_lex= thd->lex, lex; + LEX *old_lex= thd->lex; + LEX lex; DBUG_ENTER("mysql_unpack_partition"); thd->lex= &lex; @@ -3344,17 +3836,63 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf, we then save in the partition info structure. */ thd->free_list= NULL; - lex.part_info= (partition_info*)1; //Indicate yyparse from this place + lex.part_info= new partition_info();/* Indicates yyparse from this place */ + if (!lex.part_info) + { + mem_alloc_error(sizeof(partition_info)); + goto end; + } + lex.part_info->part_state= part_state; + lex.part_info->part_state_len= part_state_len; + DBUG_PRINT("info", ("Parse: %s", part_buf)); if (yyparse((void*)thd) || thd->is_fatal_error) { free_items(thd->free_list); goto end; } + /* + The parsed syntax residing in the frm file can still contain defaults. + The reason is that the frm file is sometimes saved outside of this + MySQL Server and used in backup and restore of clusters or partitioned + tables. It is not certain that the restore will restore exactly the + same default partitioning. + + The easiest manner of handling this is to simply continue using the + part_info we already built up during mysql_create_table if we are + in the process of creating a table. If the table already exists we + need to discover the number of partitions for the default parts. Since + the handler object hasn't been created here yet we need to postpone this + to the fix_partition_func method. + */ + + DBUG_PRINT("info", ("Successful parse")); part_info= lex.part_info; + DBUG_PRINT("info", ("default engine = %d, default_db_type = %d", + ha_legacy_type(part_info->default_engine_type), + ha_legacy_type(default_db_type))); + if (is_create_table_ind) + { + if (old_lex->name) + { + /* + This code is executed when we do a CREATE TABLE t1 LIKE t2 + old_lex->name contains the t2 and the table we are opening has + name t1. + */ + if (partition_default_handling(table, part_info)) + { + DBUG_RETURN(TRUE); + } + } + else + part_info= old_lex->part_info; + } table->part_info= part_info; table->file->set_part_info(part_info); if (part_info->default_engine_type == NULL) + { part_info->default_engine_type= default_db_type; + } else { DBUG_ASSERT(part_info->default_engine_type == default_db_type); @@ -3373,30 +3911,25 @@ bool mysql_unpack_partition(THD *thd, const uchar *part_buf, */ uint part_func_len= part_info->part_func_len; uint subpart_func_len= part_info->subpart_func_len; - uint bitmap_bits= part_info->no_subparts? 
- (part_info->no_subparts* part_info->no_parts): - part_info->no_parts; - uint bitmap_bytes= bitmap_buffer_size(bitmap_bits); - uint32 *bitmap_buf; - char *part_func_string, *subpart_func_string= NULL; - if (!((part_func_string= thd->alloc(part_func_len))) || + char *part_func_string= NULL; + char *subpart_func_string= NULL; + if ((part_func_len && + !((part_func_string= thd->alloc(part_func_len)))) || (subpart_func_len && - !((subpart_func_string= thd->alloc(subpart_func_len)))) || - !((bitmap_buf= (uint32*)thd->alloc(bitmap_bytes)))) + !((subpart_func_string= thd->alloc(subpart_func_len))))) { - my_error(ER_OUTOFMEMORY, MYF(0), part_func_len); + mem_alloc_error(part_func_len); free_items(thd->free_list); part_info->item_free_list= 0; goto end; } - memcpy(part_func_string, part_info->part_func_string, part_func_len); + if (part_func_len) + memcpy(part_func_string, part_info->part_func_string, part_func_len); if (subpart_func_len) memcpy(subpart_func_string, part_info->subpart_func_string, subpart_func_len); part_info->part_func_string= part_func_string; part_info->subpart_func_string= subpart_func_string; - - bitmap_init(&part_info->used_partitions, bitmap_buf, bitmap_bytes*8, FALSE); } result= FALSE; @@ -3405,16 +3938,1482 @@ end: thd->lex= old_lex; DBUG_RETURN(result); } + + +/* + SYNOPSIS + fast_alter_partition_error_handler() + lpt Container for parameters + + RETURN VALUES + None + + DESCRIPTION + Support routine to clean up after failures of on-line ALTER TABLE + for partition management. +*/ + +static void fast_alter_partition_error_handler(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + DBUG_ENTER("fast_alter_partition_error_handler"); + /* TODO: WL 2826 Error handling */ + DBUG_VOID_RETURN; +} + + +/* + SYNOPSIS + fast_end_partition() + thd Thread object + out:copied Number of records copied + out:deleted Number of records deleted + table_list Table list with the one table in it + empty Has nothing been done + lpt Struct to be used by error handler + + RETURN VALUES + FALSE Success + TRUE Failure + + DESCRIPTION + Support routine to handle the successful cases for partition + management. 
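A simplified sketch of the success path this routine implements, with stubs standing in for ha_commit_stmt()/ha_commit() and write_bin_log(); the printed text only loosely mirrors the ER_INSERT_INFO message:

#include <stdio.h>

static int commit_ok(void) { return 0; }  /* stub: both commit calls succeed */
static void write_binlog(const char *q) { printf("binlog: %s\n", q); }

static int fast_end(unsigned long copied, unsigned long deleted,
                    int is_empty, int log_it, const char *query)
{
  if (commit_ok() != 0)
    return 1;                        /* commit failed: error handler path */
  if (!is_empty && log_it)
    write_binlog(query);             /* log only when something was done */
  printf("Records: %lu  Deleted: %lu  Skipped: 0\n",
         copied + deleted, deleted); /* ok packet with INSERT-style info */
  return 0;
}

int main(void)
{
  return fast_end(10, 2, 0, 1, "ALTER TABLE t1 DROP PARTITION p1");
}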
+*/ + +static int fast_end_partition(THD *thd, ulonglong copied, + ulonglong deleted, + TABLE_LIST *table_list, bool is_empty, + ALTER_PARTITION_PARAM_TYPE *lpt, + bool written_bin_log) +{ + int error; + DBUG_ENTER("fast_end_partition"); + + thd->proc_info="end"; + if (!is_empty) + query_cache_invalidate3(thd, table_list, 0); + error= ha_commit_stmt(thd); + if (ha_commit(thd)) + error= 1; + if (!error || is_empty) + { + char tmp_name[80]; + if ((!is_empty) && (!written_bin_log) && + (!thd->lex->no_write_to_binlog)) + write_bin_log(thd, FALSE, thd->query, thd->query_length); + close_thread_tables(thd); + my_snprintf(tmp_name, sizeof(tmp_name), ER(ER_INSERT_INFO), + (ulong) (copied + deleted), + (ulong) deleted, + (ulong) 0); + send_ok(thd,copied+deleted,0L,tmp_name); + DBUG_RETURN(FALSE); + } + fast_alter_partition_error_handler(lpt); + DBUG_RETURN(TRUE); +} + + +/* + Check engine mix that it is correct + SYNOPSIS + check_engine_condition() + p_elem Partition element + default_engine Have user specified engine on table level + inout::engine_type Current engine used + inout::first Is it first partition + RETURN VALUE + TRUE Failed check + FALSE Ok + DESCRIPTION + (specified partition handler ) specified table handler + (NDB, NDB) NDB OK + (MYISAM, MYISAM) - OK + (MYISAM, -) - NOT OK + (MYISAM, -) MYISAM OK + (- , MYISAM) - NOT OK + (- , -) MYISAM OK + (-,-) - OK + (NDB, MYISAM) * NOT OK +*/ + +static bool check_engine_condition(partition_element *p_elem, + bool default_engine, + handlerton **engine_type, + bool *first) +{ + if (*first && default_engine) + *engine_type= p_elem->engine_type; + *first= FALSE; + if ((!default_engine && + (p_elem->engine_type != *engine_type && + !p_elem->engine_type)) || + (default_engine && + p_elem->engine_type != *engine_type)) + return TRUE; + else + return FALSE; +} + +/* + We need to check if engine used by all partitions can handle + partitioning natively. + + SYNOPSIS + check_native_partitioned() + create_info Create info in CREATE TABLE + out:ret_val Return value + part_info Partition info + thd Thread object + + RETURN VALUES + Value returned in bool ret_value + TRUE Native partitioning supported by engine + FALSE Need to use partition handler + + Return value from function + TRUE Error + FALSE Success +*/ + +static bool check_native_partitioned(HA_CREATE_INFO *create_info,bool *ret_val, + partition_info *part_info, THD *thd) +{ + List_iterator part_it(part_info->partitions); + bool first= TRUE; + bool default_engine; + handlerton *engine_type= create_info->db_type; + handlerton *old_engine_type= engine_type; + uint i= 0; + handler *file; + uint no_parts= part_info->partitions.elements; + DBUG_ENTER("check_native_partitioned"); + + default_engine= (create_info->used_fields | HA_CREATE_USED_ENGINE) ? + TRUE : FALSE; + DBUG_PRINT("info", ("engine_type = %u, default = %u", + ha_legacy_type(engine_type), + default_engine)); + if (no_parts) + { + do + { + partition_element *part_elem= part_it++; + if (is_sub_partitioned(part_info) && + part_elem->subpartitions.elements) + { + uint no_subparts= part_elem->subpartitions.elements; + uint j= 0; + List_iterator sub_it(part_elem->subpartitions); + do + { + partition_element *sub_elem= sub_it++; + if (check_engine_condition(sub_elem, default_engine, + &engine_type, &first)) + goto error; + } while (++j < no_subparts); + /* + In case of subpartitioning and defaults we allow that only + subparts have specified engines, as long as the parts haven't + specified the wrong engine it's ok. 
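The truth table above condenses to the predicate below. Engine handles are mocked as plain integers (0 meaning "not specified"), but the condition itself mirrors the one in check_engine_condition():

#include <stdbool.h>
#include <stdio.h>

typedef int engine_t;    /* stand-in for handlerton*; 0 = engine not given */

static bool mix_error(engine_t part_engine, bool default_engine,
                      engine_t *engine_type, bool *first)
{
  if (*first && default_engine)
    *engine_type= part_engine;     /* first partition fixes the engine */
  *first= false;
  return (!default_engine &&
          part_engine != *engine_type && !part_engine) ||
         (default_engine && part_engine != *engine_type);
}

int main(void)
{
  bool first= true;
  engine_t eng= 0;
  /* default engine on table level: (NDB, NDB) ok, (NDB, MYISAM) not ok */
  printf("%d\n", mix_error(1, true, &eng, &first));  /* 0: fixes engine 1 */
  printf("%d\n", mix_error(2, true, &eng, &first));  /* 1: mixed engines */
  return 0;
}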
+ */ + if (check_engine_condition(part_elem, FALSE, + &engine_type, &first)) + goto error; + } + else if (check_engine_condition(part_elem, default_engine, + &engine_type, &first)) + goto error; + } while (++i < no_parts); + } + + /* + All engines are of the same type. Check if this engine supports + native partitioning. + */ + + if (!engine_type) + engine_type= old_engine_type; + DBUG_PRINT("info", ("engine_type = %s", + ha_resolve_storage_engine_name(engine_type))); + if (engine_type->partition_flags && + (engine_type->partition_flags() & HA_CAN_PARTITION)) + { + create_info->db_type= engine_type; + DBUG_PRINT("info", ("Changed to native partitioning")); + *ret_val= TRUE; + } + DBUG_RETURN(FALSE); +error: + /* + Mixed engines not yet supported but when supported it will need + the partition handler + */ + *ret_val= FALSE; + DBUG_RETURN(TRUE); +} + + +/* + Prepare for ALTER TABLE of partition structure + + SYNOPSIS + prep_alter_part_table() + thd Thread object + table Table object + inout:alter_info Alter information + inout:create_info Create info for CREATE TABLE + old_db_type Old engine type + out:partition_changed Boolean indicating whether partition changed + out:fast_alter_partition Boolean indicating whether fast partition + change is requested + + RETURN VALUES + TRUE Error + FALSE Success + partition_changed + fast_alter_partition + + DESCRIPTION + This method handles all preparations for ALTER TABLE for partitioned + tables + We need to handle both partition management command such as Add Partition + and others here as well as an ALTER TABLE that completely changes the + partitioning and yet others that don't change anything at all. We start + by checking the partition management variants and then check the general + change patterns. +*/ + +uint prep_alter_part_table(THD *thd, TABLE *table, ALTER_INFO *alter_info, + HA_CREATE_INFO *create_info, + handlerton *old_db_type, + bool *partition_changed, + uint *fast_alter_partition) +{ + DBUG_ENTER("prep_alter_part_table"); + + if (alter_info->flags & + (ALTER_ADD_PARTITION | ALTER_DROP_PARTITION | + ALTER_COALESCE_PARTITION | ALTER_REORGANIZE_PARTITION | + ALTER_TABLE_REORG | ALTER_OPTIMIZE_PARTITION | + ALTER_CHECK_PARTITION | ALTER_ANALYZE_PARTITION | + ALTER_REPAIR_PARTITION | ALTER_REBUILD_PARTITION)) + { + partition_info *tab_part_info= table->part_info; + if (!tab_part_info) + { + my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); + DBUG_RETURN(TRUE); + } + /* + We are going to manipulate the partition info on the table object + so we need to ensure that the data structure of the table object + is freed by setting version to 0. table->s->version= 0 forces a + flush of the table object in close_thread_tables(). + */ + uint flags; + table->s->version= 0L; + if (alter_info->flags == ALTER_TABLE_REORG) + { + uint new_part_no, curr_part_no; + ulonglong max_rows= table->s->max_rows; + if (tab_part_info->part_type != HASH_PARTITION || + tab_part_info->use_default_no_partitions) + { + my_error(ER_REORG_NO_PARAM_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + new_part_no= table->file->get_default_no_partitions(max_rows); + curr_part_no= tab_part_info->no_parts; + if (new_part_no == curr_part_no) + { + /* + No change is needed, we will have the same number of partitions + after the change as before. Thus we can reply ok immediately + without any changes at all. 
+ */ + DBUG_RETURN(fast_end_partition(thd, ULL(0), ULL(0), NULL, + TRUE, NULL, FALSE)); + } + else if (new_part_no > curr_part_no) + { + /* + We will add more partitions, we use the ADD PARTITION without + setting the flag for no default number of partitions + */ + alter_info->flags|= ALTER_ADD_PARTITION; + thd->lex->part_info->no_parts= new_part_no - curr_part_no; + } + else + { + /* + We will remove hash partitions, we use the COALESCE PARTITION + without setting the flag for no default number of partitions + */ + alter_info->flags|= ALTER_COALESCE_PARTITION; + alter_info->no_parts= curr_part_no - new_part_no; + } + } + if (table->s->db_type->alter_table_flags && + (!(flags= table->s->db_type->alter_table_flags(alter_info->flags)))) + { + my_error(ER_PARTITION_FUNCTION_FAILURE, MYF(0)); + DBUG_RETURN(1); + } + *fast_alter_partition= flags ^ HA_PARTITION_FUNCTION_SUPPORTED; + if (alter_info->flags & ALTER_ADD_PARTITION) + { + /* + We start by moving the new partitions to the list of temporary + partitions. We will then check that the new partitions fit in the + partitioning scheme as currently set-up. + Partitions are always added at the end in ADD PARTITION. + */ + partition_info *alt_part_info= thd->lex->part_info; + uint no_new_partitions= alt_part_info->no_parts; + uint no_orig_partitions= tab_part_info->no_parts; + uint check_total_partitions= no_new_partitions + no_orig_partitions; + uint new_total_partitions= check_total_partitions; + /* + We allow quite a lot of values to be supplied by defaults, however we + must know the number of new partitions in this case. + */ + if (thd->lex->no_write_to_binlog && + tab_part_info->part_type != HASH_PARTITION) + { + my_error(ER_NO_BINLOG_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + if (no_new_partitions == 0) + { + my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0)); + DBUG_RETURN(TRUE); + } + if (is_sub_partitioned(tab_part_info)) + { + if (alt_part_info->no_subparts == 0) + alt_part_info->no_subparts= tab_part_info->no_subparts; + else if (alt_part_info->no_subparts != tab_part_info->no_subparts) + { + my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + check_total_partitions= new_total_partitions* + alt_part_info->no_subparts; + } + if (check_total_partitions > MAX_PARTITIONS) + { + my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } + alt_part_info->part_type= tab_part_info->part_type; + if (set_up_defaults_for_partitioning(alt_part_info, + table->file, + ULL(0), + tab_part_info->no_parts)) + { + DBUG_RETURN(TRUE); + } +/* +Handling of on-line cases: + +ADD PARTITION for RANGE/LIST PARTITIONING: +------------------------------------------ +For range and list partitions add partition is simply adding a +new empty partition to the table. If the handler support this we +will use the simple method of doing this. The figure below shows +an example of this and the states involved in making this change. + +Existing partitions New added partitions +------ ------ ------ ------ | ------ ------ +| | | | | | | | | | | | | +| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 | +------ ------ ------ ------ | ------ ------ +PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_TO_BE_ADDED*2 +PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED*2 + +The first line is the states before adding the new partitions and the +second line is after the new partitions are added. All the partitions are +in the partitions list, no partitions are placed in the temp_partitions +list. 
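For the total-partition limit check earlier in this ADD PARTITION path, a small worked example; the counts are made up and the 1024 value assumes the current MAX_PARTITIONS limit:

#include <stdio.h>

#define MAX_PARTITIONS 1024   /* assumed limit */

int main(void)
{
  unsigned no_orig_partitions= 500;
  unsigned no_new_partitions= 30;
  unsigned no_subparts= 2;    /* subpartitioned: the total is multiplied */
  unsigned check_total= (no_orig_partitions + no_new_partitions) *
                        no_subparts;

  printf("%u partitions -> %s\n", check_total,
         check_total > MAX_PARTITIONS ?
         "ER_TOO_MANY_PARTITIONS_ERROR" : "ok");   /* 1060 -> error */
  return 0;
}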
+ +ADD PARTITION for HASH PARTITIONING +----------------------------------- +This little figure tries to show the various partitions involved when +adding two new partitions to a linear hash based partitioned table with +four partitions to start with, which lists are used and the states they +pass through. Adding partitions to a normal hash based is similar except +that it is always all the existing partitions that are reorganised not +only a subset of them. + +Existing partitions New added partitions +------ ------ ------ ------ | ------ ------ +| | | | | | | | | | | | | +| p0 | | p1 | | p2 | | p3 | | | p4 | | p5 | +------ ------ ------ ------ | ------ ------ +PART_CHANGED PART_CHANGED PART_NORMAL PART_NORMAL PART_TO_BE_ADDED +PART_IS_CHANGED*2 PART_NORMAL PART_NORMAL PART_IS_ADDED +PART_NORMAL PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_ADDED + +Reorganised existing partitions +------ ------ +| | | | +| p0'| | p1'| +------ ------ + +p0 - p5 will be in the partitions list of partitions. +p0' and p1' will actually not exist as separate objects, there presence can +be deduced from the state of the partition and also the names of those +partitions can be deduced this way. + +After adding the partitions and copying the partition data to p0', p1', +p4 and p5 from p0 and p1 the states change to adapt for the new situation +where p0 and p1 is dropped and replaced by p0' and p1' and the new p4 and +p5 are in the table again. + +The first line above shows the states of the partitions before we start +adding and copying partitions, the second after completing the adding +and copying and finally the third line after also dropping the partitions +that are reorganised. +*/ + if (*fast_alter_partition && + tab_part_info->part_type == HASH_PARTITION) + { + uint part_no= 0, start_part= 1, start_sec_part= 1; + uint end_part= 0, end_sec_part= 0; + uint upper_2n= tab_part_info->linear_hash_mask + 1; + uint lower_2n= upper_2n >> 1; + bool all_parts= TRUE; + if (tab_part_info->linear_hash_ind && + no_new_partitions < upper_2n) + { + /* + An analysis of which parts needs reorganisation shows that it is + divided into two intervals. The first interval is those parts + that are reorganised up until upper_2n - 1. From upper_2n and + onwards it starts again from partition 0 and goes on until + it reaches p(upper_2n - 1). If the last new partition reaches + beyond upper_2n - 1 then the first interval will end with + p(lower_2n - 1) and start with p(no_orig_partitions - lower_2n). + If lower_2n partitions are added then p0 to p(lower_2n - 1) will + be reorganised which means that the two interval becomes one + interval at this point. Thus only when adding less than + lower_2n partitions and going beyond a total of upper_2n we + actually get two intervals. + + To exemplify this assume we have 6 partitions to start with and + add 1, 2, 3, 5, 6, 7, 8, 9 partitions. + The first to add after p5 is p6 = 110 in bit numbers. Thus we + can see that 10 = p2 will be partition to reorganise if only one + partition. + If 2 partitions are added we reorganise [p2, p3]. Those two + cases are covered by the second if part below. + If 3 partitions are added we reorganise [p2, p3] U [p0,p0]. This + part is covered by the else part below. + If 5 partitions are added we get [p2,p3] U [p0, p2] = [p0, p3]. + This is covered by the first if part where we need the max check + to here use lower_2n - 1. + If 7 partitions are added we get [p2,p3] U [p0, p4] = [p0, p4]. 
+ This is covered by the first if part but here we use the first + calculated end_part. + Finally with 9 new partitions we would also reorganise p6 if we + used the method below but we cannot reorganise more partitions + than what we had from the start and thus we simply set all_parts + to TRUE. In this case we don't get into this if-part at all. + */ + all_parts= FALSE; + if (no_new_partitions >= lower_2n) + { + /* + In this case there is only one interval since the two intervals + overlap and this starts from zero to last_part_no - upper_2n + */ + start_part= 0; + end_part= new_total_partitions - (upper_2n + 1); + end_part= max(lower_2n - 1, end_part); + } + else if (new_total_partitions <= upper_2n) + { + /* + Also in this case there is only one interval since we are not + going over a 2**n boundary + */ + start_part= no_orig_partitions - lower_2n; + end_part= start_part + (no_new_partitions - 1); + } + else + { + /* We have two non-overlapping intervals since we are not + passing a 2**n border and we have not at least lower_2n + new parts that would ensure that the intervals become + overlapping. + */ + start_part= no_orig_partitions - lower_2n; + end_part= upper_2n - 1; + start_sec_part= 0; + end_sec_part= new_total_partitions - (upper_2n + 1); + } + } + List_iterator tab_it(tab_part_info->partitions); + part_no= 0; + do + { + partition_element *p_elem= tab_it++; + if (all_parts || + (part_no >= start_part && part_no <= end_part) || + (part_no >= start_sec_part && part_no <= end_sec_part)) + { + p_elem->part_state= PART_CHANGED; + } + } while (++part_no < no_orig_partitions); + } + /* + Need to concatenate the lists here to make it possible to check the + partition info for correctness using check_partition_info. + For on-line add partition we set the state of this partition to + PART_TO_BE_ADDED to ensure that it is known that it is not yet + usable (becomes usable when partition is created and the switch of + partition configuration is made. + */ + { + List_iterator alt_it(alt_part_info->partitions); + uint part_count= 0; + do + { + partition_element *part_elem= alt_it++; + if (*fast_alter_partition) + part_elem->part_state= PART_TO_BE_ADDED; + if (tab_part_info->partitions.push_back(part_elem)) + { + mem_alloc_error(1); + DBUG_RETURN(TRUE); + } + } while (++part_count < no_new_partitions); + tab_part_info->no_parts+= no_new_partitions; + } + /* + If we specify partitions explicitly we don't use defaults anymore. + Using ADD PARTITION also means that we don't have the default number + of partitions anymore. We use this code also for Table reorganisations + and here we don't set any default flags to FALSE. + */ + if (!(alter_info->flags & ALTER_TABLE_REORG)) + { + if (!alt_part_info->use_default_partitions) + { + DBUG_PRINT("info", ("part_info= %x", tab_part_info)); + tab_part_info->use_default_partitions= FALSE; + } + tab_part_info->use_default_no_partitions= FALSE; + } + } + else if (alter_info->flags == ALTER_DROP_PARTITION) + { + /* + Drop a partition from a range partition and list partitioning is + always safe and can be made more or less immediate. It is necessary + however to ensure that the partition to be removed is safely removed + and that REPAIR TABLE can remove the partition if for some reason the + command to drop the partition failed in the middle. 
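The interval analysis above, extracted into a runnable sketch. With six original partitions (upper_2n = 8, lower_2n = 4) it reproduces two of the worked cases from the comment: adding 2 reorganises [p2,p3] and adding 5 reorganises [p0,p3]. The helper macro and driver are illustrative only:

#include <stdio.h>

#define MAX(a,b) ((a) > (b) ? (a) : (b))

int main(void)
{
  unsigned no_orig= 6, upper_2n= 8, lower_2n= 4;
  unsigned adds[2]= { 2, 5 };
  unsigned i;

  for (i= 0; i < 2; i++)
  {
    unsigned no_new= adds[i];
    unsigned new_total= no_orig + no_new;
    unsigned start_part, end_part;

    if (no_new >= lower_2n)
    {
      /* one interval: the two intervals overlap and merge */
      start_part= 0;
      end_part= MAX(lower_2n - 1, new_total - (upper_2n + 1));
    }
    else   /* here: no_new < lower_2n and new_total <= upper_2n */
    {
      start_part= no_orig - lower_2n;
      end_part= start_part + (no_new - 1);
    }
    printf("add %u: reorganise [p%u, p%u]\n", no_new, start_part, end_part);
  }
  return 0;
}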
+ */ + uint part_count= 0; + uint no_parts_dropped= alter_info->partition_names.elements; + uint no_parts_found= 0; + List_iterator part_it(tab_part_info->partitions); + if (!(tab_part_info->part_type == RANGE_PARTITION || + tab_part_info->part_type == LIST_PARTITION)) + { + my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP"); + DBUG_RETURN(TRUE); + } + if (no_parts_dropped >= tab_part_info->no_parts) + { + my_error(ER_DROP_LAST_PARTITION, MYF(0)); + DBUG_RETURN(TRUE); + } + do + { + partition_element *part_elem= part_it++; + if (is_name_in_list(part_elem->partition_name, + alter_info->partition_names)) + { + /* + Set state to indicate that the partition is to be dropped. + */ + no_parts_found++; + part_elem->part_state= PART_TO_BE_DROPPED; + } + } while (++part_count < tab_part_info->no_parts); + if (no_parts_found != no_parts_dropped) + { + my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "DROP"); + DBUG_RETURN(TRUE); + } + if (table->file->is_fk_defined_on_table_or_index(MAX_KEY)) + { + my_error(ER_ROW_IS_REFERENCED, MYF(0)); + DBUG_RETURN(TRUE); + } + } + else if ((alter_info->flags & ALTER_OPTIMIZE_PARTITION) || + (alter_info->flags & ALTER_ANALYZE_PARTITION) || + (alter_info->flags & ALTER_CHECK_PARTITION) || + (alter_info->flags & ALTER_REPAIR_PARTITION) || + (alter_info->flags & ALTER_REBUILD_PARTITION)) + { + uint no_parts_opt= alter_info->partition_names.elements; + uint part_count= 0; + uint no_parts_found= 0; + List_iterator part_it(tab_part_info->partitions); + + do + { + partition_element *part_elem= part_it++; + if ((alter_info->flags & ALTER_ALL_PARTITION) || + (is_name_in_list(part_elem->partition_name, + alter_info->partition_names))) + { + /* + Mark the partition as a partition to be "changed" by + analyzing/optimizing/rebuilding/checking/repairing + */ + no_parts_found++; + part_elem->part_state= PART_CHANGED; + } + } while (++part_count < tab_part_info->no_parts); + if (no_parts_found != no_parts_opt && + (!(alter_info->flags & ALTER_ALL_PARTITION))) + { + const char *ptr; + if (alter_info->flags & ALTER_OPTIMIZE_PARTITION) + ptr= "OPTIMIZE"; + else if (alter_info->flags & ALTER_ANALYZE_PARTITION) + ptr= "ANALYZE"; + else if (alter_info->flags & ALTER_CHECK_PARTITION) + ptr= "CHECK"; + else if (alter_info->flags & ALTER_REPAIR_PARTITION) + ptr= "REPAIR"; + else + ptr= "REBUILD"; + my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), ptr); + DBUG_RETURN(TRUE); + } + } + else if (alter_info->flags & ALTER_COALESCE_PARTITION) + { + uint no_parts_coalesced= alter_info->no_parts; + uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced; + List_iterator part_it(tab_part_info->partitions); + if (tab_part_info->part_type != HASH_PARTITION) + { + my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0)); + DBUG_RETURN(TRUE); + } + if (no_parts_coalesced == 0) + { + my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0)); + DBUG_RETURN(TRUE); + } + if (no_parts_coalesced >= tab_part_info->no_parts) + { + my_error(ER_DROP_LAST_PARTITION, MYF(0)); + DBUG_RETURN(TRUE); + } +/* +Online handling: +COALESCE PARTITION: +------------------- +The figure below shows the manner in which partitions are handled when +performing an on-line coalesce partition and which states they go through +at start, after adding and copying partitions and finally after dropping +the partitions to drop. The figure shows an example using four partitions +to start with, using linear hash and coalescing one partition (always the +last partition). 
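A standalone sketch of the DROP PARTITION marking loop above: flag every named partition PART_TO_BE_DROPPED and reject the statement when the number found does not match the number of names given. The partition and name lists are invented:

#include <stdio.h>
#include <string.h>

enum part_state { PART_NORMAL, PART_TO_BE_DROPPED };

int main(void)
{
  const char *parts[3]= { "p0", "p1", "p2" };
  enum part_state states[3]= { PART_NORMAL, PART_NORMAL, PART_NORMAL };
  const char *to_drop[1]= { "p1" };
  unsigned no_parts_found= 0;
  unsigned i, j;

  for (i= 0; i < 3; i++)
    for (j= 0; j < 1; j++)
      if (!strcmp(parts[i], to_drop[j]))
      {
        states[i]= PART_TO_BE_DROPPED;  /* marked; dropped in a later phase */
        no_parts_found++;
      }
  puts(no_parts_found == 1 ? "ok" : "ER_DROP_PARTITION_NON_EXISTENT");
  return 0;
}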
+ +Using linear hash then all remaining partitions will have a new reorganised +part. + +Existing partitions Coalesced partition +------ ------ ------ | ------ +| | | | | | | | | +| p0 | | p1 | | p2 | | | p3 | +------ ------ ------ | ------ +PART_NORMAL PART_CHANGED PART_NORMAL PART_REORGED_DROPPED +PART_NORMAL PART_IS_CHANGED PART_NORMAL PART_TO_BE_DROPPED +PART_NORMAL PART_NORMAL PART_NORMAL PART_IS_DROPPED + +Reorganised existing partitions + ------ + | | + | p1'| + ------ + +p0 - p3 is in the partitions list. +The p1' partition will actually not be in any list it is deduced from the +state of p1. +*/ + { + uint part_count= 0, start_part= 1, start_sec_part= 1; + uint end_part= 0, end_sec_part= 0; + bool all_parts= TRUE; + if (*fast_alter_partition && + tab_part_info->linear_hash_ind) + { + uint upper_2n= tab_part_info->linear_hash_mask + 1; + uint lower_2n= upper_2n >> 1; + all_parts= FALSE; + if (no_parts_coalesced >= lower_2n) + { + all_parts= TRUE; + } + else if (no_parts_remain >= lower_2n) + { + end_part= tab_part_info->no_parts - (lower_2n + 1); + start_part= no_parts_remain - lower_2n; + } + else + { + start_part= 0; + end_part= tab_part_info->no_parts - (lower_2n + 1); + end_sec_part= (lower_2n >> 1) - 1; + start_sec_part= end_sec_part - (lower_2n - (no_parts_remain + 1)); + } + } + do + { + partition_element *p_elem= part_it++; + if (*fast_alter_partition && + (all_parts || + (part_count >= start_part && part_count <= end_part) || + (part_count >= start_sec_part && part_count <= end_sec_part))) + p_elem->part_state= PART_CHANGED; + if (++part_count > no_parts_remain) + { + if (*fast_alter_partition) + p_elem->part_state= PART_REORGED_DROPPED; + else + part_it.remove(); + } + } while (part_count < tab_part_info->no_parts); + tab_part_info->no_parts= no_parts_remain; + } + if (!(alter_info->flags & ALTER_TABLE_REORG)) + tab_part_info->use_default_no_partitions= FALSE; + } + else if (alter_info->flags == ALTER_REORGANIZE_PARTITION) + { + /* + Reorganise partitions takes a number of partitions that are next + to each other (at least for RANGE PARTITIONS) and then uses those + to create a set of new partitions. So data is copied from those + partitions into the new set of partitions. Those new partitions + can have more values in the LIST value specifications or less both + are allowed. The ranges can be different but since they are + changing a set of consecutive partitions they must cover the same + range as those changed from. + This command can be used on RANGE and LIST partitions. + */ + uint no_parts_reorged= alter_info->partition_names.elements; + uint no_parts_new= thd->lex->part_info->partitions.elements; + partition_info *alt_part_info= thd->lex->part_info; + uint check_total_partitions; + if (no_parts_reorged > tab_part_info->no_parts) + { + my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0)); + DBUG_RETURN(TRUE); + } + if (!(tab_part_info->part_type == RANGE_PARTITION || + tab_part_info->part_type == LIST_PARTITION) && + (no_parts_new != no_parts_reorged)) + { + my_error(ER_REORG_HASH_ONLY_ON_SAME_NO, MYF(0)); + DBUG_RETURN(TRUE); + } + check_total_partitions= tab_part_info->no_parts + no_parts_new; + check_total_partitions-= no_parts_reorged; + if (check_total_partitions > MAX_PARTITIONS) + { + my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); + DBUG_RETURN(TRUE); + } +/* +Online handling: +REORGANIZE PARTITION: +--------------------- +The figure exemplifies the handling of partitions, their state changes and +how they are organised. 
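A sketch of the tail handling in the coalesce loop above: coalescing four partitions down to three, the last partition is flagged PART_REORGED_DROPPED on the fast/online path (the offline path removes it from the list at once). The array and counts are illustrative:

#include <stdio.h>

enum part_state { PART_NORMAL, PART_CHANGED, PART_REORGED_DROPPED };

int main(void)
{
  enum part_state states[4]= { PART_NORMAL, PART_NORMAL,
                               PART_NORMAL, PART_NORMAL };
  unsigned no_parts= 4, no_parts_remain= 3, part_count= 0;

  do
  {
    if (++part_count > no_parts_remain)             /* partitions past the */
      states[part_count - 1]= PART_REORGED_DROPPED; /* remainder go away   */
  } while (part_count < no_parts);
  printf("p3 state: %d\n", states[3]);       /* 2: PART_REORGED_DROPPED */
  return 0;
}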
It exemplifies four partitions where two of the +partitions are reorganised (p1 and p2) into two new partitions (p4 and p5). +The reason of this change could be to change range limits, change list +values or for hash partitions simply reorganise the partition which could +also involve moving them to new disks or new node groups (MySQL Cluster). + +Existing partitions +------ ------ ------ ------ +| | | | | | | | +| p0 | | p1 | | p2 | | p3 | +------ ------ ------ ------ +PART_NORMAL PART_TO_BE_REORGED PART_NORMAL +PART_NORMAL PART_TO_BE_DROPPED PART_NORMAL +PART_NORMAL PART_IS_DROPPED PART_NORMAL + +Reorganised new partitions (replacing p1 and p2) +------ ------ +| | | | +| p4 | | p5 | +------ ------ +PART_TO_BE_ADDED +PART_IS_ADDED +PART_IS_ADDED + +All unchanged partitions and the new partitions are in the partitions list +in the order they will have when the change is completed. The reorganised +partitions are placed in the temp_partitions list. PART_IS_ADDED is only a +temporary state not written in the frm file. It is used to ensure we write +the generated partition syntax in a correct manner. +*/ + { + List_iterator tab_it(tab_part_info->partitions); + uint part_count= 0; + bool found_first= FALSE; + bool found_last= FALSE; + bool is_last_partition_reorged; + uint drop_count= 0; + longlong tab_max_range= 0, alt_max_range= 0; + do + { + partition_element *part_elem= tab_it++; + is_last_partition_reorged= FALSE; + if (is_name_in_list(part_elem->partition_name, + alter_info->partition_names)) + { + is_last_partition_reorged= TRUE; + drop_count++; + tab_max_range= part_elem->range_value; + if (*fast_alter_partition && + tab_part_info->temp_partitions.push_back(part_elem)) + { + mem_alloc_error(1); + DBUG_RETURN(TRUE); + } + if (*fast_alter_partition) + part_elem->part_state= PART_TO_BE_REORGED; + if (!found_first) + { + uint alt_part_count= 0; + found_first= TRUE; + List_iterator + alt_it(alt_part_info->partitions); + do + { + partition_element *alt_part_elem= alt_it++; + alt_max_range= alt_part_elem->range_value; + if (*fast_alter_partition) + alt_part_elem->part_state= PART_TO_BE_ADDED; + if (alt_part_count == 0) + tab_it.replace(alt_part_elem); + else + tab_it.after(alt_part_elem); + } while (++alt_part_count < no_parts_new); + } + else if (found_last) + { + my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0)); + DBUG_RETURN(TRUE); + } + else + tab_it.remove(); + } + else + { + if (found_first) + found_last= TRUE; + } + } while (++part_count < tab_part_info->no_parts); + if (drop_count != no_parts_reorged) + { + my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0), "REORGANIZE"); + DBUG_RETURN(TRUE); + } + if (tab_part_info->part_type == RANGE_PARTITION && + ((is_last_partition_reorged && + alt_max_range < tab_max_range) || + (!is_last_partition_reorged && + alt_max_range != tab_max_range))) + { + /* + For range partitioning the total resulting range before and + after the change must be the same except in one case. This is + when the last partition is reorganised, in this case it is + acceptable to increase the total range. + The reason is that it is not allowed to have "holes" in the + middle of the ranges and thus we should not allow to reorganise + to create "holes". Also we should not allow using REORGANIZE + to drop data. 
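The range-cover rule just described, restated as a predicate; is_last_partition_reorged, tab_max_range and alt_max_range correspond to the variables above:

#include <stdio.h>

static int reorg_range_error(int last_reorged,
                             long long tab_max, long long alt_max)
{
  if (last_reorged)
    return alt_max < tab_max;   /* the last range may grow, never shrink */
  return alt_max != tab_max;    /* inner ranges must cover exactly */
}

int main(void)
{
  printf("%d\n", reorg_range_error(0, 100, 100)); /* 0: ok */
  printf("%d\n", reorg_range_error(0, 100, 90));  /* 1: would create a hole */
  printf("%d\n", reorg_range_error(1, 100, 200)); /* 0: last may be extended */
  return 0;
}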
+ */ + my_error(ER_REORG_OUTSIDE_RANGE, MYF(0)); + DBUG_RETURN(TRUE); + } + tab_part_info->no_parts= check_total_partitions; + } + } + else + { + DBUG_ASSERT(FALSE); + } + *partition_changed= TRUE; + create_info->db_type= &partition_hton; + thd->lex->part_info= tab_part_info; + if (alter_info->flags == ALTER_ADD_PARTITION || + alter_info->flags == ALTER_REORGANIZE_PARTITION) + { + if (check_partition_info(tab_part_info, (handlerton**)NULL, + table->file, ULL(0))) + { + DBUG_RETURN(TRUE); + } + } + } + else + { + /* + When thd->lex->part_info has a reference to a partition_info the + ALTER TABLE contained a definition of a partitioning. + + Case I: + If there was a partition before and there is a new one defined. + We use the new partitioning. The new partitioning is already + defined in the correct variable so no work is needed to + accomplish this. + We do however need to update partition_changed to ensure that not + only the frm file is changed in the ALTER TABLE command. + + Case IIa: + There was a partitioning before and there is no new one defined. + Also the user has not specified an explicit engine to use. + + We use the old partitioning also for the new table. We do this + by assigning the partition_info from the table loaded in + open_ltable to the partition_info struct used by mysql_create_table + later in this method. + + Case IIb: + There was a partitioning before and there is no new one defined. + The user has specified an explicit engine to use. + + Since the user has specified an explicit engine to use we override + the old partitioning info and create a new table using the specified + engine. This is the reason for the extra check if old and new engine + is equal. + In this case the partition also is changed. + + Case III: + There was no partitioning before altering the table, there is + partitioning defined in the altered table. Use the new partitioning. + No work needed since the partitioning info is already in the + correct variable. + + In this case we discover one case where the new partitioning is using + the same partition function as the default (PARTITION BY KEY or + PARTITION BY LINEAR KEY with the list of fields equal to the primary + key fields OR PARTITION BY [LINEAR] KEY() for tables without primary + key) + Also here partition has changed and thus a new table must be + created. + + Case IV: + There was no partitioning before and no partitioning defined. + Obviously no work needed. + */ + if (table->part_info) + { + if (!thd->lex->part_info && + create_info->db_type == old_db_type) + thd->lex->part_info= table->part_info; + } + if (thd->lex->part_info) + { + /* + Need to cater for engine types that can handle partition without + using the partition handler. 
+ */ + if (thd->lex->part_info != table->part_info) + *partition_changed= TRUE; + if (create_info->db_type == &partition_hton) + { + if (table->part_info) + { + thd->lex->part_info->default_engine_type= + table->part_info->default_engine_type; + } + else + { + thd->lex->part_info->default_engine_type= + ha_checktype(thd, DB_TYPE_DEFAULT, FALSE, FALSE); + } + } + else + { + bool is_native_partitioned= FALSE; + partition_info *part_info= thd->lex->part_info; + part_info->default_engine_type= create_info->db_type; + if (check_native_partitioned(create_info, &is_native_partitioned, + part_info, thd)) + { + DBUG_RETURN(TRUE); + } + if (!is_native_partitioned) + { + DBUG_ASSERT(create_info->db_type != &default_hton); + create_info->db_type= &partition_hton; + } + } + DBUG_PRINT("info", ("default_db_type = %s", + thd->lex->part_info->default_engine_type->name)); + } + } + DBUG_RETURN(FALSE); +} + + +/* + Change partitions, used to implement ALTER TABLE ADD/REORGANIZE/COALESCE + partitions. This method is used to implement both single-phase and multi- + phase implementations of ADD/REORGANIZE/COALESCE partitions. + + SYNOPSIS + mysql_change_partitions() + lpt Struct containing parameters + + RETURN VALUES + TRUE Failure + FALSE Success + + DESCRIPTION + Request handler to add partitions as set in states of the partition + + Elements of the lpt parameters used: + create_info Create information used to create partitions + db Database name + table_name Table name + copied Output parameter where number of copied + records are added + deleted Output parameter where number of deleted + records are added +*/ + +static bool mysql_change_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + char path[FN_REFLEN+1]; + DBUG_ENTER("mysql_change_partitions"); + + build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, ""); + DBUG_RETURN(lpt->table->file->change_partitions(lpt->create_info, path, + &lpt->copied, + &lpt->deleted, + lpt->pack_frm_data, + lpt->pack_frm_len)); +} + + +/* + Rename partitions in an ALTER TABLE of partitions + + SYNOPSIS + mysql_rename_partitions() + lpt Struct containing parameters + + RETURN VALUES + TRUE Failure + FALSE Success + + DESCRIPTION + Request handler to rename partitions as set in states of the partition + + Parameters used: + db Database name + table_name Table name +*/ + +static bool mysql_rename_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + char path[FN_REFLEN+1]; + DBUG_ENTER("mysql_rename_partitions"); + + build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, ""); + DBUG_RETURN(lpt->table->file->rename_partitions(path)); +} + + +/* + Drop partitions in an ALTER TABLE of partitions + + SYNOPSIS + mysql_drop_partitions() + lpt Struct containing parameters + + RETURN VALUES + TRUE Failure + FALSE Success + DESCRIPTION + Drop the partitions marked with PART_TO_BE_DROPPED state and remove + those partitions from the list. 
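A sketch of the list compaction this routine performs once the handler call has succeeded, using a plain array in place of the server's List_iterator; only the PART_IS_DROPPED elements are unlinked and the partition count reduced:

#include <stdio.h>

enum part_state { PART_NORMAL, PART_IS_DROPPED };

int main(void)
{
  enum part_state parts[3]= { PART_NORMAL, PART_IS_DROPPED, PART_NORMAL };
  unsigned no_parts= 3, kept= 0, i;

  for (i= 0; i < no_parts; i++)
    if (parts[i] != PART_IS_DROPPED)
      parts[kept++]= parts[i];          /* compact the list in place */
  no_parts= kept;
  printf("remaining partitions: %u\n", no_parts);   /* 2 */
  return 0;
}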
+ + Parameters used: + table Table object + db Database name + table_name Table name +*/ + +static bool mysql_drop_partitions(ALTER_PARTITION_PARAM_TYPE *lpt) +{ + char path[FN_REFLEN+1]; + partition_info *part_info= lpt->table->part_info; + List_iterator part_it(part_info->partitions); + uint i= 0; + uint remove_count= 0; + DBUG_ENTER("mysql_drop_partitions"); + + build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, ""); + if (lpt->table->file->drop_partitions(path)) + { + DBUG_RETURN(TRUE); + } + do + { + partition_element *part_elem= part_it++; + if (part_elem->part_state == PART_IS_DROPPED) + { + part_it.remove(); + remove_count++; + } + } while (++i < part_info->no_parts); + part_info->no_parts-= remove_count; + DBUG_RETURN(FALSE); +} + + +/* + Actually perform the change requested by ALTER TABLE of partitions + previously prepared. + + SYNOPSIS + fast_alter_partition_table() + thd Thread object + table Table object + alter_info ALTER TABLE info + create_info Create info for CREATE TABLE + table_list List of the table involved + create_list The fields in the resulting table + key_list The keys in the resulting table + db Database name of new table + table_name Table name of new table + + RETURN VALUES + TRUE Error + FALSE Success + + DESCRIPTION + Perform all ALTER TABLE operations for partitioned tables that can be + performed fast without a full copy of the original table. +*/ + +uint fast_alter_partition_table(THD *thd, TABLE *table, + ALTER_INFO *alter_info, + HA_CREATE_INFO *create_info, + TABLE_LIST *table_list, + List *create_list, + List *key_list, const char *db, + const char *table_name, + uint fast_alter_partition) +{ + /* Set-up struct used to write frm files */ + ulonglong copied= 0; + ulonglong deleted= 0; + partition_info *part_info= table->part_info; + ALTER_PARTITION_PARAM_TYPE lpt_obj; + ALTER_PARTITION_PARAM_TYPE *lpt= &lpt_obj; + bool written_bin_log= TRUE; + DBUG_ENTER("fast_alter_partition_table"); + + lpt->thd= thd; + lpt->create_info= create_info; + lpt->create_list= create_list; + lpt->key_list= key_list; + lpt->db_options= create_info->table_options; + if (create_info->row_type == ROW_TYPE_DYNAMIC) + lpt->db_options|= HA_OPTION_PACK_RECORD; + lpt->table= table; + lpt->key_info_buffer= 0; + lpt->key_count= 0; + lpt->db= db; + lpt->table_name= table_name; + lpt->copied= 0; + lpt->deleted= 0; + lpt->pack_frm_data= NULL; + lpt->pack_frm_len= 0; + thd->lex->part_info= part_info; + + if (alter_info->flags & ALTER_OPTIMIZE_PARTITION || + alter_info->flags & ALTER_ANALYZE_PARTITION || + alter_info->flags & ALTER_CHECK_PARTITION || + alter_info->flags & ALTER_REPAIR_PARTITION) + { + /* + In this case the user has specified that he wants a set of partitions + to be optimised and the partition engine can handle optimising + partitions natively without requiring a full rebuild of the + partitions. + + In this case it is enough to call optimise_partitions, there is no + need to change frm files or anything else. 
+ */ + written_bin_log= FALSE; + if (((alter_info->flags & ALTER_OPTIMIZE_PARTITION) && + (table->file->optimize_partitions(thd))) || + ((alter_info->flags & ALTER_ANALYZE_PARTITION) && + (table->file->analyze_partitions(thd))) || + ((alter_info->flags & ALTER_CHECK_PARTITION) && + (table->file->check_partitions(thd))) || + ((alter_info->flags & ALTER_REPAIR_PARTITION) && + (table->file->repair_partitions(thd)))) + { + fast_alter_partition_error_handler(lpt); + DBUG_RETURN(TRUE); + } + } + else if (fast_alter_partition & HA_PARTITION_ONE_PHASE) + { + /* + In the case where the engine supports one phase online partition + changes it is not necessary to have any exclusive locks. The + correctness is upheld instead by transactions being aborted if they + access the table after its partition definition has changed (if they + are still using the old partition definition). + + The handler is in this case responsible to ensure that all users + start using the new frm file after it has changed. To implement + one phase it is necessary for the handler to have the master copy + of the frm file and use discovery mechanisms to renew it. Thus + write frm will write the frm, pack the new frm and finally + the frm is deleted and the discovery mechanisms will either restore + back to the old or installing the new after the change is activated. + + Thus all open tables will be discovered that they are old, if not + earlier as soon as they try an operation using the old table. One + should ensure that this is checked already when opening a table, + even if it is found in the cache of open tables. + + change_partitions will perform all operations and it is the duty of + the handler to ensure that the frm files in the system gets updated + in synch with the changes made and if an error occurs that a proper + error handling is done. + + If the MySQL Server crashes at this moment but the handler succeeds + in performing the change then the binlog is not written for the + change. There is no way to solve this as long as the binlog is not + transactional and even then it is hard to solve it completely. + + The first approach here was to downgrade locks. Now a different approach + is decided upon. The idea is that the handler will have access to the + ALTER_INFO when store_lock arrives with TL_WRITE_ALLOW_READ. So if the + handler knows that this functionality can be handled with a lower lock + level it will set the lock level to TL_WRITE_ALLOW_WRITE immediately. + Thus the need to downgrade the lock disappears. + 1) Write the new frm, pack it and then delete it + 2) Perform the change within the handler + */ + if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE | WFRM_PACK_FRM)) || + (mysql_change_partitions(lpt))) + { + fast_alter_partition_error_handler(lpt); + DBUG_RETURN(TRUE); + } + } + else if (alter_info->flags == ALTER_DROP_PARTITION) + { + /* + Now after all checks and setting state on dropped partitions we can + start the actual dropping of the partitions. + + Drop partition is actually two things happening. The first is that + a lot of records are deleted. The second is that the behaviour of + subsequent updates and writes and deletes will change. The delete + part can be handled without any particular high lock level by + transactional engines whereas non-transactional engines need to + ensure that this change is done with an exclusive lock on the table. + The second part, the change of partitioning does however require + an exclusive lock to install the new partitioning as one atomic + operation. 
If this is not the case, it is possible for two + transactions to see the change in a different order than their + serialisation order. Thus we need an exclusive lock for both + transactional and non-transactional engines. + + For LIST partitions it could be possible to avoid the exclusive lock + (and for RANGE partitions if they didn't rearrange range definitions + after a DROP PARTITION) if one ensured that failed accesses to the + dropped partitions was aborted for sure (thus only possible for + transactional engines). + + 1) Lock the table in TL_WRITE_ONLY to ensure all other accesses to + the table have completed + 2) Write the new frm file where the partitions have changed but are + still remaining with the state PART_TO_BE_DROPPED + 3) Write the bin log + 4) Prepare MyISAM handlers for drop of partitions + 5) Ensure that any users that has opened the table but not yet + reached the abort lock do that before downgrading the lock. + 6) Drop the partitions + 7) Write the frm file that the partition has been dropped + 8) Wait until all accesses using the old frm file has completed + 9) Complete query + */ + if ((abort_and_upgrade_lock(lpt)) || + (mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) || + ((!thd->lex->no_write_to_binlog) && + (write_bin_log(thd, FALSE, + thd->query, thd->query_length), FALSE)) || + (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) || + (close_open_tables_and_downgrade(lpt), FALSE) || + (mysql_drop_partitions(lpt)) || + (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) || + (mysql_wait_completed_table(lpt, table), FALSE)) + { + fast_alter_partition_error_handler(lpt); + DBUG_RETURN(TRUE); + } + } + else if ((alter_info->flags & ALTER_ADD_PARTITION) && + (part_info->part_type == RANGE_PARTITION || + part_info->part_type == LIST_PARTITION)) + { + /* + ADD RANGE/LIST PARTITIONS + In this case there are no tuples removed and no tuples are added. + Thus the operation is merely adding a new partition. Thus it is + necessary to perform the change as an atomic operation. Otherwise + someone reading without seeing the new partition could potentially + miss updates made by a transaction serialised before it that are + inserted into the new partition. + + 1) Write the new frm file where state of added partitions is + changed to PART_TO_BE_ADDED + 2) Add the new partitions + 3) Lock all partitions in TL_WRITE_ONLY to ensure that no users + are still using the old partitioning scheme. Wait until all + ongoing users have completed before progressing. + 4) Write a new frm file of the table where the partitions are added + to the table. + 5) Write binlog + 6) Wait until all accesses using the old frm file has completed + 7) Complete query + */ + if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) || + (mysql_change_partitions(lpt)) || + (abort_and_upgrade_lock(lpt)) || + (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) || + ((!thd->lex->no_write_to_binlog) && + (write_bin_log(thd, FALSE, + thd->query, thd->query_length), FALSE)) || + (close_open_tables_and_downgrade(lpt), FALSE)) + { + fast_alter_partition_error_handler(lpt); + DBUG_RETURN(TRUE); + } + } + else + { + /* + ADD HASH PARTITION/ + COALESCE PARTITION/ + REBUILD PARTITION/ + REORGANIZE PARTITION + + In this case all records are still around after the change although + possibly organised into new partitions, thus by ensuring that all + updates go to both the old and the new partitioning scheme we can + actually perform this operation lock-free. The only exception to + this is when REORGANIZE PARTITION adds/drops ranges. 
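A note on the call chains above: each numbered step sequence is folded into a single short-circuiting || expression in which TRUE signals failure. Steps that return void, such as write_bin_log() and close_open_tables_and_downgrade(), are wrapped as (step(), FALSE) so the comma operator executes them without ever tripping the error branch, and a failing bool step skips everything after it. A compilable sketch of the idiom; the step names are hypothetical stand-ins, not the real server calls:

#include <cstdio>

static bool write_new_frm()      { puts("write frm");    return false; }
static bool drop_partitions()    { puts("drop parts");   return false; }
static void write_binlog()       { puts("write binlog"); }
static void downgrade_lock()     { puts("downgrade");    }

int main()
{
  if (write_new_frm() ||
      (write_binlog(), false) ||     // void step: always "succeeds"
      drop_partitions() ||
      (downgrade_lock(), false))
  {
    puts("error handler runs; steps after the failing one were skipped");
    return 1;
  }
  puts("all steps completed in order");
  return 0;
}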
In this case + there needs to be an exclusive lock during the time when the range + changes occur. + This is only possible if the handler can ensure double-write for a + period. The double write will ensure that it doesn't matter where the + data is read from since both places are updated for writes. If such + double writing is not performed then it is necessary to perform the + change with the usual exclusive lock. With double writes it is even + possible to perform writes in parallel with the reorganisation of + partitions. + + Without double write procedure we get the following procedure. + The only difference with using double write is that we can downgrade + the lock to TL_WRITE_ALLOW_WRITE. Double write in this case only + double writes from old to new. If we had double writing in both + directions we could perform the change completely without exclusive + lock for HASH partitions. + Handlers that perform double writing during the copy phase can actually + use a lower lock level. This can be handled inside store_lock in the + respective handler. + + 1) Write the new frm file where state of added partitions is + changed to PART_TO_BE_ADDED and the reorganised partitions + are set in state PART_TO_BE_REORGED. + 2) Add the new partitions + Copy from the reorganised partitions to the new partitions + 3) Lock all partitions in TL_WRITE_ONLY to ensure that no users + are still using the old partitioning scheme. Wait until all + ongoing users have completed before progressing. + 4) Prepare MyISAM handlers for rename and delete of partitions + 5) Write a new frm file of the table where the partitions are + reorganised. + 6) Rename the reorged partitions such that they are no longer + used and rename those added to their real new names. + 7) Write bin log + 8) Wait until all accesses using the old frm file has completed + 9) Drop the reorganised partitions + 10)Write a new frm file of the table where the partitions are + reorganised. + 11)Wait until all accesses using the old frm file has completed + 12)Complete query + */ + + if ((mysql_write_frm(lpt, WFRM_INITIAL_WRITE)) || + (mysql_change_partitions(lpt)) || + (abort_and_upgrade_lock(lpt)) || + (mysql_write_frm(lpt, WFRM_CREATE_HANDLER_FILES)) || + (table->file->extra(HA_EXTRA_PREPARE_FOR_DELETE)) || + (mysql_rename_partitions(lpt)) || + ((!thd->lex->no_write_to_binlog) && + (write_bin_log(thd, FALSE, + thd->query, thd->query_length), FALSE)) || + (close_open_tables_and_downgrade(lpt), FALSE) || + (mysql_drop_partitions(lpt)) || + (mysql_write_frm(lpt, 0UL)) || + (mysql_wait_completed_table(lpt, table), FALSE)) + { + fast_alter_partition_error_handler(lpt); + DBUG_RETURN(TRUE); + } + } + /* + A final step is to write the query to the binlog and send ok to the + user + */ + DBUG_RETURN(fast_end_partition(thd, lpt->copied, lpt->deleted, + table_list, FALSE, lpt, + written_bin_log)); +} #endif + /* Prepare for calling val_int on partition function by setting fields to point to the record where the values of the PF-fields are stored. + SYNOPSIS set_field_ptr() ptr Array of fields to change ptr new_buf New record pointer old_buf Old record pointer + DESCRIPTION Set ptr in field objects of field array to refer to new_buf record instead of previously old_buf. 
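The set_field_ptr() description above (its body follows in the next hunk) rests on one observation: every Field keeps a raw pointer into the current record buffer, so retargeting a whole field array to another buffer is a single pointer difference applied per field. A simplified sketch, with Field reduced to a stand-in struct rather than the server class:

#include <cstddef>
#include <cstdio>

struct Field
{
  unsigned char *ptr;   // points at this field's bytes in a record buffer
};

static void set_field_ptrs(Field **fields, size_t count,
                           const unsigned char *new_buf,
                           const unsigned char *old_buf)
{
  ptrdiff_t diff= new_buf - old_buf;   // same offset applies to every field
  for (size_t i= 0; i < count; i++)
    fields[i]->ptr+= diff;
}

int main()
{
  unsigned char rec_a[16], rec_b[16];
  Field f0= { rec_a + 0 }, f1= { rec_a + 4 };
  Field *fields[]= { &f0, &f1 };

  set_field_ptrs(fields, 2, rec_b, rec_a);
  printf("f1 offset in rec_b: %td\n", f1.ptr - rec_b);  // prints 4
  return 0;
}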
Used before calling val_int and after @@ -3424,10 +5423,10 @@ end: */ void set_field_ptr(Field **ptr, const byte *new_buf, - const byte *old_buf) + const byte *old_buf) { my_ptrdiff_t diff= (new_buf - old_buf); - DBUG_ENTER("set_nullable_field_ptr"); + DBUG_ENTER("set_field_ptr"); do { @@ -3442,11 +5441,13 @@ void set_field_ptr(Field **ptr, const byte *new_buf, point to the record where the values of the PF-fields are stored. This variant works on a key_part reference. It is not required that all fields are NOT NULL fields. + SYNOPSIS set_key_field_ptr() - key_part key part with a set of fields to change ptr + key_info key info with a set of fields to change ptr new_buf New record pointer old_buf Old record pointer + DESCRIPTION Set ptr in field objects of field array to refer to new_buf record instead of previously old_buf. Used before calling val_int and after @@ -3459,7 +5460,8 @@ void set_key_field_ptr(KEY *key_info, const byte *new_buf, const byte *old_buf) { KEY_PART_INFO *key_part= key_info->key_part; - uint key_parts= key_info->key_parts, i= 0; + uint key_parts= key_info->key_parts; + uint i= 0; my_ptrdiff_t diff= (new_buf - old_buf); DBUG_ENTER("set_key_field_ptr"); @@ -3472,6 +5474,27 @@ void set_key_field_ptr(KEY *key_info, const byte *new_buf, } +/* + SYNOPSIS + mem_alloc_error() + size Size of memory attempted to allocate + None + + RETURN VALUES + None + + DESCRIPTION + A routine to use for all the many places in the code where memory + allocation error can happen, a tremendous amount of them, needs + simple routine that signals this error. +*/ + +void mem_alloc_error(size_t size) +{ + my_error(ER_OUTOFMEMORY, MYF(0), size); +} + + /* Fill the string comma-separated line of used partitions names SYNOPSIS diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 7b35f057217..b66e617c06e 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -92,6 +92,12 @@ public: virtual bool send_fields(List &list, uint flags); virtual bool send_data(List &items); virtual bool send_eof(); +#ifdef EMBEDDED_LIBRARY + void begin_dataset() + { + protocol.begin_dataset(); + } +#endif }; /****************************************************************************** @@ -524,9 +530,10 @@ void set_param_time(Item_param *param, uchar **pos, ulong len) void set_param_datetime(Item_param *param, uchar **pos, ulong len) { - MYSQL_TIME *to= (MYSQL_TIME*)*pos; + MYSQL_TIME tm= *((MYSQL_TIME*)*pos); + tm.neg= 0; - param->set_time(to, MYSQL_TIMESTAMP_DATETIME, + param->set_time(&tm, MYSQL_TIMESTAMP_DATETIME, MAX_DATETIME_WIDTH * MY_CHARSET_BIN_MB_MAXLEN); } @@ -1866,7 +1873,7 @@ void mysql_stmt_prepare(THD *thd, const char *packet, uint packet_length) thd->stmt_map.erase(stmt); } else - mysql_log.write(thd, COM_STMT_PREPARE, "[%lu] %s", stmt->id, packet); + general_log_print(thd, COM_STMT_PREPARE, "[%lu] %s", stmt->id, packet); /* check_prepared_statemnt sends the metadata packet in case of success */ DBUG_VOID_RETURN; @@ -2228,7 +2235,7 @@ void mysql_stmt_execute(THD *thd, char *packet_arg, uint packet_length) if (!(specialflag & SPECIAL_NO_PRIOR)) my_pthread_setprio(pthread_self(), WAIT_PRIOR); if (error == 0) - mysql_log.write(thd, COM_STMT_EXECUTE, "[%lu] %s", stmt->id, thd->query); + general_log_print(thd, COM_STMT_EXECUTE, "[%lu] %s", stmt->id, thd->query); DBUG_VOID_RETURN; @@ -2607,7 +2614,7 @@ void Prepared_statement::setup_set_params() { /* Setup binary logging */ if (mysql_bin_log.is_open() && is_update_query(lex->sql_command) || - mysql_log.is_open() || mysql_slow_log.is_open()) + opt_log 
|| opt_slow_log) { set_params_from_vars= insert_params_from_vars_with_log; #ifndef EMBEDDED_LIBRARY diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5c1ec8b3a49..672f7fe8abe 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -605,8 +605,8 @@ bool mysqld_show_create_db(THD *thd, char *dbname, { my_error(ER_DBACCESS_DENIED_ERROR, MYF(0), sctx->priv_user, sctx->host_or_ip, dbname); - mysql_log.write(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), - sctx->priv_user, sctx->host_or_ip, dbname); + general_log_print(thd,COM_INIT_DB,ER(ER_DBACCESS_DENIED_ERROR), + sctx->priv_user, sctx->host_or_ip, dbname); DBUG_RETURN(TRUE); } #endif @@ -1243,8 +1243,8 @@ store_create_info(THD *thd, TABLE_LIST *table_list, String *packet, char *part_syntax; if (table->part_info && ((part_syntax= generate_partition_syntax(table->part_info, - &part_syntax_len, - FALSE,FALSE)))) + &part_syntax_len, + FALSE,FALSE)))) { packet->append(part_syntax, part_syntax_len); my_free(part_syntax, MYF(0)); @@ -1502,7 +1502,7 @@ void mysqld_list_processes(THD *thd,const char *user, bool verbose) if (thd_info->proc_info) protocol->store(thd_info->proc_info, system_charset_info); else - protocol->store(command_name[thd_info->command], system_charset_info); + protocol->store(command_name[thd_info->command].str, system_charset_info); if (thd_info->start_time) protocol->store((uint32) (now - thd_info->start_time)); else @@ -2835,6 +2835,7 @@ int fill_schema_charsets(THD *thd, TABLE_LIST *tables, COND *cond) CHARSET_INFO *tmp_cs= cs[0]; if (tmp_cs && (tmp_cs->state & MY_CS_PRIMARY) && (tmp_cs->state & MY_CS_AVAILABLE) && + !(tmp_cs->state & MY_CS_HIDDEN) && !(wild && wild[0] && wild_case_compare(scs, tmp_cs->csname,wild))) { @@ -2904,6 +2905,7 @@ int fill_schema_collation(THD *thd, TABLE_LIST *tables, COND *cond) CHARSET_INFO **cl; CHARSET_INFO *tmp_cs= cs[0]; if (!tmp_cs || !(tmp_cs->state & MY_CS_AVAILABLE) || + (tmp_cs->state & MY_CS_HIDDEN) || !(tmp_cs->state & MY_CS_PRIMARY)) continue; for (cl= all_charsets; cl < all_charsets+255 ;cl ++) diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 778d1af8a15..5ea46ec666c 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -41,67 +41,17 @@ static int copy_data_between_tables(TABLE *from,TABLE *to, static bool prepare_blob_field(THD *thd, create_field *sql_field); static bool check_engine(THD *thd, const char *table_name, handlerton **new_engine); +static int mysql_prepare_table(THD *thd, HA_CREATE_INFO *create_info, + List *fields, + List *keys, bool tmp_table, + uint *db_options, + handler *file, KEY **key_info_buffer, + uint *key_count, int select_field_count); -/* - SYNOPSIS - write_bin_log() - thd Thread object - clear_error is clear_error to be called - RETURN VALUES - NONE - DESCRIPTION - Write the binlog if open, routine used in multiple places in this - file -*/ - -static void write_bin_log(THD *thd, bool clear_error, - char const* query, ulong query_length) -{ - if (mysql_bin_log.is_open()) - { - if (clear_error) - thd->clear_error(); - thd->binlog_query(THD::STMT_QUERY_TYPE, - query, query_length, FALSE, FALSE); - } -} - -/* - SYNOPSIS - abort_and_upgrade_lock() - thd Thread object - table Table object - db Database name - table_name Table name - old_lock_level Old lock level - RETURN VALUES - TRUE Failure - FALSE Success - DESCRIPTION - Remember old lock level (for possible downgrade later on), abort all - waiting threads and ensure that all keeping locks currently are - completed such that we own the lock exclusively and no other interaction - is ongoing. 
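The INFORMATION_SCHEMA hunks above add a hidden-state test so internal character sets stay out of the charset and collation listings. The pattern is plain bitmask filtering over a state word; a self-contained sketch with illustrative flag values (not the real MY_CS_* constants):

#include <cstdio>

enum cs_state_flags
{
  CS_PRIMARY   = 1 << 0,
  CS_AVAILABLE = 1 << 1,
  CS_HIDDEN    = 1 << 2
};

struct charset_entry
{
  const char *name;
  unsigned state;
};

int main()
{
  charset_entry all[]= {
    { "latin1",   CS_PRIMARY | CS_AVAILABLE },
    { "utf8",     CS_PRIMARY | CS_AVAILABLE },
    { "internal", CS_PRIMARY | CS_AVAILABLE | CS_HIDDEN }  // never listed
  };

  for (const charset_entry &cs : all)
  {
    if (!(cs.state & CS_PRIMARY) ||
        !(cs.state & CS_AVAILABLE) ||
        (cs.state & CS_HIDDEN))      // the new check in the diff
      continue;
    printf("%s\n", cs.name);         // prints latin1 and utf8 only
  }
  return 0;
}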
-*/
-
-static bool abort_and_upgrade_lock(THD *thd, TABLE *table, const char *db,
-                                   const char *table_name,
-                                   uint *old_lock_level)
-{
-  uint flags= RTFC_WAIT_OTHER_THREAD_FLAG | RTFC_CHECK_KILLED_FLAG;
-  DBUG_ENTER("abort_and_upgrade_locks");
-
-  *old_lock_level= table->reginfo.lock_type;
-  mysql_lock_abort(thd, table);
-  VOID(remove_table_from_cache(thd, db, table_name, flags));
-  if (thd->killed)
-  {
-    thd->no_warnings_for_error= 0;
-    DBUG_RETURN(TRUE);
-  }
-  DBUG_RETURN(FALSE);
-}
-
+static int mysql_copy_create_lists(List<create_field> *orig_create_list,
+                                   List<Key> *orig_key,
+                                   List<create_field> *new_create_list,
+                                   List<Key> *new_key);
 
 #define MYSQL50_TABLE_NAME_PREFIX "#mysql50#"
 #define MYSQL50_TABLE_NAME_PREFIX_LENGTH 9
@@ -192,6 +142,272 @@ uint build_tmptable_filename(char *buff, size_t bufflen,
 #define ALTER_TABLE_INDEX_CHANGED 2
 
+/*
+  SYNOPSIS
+    mysql_copy_create_list()
+      orig_create_list          Original list of created fields
+      inout::new_create_list    Copy of original list
+
+  RETURN VALUES
+    FALSE                       Success
+    TRUE                        Memory allocation error
+
+  DESCRIPTION
+    mysql_prepare_table destroys the create_list and in some cases we need
+    this list afterwards for other purposes. Thus we copy it specifically
+    for use by mysql_prepare_table
+*/
+
+static int mysql_copy_create_list(List<create_field> *orig_create_list,
+                                  List<create_field> *new_create_list)
+{
+  List_iterator<create_field> prep_field_it(*orig_create_list);
+  create_field *prep_field;
+  DBUG_ENTER("mysql_copy_create_list");
+
+  while ((prep_field= prep_field_it++))
+  {
+    create_field *field= new create_field(*prep_field);
+    if (!field || new_create_list->push_back(field))
+    {
+      mem_alloc_error(2);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
+  SYNOPSIS
+    mysql_copy_key_list()
+      orig_key                  Original list of keys
+      inout::new_key            Copy of original list
+
+  RETURN VALUES
+    FALSE                       Success
+    TRUE                        Memory allocation error
+
+  DESCRIPTION
+    mysql_prepare_table destroys the key list and in some cases we need
+    this list afterwards for other purposes. Thus we copy it specifically
+    for use by mysql_prepare_table
+*/
+
+static int mysql_copy_key_list(List<Key> *orig_key,
+                               List<Key> *new_key)
+{
+  List_iterator<Key> prep_key_it(*orig_key);
+  Key *prep_key;
+  DBUG_ENTER("mysql_copy_key_list");
+
+  while ((prep_key= prep_key_it++))
+  {
+    List<key_part_spec> prep_columns;
+    List_iterator<key_part_spec> prep_col_it(prep_key->columns);
+    key_part_spec *prep_col;
+    Key *temp_key;
+
+    while ((prep_col= prep_col_it++))
+    {
+      key_part_spec *prep_key_part;
+      if (!(prep_key_part= new key_part_spec(*prep_col)))
+      {
+        mem_alloc_error(sizeof(key_part_spec));
+        DBUG_RETURN(TRUE);
+      }
+      if (prep_columns.push_back(prep_key_part))
+      {
+        mem_alloc_error(2);
+        DBUG_RETURN(TRUE);
+      }
+    }
+    if (!(temp_key= new Key(prep_key->type, prep_key->name,
+                            prep_key->algorithm,
+                            prep_key->generated,
+                            prep_columns,
+                            prep_key->parser_name)))
+    {
+      mem_alloc_error(sizeof(Key));
+      DBUG_RETURN(TRUE);
+    }
+    if (new_key->push_back(temp_key))
+    {
+      mem_alloc_error(2);
+      DBUG_RETURN(TRUE);
+    }
+  }
+  DBUG_RETURN(FALSE);
+}
+
+
+/*
+  SYNOPSIS
+    mysql_write_frm()
+      lpt                       Struct carrying many parameters needed for
+                                this method
+      flags                     Flags as defined below
+        WFRM_INITIAL_WRITE        If set we need to prepare table before
+                                  creating the frm file
+        WFRM_CREATE_HANDLER_FILES If set we need to create the handler file
+                                  as part of the creation of the frm file
+        WFRM_PACK_FRM             If set we should pack the frm file and
+                                  delete the frm file
+
+  RETURN VALUES
+    TRUE                        Error
+    FALSE                       Success
+
+  DESCRIPTION
+    A support method that creates a new frm file and in this process it
+    regenerates the partition data.
It works fine also for non-partitioned + tables since it only handles partitioned data if it exists. +*/ + +bool mysql_write_frm(ALTER_PARTITION_PARAM_TYPE *lpt, uint flags) +{ + /* + Prepare table to prepare for writing a new frm file where the + partitions in add/drop state have temporarily changed their state + We set tmp_table to avoid get errors on naming of primary key index. + */ + int error= 0; + char path[FN_REFLEN+1]; + char frm_name[FN_REFLEN+1]; + DBUG_ENTER("mysql_write_frm"); + + if (flags & WFRM_INITIAL_WRITE) + { + error= mysql_copy_create_list(lpt->create_list, + &lpt->new_create_list); + error+= mysql_copy_key_list(lpt->key_list, + &lpt->new_key_list); + if (error) + { + DBUG_RETURN(TRUE); + } + } + build_table_filename(path, sizeof(path), lpt->db, lpt->table_name, ""); + strxmov(frm_name, path, reg_ext, NullS); + if ((flags & WFRM_INITIAL_WRITE) && + (mysql_prepare_table(lpt->thd, lpt->create_info, &lpt->new_create_list, + &lpt->new_key_list,/*tmp_table*/ 1, &lpt->db_options, + lpt->table->file, &lpt->key_info_buffer, + &lpt->key_count, /*select_field_count*/ 0))) + { + DBUG_RETURN(TRUE); + } +#ifdef WITH_PARTITION_STORAGE_ENGINE + { + partition_info *part_info= lpt->table->part_info; + char *part_syntax_buf; + uint syntax_len, i; + bool any_unnormal_state= FALSE; + + if (part_info) + { + uint max_part_state_len= part_info->partitions.elements + + part_info->temp_partitions.elements; + if (!(part_info->part_state= (uchar*)sql_alloc(max_part_state_len))) + { + DBUG_RETURN(TRUE); + } + part_info->part_state_len= 0; + if (!(part_syntax_buf= generate_partition_syntax(part_info, + &syntax_len, + TRUE, FALSE))) + { + DBUG_RETURN(TRUE); + } + for (i= 0; i < part_info->part_state_len; i++) + { + enum partition_state part_state= + (enum partition_state)part_info->part_state[i]; + if (part_state != PART_NORMAL && part_state != PART_IS_ADDED) + any_unnormal_state= TRUE; + } + if (!any_unnormal_state) + { + part_info->part_state= NULL; + part_info->part_state_len= 0; + } + part_info->part_info_string= part_syntax_buf; + part_info->part_info_len= syntax_len; + } + } +#endif + /* + We write the frm file with the LOCK_open mutex since otherwise we could + overwrite the frm file as another is reading it in open_table. + */ + lpt->create_info->table_options= lpt->db_options; + VOID(pthread_mutex_lock(&LOCK_open)); + if ((mysql_create_frm(lpt->thd, frm_name, lpt->db, lpt->table_name, + lpt->create_info, lpt->new_create_list, lpt->key_count, + lpt->key_info_buffer, lpt->table->file)) || + ((flags & WFRM_CREATE_HANDLER_FILES) && + lpt->table->file->create_handler_files(path))) + { + error= 1; + goto end; + } + if (flags & WFRM_PACK_FRM) + { + /* + We need to pack the frm file and after packing it we delete the + frm file to ensure it doesn't get used. This is only used for + handlers that have the main version of the frm file stored in the + handler. + */ + const void *data= 0; + uint length= 0; + if (readfrm(path, &data, &length) || + packfrm(data, length, &lpt->pack_frm_data, &lpt->pack_frm_len)) + { + my_free((char*)data, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*)lpt->pack_frm_data, MYF(MY_ALLOW_ZERO_PTR)); + mem_alloc_error(length); + error= 1; + goto end; + } + error= my_delete(frm_name, MYF(MY_WME)); + } + /* Frm file have been updated to reflect the change about to happen. 
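The WFRM_PACK_FRM branch above reads the freshly written frm file, packs it into memory for the handler to keep as the master copy, and only then deletes the file. A standalone sketch of that read-pack-delete shape using plain stdio, with readfrm()/packfrm() reduced to stubs and a hypothetical file name; the point is the cleanup-on-error pattern:

#include <cstdio>
#include <cstdlib>
#include <cstring>

static bool read_file(const char *name, char **data, size_t *len)
{
  FILE *f= fopen(name, "rb");
  if (!f)
    return true;                              // TRUE means failure
  fseek(f, 0, SEEK_END);
  *len= (size_t) ftell(f);
  rewind(f);
  *data= (char *) malloc(*len);
  bool err= !*data || fread(*data, 1, *len, f) != *len;
  fclose(f);
  return err;
}

// Stand-in for packfrm(): here just a copy; the real code compresses.
static bool pack(const char *data, size_t len, char **out, size_t *out_len)
{
  *out= (char *) malloc(len);
  if (!*out)
    return true;
  memcpy(*out, data, len);
  *out_len= len;
  return false;
}

int main()
{
  char *data= NULL, *packed= NULL;
  size_t len= 0, packed_len= 0;

  if (read_file("example.frm", &data, &len) ||
      pack(data, len, &packed, &packed_len))
  {
    free(data);                               // safe on NULL, much like
    free(packed);                             // MY_ALLOW_ZERO_PTR above
    return 1;
  }
  remove("example.frm");                      // delete only after success
  free(data);
  /* packed/packed_len would now travel in lpt->pack_frm_data/len */
  free(packed);
  return 0;
}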
*/ +end: + VOID(pthread_mutex_unlock(&LOCK_open)); + DBUG_RETURN(error); +} + + +/* + SYNOPSIS + write_bin_log() + thd Thread object + clear_error is clear_error to be called + query Query to log + query_length Length of query + + RETURN VALUES + NONE + + DESCRIPTION + Write the binlog if open, routine used in multiple places in this + file +*/ + +void write_bin_log(THD *thd, bool clear_error, + char const *query, ulong query_length) +{ + if (mysql_bin_log.is_open()) + { + if (clear_error) + thd->clear_error(); + thd->binlog_query(THD::STMT_QUERY_TYPE, + query, query_length, FALSE, FALSE); + } +} + /* delete (drop) tables. @@ -1807,24 +2023,54 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, if (!(file=get_new_handler((TABLE_SHARE*) 0, thd->mem_root, create_info->db_type))) { - my_error(ER_OUTOFMEMORY, MYF(0), 128);//128 bytes invented + mem_alloc_error(sizeof(handler)); DBUG_RETURN(TRUE); } #ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info= thd->lex->part_info; + if (!part_info && create_info->db_type->partition_flags && + (create_info->db_type->partition_flags() & HA_USE_AUTO_PARTITION)) + { + /* + Table is not defined as a partitioned table but the engine handles + all tables as partitioned. The handler will set up the partition info + object with the default settings. + */ + thd->lex->part_info= part_info= new partition_info(); + if (!part_info) + { + mem_alloc_error(sizeof(partition_info)); + DBUG_RETURN(TRUE); + } + file->set_auto_partitions(part_info); + } if (part_info) { /* - The table has been specified as a partitioned table. - If this is part of an ALTER TABLE the handler will be the partition - handler but we need to specify the default handler to use for - partitions also in the call to check_partition_info. We transport - this information in the default_db_type variable, it is either - DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command. + The table has been specified as a partitioned table. + If this is part of an ALTER TABLE the handler will be the partition + handler but we need to specify the default handler to use for + partitions also in the call to check_partition_info. We transport + this information in the default_db_type variable, it is either + DB_TYPE_DEFAULT or the engine set in the ALTER TABLE command. + + Check that we don't use foreign keys in the table since it won't + work even with InnoDB beneath it. */ + List_iterator key_iterator(keys); + Key *key; handlerton *part_engine_type= create_info->db_type; char *part_syntax_buf; uint syntax_len; + handlerton *engine_type; + while ((key= key_iterator++)) + { + if (key->type == Key::FOREIGN_KEY) + { + my_error(ER_CANNOT_ADD_FOREIGN, MYF(0)); + goto err; + } + } if (part_engine_type == &partition_hton) { /* @@ -1832,16 +2078,29 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, default_engine_type was assigned from the engine set in the ALTER TABLE command. 
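The key-list scan above is what rejects FOREIGN KEY definitions on partitioned tables before any files are created. Reduced to a standalone sketch, with Key as a simplified stand-in for the server class:

#include <vector>
#include <cstdio>

struct Key
{
  enum Keytype { PRIMARY, UNIQUE, MULTIPLE, FOREIGN_KEY } type;
  const char *name;
};

static bool check_no_foreign_keys(const std::vector<Key> &keys)
{
  for (const Key &key : keys)
  {
    if (key.type == Key::FOREIGN_KEY)
    {
      // the real code raises ER_CANNOT_ADD_FOREIGN here
      fprintf(stderr, "Cannot add foreign key constraint: %s\n", key.name);
      return true;
    }
  }
  return false;
}

int main()
{
  std::vector<Key> keys= { { Key::PRIMARY, "PRIMARY" },
                           { Key::FOREIGN_KEY, "fk_parent" } };
  return check_no_foreign_keys(keys) ? 1 : 0;
}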
*/ - part_engine_type= ha_checktype(thd, - ha_legacy_type(part_info->default_engine_type), 0, 0); + ; } else { - part_info->default_engine_type= create_info->db_type; + if (create_info->used_fields & HA_CREATE_USED_ENGINE) + { + part_info->default_engine_type= create_info->db_type; + } + else + { + if (part_info->default_engine_type == NULL) + { + part_info->default_engine_type= ha_checktype(thd, + DB_TYPE_DEFAULT, 0, 0); + } + } } - if (check_partition_info(part_info, part_engine_type, - file, create_info->max_rows)) + DBUG_PRINT("info", ("db_type = %d", + ha_legacy_type(part_info->default_engine_type))); + if (check_partition_info(part_info, &engine_type, file, + create_info->max_rows)) goto err; + part_info->default_engine_type= engine_type; /* We reverse the partitioning parser and generate a standard format @@ -1849,19 +2108,29 @@ bool mysql_create_table(THD *thd,const char *db, const char *table_name, */ if (!(part_syntax_buf= generate_partition_syntax(part_info, &syntax_len, - TRUE,TRUE))) + TRUE, FALSE))) goto err; part_info->part_info_string= part_syntax_buf; part_info->part_info_len= syntax_len; - if ((!(file->partition_flags() & HA_CAN_PARTITION)) || + if (create_info->db_type != engine_type) + { + delete file; + if (!(file= get_new_handler((TABLE_SHARE*) 0, thd->mem_root, engine_type))) + { + mem_alloc_error(sizeof(handler)); + DBUG_RETURN(TRUE); + } + } + if ((!(engine_type->partition_flags && + engine_type->partition_flags() & HA_CAN_PARTITION)) || create_info->db_type == &partition_hton) { /* The handler assigned to the table cannot handle partitioning. Assign the partition handler as the handler of the table. */ - DBUG_PRINT("info", ("db_type: %d part_flag: %d", - create_info->db_type,file->partition_flags())); + DBUG_PRINT("info", ("db_type: %d", + ha_legacy_type(create_info->db_type))); delete file; create_info->db_type= &partition_hton; if (!(file= get_ha_partition(part_info))) @@ -2252,7 +2521,7 @@ static void wait_while_table_is_used(THD *thd,TABLE *table, VOID(table->file->extra(function)); /* Mark all tables that are in use as 'old' */ - mysql_lock_abort(thd, table); // end threads waiting on lock + mysql_lock_abort(thd, table, TRUE); /* end threads waiting on lock */ /* Wait until all there are no other threads that has this table open */ remove_table_from_cache(thd, table->s->db.str, @@ -2405,7 +2674,7 @@ static int prepare_for_repair(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(0); // Can't open frm file } - if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table)) + if (open_table_from_share(thd, share, "", 0, 0, 0, &tmp_table, FALSE)) { release_table_share(share, RELEASE_NORMAL); pthread_mutex_unlock(&LOCK_open); @@ -2659,12 +2928,13 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, } /* Close all instances of the table to allow repair to rename files */ - if (lock_type == TL_WRITE && table->table->s->version) + if (lock_type == TL_WRITE && table->table->s->version && + !table->table->s->log_table) { pthread_mutex_lock(&LOCK_open); const char *old_message=thd->enter_cond(&COND_refresh, &LOCK_open, "Waiting to get writelock"); - mysql_lock_abort(thd,table->table); + mysql_lock_abort(thd,table->table, TRUE); remove_table_from_cache(thd, table->table->s->db.str, table->table->s->table_name.str, RTFC_WAIT_OTHER_THREAD_FLAG | @@ -2829,9 +3099,10 @@ send_result_message: } if (table->table) { + /* in the below check we do not refresh the log tables */ if (fatal_error) table->table->s->version=0; // Force close of table - else if (open_for_modify) + 
else if (open_for_modify && !table->table->s->log_table) { pthread_mutex_lock(&LOCK_open); remove_table_from_cache(thd, table->table->s->db.str, @@ -3014,7 +3285,7 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, Table_ident *table_ident) { TABLE *tmp_table; - char src_path[FN_REFLEN], dst_path[FN_REFLEN]; + char src_path[FN_REFLEN], dst_path[FN_REFLEN], tmp_path[FN_REFLEN]; uint dst_path_length; char *db= table->db; char *table_name= table->table_name; @@ -3120,6 +3391,19 @@ bool mysql_create_like_table(THD* thd, TABLE_LIST* table, creation, instead create the table directly (for both normal and temporary tables). */ +#ifdef WITH_PARTITION_STORAGE_ENGINE + /* + For partitioned tables we need to copy the .par file as well since + it is used in open_table_def to even be able to create a new handler. + There is no way to find out here if the original table is a + partitioned table so we copy the file and ignore any errors. + */ + fn_format(tmp_path, dst_path, reg_ext, ".par", MYF(MY_REPLACE_EXT)); + strmov(dst_path, tmp_path); + fn_format(tmp_path, src_path, reg_ext, ".par", MYF(MY_REPLACE_EXT)); + strmov(src_path, tmp_path); + my_copy(src_path, dst_path, MYF(MY_DONT_OVERWRITE_FILE)); +#endif dst_path[dst_path_length - reg_ext_length]= '\0'; // Remove .frm err= ha_create_table(thd, dst_path, db, table_name, create_info, 1); @@ -3547,10 +3831,8 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, handlerton *old_db_type, *new_db_type; uint need_copy_table= 0; #ifdef WITH_PARTITION_STORAGE_ENGINE - bool online_add_empty_partition= FALSE; - bool online_drop_partition= FALSE; + uint fast_alter_partition= 0; bool partition_changed= FALSE; - handlerton *default_engine_type; #endif List prepared_create_list; List prepared_key_list; @@ -3562,6 +3844,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, uint *index_drop_buffer; uint index_add_count; uint *index_add_buffer; + bool committed= 0; DBUG_ENTER("mysql_alter_table"); thd->proc_info="init"; @@ -3642,413 +3925,10 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info->db_type= old_db_type; #ifdef WITH_PARTITION_STORAGE_ENGINE - /* - We need to handle both partition management command such as Add Partition - and others here as well as an ALTER TABLE that completely changes the - partitioning and yet others that don't change anything at all. We start - by checking the partition management variants and then check the general - change patterns. - */ - if (alter_info->flags & (ALTER_ADD_PARTITION + - ALTER_DROP_PARTITION + ALTER_COALESCE_PARTITION + - ALTER_REORGANISE_PARTITION)) + if (prep_alter_part_table(thd, table, alter_info, create_info, old_db_type, + &partition_changed, &fast_alter_partition)) { - partition_info *tab_part_info= table->part_info; - if (!tab_part_info) - { - my_error(ER_PARTITION_MGMT_ON_NONPARTITIONED, MYF(0)); - DBUG_RETURN(TRUE); - } - default_engine_type= tab_part_info->default_engine_type; - /* - We are going to manipulate the partition info on the table object - so we need to ensure that the data structure of the table object - is freed by setting version to 0. - */ - table->s->version= 0L; - if (alter_info->flags == ALTER_ADD_PARTITION) - { - /* - We start by moving the new partitions to the list of temporary - partitions. We will then check that the new partitions fit in the - partitioning scheme as currently set-up. - Partitions are always added at the end in ADD PARTITION. 
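The CREATE TABLE ... LIKE hunk above copies the .par sidecar file by rewriting the .frm extension on both paths and copying without overwrite, deliberately ignoring errors because the source may not be a partitioned table at all. The same shape expressed with std::filesystem, as an illustration rather than the server's fn_format()/my_copy() calls:

#include <filesystem>
#include <system_error>

namespace fs = std::filesystem;

static void copy_par_file(fs::path src_frm, fs::path dst_frm)
{
  src_frm.replace_extension(".par");          // x.frm -> x.par
  dst_frm.replace_extension(".par");
  std::error_code ec;                         // error_code overload: no throw
  fs::copy_file(src_frm, dst_frm, fs::copy_options::none, ec);
  // ec is deliberately ignored, mirroring the unchecked my_copy() with
  // MY_DONT_OVERWRITE_FILE above
}

int main()
{
  copy_par_file("data/db/t1.frm", "data/db/t2.frm");  // hypothetical paths
  return 0;
}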
- */ - partition_info *alt_part_info= thd->lex->part_info; - uint no_new_partitions= alt_part_info->no_parts; - uint no_orig_partitions= tab_part_info->no_parts; - uint check_total_partitions= no_new_partitions + no_orig_partitions; - uint new_total_partitions= check_total_partitions; - /* - We allow quite a lot of values to be supplied by defaults, however we - must know the number of new partitions in this case. - */ - if (no_new_partitions == 0) - { - my_error(ER_ADD_PARTITION_NO_NEW_PARTITION, MYF(0)); - DBUG_RETURN(TRUE); - } - if (is_sub_partitioned(tab_part_info)) - { - if (alt_part_info->no_subparts == 0) - alt_part_info->no_subparts= tab_part_info->no_subparts; - else if (alt_part_info->no_subparts != tab_part_info->no_subparts) - { - my_error(ER_ADD_PARTITION_SUBPART_ERROR, MYF(0)); - DBUG_RETURN(TRUE); - } - check_total_partitions= new_total_partitions* - alt_part_info->no_subparts; - } - if (check_total_partitions > MAX_PARTITIONS) - { - my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); - DBUG_RETURN(TRUE); - } - alt_part_info->part_type= tab_part_info->part_type; - if (set_up_defaults_for_partitioning(alt_part_info, - table->file, - (ulonglong)0ULL, - tab_part_info->no_parts)) - { - DBUG_RETURN(TRUE); - } - /* - Need to concatenate the lists here to make it possible to check the - partition info for correctness using check_partition_info - */ - { - List_iterator alt_it(alt_part_info->partitions); - uint part_count= 0; - do - { - partition_element *part_elem= alt_it++; - tab_part_info->partitions.push_back(part_elem); - tab_part_info->temp_partitions.push_back(part_elem); - } while (++part_count < no_new_partitions); - tab_part_info->no_parts+= no_new_partitions; - } - { - List_iterator tab_it(tab_part_info->partitions); - partition_element *part_elem= tab_it++; - if (is_sub_partitioned(tab_part_info)) - { - List_iterator sub_it(part_elem->subpartitions); - part_elem= sub_it++; - } - if (check_partition_info(tab_part_info, part_elem->engine_type, - table->file, (ulonglong)0ULL)) - { - DBUG_RETURN(TRUE); - } - } - create_info->db_type= &partition_hton; - thd->lex->part_info= tab_part_info; - if (table->file->alter_table_flags() & HA_ONLINE_ADD_EMPTY_PARTITION && - (tab_part_info->part_type == RANGE_PARTITION || - tab_part_info->part_type == LIST_PARTITION)) - { - /* - For range and list partitions add partition is simply adding a new - empty partition to the table. If the handler support this we will - use the simple method of doing this. In this case we need to break - out the new partitions from the list again and only keep them in the - temporary list. Added partitions are always added at the end. - */ - { - List_iterator tab_it(tab_part_info->partitions); - uint part_count= 0; - do - { - tab_it++; - } while (++part_count < no_orig_partitions); - do - { - tab_it++; - tab_it.remove(); - } while (++part_count < new_total_partitions); - } - tab_part_info->no_parts-= no_new_partitions; - online_add_empty_partition= TRUE; - } - else - { - tab_part_info->temp_partitions.empty(); - } - } - else if (alter_info->flags == ALTER_DROP_PARTITION) - { - /* - Drop a partition from a range partition and list partitioning is - always safe and can be made more or less immediate. It is necessary - however to ensure that the partition to be removed is safely removed - and that REPAIR TABLE can remove the partition if for some reason the - command to drop the partition failed in the middle. 
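The ADD PARTITION validation being removed above (it moves into prep_alter_part_table()) is mostly small arithmetic: a non-zero count of new partitions, a subpartition count that matches the table's, and a total that stays under MAX_PARTITIONS. A standalone sketch of those checks, assuming the 5.1-era limit of 1024 partitions:

#include <cstdio>

static const unsigned MAX_PARTITIONS= 1024;

static bool check_add_partition(unsigned orig_parts, unsigned new_parts,
                                unsigned table_subparts,
                                unsigned stated_subparts)
{
  if (new_parts == 0)
    return true;                      // ER_ADD_PARTITION_NO_NEW_PARTITION
  if (table_subparts)
  {
    if (stated_subparts == 0)
      stated_subparts= table_subparts;        // default to table's value
    else if (stated_subparts != table_subparts)
      return true;                    // ER_ADD_PARTITION_SUBPART_ERROR
  }
  unsigned total= (orig_parts + new_parts) *
                  (table_subparts ? table_subparts : 1);
  return total > MAX_PARTITIONS;      // ER_TOO_MANY_PARTITIONS_ERROR
}

int main()
{
  // 4 existing RANGE partitions with 8 subpartitions each, adding 2 more:
  // (4 + 2) * 8 = 48 <= 1024, so this passes.
  printf("rejected: %d\n", check_add_partition(4, 2, 8, 0));
  return 0;
}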
- */ - uint part_count= 0; - uint no_parts_dropped= alter_info->partition_names.elements; - uint no_parts_found= 0; - List_iterator part_it(tab_part_info->partitions); - if (!(tab_part_info->part_type == RANGE_PARTITION || - tab_part_info->part_type == LIST_PARTITION)) - { - my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "DROP"); - DBUG_RETURN(TRUE); - } - if (no_parts_dropped >= tab_part_info->no_parts) - { - my_error(ER_DROP_LAST_PARTITION, MYF(0)); - DBUG_RETURN(TRUE); - } - do - { - partition_element *part_elem= part_it++; - if (is_partition_in_list(part_elem->partition_name, - alter_info->partition_names)) - { - /* - Remove the partition from the list and put it instead in the - list of temporary partitions with a new state. - */ - no_parts_found++; - part_elem->part_state= PART_IS_DROPPED; - } - } while (++part_count < tab_part_info->no_parts); - if (no_parts_found != no_parts_dropped) - { - my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0)); - DBUG_RETURN(TRUE); - } - if (!(table->file->alter_table_flags() & HA_ONLINE_DROP_PARTITION)) - { - my_error(ER_DROP_PARTITION_FAILURE, MYF(0)); - DBUG_RETURN(TRUE); - } - if (table->file->is_fk_defined_on_table_or_index(MAX_KEY)) - { - my_error(ER_DROP_PARTITION_WHEN_FK_DEFINED, MYF(0)); - DBUG_RETURN(TRUE); - } - /* - This code needs set-up of structures needed by mysql_create_table - before it is called and thus we only set a boolean variable to be - checked later down in the code when all needed data structures are - prepared. - */ - online_drop_partition= TRUE; - } - else if (alter_info->flags == ALTER_COALESCE_PARTITION) - { - /* - In this version COALESCE PARTITION is implemented by simply removing - a partition from the table and using the normal ALTER TABLE code - and ensuring that copy to a new table occurs. Later on we can optimise - this function for Linear Hash partitions. In that case we can avoid - reorganising the entire table. For normal hash partitions it will - be a complete reorganise anyways so that can only be made on-line - if it still uses a copy table. - */ - uint part_count= 0; - uint no_parts_coalesced= alter_info->no_parts; - uint no_parts_remain= tab_part_info->no_parts - no_parts_coalesced; - List_iterator part_it(tab_part_info->partitions); - if (tab_part_info->part_type != HASH_PARTITION) - { - my_error(ER_COALESCE_ONLY_ON_HASH_PARTITION, MYF(0)); - DBUG_RETURN(TRUE); - } - if (no_parts_coalesced == 0) - { - my_error(ER_COALESCE_PARTITION_NO_PARTITION, MYF(0)); - DBUG_RETURN(TRUE); - } - if (no_parts_coalesced >= tab_part_info->no_parts) - { - my_error(ER_DROP_LAST_PARTITION, MYF(0)); - DBUG_RETURN(TRUE); - } - do - { - part_it++; - if (++part_count > no_parts_remain) - part_it.remove(); - } while (part_count < tab_part_info->no_parts); - tab_part_info->no_parts= no_parts_remain; - } - else if (alter_info->flags == ALTER_REORGANISE_PARTITION) - { - /* - Reorganise partitions takes a number of partitions that are next - to each other (at least for RANGE PARTITIONS) and then uses those - to create a set of new partitions. So data is copied from those - partitions into the new set of partitions. Those new partitions - can have more values in the LIST value specifications or less both - are allowed. The ranges can be different but since they are - changing a set of consecutive partitions they must cover the same - range as those changed from. - This command can be used on RANGE and LIST partitions. 
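REORGANIZE PARTITION, described above and implemented by the removed scan that follows, enforces two rules: the named partitions must form one consecutive run, and for RANGE tables the last new range must not extend past the one it replaces. A standalone sketch of that found_first/found_last scan with simplified types:

#include <vector>
#include <string>
#include <algorithm>
#include <cstdio>

struct part { std::string name; long long range_value; };

static bool check_reorg(const std::vector<part> &table_parts,
                        const std::vector<std::string> &reorg_names,
                        long long new_max_range)
{
  bool found_first= false, found_last= false;
  long long old_max_range= 0;
  size_t matched= 0;

  for (const part &p : table_parts)
  {
    bool in_list= std::find(reorg_names.begin(), reorg_names.end(),
                            p.name) != reorg_names.end();
    if (in_list)
    {
      if (found_last)
        return true;          // ER_CONSECUTIVE_REORG_PARTITIONS: a gap
      found_first= true;
      matched++;
      old_max_range= p.range_value;
    }
    else if (found_first)
      found_last= true;       // left the contiguous run
  }
  if (matched != reorg_names.size())
    return true;              // ER_DROP_PARTITION_NON_EXISTENT
  return new_max_range > old_max_range;   // ER_REORG_OUTSIDE_RANGE
}

int main()
{
  std::vector<part> parts= { {"p0", 10}, {"p1", 20}, {"p2", 30} };
  // Reorganising p0,p2 while skipping p1 is rejected as non-consecutive:
  printf("rejected: %d\n", check_reorg(parts, {"p0", "p2"}, 30));
  return 0;
}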
- */ - uint no_parts_reorged= alter_info->partition_names.elements; - uint no_parts_new= thd->lex->part_info->partitions.elements; - partition_info *alt_part_info= thd->lex->part_info; - uint check_total_partitions; - if (no_parts_reorged > tab_part_info->no_parts) - { - my_error(ER_REORG_PARTITION_NOT_EXIST, MYF(0)); - DBUG_RETURN(TRUE); - } - if (!(tab_part_info->part_type == RANGE_PARTITION || - tab_part_info->part_type == LIST_PARTITION)) - { - my_error(ER_ONLY_ON_RANGE_LIST_PARTITION, MYF(0), "REORGANISE"); - DBUG_RETURN(TRUE); - } - if (check_reorganise_list(alt_part_info, tab_part_info, - alter_info->partition_names)) - { - my_error(ER_SAME_NAME_PARTITION, MYF(0)); - DBUG_RETURN(TRUE); - } - check_total_partitions= tab_part_info->no_parts + no_parts_new; - check_total_partitions-= no_parts_reorged; - if (check_total_partitions > MAX_PARTITIONS) - { - my_error(ER_TOO_MANY_PARTITIONS_ERROR, MYF(0)); - DBUG_RETURN(TRUE); - } - { - List_iterator tab_it(tab_part_info->partitions); - uint part_count= 0; - bool found_first= FALSE, found_last= FALSE; - uint drop_count= 0; - longlong tab_max_range, alt_max_range; - do - { - partition_element *part_elem= tab_it++; - if (is_partition_in_list(part_elem->partition_name, - alter_info->partition_names)) - { - drop_count++; - tab_max_range= part_elem->range_value; - if (!found_first) - { - uint alt_part_count= 0; - found_first= TRUE; - List_iterator alt_it(alt_part_info->partitions); - do - { - partition_element *alt_part_elem= alt_it++; - alt_max_range= alt_part_elem->range_value; - if (alt_part_count == 0) - tab_it.replace(alt_part_elem); - else - tab_it.after(alt_part_elem); - } while (++alt_part_count < no_parts_new); - } - else if (found_last) - { - my_error(ER_CONSECUTIVE_REORG_PARTITIONS, MYF(0)); - DBUG_RETURN(TRUE); - } - else - tab_it.remove(); - } - else - { - if (found_first) - found_last= TRUE; - } - } while (++part_count < tab_part_info->no_parts); - if (drop_count != no_parts_reorged) - { - my_error(ER_DROP_PARTITION_NON_EXISTENT, MYF(0)); - DBUG_RETURN(TRUE); - } - if (tab_part_info->part_type == RANGE_PARTITION && - alt_max_range > tab_max_range) - { - my_error(ER_REORG_OUTSIDE_RANGE, MYF(0)); - DBUG_RETURN(TRUE); - } - } - } - partition_changed= TRUE; - tab_part_info->no_parts= tab_part_info->partitions.elements; - create_info->db_type= &partition_hton; - thd->lex->part_info= tab_part_info; - if (alter_info->flags == ALTER_ADD_PARTITION || - alter_info->flags == ALTER_REORGANISE_PARTITION) - { - if (check_partition_info(tab_part_info, default_engine_type, - table->file, (ulonglong)0ULL)) - { - DBUG_RETURN(TRUE); - } - } - } - else - { - /* - When thd->lex->part_info has a reference to a partition_info the - ALTER TABLE contained a definition of a partitioning. - - Case I: - If there was a partition before and there is a new one defined. - We use the new partitioning. The new partitioning is already - defined in the correct variable so no work is needed to - accomplish this. - We do however need to update partition_changed to ensure that not - only the frm file is changed in the ALTER TABLE command. - - Case IIa: - There was a partitioning before and there is no new one defined. - Also the user has not specified an explicit engine to use. - - We use the old partitioning also for the new table. We do this - by assigning the partition_info from the table loaded in - open_ltable to the partition_info struct used by mysql_create_table - later in this method. 
- - Case IIb: - There was a partitioning before and there is no new one defined. - The user has specified an explicit engine to use. - - Since the user has specified an explicit engine to use we override - the old partitioning info and create a new table using the specified - engine. This is the reason for the extra check if old and new engine - is equal. - In this case the partition also is changed. - - Case III: - There was no partitioning before altering the table, there is - partitioning defined in the altered table. Use the new partitioning. - No work needed since the partitioning info is already in the - correct variable. - Also here partition has changed and thus a new table must be - created. - - Case IV: - There was no partitioning before and no partitioning defined. - Obviously no work needed. - */ - if (table->part_info) - { - if (!thd->lex->part_info && - create_info->db_type == old_db_type) - thd->lex->part_info= table->part_info; - } - if (thd->lex->part_info) - { - /* - Need to cater for engine types that can handle partition without - using the partition handler. - */ - if (thd->lex->part_info != table->part_info) - partition_changed= TRUE; - if (create_info->db_type != &partition_hton) - thd->lex->part_info->default_engine_type= create_info->db_type; - create_info->db_type= &partition_hton; - } + DBUG_RETURN(TRUE); } #endif if (check_engine(thd, new_name, &create_info->db_type)) @@ -4057,7 +3937,9 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (create_info->row_type == ROW_TYPE_NOT_USED) create_info->row_type= table->s->row_type; - DBUG_PRINT("info", ("old type: %d new type: %d", old_db_type, new_db_type)); + DBUG_PRINT("info", ("old type: %s new type: %s", + ha_resolve_storage_engine_name(old_db_type), + ha_resolve_storage_engine_name(new_db_type))); if (ha_check_storage_engine_flag(old_db_type, HTON_ALTER_NOT_SUPPORTED) || ha_check_storage_engine_flag(new_db_type, HTON_ALTER_NOT_SUPPORTED)) { @@ -4503,14 +4385,16 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, if (need_copy_table == ALTER_TABLE_INDEX_CHANGED) { int pk_changed= 0; - ulong alter_flags= table->file->alter_table_flags(); + ulong alter_flags= 0; ulong needed_online_flags= 0; ulong needed_fast_flags= 0; KEY *key; uint *idx_p; uint *idx_end_p; - DBUG_PRINT("info", ("alter_flags: %lu", alter_flags)); + if (table->s->db_type->alter_table_flags) + alter_flags= table->s->db_type->alter_table_flags(alter_info->flags); + DBUG_PRINT("info", ("alter_flags: %lu", alter_flags)); /* Check dropped indexes. */ for (idx_p= index_drop_buffer, idx_end_p= idx_p + index_drop_count; idx_p < idx_end_p; @@ -4610,103 +4494,13 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, create_info->frm_only= 1; #ifdef WITH_PARTITION_STORAGE_ENGINE - if (partition_changed) + if (fast_alter_partition) { - if (online_drop_partition) - { - /* - Now after all checks and setting state on dropped partitions we can - start the actual dropping of the partitions. - 1) Lock table in TL_WRITE_ONLY to ensure all other accesses on table - are completed and no new ones are started until we have changed - the frm file. - 2) Write the new frm file where state of dropped partitions is - changed to PART_IS_DROPPED - 3) Perform the actual drop of the partition using the handler of the - table. - 4) Write a new frm file of the table where the partitions are dropped - from the table. 
- - */ - uint old_lock_type; - partition_info *part_info= table->part_info; - char path[FN_REFLEN+1], noext_path[FN_REFLEN+1]; - uint syntax_len; - char *part_syntax_buf; - - VOID(pthread_mutex_lock(&LOCK_open)); - if (abort_and_upgrade_lock(thd, table, db, table_name, &old_lock_type)) - { - DBUG_RETURN(TRUE); - } - VOID(pthread_mutex_unlock(&LOCK_open)); - if (!(part_syntax_buf= generate_partition_syntax(part_info, - &syntax_len, - TRUE,TRUE))) - { - DBUG_RETURN(TRUE); - } - part_info->part_info_string= part_syntax_buf; - part_info->part_info_len= syntax_len; - build_table_filename(path, sizeof(path), db, table_name, reg_ext); - if (mysql_create_frm(thd, path, db, table_name, create_info, - prepared_create_list, key_count, key_info_buffer, - table->file)) - { - DBUG_RETURN(TRUE); - } - thd->lex->part_info= part_info; - build_table_filename(path, sizeof(path), db, table_name, ""); - if (table->file->drop_partitions(path)) - { - DBUG_RETURN(TRUE); - } - { - List_iterator part_it(part_info->partitions); - uint i= 0, remove_count= 0; - do - { - partition_element *part_elem= part_it++; - if (is_partition_in_list(part_elem->partition_name, - alter_info->partition_names)) - { - part_it.remove(); - remove_count++; - } - } while (++i < part_info->no_parts); - part_info->no_parts-= remove_count; - } - if (!(part_syntax_buf= generate_partition_syntax(part_info, - &syntax_len, - TRUE,TRUE))) - { - DBUG_RETURN(TRUE); - } - part_info->part_info_string= part_syntax_buf; - part_info->part_info_len= syntax_len; - build_table_filename(path, sizeof(path), db, table_name, reg_ext); - build_table_filename(noext_path, sizeof(noext_path), db, table_name, ""); - if (mysql_create_frm(thd, path, db, table_name, create_info, - prepared_create_list, key_count, key_info_buffer, - table->file) || - table->file->create_handler_files(noext_path)) - { - DBUG_RETURN(TRUE); - } - thd->proc_info="end"; - query_cache_invalidate3(thd, table_list, 0); - error= ha_commit_stmt(thd); - if (ha_commit(thd)) - error= 1; - if (!error) - { - close_thread_tables(thd); - write_bin_log(thd, FALSE, thd->query, thd->query_length); - send_ok(thd); - DBUG_RETURN(FALSE); - } - DBUG_RETURN(error); - } + DBUG_RETURN(fast_alter_partition_table(thd, table, alter_info, + create_info, table_list, + &create_list, &key_list, + db, table_name, + fast_alter_partition)); } #endif @@ -4968,6 +4762,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, DBUG_PRINT("info", ("Committing after add/drop index")); if (ha_commit_stmt(thd) || ha_commit(thd)) goto err; + committed= 1; } } /*end of if (! new_table) for add/drop index*/ @@ -5099,7 +4894,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, VOID(pthread_mutex_unlock(&LOCK_open)); goto err; } -#ifdef XXX_TO_BE_DONE_LATER_BY_WL1892 if (! need_copy_table) { if (! table) @@ -5116,7 +4910,6 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, goto err; } } -#endif if (thd->lock || new_name != table_name) // True if WIN32 { /* @@ -5143,7 +4936,7 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, /* Mark in-use copies old */ remove_table_from_cache(thd,db,table_name,RTFC_NO_FLAG); /* end threads waiting on lock */ - mysql_lock_abort(thd,table); + mysql_lock_abort(thd,table, TRUE); } VOID(quick_rm_table(old_db_type,db,old_name)); if (close_data_tables(thd,db,table_name) || @@ -5166,11 +4959,14 @@ bool mysql_alter_table(THD *thd,char *new_db, char *new_name, wait_if_global_read_lock(), which could create a deadlock if called with LOCK_open. 
*/ - error = ha_commit_stmt(thd); - if (ha_commit(thd)) - error=1; - if (error) - goto err; + if (!committed) + { + error = ha_commit_stmt(thd); + if (ha_commit(thd)) + error=1; + if (error) + goto err; + } thd->proc_info="end"; DBUG_ASSERT(!(mysql_bin_log.is_open() && binlog_row_based && diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index fde014898f4..71851a5d175 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -530,6 +530,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token READ_SYM %token READ_WRITE_SYM %token REAL +%token REBUILD_SYM %token RECOVER_SYM %token REDO_BUFFER_SIZE_SYM %token REDOFILE_SYM @@ -542,7 +543,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %token RELEASE_SYM %token RELOAD %token RENAME -%token REORGANISE_SYM +%token REORGANIZE_SYM %token REPAIR %token REPEATABLE_SYM %token REPEAT_SYM @@ -729,7 +730,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); LEX_HOSTNAME ULONGLONG_NUM field_ident select_alias ident ident_or_text UNDERSCORE_CHARSET IDENT_sys TEXT_STRING_sys TEXT_STRING_literal NCHAR_STRING opt_component key_cache_name - sp_opt_label BIN_NUM label_ident + sp_opt_label BIN_NUM label_ident TEXT_STRING_filesystem %type opt_table_alias opt_fulltext_parser @@ -3214,9 +3215,9 @@ size_number: ulonglong number, test_number; uint text_shift_number= 0; longlong prefix_number; - char *end_ptr; char *start_ptr= $1.str; uint str_len= strlen(start_ptr); + char *end_ptr= start_ptr + str_len; int error; prefix_number= my_strtoll10(start_ptr, &end_ptr, &error); if ((start_ptr + str_len - 1) == end_ptr) @@ -3331,9 +3332,13 @@ partitioning: lex->part_info= new partition_info(); if (!lex->part_info) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info)); + mem_alloc_error(sizeof(partition_info)); YYABORT; } + if (lex->sql_command == SQLCOM_ALTER_TABLE) + { + lex->alter_info.flags|= ALTER_PARTITION; + } } partition ; @@ -3342,24 +3347,15 @@ partition_entry: PARTITION_SYM { LEX *lex= Lex; - if (lex->part_info) - { - /* - We enter here when opening the frm file to translate - partition info string into part_info data structure. - */ - lex->part_info= new partition_info(); - if (!lex->part_info) - { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info)); - YYABORT; - } - } - else + if (!lex->part_info) { yyerror(ER(ER_PARTITION_ENTRY_ERROR)); YYABORT; } + /* + We enter here when opening the frm file to translate + partition info string into part_info data structure. 
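The committed flag introduced above guards the tail of mysql_alter_table(): the fast add/drop index path commits early, and the shared exit path must then skip its own commit. A small sketch of the commit-once shape (the function names are hypothetical stand-ins for the server's commit calls):

#include <cstdio>

static int commits_seen= 0;
static bool ha_commit_stmt_stub() { commits_seen++; return false; }

static bool alter_table_tail(bool fast_index_path)
{
  bool committed= false;

  if (fast_index_path)
  {
    if (ha_commit_stmt_stub())      // early commit after the index change
      return true;
    committed= true;
  }

  /* ... rename/cleanup work shared by all paths ... */

  if (!committed)                   // common exit: commit only if not done
  {
    if (ha_commit_stmt_stub())
      return true;
  }
  return false;
}

int main()
{
  alter_table_tail(true);
  printf("commits on fast index path: %d\n", commits_seen);  // prints 1
  return 0;
}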
+ */ } partition {} ; @@ -3393,14 +3389,23 @@ opt_linear: ; part_field_list: + /* empty */ {} + | part_field_item_list {} + ; + +part_field_item_list: part_field_item {} - | part_field_list ',' part_field_item {} + | part_field_item_list ',' part_field_item {} ; part_field_item: ident { - Lex->part_info->part_field_list.push_back($1.str); + if (Lex->part_info->part_field_list.push_back($1.str)) + { + mem_alloc_error(1); + YYABORT; + } } ; @@ -3434,12 +3439,15 @@ opt_no_parts: | PARTITIONS_SYM ulong_num { uint no_parts= $2; + LEX *lex= Lex; if (no_parts == 0) { my_error(ER_NO_PARTS_ERROR, MYF(0), "partitions"); YYABORT; } - Lex->part_info->no_parts= no_parts; + + lex->part_info->no_parts= no_parts; + lex->part_info->use_default_no_partitions= FALSE; } ; @@ -3465,7 +3473,13 @@ sub_part_field_list: sub_part_field_item: ident - { Lex->part_info->subpart_field_list.push_back($1.str); } + { + if (Lex->part_info->subpart_field_list.push_back($1.str)) + { + mem_alloc_error(1); + YYABORT; + } + } ; part_func_expr: @@ -3489,12 +3503,14 @@ opt_no_subparts: | SUBPARTITIONS_SYM ulong_num { uint no_parts= $2; + LEX *lex= Lex; if (no_parts == 0) { my_error(ER_NO_PARTS_ERROR, MYF(0), "subpartitions"); YYABORT; } - Lex->part_info->no_subparts= no_parts; + lex->part_info->no_subparts= no_parts; + lex->part_info->use_default_no_subpartitions= FALSE; } ; @@ -3505,21 +3521,21 @@ part_defs: { LEX *lex= Lex; partition_info *part_info= lex->part_info; + uint count_curr_parts= part_info->partitions.elements; if (part_info->no_parts != 0) { if (part_info->no_parts != - part_info->count_curr_parts) + count_curr_parts) { yyerror(ER(ER_PARTITION_WRONG_NO_PART_ERROR)); YYABORT; } } - else if (part_info->count_curr_parts > 0) + else if (count_curr_parts > 0) { - part_info->no_parts= part_info->count_curr_parts; + part_info->no_parts= count_curr_parts; } part_info->count_curr_subparts= 0; - part_info->count_curr_parts= 0; } ; @@ -3534,17 +3550,79 @@ part_definition: LEX *lex= Lex; partition_info *part_info= lex->part_info; partition_element *p_elem= new partition_element(); - if (!p_elem) + uint part_id= part_info->partitions.elements + + part_info->temp_partitions.elements; + enum partition_state part_state; + + if (part_info->part_state) + part_state= (enum partition_state)part_info->part_state[part_id]; + else + part_state= PART_NORMAL; + switch (part_state) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element)); - YYABORT; + case PART_TO_BE_DROPPED: + /* + This part is currently removed so we keep it in a + temporary list for REPAIR TABLE to be able to handle + failures during drop partition process. + */ + case PART_TO_BE_ADDED: + /* + This part is currently being added so we keep it in a + temporary list for REPAIR TABLE to be able to handle + failures during add partition process. + */ + if (!p_elem || part_info->temp_partitions.push_back(p_elem)) + { + mem_alloc_error(sizeof(partition_element)); + YYABORT; + } + break; + case PART_IS_ADDED: + /* + Part has been added and is now a normal partition + */ + case PART_TO_BE_REORGED: + /* + This part is currently reorganised, it is still however + used so we keep it in the list of partitions. We do + however need the state to be able to handle REPAIR TABLE + after failures in the reorganisation process. + */ + case PART_REORGED_DROPPED: + /* + This part is currently reorganised as part of a + COALESCE PARTITION and it will be dropped without a new + replacement partition after completing the reorganisation. 
+ */ + case PART_CHANGED: + /* + This part is currently split or merged as part of ADD + PARTITION for a hash partition or as part of COALESCE + PARTITION for a hash partitioned table. + */ + case PART_IS_CHANGED: + /* + This part has been split or merged as part of ADD + PARTITION for a hash partition or as part of COALESCE + PARTITION for a hash partitioned table. + */ + case PART_NORMAL: + if (!p_elem || part_info->partitions.push_back(p_elem)) + { + mem_alloc_error(sizeof(partition_element)); + YYABORT; + } + break; + default: + mem_alloc_error((part_id * 1000) + part_state); + YYABORT; } + p_elem->part_state= part_state; part_info->curr_part_elem= p_elem; part_info->current_partition= p_elem; part_info->use_default_partitions= FALSE; - part_info->partitions.push_back(p_elem); - p_elem->engine_type= NULL; - part_info->count_curr_parts++; + part_info->use_default_no_partitions= FALSE; } part_name {} opt_part_values {} @@ -3554,7 +3632,12 @@ part_definition: part_name: ident_or_text - { Lex->part_info->curr_part_elem->partition_name= $1.str; } + { + LEX *lex= Lex; + partition_info *part_info= lex->part_info; + partition_element *p_elem= part_info->curr_part_elem; + p_elem->partition_name= $1.str; + } ; opt_part_values: @@ -3643,13 +3726,13 @@ part_list_item: part_bit_expr { longlong *value_ptr; - if (!(value_ptr= (longlong*)sql_alloc(sizeof(longlong)))) + if (!(value_ptr= (longlong*)sql_alloc(sizeof(longlong))) || + ((*value_ptr= $1, FALSE) || + Lex->part_info->curr_part_elem->list_val_list.push_back(value_ptr))) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(longlong)); + mem_alloc_error(sizeof(longlong)); YYABORT; } - *value_ptr= $1; - Lex->part_info->curr_part_elem->list_val_list.push_back(value_ptr); } ; @@ -3659,20 +3742,23 @@ part_bit_expr: Item *part_expr= $1; bool not_corr_func; LEX *lex= Lex; + THD *thd= YYTHD; longlong item_value; Name_resolution_context *context= &lex->current_select->context; TABLE_LIST *save_list= context->table_list; + const char *save_where= thd->where; context->table_list= 0; - part_expr->fix_fields(YYTHD, (Item**)0); - context->table_list= save_list; - not_corr_func= !part_expr->const_item() || - !lex->safe_to_cache_query; - if (not_corr_func) + thd->where= "partition function"; + if (part_expr->fix_fields(YYTHD, (Item**)0) || + ((context->table_list= save_list), FALSE) || + (!part_expr->const_item()) || + (!lex->safe_to_cache_query)) { yyerror(ER(ER_NO_CONST_EXPR_IN_RANGE_OR_LIST_ERROR)); YYABORT; } + thd->where= save_where; if (part_expr->result_type() != INT_RESULT) { yyerror(ER(ER_INCONSISTENT_TYPE_OF_FUNCTIONS_ERROR)); @@ -3717,16 +3803,16 @@ sub_part_definition: LEX *lex= Lex; partition_info *part_info= lex->part_info; partition_element *p_elem= new partition_element(); - if (!p_elem) + if (!p_elem || + part_info->current_partition->subpartitions.push_back(p_elem)) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_element)); + mem_alloc_error(sizeof(partition_element)); YYABORT; } part_info->curr_part_elem= p_elem; - part_info->current_partition->subpartitions.push_back(p_elem); part_info->use_default_subpartitions= FALSE; + part_info->use_default_no_subpartitions= FALSE; part_info->count_curr_subparts++; - p_elem->engine_type= NULL; } sub_name opt_part_options {} ; @@ -4794,7 +4880,7 @@ alter_commands: | DISCARD TABLESPACE { Lex->alter_info.tablespace_op= DISCARD_TABLESPACE; } | IMPORT TABLESPACE { Lex->alter_info.tablespace_op= IMPORT_TABLESPACE; } | alter_list - opt_partitioning + opt_partitioning | partitioning /* This part was added for 
release 5.1 by Mikael Ronström. @@ -4809,26 +4895,77 @@ alter_commands: { Lex->alter_info.flags|= ALTER_DROP_PARTITION; } - | COALESCE PARTITION_SYM ulong_num + | REBUILD_SYM PARTITION_SYM opt_no_write_to_binlog + all_or_alt_part_name_list + { + LEX *lex= Lex; + lex->alter_info.flags|= ALTER_REBUILD_PARTITION; + lex->no_write_to_binlog= $3; + } + | OPTIMIZE PARTITION_SYM opt_no_write_to_binlog + all_or_alt_part_name_list + { + LEX *lex= Lex; + lex->alter_info.flags|= ALTER_OPTIMIZE_PARTITION; + lex->no_write_to_binlog= $3; + lex->check_opt.init(); + } + opt_no_write_to_binlog opt_mi_check_type + | ANALYZE_SYM PARTITION_SYM opt_no_write_to_binlog + all_or_alt_part_name_list + { + LEX *lex= Lex; + lex->alter_info.flags|= ALTER_ANALYZE_PARTITION; + lex->no_write_to_binlog= $3; + lex->check_opt.init(); + } + opt_mi_check_type + | CHECK_SYM PARTITION_SYM all_or_alt_part_name_list + { + LEX *lex= Lex; + lex->alter_info.flags|= ALTER_CHECK_PARTITION; + lex->check_opt.init(); + } + opt_mi_check_type + | REPAIR PARTITION_SYM opt_no_write_to_binlog + all_or_alt_part_name_list + { + LEX *lex= Lex; + lex->alter_info.flags|= ALTER_REPAIR_PARTITION; + lex->no_write_to_binlog= $3; + lex->check_opt.init(); + } + opt_mi_repair_type + | COALESCE PARTITION_SYM opt_no_write_to_binlog ulong_num { LEX *lex= Lex; lex->alter_info.flags|= ALTER_COALESCE_PARTITION; - lex->alter_info.no_parts= $3; + lex->no_write_to_binlog= $3; + lex->alter_info.no_parts= $4; } | reorg_partition_rule ; +all_or_alt_part_name_list: + | ALL + { + Lex->alter_info.flags|= ALTER_ALL_PARTITION; + } + | alt_part_name_list + ; + add_partition_rule: - ADD PARTITION_SYM + ADD PARTITION_SYM opt_no_write_to_binlog { LEX *lex= Lex; lex->part_info= new partition_info(); if (!lex->part_info) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info)); + mem_alloc_error(sizeof(partition_info)); YYABORT; } lex->alter_info.flags|= ALTER_ADD_PARTITION; + lex->no_write_to_binlog= $3; } add_part_extra {} @@ -4838,7 +4975,7 @@ add_part_extra: | '(' part_def_list ')' { LEX *lex= Lex; - lex->part_info->no_parts= lex->part_info->count_curr_parts; + lex->part_info->no_parts= lex->part_info->partitions.elements; } | PARTITIONS_SYM ulong_num { @@ -4848,21 +4985,34 @@ add_part_extra: ; reorg_partition_rule: - REORGANISE_SYM PARTITION_SYM + REORGANIZE_SYM PARTITION_SYM opt_no_write_to_binlog { LEX *lex= Lex; lex->part_info= new partition_info(); if (!lex->part_info) { - my_error(ER_OUTOFMEMORY, MYF(0), sizeof(partition_info)); + mem_alloc_error(sizeof(partition_info)); YYABORT; } - lex->alter_info.flags|= ALTER_REORGANISE_PARTITION; + lex->no_write_to_binlog= $3; } - alt_part_name_list INTO '(' part_def_list ')' + reorg_parts_rule + ; + +reorg_parts_rule: + /* empty */ + { + Lex->alter_info.flags|= ALTER_TABLE_REORG; + } + | + alt_part_name_list + { + Lex->alter_info.flags|= ALTER_REORGANIZE_PARTITION; + } + INTO '(' part_def_list ')' { LEX *lex= Lex; - lex->part_info->no_parts= lex->part_info->count_curr_parts; + lex->part_info->no_parts= lex->part_info->partitions.elements; } ; @@ -4874,7 +5024,11 @@ alt_part_name_list: alt_part_name_item: ident { - Lex->alter_info.partition_names.push_back($1.str); + if (Lex->alter_info.partition_names.push_back($1.str)) + { + mem_alloc_error(1); + YYABORT; + } } ; @@ -7370,7 +7524,7 @@ select_var_ident: ; into: - INTO OUTFILE TEXT_STRING_sys + INTO OUTFILE TEXT_STRING_filesystem { LEX *lex= Lex; lex->uncacheable(UNCACHEABLE_SIDEEFFECT); @@ -7379,7 +7533,7 @@ into: YYABORT; } opt_field_term opt_line_term - | INTO DUMPFILE 
TEXT_STRING_sys + | INTO DUMPFILE TEXT_STRING_filesystem { LEX *lex=Lex; if (!lex->describe) @@ -8442,7 +8596,7 @@ load: LOAD DATA_SYM }; load_data: - load_data_lock opt_local INFILE TEXT_STRING_sys + load_data_lock opt_local INFILE TEXT_STRING_filesystem { LEX *lex=Lex; lex->sql_command= SQLCOM_LOAD; @@ -8970,6 +9124,18 @@ TEXT_STRING_literal: ; +TEXT_STRING_filesystem: + TEXT_STRING + { + THD *thd= YYTHD; + if (thd->charset_is_character_set_filesystem) + $$= $1; + else + thd->convert_string(&$$, thd->variables.character_set_filesystem, + $1.str, $1.length, thd->charset()); + } + ; + ident: IDENT_sys { $$=$1; } | READ_ONLY_SYM @@ -9262,6 +9428,7 @@ keyword_sp: | RAID_CHUNKSIZE {} | RAID_STRIPED_SYM {} | RAID_TYPE {} + | REBUILD_SYM {} | RECOVER_SYM {} | REDO_BUFFER_SIZE_SYM {} | REDOFILE_SYM {} @@ -9269,7 +9436,7 @@ keyword_sp: | RELAY_LOG_FILE_SYM {} | RELAY_LOG_POS_SYM {} | RELOAD {} - | REORGANISE_SYM {} + | REORGANIZE_SYM {} | REPEATABLE_SYM {} | REPLICATION {} | RESOURCES {} diff --git a/sql/table.cc b/sql/table.cc index b8811366524..8345ad53d82 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -310,16 +310,29 @@ int open_table_def(THD *thd, TABLE_SHARE *share, uint db_flags) error= open_binary_frm(thd, share, head, file); *root_ptr= old_root; - /* - We can't mark all tables in 'mysql' database as system since we don't - allow to lock such tables for writing with any other tables (even with - other system tables) and some privilege tables need this. - */ if (share->db.length == 5 && - !my_strcasecmp(system_charset_info, share->db.str, "mysql") && - (!my_strcasecmp(system_charset_info, share->table_name.str, "proc") || - !my_strcasecmp(system_charset_info, share->table_name.str, "event"))) - share->system_table= 1; + !my_strcasecmp(system_charset_info, share->db.str, "mysql")) + { + /* + We can't mark all tables in 'mysql' database as system since we don't + allow to lock such tables for writing with any other tables (even with + other system tables) and some privilege tables need this. + */ + if (!my_strcasecmp(system_charset_info, share->table_name.str, "proc") + || !my_strcasecmp(system_charset_info, share->table_name.str, + "event")) + share->system_table= 1; + else + { + if (!my_strcasecmp(system_charset_info, share->table_name.str, + "general_log")) + share->log_table= QUERY_LOG_GENERAL; + else + if (!my_strcasecmp(system_charset_info, share->table_name.str, + "slow_log")) + share->log_table= QUERY_LOG_SLOW; + } + } error_given= 1; } @@ -388,6 +401,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, #ifdef WITH_PARTITION_STORAGE_ENGINE share->default_part_db_type= ha_checktype(thd, (enum legacy_db_type) (uint) *(head+61), 0, 0); + DBUG_PRINT("info", ("default_part_db_type = %u", head[61])); #endif legacy_db_type= (enum legacy_db_type) (uint) *(head+3); share->db_type= ha_checktype(thd, legacy_db_type, 0, 0); @@ -525,7 +539,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, ((uint2korr(head+14) == 0xffff ? 
uint4korr(head+47) : uint2korr(head+14)))); - if ((n_length= uint2korr(head+55))) + if ((n_length= uint4korr(head+55))) { /* Read extra data segment */ char *buff, *next_chunk, *buff_end; @@ -599,6 +613,38 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, #endif next_chunk+= 5 + partition_info_len; } + if (share->mysql_version > 50105 && next_chunk + 5 < buff_end) + { + /* + Partition state was introduced to support partition management in version 5.1.5 + */ + uint32 part_state_len= uint4korr(next_chunk); +#ifdef WITH_PARTITION_STORAGE_ENGINE + if ((share->part_state_len= part_state_len)) + if (!(share->part_state= + (uchar*) memdup_root(&share->mem_root, next_chunk + 4, + part_state_len))) + { + my_free(buff, MYF(0)); + goto err; + } +#else + if (part_state_len) + { + DBUG_PRINT("info", ("WITH_PARTITION_STORAGE_ENGINE is not defined")); + my_free(buff, MYF(0)); + goto err; + } +#endif + next_chunk+= 4 + part_state_len; + } +#ifdef WITH_PARTITION_STORAGE_ENGINE + else + { + share->part_state_len= 0; + share->part_state= NULL; + } +#endif keyinfo= share->key_info; for (i= 0; i < keys; i++, keyinfo++) { @@ -1223,7 +1269,7 @@ static int open_binary_frm(THD *thd, TABLE_SHARE *share, uchar *head, int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, uint db_stat, uint prgflag, uint ha_open_flags, - TABLE *outparam) + TABLE *outparam, bool is_create_table) { int error; uint records, i; @@ -1379,13 +1425,17 @@ int open_table_from_share(THD *thd, TABLE_SHARE *share, const char *alias, { if (mysql_unpack_partition(thd, share->partition_info, share->partition_info_len, - outparam, share->default_part_db_type)) + (uchar*)share->part_state, + share->part_state_len, + outparam, is_create_table, + share->default_part_db_type)) goto err; /* Fix the partition functions and ensure they are not constant functions */ - if (fix_partition_func(thd, share->normalized_path.str, outparam)) + if (fix_partition_func(thd, share->normalized_path.str, outparam, + is_create_table)) goto err; } #endif @@ -1503,6 +1553,7 @@ int closefrm(register TABLE *table, bool free_share) if (table->part_info) { free_items(table->part_info->item_free_list); + table->part_info->item_free_list= 0; table->part_info= 0; } #endif @@ -1985,7 +2036,7 @@ File create_frm(THD *thd, const char *name, const char *db, int4store(fileinfo+47, key_length); tmp= MYSQL_VERSION_ID; // Store to avoid warning from int4store int4store(fileinfo+51, tmp); - int2store(fileinfo+55, create_info->extra_size); + int4store(fileinfo+55, create_info->extra_size); bzero(fill,IO_SIZE); for (; length > IO_SIZE ; length-= IO_SIZE) { diff --git a/sql/table.h b/sql/table.h index 99b818ef47b..eb0c0cf98d3 100644 --- a/sql/table.h +++ b/sql/table.h @@ -198,9 +198,16 @@ typedef struct st_table_share locking of this table for writing. FALSE - otherwise. */ bool system_table; + /* + This flag is set for the log tables. 
Used during FLUSH instances to skip + log tables, while closing tables (since logs must be always available) + */ + bool log_table; #ifdef WITH_PARTITION_STORAGE_ENGINE const uchar *partition_info; uint partition_info_len; + const uchar *part_state; + uint part_state_len; handlerton *default_part_db_type; #endif } TABLE_SHARE; @@ -284,6 +291,7 @@ struct st_table { my_bool distinct,const_table,no_rows; my_bool key_read, no_keyread; my_bool locked_by_flush; + my_bool locked_by_logger; my_bool locked_by_name; my_bool fulltext_searched; my_bool no_cache; diff --git a/sql/unireg.cc b/sql/unireg.cc index 7b15e14bdaf..4200a36ab58 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -89,9 +89,6 @@ bool mysql_create_frm(THD *thd, const char *file_name, partition_info *part_info= thd->lex->part_info; #endif DBUG_ENTER("mysql_create_frm"); -#ifdef WITH_PARTITION_STORAGE_ENGINE - thd->lex->part_info= NULL; -#endif DBUG_ASSERT(*fn_rext((char*)file_name)); // Check .frm extension formnames.type_names=0; @@ -134,10 +131,13 @@ bool mysql_create_frm(THD *thd, const char *file_name, create_info->extra_size= (2 + str_db_type.length + 2 + create_info->connect_string.length); /* Partition */ - create_info->extra_size+= 5; + create_info->extra_size+= 9; #ifdef WITH_PARTITION_STORAGE_ENGINE if (part_info) + { create_info->extra_size+= part_info->part_info_len; + create_info->extra_size+= part_info->part_state_len; + } #endif for (i= 0; i < keys; i++) @@ -171,7 +171,10 @@ bool mysql_create_frm(THD *thd, const char *file_name, #ifdef WITH_PARTITION_STORAGE_ENGINE if (part_info) + { fileinfo[61]= (uchar) ha_legacy_type(part_info->default_engine_type); + DBUG_PRINT("info", ("part_db_type = %d", fileinfo[61])); + } #endif int2store(fileinfo+59,db_file->extra_rec_buf_length()); if (my_pwrite(file,(byte*) fileinfo,64,0L,MYF_RW) || @@ -206,12 +209,18 @@ bool mysql_create_frm(THD *thd, const char *file_name, my_write(file, (const byte*)part_info->part_info_string, part_info->part_info_len + 1, MYF_RW)) goto err; + DBUG_PRINT("info", ("Part state len = %d", part_info->part_state_len)); + int4store(buff, part_info->part_state_len); + if (my_write(file, (const byte*)buff, 4, MYF_RW) || + my_write(file, (const byte*)part_info->part_state, + part_info->part_state_len, MYF_RW)) + goto err; } else #endif { - bzero(buff, 5); - if (my_write(file, (byte*) buff, 5, MYF_RW)) + bzero(buff, 9); + if (my_write(file, (byte*) buff, 9, MYF_RW)) goto err; } for (i= 0; i < keys; i++) diff --git a/storage/archive/azio.c b/storage/archive/azio.c index aa969b39b53..ebf45c01710 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -114,8 +114,16 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) char buffer[10]; /* Write a very simple .gz header: */ - snprintf(buffer, 10, "%c%c%c%c%c%c%c%c%c%c", gz_magic[0], gz_magic[1], - Z_DEFLATED, 0 /*flags*/, 0,0,0,0 /*time*/, 0 /*xflags*/, 0x03); + buffer[0] = gz_magic[0]; + buffer[1] = gz_magic[1]; + buffer[2] = Z_DEFLATED; + buffer[3] = 0 /*flags*/; + buffer[4] = 0; + buffer[5] = 0; + buffer[6] = 0; + buffer[7] = 0 /*time*/; + buffer[8] = 0 /*xflags*/; + buffer[9] = 0x03; s->start = 10L; my_write(s->file, buffer, s->start, MYF(0)); /* We use 10L instead of ftell(s->file) to because ftell causes an diff --git a/storage/csv/Makefile.am b/storage/csv/Makefile.am index f1802f4e5b0..509cef954ff 100644 --- a/storage/csv/Makefile.am +++ b/storage/csv/Makefile.am @@ -24,17 +24,16 @@ INCLUDES = -I$(top_srcdir)/include \ -I$(top_srcdir)/regex \ -I$(top_srcdir)/sql \ -I$(srcdir) 
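
Taken together, the table.cc and unireg.cc hunks above change the .frm layout: the extra-segment length stored at offset 55 grows from 2 to 4 bytes, and a 4-byte-length-prefixed partition-state blob now follows the partition-info string (the segment holds 9 zero bytes when the table is not partitioned). A minimal reader sketch under those assumptions; the names here are illustrative, not the server's:

    #include <stdint.h>

    /* little-endian read, equivalent to the server's uint4korr() */
    static uint32_t read_u32(const unsigned char *p)
    {
      return (uint32_t) p[0] | ((uint32_t) p[1] << 8) |
             ((uint32_t) p[2] << 16) | ((uint32_t) p[3] << 24);
    }

    struct frm_partition_chunks
    {
      const unsigned char *part_info;  uint32_t part_info_len;
      const unsigned char *part_state; uint32_t part_state_len;
    };

    /* Walk the two partition chunks of the extra data segment:
       [4] part_info_len, part_info string, '\0',
       [4] part_state_len, part_state bytes                        */
    static void read_partition_chunks(const unsigned char *next_chunk,
                                      struct frm_partition_chunks *out)
    {
      out->part_info_len= read_u32(next_chunk);
      out->part_info= next_chunk + 4;
      next_chunk+= 4 + out->part_info_len + 1;   /* skip trailing '\0' */
      out->part_state_len= read_u32(next_chunk);
      out->part_state= out->part_state_len ? next_chunk + 4 : 0;
    }
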
-WRAPLIBS= -pkglib_LTLIBRARIES = ha_csv.la - -ha_csv_la_LDFLAGS = -module -noinst_HEADERS = ha_tina.h -ha_csv_la_SOURCES = ha_tina.cc +pkglib_LIBRARIES = libcsv.a LDADD = -DEFS = -DMYSQL_SERVER @DEFS@ +DEFS = @DEFS@ + +libcsv_a_CXXFLAGS = $(AM_CFLAGS) +noinst_HEADERS = ha_tina.h +libcsv_a_SOURCES = ha_tina.cc # Don't update the files from bitkeeper %::SCCS/s.% diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index f258b1b1f99..98d28fea93d 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -88,6 +88,8 @@ handlerton tina_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter Tablespace */ HTON_CAN_RECREATE }; @@ -195,8 +197,16 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) char *tmp_name; uint length; + if (!tina_init) + tina_init_func(); + pthread_mutex_lock(&tina_mutex); length=(uint) strlen(table_name); + + /* + If the share is not present in the hash, create a new share and + initialize its members. + */ if (!(share=(TINA_SHARE*) hash_search(&tina_open_tables, (byte*) table_name, length))) @@ -212,6 +222,7 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) } share->use_count= 0; + share->is_log_table= FALSE; share->table_name_length= length; share->table_name= tmp_name; strmov(share->table_name, table_name); @@ -236,6 +247,9 @@ static TINA_SHARE *get_share(const char *table_name, TABLE *table) share->mapped_file= NULL; // We don't know the state as we just allocated it if (get_mmap(share, 0) > 0) goto error3; + + /* init the file length value used by readers */ + share->saved_data_file_length= share->file_stat.st_size; } share->use_count++; pthread_mutex_unlock(&tina_mutex); @@ -309,14 +323,16 @@ ha_tina::ha_tina(TABLE_SHARE *table_arg) These definitions are found in handler.h. They are probably not completely right. */ - current_position(0), next_position(0), chain_alloced(0), - chain_size(DEFAULT_CHAIN_LENGTH), records_is_known(0) + current_position(0), next_position(0), local_saved_data_file_length(0), + chain_alloced(0), chain_size(DEFAULT_CHAIN_LENGTH), + records_is_known(0) { /* Set our original buffers from pre-allocated memory */ buffer.set(byte_buffer, IO_SIZE, system_charset_info); chain= chain_buffer; } + /* Encode a buffer into the quoted format. */ @@ -425,13 +441,18 @@ int ha_tina::chain_append() */ int ha_tina::find_current_row(byte *buf) { - byte *mapped_ptr= (byte *)share->mapped_file + current_position; + byte *mapped_ptr; byte *end_ptr; DBUG_ENTER("ha_tina::find_current_row"); - /* EOF should be counted as new line */ + mapped_ptr= (byte *)share->mapped_file + current_position; + + /* + We do not read further than local_saved_data_file_length in order + not to conflict with an ongoing concurrent insert. + */ if ((end_ptr= find_eoln(share->mapped_file, current_position, - share->file_stat.st_size)) == 0) + local_saved_data_file_length)) == 0) DBUG_RETURN(HA_ERR_END_OF_FILE); for (Field **field=table->field ; *field ; field++) @@ -489,6 +510,114 @@ const char **ha_tina::bas_ext() const return ha_tina_exts; } +/* + The three functions below are needed to enable concurrent insert + functionality for the CSV engine.
For more details see mysys/thr_lock.c +*/ + +void tina_get_status(void* param, int concurrent_insert) +{ + ha_tina *tina= (ha_tina*) param; + tina->get_status(); +} + +void tina_update_status(void* param) +{ + ha_tina *tina= (ha_tina*) param; + tina->update_status(); +} + +/* this should exist and return 0 for concurrent insert to work */ +my_bool tina_check_status(void* param) +{ + return 0; } + +/* + Save the state of the table + + SYNOPSIS + get_status() + + DESCRIPTION + This function is used to retrieve the file length during the lock + phase of a concurrent insert. For more details see the comment for + ha_tina::update_status below. +*/ + +void ha_tina::get_status() +{ + if (share->is_log_table) + { + /* + We have to use a mutex to follow the pthreads memory visibility + rules for share->saved_data_file_length + */ + pthread_mutex_lock(&share->mutex); + local_saved_data_file_length= share->saved_data_file_length; + pthread_mutex_unlock(&share->mutex); + return; + } + local_saved_data_file_length= share->saved_data_file_length; +} + + +/* + Correct the state of the table. Called by unlock routines + before the write lock is released. + + SYNOPSIS + update_status() + + DESCRIPTION + When we employ a concurrent insert lock, we save the current length of + the file during the lock phase. We do not read beyond this saved value, + as we don't want to interfere with an ongoing concurrent insert. Writers + update the file length info during unlock with update_status(). + + NOTE + For log tables, concurrent insert works differently. The reason is that + log tables are always opened and locked. As they never unlock tables, + the file length after writes must be updated in a different way. For + this purpose we need the is_log_table flag. When this flag is set we + call update_status() explicitly after each row write. +*/ + +void ha_tina::update_status() +{ + /* correct local_saved_data_file_length for writers */ + share->saved_data_file_length= share->file_stat.st_size; +} + + +bool ha_tina::check_if_locking_is_allowed(uint sql_command, + ulong type, TABLE *table, + uint count, + bool called_by_logger_thread) +{ + /* + Deny locking of the log tables, which is incompatible with + concurrent insert, unless called from a logger THD + (general_log_thd or slow_log_thd). + */ + if (table->s->log_table && + sql_command != SQLCOM_TRUNCATE && + !(sql_command == SQLCOM_FLUSH && + type & REFRESH_LOG) && + !called_by_logger_thread && + (table->reginfo.lock_type >= TL_READ_NO_INSERT)) + { + /* + The check >= TL_READ_NO_INSERT denies all write locks + plus the only read lock (TL_READ_NO_INSERT itself) + */ + table->reginfo.lock_type == TL_READ_NO_INSERT ? + my_error(ER_CANT_READ_LOCK_LOG_TABLE, MYF(0)) : + my_error(ER_CANT_WRITE_LOCK_LOG_TABLE, MYF(0)); + return FALSE; + } + return TRUE; +} /* Open a database file. Keep in mind that tables are caches, so @@ -501,9 +630,19 @@ int ha_tina::open(const char *name, int mode, uint test_if_locked) if (!(share= get_share(name, table))) DBUG_RETURN(1); - thr_lock_data_init(&share->lock,&lock,NULL); + + /* + Init locking. Pass the handler object to the locking routines, + so that they can save/update the local_saved_data_file_length value + during locking. This is needed to enable concurrent inserts.
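
A minimal model of the reader/writer protocol these hooks implement, assuming only what the comments above state; the toy_share and toy_handler types are illustrative stand-ins for TINA_SHARE and ha_tina, not the real structs. Readers snapshot the committed length when their lock is granted, writers publish the new length when they unlock, so a scan never runs past rows that were incomplete when it started:

    #include <cstddef>

    struct toy_share { size_t saved_len; };     /* stands in for TINA_SHARE */

    struct toy_handler
    {
      toy_share *share;
      size_t     local_len;                     /* reader's private snapshot */

      /* thr_lock get_status hook: runs when the lock is granted */
      void get_status() { local_len= share->saved_len; }

      /* thr_lock update_status hook: writer publishes before unlocking */
      void update_status(size_t file_size) { share->saved_len= file_size; }

      /* scans stop at the snapshot, not at the live end of the file */
      bool row_visible(size_t row_end) const { return row_end <= local_len; }
    };
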
+ */ + thr_lock_data_init(&share->lock, &lock, (void*) this); ref_length=sizeof(off_t); + share->lock.get_status= tina_get_status; + share->lock.update_status= tina_update_status; + share->lock.check_status= tina_check_status; + DBUG_RETURN(0); } @@ -528,7 +667,7 @@ int ha_tina::write_row(byte * buf) int size; DBUG_ENTER("ha_tina::write_row"); - statistic_increment(table->in_use->status_var.ha_write_count, &LOCK_status); + ha_statistic_increment(&SSV::ha_write_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_INSERT) table->timestamp_field->set_time(); @@ -547,6 +686,18 @@ int ha_tina::write_row(byte * buf) */ if (get_mmap(share, 0) > 0) DBUG_RETURN(-1); + + /* update local copy of the max position to see our own changes */ + local_saved_data_file_length= share->file_stat.st_size; + + /* update status for the log tables */ + if (share->is_log_table) + { + pthread_mutex_lock(&share->mutex); + update_status(); + pthread_mutex_unlock(&share->mutex); + } + records++; DBUG_RETURN(0); } @@ -565,8 +716,7 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) int size; DBUG_ENTER("ha_tina::update_row"); - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE) table->timestamp_field->set_time(); @@ -578,6 +728,13 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) if (my_write(share->data_file, buffer.ptr(), size, MYF(MY_WME | MY_NABP))) DBUG_RETURN(-1); + + /* UPDATE should never happen on the log tables */ + DBUG_ASSERT(!share->is_log_table); + + /* update local copy of the max position to see our own changes */ + local_saved_data_file_length= share->file_stat.st_size; + DBUG_RETURN(0); } @@ -594,88 +751,19 @@ int ha_tina::update_row(const byte * old_data, byte * new_data) int ha_tina::delete_row(const byte * buf) { DBUG_ENTER("ha_tina::delete_row"); - statistic_increment(table->in_use->status_var.ha_delete_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_delete_count); if (chain_append()) DBUG_RETURN(-1); --records; + /* DELETE should never happen on the log table */ + DBUG_ASSERT(!share->is_log_table); + DBUG_RETURN(0); } -/* - Fill buf with value from key. Simply this is used for a single index read - with a key. -*/ -int ha_tina::index_read(byte * buf, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_tina::index_read"); - DBUG_ASSERT(0); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); -} - -/* - Fill buf with value from key. Simply this is used for a single index read - with a key. - Whatever the current key is we will use it. This is what will be in "index". -*/ -int ha_tina::index_read_idx(byte * buf, uint index, const byte * key, - uint key_len __attribute__((unused)), - enum ha_rkey_function find_flag - __attribute__((unused))) -{ - DBUG_ENTER("ha_tina::index_read_idx"); - DBUG_ASSERT(0); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); -} - - -/* - Read the next position in the index. -*/ -int ha_tina::index_next(byte * buf) -{ - DBUG_ENTER("ha_tina::index_next"); - DBUG_ASSERT(0); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); -} - -/* - Read the previous position in the index. 
-*/ -int ha_tina::index_prev(byte * buf) -{ - DBUG_ENTER("ha_tina::index_prev"); - DBUG_ASSERT(0); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); -} - -/* - Read the first position in the index -*/ -int ha_tina::index_first(byte * buf) -{ - DBUG_ENTER("ha_tina::index_first"); - DBUG_ASSERT(0); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); -} - -/* - Read the last position in the index - With this we don't need to do a filesort() with index. - We just read the last row and call previous. -*/ -int ha_tina::index_last(byte * buf) -{ - DBUG_ENTER("ha_tina::index_last"); - DBUG_ASSERT(0); - DBUG_RETURN(HA_ADMIN_NOT_IMPLEMENTED); -} /* All table scans call this first. @@ -743,8 +831,7 @@ int ha_tina::rnd_next(byte *buf) { DBUG_ENTER("ha_tina::rnd_next"); - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); current_position= next_position; if (!share->mapped_file) @@ -781,8 +868,7 @@ void ha_tina::position(const byte *record) int ha_tina::rnd_pos(byte * buf, byte *pos) { DBUG_ENTER("ha_tina::rnd_pos"); - statistic_increment(table->in_use->status_var.ha_read_rnd_next_count, - &LOCK_status); + ha_statistic_increment(&SSV::ha_read_rnd_next_count); current_position= my_get_ptr(pos,ref_length); DBUG_RETURN(find_current_row(buf)); } @@ -809,20 +895,15 @@ void ha_tina::info(uint flag) int ha_tina::extra(enum ha_extra_function operation) { DBUG_ENTER("ha_tina::extra"); + if (operation == HA_EXTRA_MARK_AS_LOG_TABLE) + { + pthread_mutex_lock(&share->mutex); + share->is_log_table= TRUE; + pthread_mutex_unlock(&share->mutex); + } DBUG_RETURN(0); } -/* - This is no longer used. -*/ -int ha_tina::reset(void) -{ - DBUG_ENTER("ha_tina::reset"); - ha_tina::extra(HA_EXTRA_RESET); - DBUG_RETURN(0); -} - - /* Called after each table scan. In particular after deletes, and updates. In the last case we employ chain of deleted @@ -900,15 +981,6 @@ int ha_tina::delete_all_rows() DBUG_RETURN(rc); } -/* - Always called by the start of a transaction (or by "lock tables"); -*/ -int ha_tina::external_lock(THD *thd, int lock_type) -{ - DBUG_ENTER("ha_tina::external_lock"); - DBUG_RETURN(0); // No external locking -} - /* Called by the database to lock the table. Keep in mind that this is an internal lock. diff --git a/storage/csv/ha_tina.h b/storage/csv/ha_tina.h index c46750fb703..a11d4281389 100644 --- a/storage/csv/ha_tina.h +++ b/storage/csv/ha_tina.h @@ -23,9 +23,20 @@ typedef struct st_tina_share { char *table_name; byte *mapped_file; /* mapped region of file */ - uint table_name_length,use_count; + uint table_name_length, use_count; + /* + Below flag is needed to make log tables work with concurrent insert. + For more details see comment to ha_tina::update_status. + */ + my_bool is_log_table; MY_STAT file_stat; /* Stat information for the data file */ File data_file; /* Current open data file */ + /* + Here we save the length of the file for readers. This is updated by + inserts, updates and deletes. The var is initialized along with the + share initialization. 
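
Since log tables stay permanently locked, the unlock-driven update_status() path above never runs for them. A sketch of the alternative flow this patch wires up, with hypothetical free functions standing in for the member functions shown above:

    #include <pthread.h>
    #include <sys/types.h>

    /* what ha_tina::extra(HA_EXTRA_MARK_AS_LOG_TABLE) does */
    void mark_share_as_log_table(TINA_SHARE *share)
    {
      pthread_mutex_lock(&share->mutex);
      share->is_log_table= TRUE;
      pthread_mutex_unlock(&share->mutex);
    }

    /* what ha_tina::write_row() does after appending a row to a log
       table: publish the new length per row, under the share mutex,
       instead of waiting for an unlock that never comes             */
    void publish_log_row(TINA_SHARE *share, off_t file_size)
    {
      pthread_mutex_lock(&share->mutex);
      share->saved_data_file_length= file_size;  /* as in update_status() */
      pthread_mutex_unlock(&share->mutex);
    }
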
+ */ + off_t saved_data_file_length; pthread_mutex_t mutex; THR_LOCK lock; } TINA_SHARE; @@ -41,6 +52,7 @@ class ha_tina: public handler TINA_SHARE *share; /* Shared lock info */ off_t current_position; /* Current position in the file during a file scan */ off_t next_position; /* Next position in the file scan */ + off_t local_saved_data_file_length; /* save position for reads */ byte byte_buffer[IO_SIZE]; String buffer; /* @@ -92,19 +104,15 @@ public: */ ha_rows estimate_rows_upper_bound() { return HA_POS_ERROR; } + virtual bool check_if_locking_is_allowed(uint sql_command, + ulong type, TABLE *table, + uint count, + bool called_by_logger_thread); int open(const char *name, int mode, uint test_if_locked); int close(void); int write_row(byte * buf); int update_row(const byte * old_data, byte * new_data); int delete_row(const byte * buf); - int index_read(byte * buf, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_read_idx(byte * buf, uint idx, const byte * key, - uint key_len, enum ha_rkey_function find_flag); - int index_next(byte * buf); - int index_prev(byte * buf); - int index_first(byte * buf); - int index_last(byte * buf); int rnd_init(bool scan=1); int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); @@ -112,14 +120,19 @@ public: void position(const byte *record); void info(uint); int extra(enum ha_extra_function operation); - int reset(void); - int external_lock(THD *thd, int lock_type); int delete_all_rows(void); int create(const char *name, TABLE *form, HA_CREATE_INFO *create_info); THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to, enum thr_lock_type lock_type); + /* + These functions used to get/update status of the handler. + Needed to enable concurrent inserts. + */ + void get_status(); + void update_status(); + /* The following methods were added just for TINA */ int encode_quote(byte *buf); int find_current_row(byte *buf); diff --git a/storage/example/ha_example.cc b/storage/example/ha_example.cc index f4b1276198d..30034496291 100644 --- a/storage/example/ha_example.cc +++ b/storage/example/ha_example.cc @@ -103,6 +103,8 @@ handlerton example_hton= { NULL, /* Start Consistent Snapshot */ NULL, /* Flush logs */ NULL, /* Show status */ + NULL, /* Partition flags */ + NULL, /* Alter table flags */ NULL, /* Alter tablespace */ HTON_CAN_RECREATE }; diff --git a/storage/ndb/config/type_ndbapitools.mk.am b/storage/ndb/config/type_ndbapitools.mk.am index e0f2fd1c0f6..d7ab5797f14 100644 --- a/storage/ndb/config/type_ndbapitools.mk.am +++ b/storage/ndb/config/type_ndbapitools.mk.am @@ -3,7 +3,7 @@ LDADD += \ $(top_builddir)/storage/ndb/src/libndbclient.la \ $(top_builddir)/dbug/libdbug.a \ $(top_builddir)/mysys/libmysys.a \ - $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ + $(top_builddir)/strings/libmystrings.a @NDB_SCI_LIBS@ @ZLIB_LIBS@ INCLUDES += -I$(srcdir) -I$(top_srcdir)/include \ -I$(top_srcdir)/storage/ndb/include \ diff --git a/storage/ndb/include/kernel/ndb_limits.h b/storage/ndb/include/kernel/ndb_limits.h index 3f46f3802c7..ef6b8370888 100644 --- a/storage/ndb/include/kernel/ndb_limits.h +++ b/storage/ndb/include/kernel/ndb_limits.h @@ -27,6 +27,7 @@ */ #define MAX_NDB_NODES 49 #define MAX_NODES 64 +#define UNDEF_NODEGROUP 0xFFFF /** * MAX_API_NODES = MAX_NODES - No of NDB Nodes in use @@ -64,6 +65,7 @@ #define MAX_NULL_BITS 4096 #define MAX_FRAGMENT_DATA_BYTES (4+(2 * 8 * MAX_REPLICAS * MAX_NDB_NODES)) #define MAX_NDB_PARTITIONS 1024 +#define MAX_RANGE_DATA (131072+MAX_NDB_PARTITIONS) //0.5 MByte of list data #define 
MIN_ATTRBUF ((MAX_ATTRIBUTES_IN_TABLE/24) + 1) /* diff --git a/storage/ndb/include/kernel/signaldata/AlterTable.hpp b/storage/ndb/include/kernel/signaldata/AlterTable.hpp index 572e97afbd6..260c8511bd4 100644 --- a/storage/ndb/include/kernel/signaldata/AlterTable.hpp +++ b/storage/ndb/include/kernel/signaldata/AlterTable.hpp @@ -63,6 +63,10 @@ private: /* n = Changed name f = Changed frm + d = Changed fragment data + r = Changed range or list array + t = Changed tablespace name array + s = Changed tablespace id array 1111111111222222222233 01234567890123456789012345678901 @@ -70,6 +74,10 @@ private: */ #define NAME_SHIFT (0) #define FRM_SHIFT (1) +#define FRAG_DATA_SHIFT (2) +#define RANGE_LIST_SHIFT (3) +#define TS_NAME_SHIFT (4) +#define TS_SHIFT (5) /** * Getters and setters @@ -78,8 +86,28 @@ private: static void setNameFlag(UintR & changeMask, Uint32 nameFlg); static Uint8 getFrmFlag(const UintR & changeMask); static void setFrmFlag(UintR & changeMask, Uint32 frmFlg); + static Uint8 getFragDataFlag(const UintR & changeMask); + static void setFragDataFlag(UintR & changeMask, Uint32 fragFlg); + static Uint8 getRangeListFlag(const UintR & changeMask); + static void setRangeListFlag(UintR & changeMask, Uint32 rangeFlg); + static Uint8 getTsNameFlag(const UintR & changeMask); + static void setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg); + static Uint8 getTsFlag(const UintR & changeMask); + static void setTsFlag(UintR & changeMask, Uint32 tsFlg); }; +inline +Uint8 +AlterTableReq::getTsFlag(const UintR & changeMask){ + return (Uint8)((changeMask >> TS_SHIFT) & 1); +} + +inline +void +AlterTableReq::setTsFlag(UintR & changeMask, Uint32 tsFlg){ + changeMask |= (tsFlg << TS_SHIFT); +} + inline Uint8 AlterTableReq::getNameFlag(const UintR & changeMask){ @@ -104,6 +132,42 @@ AlterTableReq::setFrmFlag(UintR & changeMask, Uint32 frmFlg){ changeMask |= (frmFlg << FRM_SHIFT); } +inline +Uint8 +AlterTableReq::getFragDataFlag(const UintR & changeMask){ + return (Uint8)((changeMask >> FRAG_DATA_SHIFT) & 1); +} + +inline +void +AlterTableReq::setFragDataFlag(UintR & changeMask, Uint32 fragDataFlg){ + changeMask |= (fragDataFlg << FRAG_DATA_SHIFT); +} + +inline +Uint8 +AlterTableReq::getRangeListFlag(const UintR & changeMask){ + return (Uint8)((changeMask >> RANGE_LIST_SHIFT) & 1); +} + +inline +void +AlterTableReq::setRangeListFlag(UintR & changeMask, Uint32 rangeFlg){ + changeMask |= (rangeFlg << RANGE_LIST_SHIFT); +} + +inline +Uint8 +AlterTableReq::getTsNameFlag(const UintR & changeMask){ + return (Uint8)((changeMask >> TS_NAME_SHIFT) & 1); +} + +inline +void +AlterTableReq::setTsNameFlag(UintR & changeMask, Uint32 tsNameFlg){ + changeMask |= (tsNameFlg << TS_NAME_SHIFT); +} + class AlterTableRef { /** diff --git a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp index ae6bfee6fe1..c7bfd07a63d 100644 --- a/storage/ndb/include/kernel/signaldata/BackupImpl.hpp +++ b/storage/ndb/include/kernel/signaldata/BackupImpl.hpp @@ -139,21 +139,11 @@ class StartBackupReq { friend bool printSTART_BACKUP_REQ(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( MaxTableTriggers = 4 ); - STATIC_CONST( HeaderLength = 5 ); - STATIC_CONST( TableTriggerLength = 4); - + STATIC_CONST( SignalLength = 2 ); + private: Uint32 backupId; Uint32 backupPtr; - Uint32 signalNo; - Uint32 noOfSignals; - Uint32 noOfTableTriggers; - - struct TableTriggers { - Uint32 tableId; - Uint32 triggerIds[3]; - } tableTriggers[MaxTableTriggers]; }; class StartBackupRef { @@ 
-169,7 +159,7 @@ class StartBackupRef { friend bool printSTART_BACKUP_REF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 5 ); + STATIC_CONST( SignalLength = 4 ); enum ErrorCode { FailedToAllocateTriggerRecord = 1 @@ -177,7 +167,6 @@ public: private: Uint32 backupId; Uint32 backupPtr; - Uint32 signalNo; Uint32 errorCode; Uint32 nodeId; }; @@ -195,12 +184,11 @@ class StartBackupConf { friend bool printSTART_BACKUP_CONF(FILE *, const Uint32 *, Uint32, Uint16); public: - STATIC_CONST( SignalLength = 3 ); + STATIC_CONST( SignalLength = 2 ); private: Uint32 backupId; Uint32 backupPtr; - Uint32 signalNo; }; class BackupFragmentReq { diff --git a/storage/ndb/include/kernel/signaldata/CreateTable.hpp b/storage/ndb/include/kernel/signaldata/CreateTable.hpp index 44a95469b38..b43a5e76eaf 100644 --- a/storage/ndb/include/kernel/signaldata/CreateTable.hpp +++ b/storage/ndb/include/kernel/signaldata/CreateTable.hpp @@ -95,7 +95,8 @@ public: InvalidTablespace = 755, VarsizeBitfieldNotSupported = 757, NotATablespace = 758, - InvalidTablespaceVersion = 759 + InvalidTablespaceVersion = 759, + OutOfStringBuffer = 773 }; private: diff --git a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp index 6b17515eb6f..47456f11842 100644 --- a/storage/ndb/include/kernel/signaldata/DiAddTab.hpp +++ b/storage/ndb/include/kernel/signaldata/DiAddTab.hpp @@ -32,6 +32,7 @@ class DiAddTabReq { public: STATIC_CONST( SignalLength = 9 ); SECTION( FRAGMENTATION = 0 ); + SECTION( TS_RANGE = 0 ); private: Uint32 connectPtr; diff --git a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp index a46750228b6..2f2b26f34a4 100644 --- a/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp +++ b/storage/ndb/include/kernel/signaldata/DictTabInfo.hpp @@ -122,6 +122,16 @@ public: FragmentData = 130, // CREATE_FRAGMENTATION reply TablespaceId = 131, TablespaceVersion = 132, + TablespaceDataLen = 133, + TablespaceData = 134, + RangeListDataLen = 135, + RangeListData = 136, + ReplicaDataLen = 137, + ReplicaData = 138, + MaxRowsLow = 139, + MaxRowsHigh = 140, + DefaultNoPartFlag = 141, + LinearHashFlag = 142, RowGCIFlag = 150, RowChecksumFlag = 151, @@ -298,11 +308,26 @@ public: Uint32 CustomTriggerId; Uint32 TablespaceId; Uint32 TablespaceVersion; + Uint32 MaxRowsLow; + Uint32 MaxRowsHigh; + Uint32 DefaultNoPartFlag; + Uint32 LinearHashFlag; + /* + TODO RONM: + We need to replace FRM, Fragment Data, Tablespace Data and in + very particular RangeListData with dynamic arrays + */ Uint32 FrmLen; char FrmData[MAX_FRM_DATA_SIZE]; Uint32 FragmentCount; + Uint32 ReplicaDataLen; + Uint16 ReplicaData[MAX_FRAGMENT_DATA_BYTES]; Uint32 FragmentDataLen; - Uint16 FragmentData[(MAX_FRAGMENT_DATA_BYTES+1)/2]; + Uint16 FragmentData[3*MAX_NDB_PARTITIONS]; + Uint32 TablespaceDataLen; + Uint32 TablespaceData[2*MAX_NDB_PARTITIONS]; + Uint32 RangeListDataLen; + char RangeListData[4*2*MAX_NDB_PARTITIONS*2]; Uint32 RowGCIFlag; Uint32 RowChecksumFlag; diff --git a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp index cd3f8849552..4c77e337122 100644 --- a/storage/ndb/include/kernel/signaldata/LqhFrag.hpp +++ b/storage/ndb/include/kernel/signaldata/LqhFrag.hpp @@ -49,6 +49,7 @@ private: Uint32 nodeId; Uint32 totalFragments; Uint32 startGci; + Uint32 tablespaceId; Uint32 logPartId; }; diff --git a/storage/ndb/include/kernel/signaldata/SystemError.hpp 
b/storage/ndb/include/kernel/signaldata/SystemError.hpp index c2c51e88bf2..b3646a858f6 100644 --- a/storage/ndb/include/kernel/signaldata/SystemError.hpp +++ b/storage/ndb/include/kernel/signaldata/SystemError.hpp @@ -43,10 +43,11 @@ public: enum ErrorCode { GCPStopDetected = 3, CopyFragRefError = 5, - TestStopOnError = 6 + TestStopOnError = 6, + CopySubscriptionRef = 7, + CopySubscriberRef = 8 }; -private: Uint32 errorRef; Uint32 errorCode; Uint32 data1; diff --git a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h index c2524cddfc3..59d048370ae 100644 --- a/storage/ndb/include/mgmapi/mgmapi_config_parameters.h +++ b/storage/ndb/include/mgmapi/mgmapi_config_parameters.h @@ -86,6 +86,7 @@ #define CFG_DB_MAX_OPEN_FILES 159 #define CFG_DB_DISK_PAGE_BUFFER_MEMORY 160 +#define CFG_DB_STRING_MEMORY 161 #define CFG_NODE_ARBIT_RANK 200 #define CFG_NODE_ARBIT_DELAY 201 diff --git a/storage/ndb/include/ndb_version.h.in b/storage/ndb/include/ndb_version.h.in index c953088bc07..5b67796a019 100644 --- a/storage/ndb/include/ndb_version.h.in +++ b/storage/ndb/include/ndb_version.h.in @@ -63,6 +63,6 @@ char ndb_version_string_buf[NDB_VERSION_STRING_BUF_SZ]; #define NDBD_ROWID_VERSION (MAKE_VERSION(5,1,6)) #define NDBD_INCL_NODECONF_VERSION_4 MAKE_VERSION(4,1,17) #define NDBD_INCL_NODECONF_VERSION_5 MAKE_VERSION(5,0,18) - +#define NDBD_FRAGID_VERSION (MAKE_VERSION(5,1,6)) #endif diff --git a/storage/ndb/include/ndbapi/NdbDictionary.hpp b/storage/ndb/include/ndbapi/NdbDictionary.hpp index 2599a391318..6f8ead63a81 100644 --- a/storage/ndb/include/ndbapi/NdbDictionary.hpp +++ b/storage/ndb/include/ndbapi/NdbDictionary.hpp @@ -669,10 +669,22 @@ public: Uint32 getFrmLength() const; /** - * Get Node Group and Tablespace id's for fragments in table + * Get Fragment Data (id, state and node group) */ - const void *getNodeGroupIds() const; - Uint32 getNodeGroupIdsLength() const; + const void *getFragmentData() const; + Uint32 getFragmentDataLen() const; + + /** + * Get Range or List Array (value, partition) + */ + const void *getRangeListData() const; + Uint32 getRangeListDataLen() const; + + /** + * Get Tablespace Data (id, version) + */ + const void *getTablespaceData() const; + Uint32 getTablespaceDataLen() const; /** @} *******************************************************************/ @@ -720,7 +732,23 @@ public: * @see NdbDictionary::Table::getLogging. */ void setLogging(bool); - + + /** + * Set/Get Linear Hash Flag + */ + void setLinearFlag(Uint32 flag); + bool getLinearFlag() const; + + /** + * Set fragment count + */ + void setFragmentCount(Uint32); + + /** + * Get fragment count + */ + Uint32 getFragmentCount() const; + /** * Set fragmentation type */ @@ -772,6 +800,19 @@ public: */ virtual int getObjectVersion() const; + /** + * Set/Get Maximum number of rows in table (only used to calculate + * number of partitions). + */ + void setMaxRows(Uint64 maxRows); + Uint64 getMaxRows() const; + + /** + * Set/Get indicator if default number of partitions is used in table. 
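
A hedged usage sketch of the NdbDictionary::Table additions above and below; the call names come from this header, but the fragment-data payload is only a placeholder, since its exact encoding is defined by the NDB kernel, not by this example:

    #include <NdbDictionary.hpp>

    void describe_partitioned_table(NdbDictionary::Table &tab)
    {
      tab.setFragmentCount(4);            /* explicit fragment count      */
      tab.setDefaultNoPartitionsFlag(0);  /* we chose the count ourselves */
      tab.setLinearFlag(0);               /* plain hash, not LINEAR hash  */
      tab.setMaxRows((Uint64)1000000);    /* only sizes the partitioning  */

      /* (id, node group, state) per fragment; the values here are
         placeholders, not a valid kernel encoding                   */
      Uint16 frag_data[3 * 4]= { 0 };
      tab.setFragmentData(frag_data, sizeof(frag_data));
    }
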
+ */ + void setDefaultNoPartitionsFlag(Uint32 indicator); + Uint32 getDefaultNoPartitionsFlag() const; + /** * Get object id */ @@ -783,9 +824,34 @@ public: void setFrm(const void* data, Uint32 len); /** - * Set node group for fragments + * Set array of fragment information containing + * Fragment Identity + * Node group identity + * Fragment State */ - void setNodeGroupIds(const void *data, Uint32 len); + void setFragmentData(const void* data, Uint32 len); + + /** + * Set/Get tablespace names per fragment + */ + void setTablespaceNames(const void* data, Uint32 len); + const void *getTablespaceNames(); + Uint32 getTablespaceNamesLen() const; + + /** + * Set tablespace information per fragment + * Contains a tablespace id and a tablespace version + */ + void setTablespaceData(const void* data, Uint32 len); + + /** + * Set array of information mapping range values and list values + * to fragments. This is essentially a sorted map consisting of + * pairs of value, fragment identity. For range partitions there is + * one pair per fragment. For list partitions it could be any number + * of pairs, at least as many as there are fragments. + */ + void setRangeListData(const void* data, Uint32 len); /** * Set table object type diff --git a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp index e9b0188d93b..8d624ea311e 100644 --- a/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp +++ b/storage/ndb/src/common/debugger/signaldata/BackupImpl.cpp @@ -48,16 +48,8 @@ printDEFINE_BACKUP_CONF(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ bool printSTART_BACKUP_REQ(FILE * out, const Uint32 * data, Uint32 l, Uint16 bno){ StartBackupReq* sig = (StartBackupReq*)data; - fprintf(out, " backupPtr: %d backupId: %d signalNo: %d of %d\n", - sig->backupPtr, sig->backupId, - sig->signalNo + 1, sig->noOfSignals); - for(Uint32 i = 0; inoOfTableTriggers; i++) - fprintf(out, - " Table: %d Triggers = [ insert: %d update: %d delete: %d ]\n", - sig->tableTriggers[i].tableId, - sig->tableTriggers[i].triggerIds[TriggerEvent::TE_INSERT], - sig->tableTriggers[i].triggerIds[TriggerEvent::TE_UPDATE], - sig->tableTriggers[i].triggerIds[TriggerEvent::TE_DELETE]); + fprintf(out, " backupPtr: %d backupId: %d\n", + sig->backupPtr, sig->backupId); return true; } diff --git a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp index 885c2a03d93..d0f48597c77 100644 --- a/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp +++ b/storage/ndb/src/common/debugger/signaldata/DictTabInfo.cpp @@ -44,11 +44,22 @@ DictTabInfo::TableMapping[] = { DTIMAP(Table, CustomTriggerId, CustomTriggerId), DTIMAP2(Table, FrmLen, FrmLen, 0, MAX_FRM_DATA_SIZE), DTIMAPB(Table, FrmData, FrmData, 0, MAX_FRM_DATA_SIZE, FrmLen), - DTIMAP(Table, FragmentCount, FragmentCount), - DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, MAX_FRAGMENT_DATA_BYTES), - DTIMAPB(Table, FragmentData, FragmentData, 0, MAX_FRAGMENT_DATA_BYTES, FragmentDataLen), + DTIMAP2(Table, FragmentCount, FragmentCount, 0, MAX_NDB_PARTITIONS), + DTIMAP2(Table, ReplicaDataLen, ReplicaDataLen, 0, 2*MAX_FRAGMENT_DATA_BYTES), + DTIMAPB(Table, ReplicaData, ReplicaData, 0, 2*MAX_FRAGMENT_DATA_BYTES, ReplicaDataLen), + DTIMAP2(Table, FragmentDataLen, FragmentDataLen, 0, 6*MAX_NDB_PARTITIONS), + DTIMAPB(Table, FragmentData, FragmentData, 0, 6*MAX_NDB_PARTITIONS, FragmentDataLen), + DTIMAP2(Table, TablespaceDataLen, TablespaceDataLen, 0, 
8*MAX_NDB_PARTITIONS), + DTIMAPB(Table, TablespaceData, TablespaceData, 0, 8*MAX_NDB_PARTITIONS, TablespaceDataLen), + DTIMAP2(Table, RangeListDataLen, RangeListDataLen, 0, 8*MAX_NDB_PARTITIONS), + DTIMAPB(Table, RangeListData, RangeListData, 0, 8*MAX_NDB_PARTITIONS, RangeListDataLen), DTIMAP(Table, TablespaceId, TablespaceId), DTIMAP(Table, TablespaceVersion, TablespaceVersion), + DTIMAP(Table, MaxRowsLow, MaxRowsLow), + DTIMAP(Table, MaxRowsHigh, MaxRowsHigh), + DTIMAP(Table, DefaultNoPartFlag, DefaultNoPartFlag), + DTIMAP(Table, LinearHashFlag, LinearHashFlag), + DTIMAP(Table, TablespaceVersion, TablespaceVersion), DTIMAP(Table, RowGCIFlag, RowGCIFlag), DTIMAP(Table, RowChecksumFlag, RowChecksumFlag), DTIBREAK(AttributeName) @@ -124,12 +135,22 @@ DictTabInfo::Table::init(){ DeleteTriggerId = RNIL; CustomTriggerId = RNIL; FrmLen = 0; - memset(FrmData, 0, sizeof(FrmData)); - FragmentCount = 0; FragmentDataLen = 0; + ReplicaDataLen = 0; + RangeListDataLen = 0; + TablespaceDataLen = 0; + memset(FrmData, 0, sizeof(FrmData)); memset(FragmentData, 0, sizeof(FragmentData)); + memset(ReplicaData, 0, sizeof(ReplicaData)); + memset(RangeListData, 0, sizeof(RangeListData)); + memset(TablespaceData, 0, sizeof(TablespaceData)); + FragmentCount = 0; TablespaceId = RNIL; TablespaceVersion = ~0; + MaxRowsLow = 0; + MaxRowsHigh = 0; + DefaultNoPartFlag = 1; + LinearHashFlag = 1; RowGCIFlag = ~0; RowChecksumFlag = ~0; diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.cpp b/storage/ndb/src/kernel/blocks/backup/Backup.cpp index 5c79277521c..49d814e612e 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.cpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.cpp @@ -501,12 +501,6 @@ const TriggerEvent::Value triggerEventValues[] = { TriggerEvent::TE_DELETE }; -const char* triggerNameFormat[] = { - "NDB$BACKUP_%d_%d_INSERT", - "NDB$BACKUP_%d_%d_UPDATE", - "NDB$BACKUP_%d_%d_DELETE" -}; - const Backup::State Backup::validSlaveTransitions[] = { INITIAL, DEFINING, @@ -776,7 +770,6 @@ Backup::checkNodeFail(Signal* signal, ref->backupPtr = ptr.i; ref->backupId = ptr.p->backupId; ref->errorCode = AbortBackupOrd::BackupFailureDueToNodeFail; - ref->signalNo = ptr.p->masterData.startBackup.signalNo; gsn= GSN_START_BACKUP_REF; len= StartBackupRef::SignalLength; pos= &ref->nodeId - signal->getDataPtr(); @@ -928,9 +921,7 @@ Backup::execBACKUP_REQ(Signal* signal) ptr.p->backupKey[1] = 0; ptr.p->backupDataLen = 0; ptr.p->masterData.errorCode = 0; - ptr.p->masterData.dropTrig.tableId = RNIL; - ptr.p->masterData.alterTrig.tableId = RNIL; - + UtilSequenceReq * utilReq = (UtilSequenceReq*)signal->getDataPtrSend(); ptr.p->masterData.gsn = GSN_UTIL_SEQUENCE_REQ; @@ -1241,13 +1232,18 @@ Backup::defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) signal->theData[2] = ptr.p->backupId; ptr.p->nodes.copyto(NdbNodeBitmask::Size, signal->theData+3); sendSignal(CMVMI_REF, GSN_EVENT_REP, signal, 3+NdbNodeBitmask::Size, JBB); - + /** - * Prepare Trig + * We've received GSN_DEFINE_BACKUP_CONF from all participants. + * + * Our next step is to send START_BACKUP_REQ to all participants, + * who will then send CREATE_TRIG_REQ for all tables to their local + * DBTUP. 
*/ TablePtr tabPtr; - ndbrequire(ptr.p->tables.first(tabPtr)); - sendCreateTrig(signal, ptr, tabPtr); + ptr.p->tables.first(tabPtr); + + sendStartBackup(signal, ptr, tabPtr); } /***************************************************************************** @@ -1275,43 +1271,72 @@ Backup::sendCreateTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) { CreateTrigReq * req =(CreateTrigReq *)signal->getDataPtrSend(); - - ptr.p->masterData.gsn = GSN_CREATE_TRIG_REQ; - ptr.p->masterData.sendCounter = 3; - ptr.p->masterData.createTrig.tableId = tabPtr.p->tableId; + + /* + * First, setup the structures + */ + for(Uint32 j=0; j<3; j++) { + jam(); + + TriggerPtr trigPtr; + if(!ptr.p->triggers.seize(trigPtr)) { + jam(); + ptr.p->m_gsn = GSN_START_BACKUP_REF; + StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord; + ref->nodeId = getOwnNodeId(); + sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, + StartBackupRef::SignalLength, JBB); + return; + } // if + + const Uint32 triggerId= trigPtr.i; + tabPtr.p->triggerIds[j] = triggerId; + tabPtr.p->triggerAllocated[j] = true; + trigPtr.p->backupPtr = ptr.i; + trigPtr.p->tableId = tabPtr.p->tableId; + trigPtr.p->tab_ptr_i = tabPtr.i; + trigPtr.p->logEntry = 0; + trigPtr.p->event = j; + trigPtr.p->maxRecordSize = 2048; + trigPtr.p->operation = + &ptr.p->files.getPtr(ptr.p->logFilePtr)->operation; + trigPtr.p->operation->noOfBytes = 0; + trigPtr.p->operation->noOfRecords = 0; + trigPtr.p->errorCode = 0; + } // for + + /* + * now ask DBTUP to create + */ + ptr.p->slaveData.gsn = GSN_CREATE_TRIG_REQ; + ptr.p->slaveData.trigSendCounter = 3; + ptr.p->slaveData.createTrig.tableId = tabPtr.p->tableId; req->setUserRef(reference()); + req->setReceiverRef(reference()); req->setConnectionPtr(ptr.i); req->setRequestType(CreateTrigReq::RT_USER); - + Bitmask attrMask; createAttributeMask(tabPtr, attrMask); req->setAttributeMask(attrMask); req->setTableId(tabPtr.p->tableId); req->setIndexId(RNIL); // not used - req->setTriggerId(RNIL); // to be created req->setTriggerType(TriggerType::SUBSCRIPTION); req->setTriggerActionTime(TriggerActionTime::TA_DETACHED); req->setMonitorReplicas(true); req->setMonitorAllAttributes(false); - req->setOnline(false); // leave trigger offline + req->setOnline(true); - char triggerName[MAX_TAB_NAME_SIZE]; - Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string - LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2); - LinearSectionPtr lsPtr[3]; - for (int i=0; i < 3; i++) { + req->setTriggerId(tabPtr.p->triggerIds[i]); req->setTriggerEvent(triggerEventValues[i]); - req->setReportAllMonitoredAttributes(false); - BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i], - ptr.p->backupId, tabPtr.p->tableId); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = nameBuffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(DBDICT_REF, GSN_CREATE_TRIG_REQ, - signal, CreateTrigReq::SignalLength, JBB, lsPtr, 1); + + sendSignal(DBTUP_REF, GSN_CREATE_TRIG_REQ, + signal, CreateTrigReq::SignalLength, JBB); } } @@ -1331,25 +1356,25 @@ Backup::execCREATE_TRIG_CONF(Signal* signal) /** * Verify that I'm waiting for this conf + * + * ptr.p->masterRef != reference() + * as slaves and masters have triggers now. 
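
In outline, the slave-side trigger setup this hunk introduces looks like the sketch below; all types are simplified stand-ins for the real TriggerRecord pool and signal plumbing:

    #include <stdint.h>

    struct Trigger { uint32_t id; };

    struct TriggerPool                    /* toy model of the seize() pool */
    {
      Trigger slots[3];
      int     used;
      Trigger *seize() { return used < 3 ? &slots[used++] : 0; }
    };

    struct BackupTable { uint32_t tableId; uint32_t triggerIds[3]; };

    /* one CREATE_TRIG_REQ per event type (insert, update, delete),
       with trigger ids preallocated by BACKUP and sent to the local
       DBTUP rather than to DBDICT                                    */
    bool setup_backup_triggers(BackupTable &tab, TriggerPool &pool)
    {
      for (int evt= 0; evt < 3; evt++)
      {
        Trigger *trig= pool.seize();
        if (trig == 0)
          return false;                   /* reply with START_BACKUP_REF */
        tab.triggerIds[evt]= trig->id;
      }
      return true;                        /* await three CONF/REF replies */
    }
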
*/ - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ); - ndbrequire(ptr.p->masterData.sendCounter.done() == false); - ndbrequire(ptr.p->masterData.createTrig.tableId == tableId); - + ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ); + ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false); + ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId); + TablePtr tabPtr; ndbrequire(findTable(ptr, tabPtr, tableId)); ndbrequire(type < 3); // if some decides to change the enums - ndbrequire(tabPtr.p->triggerIds[type] == ILLEGAL_TRIGGER_ID); - tabPtr.p->triggerIds[type] = triggerId; - createTrigReply(signal, ptr); } void Backup::execCREATE_TRIG_REF(Signal* signal) { + jamEntry(); CreateTrigRef* ref = (CreateTrigRef*)signal->getDataPtr(); const Uint32 ptrI = ref->getConnectionPtr(); @@ -1360,14 +1385,16 @@ Backup::execCREATE_TRIG_REF(Signal* signal) /** * Verify that I'm waiting for this ref + * + * ptr.p->masterRef != reference() + * as slaves and masters have triggers now */ - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == GSN_CREATE_TRIG_REQ); - ndbrequire(ptr.p->masterData.sendCounter.done() == false); - ndbrequire(ptr.p->masterData.createTrig.tableId == tableId); + ndbrequire(ptr.p->slaveData.gsn == GSN_CREATE_TRIG_REQ); + ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false); + ndbrequire(ptr.p->slaveData.createTrig.tableId == tableId); ptr.p->setErrorCode(ref->getErrorCode()); - + createTrigReply(signal, ptr); } @@ -1379,26 +1406,33 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) /** * Check finished with table */ - ptr.p->masterData.sendCounter--; - if(ptr.p->masterData.sendCounter.done() == false){ + ptr.p->slaveData.trigSendCounter--; + if(ptr.p->slaveData.trigSendCounter.done() == false){ jam(); return; }//if - if (ERROR_INSERTED(10025)) + if (ERROR_INSERTED(10025)) { ptr.p->errorCode = 325; } if(ptr.p->checkError()) { jam(); - masterAbort(signal, ptr); + ptr.p->m_gsn = GSN_START_BACKUP_REF; + StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); + ref->backupPtr = ptr.i; + ref->backupId = ptr.p->backupId; + ref->errorCode = ptr.p->errorCode; + ref->nodeId = getOwnNodeId(); + sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, + StartBackupRef::SignalLength, JBB); return; }//if TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.createTrig.tableId)); - + ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.createTrig.tableId)); + /** * Next table */ @@ -1410,14 +1444,16 @@ Backup::createTrigReply(Signal* signal, BackupRecordPtr ptr) }//if /** - * Finished with all tables, send StartBackupReq + * We've finished creating triggers. 
+ * + * send conf and wait */ - ptr.p->tables.first(tabPtr); - ptr.p->masterData.startBackup.signalNo = 0; - ptr.p->masterData.startBackup.noOfSignals = - (ptr.p->tables.noOfElements() + StartBackupReq::MaxTableTriggers - 1) / - StartBackupReq::MaxTableTriggers; - sendStartBackup(signal, ptr, tabPtr); + ptr.p->m_gsn = GSN_START_BACKUP_CONF; + StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend(); + conf->backupPtr = ptr.i; + conf->backupId = ptr.p->backupId; + sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal, + StartBackupConf::SignalLength, JBB); } /***************************************************************************** @@ -1430,33 +1466,23 @@ Backup::sendStartBackup(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) { ptr.p->masterData.startBackup.tablePtr = tabPtr.i; - + StartBackupReq* req = (StartBackupReq*)signal->getDataPtrSend(); req->backupId = ptr.p->backupId; req->backupPtr = ptr.i; - req->signalNo = ptr.p->masterData.startBackup.signalNo; - req->noOfSignals = ptr.p->masterData.startBackup.noOfSignals; - Uint32 i; - for(i = 0; itableTriggers[i].tableId = tabPtr.p->tableId; - req->tableTriggers[i].triggerIds[0] = tabPtr.p->triggerIds[0]; - req->tableTriggers[i].triggerIds[1] = tabPtr.p->triggerIds[1]; - req->tableTriggers[i].triggerIds[2] = tabPtr.p->triggerIds[2]; - if(!ptr.p->tables.next(tabPtr)){ - jam(); - i++; - break; - }//if - }//for - req->noOfTableTriggers = i; + /** + * We use trigger Ids that are unique to BACKUP. + * These don't interfere with other triggers (e.g. from DBDICT) + * as there is a special case in DBTUP. + * + * Consequently, backups during online upgrade won't work + */ ptr.p->masterData.gsn = GSN_START_BACKUP_REQ; ptr.p->masterData.sendCounter = ptr.p->nodes; NodeReceiverGroup rg(BACKUP, ptr.p->nodes); - sendSignal(rg, GSN_START_BACKUP_REQ, signal, - StartBackupReq::HeaderLength + - (i * StartBackupReq::TableTriggerLength), JBB); + sendSignal(rg, GSN_START_BACKUP_REQ, signal, + StartBackupReq::SignalLength, JBB); } void @@ -1467,14 +1493,13 @@ Backup::execSTART_BACKUP_REF(Signal* signal) StartBackupRef* ref = (StartBackupRef*)signal->getDataPtr(); const Uint32 ptrI = ref->backupPtr; //const Uint32 backupId = ref->backupId; - const Uint32 signalNo = ref->signalNo; const Uint32 nodeId = ref->nodeId; BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); ptr.p->setErrorCode(ref->errorCode); - startBackupReply(signal, ptr, nodeId, signalNo); + startBackupReply(signal, ptr, nodeId); } void @@ -1485,23 +1510,20 @@ Backup::execSTART_BACKUP_CONF(Signal* signal) StartBackupConf* conf = (StartBackupConf*)signal->getDataPtr(); const Uint32 ptrI = conf->backupPtr; //const Uint32 backupId = conf->backupId; - const Uint32 signalNo = conf->signalNo; const Uint32 nodeId = refToNode(signal->senderBlockRef()); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - startBackupReply(signal, ptr, nodeId, signalNo); + startBackupReply(signal, ptr, nodeId); } void -Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, - Uint32 nodeId, Uint32 signalNo) +Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId) { CRASH_INSERTION((10004)); - ndbrequire(ptr.p->masterData.startBackup.signalNo == signalNo); if (!haveAllSignals(ptr, GSN_START_BACKUP_REQ, nodeId)) { jam(); return; @@ -1518,148 +1540,20 @@ Backup::startBackupReply(Signal* signal, BackupRecordPtr ptr, return; } - TablePtr tabPtr; - c_tablePool.getPtr(tabPtr, ptr.p->masterData.startBackup.tablePtr); - for(Uint32 i = 0; itables.next(tabPtr)) { - jam(); - break; - 
}//if - }//for - - if(tabPtr.i != RNIL) { - jam(); - ptr.p->masterData.startBackup.signalNo++; - sendStartBackup(signal, ptr, tabPtr); - return; - } - - sendAlterTrig(signal, ptr); -} - -/***************************************************************************** - * - * Master functionallity - Activate triggers - * - *****************************************************************************/ -void -Backup::sendAlterTrig(Signal* signal, BackupRecordPtr ptr) -{ - AlterTrigReq * req =(AlterTrigReq *)signal->getDataPtrSend(); - - ptr.p->masterData.gsn = GSN_ALTER_TRIG_REQ; - ptr.p->masterData.sendCounter = 0; - - req->setUserRef(reference()); - req->setConnectionPtr(ptr.i); - req->setRequestType(AlterTrigReq::RT_USER); - req->setTriggerInfo(0); // not used on ALTER via DICT - req->setOnline(true); - req->setReceiverRef(reference()); - - TablePtr tabPtr; - - if (ptr.p->masterData.alterTrig.tableId == RNIL) { - jam(); - ptr.p->tables.first(tabPtr); - } else { - jam(); - ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.alterTrig.tableId)); - ptr.p->tables.next(tabPtr); - }//if - if (tabPtr.i != RNIL) { - jam(); - ptr.p->masterData.alterTrig.tableId = tabPtr.p->tableId; - req->setTableId(tabPtr.p->tableId); - - req->setTriggerId(tabPtr.p->triggerIds[0]); - sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); - - req->setTriggerId(tabPtr.p->triggerIds[1]); - sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); - - req->setTriggerId(tabPtr.p->triggerIds[2]); - sendSignal(DBDICT_REF, GSN_ALTER_TRIG_REQ, - signal, AlterTrigReq::SignalLength, JBB); - - ptr.p->masterData.sendCounter += 3; - return; - }//if - ptr.p->masterData.alterTrig.tableId = RNIL; - /** - * Finished with all tables + * Wait for GCP */ ptr.p->masterData.gsn = GSN_WAIT_GCP_REQ; ptr.p->masterData.waitGCP.startBackup = true; - + WaitGCPReq * waitGCPReq = (WaitGCPReq*)signal->getDataPtrSend(); waitGCPReq->senderRef = reference(); waitGCPReq->senderData = ptr.i; waitGCPReq->requestType = WaitGCPReq::CompleteForceStart; - sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, + sendSignal(DBDIH_REF, GSN_WAIT_GCP_REQ, signal, WaitGCPReq::SignalLength,JBB); } -void -Backup::execALTER_TRIG_CONF(Signal* signal) -{ - jamEntry(); - - AlterTrigConf* conf = (AlterTrigConf*)signal->getDataPtr(); - const Uint32 ptrI = conf->getConnectionPtr(); - - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, ptrI); - - alterTrigReply(signal, ptr); -} - -void -Backup::execALTER_TRIG_REF(Signal* signal) -{ - jamEntry(); - - AlterTrigRef* ref = (AlterTrigRef*)signal->getDataPtr(); - const Uint32 ptrI = ref->getConnectionPtr(); - - BackupRecordPtr ptr; - c_backupPool.getPtr(ptr, ptrI); - - ptr.p->setErrorCode(ref->getErrorCode()); - - alterTrigReply(signal, ptr); -} - -void -Backup::alterTrigReply(Signal* signal, BackupRecordPtr ptr) -{ - - CRASH_INSERTION((10005)); - - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == GSN_ALTER_TRIG_REQ); - ndbrequire(ptr.p->masterData.sendCounter.done() == false); - - ptr.p->masterData.sendCounter--; - - if(ptr.p->masterData.sendCounter.done() == false){ - jam(); - return; - }//if - - if(ptr.p->checkError()){ - jam(); - masterAbort(signal, ptr); - return; - }//if - - sendAlterTrig(signal, ptr); -} - void Backup::execWAIT_GCP_REF(Signal* signal) { @@ -1720,7 +1614,12 @@ Backup::execWAIT_GCP_CONF(Signal* signal){ { CRASH_INSERTION((10009)); ptr.p->stopGCP = gcp; - sendDropTrig(signal, ptr); // regular dropping of triggers + /** + * 
Backup is complete - begin cleanup + * STOP_BACKUP_REQ is sent to participants. + * They then drop the local triggers + */ + sendStopBackup(signal, ptr); return; }//if @@ -1927,8 +1826,8 @@ err: } /***************************************************************************** - * - * Master functionallity - Drop triggers + * + * Slave functionality - Drop triggers * *****************************************************************************/ @@ -1936,23 +1835,63 @@ void Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr) { TablePtr tabPtr; - if (ptr.p->masterData.dropTrig.tableId == RNIL) { + ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ; + + if (ptr.p->slaveData.dropTrig.tableId == RNIL) { jam(); ptr.p->tables.first(tabPtr); } else { jam(); - ndbrequire(findTable(ptr, tabPtr, ptr.p->masterData.dropTrig.tableId)); + ndbrequire(findTable(ptr, tabPtr, ptr.p->slaveData.dropTrig.tableId)); ptr.p->tables.next(tabPtr); }//if if (tabPtr.i != RNIL) { jam(); sendDropTrig(signal, ptr, tabPtr); } else { - jam(); - ptr.p->masterData.dropTrig.tableId = RNIL; + /** + * Insert footers + */ + { + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); + Uint32 * dst; + ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1)); + * dst = 0; + filePtr.p->operation.dataBuffer.updateWritePtr(1); + } - sendStopBackup(signal, ptr); - }//if + { + BackupFilePtr filePtr; + ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); + + const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2; + + Uint32 * dst; + ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz)); + + BackupFormat::CtlFile::GCPEntry * gcp = + (BackupFormat::CtlFile::GCPEntry*)dst; + + gcp->SectionType = htonl(BackupFormat::GCP_ENTRY); + gcp->SectionLength = htonl(gcpSz); + gcp->StartGCP = htonl(ptr.p->startGCP); + gcp->StopGCP = htonl(ptr.p->stopGCP - 1); + filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz); + } + + { // UNLOCK while dropping triggers for better timeslicing + TablePtr tabPtr; + for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL; + ptr.p->tables.next(tabPtr)) + { + signal->theData[0] = tabPtr.p->tableId; + signal->theData[1] = 0; // unlock + EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); + } + } + closeFiles(signal, ptr); + } } void @@ -1961,40 +1900,26 @@ Backup::sendDropTrig(Signal* signal, BackupRecordPtr ptr, TablePtr tabPtr) { jam(); DropTrigReq * req = (DropTrigReq *)signal->getDataPtrSend(); - ptr.p->masterData.gsn = GSN_DROP_TRIG_REQ; - ptr.p->masterData.sendCounter = 0; - + ptr.p->slaveData.gsn = GSN_DROP_TRIG_REQ; + ptr.p->slaveData.trigSendCounter = 0; req->setConnectionPtr(ptr.i); req->setUserRef(reference()); // Sending to myself req->setRequestType(DropTrigReq::RT_USER); req->setIndexId(RNIL); - req->setTriggerInfo(0); // not used on DROP via DICT + req->setTriggerInfo(0); // not used on DROP + req->setTriggerType(TriggerType::SUBSCRIPTION); + req->setTriggerActionTime(TriggerActionTime::TA_DETACHED); - char triggerName[MAX_TAB_NAME_SIZE]; - Uint32 nameBuffer[2 + ((MAX_TAB_NAME_SIZE + 3) >> 2)]; // SP string - LinearWriter w(nameBuffer, sizeof(nameBuffer) >> 2); - LinearSectionPtr lsPtr[3]; - - ptr.p->masterData.dropTrig.tableId = tabPtr.p->tableId; + ptr.p->slaveData.dropTrig.tableId = tabPtr.p->tableId; req->setTableId(tabPtr.p->tableId); for (int i = 0; i < 3; i++) { Uint32 id = tabPtr.p->triggerIds[i]; req->setTriggerId(id); - if (id != ILLEGAL_TRIGGER_ID) { - sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB); - } else { - 
BaseString::snprintf(triggerName, sizeof(triggerName), triggerNameFormat[i], - ptr.p->backupId, tabPtr.p->tableId); - w.reset(); - w.add(CreateTrigReq::TriggerNameKey, triggerName); - lsPtr[0].p = nameBuffer; - lsPtr[0].sz = w.getWordsUsed(); - sendSignal(DBDICT_REF, GSN_DROP_TRIG_REQ, - signal, DropTrigReq::SignalLength, JBB, lsPtr, 1); - } - ptr.p->masterData.sendCounter ++; + req->setTriggerEvent(triggerEventValues[i]); + sendSignal(DBTUP_REF, GSN_DROP_TRIG_REQ, + signal, DropTrigReq::SignalLength, JBB); + ptr.p->slaveData.trigSendCounter ++; } } @@ -2005,11 +1930,13 @@ Backup::execDROP_TRIG_REF(Signal* signal) DropTrigRef* ref = (DropTrigRef*)signal->getDataPtr(); const Uint32 ptrI = ref->getConnectionPtr(); - + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - - //ndbrequire(ref->getErrorCode() == DropTrigRef::NoSuchTrigger); + + ndbout << "ERROR DROPPING TRIGGER: " << ref->getConf()->getTriggerId(); + ndbout << " Err: " << (Uint32)ref->getErrorCode() << endl << endl; + dropTrigReply(signal, ptr); } @@ -2020,29 +1947,29 @@ Backup::execDROP_TRIG_CONF(Signal* signal) DropTrigConf* conf = (DropTrigConf*)signal->getDataPtr(); const Uint32 ptrI = conf->getConnectionPtr(); - + const Uint32 triggerId= conf->getTriggerId(); + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - + dropTrigReply(signal, ptr); } void Backup::dropTrigReply(Signal* signal, BackupRecordPtr ptr) { - CRASH_INSERTION((10012)); - ndbrequire(ptr.p->masterRef == reference()); - ndbrequire(ptr.p->masterData.gsn == GSN_DROP_TRIG_REQ); - ndbrequire(ptr.p->masterData.sendCounter.done() == false); - - ptr.p->masterData.sendCounter--; - if(ptr.p->masterData.sendCounter.done() == false){ + ndbrequire(ptr.p->slaveData.gsn == GSN_DROP_TRIG_REQ); + ndbrequire(ptr.p->slaveData.trigSendCounter.done() == false); + + // move from .masterData to .slaveData + ptr.p->slaveData.trigSendCounter--; + if(ptr.p->slaveData.trigSendCounter.done() == false){ jam(); return; }//if - + sendDropTrig(signal, ptr); // recursive next } @@ -2165,6 +2092,9 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr) #ifdef DEBUG_ABORT ndbout_c("************ masterAbort"); #endif + + ndbassert(ptr.p->masterRef == reference()); + if(ptr.p->masterData.errorCode != 0) { jam(); @@ -2208,13 +2138,13 @@ Backup::masterAbort(Signal* signal, BackupRecordPtr ptr) case GSN_BACKUP_FRAGMENT_REQ: jam(); ptr.p->stopGCP= ptr.p->startGCP + 1; - sendDropTrig(signal, ptr); // dropping due to error + sendStopBackup(signal, ptr); // dropping due to error return; case GSN_UTIL_SEQUENCE_REQ: case GSN_UTIL_LOCK_REQ: - case GSN_DROP_TRIG_REQ: ndbrequire(false); return; + case GSN_DROP_TRIG_REQ: case GSN_STOP_BACKUP_REQ: return; } @@ -2329,6 +2259,7 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) ptr.p->m_gsn = GSN_DEFINE_BACKUP_REQ; ptr.p->slaveState.forceState(INITIAL); ptr.p->slaveState.setState(DEFINING); + ptr.p->slaveData.dropTrig.tableId = RNIL; ptr.p->errorCode = 0; ptr.p->clientRef = req->clientRef; ptr.p->clientData = req->clientData; @@ -2345,14 +2276,14 @@ Backup::execDEFINE_BACKUP_REQ(Signal* signal) ptr.p->backupKey[0] = req->backupKey[0]; ptr.p->backupKey[1] = req->backupKey[1]; ptr.p->backupDataLen = req->backupDataLen; - ptr.p->masterData.dropTrig.tableId = RNIL; - ptr.p->masterData.alterTrig.tableId = RNIL; ptr.p->masterData.errorCode = 0; ptr.p->noOfBytes = 0; ptr.p->noOfRecords = 0; ptr.p->noOfLogBytes = 0; ptr.p->noOfLogRecords = 0; ptr.p->currGCP = 0; + ptr.p->startGCP = 0; + ptr.p->stopGCP = 0; /** * Allocate files @@ -3261,63 +3192,22 @@ 
Backup::execSTART_BACKUP_REQ(Signal* signal) jamEntry(); CRASH_INSERTION((10015)); - + StartBackupReq* req = (StartBackupReq*)signal->getDataPtr(); const Uint32 ptrI = req->backupPtr; - //const Uint32 backupId = req->backupId; - const Uint32 signalNo = req->signalNo; - + BackupRecordPtr ptr; c_backupPool.getPtr(ptr, ptrI); - + ptr.p->slaveState.setState(STARTED); ptr.p->m_gsn = GSN_START_BACKUP_REQ; - for(Uint32 i = 0; inoOfTableTriggers; i++) { - jam(); - TablePtr tabPtr; - ndbrequire(findTable(ptr, tabPtr, req->tableTriggers[i].tableId)); - for(Uint32 j = 0; j<3; j++) { - jam(); - const Uint32 triggerId = req->tableTriggers[i].triggerIds[j]; - tabPtr.p->triggerIds[j] = triggerId; - - TriggerPtr trigPtr; - if(!ptr.p->triggers.seizeId(trigPtr, triggerId)) { - jam(); - ptr.p->m_gsn = GSN_START_BACKUP_REF; - StartBackupRef* ref = (StartBackupRef*)signal->getDataPtrSend(); - ref->backupPtr = ptr.i; - ref->backupId = ptr.p->backupId; - ref->signalNo = signalNo; - ref->errorCode = StartBackupRef::FailedToAllocateTriggerRecord; - ref->nodeId = getOwnNodeId(); - sendSignal(ptr.p->masterRef, GSN_START_BACKUP_REF, signal, - StartBackupRef::SignalLength, JBB); - return; - }//if - - tabPtr.p->triggerAllocated[j] = true; - trigPtr.p->backupPtr = ptr.i; - trigPtr.p->tableId = tabPtr.p->tableId; - trigPtr.p->tab_ptr_i = tabPtr.i; - trigPtr.p->logEntry = 0; - trigPtr.p->event = j; - trigPtr.p->maxRecordSize = 2048; - trigPtr.p->operation = - &ptr.p->files.getPtr(ptr.p->logFilePtr)->operation; - trigPtr.p->operation->noOfBytes = 0; - trigPtr.p->operation->noOfRecords = 0; - trigPtr.p->errorCode = 0; - }//for - }//for - /** * Start file threads... */ BackupFilePtr filePtr; - for(ptr.p->files.first(filePtr); - filePtr.i!=RNIL; + for(ptr.p->files.first(filePtr); + filePtr.i!=RNIL; ptr.p->files.next(filePtr)){ jam(); if(filePtr.p->fileRunning == 0) { @@ -3328,14 +3218,13 @@ Backup::execSTART_BACKUP_REQ(Signal* signal) sendSignalWithDelay(BACKUP_REF, GSN_CONTINUEB, signal, 100, 2); }//if }//for - - ptr.p->m_gsn = GSN_START_BACKUP_CONF; - StartBackupConf* conf = (StartBackupConf*)signal->getDataPtrSend(); - conf->backupPtr = ptr.i; - conf->backupId = ptr.p->backupId; - conf->signalNo = signalNo; - sendSignal(ptr.p->masterRef, GSN_START_BACKUP_CONF, signal, - StartBackupConf::SignalLength, JBB); + + /** + * Tell DBTUP to create triggers + */ + TablePtr tabPtr; + ndbrequire(ptr.p->tables.first(tabPtr)); + sendCreateTrig(signal, ptr, tabPtr); } /***************************************************************************** @@ -3887,7 +3776,7 @@ void Backup::execFSAPPENDCONF(Signal* signal) { jamEntry(); - + CRASH_INSERTION((10018)); //FsConf * conf = (FsConf*)signal->getDataPtr(); @@ -3990,10 +3879,13 @@ Backup::execBACKUP_TRIG_REQ(Signal* signal) Uint32 result; jamEntry(); + c_triggerPool.getPtr(trigPtr, trigger_id); + c_tablePool.getPtr(tabPtr, trigPtr.p->tab_ptr_i); tabPtr.p->fragments.getPtr(fragPtr, frag_id); if (fragPtr.p->node != getOwnNodeId()) { + jam(); result = ZFALSE; } else { @@ -4014,12 +3906,12 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { TriggerPtr trigPtr; c_triggerPool.getPtr(trigPtr, trg->getTriggerId()); ndbrequire(trigPtr.p->event != ILLEGAL_TRIGGER_ID); // Online... 
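/* A reader-side sketch (an illustration, not part of the patch) of the
 * log-entry event encoding used in the code below and documented in
 * BackupFormat.hpp: the low bits of TriggerEvent carry a
 * TriggerEvent::TE_* value, and bit 0x10000 marks that the GCI word is
 * appended after the data. entryLen and currentGci are hypothetical
 * names used only for this example. */
const Uint32 ev = ntohl(logEntry->TriggerEvent);
const bool hasGci = (ev & 0x10000) != 0;  // GCI word follows Data[]
const Uint32 event = ev & 0xFFFF;         // TE_INSERT / TE_UPDATE / TE_DELETE
const Uint32 gci = hasGci ? ntohl(logEntry->Data[entryLen - 1]) : currentGci;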
- + if(trigPtr.p->errorCode != 0) { jam(); return; }//if - + if(trg->getAttrInfoType() == TrigAttrInfo::BEFORE_VALUES) { jam(); /** @@ -4056,18 +3948,29 @@ Backup::execTRIG_ATTRINFO(Signal* signal) { memcpy(signal->getDataPtrSend(), save, 4*TrigAttrInfo::StaticLength); return; }//if - + logEntry = (BackupFormat::LogFile::LogEntry *)dst; trigPtr.p->logEntry = logEntry; logEntry->Length = 0; logEntry->TableId = htonl(trigPtr.p->tableId); - logEntry->TriggerEvent = htonl(trigPtr.p->event); + + + if(trigPtr.p->event==0) + logEntry->TriggerEvent= htonl(TriggerEvent::TE_INSERT); + else if(trigPtr.p->event==1) + logEntry->TriggerEvent= htonl(TriggerEvent::TE_UPDATE); + else if(trigPtr.p->event==2) + logEntry->TriggerEvent= htonl(TriggerEvent::TE_DELETE); + else { + ndbout << "Bad Event: " << trigPtr.p->event << endl; + ndbrequire(false); + } } else { ndbrequire(logEntry->TableId == htonl(trigPtr.p->tableId)); - ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event)); +// ndbrequire(logEntry->TriggerEvent == htonl(trigPtr.p->event)); }//if - - const Uint32 pos = logEntry->Length; + + const Uint32 pos = logEntry->Length; const Uint32 dataLen = signal->length() - TrigAttrInfo::StaticLength; memcpy(&logEntry->Data[pos], trg->getData(), dataLen << 2); @@ -4082,6 +3985,7 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) const Uint32 gci = trg->getGCI(); const Uint32 trI = trg->getTriggerId(); + const Uint32 fragId = trg->fragId; TriggerPtr trigPtr; c_triggerPool.getPtr(trigPtr, trI); @@ -4095,19 +3999,19 @@ Backup::execFIRE_TRIG_ORD(Signal* signal) ndbrequire(trigPtr.p->logEntry != 0); Uint32 len = trigPtr.p->logEntry->Length; + trigPtr.p->logEntry->FragId = htonl(fragId); BackupRecordPtr ptr; c_backupPool.getPtr(ptr, trigPtr.p->backupPtr); - if(gci != ptr.p->currGCP) + if(gci != ptr.p->currGCP) { jam(); - - trigPtr.p->logEntry->TriggerEvent = htonl(trigPtr.p->event | 0x10000); + trigPtr.p->logEntry->TriggerEvent|= htonl(0x10000); trigPtr.p->logEntry->Data[len] = htonl(gci); - len ++; + len++; ptr.p->currGCP = gci; - }//if - + } + len += (sizeof(BackupFormat::LogFile::LogEntry) >> 2) - 2; trigPtr.p->logEntry->Length = htonl(len); @@ -4163,7 +4067,7 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) * At least one GCP must have passed */ ndbrequire(stopGCP > startGCP); - + /** * Get backup record */ @@ -4172,50 +4076,13 @@ Backup::execSTOP_BACKUP_REQ(Signal* signal) ptr.p->slaveState.setState(STOPPING); ptr.p->m_gsn = GSN_STOP_BACKUP_REQ; + ptr.p->startGCP= startGCP; + ptr.p->stopGCP= stopGCP; /** - * Insert footers + * Destroy the triggers in local DBTUP we created */ - { - BackupFilePtr filePtr; - ptr.p->files.getPtr(filePtr, ptr.p->logFilePtr); - Uint32 * dst; - ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, 1)); - * dst = 0; - filePtr.p->operation.dataBuffer.updateWritePtr(1); - } - - { - BackupFilePtr filePtr; - ptr.p->files.getPtr(filePtr, ptr.p->ctlFilePtr); - - const Uint32 gcpSz = sizeof(BackupFormat::CtlFile::GCPEntry) >> 2; - - Uint32 * dst; - ndbrequire(filePtr.p->operation.dataBuffer.getWritePtr(&dst, gcpSz)); - - BackupFormat::CtlFile::GCPEntry * gcp = - (BackupFormat::CtlFile::GCPEntry*)dst; - - gcp->SectionType = htonl(BackupFormat::GCP_ENTRY); - gcp->SectionLength = htonl(gcpSz); - gcp->StartGCP = htonl(startGCP); - gcp->StopGCP = htonl(stopGCP - 1); - filePtr.p->operation.dataBuffer.updateWritePtr(gcpSz); - } - - { - TablePtr tabPtr; - for(ptr.p->tables.first(tabPtr); tabPtr.i != RNIL; - ptr.p->tables.next(tabPtr)) - { - signal->theData[0] = tabPtr.p->tableId; - 
signal->theData[1] = 0; // unlock - EXECUTE_DIRECT(DBDICT, GSN_BACKUP_FRAGMENT_REQ, signal, 2); - } - } - - closeFiles(signal, ptr); + sendDropTrig(signal, ptr); } void @@ -4481,19 +4348,10 @@ Backup::execABORT_BACKUP_ORD(Signal* signal) ptr.p->masterRef = reference(); ptr.p->nodes.clear(); ptr.p->nodes.set(getOwnNodeId()); - - if(ref == reference()) - { - ptr.p->stopGCP= ptr.p->startGCP + 1; - sendDropTrig(signal, ptr); - } - else - { - ptr.p->masterData.gsn = GSN_STOP_BACKUP_REQ; - ptr.p->masterData.sendCounter.clearWaitingFor(); - ptr.p->masterData.sendCounter.setWaitingFor(getOwnNodeId()); - closeFiles(signal, ptr); - } + + + ptr.p->stopGCP= ptr.p->startGCP + 1; + sendStopBackup(signal, ptr); } diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.hpp b/storage/ndb/src/kernel/blocks/backup/Backup.hpp index 2144ddeac11..64a34ca5147 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.hpp +++ b/storage/ndb/src/kernel/blocks/backup/Backup.hpp @@ -96,8 +96,6 @@ protected: void execGET_TABINFO_CONF(Signal* signal); void execCREATE_TRIG_REF(Signal* signal); void execCREATE_TRIG_CONF(Signal* signal); - void execALTER_TRIG_REF(Signal* signal); - void execALTER_TRIG_CONF(Signal* signal); void execDROP_TRIG_REF(Signal* signal); void execDROP_TRIG_CONF(Signal* signal); @@ -426,6 +424,7 @@ public: Uint32 clientRef; Uint32 clientData; Uint32 flags; + Uint32 signalNo; Uint32 backupId; Uint32 backupKey[2]; Uint32 masterRef; @@ -451,7 +450,18 @@ public: Uint32 backupDataLen; // Used for (un)packing backup request Array pages; // Used for (un)packing backup request SimpleProperties props;// Used for (un)packing backup request - + + struct SlaveData { + SignalCounter trigSendCounter; + Uint32 gsn; + struct { + Uint32 tableId; + } createTrig; + struct { + Uint32 tableId; + } dropTrig; + } slaveData; + struct MasterData { MasterData(Backup & b) { @@ -462,15 +472,6 @@ public: Uint32 gsn; SignalCounter sendCounter; Uint32 errorCode; - struct { - Uint32 tableId; - } createTrig; - struct { - Uint32 tableId; - } dropTrig; - struct { - Uint32 tableId; - } alterTrig; union { struct { Uint32 startBackup; @@ -563,7 +564,7 @@ public: void defineBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId); void createTrigReply(Signal* signal, BackupRecordPtr ptr); void alterTrigReply(Signal* signal, BackupRecordPtr ptr); - void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32, Uint32); + void startBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32); void stopBackupReply(Signal* signal, BackupRecordPtr ptr, Uint32 nodeId); void defineBackupRef(Signal*, BackupRecordPtr, Uint32 errCode = 0); diff --git a/storage/ndb/src/kernel/blocks/backup/Backup.txt b/storage/ndb/src/kernel/blocks/backup/Backup.txt index acc9efff02d..38b93f2d3c4 100644 --- a/storage/ndb/src/kernel/blocks/backup/Backup.txt +++ b/storage/ndb/src/kernel/blocks/backup/Backup.txt @@ -25,15 +25,12 @@ BACKUP_REQ <------------------------------- BACKUP_CONF <---------------- - CREATE_TRIG - --------------> (If master crashes here -> rouge triggers/memory leak) - <-------------- START_BACKUP ------------------------------> + CREATE_TRIG + --------------> + <-------------- <------------------------------ - ALTER_TRIG - --------------> - <-------------- WAIT_GCP --------------> <-------------- @@ -46,11 +43,11 @@ BACKUP_CONF WAIT_GCP --------------> <-------------- - DROP_TRIG - --------------> - <-------------- STOP_BACKUP ------------------------------> + DROP_TRIG + --------------> + <-------------- <------------------------------ 
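In the revised protocol the participants, not the master, tear down the
triggers: STOP_BACKUP_REQ carries the GCP window, and each slave drops its
local DBTUP triggers before writing its footers and closing its files. A
condensed sketch of the slave-side order (function names taken from the
Backup.cpp changes in this patch; control flow simplified):

  execSTOP_BACKUP_REQ : store startGCP/stopGCP, then sendDropTrig()
  sendDropTrig        : three DROP_TRIG_REQ per table, sent to DBTUP
  (all tables done)   : write log-file footer and ctl-file GCP entry,
                        unlock the tables, closeFiles()
  closeFiles          : STOP_BACKUP_CONF back to the master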
BACKUP_COMPLETE_REP <---------------- diff --git a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp index 76c1f1aedad..92680a5b6c9 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupFormat.hpp @@ -142,7 +142,8 @@ struct BackupFormat { Uint32 Length; Uint32 TableId; // If TriggerEvent & 0x10000 == true then GCI is right after data - Uint32 TriggerEvent; + Uint32 TriggerEvent; + Uint32 FragId; Uint32 Data[1]; // Len = Length - 2 }; }; diff --git a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp index d99ff7950c4..a10b3d6e11a 100644 --- a/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp +++ b/storage/ndb/src/kernel/blocks/backup/BackupInit.cpp @@ -62,9 +62,6 @@ Backup::Backup(const Configuration & conf) : addRecSignal(GSN_CREATE_TRIG_REF, &Backup::execCREATE_TRIG_REF); addRecSignal(GSN_CREATE_TRIG_CONF, &Backup::execCREATE_TRIG_CONF); - addRecSignal(GSN_ALTER_TRIG_REF, &Backup::execALTER_TRIG_REF); - addRecSignal(GSN_ALTER_TRIG_CONF, &Backup::execALTER_TRIG_CONF); - addRecSignal(GSN_DROP_TRIG_REF, &Backup::execDROP_TRIG_REF); addRecSignal(GSN_DROP_TRIG_CONF, &Backup::execDROP_TRIG_CONF); diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp index 2ebf06a0219..2d28e8c7a3b 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp @@ -408,6 +408,9 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, union { char tableName[MAX_TAB_NAME_SIZE]; char frmData[MAX_FRM_DATA_SIZE]; + char rangeData[16*MAX_NDB_PARTITIONS]; + char ngData[2*MAX_NDB_PARTITIONS]; + char tsData[2*2*MAX_NDB_PARTITIONS]; char defaultValue[MAX_ATTR_DEFAULT_VALUE_SIZE]; char attributeName[MAX_ATTR_NAME_SIZE]; }; @@ -434,13 +437,16 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, w.add(DictTabInfo::TableKValue, tablePtr.p->kValue); w.add(DictTabInfo::FragmentTypeVal, tablePtr.p->fragmentType); w.add(DictTabInfo::TableTypeVal, tablePtr.p->tableType); - - if(!signal) - { - w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount); - } - else + w.add(DictTabInfo::MaxRowsLow, tablePtr.p->maxRowsLow); + w.add(DictTabInfo::MaxRowsHigh, tablePtr.p->maxRowsHigh); + w.add(DictTabInfo::DefaultNoPartFlag, tablePtr.p->defaultNoPartFlag); + w.add(DictTabInfo::LinearHashFlag, tablePtr.p->linearHashFlag); + w.add(DictTabInfo::FragmentCount, tablePtr.p->fragmentCount); + + if(signal) { + /* This branch runs for GET_TABINFOREQ */ + Uint32 * theData = signal->getDataPtrSend(); CreateFragmentationReq * const req = (CreateFragmentationReq*)theData; req->senderRef = 0; @@ -450,18 +456,16 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, req->primaryTableId = tablePtr.i; EXECUTE_DIRECT(DBDIH, GSN_CREATE_FRAGMENTATION_REQ, signal, CreateFragmentationReq::SignalLength); - if(signal->theData[0] == 0) - { - Uint16 *data = (Uint16*)&signal->theData[25]; - Uint32 count = 2 + data[0] * data[1]; - w.add(DictTabInfo::FragmentDataLen, 2*count); - w.add(DictTabInfo::FragmentData, data, 2*count); - ndbrequire(count > 0); - } - else - { - ndbrequire(false); - } + ndbrequire(signal->theData[0] == 0); + Uint16 *data = (Uint16*)&signal->theData[25]; + Uint32 count = 2 + data[0] * data[1]; + w.add(DictTabInfo::ReplicaDataLen, 2*count); + w.add(DictTabInfo::ReplicaData, data, 2*count); + } + else + { + /* This part runs for CREATE_TABLEREQ, 
ALTER_TABLEREQ */ + ; } if (tablePtr.p->primaryTableId != RNIL){ @@ -480,10 +484,27 @@ Dbdict::packTableIntoPages(SimpleProperties::Writer & w, ConstRope frm(c_rope_pool, tablePtr.p->frmData); frm.copy(frmData); - w.add(DictTabInfo::FrmLen, frm.size()); w.add(DictTabInfo::FrmData, frmData, frm.size()); + { + jam(); + ConstRope ts(c_rope_pool, tablePtr.p->tsData); + ts.copy(tsData); + w.add(DictTabInfo::TablespaceDataLen, ts.size()); + w.add(DictTabInfo::TablespaceData, tsData, ts.size()); + + ConstRope ng(c_rope_pool, tablePtr.p->ngData); + ng.copy(ngData); + w.add(DictTabInfo::FragmentDataLen, ng.size()); + w.add(DictTabInfo::FragmentData, ngData, ng.size()); + + ConstRope range(c_rope_pool, tablePtr.p->rangeData); + range.copy(rangeData); + w.add(DictTabInfo::RangeListDataLen, range.size()); + w.add(DictTabInfo::RangeListData, rangeData, range.size()); + } + if(tablePtr.p->m_tablespace_id != RNIL) { w.add(DictTabInfo::TablespaceId, tablePtr.p->m_tablespace_id); @@ -1797,8 +1818,6 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->gciTableCreated = 0; tablePtr.p->noOfAttributes = ZNIL; tablePtr.p->noOfNullAttr = 0; - tablePtr.p->ngLen = 0; - memset(tablePtr.p->ngData, 0, sizeof(tablePtr.p->ngData)); tablePtr.p->fragmentCount = 0; /* tablePtr.p->lh3PageIndexBits = 0; @@ -1811,6 +1830,10 @@ void Dbdict::initialiseTableRecord(TableRecordPtr tablePtr) tablePtr.p->minLoadFactor = 70; tablePtr.p->noOfPrimkey = 1; tablePtr.p->tupKeyLength = 1; + tablePtr.p->maxRowsLow = 0; + tablePtr.p->maxRowsHigh = 0; + tablePtr.p->defaultNoPartFlag = true; + tablePtr.p->linearHashFlag = true; tablePtr.p->m_bits = 0; tablePtr.p->tableType = DictTabInfo::UserTable; tablePtr.p->primaryTableId = RNIL; @@ -2033,8 +2056,30 @@ void Dbdict::execREAD_CONFIG_REQ(Signal* signal) c_schemaOp.setSize(8); //c_opDropObj.setSize(8); c_Trans.setSize(8); - c_rope_pool.setSize(100000/28); + Uint32 rps = 0; + rps += tablerecSize * (MAX_TAB_NAME_SIZE + MAX_FRM_DATA_SIZE); + rps += attributesize * (MAX_ATTR_NAME_SIZE + MAX_ATTR_DEFAULT_VALUE_SIZE); + rps += c_maxNoOfTriggers * MAX_TAB_NAME_SIZE; + rps += (10 + 10) * MAX_TAB_NAME_SIZE; + + Uint32 sm = 5; + ndb_mgm_get_int_parameter(p, CFG_DB_STRING_MEMORY, &sm); + if (sm == 0) + sm = 5; + + Uint32 sb = 0; + if (sm < 100) + { + sb = (rps * sm) / 100; + } + else + { + sb = sm; + } + + c_rope_pool.setSize(sb/28 + 100); + // Initialize BAT for interface to file system NewVARIABLE* bat = allocateBat(2); bat[0].WA = &c_schemaPageRecordArray.getPtr(0)->word[0]; @@ -3608,15 +3653,15 @@ Dbdict::execCREATE_TABLE_REQ(Signal* signal){ Uint32 key = c_opRecordSequence + 1; Uint32 *theData = signal->getDataPtrSend(), i; - Uint16 *node_group= (Uint16*)&signal->theData[25]; + Uint16 *frag_data= (Uint16*)&signal->theData[25]; CreateFragmentationReq * const req = (CreateFragmentationReq*)theData; req->senderRef = reference(); req->senderData = key; req->primaryTableId = parseRecord.tablePtr.p->primaryTableId; - req->noOfFragments = parseRecord.tablePtr.p->ngLen >> 1; + req->noOfFragments = parseRecord.tablePtr.p->fragmentCount; req->fragmentationType = parseRecord.tablePtr.p->fragmentType; - for (i = 0; i < req->noOfFragments; i++) - node_group[i] = parseRecord.tablePtr.p->ngData[i]; + MEMCOPY_NO_WORDS(frag_data, c_fragData, c_fragDataLen); + if (parseRecord.tablePtr.p->isOrderedIndex()) { jam(); // ordered index has same fragmentation as the table @@ -4520,6 +4565,9 @@ int Dbdict::handleAlterTab(AlterTabReq * req, ndbrequire(org.assign(tmp, src.size())); } +/* + TODO RONM: 
Some new code is needed for FragmentData and RangeOrListData +*/ if (supportedAlteration) { // Set new schema version @@ -4727,11 +4775,12 @@ Dbdict::execCREATE_FRAGMENTATION_CONF(Signal* signal){ packTableIntoPages(w, tabPtr); SegmentedSectionPtr spDataPtr; + Ptr tmpTsPtr; w.getPtr(spDataPtr); signal->setSection(spDataPtr, CreateTabReq::DICT_TAB_INFO); signal->setSection(fragDataPtr, CreateTabReq::FRAGMENTATION); - + NodeReceiverGroup rg(DBDICT, c_aliveNodes); SafeCounter tmp(c_counterMgr, createTabPtr.p->m_coordinatorData.m_counter); createTabPtr.p->m_coordinatorData.m_gsn = GSN_CREATE_TAB_REQ; @@ -5109,6 +5158,9 @@ Dbdict::createTab_dih(Signal* signal, req->schemaVersion = tabPtr.p->tableVersion; req->primaryTableId = tabPtr.p->primaryTableId; +/* + Need to fetch fragDataPtr from the table object instead +*/ if(!fragDataPtr.isNull()){ signal->setSection(fragDataPtr, DiAddTabReq::FRAGMENTATION); } @@ -5203,6 +5255,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { Uint32 fragCount = req->totalFragments; Uint32 requestInfo = req->requestInfo; Uint32 startGci = req->startGci; + Uint32 tablespace_id= req->tablespaceId; Uint32 logPart = req->logPartId; ndbrequire(node == getOwnNodeId()); @@ -5258,6 +5311,7 @@ Dbdict::execADD_FRAGREQ(Signal* signal) { req->tableType = tabPtr.p->tableType; req->primaryTableId = tabPtr.p->primaryTableId; req->tablespace_id= tabPtr.p->m_tablespace_id; + //req->tablespace_id= tablespace_id; req->logPartId = logPart; sendSignal(DBLQH_REF, GSN_LQHFRAGREQ, signal, LqhFragReq::SignalLength, JBB); @@ -5740,8 +5794,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, it.first(); SimpleProperties::UnpackStatus status; - DictTabInfo::Table tableDesc; tableDesc.init(); - status = SimpleProperties::unpack(it, &tableDesc, + c_tableDesc.init(); + status = SimpleProperties::unpack(it, &c_tableDesc, DictTabInfo::TableMapping, DictTabInfo::TableMappingSize, true, true); @@ -5767,12 +5821,12 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, // Verify that table name is an allowed table name. // TODO /* ---------------------------------------------------------------- */ - const Uint32 tableNameLength = strlen(tableDesc.TableName) + 1; - const Uint32 name_hash = Rope::hash(tableDesc.TableName, tableNameLength); + const Uint32 tableNameLength = strlen(c_tableDesc.TableName) + 1; + const Uint32 name_hash = Rope::hash(c_tableDesc.TableName, tableNameLength); if(checkExist){ jam(); - tabRequire(get_object(tableDesc.TableName, tableNameLength) == 0, + tabRequire(get_object(c_tableDesc.TableName, tableNameLength) == 0, CreateTableRef::TableAlreadyExist); } @@ -5783,7 +5837,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, } case DictTabInfo::AlterTableFromAPI:{ jam(); - tablePtr.i = getFreeTableRecord(tableDesc.PrimaryTableId); + tablePtr.i = getFreeTableRecord(c_tableDesc.PrimaryTableId); /* ---------------------------------------------------------------- */ // Check if no free tables existed. 
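/* A worked example (illustrative numbers, not from the patch) of the
 * StringMemory-based rope-pool sizing added in execREAD_CONFIG_REQ above: */
// Suppose the summed worst case is rps = 2800000 bytes and StringMemory
// keeps its default of 5; since sm < 100 it is read as a percentage:
//   sb = (2800000 * 5) / 100 = 140000 bytes
//   c_rope_pool.setSize(140000 / 28 + 100)  ->  5100 rope segments
// A StringMemory value > 100 is instead taken as an absolute byte count
// (sb = sm).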
/* ---------------------------------------------------------------- */ @@ -5799,7 +5853,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, /* ---------------------------------------------------------------- */ // Get table id and check that table doesn't already exist /* ---------------------------------------------------------------- */ - tablePtr.i = tableDesc.TableId; + tablePtr.i = c_tableDesc.TableId; if (parseP->requestType == DictTabInfo::ReadTableFromDiskSR) { ndbrequire(tablePtr.i == c_restartRecord.activeTable); @@ -5821,7 +5875,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, /* ---------------------------------------------------------------- */ // Set table version /* ---------------------------------------------------------------- */ - Uint32 tableVersion = tableDesc.TableVersion; + Uint32 tableVersion = c_tableDesc.TableVersion; tablePtr.p->tableVersion = tableVersion; break; @@ -5834,7 +5888,8 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, { Rope name(c_rope_pool, tablePtr.p->tableName); - ndbrequire(name.assign(tableDesc.TableName, tableNameLength, name_hash)); + tabRequire(name.assign(c_tableDesc.TableName, tableNameLength, name_hash), + CreateTableRef::OutOfStringBuffer); } Ptr obj_ptr; @@ -5842,7 +5897,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, jam(); ndbrequire(c_obj_hash.seize(obj_ptr)); obj_ptr.p->m_id = tablePtr.i; - obj_ptr.p->m_type = tableDesc.TableType; + obj_ptr.p->m_type = c_tableDesc.TableType; obj_ptr.p->m_name = tablePtr.p->tableName; obj_ptr.p->m_ref_count = 0; c_obj_hash.add(obj_ptr); @@ -5850,42 +5905,59 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, #ifdef VM_TRACE ndbout_c("Dbdict: name=%s,id=%u,obj_ptr_i=%d", - tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i); + c_tableDesc.TableName, tablePtr.i, tablePtr.p->m_obj_ptr_i); #endif } - tablePtr.p->noOfAttributes = tableDesc.NoOfAttributes; + tablePtr.p->noOfAttributes = c_tableDesc.NoOfAttributes; tablePtr.p->m_bits |= - (tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0); + (c_tableDesc.TableLoggedFlag ? TableRecord::TR_Logged : 0); tablePtr.p->m_bits |= - (tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0); + (c_tableDesc.RowChecksumFlag ? TableRecord::TR_RowChecksum : 0); tablePtr.p->m_bits |= - (tableDesc.RowGCIFlag ? TableRecord::TR_RowGCI : 0); - tablePtr.p->minLoadFactor = tableDesc.MinLoadFactor; - tablePtr.p->maxLoadFactor = tableDesc.MaxLoadFactor; - tablePtr.p->fragmentType = (DictTabInfo::FragmentType)tableDesc.FragmentType; - tablePtr.p->tableType = (DictTabInfo::TableType)tableDesc.TableType; - tablePtr.p->kValue = tableDesc.TableKValue; - tablePtr.p->fragmentCount = tableDesc.FragmentCount; - tablePtr.p->m_tablespace_id = tableDesc.TablespaceId; + (c_tableDesc.RowGCIFlag ? 
TableRecord::TR_RowGCI : 0); + tablePtr.p->minLoadFactor = c_tableDesc.MinLoadFactor; + tablePtr.p->maxLoadFactor = c_tableDesc.MaxLoadFactor; + tablePtr.p->fragmentType = (DictTabInfo::FragmentType)c_tableDesc.FragmentType; + tablePtr.p->tableType = (DictTabInfo::TableType)c_tableDesc.TableType; + tablePtr.p->kValue = c_tableDesc.TableKValue; + tablePtr.p->fragmentCount = c_tableDesc.FragmentCount; + tablePtr.p->m_tablespace_id = c_tableDesc.TablespaceId; + tablePtr.p->maxRowsLow = c_tableDesc.MaxRowsLow; + tablePtr.p->maxRowsHigh = c_tableDesc.MaxRowsHigh; + tablePtr.p->defaultNoPartFlag = c_tableDesc.DefaultNoPartFlag; + tablePtr.p->linearHashFlag = c_tableDesc.LinearHashFlag; { Rope frm(c_rope_pool, tablePtr.p->frmData); - ndbrequire(frm.assign(tableDesc.FrmData, tableDesc.FrmLen)); + tabRequire(frm.assign(c_tableDesc.FrmData, c_tableDesc.FrmLen), + CreateTableRef::OutOfStringBuffer); + Rope range(c_rope_pool, tablePtr.p->rangeData); + tabRequire(range.assign(c_tableDesc.RangeListData, + c_tableDesc.RangeListDataLen), + CreateTableRef::OutOfStringBuffer); + Rope fd(c_rope_pool, tablePtr.p->ngData); + tabRequire(fd.assign((const char*)c_tableDesc.FragmentData, + c_tableDesc.FragmentDataLen), + CreateTableRef::OutOfStringBuffer); + Rope ts(c_rope_pool, tablePtr.p->tsData); + tabRequire(ts.assign((const char*)c_tableDesc.TablespaceData, + c_tableDesc.TablespaceDataLen), + CreateTableRef::OutOfStringBuffer); } - tablePtr.p->ngLen = tableDesc.FragmentDataLen; - memcpy(tablePtr.p->ngData, tableDesc.FragmentData, - tableDesc.FragmentDataLen); + c_fragDataLen = c_tableDesc.FragmentDataLen; + memcpy(c_fragData, c_tableDesc.FragmentData, + c_tableDesc.FragmentDataLen); - if(tableDesc.PrimaryTableId != RNIL) { + if(c_tableDesc.PrimaryTableId != RNIL) { - tablePtr.p->primaryTableId = tableDesc.PrimaryTableId; - tablePtr.p->indexState = (TableRecord::IndexState)tableDesc.IndexState; - tablePtr.p->insertTriggerId = tableDesc.InsertTriggerId; - tablePtr.p->updateTriggerId = tableDesc.UpdateTriggerId; - tablePtr.p->deleteTriggerId = tableDesc.DeleteTriggerId; - tablePtr.p->customTriggerId = tableDesc.CustomTriggerId; + tablePtr.p->primaryTableId = c_tableDesc.PrimaryTableId; + tablePtr.p->indexState = (TableRecord::IndexState)c_tableDesc.IndexState; + tablePtr.p->insertTriggerId = c_tableDesc.InsertTriggerId; + tablePtr.p->updateTriggerId = c_tableDesc.UpdateTriggerId; + tablePtr.p->deleteTriggerId = c_tableDesc.DeleteTriggerId; + tablePtr.p->customTriggerId = c_tableDesc.CustomTriggerId; } else { tablePtr.p->primaryTableId = RNIL; tablePtr.p->indexState = TableRecord::IS_UNDEFINED; @@ -5897,7 +5969,7 @@ void Dbdict::handleTabInfoInit(SimpleProperties::Reader & it, tablePtr.p->buildTriggerId = RNIL; tablePtr.p->indexLocal = 0; - handleTabInfo(it, parseP, tableDesc); + handleTabInfo(it, parseP, c_tableDesc); if(parseP->errorCode != 0) { @@ -5986,7 +6058,13 @@ void Dbdict::handleTabInfo(SimpleProperties::Reader & it, */ { Rope name(c_rope_pool, attrPtr.p->attributeName); - name.assign(attrDesc.AttributeName, len, name_hash); + if (!name.assign(attrDesc.AttributeName, len, name_hash)) + { + jam(); + parseP->errorCode = CreateTableRef::OutOfStringBuffer; + parseP->errorLine = __LINE__; + return; + } } attrPtr.p->attributeId = i; //attrPtr.p->attributeId = attrDesc.AttributeId; @@ -7460,10 +7538,9 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal) // save name and index table properties signal->getSection(ssPtr, CreateIndxReq::INDEX_NAME_SECTION); SimplePropertiesSectionReader r1(ssPtr, 
getSectionSegmentPool()); - DictTabInfo::Table tableDesc; - tableDesc.init(); + c_tableDesc.init(); SimpleProperties::UnpackStatus status = SimpleProperties::unpack( - r1, &tableDesc, + r1, &c_tableDesc, DictTabInfo::TableMapping, DictTabInfo::TableMappingSize, true, true); if (status != SimpleProperties::Eof) { @@ -7473,8 +7550,8 @@ Dbdict::execCREATE_INDX_REQ(Signal* signal) createIndex_sendReply(signal, opPtr, opPtr.p->m_isMaster); return; } - memcpy(opPtr.p->m_indexName, tableDesc.TableName, MAX_TAB_NAME_SIZE); - opPtr.p->m_storedIndex = tableDesc.TableLoggedFlag; + memcpy(opPtr.p->m_indexName, c_tableDesc.TableName, MAX_TAB_NAME_SIZE); + opPtr.p->m_storedIndex = c_tableDesc.TableLoggedFlag; releaseSections(signal); // master expects to hear from all if (opPtr.p->m_isMaster) @@ -13097,7 +13174,7 @@ Dbdict::getTableKeyList(TableRecordPtr tablePtr, list.id[list.sz++] = attrPtr.p->attributeId; } } - ndbrequire(list.sz == tablePtr.p->noOfPrimkey + 1); + ndbrequire(list.sz == (uint)(tablePtr.p->noOfPrimkey + 1)); ndbrequire(list.sz <= MAX_ATTRIBUTES_IN_INDEX + 1); } @@ -14719,7 +14796,7 @@ Dbdict::create_fg_prepare_start(Signal* signal, SchemaOp* op){ { Rope name(c_rope_pool, obj_ptr.p->m_name); if(!name.assign(fg.FilegroupName, len, hash)){ - op->m_errorCode = CreateTableRef::TableNameTooLong; + op->m_errorCode = CreateTableRef::OutOfStringBuffer; break; } } @@ -14958,7 +15035,7 @@ Dbdict::create_file_prepare_start(Signal* signal, SchemaOp* op){ { Rope name(c_rope_pool, obj_ptr.p->m_name); if(!name.assign(f.FileName, len, hash)){ - op->m_errorCode = CreateTableRef::TableNameTooLong; + op->m_errorCode = CreateTableRef::OutOfStringBuffer; break; } } diff --git a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp index 7ad5e0d8b49..293632bb8b8 100644 --- a/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp +++ b/storage/ndb/src/kernel/blocks/dbdict/Dbdict.hpp @@ -203,6 +203,8 @@ public: */ struct TableRecord { TableRecord(){} + Uint32 maxRowsLow; + Uint32 maxRowsHigh; /* Table id (array index in DICT and other blocks) */ Uint32 tableId; Uint32 m_obj_ptr_i; @@ -268,6 +270,16 @@ public: */ Uint8 maxLoadFactor; + /* + Flag to indicate default number of partitions + */ + bool defaultNoPartFlag; + + /* + Flag to indicate using linear hash function + */ + bool linearHashFlag; + /* * Used when shrinking to decide when to merge buckets. Hysteresis * is thus possible. Should be smaller but not much smaller than @@ -353,10 +365,9 @@ public: /** frm data for this table */ RopeHandle frmData; - /** Node Group and Tablespace id for this table */ - /** TODO Could preferrably be made dynamic size */ - Uint32 ngLen; - Uint16 ngData[MAX_NDB_PARTITIONS]; + RopeHandle tsData; + RopeHandle ngData; + RopeHandle rangeData; Uint32 fragmentCount; Uint32 m_tablespace_id; @@ -365,6 +376,15 @@ public: typedef Ptr TableRecordPtr; ArrayPool c_tableRecordPool; + /** Node Group and Tablespace id+version + range or list data. + * This is only stored temporarily in DBDICT during an ongoing + * change. + * TODO RONM: Look into improvements of this + */ + Uint32 c_fragDataLen; + Uint16 c_fragData[MAX_NDB_PARTITIONS]; + Uint32 c_tsIdData[2*MAX_NDB_PARTITIONS]; + /** * Triggers. This is volatile data not saved on disk. Setting a * trigger online creates the trigger in TC (if index) and LQH-TUP. @@ -504,6 +524,8 @@ public: CArray c_schemaPageRecordArray; + DictTabInfo::Table c_tableDesc; + /** * A page for create index table signal. 
*/ diff --git a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp index 2938d1197aa..de9524d1aee 100644 --- a/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp +++ b/storage/ndb/src/kernel/blocks/dbdih/DbdihMain.cpp @@ -6398,7 +6398,6 @@ void Dbdih::execDIRELEASEREQ(Signal* signal) *************************************** */ -#define UNDEF_NODEGROUP 65535 static inline void inc_node_or_group(Uint32 &node, Uint32 max_node) { Uint32 next = node + 1; diff --git a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp index be9f988cb7d..6ae79dd73e7 100644 --- a/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp +++ b/storage/ndb/src/kernel/blocks/dblqh/DblqhMain.cpp @@ -5785,6 +5785,9 @@ void Dblqh::execCOMPLETE(Signal* signal) errorReport(signal, 1); return; }//if + if (ERROR_INSERTED(5042)) { + ndbrequire(false); + } if (ERROR_INSERTED(5013)) { CLEAR_ERROR_INSERT_VALUE; sendSignalWithDelay(cownref, GSN_COMPLETE, signal, 2000, 3); diff --git a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp index 0a3c4919a28..e49ca17b880 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp +++ b/storage/ndb/src/kernel/blocks/dbtup/Dbtup.hpp @@ -777,6 +777,10 @@ struct TupTriggerData { /** * Trigger id, used by DICT/TRIX to identify the trigger + * + * trigger Ids are unique per block for SUBSCRIPTION triggers. + * This is so that BACKUP can use TUP triggers directly and delete them + * properly. */ Uint32 triggerId; @@ -2012,7 +2016,9 @@ private: bool createTrigger(Tablerec* table, const CreateTrigReq* req); - Uint32 dropTrigger(Tablerec* table, const DropTrigReq* req); + Uint32 dropTrigger(Tablerec* table, + const DropTrigReq* req, + BlockNumber sender); void checkImmediateTriggersAfterInsert(KeyReqStruct *req_struct, diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp index 643863b31a1..1f393b036e3 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupMeta.cpp @@ -534,6 +534,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) CreateFilegroupImplReq rep; if(regTabPtr.p->m_no_of_disk_attributes) { + ljam(); Tablespace_client tsman(0, c_tsman, 0, 0, regFragPtr.p->m_tablespace_id); ndbrequire(tsman.get_tablespace_info(&rep) == 0); @@ -545,11 +546,14 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) if (regTabPtr.p->m_no_of_disk_attributes) { + ljam(); if(!(getNodeState().getSystemRestartInProgress() && getNodeState().startLevel == NodeState::SL_STARTING && getNodeState().starting.startPhase <= 4)) { Callback cb; + ljam(); + cb.m_callbackData= fragOperPtr.i; cb.m_callbackFunction = safe_cast(&Dbtup::undo_createtable_callback); @@ -562,6 +566,7 @@ void Dbtup::execTUP_ADD_ATTRREQ(Signal* signal) int res= lgman.get_log_buffer(signal, sz, &cb); switch(res){ case 0: + ljam(); signal->theData[0] = 1; return; case -1: diff --git a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp index 080c9cbb589..7f402bfdd43 100644 --- a/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp +++ b/storage/ndb/src/kernel/blocks/dbtup/DbtupTrigger.cpp @@ -186,7 +186,7 @@ Dbtup::execDROP_TRIG_REQ(Signal* signal) ptrCheckGuard(tabPtr, cnoOfTablerec, tablerec); // Drop trigger - Uint32 r = dropTrigger(tabPtr.p, req); + Uint32 r = dropTrigger(tabPtr.p, req, refToBlock(senderRef)); if (r == 0){ // Send conf DropTrigConf* 
const conf = (DropTrigConf*)signal->getDataPtrSend(); @@ -318,7 +318,7 @@ Dbtup::primaryKey(Tablerec* const regTabPtr, Uint32 attrId) /* */ /* ---------------------------------------------------------------- */ Uint32 -Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req) +Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req, BlockNumber sender) { if (ERROR_INSERTED(4004)) { CLEAR_ERROR_INSERT_VALUE; @@ -330,7 +330,7 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req) TriggerActionTime::Value ttime = req->getTriggerActionTime(); TriggerEvent::Value tevent = req->getTriggerEvent(); - // ndbout_c("Drop TupTrigger %u = %u %u %u %u", triggerId, table, ttype, ttime, tevent); + // ndbout_c("Drop TupTrigger %u = %u %u %u %u by %u", triggerId, table, ttype, ttime, tevent, sender); ArrayList* tlist = findTriggerList(table, ttype, ttime, tevent); ndbrequire(tlist != NULL); @@ -339,6 +339,19 @@ Dbtup::dropTrigger(Tablerec* table, const DropTrigReq* req) for (tlist->first(ptr); !ptr.isNull(); tlist->next(ptr)) { ljam(); if (ptr.p->triggerId == triggerId) { + if(ttype==TriggerType::SUBSCRIPTION && sender != ptr.p->m_receiverBlock) + { + /** + * You can only drop your own triggers for subscription triggers. + * Trigger IDs are private for each block. + * + * SUMA encodes information in the triggerId + * + * Backup doesn't really care about the Ids though. + */ + ljam(); + continue; + } ljam(); tlist->release(ptr.i); return 0; @@ -916,7 +929,8 @@ bool Dbtup::readTriggerInfo(TupTriggerData* const trigPtr, req_struct->m_tuple_ptr= save; ndbrequire(ret != -1); noBeforeWords = ret; - if ((noAfterWords == noBeforeWords) && + if (trigPtr->m_receiverBlock != SUMA && + (noAfterWords == noBeforeWords) && (memcmp(afterBuffer, beforeBuffer, noAfterWords << 2) == 0)) { //-------------------------------------------------------------------- // Although a trigger was fired it was not necessary since the old diff --git a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp index b6ca421064f..d2f4c7d57cc 100644 --- a/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp +++ b/storage/ndb/src/kernel/blocks/ndbcntr/NdbcntrMain.cpp @@ -203,6 +203,20 @@ void Ndbcntr::execSYSTEM_ERROR(Signal* signal) killingNode, data1); break; + case SystemError::CopySubscriptionRef: + BaseString::snprintf(buf, sizeof(buf), + "Node %d killed this node because " + "it could not copy a subscription during node restart. " + "Copy subscription error code: %u.", + killingNode, data1); + break; + case SystemError::CopySubscriberRef: + BaseString::snprintf(buf, sizeof(buf), + "Node %d killed this node because " + "it could not start a subscriber during node restart. 
" + "Copy subscription error code: %u.", + killingNode, data1); + break; default: BaseString::snprintf(buf, sizeof(buf), "System error %d, " " this node was killed by node %d", diff --git a/storage/ndb/src/kernel/blocks/suma/Suma.cpp b/storage/ndb/src/kernel/blocks/suma/Suma.cpp index 44ff6e97110..5b929c30817 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.cpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.cpp @@ -44,6 +44,7 @@ #include #include #include +#include #include @@ -690,7 +691,7 @@ Suma::execNODE_FAILREP(Signal* signal){ if(failed.get(Restart.nodeId)) { - Restart.nodeId = 0; + Restart.resetRestart(signal); } signal->theData[0] = SumaContinueB::RESEND_BUCKET; @@ -1392,6 +1393,8 @@ Suma::initTable(Signal *signal, Uint32 tableId, TablePtr &tabPtr) DBUG_PRINT("info",("Suma::Table[%u,i=%u]::n_subscribers: %u", tabPtr.p->m_tableId, tabPtr.i, tabPtr.p->n_subscribers)); + tabPtr.p->m_reportAll = false; + tabPtr.p->m_error = 0; tabPtr.p->m_schemaVersion = RNIL; tabPtr.p->m_state = Table::DEFINING; @@ -3756,7 +3759,33 @@ Suma::execSUB_CREATE_REF(Signal* signal) { jamEntry(); DBUG_ENTER("Suma::execSUB_CREATE_REF"); ndbassert(signal->getNoOfSections() == 0); - ndbrequire(false); + SubCreateRef *const ref= (SubCreateRef *)signal->getDataPtr(); + Uint32 error= ref->errorCode; + if (error != 1415) + { + /* + * This will happen if an api node connects during while other node + * is restarting, and in this case the subscription will already + * have been created. + * ToDo: more complete handling of api nodes joining during + * node restart + */ + Uint32 senderRef = signal->getSendersBlockRef(); + BlockReference cntrRef = calcNdbCntrBlockRef(refToNode(senderRef)); + // for some reason we did not manage to create a subscription + // on the starting node + SystemError * const sysErr = (SystemError*)&signal->theData[0]; + sysErr->errorCode = SystemError::CopySubscriptionRef; + sysErr->errorRef = reference(); + sysErr->data1 = error; + sysErr->data2 = 0; + sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal, + SystemError::SignalLength, JBB); + Restart.resetRestart(signal); + DBUG_VOID_RETURN; + } + // SubCreateConf has same signaldata as SubCreateRef + Restart.runSUB_CREATE_CONF(signal); DBUG_VOID_RETURN; } @@ -3785,7 +3814,22 @@ Suma::execSUB_START_REF(Signal* signal) { jamEntry(); DBUG_ENTER("Suma::execSUB_START_REF"); ndbassert(signal->getNoOfSections() == 0); - ndbrequire(false); + SubStartRef *const ref= (SubStartRef *)signal->getDataPtr(); + Uint32 error= ref->errorCode; + { + Uint32 senderRef = signal->getSendersBlockRef(); + BlockReference cntrRef = calcNdbCntrBlockRef(refToNode(senderRef)); + // for some reason we did not manage to start a subscriber + // on the starting node + SystemError * const sysErr = (SystemError*)&signal->theData[0]; + sysErr->errorCode = SystemError::CopySubscriberRef; + sysErr->errorRef = reference(); + sysErr->data1 = error; + sysErr->data2 = 0; + sendSignal(cntrRef, GSN_SYSTEM_ERROR, signal, + SystemError::SignalLength, JBB); + Restart.resetRestart(signal); + } DBUG_VOID_RETURN; } @@ -4080,6 +4124,15 @@ Suma::Restart::completeRestartingNode(Signal* signal, Uint32 sumaRef) //SumaStartMeConf *conf= (SumaStartMeConf*)signal->getDataPtrSend(); suma.sendSignal(sumaRef, GSN_SUMA_START_ME_CONF, signal, SumaStartMeConf::SignalLength, JBB); + resetRestart(signal); + DBUG_VOID_RETURN; +} + +void +Suma::Restart::resetRestart(Signal* signal) +{ + jam(); + DBUG_ENTER("Suma::Restart::resetRestart"); nodeId = 0; DBUG_VOID_RETURN; } diff --git 
a/storage/ndb/src/kernel/blocks/suma/Suma.hpp b/storage/ndb/src/kernel/blocks/suma/Suma.hpp index 68cd3499fde..61e0ffc1c43 100644 --- a/storage/ndb/src/kernel/blocks/suma/Suma.hpp +++ b/storage/ndb/src/kernel/blocks/suma/Suma.hpp @@ -489,6 +489,7 @@ public: void completeSubscriber(Signal* signal, Uint32 sumaRef); void completeRestartingNode(Signal* signal, Uint32 sumaRef); + void resetRestart(Signal* signal); } Restart; private: diff --git a/storage/ndb/src/mgmsrv/ConfigInfo.cpp b/storage/ndb/src/mgmsrv/ConfigInfo.cpp index b2592c0deea..7fef28c3a92 100644 --- a/storage/ndb/src/mgmsrv/ConfigInfo.cpp +++ b/storage/ndb/src/mgmsrv/ConfigInfo.cpp @@ -1214,6 +1214,18 @@ const ConfigInfo::ParamInfo ConfigInfo::m_ParamInfo[] = { "0", STR_VALUE(MAX_INT_RNIL) }, + { + CFG_DB_STRING_MEMORY, + "StringMemory", + DB_TOKEN, + "Default size of string memory (0 -> 5% of max, 1-100 -> % of max, >100 -> actual bytes)", + ConfigInfo::CI_USED, + false, + ConfigInfo::CI_INT, + "0", + "0", + STR_VALUE(MAX_INT_RNIL) }, + /*************************************************************************** * API ***************************************************************************/ diff --git a/storage/ndb/src/ndbapi/NdbBlob.cpp b/storage/ndb/src/ndbapi/NdbBlob.cpp index 8d098a9f493..2f5325bd844 100644 --- a/storage/ndb/src/ndbapi/NdbBlob.cpp +++ b/storage/ndb/src/ndbapi/NdbBlob.cpp @@ -76,11 +76,16 @@ NdbBlob::getBlobTable(NdbTableImpl& bt, const NdbTableImpl* t, const NdbColumnIm BLOB tables use the same fragmentation as the original table but may change the fragment type if it is UserDefined since it must be hash based so that the kernel can handle it on its own. + They also use the same tablespaces and never use any range or + list arrays. */ bt.m_primaryTableId = t->m_id; + bt.m_fd.clear(); + bt.m_ts.clear(); + bt.m_range.clear(); + bt.setFragmentCount(t->getFragmentCount()); bt.m_tablespace_id = t->m_tablespace_id; bt.m_tablespace_version = t->m_tablespace_version; - bt.m_ng.clear(); switch (t->getFragmentType()) { case NdbDictionary::Object::FragAllSmall: diff --git a/storage/ndb/src/ndbapi/NdbDictionary.cpp b/storage/ndb/src/ndbapi/NdbDictionary.cpp index 19069d2a16d..fd11aed14e3 100644 --- a/storage/ndb/src/ndbapi/NdbDictionary.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionary.cpp @@ -413,6 +413,30 @@ NdbDictionary::Table::getNoOfPrimaryKeys() const { return m_impl.m_noOfKeys; } +void +NdbDictionary::Table::setMaxRows(Uint64 maxRows) +{ + m_impl.m_max_rows = maxRows; +} + +Uint64 +NdbDictionary::Table::getMaxRows() const +{ + return m_impl.m_max_rows; +} + +void +NdbDictionary::Table::setDefaultNoPartitionsFlag(Uint32 flag) +{ + m_impl.m_default_no_part_flag = flag; +} + +Uint32 +NdbDictionary::Table::getDefaultNoPartitionsFlag() const +{ + return m_impl.m_default_no_part_flag; +} + const char* NdbDictionary::Table::getPrimaryKey(int no) const { int count = 0; @@ -435,25 +459,99 @@ NdbDictionary::Table::getFrmLength() const { return m_impl.getFrmLength(); } +void +NdbDictionary::Table::setTablespaceNames(const void *data, Uint32 len) +{ + m_impl.setTablespaceNames(data, len); +} + +const void* +NdbDictionary::Table::getTablespaceNames() +{ + return m_impl.getTablespaceNames(); +} + +Uint32 +NdbDictionary::Table::getTablespaceNamesLen() const +{ + return m_impl.getTablespaceNamesLen(); +} + +void +NdbDictionary::Table::setLinearFlag(Uint32 flag) +{ + m_impl.m_linear_flag = flag; +} + +bool +NdbDictionary::Table::getLinearFlag() const +{ + return m_impl.m_linear_flag; +} + +void 
+NdbDictionary::Table::setFragmentCount(Uint32 count) +{ + m_impl.setFragmentCount(count); +} + +Uint32 +NdbDictionary::Table::getFragmentCount() const +{ + return m_impl.getFragmentCount(); +} + void NdbDictionary::Table::setFrm(const void* data, Uint32 len){ m_impl.setFrm(data, len); } const void* -NdbDictionary::Table::getNodeGroupIds() const { - return m_impl.m_ng.get_data(); +NdbDictionary::Table::getFragmentData() const { + return m_impl.getFragmentData(); } Uint32 -NdbDictionary::Table::getNodeGroupIdsLength() const { - return m_impl.m_ng.length(); +NdbDictionary::Table::getFragmentDataLen() const { + return m_impl.getFragmentDataLen(); } void -NdbDictionary::Table::setNodeGroupIds(const void* data, Uint32 noWords) +NdbDictionary::Table::setFragmentData(const void* data, Uint32 len) { - m_impl.m_ng.assign(data, 2*noWords); + m_impl.setFragmentData(data, len); +} + +const void* +NdbDictionary::Table::getTablespaceData() const { + return m_impl.getTablespaceData(); +} + +Uint32 +NdbDictionary::Table::getTablespaceDataLen() const { + return m_impl.getTablespaceDataLen(); +} + +void +NdbDictionary::Table::setTablespaceData(const void* data, Uint32 len) +{ + m_impl.setTablespaceData(data, len); +} + +const void* +NdbDictionary::Table::getRangeListData() const { + return m_impl.getRangeListData(); +} + +Uint32 +NdbDictionary::Table::getRangeListDataLen() const { + return m_impl.getRangeListDataLen(); +} + +void +NdbDictionary::Table::setRangeListData(const void* data, Uint32 len) +{ + m_impl.setRangeListData(data, len); } NdbDictionary::Object::Status @@ -1523,7 +1621,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) out << " AT=MEDIUM_VAR"; break; default: - out << " AT=" << col.getArrayType() << "?"; + out << " AT=" << (int)col.getArrayType() << "?"; break; } @@ -1535,7 +1633,7 @@ operator<<(NdbOut& out, const NdbDictionary::Column& col) out << " ST=DISK"; break; default: - out << " ST=" << col.getStorageType() << "?"; + out << " ST=" << (int)col.getStorageType() << "?"; break; } diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp index 3bb1e5838f0..5716288263d 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.cpp @@ -42,6 +42,7 @@ #include #include #include +#include #define DEBUG_PRINT 0 #define INCOMPATIBLE_VERSION -2 @@ -370,28 +371,48 @@ void NdbTableImpl::init(){ m_changeMask= 0; m_id= RNIL; + m_version = ~0; + m_status = NdbDictionary::Object::Invalid; + m_type = NdbDictionary::Object::TypeUndefined; m_primaryTableId= RNIL; + m_internalName.clear(); + m_externalName.clear(); + m_newExternalName.clear(); + m_mysqlName.clear(); m_frm.clear(); m_newFrm.clear(); - m_fragmentType= NdbDictionary::Object::DistrKeyHash; + m_ts_name.clear(); + m_new_ts_name.clear(); + m_ts.clear(); + m_new_ts.clear(); + m_fd.clear(); + m_new_fd.clear(); + m_range.clear(); + m_new_range.clear(); + m_fragmentType= NdbDictionary::Object::FragAllSmall; m_hashValueMask= 0; m_hashpointerValue= 0; + m_linear_flag= true; + m_primaryTable.clear(); + m_max_rows = 0; + m_default_no_part_flag = 1; m_logging= true; + m_row_gci = true; + m_row_checksum = true; m_kvalue= 6; m_minLoadFactor= 78; m_maxLoadFactor= 80; m_keyLenInWords= 0; m_fragmentCount= 0; - m_dictionary= NULL; m_index= NULL; m_indexType= NdbDictionary::Object::TypeUndefined; m_noOfKeys= 0; m_noOfDistributionKeys= 0; m_noOfBlobs= 0; m_replicaCount= 0; + m_tablespace_name.clear(); m_tablespace_id = ~0; - m_row_gci = true; - 
m_row_checksum = true; + m_tablespace_version = ~0; } bool @@ -401,63 +422,192 @@ NdbTableImpl::equal(const NdbTableImpl& obj) const if ((m_internalName.c_str() == NULL) || (strcmp(m_internalName.c_str(), "") == 0) || (obj.m_internalName.c_str() == NULL) || - (strcmp(obj.m_internalName.c_str(), "") == 0)) { + (strcmp(obj.m_internalName.c_str(), "") == 0)) + { // Shallow equal - if(strcmp(getName(), obj.getName()) != 0){ + if(strcmp(getName(), obj.getName()) != 0) + { DBUG_PRINT("info",("name %s != %s",getName(),obj.getName())); DBUG_RETURN(false); } - } else + } + else + { // Deep equal - if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0){ + if(strcmp(m_internalName.c_str(), obj.m_internalName.c_str()) != 0) { DBUG_PRINT("info",("m_internalName %s != %s", m_internalName.c_str(),obj.m_internalName.c_str())); DBUG_RETURN(false); } } - if(m_fragmentType != obj.m_fragmentType){ - DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType,obj.m_fragmentType)); + if (m_frm.length() != obj.m_frm.length() || + (memcmp(m_frm.get_data(), obj.m_frm.get_data(), m_frm.length()))) + { + DBUG_PRINT("info",("m_frm not equal")); DBUG_RETURN(false); } - if(m_columns.size() != obj.m_columns.size()){ - DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(),obj.m_columns.size())); + if (m_fd.length() != obj.m_fd.length() || + (memcmp(m_fd.get_data(), obj.m_fd.get_data(), m_fd.length()))) + { + DBUG_PRINT("info",("m_fd not equal")); + DBUG_RETURN(false); + } + if (m_ts.length() != obj.m_ts.length() || + (memcmp(m_ts.get_data(), obj.m_ts.get_data(), m_ts.length()))) + { + DBUG_PRINT("info",("m_ts not equal")); + DBUG_RETURN(false); + } + if (m_range.length() != obj.m_range.length() || + (memcmp(m_range.get_data(), obj.m_range.get_data(), m_range.length()))) + { + DBUG_PRINT("info",("m_range not equal")); + DBUG_RETURN(false); + } + if(m_fragmentType != obj.m_fragmentType) + { + DBUG_PRINT("info",("m_fragmentType %d != %d",m_fragmentType, + obj.m_fragmentType)); + DBUG_RETURN(false); + } + if(m_columns.size() != obj.m_columns.size()) + { + DBUG_PRINT("info",("m_columns.size %d != %d",m_columns.size(), + obj.m_columns.size())); DBUG_RETURN(false); } - for(unsigned i = 0; iequal(* obj.m_columns[i])){ + for(unsigned i = 0; iequal(* obj.m_columns[i])) + { DBUG_PRINT("info",("m_columns [%d] != [%d]",i,i)); DBUG_RETURN(false); } } - if(m_logging != obj.m_logging){ + if(m_linear_flag != obj.m_linear_flag) + { + DBUG_PRINT("info",("m_linear_flag %d != %d",m_linear_flag, + obj.m_linear_flag)); + DBUG_RETURN(false); + } + + if(m_max_rows != obj.m_max_rows) + { + DBUG_PRINT("info",("m_max_rows %d != %d",(int32)m_max_rows, + (int32)obj.m_max_rows)); + DBUG_RETURN(false); + } + + if(m_default_no_part_flag != obj.m_default_no_part_flag) + { + DBUG_PRINT("info",("m_default_no_part_flag %d != %d",m_default_no_part_flag, + obj.m_default_no_part_flag)); + DBUG_RETURN(false); + } + + if(m_logging != obj.m_logging) + { DBUG_PRINT("info",("m_logging %d != %d",m_logging,obj.m_logging)); DBUG_RETURN(false); } - if(m_kvalue != obj.m_kvalue){ + if(m_row_gci != obj.m_row_gci) + { + DBUG_PRINT("info",("m_row_gci %d != %d",m_row_gci,obj.m_row_gci)); + DBUG_RETURN(false); + } + + if(m_row_checksum != obj.m_row_checksum) + { + DBUG_PRINT("info",("m_row_checksum %d != %d",m_row_checksum, + obj.m_row_checksum)); + DBUG_RETURN(false); + } + + if(m_kvalue != obj.m_kvalue) + { DBUG_PRINT("info",("m_kvalue %d != %d",m_kvalue,obj.m_kvalue)); DBUG_RETURN(false); } - if(m_minLoadFactor != obj.m_minLoadFactor){ - 
DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor,obj.m_minLoadFactor)); + if(m_minLoadFactor != obj.m_minLoadFactor) + { + DBUG_PRINT("info",("m_minLoadFactor %d != %d",m_minLoadFactor, + obj.m_minLoadFactor)); DBUG_RETURN(false); } - if(m_maxLoadFactor != obj.m_maxLoadFactor){ - DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor,obj.m_maxLoadFactor)); + if(m_maxLoadFactor != obj.m_maxLoadFactor) + { + DBUG_PRINT("info",("m_maxLoadFactor %d != %d",m_maxLoadFactor, + obj.m_maxLoadFactor)); DBUG_RETURN(false); } + if(m_tablespace_id != obj.m_tablespace_id) + { + DBUG_PRINT("info",("m_tablespace_id %d != %d",m_tablespace_id, + obj.m_tablespace_id)); + DBUG_RETURN(false); + } + + if(m_tablespace_version != obj.m_tablespace_version) + { + DBUG_PRINT("info",("m_tablespace_version %d != %d",m_tablespace_version, + obj.m_tablespace_version)); + DBUG_RETURN(false); + } + + if(m_id != obj.m_id) + { + DBUG_PRINT("info",("m_id %d != %d",m_id,obj.m_id)); + DBUG_RETURN(false); + } + + if(m_version != obj.m_version) + { + DBUG_PRINT("info",("m_version %d != %d",m_version,obj.m_version)); + DBUG_RETURN(false); + } + + if(m_type != obj.m_type) + { + DBUG_PRINT("info",("m_type %d != %d",m_type,obj.m_type)); + DBUG_RETURN(false); + } + + if (m_type == NdbDictionary::Object::UniqueHashIndex || + m_type == NdbDictionary::Object::OrderedIndex) + { + if(m_primaryTableId != obj.m_primaryTableId) + { + DBUG_PRINT("info",("m_primaryTableId %d != %d",m_primaryTableId, + obj.m_primaryTableId)); + DBUG_RETURN(false); + } + if (m_indexType != obj.m_indexType) + { + DBUG_PRINT("info",("m_indexType %d != %d",m_indexType,obj.m_indexType)); + DBUG_RETURN(false); + } + if(strcmp(m_primaryTable.c_str(), obj.m_primaryTable.c_str()) != 0) + { + DBUG_PRINT("info",("m_primaryTable %s != %s", + m_primaryTable.c_str(),obj.m_primaryTable.c_str())); + DBUG_RETURN(false); + } + } DBUG_RETURN(true); } void NdbTableImpl::assign(const NdbTableImpl& org) { + /* m_changeMask intentionally not copied */ + m_primaryTableId = org.m_primaryTableId; m_internalName.assign(org.m_internalName); updateMysqlName(); // If the name has been explicitly set, use that name @@ -467,10 +617,21 @@ NdbTableImpl::assign(const NdbTableImpl& org) else m_externalName.assign(org.m_externalName); m_frm.assign(org.m_frm.get_data(), org.m_frm.length()); - m_ng.assign(org.m_ng.get_data(), org.m_ng.length()); - m_fragmentType = org.m_fragmentType; - m_fragmentCount = org.m_fragmentCount; + m_ts_name.assign(org.m_ts_name.get_data(), org.m_ts_name.length()); + m_new_ts_name.assign(org.m_new_ts_name.get_data(), + org.m_new_ts_name.length()); + m_ts.assign(org.m_ts.get_data(), org.m_ts.length()); + m_new_ts.assign(org.m_new_ts.get_data(), org.m_new_ts.length()); + m_fd.assign(org.m_fd.get_data(), org.m_fd.length()); + m_new_fd.assign(org.m_new_fd.get_data(), org.m_new_fd.length()); + m_range.assign(org.m_range.get_data(), org.m_range.length()); + m_new_range.assign(org.m_new_range.get_data(), org.m_new_range.length()); + m_fragmentType = org.m_fragmentType; + /* + m_columnHashMask, m_columnHash, m_hashValueMask, m_hashpointerValue + is state calculated by computeAggregates and buildColumnHash + */ for(unsigned i = 0; iinit(); + s = SimpleProperties::unpack(it, tableDesc, DictTabInfo::TableMapping, DictTabInfo::TableMappingSize, true, true); if(s != SimpleProperties::Break){ + NdbMem_Free((void*)tableDesc); DBUG_RETURN(703); } - const char * internalName = tableDesc.TableName; + const char * internalName = tableDesc->TableName; const char * 
externalName = Ndb::externalizeTableName(internalName, fullyQualifiedNames); NdbTableImpl * impl = new NdbTableImpl(); - impl->m_id = tableDesc.TableId; - impl->m_version = tableDesc.TableVersion; + impl->m_id = tableDesc->TableId; + impl->m_version = tableDesc->TableVersion; impl->m_status = NdbDictionary::Object::Retrieved; impl->m_internalName.assign(internalName); impl->updateMysqlName(); impl->m_externalName.assign(externalName); - impl->m_frm.assign(tableDesc.FrmData, tableDesc.FrmLen); - impl->m_ng.assign(tableDesc.FragmentData, tableDesc.FragmentDataLen); + impl->m_frm.assign(tableDesc->FrmData, tableDesc->FrmLen); + impl->m_fd.assign(tableDesc->FragmentData, tableDesc->FragmentDataLen); + impl->m_range.assign(tableDesc->RangeListData, tableDesc->RangeListDataLen); + impl->m_fragmentCount = tableDesc->FragmentCount; + + /* + We specifically don't get tablespace data and range/list arrays here + since those are known by the MySQL Server through analysing the + frm file. + Fragment Data contains the real node group mapping and the fragment + identities used for each fragment. At the moment we have no need for + this. + Frm file is needed for autodiscovery. + */ impl->m_fragmentType = (NdbDictionary::Object::FragmentType) - getApiConstant(tableDesc.FragmentType, + getApiConstant(tableDesc->FragmentType, fragmentTypeMapping, (Uint32)NdbDictionary::Object::FragUndefined); - impl->m_logging = tableDesc.TableLoggedFlag; - impl->m_row_gci = tableDesc.RowGCIFlag; - impl->m_row_checksum = tableDesc.RowChecksumFlag; - impl->m_kvalue = tableDesc.TableKValue; - impl->m_minLoadFactor = tableDesc.MinLoadFactor; - impl->m_maxLoadFactor = tableDesc.MaxLoadFactor; + Uint64 max_rows = ((Uint64)tableDesc->MaxRowsHigh) << 32; + max_rows += tableDesc->MaxRowsLow; + impl->m_max_rows = max_rows; + impl->m_default_no_part_flag = tableDesc->DefaultNoPartFlag; + impl->m_linear_flag = tableDesc->LinearHashFlag; + impl->m_logging = tableDesc->TableLoggedFlag; + impl->m_row_gci = tableDesc->RowGCIFlag; + impl->m_row_checksum = tableDesc->RowChecksumFlag; + impl->m_kvalue = tableDesc->TableKValue; + impl->m_minLoadFactor = tableDesc->MinLoadFactor; + impl->m_maxLoadFactor = tableDesc->MaxLoadFactor; impl->m_indexType = (NdbDictionary::Object::Type) - getApiConstant(tableDesc.TableType, + getApiConstant(tableDesc->TableType, indexTypeMapping, NdbDictionary::Object::TypeUndefined); if(impl->m_indexType == NdbDictionary::Object::TypeUndefined){ } else { const char * externalPrimary = - Ndb::externalizeTableName(tableDesc.PrimaryTable, fullyQualifiedNames); + Ndb::externalizeTableName(tableDesc->PrimaryTable, fullyQualifiedNames); impl->m_primaryTable.assign(externalPrimary); } Uint32 i; - for(i = 0; i < tableDesc.NoOfAttributes; i++) { + for(i = 0; i < tableDesc->NoOfAttributes; i++) { DictTabInfo::Attribute attrDesc; attrDesc.init(); s = SimpleProperties::unpack(it, &attrDesc, @@ -1573,6 +1872,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, true, true); if(s != SimpleProperties::Break){ delete impl; + NdbMem_Free((void*)tableDesc); DBUG_RETURN(703); } @@ -1583,6 +1883,7 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, // check type and compute attribute size and array size if (! 
attrDesc.translateExtType()) { delete impl; + NdbMem_Free((void*)tableDesc); DBUG_RETURN(703); } col->m_type = (NdbDictionary::Column::Type)attrDesc.AttributeExtType; @@ -1594,12 +1895,14 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, // charset is defined exactly for char types if (col->getCharType() != (cs_number != 0)) { delete impl; + NdbMem_Free((void*)tableDesc); DBUG_RETURN(703); } if (col->getCharType()) { col->m_cs = get_charset(cs_number, MYF(0)); if (col->m_cs == NULL) { delete impl; + NdbMem_Free((void*)tableDesc); DBUG_RETURN(743); } } @@ -1627,17 +1930,17 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, impl->computeAggregates(); - if(tableDesc.FragmentDataLen > 0) + if(tableDesc->ReplicaDataLen > 0) { - Uint16 replicaCount = tableDesc.FragmentData[0]; - Uint16 fragCount = tableDesc.FragmentData[1]; + Uint16 replicaCount = tableDesc->ReplicaData[0]; + Uint16 fragCount = tableDesc->ReplicaData[1]; impl->m_replicaCount = replicaCount; impl->m_fragmentCount = fragCount; DBUG_PRINT("info", ("replicaCount=%x , fragCount=%x",replicaCount,fragCount)); for(i = 0; i < (Uint32) (fragCount*replicaCount); i++) { - impl->m_fragments.push_back(tableDesc.FragmentData[i+2]); + impl->m_fragments.push_back(tableDesc->ReplicaData[i+2]); } Uint32 topBit = (1 << 31); @@ -1649,17 +1952,18 @@ NdbDictInterface::parseTableInfo(NdbTableImpl ** ret, } else { - impl->m_fragmentCount = tableDesc.FragmentCount; + impl->m_fragmentCount = tableDesc->FragmentCount; impl->m_replicaCount = 0; impl->m_hashValueMask = 0; impl->m_hashpointerValue = 0; } - impl->m_tablespace_id = tableDesc.TablespaceId; - impl->m_tablespace_version = tableDesc.TablespaceVersion; + impl->m_tablespace_id = tableDesc->TablespaceId; + impl->m_tablespace_version = tableDesc->TablespaceVersion; * ret = impl; + NdbMem_Free((void*)tableDesc); DBUG_ASSERT(impl->m_fragmentCount > 0); DBUG_RETURN(0); } @@ -1800,8 +2104,9 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, NdbTableImpl & impl, bool alter) { - DBUG_ENTER("NdbDictInterface::createOrAlterTable"); unsigned i; + char *ts_names[MAX_NDB_PARTITIONS]; + DBUG_ENTER("NdbDictInterface::createOrAlterTable"); impl.computeAggregates(); @@ -1827,7 +2132,8 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, impl.m_newExternalName.clear(); } // Definition change (frm) - if (!impl.m_newFrm.empty()) { + if (!impl.m_newFrm.empty()) + { if (alter) { AlterTableReq::setFrmFlag(impl.m_changeMask, true); @@ -1835,6 +2141,55 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, impl.m_frm.assign(impl.m_newFrm.get_data(), impl.m_newFrm.length()); impl.m_newFrm.clear(); } + // Change FragmentData (fragment identity, state, tablespace id) + if (!impl.m_new_fd.empty()) + { + if (alter) + { + AlterTableReq::setFragDataFlag(impl.m_changeMask, true); + } + impl.m_fd.assign(impl.m_new_fd.get_data(), impl.m_new_fd.length()); + impl.m_new_fd.clear(); + } + // Change Tablespace Name Data + if (!impl.m_new_ts_name.empty()) + { + if (alter) + { + AlterTableReq::setTsNameFlag(impl.m_changeMask, true); + } + impl.m_ts_name.assign(impl.m_new_ts_name.get_data(), + impl.m_new_ts_name.length()); + impl.m_new_ts_name.clear(); + } + // Change Range/List Data + if (!impl.m_new_range.empty()) + { + if (alter) + { + AlterTableReq::setRangeListFlag(impl.m_changeMask, true); + } + impl.m_range.assign(impl.m_new_range.get_data(), + impl.m_new_range.length()); + impl.m_new_range.clear(); + } + // Change Tablespace Data + if (!impl.m_new_ts.empty()) + { + if (alter) + { + AlterTableReq::setTsFlag(impl.m_changeMask, 
true); + } + impl.m_ts.assign(impl.m_new_ts.get_data(), + impl.m_new_ts.length()); + impl.m_new_ts.clear(); + } + + + /* + TODO RONM: Here I need to insert checks for fragment array and + range or list array + */ //validate(); //aggregate(); @@ -1843,10 +2198,17 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, ndb.internalize_table_name(impl.m_externalName.c_str())); impl.m_internalName.assign(internalName); impl.updateMysqlName(); - DictTabInfo::Table tmpTab; - tmpTab.init(); - BaseString::snprintf(tmpTab.TableName, - sizeof(tmpTab.TableName), + DictTabInfo::Table *tmpTab; + + tmpTab = (DictTabInfo::Table*)NdbMem_Allocate(sizeof(DictTabInfo::Table)); + if (!tmpTab) + { + m_error.code = 4000; + DBUG_RETURN(-1); + } + tmpTab->init(); + BaseString::snprintf(tmpTab->TableName, + sizeof(tmpTab->TableName), internalName.c_str()); bool haveAutoIncrement = false; @@ -1859,6 +2221,7 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, if (col->m_autoIncrement) { if (haveAutoIncrement) { m_error.code= 4335; + NdbMem_Free((void*)tmpTab); DBUG_RETURN(-1); } haveAutoIncrement = true; @@ -1877,35 +2240,89 @@ NdbDictInterface::createOrAlterTable(Ndb & ndb, // Check max length of frm data if (impl.m_frm.length() > MAX_FRM_DATA_SIZE){ m_error.code= 1229; + NdbMem_Free((void*)tmpTab); DBUG_RETURN(-1); } - tmpTab.FrmLen = impl.m_frm.length(); - memcpy(tmpTab.FrmData, impl.m_frm.get_data(), impl.m_frm.length()); - tmpTab.FragmentDataLen = impl.m_ng.length(); - memcpy(tmpTab.FragmentData, impl.m_ng.get_data(), impl.m_ng.length()); + /* + TODO RONM: This needs to change to dynamic arrays instead + Frm Data, FragmentData, TablespaceData, RangeListData, TsNameData + */ + tmpTab->FrmLen = impl.m_frm.length(); + memcpy(tmpTab->FrmData, impl.m_frm.get_data(), impl.m_frm.length()); - tmpTab.TableLoggedFlag = impl.m_logging; - tmpTab.RowGCIFlag = impl.m_row_gci; - tmpTab.RowChecksumFlag = impl.m_row_checksum; - tmpTab.TableLoggedFlag = impl.m_logging; - tmpTab.TableKValue = impl.m_kvalue; - tmpTab.MinLoadFactor = impl.m_minLoadFactor; - tmpTab.MaxLoadFactor = impl.m_maxLoadFactor; - tmpTab.TableType = DictTabInfo::UserTable; - tmpTab.PrimaryTableId = impl.m_primaryTableId; - tmpTab.NoOfAttributes = sz; - - tmpTab.FragmentType = getKernelConstant(impl.m_fragmentType, - fragmentTypeMapping, - DictTabInfo::AllNodesSmallTable); - tmpTab.TableVersion = rand(); + tmpTab->FragmentDataLen = impl.m_fd.length(); + memcpy(tmpTab->FragmentData, impl.m_fd.get_data(), impl.m_fd.length()); - const char* tablespace_name= impl.m_tablespace_name.c_str(); + tmpTab->TablespaceDataLen = impl.m_ts.length(); + memcpy(tmpTab->TablespaceData, impl.m_ts.get_data(), impl.m_ts.length()); + + tmpTab->RangeListDataLen = impl.m_range.length(); + memcpy(tmpTab->RangeListData, impl.m_range.get_data(), + impl.m_range.length()); + + memcpy(ts_names, impl.m_ts_name.get_data(), + impl.m_ts_name.length()); + + tmpTab->FragmentCount= impl.m_fragmentCount; + tmpTab->TableLoggedFlag = impl.m_logging; + tmpTab->RowGCIFlag = impl.m_row_gci; + tmpTab->RowChecksumFlag = impl.m_row_checksum; + tmpTab->TableKValue = impl.m_kvalue; + tmpTab->MinLoadFactor = impl.m_minLoadFactor; + tmpTab->MaxLoadFactor = impl.m_maxLoadFactor; + tmpTab->TableType = DictTabInfo::UserTable; + tmpTab->PrimaryTableId = impl.m_primaryTableId; + tmpTab->NoOfAttributes = sz; + tmpTab->MaxRowsHigh = (Uint32)(impl.m_max_rows >> 32); + tmpTab->MaxRowsLow = (Uint32)(impl.m_max_rows & 0xFFFFFFFF); + tmpTab->DefaultNoPartFlag = impl.m_default_no_part_flag; + tmpTab->LinearHashFlag = 
impl.m_linear_flag; + + if (impl.m_ts_name.length()) + { + char **ts_name_ptr= (char**)ts_names; + i= 0; + do + { + NdbTablespaceImpl tmp; + if (*ts_name_ptr) + { + if(get_filegroup(tmp, NdbDictionary::Object::Tablespace, + (const char*)*ts_name_ptr) == 0) + { + tmpTab->TablespaceData[2*i] = tmp.m_id; + tmpTab->TablespaceData[2*i + 1] = tmp.m_version; + } + else + { + NdbMem_Free((void*)tmpTab); + DBUG_RETURN(-1); + } + } + else + { + /* + No tablespace used, set tablespace id to NULL + */ + tmpTab->TablespaceData[2*i] = RNIL; + tmpTab->TablespaceData[2*i + 1] = 0; + } + ts_name_ptr++; + } while (++i < tmpTab->FragmentCount); + tmpTab->TablespaceDataLen= 4*i; + } + + tmpTab->FragmentType = getKernelConstant(impl.m_fragmentType, + fragmentTypeMapping, + DictTabInfo::AllNodesSmallTable); + tmpTab->TableVersion = rand(); + + const char *tablespace_name= impl.m_tablespace_name.c_str(); loop: if(impl.m_tablespace_id != ~(Uint32)0) { - tmpTab.TablespaceId = impl.m_tablespace_id; - tmpTab.TablespaceVersion = impl.m_tablespace_version; + tmpTab->TablespaceId = impl.m_tablespace_id; + tmpTab->TablespaceVersion = impl.m_tablespace_version; } else if(strlen(tablespace_name)) { @@ -1913,13 +2330,14 @@ loop: if(get_filegroup(tmp, NdbDictionary::Object::Tablespace, tablespace_name) == 0) { - tmpTab.TablespaceId = tmp.m_id; - tmpTab.TablespaceVersion = tmp.m_version; + tmpTab->TablespaceId = tmp.m_id; + tmpTab->TablespaceVersion = tmp.m_version; } else { // error set by get filegroup - return -1; + NdbMem_Free((void*)tmpTab); + DBUG_RETURN(-1); } } else @@ -1937,13 +2355,14 @@ loop: UtilBufferWriter w(m_buffer); SimpleProperties::UnpackStatus s; s = SimpleProperties::pack(w, - &tmpTab, + tmpTab, DictTabInfo::TableMapping, DictTabInfo::TableMappingSize, true); if(s != SimpleProperties::Eof){ abort(); } + NdbMem_Free((void*)tmpTab); DBUG_PRINT("info",("impl.m_noOfDistributionKeys: %d impl.m_noOfKeys: %d distKeys: %d", impl.m_noOfDistributionKeys, impl.m_noOfKeys, distKeys)); @@ -2053,7 +2472,7 @@ loop: if(m_error.code == AlterTableRef::InvalidTableVersion) { // Clear caches and try again - return INCOMPATIBLE_VERSION; + DBUG_RETURN(INCOMPATIBLE_VERSION); } } else { tSignal.theVerId_signalNumber = GSN_CREATE_TABLE_REQ; diff --git a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp index c69172cd489..f812860c164 100644 --- a/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp +++ b/storage/ndb/src/ndbapi/NdbDictionaryImpl.hpp @@ -120,9 +120,24 @@ public: void init(); void setName(const char * name); const char * getName() const; + void setFragmentCount(Uint32 count); + Uint32 getFragmentCount() const; void setFrm(const void* data, Uint32 len); const void * getFrmData() const; Uint32 getFrmLength() const; + void setFragmentData(const void* data, Uint32 len); + const void * getFragmentData() const; + Uint32 getFragmentDataLen() const; + void setTablespaceNames(const void* data, Uint32 len); + Uint32 getTablespaceNamesLen() const; + const void * getTablespaceNames() const; + void setTablespaceData(const void* data, Uint32 len); + const void * getTablespaceData() const; + Uint32 getTablespaceDataLen() const; + void setRangeListData(const void* data, Uint32 len); + const void * getRangeListData() const; + Uint32 getRangeListDataLen() const; + const char * getMysqlName() const; void updateMysqlName(); @@ -133,8 +148,15 @@ public: BaseString m_mysqlName; BaseString m_newExternalName; // Used for alter table UtilBuffer m_frm; - UtilBuffer m_newFrm; // Used for alter table - 
UtilBuffer m_ng; + UtilBuffer m_newFrm; // Used for alter table + UtilBuffer m_ts_name; //Tablespace Names + UtilBuffer m_new_ts_name; //Tablespace Names + UtilBuffer m_ts; //TablespaceData + UtilBuffer m_new_ts; //TablespaceData + UtilBuffer m_fd; //FragmentData + UtilBuffer m_new_fd; //FragmentData + UtilBuffer m_range; //Range Or List Array + UtilBuffer m_new_range; //Range Or List Array NdbDictionary::Object::FragmentType m_fragmentType; /** @@ -153,6 +175,9 @@ public: Uint32 m_hashpointerValue; Vector<Uint16> m_fragments; + Uint64 m_max_rows; + Uint32 m_default_no_part_flag; + bool m_linear_flag; bool m_logging; bool m_row_gci; bool m_row_checksum; @@ -162,7 +187,6 @@ public: Uint16 m_keyLenInWords; Uint16 m_fragmentCount; - NdbDictionaryImpl * m_dictionary; NdbIndexImpl * m_index; NdbColumnImpl * getColumn(unsigned attrId); NdbColumnImpl * getColumn(const char * name); diff --git a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp index aa7b8fd133b..94022d9a737 100644 --- a/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp +++ b/storage/ndb/src/ndbapi/NdbEventOperationImpl.cpp @@ -55,6 +55,19 @@ static const Uint32 ACTIVE_GCI_MASK = ACTIVE_GCI_DIRECTORY_SIZE - 1; */ //#define EVENT_DEBUG +#ifdef EVENT_DEBUG +#define DBUG_ENTER_EVENT(A) DBUG_ENTER(A) +#define DBUG_RETURN_EVENT(A) DBUG_RETURN(A) +#define DBUG_VOID_RETURN_EVENT DBUG_VOID_RETURN +#define DBUG_PRINT_EVENT(A,B) DBUG_PRINT(A,B) +#define DBUG_DUMP_EVENT(A,B,C) DBUG_DUMP(A,B,C) +#else +#define DBUG_ENTER_EVENT(A) +#define DBUG_RETURN_EVENT(A) return(A) +#define DBUG_VOID_RETURN_EVENT return +#define DBUG_PRINT_EVENT(A,B) +#define DBUG_DUMP_EVENT(A,B,C) +#endif // todo handle several ndb objects // todo free allocated data when closing NdbEventBuffer @@ -343,14 +356,14 @@ NdbEventOperationImpl::getLatestGCI() int NdbEventOperationImpl::receive_event() { - DBUG_ENTER("NdbEventOperationImpl::receive_event"); + DBUG_ENTER_EVENT("NdbEventOperationImpl::receive_event"); Uint32 operation= (Uint32)m_data_item->sdata->operation; - DBUG_PRINT("info",("sdata->operation %u",operation)); + DBUG_PRINT_EVENT("info",("sdata->operation %u",operation)); if (unlikely(operation >= NdbDictionary::Event::_TE_FIRST_NON_DATA_EVENT)) { - DBUG_RETURN(1); + DBUG_RETURN_EVENT(1); } // now move the data into the RecAttrs @@ -361,8 +374,8 @@ NdbEventOperationImpl::receive_event() Uint32 *aAttrEndPtr = aAttrPtr + m_data_item->ptr[0].sz; Uint32 *aDataPtr = m_data_item->ptr[1].p; - DBUG_DUMP("after",(char*)m_data_item->ptr[1].p, m_data_item->ptr[1].sz*4); - DBUG_DUMP("before",(char*)m_data_item->ptr[2].p, m_data_item->ptr[2].sz*4); + DBUG_DUMP_EVENT("after",(char*)m_data_item->ptr[1].p, m_data_item->ptr[1].sz*4); + DBUG_DUMP_EVENT("before",(char*)m_data_item->ptr[2].p, m_data_item->ptr[2].sz*4); // copy data into the RecAttr's // we assume that the respective attribute lists are sorted
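The DBUG_*_EVENT macros introduced above compile the per-event tracing away entirely unless EVENT_DEBUG is defined, so the hot event-receive path carries no dbug overhead in normal builds. A minimal usage sketch (hypothetical function, illustration only):

int example_event_path(int arg)
{
  DBUG_ENTER_EVENT("example_event_path");     // no-op unless EVENT_DEBUG
  DBUG_PRINT_EVENT("info", ("arg %d", arg));  // likewise compiled away
  DBUG_RETURN_EVENT(arg + 1);                 // plain return otherwise
}

@@ -402,8 +415,8 @@ NdbEventOperationImpl::receive_event() tDataSz = AttributeHeader(*aAttrPtr).getByteSize(); while (tAttrId > tRecAttrId) { - DBUG_PRINT("info",("undef [%u] %u 0x%x [%u] 0x%x", - tAttrId, tDataSz, *aDataPtr, tRecAttrId, aDataPtr)); + DBUG_PRINT_EVENT("info",("undef [%u] %u 0x%x [%u] 0x%x", + tAttrId, tDataSz, *aDataPtr, tRecAttrId, aDataPtr)); tWorkingRecAttr->setUNDEFINED(); tWorkingRecAttr = tWorkingRecAttr->next(); if (tWorkingRecAttr == NULL) @@ -416,8 +429,8 @@ NdbEventOperationImpl::receive_event() if (tAttrId == tRecAttrId) { hasSomeData++; - DBUG_PRINT("info",("set [%u] %u 0x%x [%u] 0x%x", - tAttrId, tDataSz,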
*aDataPtr, tRecAttrId, aDataPtr)); + DBUG_PRINT_EVENT("info",("set [%u] %u 0x%x [%u] 0x%x", + tAttrId, tDataSz, *aDataPtr, tRecAttrId, aDataPtr)); receive_data(tWorkingRecAttr, aDataPtr, tDataSz); tWorkingRecAttr = tWorkingRecAttr->next(); @@ -467,10 +480,10 @@ NdbEventOperationImpl::receive_event() if (hasSomeData || !is_update) { - DBUG_RETURN(1); + DBUG_RETURN_EVENT(1); } - DBUG_RETURN(0); + DBUG_RETURN_EVENT(0); } NdbDictionary::Event::TableEvent @@ -714,7 +727,7 @@ print_std(const char* tag, const SubTableData * sdata, LinearSectionPtr ptr[3]) NdbEventOperation * NdbEventBuffer::nextEvent() { - DBUG_ENTER("NdbEventBuffer::nextEvent"); + DBUG_ENTER_EVENT("NdbEventBuffer::nextEvent"); #ifdef VM_TRACE const char *m_latest_command_save= m_latest_command; #endif @@ -764,7 +777,7 @@ NdbEventBuffer::nextEvent() #ifdef VM_TRACE m_latest_command= m_latest_command_save; #endif - DBUG_RETURN(op->m_facade); + DBUG_RETURN_EVENT(op->m_facade); } // the next event belonged to an event op that is no // longer valid, skip to next @@ -778,7 +791,7 @@ NdbEventBuffer::nextEvent() #ifdef VM_TRACE m_latest_command= m_latest_command_save; #endif - DBUG_RETURN(0); + DBUG_RETURN_EVENT(0); } void @@ -882,7 +895,7 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) return; } - DBUG_ENTER("NdbEventBuffer::execSUB_GCP_COMPLETE_REP"); + DBUG_ENTER_EVENT("NdbEventBuffer::execSUB_GCP_COMPLETE_REP"); const Uint64 gci= rep->gci; const Uint32 cnt= rep->gcp_complete_rep_count; @@ -901,7 +914,7 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) { ndbout << i << " - " << m_active_gci[i] << endl; } - DBUG_VOID_RETURN; + DBUG_VOID_RETURN_EVENT; } Uint32 old_cnt = bucket->m_gcp_complete_rep_count; @@ -949,7 +962,7 @@ NdbEventBuffer::execSUB_GCP_COMPLETE_REP(const SubGcpCompleteRep * const rep) } } - DBUG_VOID_RETURN; + DBUG_VOID_RETURN_EVENT; } void @@ -1145,18 +1158,17 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, const SubTableData * const sdata, LinearSectionPtr ptr[3]) { - DBUG_ENTER("NdbEventBuffer::insertDataL"); - + DBUG_ENTER_EVENT("NdbEventBuffer::insertDataL"); Uint64 gci= sdata->gci; if ( likely((Uint32)op->mi_type & 1 << (Uint32)sdata->operation) ) { Gci_container* bucket= find_bucket(&m_active_gci, gci); - DBUG_PRINT("info", ("data insertion in eventId %d", op->m_eventId)); - DBUG_PRINT("info", ("gci=%d tab=%d op=%d node=%d", - sdata->gci, sdata->tableId, sdata->operation, - sdata->req_nodeid)); + DBUG_PRINT_EVENT("info", ("data insertion in eventId %d", op->m_eventId)); + DBUG_PRINT_EVENT("info", ("gci=%d tab=%d op=%d node=%d", + sdata->gci, sdata->tableId, sdata->operation, + sdata->req_nodeid)); if (unlikely(bucket == 0)) { @@ -1164,7 +1176,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, * Already completed GCI... 
* Possible in case of resend during NF handling */ - DBUG_RETURN(0); + DBUG_RETURN_EVENT(0); } bool use_hash = @@ -1187,13 +1199,13 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, if (unlikely(data == 0)) { op->m_has_error = 2; - DBUG_RETURN(-1); + DBUG_RETURN_EVENT(-1); } if (unlikely(copy_data(sdata, ptr, data))) { op->m_has_error = 3; - DBUG_RETURN(-1); + DBUG_RETURN_EVENT(-1); } // add it to list and hash table bucket->m_data.append(data); @@ -1211,7 +1223,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, if (unlikely(merge_data(sdata, ptr, data))) { op->m_has_error = 3; - DBUG_RETURN(-1); + DBUG_RETURN_EVENT(-1); } } data->m_event_op = op; @@ -1219,22 +1231,22 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, { data->m_pkhash = hpos.pkhash; } - DBUG_RETURN(0); + DBUG_RETURN_EVENT(0); } #ifdef VM_TRACE if ((Uint32)op->m_eventImpl->mi_type & 1 << (Uint32)sdata->operation) { // XXX never reached - DBUG_PRINT("info",("Data arrived before ready eventId", op->m_eventId)); - DBUG_RETURN(0); + DBUG_PRINT_EVENT("info",("Data arrived before ready eventId", op->m_eventId)); + DBUG_RETURN_EVENT(0); } else { - DBUG_PRINT("info",("skipped")); - DBUG_RETURN(0); + DBUG_PRINT_EVENT("info",("skipped")); + DBUG_RETURN_EVENT(0); } #else - return 0; + DBUG_RETURN_EVENT(0); #endif } @@ -1242,7 +1254,7 @@ NdbEventBuffer::insertDataL(NdbEventOperationImpl *op, EventBufData* NdbEventBuffer::alloc_data() { - DBUG_ENTER("alloc_data"); + DBUG_ENTER_EVENT("alloc_data"); EventBufData* data = m_free_data; if (unlikely(data == 0)) @@ -1268,7 +1280,7 @@ NdbEventBuffer::alloc_data() m_available_data.m_tail ? m_available_data.m_tail->sdata->gci : 0); printf("m_used_data_count %d\n", m_used_data.m_count); #endif - DBUG_RETURN(0); // TODO handle this, overrun, or, skip? + DBUG_RETURN_EVENT(0); // TODO handle this, overrun, or, skip? 
} } @@ -1280,7 +1292,7 @@ NdbEventBuffer::alloc_data() assert(m_free_data_sz >= data->sz); #endif m_free_data_sz -= data->sz; - DBUG_RETURN(data); + DBUG_RETURN_EVENT(data); } // allocate initial or bigger memory area in EventBufData @@ -1328,36 +1340,36 @@ NdbEventBuffer::copy_data(const SubTableData * const sdata, LinearSectionPtr ptr[3], EventBufData* data) { - DBUG_ENTER("NdbEventBuffer::copy_data"); + DBUG_ENTER_EVENT("NdbEventBuffer::copy_data"); if (alloc_mem(data, ptr) != 0) - DBUG_RETURN(-1); + DBUG_RETURN_EVENT(-1); memcpy(data->sdata, sdata, sizeof(SubTableData)); int i; for (i = 0; i <= 2; i++) memcpy(data->ptr[i].p, ptr[i].p, ptr[i].sz << 2); - DBUG_RETURN(0); + DBUG_RETURN_EVENT(0); } static struct Ev_t { enum { - INS = NdbDictionary::Event::_TE_INSERT, - DEL = NdbDictionary::Event::_TE_DELETE, - UPD = NdbDictionary::Event::_TE_UPDATE, - NUL = NdbDictionary::Event::_TE_NUL, - ERR = 255 + enum_INS = NdbDictionary::Event::_TE_INSERT, + enum_DEL = NdbDictionary::Event::_TE_DELETE, + enum_UPD = NdbDictionary::Event::_TE_UPDATE, + enum_NUL = NdbDictionary::Event::_TE_NUL, + enum_ERR = 255 }; int t1, t2, t3; } ev_t[] = { - { Ev_t::INS, Ev_t::INS, Ev_t::ERR }, - { Ev_t::INS, Ev_t::DEL, Ev_t::NUL }, //ok - { Ev_t::INS, Ev_t::UPD, Ev_t::INS }, //ok - { Ev_t::DEL, Ev_t::INS, Ev_t::UPD }, //ok - { Ev_t::DEL, Ev_t::DEL, Ev_t::ERR }, - { Ev_t::DEL, Ev_t::UPD, Ev_t::ERR }, - { Ev_t::UPD, Ev_t::INS, Ev_t::ERR }, - { Ev_t::UPD, Ev_t::DEL, Ev_t::DEL }, //ok - { Ev_t::UPD, Ev_t::UPD, Ev_t::UPD } //ok + { Ev_t::enum_INS, Ev_t::enum_INS, Ev_t::enum_ERR }, + { Ev_t::enum_INS, Ev_t::enum_DEL, Ev_t::enum_NUL }, //ok + { Ev_t::enum_INS, Ev_t::enum_UPD, Ev_t::enum_INS }, //ok + { Ev_t::enum_DEL, Ev_t::enum_INS, Ev_t::enum_UPD }, //ok + { Ev_t::enum_DEL, Ev_t::enum_DEL, Ev_t::enum_ERR }, + { Ev_t::enum_DEL, Ev_t::enum_UPD, Ev_t::enum_ERR }, + { Ev_t::enum_UPD, Ev_t::enum_INS, Ev_t::enum_ERR }, + { Ev_t::enum_UPD, Ev_t::enum_DEL, Ev_t::enum_DEL }, //ok + { Ev_t::enum_UPD, Ev_t::enum_UPD, Ev_t::enum_UPD } //ok }; /* @@ -1406,14 +1418,14 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, LinearSectionPtr ptr2[3], EventBufData* data) { - DBUG_ENTER("NdbEventBuffer::merge_data"); + DBUG_ENTER_EVENT("NdbEventBuffer::merge_data"); Uint32 nkey = data->m_event_op->m_eventImpl->m_tableImpl->m_noOfKeys; int t1 = data->sdata->operation; int t2 = sdata->operation; - if (t1 == Ev_t::NUL) - DBUG_RETURN(copy_data(sdata, ptr2, data)); + if (t1 == Ev_t::enum_NUL) + DBUG_RETURN_EVENT(copy_data(sdata, ptr2, data)); Ev_t* tp = 0; int i; @@ -1423,7 +1435,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, break; } } - assert(tp != 0 && tp->t3 != Ev_t::ERR); + assert(tp != 0 && tp->t3 != Ev_t::enum_ERR); // save old data EventBufData olddata = *data; @@ -1441,7 +1453,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, if (loop == 1) { if (alloc_mem(data, ptr) != 0) - DBUG_RETURN(-1); + DBUG_RETURN_EVENT(-1); *data->sdata = *sdata; data->sdata->operation = tp->t3; } @@ -1465,7 +1477,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, } // merge after values, new version overrides - if (tp->t3 != Ev_t::DEL) + if (tp->t3 != Ev_t::enum_DEL) { AttributeHeader ah; Uint32 i = ptr[0].sz; @@ -1514,7 +1526,7 @@ NdbEventBuffer::merge_data(const SubTableData * const sdata, } // merge before values, old version overrides - if (tp->t3 != Ev_t::INS) + if (tp->t3 != Ev_t::enum_INS) { AttributeHeader ah; Uint32 k = 0; @@ -1558,7 +1570,7 @@ NdbEventBuffer::merge_data(const SubTableData * 
const sdata, // free old data NdbMem_Free((char*)olddata.memory); - DBUG_RETURN(0); + DBUG_RETURN_EVENT(0); } NdbEventOperationImpl * @@ -1581,11 +1593,11 @@ NdbEventBuffer::move_data() } if (!m_available_data.is_empty()) { - DBUG_ENTER("NdbEventBuffer::move_data"); + DBUG_ENTER_EVENT("NdbEventBuffer::move_data"); #ifdef VM_TRACE - DBUG_PRINT("exit",("m_available_data_count %u", m_available_data.m_count)); + DBUG_PRINT_EVENT("exit",("m_available_data_count %u", m_available_data.m_count)); #endif - DBUG_RETURN(m_available_data.m_head->m_event_op); + DBUG_RETURN_EVENT(m_available_data.m_head->m_event_op); } return 0; } diff --git a/storage/ndb/src/ndbapi/ndberror.c b/storage/ndb/src/ndbapi/ndberror.c index c6a25ced3d3..fc027721b23 100644 --- a/storage/ndb/src/ndbapi/ndberror.c +++ b/storage/ndb/src/ndbapi/ndberror.c @@ -411,7 +411,8 @@ ErrorBundle ErrorCodes[] = { { 1512, DMEC, SE, "File read error" }, { 1513, DMEC, IE, "Filegroup not online" }, - + { 773, DMEC, SE, "Out of string memory, please modify StringMemory config parameter" }, + /** * FunctionNotImplemented */ diff --git a/storage/ndb/test/ndbapi/testBackup.cpp b/storage/ndb/test/ndbapi/testBackup.cpp index da3c52cf4d2..810ec3260fd 100644 --- a/storage/ndb/test/ndbapi/testBackup.cpp +++ b/storage/ndb/test/ndbapi/testBackup.cpp @@ -193,7 +193,7 @@ runDDL(NDBT_Context* ctx, NDBT_Step* step){ } -int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){ +int runDropTablesRestart(NDBT_Context* ctx, NDBT_Step* step){ NdbRestarter restarter; Ndb* pNdb = GETNDB(step); @@ -201,7 +201,7 @@ int runRestartInitial(NDBT_Context* ctx, NDBT_Step* step){ const NdbDictionary::Table *tab = ctx->getTab(); pNdb->getDictionary()->dropTable(tab->getName()); - if (restarter.restartAll(true) != 0) + if (restarter.restartAll(false) != 0) return NDBT_FAILED; if (restarter.waitClusterStarted() != 0) @@ -406,6 +406,7 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){ // TEMPORARY FIX // To erase all tables from cache(s) // To be removed, maybe replaced by ndb.invalidate(); + runDropTable(ctx,step); { Bank bank(ctx->m_cluster_connection); @@ -416,8 +417,8 @@ int runRestoreBankAndVerify(NDBT_Context* ctx, NDBT_Step* step){ } // END TEMPORARY FIX - ndbout << "Performing initial restart" << endl; - if (restarter.restartAll(true) != 0) + ndbout << "Performing restart" << endl; + if (restarter.restartAll(false) != 0) return NDBT_FAILED; if (restarter.waitClusterStarted() != 0) @@ -465,12 +466,12 @@ TESTCASE("BackupOne", "Test that backup and restore works on one table \n" "1. Load table\n" "2. Backup\n" - "3. Restart -i\n" + "3. Drop tables and restart \n" "4. Restore\n" "5. 
Verify count and content of table\n"){ INITIALIZER(runLoadTable); INITIALIZER(runBackupOne); - INITIALIZER(runRestartInitial); + INITIALIZER(runDropTablesRestart); INITIALIZER(runRestoreOne); VERIFIER(runVerifyOne); FINALIZER(runClearTable); diff --git a/storage/ndb/test/ndbapi/test_event.cpp b/storage/ndb/test/ndbapi/test_event.cpp index 87065e754b8..a09f6d7c9c8 100644 --- a/storage/ndb/test/ndbapi/test_event.cpp +++ b/storage/ndb/test/ndbapi/test_event.cpp @@ -334,7 +334,7 @@ int runCreateShadowTable(NDBT_Context* ctx, NDBT_Step* step) table_shadow.setName(buf); // TODO should be removed // This should work wo/ next line - table_shadow.setNodeGroupIds(0, 0); + //table_shadow.setNodeGroupIds(0, 0); GETNDB(step)->getDictionary()->createTable(table_shadow); if (GETNDB(step)->getDictionary()->getTable(buf)) return NDBT_OK; diff --git a/storage/ndb/test/src/NdbBackup.cpp b/storage/ndb/test/src/NdbBackup.cpp index 9f65fe6b3bc..588f785c5a3 100644 --- a/storage/ndb/test/src/NdbBackup.cpp +++ b/storage/ndb/test/src/NdbBackup.cpp @@ -199,7 +199,6 @@ int NFDuringBackupM_codes[] = { 10003, 10004, - 10005, 10007, 10008, 10009, @@ -349,6 +348,7 @@ NdbBackup::NF(NdbRestarter& _restarter, int *NFDuringBackup_codes, const int sz, int FailS_codes[] = { + 10025, 10027, 10033 }; diff --git a/storage/ndb/tools/restore/Restore.cpp b/storage/ndb/tools/restore/Restore.cpp index c60cf782fc8..3752069a62f 100644 --- a/storage/ndb/tools/restore/Restore.cpp +++ b/storage/ndb/tools/restore/Restore.cpp @@ -16,6 +16,7 @@ #include "Restore.hpp" #include +#include #include #include @@ -23,6 +24,7 @@ #include #include #include +#include Uint16 Twiddle16(Uint16 in); // Byte shift 16-bit data Uint32 Twiddle32(Uint32 in); // Byte shift 32-bit data @@ -321,6 +323,7 @@ TableS::~TableS() delete allAttributesDesc[i]; } + // Parse dictTabInfo buffer and push back to vector storage bool RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len) @@ -336,8 +339,6 @@ RestoreMetaData::parseTableDescriptor(const Uint32 * data, Uint32 len) return false; debug << "parseTableInfo " << tableImpl->getName() << " done" << endl; - tableImpl->m_ng.clear(); - tableImpl->m_fragmentType = NdbDictionary::Object::FragAllSmall; TableS * table = new TableS(m_fileHeader.NdbVersion, tableImpl); if(table == NULL) { return false; } @@ -738,7 +739,7 @@ BackupFile::validateFooter(){ return true; } -bool RestoreDataIterator::readFragmentHeader(int & ret) +bool RestoreDataIterator::readFragmentHeader(int & ret, Uint32 *fragmentId) { BackupFormat::DataFile::FragmentHeader Header; @@ -780,7 +781,7 @@ bool RestoreDataIterator::readFragmentHeader(int & ret) m_count = 0; ret = 0; - + *fragmentId = Header.FragmentNo; return true; } // RestoreDataIterator::getNextFragment @@ -901,7 +902,7 @@ RestoreLogIterator::RestoreLogIterator(const RestoreMetaData & md) } const LogEntry * -RestoreLogIterator::getNextLogEntry(int & res) { +RestoreLogIterator::getNextLogEntry(int & res, bool *alloc_flag) { // Read record length typedef BackupFormat::LogFile::LogEntry LogE; @@ -925,7 +926,30 @@ RestoreLogIterator::getNextLogEntry(int & res) { res= 0; return 0; } - + if (m_metaData.getFileHeader().NdbVersion < NDBD_FRAGID_VERSION) + { + /* + FragId was introduced in LogEntry in version 5.1.6. + We set FragId to 0 in older versions (these versions + do not support restore of user defined partitioned + tables).
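+      In the old layout the FragId word is absent, so an old entry
+      read through the new LogEntry struct has its first data word
+      sitting in the FragId slot, and old Data[i] appears as
+      Data[i-1]. The copy below therefore forces FragId to 0 and
+      shifts every data word up one position before the entry is used.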
+ */ + int i; + LogE *tmpLogE = (LogE*)NdbMem_Allocate(data_len + 4); + if (!tmpLogE) + { + res = -2; + return 0; + } + tmpLogE->Length = logE->Length; + tmpLogE->TableId = logE->TableId; + tmpLogE->TriggerEvent = logE->TriggerEvent; + tmpLogE->FragId = 0; + for (i = 0; i < len - 3; i++) + tmpLogE->Data[i] = logE->Data[i-1]; + logE = tmpLogE; + *alloc_flag= true; + } logE->TableId= ntohl(logE->TableId); logE->TriggerEvent= ntohl(logE->TriggerEvent); @@ -960,6 +984,7 @@ RestoreLogIterator::getNextLogEntry(int & res) { AttributeHeader * ah = (AttributeHeader *)&logE->Data[0]; AttributeHeader *end = (AttributeHeader *)&logE->Data[len - 2]; AttributeS * attr; + m_logEntry.m_frag_id = ntohl(logE->FragId); while(ah < end){ attr= m_logEntry.add_attr(); if(attr == NULL) { diff --git a/storage/ndb/tools/restore/Restore.hpp b/storage/ndb/tools/restore/Restore.hpp index 2c821c998bc..f8035662fd6 100644 --- a/storage/ndb/tools/restore/Restore.hpp +++ b/storage/ndb/tools/restore/Restore.hpp @@ -225,6 +225,8 @@ public: TableS& operator=(TableS& org) ; }; // TableS; +class RestoreLogIterator; + class BackupFile { protected: FILE * m_file; @@ -320,7 +322,7 @@ public: ~RestoreDataIterator() {}; // Read data file fragment header - bool readFragmentHeader(int & res); + bool readFragmentHeader(int & res, Uint32 *fragmentId); bool validateFragmentFooter(); const TupleS *getNextTuple(int & res); @@ -333,8 +335,9 @@ public: LE_DELETE, LE_UPDATE }; + Uint32 m_frag_id; EntryType m_type; - TableS * m_table; + TableS * m_table; Vector<AttributeS*> m_values; Vector<AttributeS*> m_values_e; AttributeS *add_attr() { @@ -378,7 +381,7 @@ public: RestoreLogIterator(const RestoreMetaData &); virtual ~RestoreLogIterator() {}; - const LogEntry * getNextLogEntry(int & res); + const LogEntry * getNextLogEntry(int & res, bool *alloc_flag); }; NdbOut& operator<<(NdbOut& ndbout, const TableS&); diff --git a/storage/ndb/tools/restore/consumer.hpp b/storage/ndb/tools/restore/consumer.hpp index f40492a76a1..5a09bf54270 100644 --- a/storage/ndb/tools/restore/consumer.hpp +++ b/storage/ndb/tools/restore/consumer.hpp @@ -18,6 +18,7 @@ #define CONSUMER_HPP #include "Restore.hpp" +#include "ndb_nodegroup_map.h" #include "../../../../sql/ha_ndbcluster_tables.h" extern const char *Ndb_apply_table; @@ -29,13 +30,15 @@ public: virtual bool object(Uint32 tableType, const void*) { return true;} virtual bool table(const TableS &){return true;} virtual bool endOfTables() { return true; } - virtual void tuple(const TupleS &){} + virtual void tuple(const TupleS &, Uint32 fragId){} virtual void tuple_free(){} virtual void endOfTuples(){} virtual void logEntry(const LogEntry &){} virtual void endOfLogEntrys(){} virtual bool finalize_table(const TableS &){return true;} virtual bool update_apply_status(const RestoreMetaData &metaData){return true;} + NODE_GROUP_MAP *m_nodegroup_map; + uint m_nodegroup_map_len; }; #endif diff --git a/storage/ndb/tools/restore/consumer_printer.cpp b/storage/ndb/tools/restore/consumer_printer.cpp index 36b2bf29a64..b4faea6b56f 100644 --- a/storage/ndb/tools/restore/consumer_printer.cpp +++ b/storage/ndb/tools/restore/consumer_printer.cpp @@ -28,7 +28,7 @@ BackupPrinter::table(const TableS & tab) } void -BackupPrinter::tuple(const TupleS & tup) +BackupPrinter::tuple(const TupleS & tup, Uint32 fragId) { m_dataCount++; if (m_print || m_print_data)
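Every consumer now receives the originating fragment id together with each tuple. A minimal consumer sketch (hypothetical class, illustration only; BackupConsumer and MAX_NDB_PARTITIONS as in the patch, memset from <string.h>):

class FragmentCounter : public BackupConsumer
{
public:
  FragmentCounter() { memset(m_count, 0, sizeof(m_count)); }
  // Count restored tuples per source fragment.
  virtual void tuple(const TupleS &, Uint32 fragId)
  {
    if (fragId < MAX_NDB_PARTITIONS)
      m_count[fragId]++;
  }
private:
  Uint32 m_count[MAX_NDB_PARTITIONS];
};

diff --git a/storage/ndb/tools/restore/consumer_printer.hpp b/storage/ndb/tools/restore/consumer_printer.hpp index 2433a8511aa..ead950213ec 100644 --- a/storage/ndb/tools/restore/consumer_printer.hpp +++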
b/storage/ndb/tools/restore/consumer_printer.hpp @@ -23,8 +23,12 @@ class BackupPrinter : public BackupConsumer { NdbOut & m_ndbout; public: - BackupPrinter(NdbOut & out = ndbout) : m_ndbout(out) + BackupPrinter(NODE_GROUP_MAP *ng_map, + uint ng_map_len, + NdbOut & out = ndbout) : m_ndbout(out) { + m_nodegroup_map = ng_map; + m_nodegroup_map_len= ng_map_len; m_print = false; m_print_log = false; m_print_data = false; @@ -37,7 +41,7 @@ public: #ifdef USE_MYSQL virtual bool table(const TableS &, MYSQL* mysqlp); #endif - virtual void tuple(const TupleS &); + virtual void tuple(const TupleS &, Uint32 fragId); virtual void logEntry(const LogEntry &); virtual void endOfTuples() {}; virtual void endOfLogEntrys(); diff --git a/storage/ndb/tools/restore/consumer_restore.cpp b/storage/ndb/tools/restore/consumer_restore.cpp index 2fc7b193199..0d177366a57 100644 --- a/storage/ndb/tools/restore/consumer_restore.cpp +++ b/storage/ndb/tools/restore/consumer_restore.cpp @@ -16,6 +16,7 @@ #include #include "consumer_restore.hpp" +#include #include extern my_bool opt_core; @@ -25,6 +26,8 @@ extern FilteredNdbOut info; extern FilteredNdbOut debug; static void callback(int, NdbTransaction*, void*); +static Uint32 get_part_id(const NdbDictionary::Table *table, + Uint32 hash_value); extern const char * g_connect_string; bool @@ -152,6 +155,284 @@ BackupRestore::finalize_table(const TableS & table){ return ret; } + +static bool default_nodegroups(NdbDictionary::Table *table) +{ + Uint16 *node_groups = (Uint16*)table->getFragmentData(); + Uint32 no_parts = table->getFragmentDataLen() >> 1; + Uint32 i; + + if (node_groups[0] != 0) + return false; + for (i = 1; i < no_parts; i++) + { + if (node_groups[i] != UNDEF_NODEGROUP) + return false; + } + return true; +} + + +static Uint32 get_no_fragments(Uint64 max_rows, Uint32 no_nodes) +{ + Uint32 i = 0; + Uint32 acc_row_size = 27; + Uint32 acc_fragment_size = 512*1024*1024; + Uint32 no_parts= (max_rows*acc_row_size)/acc_fragment_size + 1; + Uint32 reported_parts = no_nodes; + while (reported_parts < no_parts && ++i < 4 && + (reported_parts + no_parts) < MAX_NDB_PARTITIONS) + reported_parts+= no_nodes; + if (reported_parts < no_parts) + { + err << "Table will be restored but will not be able to handle the maximum"; + err << " amount of rows as requested" << endl; + } + return reported_parts; +} + + +static void set_default_nodegroups(NdbDictionary::Table *table) +{ + Uint32 no_parts = table->getFragmentCount(); + Uint16 node_group[MAX_NDB_PARTITIONS]; + Uint32 i; + + node_group[0] = 0; + for (i = 1; i < no_parts; i++) + { + node_group[i] = UNDEF_NODEGROUP; + } + table->setFragmentData((const void*)node_group, 2 * no_parts); +} + +Uint32 BackupRestore::map_ng(Uint32 ng) +{ + NODE_GROUP_MAP *ng_map = m_nodegroup_map; + + if (ng == UNDEF_NODEGROUP || + ng_map[ng].map_array[0] == UNDEF_NODEGROUP) + { + return ng; + } + else + { + Uint32 new_ng; + Uint32 curr_inx = ng_map[ng].curr_index; + Uint32 new_curr_inx = curr_inx + 1; + + assert(ng < MAX_NDB_PARTITIONS); + assert(curr_inx < MAX_MAPS_PER_NODE_GROUP); + assert(new_curr_inx < MAX_MAPS_PER_NODE_GROUP); + + if (new_curr_inx >= MAX_MAPS_PER_NODE_GROUP) + new_curr_inx = 0; + else if (ng_map[ng].map_array[new_curr_inx] == UNDEF_NODEGROUP) + new_curr_inx = 0; + new_ng = ng_map[ng].map_array[curr_inx]; + ng_map[ng].curr_index = new_curr_inx; + return new_ng; + } +} + + +bool BackupRestore::map_nodegroups(Uint16 *ng_array, Uint32 no_parts) +{ + Uint32 i; + bool mapped = FALSE; + DBUG_ENTER("map_nodegroups"); + + assert(no_parts < 
MAX_NDB_PARTITIONS); + for (i = 0; i < no_parts; i++) + { + Uint32 ng; + ng = map_ng((Uint32)ng_array[i]); + if (ng != ng_array[i]) + mapped = TRUE; + ng_array[i] = ng; + } + DBUG_RETURN(mapped); +} + + +static void copy_byte(const char **data, char **new_data, uint *len) +{ + **new_data = **data; + (*data)++; + (*new_data)++; + (*len)++; +} + + +bool BackupRestore::search_replace(char *search_str, char **new_data, + const char **data, const char *end_data, + uint *new_data_len) +{ + uint search_str_len = strlen(search_str); + uint inx = 0; + bool in_delimiters = FALSE; + bool escape_char = FALSE; + char start_delimiter = 0; + DBUG_ENTER("search_replace"); + + do + { + char c = **data; + copy_byte(data, new_data, new_data_len); + if (escape_char) + { + escape_char = FALSE; + } + else if (in_delimiters) + { + if (c == start_delimiter) + in_delimiters = FALSE; + } + else if (c == '\'' || c == '\"') + { + in_delimiters = TRUE; + start_delimiter = c; + } + else if (c == '\\') + { + escape_char = TRUE; + } + else if (c == search_str[inx]) + { + inx++; + if (inx == search_str_len) + { + bool found = FALSE; + uint number = 0; + while (*data != end_data) + { + if (isdigit(**data)) + { + found = TRUE; + number = (10 * number) + (**data - '0'); + if (number > MAX_NDB_NODES) + break; + } + else if (found) + { + /* + After long and tedious preparations we have actually found + a node group identifier to convert. We'll use the mapping + table created for node groups and then insert the new number + instead of the old number. + */ + uint temp = map_ng(number); + int no_digits = 0; + char digits[10]; + do + { + digits[no_digits] = (char)('0' + temp % 10); + no_digits++; + temp/=10; + } while (temp != 0); + for (no_digits--; no_digits >= 0; no_digits--) + { + **new_data = digits[no_digits]; + *new_data_len+=1; + } + DBUG_RETURN(FALSE); + } + else + break; + (*data)++; + } + DBUG_RETURN(TRUE); + } + } + else + inx = 0; + } while (*data < end_data); + DBUG_RETURN(FALSE); +} + +bool BackupRestore::map_in_frm(char *new_data, const char *data, + uint data_len, uint *new_data_len) +{ + const char *end_data= data + data_len; + const char *end_part_data; + const char *part_data; + char *extra_ptr; + uint start_key_definition_len = uint2korr(data + 6); + uint key_definition_len = uint4korr(data + 47); + uint part_info_len; + DBUG_ENTER("map_in_frm"); + + if (data_len < 4096) goto error; + extra_ptr = (char*)data + start_key_definition_len + key_definition_len; + if ((int)data_len < ((extra_ptr - data) + 2)) goto error; + extra_ptr = extra_ptr + 2 + uint2korr(extra_ptr); + if ((int)data_len < ((extra_ptr - data) + 2)) goto error; + extra_ptr = extra_ptr + 2 + uint2korr(extra_ptr); + if ((int)data_len < ((extra_ptr - data) + 4)) goto error; + part_info_len = uint4korr(extra_ptr); + part_data = extra_ptr + 4; + if ((int)data_len < ((part_data + part_info_len) - data)) goto error; + + do + { + copy_byte(&data, &new_data, new_data_len); + } while (data < part_data); + end_part_data = part_data + part_info_len; + do + { + if (search_replace((char*)" NODEGROUP = ", &new_data, &data, + end_part_data, new_data_len)) + goto error; + } while (data != end_part_data); + do + { + copy_byte(&data, &new_data, new_data_len); + } while (data < end_data); + DBUG_RETURN(FALSE); +error: + DBUG_RETURN(TRUE); +}
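+
+/*
+  Illustration of the round-robin performed by map_ng() above: with a
+  node group map of (0,0)(0,1), successive lookups for source node
+  group 0 alternate between the listed destinations,
+
+    map_ng(0) -> 0, map_ng(0) -> 1, map_ng(0) -> 0, ...
+
+  so a redistribution that lists several destinations for one source
+  spreads the remapped fragments evenly across the new node groups.
+*/
+
+bool BackupRestore::translate_frm(NdbDictionary::Table *table)
+{
+  const void *pack_data, *data, *new_pack_data;
+  char *new_data;
+  uint data_len, pack_len, new_data_len, new_pack_len;
+  uint no_parts, extra_growth;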
DBUG_ENTER("translate_frm"); + + pack_data = table->getFrmData(); + no_parts = table->getFragmentCount(); + /* + Add max 4 characters per partition to handle worst case + of mapping from single digit to 5-digit number. + Fairly future-proof, ok up to 99999 node groups. + */ + extra_growth = no_parts * 4; + if (unpackfrm(&data, &data_len, pack_data)) + { + DBUG_RETURN(TRUE); + } + if ((new_data = my_malloc(data_len + extra_growth, MYF(0)))) + { + DBUG_RETURN(TRUE); + } + if (map_in_frm(new_data, (const char*)data, data_len, &new_data_len)) + { + my_free(new_data, MYF(0)); + DBUG_RETURN(TRUE); + } + if (packfrm((const void*)new_data, new_data_len, + &new_pack_data, &new_pack_len)) + { + my_free(new_data, MYF(0)); + DBUG_RETURN(TRUE); + } + table->setFrm(new_pack_data, new_pack_len); + DBUG_RETURN(FALSE); +} + #include bool @@ -190,7 +471,7 @@ BackupRestore::object(Uint32 type, const void * ptr) NdbDictionary::Tablespace* currptr = new NdbDictionary::Tablespace(curr); NdbDictionary::Tablespace * null = 0; m_tablespaces.set(currptr, id, null); - debug << "Retreived tablspace: " << currptr->getName() + debug << "Retreived tablespace: " << currptr->getName() << " oldid: " << id << " newid: " << currptr->getObjectId() << " " << (void*)currptr << endl; return true; @@ -348,7 +629,7 @@ BackupRestore::table(const TableS & table){ return true; const char * name = table.getTableName(); - + /** * Ignore blob tables */ @@ -372,7 +653,8 @@ BackupRestore::table(const TableS & table){ m_ndb->setSchemaName(split[1].c_str()); NdbDictionary::Dictionary* dict = m_ndb->getDictionary(); - if(m_restore_meta){ + if(m_restore_meta) + { NdbDictionary::Table copy(*table.m_dictTable); copy.setName(split[2].c_str()); @@ -385,10 +667,57 @@ BackupRestore::table(const TableS & table){ copy.setTablespace(* ts); } + if (copy.getDefaultNoPartitionsFlag()) + { + /* + Table was defined with default number of partitions. We can restore + it with whatever is the default in this cluster. + We use the max_rows parameter in calculating the default number. + */ + Uint32 no_nodes = m_cluster_connection->no_db_nodes(); + copy.setFragmentCount(get_no_fragments(copy.getMaxRows(), + no_nodes)); + set_default_nodegroups(©); + } + else + { + /* + Table was defined with specific number of partitions. It should be + restored with the same number of partitions. It will either be + restored in the same node groups as when backup was taken or by + using a node group map supplied to the ndb_restore program. + */ + Uint16 *ng_array = (Uint16*)copy.getFragmentData(); + Uint16 no_parts = copy.getFragmentCount(); + if (map_nodegroups(ng_array, no_parts)) + { + if (translate_frm(©)) + { + err << "Create table " << table.getTableName() << " failed: "; + err << "Translate frm error" << endl; + return false; + } + } + copy.setFragmentData((const void *)ng_array, no_parts << 1); + } + if (dict->createTable(copy) == -1) { err << "Create table " << table.getTableName() << " failed: " - << dict->getNdbError() << endl; + << dict->getNdbError() << endl; + if (dict->getNdbError().code == 771) + { + /* + The user on the cluster where the backup was created had specified + specific node groups for partitions. Some of these node groups + didn't exist on this cluster. We will warn the user of this and + inform him of his option. + */ + err << "The node groups defined in the table didn't exist in this"; + err << " cluster." 
<< endl << "There is an option to use the"; + err << " the parameter ndb-nodegroup-map to define a mapping from"; + err << endl << "the old nodegroups to new nodegroups" << endl; + } return false; } info << "Successfully restored table " << table.getTableName()<< endl ; @@ -503,7 +832,7 @@ BackupRestore::endOfTables(){ return true; } -void BackupRestore::tuple(const TupleS & tup) +void BackupRestore::tuple(const TupleS & tup, Uint32 fragmentId) { if (!m_restore) return; @@ -523,6 +852,7 @@ void BackupRestore::tuple(const TupleS & tup) m_free_callback = cb->next; cb->retries = 0; + cb->fragId = fragmentId; cb->tup = tup; // must do copy! tuple_a(cb); @@ -530,6 +860,7 @@ void BackupRestore::tuple(const TupleS & tup) void BackupRestore::tuple_a(restore_callback_t *cb) { + Uint32 partition_id = cb->fragId; while (cb->retries < 10) { /** @@ -543,6 +874,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb) m_ndb->sendPollNdb(3000, 1); continue; } + err << "Cannot start transaction" << endl; exitHandler(); } // if @@ -555,6 +887,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb) { if (errorHandler(cb)) continue; + err << "Cannot get operation: " << cb->connection->getNdbError() << endl; exitHandler(); } // if @@ -562,9 +895,37 @@ void BackupRestore::tuple_a(restore_callback_t *cb) { if (errorHandler(cb)) continue; + err << "Error defining op: " << cb->connection->getNdbError() << endl; exitHandler(); } // if - + + if (table->getFragmentType() == NdbDictionary::Object::UserDefined) + { + if (table->getDefaultNoPartitionsFlag()) + { + /* + This can only happen for HASH partitioning with + user defined hash function where user hasn't + specified the number of partitions and we + have to calculate it. We use the hash value + stored in the record to calculate the partition + to use. + */ + int i = tup.getNoOfAttributes() - 1; + const AttributeData *attr_data = tup.getData(i); + Uint32 hash_value = *attr_data->u_int32_value; + op->setPartitionId(get_part_id(table, hash_value)); + } + else + { + /* + Either RANGE or LIST (with or without subparts) + OR HASH partitioning with user defined hash + function but with fixed set of partitions. + */ + op->setPartitionId(partition_id); + } + } int ret = 0; for (int j = 0; j < 2; j++) { @@ -607,6 +968,7 @@ void BackupRestore::tuple_a(restore_callback_t *cb) { if (errorHandler(cb)) continue; + err << "Error defining op: " << cb->connection->getNdbError() << endl; exitHandler(); } @@ -679,30 +1041,28 @@ bool BackupRestore::errorHandler(restore_callback_t *cb) switch(error.status) { case NdbError::Success: + err << "Success error: " << error << endl; return false; // ERROR! - break; case NdbError::TemporaryError: err << "Temporary error: " << error << endl; NdbSleep_MilliSleep(sleepTime); return true; // RETRY - break; case NdbError::UnknownResult: - err << error << endl; + err << "Unknown: " << error << endl; return false; // ERROR! 
- break; default: case NdbError::PermanentError: //ERROR - err << error << endl; + err << "Permanent: " << error << endl; return false; - break; } + err << "No error status" << endl; return false; } @@ -736,6 +1096,35 @@ BackupRestore::endOfTuples() tuple_free(); } +static bool use_part_id(const NdbDictionary::Table *table) +{ + if (table->getDefaultNoPartitionsFlag() && + (table->getFragmentType() == NdbDictionary::Object::UserDefined)) + return false; + else + return true; +} + +static Uint32 get_part_id(const NdbDictionary::Table *table, + Uint32 hash_value) +{ + Uint32 no_frags = table->getFragmentCount(); + + if (table->getLinearFlag()) + { + Uint32 part_id; + Uint32 mask = 1; + while (no_frags > mask) mask <<= 1; + mask--; + part_id = hash_value & mask; + if (part_id >= no_frags) + part_id = hash_value & (mask >> 1); + return part_id; + } + else + return (hash_value % no_frags); +} + void BackupRestore::logEntry(const LogEntry & tup) { @@ -781,7 +1170,19 @@ BackupRestore::logEntry(const LogEntry & tup) err << "Error defining op: " << trans->getNdbError() << endl; exitHandler(); } // if - + + if (table->getFragmentType() == NdbDictionary::Object::UserDefined) + { + if (table->getDefaultNoPartitionsFlag()) + { + const AttributeS * attr = tup[tup.size()-1]; + Uint32 hash_value = *(Uint32*)attr->Data.string_value; + op->setPartitionId(get_part_id(table, hash_value)); + } + else + op->setPartitionId(tup.m_frag_id); + } + Bitmask<4096> keys; for (Uint32 i= 0; i < tup.size(); i++) { diff --git a/storage/ndb/tools/restore/consumer_restore.hpp b/storage/ndb/tools/restore/consumer_restore.hpp index 73aabdb3acf..4389f71724e 100644 --- a/storage/ndb/tools/restore/consumer_restore.hpp +++ b/storage/ndb/tools/restore/consumer_restore.hpp @@ -19,12 +19,15 @@ #include "consumer.hpp" +bool map_nodegroups(Uint16 *ng_array, Uint32 no_parts); + struct restore_callback_t { class BackupRestore *restore; class TupleS tup; class NdbTransaction *connection; int retries; int error_code; + Uint32 fragId; restore_callback_t *next; }; @@ -32,10 +35,14 @@ struct restore_callback_t { class BackupRestore : public BackupConsumer { public: - BackupRestore(Uint32 parallelism=1) + BackupRestore(NODE_GROUP_MAP *ng_map, + uint ng_map_len, + Uint32 parallelism=1) { m_ndb = 0; m_cluster_connection = 0; + m_nodegroup_map = ng_map; + m_nodegroup_map_len = ng_map_len; m_logCount = m_dataCount = 0; m_restore = false; m_restore_meta = false; @@ -54,7 +61,7 @@ public: virtual bool object(Uint32 type, const void* ptr); virtual bool table(const TableS &); virtual bool endOfTables(); - virtual void tuple(const TupleS &); + virtual void tuple(const TupleS &, Uint32 fragId); virtual void tuple_free(); virtual void tuple_a(restore_callback_t *cb); virtual void cback(int result, restore_callback_t *cb); @@ -66,6 +73,15 @@ public: virtual bool finalize_table(const TableS &); virtual bool update_apply_status(const RestoreMetaData &metaData); void connectToMysql(); + bool map_in_frm(char *new_data, const char *data, + uint data_len, uint *new_data_len); + bool search_replace(char *search_str, char **new_data, + const char **data, const char *end_data, + uint *new_data_len); + bool map_nodegroups(Uint16 *ng_array, Uint32 no_parts); + Uint32 map_ng(Uint32 ng); + bool translate_frm(NdbDictionary::Table *table); + Ndb * m_ndb; Ndb_cluster_connection * m_cluster_connection; bool m_restore; diff --git a/storage/ndb/tools/restore/consumer_restorem.cpp b/storage/ndb/tools/restore/consumer_restorem.cpp index 56179a60ab0..fee64891f34 100644 --- 
a/storage/ndb/tools/restore/consumer_restorem.cpp +++ b/storage/ndb/tools/restore/consumer_restorem.cpp @@ -211,7 +211,7 @@ BackupRestore::table(const TableS & table){ return true; } -void BackupRestore::tuple(const TupleS & tup) +void BackupRestore::tuple(const TupleS & tup, Uint32 fragId) { if (!m_restore) { @@ -225,6 +225,7 @@ void BackupRestore::tuple(const TupleS & tup) { m_free_callback = cb->next; cb->retries = 0; + cb->fragId = fragId; cb->tup = &tup; tuple_a(cb); } diff --git a/storage/ndb/tools/restore/ndb_nodegroup_map.h b/storage/ndb/tools/restore/ndb_nodegroup_map.h new file mode 100644 index 00000000000..1bc658c44e6 --- /dev/null +++ b/storage/ndb/tools/restore/ndb_nodegroup_map.h @@ -0,0 +1,35 @@ +/* Copyright (C) 2003 MySQL AB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ + +/** + * @file ndb_nodegroup_map.h + * + * Declarations of data types for node group map + */ + +#ifndef NDB_NODEGROUP_MAP_H +#define NDB_NODEGROUP_MAP_H + +#define MAX_MAPS_PER_NODE_GROUP 4 +#define MAX_NODE_GROUP_MAPS 128 +typedef struct node_group_map +{ + uint no_maps; + uint curr_index; + uint16 map_array[MAX_MAPS_PER_NODE_GROUP]; +} NODE_GROUP_MAP; + +#endif diff --git a/storage/ndb/tools/restore/restore_main.cpp b/storage/ndb/tools/restore/restore_main.cpp index 4ca26c7c683..11687860bf5 100644 --- a/storage/ndb/tools/restore/restore_main.cpp +++ b/storage/ndb/tools/restore/restore_main.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -37,6 +38,11 @@ static Vector g_consumers; static const char* ga_backupPath = "." DIR_SEPARATOR; +static const char *opt_nodegroup_map_str= 0; +static unsigned opt_nodegroup_map_len= 0; +static NODE_GROUP_MAP opt_nodegroup_map[MAX_NODE_GROUP_MAPS]; +#define OPT_NDB_NODEGROUP_MAP 'z' + NDB_STD_OPTS_VARS; /** @@ -107,9 +113,124 @@ static struct my_option my_long_options[] = "Experimental. Do not ignore system table during restore.", (gptr*) &ga_dont_ignore_systab_0, (gptr*) &ga_dont_ignore_systab_0, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0 }, + { "ndb-nodegroup-map", OPT_NDB_NODEGROUP_MAP, + "Nodegroup map for ndbcluster. 
+
+static char* analyse_one_map(char *map_str, uint16 *source, uint16 *dest)
+{
+  char *end_ptr;
+  int number;
+  DBUG_ENTER("analyse_one_map");
+  /*
+    Search for pattern ( source_ng , dest_ng )
+  */
+
+  while (isspace(*map_str)) map_str++;
+
+  if (*map_str != '(')
+  {
+    DBUG_RETURN(NULL);
+  }
+  map_str++;
+
+  while (isspace(*map_str)) map_str++;
+
+  number= strtol(map_str, &end_ptr, 10);
+  if (!end_ptr || number < 0 || number >= MAX_NODE_GROUP_MAPS)
+  {
+    DBUG_RETURN(NULL);
+  }
+  *source= (uint16)number;
+  map_str= end_ptr;
+
+  while (isspace(*map_str)) map_str++;
+
+  if (*map_str != ',')
+  {
+    DBUG_RETURN(NULL);
+  }
+  map_str++;
+
+  number= strtol(map_str, &end_ptr, 10);
+  if (!end_ptr || number < 0 || number >= UNDEF_NODEGROUP)
+  {
+    DBUG_RETURN(NULL);
+  }
+  *dest= (uint16)number;
+  map_str= end_ptr;
+
+  if (*map_str != ')')
+  {
+    DBUG_RETURN(NULL);
+  }
+  map_str++;
+
+  while (isspace(*map_str)) map_str++;
+  DBUG_RETURN(map_str);
+}
+
+static bool insert_ng_map(NODE_GROUP_MAP *ng_map,
+                          uint16 source_ng, uint16 dest_ng)
+{
+  uint index= source_ng;
+  uint ng_index= ng_map[index].no_maps;
+
+  opt_nodegroup_map_len++;
+  if (ng_index >= MAX_MAPS_PER_NODE_GROUP)
+    return true;
+  ng_map[index].no_maps++;
+  ng_map[index].map_array[ng_index]= dest_ng;
+  return false;
+}
+
+static void init_nodegroup_map()
+{
+  uint i,j;
+  NODE_GROUP_MAP *ng_map = &opt_nodegroup_map[0];
+
+  for (i = 0; i < MAX_NODE_GROUP_MAPS; i++)
+  {
+    ng_map[i].no_maps= 0;
+    for (j= 0; j < MAX_MAPS_PER_NODE_GROUP; j++)
+      ng_map[i].map_array[j]= UNDEF_NODEGROUP;
+  }
+}
+
+static bool analyse_nodegroup_map(const char *ng_map_str,
+                                  NODE_GROUP_MAP *ng_map)
+{
+  uint16 source_ng, dest_ng;
+  char *local_str= (char*)ng_map_str;
+  DBUG_ENTER("analyse_nodegroup_map");
+
+  do
+  {
+    if (!local_str)
+    {
+      DBUG_RETURN(TRUE);
+    }
+    local_str= analyse_one_map(local_str, &source_ng, &dest_ng);
+    if (!local_str)
+    {
+      DBUG_RETURN(TRUE);
+    }
+    if (insert_ng_map(ng_map, source_ng, dest_ng))
+    {
+      DBUG_RETURN(TRUE);
+    }
+    if (!(*local_str))
+      break;
+  } while (TRUE);
+  DBUG_RETURN(FALSE);
+}
+
 static void short_usage_sub(void)
 {
   printf("Usage: %s [OPTIONS] [<path to backup files>]\n", my_progname);
 }
@@ -136,6 +257,7 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
       printf("Error in --nodeid,-n setting, see --help\n");
       exit(NDBT_ProgramExit(NDBT_WRONGARGS));
     }
+    info << "Nodeid = " << ga_nodeId << endl;
     break;
   case 'b':
     if (ga_backupId == 0)
     {
       printf("Error in --backupid,-b setting, see --help\n");
       exit(NDBT_ProgramExit(NDBT_WRONGARGS));
     }
+    info << "Backup Id = " << ga_backupId << endl;
+    break;
+  case OPT_NDB_NODEGROUP_MAP:
+    /*
+      This option is used to set a map from nodegroup in original cluster
+      to nodegroup in new cluster.
+    */
+    opt_nodegroup_map_len= 0;
+    info << "Analyse node group map" << endl;
+    if (analyse_nodegroup_map(opt_nodegroup_map_str,
+                              &opt_nodegroup_map[0]))
+    {
+      exit(NDBT_ProgramExit(NDBT_WRONGARGS));
+    }
     break;
   }
   return 0;
 }
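The grammar analyse_one_map() accepts is simply a list of parenthesized integer pairs with optional whitespace. A minimal standalone check of that grammar; this is a hypothetical test driver, not part of the patch, and it re-implements the parse loop in miniature rather than calling the static functions above:

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>

static const char *parse_pair(const char *s, int *src, int *dst)
{
  char *end;
  while (isspace((unsigned char)*s)) s++;
  if (*s++ != '(') return NULL;
  *src = (int)strtol(s, &end, 10);        /* source nodegroup */
  if (end == s) return NULL;
  s = end;
  while (isspace((unsigned char)*s)) s++;
  if (*s++ != ',') return NULL;
  *dst = (int)strtol(s, &end, 10);        /* destination nodegroup */
  if (end == s) return NULL;
  s = end;
  while (isspace((unsigned char)*s)) s++;
  if (*s++ != ')') return NULL;
  while (isspace((unsigned char)*s)) s++;
  return s;
}

int main()
{
  const char *map = "(0, 2) (1, 3)";
  int src, dst;
  while (*map && (map = parse_pair(map, &src, &dst)) != NULL)
    printf("source ng %d -> dest ng %d\n", src, dst);
  return map ? 0 : 1;   /* non-NULL means the whole string parsed */
}

So "(0,1)(1,0)" swaps nodegroups 0 and 1, and repeating a source, e.g. "(0,1)(0,2)", gives one source group several destinations, bounded by MAX_MAPS_PER_NODE_GROUP.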
@@ -150,18 +286,55 @@
 bool
 readArguments(int *pargc, char*** pargv)
 {
+  Uint32 i;
+  debug << "Load defaults" << endl;
   const char *load_default_groups[]= { "mysql_cluster","ndb_restore",0 };
+
+  init_nodegroup_map();
   load_defaults("my",load_default_groups,pargc,pargv);
+  debug << "handle_options" << endl;
   if (handle_options(pargc, pargv, my_long_options, get_one_option))
   {
     exit(NDBT_ProgramExit(NDBT_WRONGARGS));
   }
+  for (i = 0; i < MAX_NODE_GROUP_MAPS; i++)
+    opt_nodegroup_map[i].curr_index = 0;
 
-  BackupPrinter* printer = new BackupPrinter();
+#if 0
+  /*
+    Test code written to verify nodegroup mapping
+  */
+  printf("Handled options successfully\n");
+  Uint16 map_ng[16];
+  Uint32 j;
+  for (j = 0; j < 4; j++)
+  {
+    for (i = 0; i < 4 ; i++)
+      map_ng[i] = i;
+    map_nodegroups(&map_ng[0], (Uint32)4);
+    for (i = 0; i < 4 ; i++)
+      printf("NG %u mapped to %u \n", i, map_ng[i]);
+  }
+  for (j = 0; j < 4; j++)
+  {
+    for (i = 0; i < 8 ; i++)
+      map_ng[i] = i >> 1;
+    map_nodegroups(&map_ng[0], (Uint32)8);
+    for (i = 0; i < 8 ; i++)
+      printf("NG %u mapped to %u \n", i >> 1, map_ng[i]);
+  }
+  exit(NDBT_ProgramExit(NDBT_WRONGARGS));
+#endif
+
+  BackupPrinter* printer = new BackupPrinter(opt_nodegroup_map,
+                                             opt_nodegroup_map_len);
   if (printer == NULL)
     return false;
 
-  BackupRestore* restore = new BackupRestore(ga_nParallelism);
+  BackupRestore* restore = new BackupRestore(opt_nodegroup_map,
+                                             opt_nodegroup_map_len,
+                                             ga_nParallelism);
   if (restore == NULL)
   {
     delete printer;
@@ -225,7 +398,7 @@ readArguments(int *pargc, char*** pargv)
   {
     ga_backupPath = *pargv[0];
   }
-
+  info << "backup path = " << ga_backupPath << endl;
   return true;
 }
 
@@ -271,6 +444,7 @@
 main(int argc, char** argv)
 {
   NDB_INIT(argv[0]);
+  debug << "Start readArguments" << endl;
   if (!readArguments(&argc, &argv))
   {
     exitHandler(NDBT_FAILED);
@@ -281,10 +455,11 @@
   /**
    * we must always load meta data, even if we will only print it to stdout
    */
+  debug << "Start restoring meta data" << endl;
   RestoreMetaData metaData(ga_backupPath, ga_nodeId, ga_backupId);
   if (!metaData.readHeader())
   {
-    ndbout << "Failed to read " << metaData.getFilename() << endl << endl;
+    err << "Failed to read " << metaData.getFilename() << endl << endl;
     exitHandler(NDBT_FAILED);
   }
 
@@ -292,57 +467,58 @@
   const Uint32 version = tmp.NdbVersion;
 
   char buf[NDB_VERSION_STRING_BUF_SZ];
-  ndbout << "Ndb version in backup files: "
+  info << "Ndb version in backup files: "
        << getVersionString(version, 0, buf, sizeof(buf)) << endl;
 
   /**
    * check wheater we can restore the backup (right version).
   */
+  debug << "Load content" << endl;
   int res = metaData.loadContent();
 
   if (res == 0)
   {
-    ndbout_c("Restore: Failed to load content");
+    err << "Restore: Failed to load content" << endl;
     exitHandler(NDBT_FAILED);
   }
-
+  debug << "Get no of Tables" << endl;
   if (metaData.getNoOfTables() == 0)
   {
-    ndbout_c("Restore: The backup contains no tables ");
+    err << "The backup contains no tables" << endl;
    exitHandler(NDBT_FAILED);
   }
-
+  debug << "Validate Footer" << endl;
   if (!metaData.validateFooter())
   {
-    ndbout_c("Restore: Failed to validate footer.");
+    err << "Restore: Failed to validate footer." << endl;
     exitHandler(NDBT_FAILED);
   }
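The loops below thread a fragment id through the whole data path: readFragmentHeader() now reports which fragment a data-file section came from, every consumer's tuple() callback receives it, consumer_restorem.cpp stashes it in the callback (cb->fragId), and BackupRestore ultimately feeds it to setPartitionId() for user-defined partitioning (see consumer_restore.cpp above). A toy version of that flow, with hypothetical stand-ins for the real BackupConsumer classes:

#include <cstdio>

struct Tuple { int row; };

/* Hypothetical stand-in for the BackupConsumer interface after this patch:
   tuple() now carries the fragment id alongside the row. */
struct Consumer
{
  virtual void tuple(const Tuple &t, unsigned fragId) = 0;
  virtual ~Consumer() {}
};

struct Printer : public Consumer
{
  void tuple(const Tuple &t, unsigned fragId)
  {
    printf("row %d came from fragment %u\n", t.row, fragId);
  }
};

int main()
{
  Printer printer;
  Consumer *consumers[1] = { &printer };
  /* Mimics the main data loop: one fragment header is read, then every
     tuple in that fragment goes to every consumer with the same id. */
  unsigned fragmentId = 3;
  Tuple tuples[2] = { {1}, {2} };
  for (unsigned t = 0; t < 2; t++)
    for (unsigned c = 0; c < 1; c++)
      consumers[c]->tuple(tuples[t], fragmentId);
  return 0;
}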
-
+  debug << "Init Backup objects" << endl;
   Uint32 i;
   for(i= 0; i < g_consumers.size(); i++)
   {
     if (!g_consumers[i]->init())
     {
       clearConsumers();
+      err << "Failed to initialize consumers" << endl;
       exitHandler(NDBT_FAILED);
     }
   }
-
+  debug << "Restore objects (tablespaces, ..)" << endl;
   for(i = 0; i<metaData.getNoOfObjects(); i++)
   {
     for(Uint32 j= 0; j < g_consumers.size(); j++)
       if (!g_consumers[j]->object(metaData.getObjType(i),
                                   metaData.getObjPtr(i)))
       {
-        ndbout_c("Restore: Failed to restore table: %s. "
-                 "Exiting...",
-                 metaData[i]->getTableName());
+        err << "Restore: Failed to restore table: ";
+        err << metaData[i]->getTableName() << " ... Exiting " << endl;
         exitHandler(NDBT_FAILED);
       }
   }
-
+  debug << "Restoring tables" << endl;
   for(i = 0; i<metaData.getNoOfTables(); i++)
   {
     if (checkSysTable(metaData[i]->getTableName()))
@@ -350,21 +526,20 @@
       for(Uint32 j= 0; j < g_consumers.size(); j++)
         if (!g_consumers[j]->table(* metaData[i]))
         {
-          ndbout_c("Restore: Failed to restore table: %s. "
-                   "Exiting...",
-                   metaData[i]->getTableName());
+          err << "Restore: Failed to restore table: ";
+          err << metaData[i]->getTableName() << " ... Exiting " << endl;
           exitHandler(NDBT_FAILED);
         }
     }
   }
-
+  debug << "Close tables" << endl;
   for(i= 0; i < g_consumers.size(); i++)
     if (!g_consumers[i]->endOfTables())
     {
-      ndbout_c("Restore: Failed while closing tables");
+      err << "Restore: Failed while closing tables" << endl;
       exitHandler(NDBT_FAILED);
     }
-
+  debug << "Iterate over data" << endl;
   if (ga_restore || ga_print)
   {
     if(_restore_data || _print_data)
     {
@@ -374,30 +549,30 @@
       // Read data file header
       if (!dataIter.readHeader())
       {
-        ndbout << "Failed to read header of data file. Exiting..." ;
+        err << "Failed to read header of data file. Exiting..." << endl;
         exitHandler(NDBT_FAILED);
       }
-
-      while (dataIter.readFragmentHeader(res= 0))
+      Uint32 fragmentId;
+      while (dataIter.readFragmentHeader(res= 0, &fragmentId))
       {
         const TupleS* tuple;
         while ((tuple = dataIter.getNextTuple(res= 1)) != 0)
         {
           if (checkSysTable(tuple->getTable()->getTableName()))
             for(Uint32 i= 0; i < g_consumers.size(); i++)
-              g_consumers[i]->tuple(* tuple);
+              g_consumers[i]->tuple(* tuple, fragmentId);
         } // while (tuple != NULL);
 
         if (res < 0)
         {
-          ndbout_c("Restore: An error occured while restoring data. "
-                   "Exiting...");
+          err << "Restore: An error occurred while restoring data. Exiting...";
+          err << endl;
          exitHandler(NDBT_FAILED);
         }
         if (!dataIter.validateFragmentFooter())
         {
-          ndbout_c("Restore: Error validating fragment footer. "
-                   "Exiting...");
+          err << "Restore: Error validating fragment footer. ";
+          err << "Exiting..." << endl;
           exitHandler(NDBT_FAILED);
         }
       } // while (dataIter.readFragmentHeader(res))
@@ -405,7 +580,7 @@
       if (res < 0)
       {
         err << "Restore: An error occured while restoring data. Exiting... "
-            << "res=" << res << endl;
+            << "res= " << res << endl;
         exitHandler(NDBT_FAILED);
       }
@@ -426,11 +601,14 @@
       }
       const LogEntry * logEntry = 0;
-      while ((logEntry = logIter.getNextLogEntry(res= 0)) != 0)
+      bool alloc_flag = false;
+      while ((logEntry = logIter.getNextLogEntry(res= 0, &alloc_flag)) != 0)
       {
         if (checkSysTable(logEntry->m_table->getTableName()))
           for(Uint32 i= 0; i < g_consumers.size(); i++)
             g_consumers[i]->logEntry(* logEntry);
+        if (alloc_flag)
+          NdbMem_Free((void*)logEntry);
       }
       if (res < 0)
       {
@@ -452,9 +630,8 @@
       for(Uint32 j= 0; j < g_consumers.size(); j++)
         if (!g_consumers[j]->finalize_table(* metaData[i]))
         {
-          ndbout_c("Restore: Failed to finalize restore table: %s. "
-                   "Exiting...",
-                   metaData[i]->getTableName());
+          err << "Restore: Failed to finalize restore table: ";
+          err << metaData[i]->getTableName() << " ... Exiting " << endl;
           exitHandler(NDBT_FAILED);
         }
     }
@@ -466,7 +643,7 @@
     for (i= 0; i < g_consumers.size(); i++)
       if (!g_consumers[i]->update_apply_status(metaData))
       {
-        ndbout_c("Restore: Failed to restore epoch");
+        err << "Restore: Failed to restore epoch" << endl;
         return -1;
       }
   }
diff --git a/strings/ctype-utf8.c b/strings/ctype-utf8.c
index 839eabe59ed..62f405f6049 100644
--- a/strings/ctype-utf8.c
+++ b/strings/ctype-utf8.c
@@ -4048,8 +4048,8 @@ static MY_CHARSET_HANDLER my_charset_filename_handler=
 
 CHARSET_INFO my_charset_filename=
 {
-    33,0,0,             /* number       */
-    MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM|MY_CS_UNICODE, /* state */
+    17,0,0,             /* number       */
+    MY_CS_COMPILED|MY_CS_PRIMARY|MY_CS_STRNXFRM|MY_CS_UNICODE|MY_CS_HIDDEN,
     "filename",         /* cs name      */
     "filename",         /* name         */
     "",                 /* comment      */