diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b836ee68d09..9316f0f3732 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -510,9 +510,9 @@ mini-benchmark: - | mariadb --skip-column-names -e "SELECT @@version, @@version_comment" | tee /tmp/version grep $MARIADB_MAJOR_VERSION /tmp/version || echo "MariaDB didn't install properly" - - yum install -y sysbench procps-ng perf util-linux || yum install -y https://kojipkgs.fedoraproject.org//packages/luajit/2.0.4/3.el7/x86_64/luajit-2.0.4-3.el7.x86_64.rpm https://kojipkgs.fedoraproject.org//packages/sysbench/1.0.17/2.el7/x86_64/sysbench-1.0.17-2.el7.x86_64.rpm https://kojipkgs.fedoraproject.org//packages/ck/0.5.2/2.el7/x86_64/ck-0.5.2-2.el7.x86_64.rpm + - yum install -y sysbench procps-ng perf flamegraph flamegraph-stackcollapse-perf util-linux dnf-utils - /usr/share/mysql/mini-benchmark - - cp -av */sysbench-run-*.log */metrics.txt .. # Move files one level down so they can be saved as artifacts + - cp -av */sysbench-run-*.log */metrics.txt . # Move files one level down so they can be saved as artifacts artifacts: when: always paths: diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 185672648f1..26916db608f 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -615,7 +615,7 @@ void replace_strings_append(struct st_replace *rep, DYNAMIC_STRING* ds, const char *from); ATTRIBUTE_NORETURN -static void cleanup_and_exit(int exit_code); +static void cleanup_and_exit(int exit_code, bool called_from_die); ATTRIBUTE_NORETURN static void really_die(const char *msg); @@ -932,6 +932,7 @@ pthread_attr_t cn_thd_attrib; pthread_handler_t connection_thread(void *arg) { struct st_connection *cn= (struct st_connection*)arg; + DBUG_ENTER("connection_thread"); mysql_thread_init(); while (cn->command != EMB_END_CONNECTION) @@ -943,6 +944,7 @@ pthread_handler_t connection_thread(void *arg) pthread_cond_wait(&cn->query_cond, &cn->query_mutex); pthread_mutex_unlock(&cn->query_mutex); } + DBUG_PRINT("info", ("executing command: %d", cn->command)); switch (cn->command) { case EMB_END_CONNECTION: @@ -963,24 +965,26 @@ pthread_handler_t connection_thread(void *arg) break; case EMB_CLOSE_STMT: cn->result= mysql_stmt_close(cn->stmt); + cn->stmt= 0; break; default: DBUG_ASSERT(0); } - cn->command= 0; pthread_mutex_lock(&cn->result_mutex); cn->query_done= 1; + cn->command= 0; pthread_cond_signal(&cn->result_cond); pthread_mutex_unlock(&cn->result_mutex); } end_thread: - cn->query_done= 1; + DBUG_ASSERT(cn->stmt == 0); mysql_close(cn->mysql); cn->mysql= 0; + cn->query_done= 1; mysql_thread_end(); pthread_exit(0); - return 0; + DBUG_RETURN(0); } static void wait_query_thread_done(struct st_connection *con) @@ -998,12 +1002,16 @@ static void wait_query_thread_done(struct st_connection *con) static void signal_connection_thd(struct st_connection *cn, int command) { + DBUG_ENTER("signal_connection_thd"); + DBUG_PRINT("enter", ("command: %d", command)); + DBUG_ASSERT(cn->has_thread); cn->query_done= 0; - cn->command= command; pthread_mutex_lock(&cn->query_mutex); + cn->command= command; pthread_cond_signal(&cn->query_cond); pthread_mutex_unlock(&cn->query_mutex); + DBUG_VOID_RETURN; } @@ -1068,27 +1076,37 @@ static int do_stmt_execute(struct st_connection *cn) static int do_stmt_close(struct st_connection *cn) { DBUG_ENTER("do_stmt_close"); - /* The cn->stmt is already set. */ if (!cn->has_thread) - DBUG_RETURN(mysql_stmt_close(cn->stmt)); + { + /* The cn->stmt is already set. 
*/ + int res= mysql_stmt_close(cn->stmt); + cn->stmt= 0; + DBUG_RETURN(res); + } + wait_query_thread_done(cn); signal_connection_thd(cn, EMB_CLOSE_STMT); wait_query_thread_done(cn); + DBUG_ASSERT(cn->stmt == 0); DBUG_RETURN(cn->result); } static void emb_close_connection(struct st_connection *cn) { + DBUG_ENTER("emb_close_connection"); if (!cn->has_thread) - return; + DBUG_VOID_RETURN; wait_query_thread_done(cn); signal_connection_thd(cn, EMB_END_CONNECTION); pthread_join(cn->tid, NULL); cn->has_thread= FALSE; + DBUG_ASSERT(cn->mysql == 0); + DBUG_ASSERT(cn->stmt == 0); pthread_mutex_destroy(&cn->query_mutex); pthread_cond_destroy(&cn->query_cond); pthread_mutex_destroy(&cn->result_mutex); pthread_cond_destroy(&cn->result_cond); + DBUG_VOID_RETURN; } @@ -1112,7 +1130,13 @@ static void init_connection_thd(struct st_connection *cn) #define do_read_query_result(cn) mysql_read_query_result(cn->mysql) #define do_stmt_prepare(cn, q, q_len) mysql_stmt_prepare(cn->stmt, q, (ulong)q_len) #define do_stmt_execute(cn) mysql_stmt_execute(cn->stmt) -#define do_stmt_close(cn) mysql_stmt_close(cn->stmt) + +static int do_stmt_close(struct st_connection *cn) +{ + int res= mysql_stmt_close(cn->stmt); + cn->stmt= 0; + return res; +} #endif /*EMBEDDED_LIBRARY*/ @@ -1440,7 +1464,6 @@ void close_statements() { if (con->stmt) do_stmt_close(con); - con->stmt= 0; } DBUG_VOID_RETURN; } @@ -1512,7 +1535,8 @@ void ha_pre_shutdown(); #endif -ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code) +ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code, + bool called_from_die) { #ifdef EMBEDDED_LIBRARY if (server_initialized) @@ -1525,16 +1549,6 @@ ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code) if (server_initialized) mysql_server_end(); - /* - mysqltest is fundamentally written in a way that makes impossible - to free all memory before exit (consider memory allocated - for frame local DYNAMIC_STRING's and die() invoked down the stack. - - We close stderr here to stop unavoidable safemalloc reports - from polluting the output. - */ - fclose(stderr); - my_end(my_end_arg); if (!silent) { @@ -1554,6 +1568,11 @@ ATTRIBUTE_NORETURN static void cleanup_and_exit(int exit_code) } } + /* + Report memory leaks, if not called from 'die()', as die() will not release + all memory. + */ + sf_leaking_memory= called_from_die; exit(exit_code); } @@ -1620,7 +1639,7 @@ static void really_die(const char *msg) second time, just exit */ if (dying) - cleanup_and_exit(1); + cleanup_and_exit(1, 1); dying= 1; log_file.show_tail(opt_tail_lines); @@ -1632,7 +1651,7 @@ static void really_die(const char *msg) if (cur_con && !cur_con->pending) show_warnings_before_error(cur_con->mysql); - cleanup_and_exit(1); + cleanup_and_exit(1, 1); } void report_or_die(const char *fmt, ...) @@ -1686,7 +1705,7 @@ void abort_not_supported_test(const char *fmt, ...) 
} va_end(args); - cleanup_and_exit(62); + cleanup_and_exit(62, 0); } @@ -2233,14 +2252,14 @@ int dyn_string_cmp(DYNAMIC_STRING* ds, const char *fname) check_result RETURN VALUES - error - the function will not return - + 0 ok + 1 error */ -void check_result() +int check_result() { const char *mess= 0; - + int error= 1; DBUG_ENTER("check_result"); DBUG_ASSERT(result_file_name); DBUG_PRINT("enter", ("result_file_name: %s", result_file_name)); @@ -2248,7 +2267,10 @@ void check_result() switch (compare_files(log_file.file_name(), result_file_name)) { case RESULT_OK: if (!error_count) + { + error= 0; break; /* ok */ + } mess= "Got errors while running test"; /* Fallthrough */ case RESULT_LENGTH_MISMATCH: @@ -2287,14 +2309,13 @@ void check_result() log_file.file_name(), reject_file, errno); show_diff(NULL, result_file_name, reject_file); - die("%s", mess); + fprintf(stderr, "%s", mess); break; } default: /* impossible */ die("Unknown error code from dyn_string_cmp()"); } - - DBUG_VOID_RETURN; + DBUG_RETURN(error); } @@ -5693,7 +5714,6 @@ void do_close_connection(struct st_command *command) #endif /*!EMBEDDED_LIBRARY*/ if (con->stmt) do_stmt_close(con); - con->stmt= 0; #ifdef EMBEDDED_LIBRARY /* As query could be still executed in a separate thread @@ -7375,17 +7395,17 @@ get_one_option(const struct my_option *opt, const char *argument, const char *) break; case 'V': print_version(); - exit(0); + cleanup_and_exit(0,0); case OPT_MYSQL_PROTOCOL: #ifndef EMBEDDED_LIBRARY if ((opt_protocol= find_type_with_warning(argument, &sql_protocol_typelib, opt->name)) <= 0) - exit(1); + cleanup_and_exit(1,0); #endif break; case '?': usage(); - exit(0); + cleanup_and_exit(0,0); } return 0; } @@ -7397,12 +7417,12 @@ int parse_args(int argc, char **argv) default_argv= argv; if ((handle_options(&argc, &argv, my_long_options, get_one_option))) - exit(1); + cleanup_and_exit(1, 0); if (argc > 1) { usage(); - exit(1); + cleanup_and_exit(1, 0); } if (argc == 1) opt_db= *argv; @@ -8523,7 +8543,7 @@ void run_query_stmt(struct st_connection *cn, struct st_command *command, my_bool ds_res_1st_execution_init = FALSE; my_bool compare_2nd_execution = TRUE; int query_match_ps2_re; - + MYSQL_RES *res; DBUG_ENTER("run_query_stmt"); DBUG_PRINT("query", ("'%-.60s'", query)); DBUG_PRINT("info", @@ -8729,10 +8749,13 @@ void run_query_stmt(struct st_connection *cn, struct st_command *command, The --enable_prepare_warnings command can be used to change this so that warnings from both the prepare and execute phase are shown. 
*/ - if ((mysql_stmt_result_metadata(stmt) != NULL) && - !disable_warnings && - !prepare_warnings_enabled) - dynstr_set(&ds_prepare_warnings, NULL); + if ((res= mysql_stmt_result_metadata(stmt))) + { + if (!disable_warnings && + !prepare_warnings_enabled) + dynstr_set(&ds_prepare_warnings, NULL); + mysql_free_result(res); + } /* Fetch info before fetching warnings, since it will be reset @@ -9860,6 +9883,7 @@ static sig_handler signal_handler(int sig) fflush(stderr); my_write_core(sig); #ifndef _WIN32 + sf_leaking_memory= 1; exit(1); // Shouldn't get here but just in case #endif } @@ -9933,12 +9957,10 @@ int main(int argc, char **argv) uint command_executed= 0, last_command_executed= 0; char save_file[FN_REFLEN]; bool empty_result= FALSE; + int error= 0; MY_INIT(argv[0]); DBUG_ENTER("main"); - /* mysqltest has no way to free all its memory correctly */ - sf_leaking_memory= 1; - save_file[0]= 0; TMPDIR[0]= 0; @@ -10631,7 +10653,7 @@ int main(int argc, char **argv) die("Test ended with parsing disabled"); /* - The whole test has been executed _successfully_. + The whole test has been executed successfully. Time to compare result or save it to record file. The entire output from test is in the log file */ @@ -10654,7 +10676,7 @@ int main(int argc, char **argv) else { /* Check that the output from test is equal to result file */ - check_result(); + error= check_result(); } } } @@ -10664,7 +10686,8 @@ int main(int argc, char **argv) if (! result_file_name || record || compare_files (log_file.file_name(), result_file_name)) { - die("The test didn't produce any output"); + fprintf(stderr, "mysqltest: The test didn't produce any output\n"); + error= 1; } else { @@ -10673,12 +10696,15 @@ int main(int argc, char **argv) } if (!command_executed && result_file_name && !empty_result) - die("No queries executed but non-empty result file found!"); + { + fprintf(stderr, "mysqltest: No queries executed but non-empty result file found!\n"); + error= 1; + } - verbose_msg("Test has succeeded!"); + if (!error) + verbose_msg("Test has succeeded!"); timer_output(); - /* Yes, if we got this far the test has succeeded! 
Sakila smiles */ - cleanup_and_exit(0); + cleanup_and_exit(error, 0); return 0; /* Keep compiler happy too */ } diff --git a/cmake/ssl.cmake b/cmake/ssl.cmake index 646aa37a91c..51176a84c51 100644 --- a/cmake/ssl.cmake +++ b/cmake/ssl.cmake @@ -53,7 +53,7 @@ MACRO (MYSQL_USE_BUNDLED_SSL) ${CMAKE_SOURCE_DIR}/extra/wolfssl/wolfssl ${CMAKE_SOURCE_DIR}/extra/wolfssl/wolfssl/wolfssl ) - SET(SSL_LIBRARIES wolfssl wolfcrypt) + SET(SSL_LIBRARIES wolfssl) SET(SSL_INCLUDE_DIRS ${INC_DIRS}) SET(SSL_DEFINES "-DHAVE_OPENSSL -DHAVE_WOLFSSL -DWOLFSSL_USER_SETTINGS") SET(HAVE_ERR_remove_thread_state ON CACHE INTERNAL "wolfssl doesn't have ERR_remove_thread_state") diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt index f1f62199a09..51b65b274ee 100644 --- a/extra/mariabackup/CMakeLists.txt +++ b/extra/mariabackup/CMakeLists.txt @@ -31,6 +31,7 @@ ENDIF() INCLUDE_DIRECTORIES( ${CMAKE_SOURCE_DIR}/include ${CMAKE_SOURCE_DIR}/sql + ${CMAKE_SOURCE_DIR}/storage/maria ${CMAKE_CURRENT_SOURCE_DIR}/quicklz ${CMAKE_CURRENT_SOURCE_DIR} ) @@ -71,8 +72,12 @@ MYSQL_ADD_EXECUTABLE(mariadb-backup xbstream_write.cc backup_mysql.cc backup_copy.cc - xb_plugin.cc + encryption_plugin.cc ${PROJECT_BINARY_DIR}/sql/sql_builtin.cc + aria_backup_client.cc + thread_pool.cc + ddl_log.cc + common_engine.cc ${PROJECT_SOURCE_DIR}/sql/net_serv.cc ${PROJECT_SOURCE_DIR}/libmysqld/libmysql.c COMPONENT Backup @@ -81,7 +86,8 @@ MYSQL_ADD_EXECUTABLE(mariadb-backup # Export all symbols on Unix, for better crash callstacks SET_TARGET_PROPERTIES(mariadb-backup PROPERTIES ENABLE_EXPORTS TRUE) -TARGET_LINK_LIBRARIES(mariadb-backup sql sql_builtins) +TARGET_LINK_LIBRARIES(mariadb-backup sql sql_builtins aria) + IF(NOT HAVE_SYSTEM_REGEX) TARGET_LINK_LIBRARIES(mariadb-backup pcre2-posix) ENDIF() diff --git a/extra/mariabackup/aria_backup_client.cc b/extra/mariabackup/aria_backup_client.cc new file mode 100644 index 00000000000..1ea1486d7cb --- /dev/null +++ b/extra/mariabackup/aria_backup_client.cc @@ -0,0 +1,1016 @@ +#include +#include +extern "C" { +#include "maria_def.h" +} +#undef LSN_MAX +#include "aria_backup_client.h" +#include "backup_copy.h" +#include "common.h" +#include "sql_table.h" +#include "ma_checkpoint.h" +#include "ma_recovery.h" +#include "backup_debug.h" +#include "aria_backup.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace aria { + +const char *log_preffix = "aria_log."; + + +static std::string log_file_name_only(size_t log_num) { + std::string log_file; + { + std::stringstream ss; + ss << std::setw(8) << std::setfill('0') << log_num; + log_file.append(log_preffix).append(ss.str()); + } + return log_file; +} + + +static std::string log_file_name(const char *datadir_path, size_t log_num) { + std::string log_file(datadir_path); + return log_file.append("/").append(log_file_name_only(log_num)); +} + + +class LogFileCollection +{ + uint32 m_first; + uint32 m_count; +public: + uint32 first() const { return m_first; } + uint32 count() const { return m_count; } + uint32 last() const + { + DBUG_ASSERT(m_count > 0); + return m_first + m_count - 1; + } + + // Initialize by checking existing log files on the disk + LogFileCollection(const char *datadir, uint32 max_log_no) + { + uint32 end= find_greatest_existing_log(datadir, max_log_no); + if (!end) + { + // No log files were found at all + m_first= 0; + m_count= 0; + } + else if (end == 1) + { + // Just the very first one log file (aria_log.00000001) was found. 
+ m_first= 1; + m_count= 1; + } + else + { + // Multiple files were found + m_first= find_greatest_missing_log(datadir, end - 1) + 1; + m_count= 1 + end - m_first; + } + } + + /* + Skip all missing log files and find the greatest existing log file, or + Skip all existing log files and find the greatest missing log file. + + @param datadir - Search files in this directory + @param start - Start searching from this log number and go downto 1. + @param kind - true - search for an existing file + false - search for a missing file. + @returns - [1..start] - the greatest found log file + of the searched kind + - 0 - if no log files of this kind + were found in the range [1..start]. + */ + static uint32 find_greatest_existing_or_missing_log(const char *datadir, + uint32 start, + bool kind) + { + DBUG_ASSERT(start > 0); + for (uint32 i= start; i > 0; i--) + { + if (file_exists(log_file_name(datadir, i).c_str()) == kind) + return i; + } + return 0; // No log files of the searched kind were found + } + + static uint32 find_greatest_existing_log(const char *datadir, uint32 start) + { + return find_greatest_existing_or_missing_log(datadir, start, true); + } + + static uint32 find_greatest_missing_log(const char *datadir, uint32 start) + { + return find_greatest_existing_or_missing_log(datadir, start, false); + } + + /* + In some scenarios (e.g. log rotate) some new log files can appear + outside of the initially assumed [first,last] log number range. + This function adds all extra files behind "last". + */ + void find_logs_after_last(const char *datadir) + { + DBUG_ASSERT(m_count > 0); + for ( ; + file_exists(log_file_name(datadir, last() + 1).c_str()) ; + m_count++) + { } + } + + void report_found(unsigned thread_num) const + { + if (m_count) + msg(thread_num, + "Found %u aria log files, " + "minimum log number %u, " + "maximum log number %u", + m_count, m_first, last()); + } + + void die_if_missing(uint32 logno) const + { + DBUG_ASSERT(logno > 0); + if (!m_count || m_first > logno || last() < logno) + die("Aria log file %u does not exists.", logno); + } +}; + + +class Table { +public: + struct Partition { + std::string m_file_path; + File m_index_file = -1; + MY_STAT m_index_file_stat; + File m_data_file = -1; + MY_STAT m_data_file_stat; + }; + Table() = default; + Table (Table &&other) = delete; + Table & operator= (Table &&other) = delete; + Table(const Table &) = delete; + Table & operator= (const Table &) = delete; + ~Table(); + bool init(const char *data_file_path); + bool open(MYSQL *con, bool opt_no_lock, unsigned thread_num); + bool close(); + bool copy(ds_ctxt_t *ds, unsigned thread_num); + + bool is_online_backup_safe() const { + DBUG_ASSERT(is_opened()); + return m_cap.online_backup_safe; + } + bool is_stats() const { + return is_stats_table(m_db.c_str(), m_table.c_str()); + } + bool is_log() const { + return is_log_table(m_db.c_str(), m_table.c_str()); + } + bool is_opened() const { + return !m_partitions.empty() && + m_partitions[0].m_index_file >= 0 && m_partitions[0].m_data_file >= 0; + }; + std::string &get_full_name() { + return m_full_name; + } + std::string &get_db() { return m_db; } + std::string &get_table() { return m_table; } + std::string &get_version() { return m_table_version; } + bool is_partitioned() const { return m_partitioned; } + void add_partition(const Table &partition) { + DBUG_ASSERT(is_partitioned()); + m_partitions.push_back(partition.m_partitions[0]); + } +#ifndef DBUG_OFF + const std::string& get_sql_name() const { return m_sql_name; } +#endif //DBUG_OFF 
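+  /*
+    Usage sketch (illustrative, with placeholder names for the connection,
+    datasink and thread number): a Table is initialized from its data file
+    path, opened (briefly under BACKUP LOCK unless no_lock is set) so its
+    Aria capabilities can be read, copied to the datasink, and then closed:
+
+      Table t;
+      if (t.init("./db1/t1.MAD") && t.open(con, opt_no_lock, thread_num))
+      {
+        if (t.is_online_backup_safe())
+          t.copy(ds, thread_num);
+        t.close();
+      }
+  */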
+private: + + bool copy(ds_ctxt_t *ds, bool is_index, unsigned thread_num); + // frm and par files will be copied under BLOCK_DDL stage in + // backup_copy_non_system() + bool copy_frm_and_par(ds_ctxt_t *ds, unsigned thread_num); + bool read_table_version_id(File file); + + std::string m_db; + std::string m_table; + std::string m_full_name; + std::string m_frm_par_path; + std::string m_table_version; +#ifndef DBUG_OFF + std::string m_sql_name; +#endif //DBUG_OFF + bool m_partitioned = false; + std::vector m_partitions; + ARIA_TABLE_CAPABILITIES m_cap; +}; + +Table::~Table() { + (void)close(); +} + +bool Table::init(const char *data_file_path) { + DBUG_ASSERT(data_file_path); + + const char *ext_pos = strrchr(data_file_path, '.'); + if (!ext_pos) + return false; + + char db_name_orig[FN_REFLEN]; + char table_name_orig[FN_REFLEN]; + parse_db_table_from_file_path( + data_file_path, db_name_orig, table_name_orig); + if (!db_name_orig[0] || !table_name_orig[0]) + return false; + char db_name_conv[FN_REFLEN]; + char table_name_conv[FN_REFLEN]; + filename_to_tablename(db_name_orig, db_name_conv, sizeof(db_name_conv)); + filename_to_tablename( + table_name_orig, table_name_conv, sizeof(table_name_conv)); + if (!db_name_conv[0] || !table_name_conv[0]) + return false; + + if (strstr(data_file_path, "#P#")) + m_partitioned = true; + + const char *table_name_begin = strrchr(data_file_path, FN_LIBCHAR); + if (!table_name_begin) + return false; + m_frm_par_path.assign(data_file_path, table_name_begin + 1). + append(table_name_orig); + + m_db.assign(db_name_conv); + m_table.assign(table_name_conv); + // TODO: find the correct way to represent quoted table/db names + m_full_name.assign("`").append(m_db).append("`.`"). + append(m_table).append("`"); +#ifndef DBUG_OFF + m_sql_name.assign(m_db).append("/").append(m_table); +#endif // DBUG_OFF + Partition partition; + partition.m_file_path.assign(data_file_path, ext_pos - data_file_path); + m_partitions.push_back(std::move(partition)); + return true; +} + +bool Table::read_table_version_id(File file) { + m_table_version = ::read_table_version_id(file); + return m_table_version.empty(); +} + +bool Table::open(MYSQL *con, bool opt_no_lock, unsigned thread_num) { + int error= 1; + bool have_capabilities = false; + File frm_file = -1; + + if (!opt_no_lock && !backup_lock(con, m_full_name.c_str())) { + msg(thread_num, "Error on BACKUP LOCK for aria table %s", + m_full_name.c_str()); + goto exit; + } + + for (Partition &partition : m_partitions) { + std::string file_path = partition.m_file_path + ".MAI"; + if ((partition.m_index_file= my_open(file_path.c_str(), + O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC, + MYF(MY_WME))) < 0) { + msg(thread_num, "Error on aria table file open %s", file_path.c_str()); + goto exit; + } + if (!my_stat(file_path.c_str(), &partition.m_index_file_stat, MYF(0))) { + msg(thread_num, "Error on aria table file stat %s", file_path.c_str()); + goto exit; + } + if (!have_capabilities) { + if ((error= aria_get_capabilities(partition.m_index_file, &m_cap))) { + msg(thread_num, "aria_get_capabilities failed: %d", error); + goto exit; + } + have_capabilities = true; + } + + file_path = partition.m_file_path + ".MAD"; + if ((partition.m_data_file= my_open(file_path.c_str(), + O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC, MYF(MY_WME))) < 0) { + msg(thread_num, "Error on aria table file open %s", file_path.c_str()); + goto exit; + } + if (!my_stat(file_path.c_str(), &partition.m_data_file_stat, MYF(0))) { + msg(thread_num, "Error on aria table file 
stat %s", file_path.c_str()); + goto exit; + } + } + + if ((frm_file = mysql_file_open( + key_file_frm, (m_frm_par_path + ".frm").c_str(), + O_RDONLY | O_SHARE, MYF(0))) < 0) { + msg(thread_num, "Error on aria table %s file open", + (m_frm_par_path + ".frm").c_str()); + goto exit; + } + + error = 0; + +exit: + if (!opt_no_lock && !backup_unlock(con)) { + msg(thread_num, "Error on BACKUP UNLOCK for aria table %s", + m_full_name.c_str()); + error = 1; + } + if (error) + (void)close(); + else { + (void)read_table_version_id(frm_file); + mysql_file_close(frm_file, MYF(MY_WME)); + } + return !error; +} + +bool Table::close() { + for (Partition &partition : m_partitions) { + if (partition.m_index_file >= 0) { + my_close(partition.m_index_file, MYF(MY_WME)); + partition.m_index_file = -1; + } + if (partition.m_data_file >= 0) { + my_close(partition.m_data_file, MYF(MY_WME)); + partition.m_data_file = -1; + } + } + return true; +} + +bool Table::copy(ds_ctxt_t *ds, unsigned thread_num) { + DBUG_ASSERT(is_opened()); + DBUG_MARIABACKUP_EVENT_LOCK("before_aria_table_copy", + fil_space_t::name_type(m_sql_name.data(), m_sql_name.size())); + bool result = +// copy_frm_and_par(ds, thread_num) && + copy(ds, true, thread_num) && copy(ds, false, thread_num); + return result; +} + +bool Table::copy(ds_ctxt_t *ds, bool is_index, unsigned thread_num) { + DBUG_ASSERT(ds); + const char *ext = is_index ? ".MAI" : ".MAD"; + int error= 1; + for (const Partition &partition : m_partitions) { + ds_file_t *dst_file = nullptr; + uchar *copy_buffer = nullptr; + std::string full_name = partition.m_file_path + ext; + const char *dst_path = + (xtrabackup_copy_back || xtrabackup_move_back) ? + full_name.c_str() : trim_dotslash(full_name.c_str()); + + dst_file = ds_open(ds, dst_path, + is_index ? &partition.m_index_file_stat : &partition.m_data_file_stat); + if (!dst_file) { + msg(thread_num, "error: cannot open the destination stream for %s", + dst_path); + goto err; + } + + copy_buffer = + reinterpret_cast(my_malloc(PSI_NOT_INSTRUMENTED, + m_cap.block_size, MYF(0))); + + DBUG_MARIABACKUP_EVENT_LOCK( + is_index ? + "before_aria_index_file_copy": + "before_aria_data_file_copy", + fil_space_t::name_type(m_sql_name.data(), + m_sql_name.size())); + + for (ulonglong block= 0 ; ; block++) { + size_t length = m_cap.block_size; + if (is_index) { + if ((error= aria_read_index( + partition.m_index_file, &m_cap, block, copy_buffer) == + HA_ERR_END_OF_FILE)) + break; + } else { + if ((error= aria_read_data( + partition.m_data_file, &m_cap, block, copy_buffer, &length) == + HA_ERR_END_OF_FILE)) + break; + } + if (error) { + msg(thread_num, "error: aria_read %s failed: %d", + is_index ? "index" : "data", error); + goto err; + } + xtrabackup_io_throttling(); + if ((error = ds_write(dst_file, copy_buffer, length))) { + msg(thread_num, "error: aria_write failed: %d", error); + goto err; + } + } + + DBUG_MARIABACKUP_EVENT_LOCK( + is_index ? 
+ "after_aria_index_file_copy": + "after_aria_data_file_copy", + fil_space_t::name_type(m_sql_name.data(), + m_sql_name.size())); + + error = 0; + msg(thread_num, "aria table file %s is copied successfully.", + full_name.c_str()); + + err: + if (dst_file) + ds_close(dst_file); + if (copy_buffer) + my_free(copy_buffer); + if (error) + break; + } + return !error; +} + +class BackupImpl { +public: + BackupImpl( + const char *datadir_path, + const char *aria_log_path, + ds_ctxt_t *datasink, bool opt_no_lock, + std::vector &con_pool, ThreadPool &thread_pool) : + m_datadir_path(datadir_path), + m_aria_log_dir_path(aria_log_path), + m_ds(datasink), m_con_pool(con_pool), + m_tasks_group(thread_pool), m_thread_pool(thread_pool) { } + ~BackupImpl() { destroy(); } + bool init(); + bool start(bool no_lock); + bool wait_for_finish(); + bool copy_offline_tables( + const std::unordered_set *exclude_tables, bool no_lock, + bool copy_stats); + bool finalize(); + void set_post_copy_table_hook(const post_copy_table_hook_t &hook) { + m_table_post_copy_hook = hook; + } + bool copy_log_tail() { return copy_log_tail(0, false); } +private: + void destroy(); + void scan_job(bool no_lock, unsigned thread_num); + bool copy_log_tail(unsigned thread_num, bool finalize); + void copy_log_file_job(size_t log_num, unsigned thread_num); + void destroy_log_tail(); + void process_table_job(Table *table, bool online_only, bool copy_stats, + bool no_lock, unsigned thread_num); + + const char *m_datadir_path; + const char *m_aria_log_dir_path; + std::string aria_log_dir_path() const + { + if (!m_aria_log_dir_path || !m_aria_log_dir_path[0]) + return m_datadir_path; + if (is_absolute_path(m_aria_log_dir_path)) + return m_aria_log_dir_path; + return std::string(m_datadir_path).append("/") + .append(m_aria_log_dir_path); + } + ds_ctxt_t *m_ds; + std::vector &m_con_pool; + + TasksGroup m_tasks_group; + + std::mutex m_offline_tables_mutex; + std::vector> m_offline_tables; + post_copy_table_hook_t m_table_post_copy_hook; + + ThreadPool &m_thread_pool; + + size_t m_last_log_num = 0; + ds_file_t* m_last_log_dst = nullptr; + File m_last_log_src = -1; +}; + +bool BackupImpl::init() { + DBUG_ASSERT(m_tasks_group.is_finished()); + return true; +}; + +void BackupImpl::destroy() { + DBUG_ASSERT(m_tasks_group.is_finished()); + destroy_log_tail(); +} + +bool BackupImpl::start(bool no_lock) { + DBUG_ASSERT(m_tasks_group.is_finished()); + m_tasks_group.push_task( + std::bind(&BackupImpl::scan_job, this, no_lock, std::placeholders::_1)); + return true; +} + +void BackupImpl::process_table_job( + Table *table_ptr, bool online_only, bool copy_stats, bool no_lock, + unsigned thread_num) { + DBUG_ASSERT(table_ptr); + DBUG_ASSERT(thread_num < m_con_pool.size()); + std::unique_ptr table(table_ptr); + bool is_online; + bool is_stats; + bool need_copy; + int result = 1; + + if (!m_tasks_group.get_result()) + goto exit; + + if (!table->open(m_con_pool[thread_num], no_lock, thread_num)) { + // if table can't be opened, it might be removed or renamed, this is not + // error for transactional tables + table->close(); // Close opened table files + goto exit; + } + + is_online = table->is_online_backup_safe(); + is_stats = table->is_stats(); + + need_copy = (!online_only || is_online) && (copy_stats || !is_stats); + + if (need_copy && !table->copy(m_ds, thread_num)) { + table->close(); + DBUG_MARIABACKUP_EVENT_LOCK("after_aria_table_copy", + fil_space_t::name_type(table->get_sql_name().data(), + table->get_sql_name().size())); + // if table is opened, it must 
be copied, + // the corresponding diagnostic messages must be issued in Table::copy() + result = 0; + goto exit; + } + + if (!table->close()) { + msg(thread_num, "Can't close aria table %s.\n", + table->get_full_name().c_str()); + result = 0; + goto exit; + } + + if (!need_copy) { + std::lock_guard lock(m_offline_tables_mutex); + m_offline_tables.push_back(std::move(table)); + } + else { + DBUG_MARIABACKUP_EVENT_LOCK("after_aria_table_copy", + fil_space_t::name_type(table->get_sql_name().data(), + table->get_sql_name().size())); + if (m_table_post_copy_hook) + m_table_post_copy_hook( + std::move(table->get_db()), + std::move(table->get_table()), + std::move(table->get_version())); + } +exit: + m_tasks_group.finish_task(result); +} + + +void BackupImpl::scan_job(bool no_lock, unsigned thread_num) { + std::unordered_map> partitioned_tables; + + std::string aria_log_dir_path_cache(aria_log_dir_path()); + std::string log_control_file_path(aria_log_dir_path_cache); + log_control_file_path.append("/aria_log_control"); + if (!m_ds->copy_file( + log_control_file_path.c_str(), "aria_log_control", + 0, false)) { + msg("Aria log control file copying error."); + m_tasks_group.finish_task(0); + return; + } + + msg(thread_num, "Loading aria_log_control."); + aria_readonly= 1; + maria_data_root= aria_log_dir_path_cache.c_str(); + if (ma_control_file_open(FALSE, FALSE, FALSE, O_RDONLY)) + die("Can't open Aria control file (%d)", errno); + uint32 aria_log_control_last_log_number= last_logno; + msg(thread_num, "aria_log_control: last_log_number: %d", + aria_log_control_last_log_number); + ma_control_file_end(); + + msg(thread_num, "Start scanning aria tables."); + + foreach_file_in_db_dirs(m_datadir_path, [&](const char *file_path)->bool { + + if (check_if_skip_table(file_path)) { + msg(thread_num, "Skipping %s.", file_path); + return true; + } + + if (!ends_with(file_path, ".MAD")) + return true; + + std::unique_ptr
table(new Table()); + if (!table->init(file_path)) { + msg(thread_num, "Can't init aria table %s.\n", file_path); + return true; + } + + if (table->is_log()) + return true; + + if (table->is_partitioned()) { + auto table_it = partitioned_tables.find(table->get_full_name()); + if (table_it == partitioned_tables.end()) { + partitioned_tables[table->get_full_name()] = std::move(table); + } else { + table_it->second->add_partition(*table); + } + return true; + } + + m_tasks_group.push_task( + std::bind(&BackupImpl::process_table_job, this, table.release(), true, + false, no_lock, std::placeholders::_1)); + return true; + }); + + for (auto &table_it : partitioned_tables) { + m_tasks_group.push_task( + std::bind(&BackupImpl::process_table_job, this, table_it.second.release(), + true, false, no_lock, std::placeholders::_1)); + } + + msg(thread_num, "Start scanning aria log files."); + + LogFileCollection logs(aria_log_dir_path_cache.c_str(), + aria_log_control_last_log_number); + logs.report_found(thread_num); + logs.die_if_missing(aria_log_control_last_log_number); + + m_last_log_num= logs.last(); + + DBUG_MARIABACKUP_EVENT("after_scanning_log_files", {}); + + for (uint32 i= logs.first(); i <= logs.last(); ++i) + m_tasks_group.push_task( + std::bind(&BackupImpl::copy_log_file_job, this, + i, std::placeholders::_1)); + + msg(thread_num, "Stop scanning aria tables."); + + m_tasks_group.finish_task(1); +} + +template +T align_down(T n, ulint align_no) +{ + DBUG_ASSERT(align_no > 0); + DBUG_ASSERT(ut_is_2pow(align_no)); + return n & ~(static_cast(align_no) - 1); +} + +static ssize_t copy_file_chunk(File src, ds_file_t* dst, size_t size) { + size_t bytes_read; + static const size_t max_buf_size = 10 * 1024 * 1024; + size_t buf_size = size ? std::min(size, max_buf_size) : max_buf_size; + std::unique_ptr buf(new uchar[buf_size]); + ssize_t copied_size = 0; + bool unlim = !size; + while((unlim || size) && (bytes_read = my_read(src, buf.get(), + unlim ? buf_size : std::min(buf_size, size), MY_WME))) { + if (bytes_read == size_t(-1)) + return -1; + xtrabackup_io_throttling(); + if (ds_write(dst, buf.get(), bytes_read)) + return -1; + copied_size += bytes_read; + if (!unlim) + size -= bytes_read; + } + return copied_size; +} + +bool BackupImpl::copy_log_tail(unsigned thread_num, bool finalize) { + bool result = false; + std::string log_file = log_file_name(aria_log_dir_path().c_str(), m_last_log_num); + std::string prev_log_file; + ssize_t total_bytes_copied = 0; + MY_STAT stat_info; + my_off_t file_offset = 0; + size_t to_copy_size = 0; + +repeat: + memset(&stat_info, 0, sizeof(MY_STAT)); + if (!m_tasks_group.get_result()) { + msg(thread_num, "Skip copying aria lof file tail %s due to error.", + log_file.c_str()); + result = true; + goto exit; + } + + msg(thread_num, "Start copying aria log file tail: %s", log_file.c_str()); + + if (m_last_log_src < 0 && (m_last_log_src = + my_open(log_file.c_str(), O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC, + MYF(MY_WME))) < 0) { + msg("Aria log file %s open failed: %d", log_file.c_str(), my_errno); + goto exit; + } + + if (!m_last_log_dst && + !(m_last_log_dst = ds_open(m_ds, + log_file_name_only(m_last_log_num).c_str(), + &stat_info, false))) { + msg(thread_num, "error: failed to open the target stream for " + "aria log file %s.", + log_file.c_str()); + goto exit; + } + +// If there is no need to finalize log file copying, calculate the size to copy +// without the last page, which can be rewritten by the server +// (see translog_force_current_buffer_to_finish()). 
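+// For illustration (assuming the usual 8 KiB TRANSLOG_PAGE_SIZE): with
+// st_size = 100000 and file_offset = 50000 the raw tail is 50000 bytes,
+// align_down(50000, 8192) = 49152, and dropping one more page leaves
+// to_copy_size = 40960, so the last, possibly still-changing page is skipped.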
+ if (!finalize) { + if (my_fstat(m_last_log_src, &stat_info, MYF(0))) { + msg(thread_num, "error: failed to get file size for aria log file: %s.", + log_file.c_str()); + goto exit; + } + if ((file_offset = my_tell(m_last_log_src, MYF(0))) == (my_off_t)(-1)) { + msg(thread_num, "error: failed to get file offset for aria log file: %s.", + log_file.c_str()); + goto exit; + } + DBUG_ASSERT(file_offset <= static_cast(stat_info.st_size)); + to_copy_size = static_cast(stat_info.st_size) - file_offset; + to_copy_size = to_copy_size >= TRANSLOG_PAGE_SIZE ? + (align_down(to_copy_size, TRANSLOG_PAGE_SIZE) - TRANSLOG_PAGE_SIZE) : 0; + } + +// Copy from the last position to the end of file, +// excluding the last page is there is no need to finalize the copy. + if ((to_copy_size || finalize) && + (total_bytes_copied = copy_file_chunk(m_last_log_src, + m_last_log_dst, to_copy_size)) < 0) { + msg(thread_num, "Aria log file %s chunk copy error", log_file.c_str()); + goto exit; + } + + msg(thread_num, "Stop copying aria log file tail: %s, copied %zu bytes", + log_file.c_str(), total_bytes_copied); + +// Check if there is new log file, if yes, then copy the last page of the old +// one, and fix it last LSN in the log header, as it is changed on new +// log file creating by the server (see translog_create_new_file() and +// translog_max_lsn_to_header()). Then close the old log file and repeat +// the copying for the new log file. + prev_log_file = std::move(log_file); + log_file = log_file_name(aria_log_dir_path().c_str(), m_last_log_num + 1); + if (file_exists(log_file.c_str())) { + uchar lsn_buff[LSN_STORE_SIZE]; + msg(thread_num, "Found new aria log tail file: %s, start copy %s tail", + log_file.c_str(), prev_log_file.c_str()); + if ((total_bytes_copied = copy_file_chunk(m_last_log_src, + m_last_log_dst, 0)) < 0) { + msg(thread_num, "Aria log file %s tail copy error", + prev_log_file.c_str()); + goto exit; + } + + if (my_pread(m_last_log_src, lsn_buff, LSN_STORE_SIZE, + (LOG_HEADER_DATA_SIZE - LSN_STORE_SIZE), MYF(0)) < LSN_STORE_SIZE) { + msg(thread_num, "Aria lsn store read error for log file %s", + prev_log_file.c_str()); + goto exit; + } + + if (ds_seek_set(m_last_log_dst, (LOG_HEADER_DATA_SIZE - LSN_STORE_SIZE))) { + msg(thread_num, "Set aria log pointer error for log file %s", + prev_log_file.c_str()); + goto exit; + } + + if (ds_write(m_last_log_dst, lsn_buff, LSN_STORE_SIZE)) { + msg(thread_num, "LSN write error for aria log file %s", + prev_log_file.c_str()); + goto exit; + } + + msg(thread_num, "The last %zu bytes were copied for %s.", + total_bytes_copied, prev_log_file.c_str()); + destroy_log_tail(); + ++m_last_log_num; + goto repeat; + } + + result = true; + +exit: + if (!result) + destroy_log_tail(); + return result; +} + +void BackupImpl::copy_log_file_job(size_t log_num, unsigned thread_num) { + DBUG_ASSERT(log_num <= m_last_log_num); + + if (!m_tasks_group.get_result()) { + msg(thread_num, "Skip copying %zu aria log file due to error", log_num); + m_tasks_group.finish_task(0); + return; + } + +// Copy log file if the file is not the last one. + if (log_num < m_last_log_num) { + std::string log_file = log_file_name(aria_log_dir_path().c_str(), log_num); + if (!m_ds->copy_file(log_file.c_str(), + log_file_name_only(log_num).c_str(), + thread_num, false)) { + msg(thread_num, "Error on copying %s aria log file.", log_file.c_str()); + m_tasks_group.finish_task(0); + } + else + m_tasks_group.finish_task(1); + return; + } +// Copy the last log file. 
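+// The last log file may still be appended to by the server, so only its
+// stable tail is copied here (finalize == false); finalize() later calls
+// copy_log_tail(0, true) to pick up whatever remains.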
+ m_tasks_group.finish_task(copy_log_tail(thread_num, false) ? 1 : 0); +} + +void BackupImpl::destroy_log_tail() { + if (m_last_log_src >= 0) { + my_close(m_last_log_src, MYF(MY_WME)); + m_last_log_src = -1; + } + if (m_last_log_dst) { + ds_close(m_last_log_dst); + m_last_log_dst = nullptr; + } +} + +bool BackupImpl::wait_for_finish() { + return m_tasks_group.wait_for_finish(); +} + +bool BackupImpl::copy_offline_tables( + const std::unordered_set *exclude_tables, bool no_lock, + bool copy_stats) { + DBUG_ASSERT(m_tasks_group.is_finished()); + + std::vector> ignored_tables; + + while (true) { + std::unique_lock lock(m_offline_tables_mutex); + if (m_offline_tables.empty()) + break; + auto table = std::move(m_offline_tables.back()); + m_offline_tables.pop_back(); + lock.unlock(); + if ((exclude_tables && + exclude_tables->count(table_key(table->get_db(), table->get_table()))) || + (!copy_stats && table->is_stats())) { + ignored_tables.push_back(std::move(table)); + continue; + } + m_tasks_group.push_task( + std::bind(&BackupImpl::process_table_job, this, table.release(), false, + copy_stats, no_lock, std::placeholders::_1)); + } + + if (!ignored_tables.empty()) { + std::lock_guard lock(m_offline_tables_mutex); + m_offline_tables = std::move(ignored_tables); + } + + return true; +} + +bool BackupImpl::finalize() { + DBUG_ASSERT(m_tasks_group.is_finished()); + DBUG_ASSERT(!m_con_pool.empty()); + bool result = true; + msg("Start copying statistics aria tables."); + copy_offline_tables(nullptr, true, true); + while (!m_tasks_group.is_finished()) + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + msg("Stop copying statistics aria tables."); + copy_log_tail(0, true); + destroy_log_tail(); + return result; +} + +Backup::Backup(const char *datadir_path, + const char *aria_log_path, + ds_ctxt_t *datasink, + std::vector &con_pool, ThreadPool &thread_pool) : + m_backup_impl( + new BackupImpl(datadir_path, aria_log_path, + datasink, opt_no_lock, con_pool, + thread_pool)) { } + +Backup::~Backup() { + delete m_backup_impl; +} + +bool Backup::init() { + return m_backup_impl->init(); +} + +bool Backup::start(bool no_lock) { + return m_backup_impl->start(no_lock); +} + +bool Backup::wait_for_finish() { + return m_backup_impl->wait_for_finish(); +} + +bool Backup::copy_offline_tables( + const std::unordered_set *exclude_tables, bool no_lock, + bool copy_stats) { + return m_backup_impl->copy_offline_tables(exclude_tables, no_lock, + copy_stats); +} + +bool Backup::finalize() { + return m_backup_impl->finalize(); +} + +bool Backup::copy_log_tail() { + return m_backup_impl->copy_log_tail(); +} + +void Backup::set_post_copy_table_hook(const post_copy_table_hook_t &hook) { + m_backup_impl->set_post_copy_table_hook(hook); +} + +bool prepare(const char *target_dir) { + maria_data_root= (char *)target_dir; + + if (maria_init()) + die("Can't init Aria engine (%d)", errno); + + maria_block_size= 0; /* Use block size from file */ + /* we don't want to create a control file, it MUST exist */ + if (ma_control_file_open(FALSE, TRUE, TRUE, control_file_open_flags)) + die("Can't open Aria control file (%d)", errno); + + if (last_logno == FILENO_IMPOSSIBLE) + die("Can't find any Aria log"); + + LogFileCollection logs(target_dir, last_logno); + logs.die_if_missing(last_logno); // Fatal, a broken backup. + /* + "mariadb-backup --backup" can put extra log files, + with log number greater than last_logno. 
For example, + this combination of files is possible: + - aria_log_control (with last_logno==1) + - aria_log.00000001 (last_logno) + - aria_log.00000002 (last_logno+1, the extra log file) + This can happen if during the ealier run of + "mariadb-backup --backup" a log rotate happened. + The extra log file is copied to the backup directory, + but last_logno in aria_log_control does not get updated. + This mismatch is probably not good and should eventually be fixed. + But during "mariadb-backup --prepare" this mismatch goes away: + aria_log_control gets fixed to say last_logno==2. + See mysql-test/suite/mariabackup/aria_log_rotate_during_backup.test, + it covers the scenario with one extra file created during --backup. + */ + logs.find_logs_after_last(target_dir); + last_logno= logs.last(); // Update last_logno if extra logs were found + + if (init_pagecache(maria_pagecache, 1024L*1024L, 0, 0, + static_cast(maria_block_size), 0, MY_WME) == 0) + die("Got error in Aria init_pagecache() (errno: %d)", errno); + + if (init_pagecache(maria_log_pagecache, 1024L*1024L, + 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0 || + translog_init(maria_data_root, TRANSLOG_FILE_SIZE, + 0, 0, maria_log_pagecache, TRANSLOG_DEFAULT_FLAGS, FALSE)) + die("Can't init Aria loghandler (%d)", errno); + + if (maria_recovery_from_log()) + die("Aria log apply FAILED"); + + if (maria_recovery_changed_data || recovery_failures) { + if (ma_control_file_write_and_force(last_checkpoint_lsn, last_logno, + max_trid_in_control_file, 0)) + die("Aria control file update error"); +// TODO: find out do we need checkpoint here + } + + maria_end(); + return true; +} + +} // namespace aria diff --git a/extra/mariabackup/aria_backup_client.h b/extra/mariabackup/aria_backup_client.h new file mode 100644 index 00000000000..7a581b5862e --- /dev/null +++ b/extra/mariabackup/aria_backup_client.h @@ -0,0 +1,38 @@ +#pragma once +#include "my_global.h" +#include "datasink.h" +#include "backup_mysql.h" +#include "thread_pool.h" +#include "xtrabackup.h" + +namespace aria { + +bool prepare(const char *target_dir); + +class BackupImpl; + +class Backup { + public: + Backup(const char *datadir_path, + const char *aria_log_path, + ds_ctxt_t *datasink, + std::vector &con_pool, ThreadPool &thread_pool); + ~Backup(); + Backup (Backup &&other) = delete; + Backup & operator= (Backup &&other) = delete; + Backup(const Backup &) = delete; + Backup & operator= (const Backup &) = delete; + bool init(); + bool start(bool no_lock); + bool wait_for_finish(); + bool copy_offline_tables( + const std::unordered_set *exclude_tables, bool no_lock, + bool copy_stats); + bool finalize(); + bool copy_log_tail(); + void set_post_copy_table_hook(const post_copy_table_hook_t &hook); + private: + BackupImpl *m_backup_impl; +}; + +} // namespace aria diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc index f8d315d9eb7..733281a49de 100644 --- a/extra/mariabackup/backup_copy.cc +++ b/extra/mariabackup/backup_copy.cc @@ -41,6 +41,9 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA *******************************************************/ #include +#include +#include +#include #include #include #include @@ -66,19 +69,26 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include #endif +#ifdef MYSQL_CLIENT +#define WAS_MYSQL_CLIENT 1 +#undef MYSQL_CLIENT +#endif + +#include "table.h" + +#ifdef WAS_MYSQL_CLIENT +#define MYSQL_CLIENT 1 +#undef WAS_MYSQL_CLIENT +#endif #define ROCKSDB_BACKUP_DIR "#rocksdb" -/* list of files to sync for --rsync mode */ -static 
std::set rsync_list; /* locations of tablespaces read from .isl files */ static std::map tablespace_locations; /* Whether LOCK BINLOG FOR BACKUP has been issued during backup */ bool binlog_locked; -static void rocksdb_create_checkpoint(); -static bool has_rocksdb_plugin(); static void rocksdb_backup_checkpoint(ds_ctxt *ds_data); static void rocksdb_copy_back(ds_ctxt *ds_data); @@ -135,10 +145,6 @@ struct datadir_thread_ctxt_t { bool ret; }; -static bool backup_files_from_datadir(ds_ctxt_t *ds_data, - const char *dir_path, - const char *prefix); - /************************************************************************ Retirn true if character if file separator */ bool @@ -585,7 +591,6 @@ datafile_read(datafile_cur_t *cursor) Check to see if a file exists. Takes name of the file to check. @return true if file exists. */ -static bool file_exists(const char *filename) { @@ -601,7 +606,6 @@ file_exists(const char *filename) /************************************************************************ Trim leading slashes from absolute path so it becomes relative */ -static const char * trim_dotslash(const char *path) { @@ -634,7 +638,7 @@ ends_with(const char *str, const char *suffix) && strcmp(str + str_len - suffix_len, suffix) == 0); } -static bool starts_with(const char *str, const char *prefix) +bool starts_with(const char *str, const char *prefix) { return strncmp(str, prefix, strlen(prefix)) == 0; } @@ -785,7 +789,6 @@ directory_exists_and_empty(const char *dir, const char *comment) /************************************************************************ Check if file name ends with given set of suffixes. @return true if it does. */ -static bool filename_matches(const char *filename, const char **ext_list) { @@ -800,6 +803,115 @@ filename_matches(const char *filename, const char **ext_list) return(false); } +// TODO: the code can be used to find storage engine of partitions +/* +static +bool is_aria_frm_or_par(const char *path) { + if (!ends_with(path, ".frm") && !ends_with(path, ".par")) + return false; + + const char *frm_path = path; + if (ends_with(path, ".par")) { + size_t frm_path_len = strlen(path); + DBUG_ASSERT(frm_path_len > strlen("frm")); + frm_path = strdup(path); + strcpy(const_cast(frm_path) + frm_path_len - strlen("frm"), "frm"); + } + + bool result = false; + File file; + uchar header[40]; + legacy_db_type dbt; + + if ((file= mysql_file_open(key_file_frm, frm_path, O_RDONLY | O_SHARE, MYF(0))) + < 0) + goto err; + + if (mysql_file_read(file, (uchar*) header, sizeof(header), MYF(MY_NABP))) + goto err; + + if (!strncmp((char*) header, "TYPE=VIEW\n", 10)) + goto err; + + if (!is_binary_frm_header(header)) + goto err; + + dbt = (legacy_db_type)header[3]; + + if (dbt == DB_TYPE_ARIA) { + result = true; + } + else if (dbt == DB_TYPE_PARTITION_DB) { + MY_STAT state; + uchar *frm_image= 0; +// uint n_length; + + if (mysql_file_fstat(file, &state, MYF(MY_WME))) + goto err; + + if (mysql_file_seek(file, 0, SEEK_SET, MYF(MY_WME))) + goto err; + + if (read_string(file, &frm_image, (size_t)state.st_size)) + goto err; + + dbt = (legacy_db_type)frm_image[61]; + if (dbt == DB_TYPE_ARIA) { + result = true; + } + my_free(frm_image); + } + +err: + if (file >= 0) + mysql_file_close(file, MYF(MY_WME)); + if (frm_path != path) + free(const_cast(frm_path)); + return result; +} +*/ + +void parse_db_table_from_file_path( + const char *filepath, char *dbname, char *tablename) { + dbname[0] = '\0'; + tablename[0] = '\0'; + const char *dbname_start = nullptr; + const char *tablename_start = 
filepath; + const char *const_ptr; + while ((const_ptr = strchr(tablename_start, FN_LIBCHAR)) != NULL) { + dbname_start = tablename_start; + tablename_start = const_ptr + 1; + } + if (!dbname_start) + return; + size_t dbname_len = tablename_start - dbname_start - 1; + if (dbname_len >= FN_REFLEN) + dbname_len = FN_REFLEN-1; + strmake(dbname, dbname_start, dbname_len); + strmake(tablename, tablename_start, FN_REFLEN-1); + char *ptr; + if ((ptr = strchr(tablename, '.'))) + *ptr = '\0'; + if ((ptr = strstr(tablename, "#P#"))) + *ptr = '\0'; +} + +bool is_system_table(const char *dbname, const char *tablename) +{ + DBUG_ASSERT(dbname); + DBUG_ASSERT(tablename); + + LEX_CSTRING lex_dbname; + LEX_CSTRING lex_tablename; + lex_dbname.str = dbname; + lex_dbname.length = strlen(dbname); + lex_tablename.str = tablename; + lex_tablename.length = strlen(tablename); + + TABLE_CATEGORY tg = get_table_category(&lex_dbname, &lex_tablename); + + return (tg == TABLE_CATEGORY_LOG) || (tg == TABLE_CATEGORY_SYSTEM); +} /************************************************************************ Copy data file for backup. Also check if it is allowed to copy by @@ -810,9 +922,8 @@ static bool datafile_copy_backup(ds_ctxt *ds_data, const char *filepath, uint thread_n) { - const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI", - "MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par", - NULL}; + const char *ext_list[] = {".frm", ".isl", ".TRG", ".TRN", ".opt", ".par", + NULL}; /* Get the name and the path for the tablespace. node->name always contains the path (which may be absolute for remote tablespaces in @@ -830,42 +941,7 @@ datafile_copy_backup(ds_ctxt *ds_data, const char *filepath, uint thread_n) if (filename_matches(filepath, ext_list)) { return ds_data->copy_file(filepath, filepath, thread_n); - } - - return(true); -} - - -/************************************************************************ -Same as datafile_copy_backup, but put file name into the list for -rsync command. */ -static -bool -datafile_rsync_backup(const char *filepath, bool save_to_list, FILE *f) -{ - const char *ext_list[] = {"frm", "isl", "MYD", "MYI", "MAD", "MAI", - "MRG", "TRG", "TRN", "ARM", "ARZ", "CSM", "CSV", "opt", "par", - NULL}; - - /* Get the name and the path for the tablespace. node->name always - contains the path (which may be absolute for remote tablespaces in - 5.6+). space->name contains the tablespace name in the form - "./database/table.ibd" (in 5.5-) or "database/table" (in 5.6+). For a - multi-node shared tablespace, space->name contains the name of the first - node, but that's irrelevant, since we only need node_name to match them - against filters, and the shared tablespace is always copied regardless - of the filters value. */ - - if (check_if_skip_table(filepath)) { - return(true); - } - - if (filename_matches(filepath, ext_list)) { - fprintf(f, "%s\n", filepath); - if (save_to_list) { - rsync_list.insert(filepath); - } - } + } return(true); } @@ -1004,16 +1080,15 @@ Copy file for backup/restore. bool ds_ctxt_t::copy_file(const char *src_file_path, const char *dst_file_path, - uint thread_n) + uint thread_n, + bool rewrite) { char dst_name[FN_REFLEN]; ds_file_t *dstfile = NULL; datafile_cur_t cursor; xb_fil_cur_result_t res; DBUG_ASSERT(datasink->remove); - const char *dst_path = - (xtrabackup_copy_back || xtrabackup_move_back)? 
- dst_file_path : trim_dotslash(dst_file_path); + const char *dst_path = convert_dst(dst_file_path); if (!datafile_open(src_file_path, &cursor, thread_n)) { goto error_close; @@ -1021,7 +1096,7 @@ ds_ctxt_t::copy_file(const char *src_file_path, strncpy(dst_name, cursor.rel_path, sizeof(dst_name)); - dstfile = ds_open(this, dst_path, &cursor.statinfo); + dstfile = ds_open(this, dst_path, &cursor.statinfo, rewrite); if (dstfile == NULL) { msg(thread_n,"error: " "cannot open the destination stream for %s", dst_name); @@ -1245,278 +1320,45 @@ cleanup: } - - -static bool -backup_files(ds_ctxt *ds_data, const char *from, bool prep_mode) +backup_files(ds_ctxt *ds_data, const char *from) { - char rsync_tmpfile_name[FN_REFLEN]; - FILE *rsync_tmpfile = NULL; datadir_iter_t *it; datadir_node_t node; bool ret = true; - - if (prep_mode && !opt_rsync) { - return(true); - } - - if (opt_rsync) { - snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name), - "%s/%s%d", opt_mysql_tmpdir, - "xtrabackup_rsyncfiles_pass", - prep_mode ? 1 : 2); - rsync_tmpfile = fopen(rsync_tmpfile_name, "w"); - if (rsync_tmpfile == NULL) { - msg("Error: can't create file %s", - rsync_tmpfile_name); - return(false); - } - } - - msg("Starting %s non-InnoDB tables and files", - prep_mode ? "prep copy of" : "to backup"); - + msg("Starting to backup non-InnoDB tables and files"); datadir_node_init(&node); it = datadir_iter_new(from); - while (datadir_iter_next(it, &node)) { - if (!node.is_empty_dir) { - if (opt_rsync) { - ret = datafile_rsync_backup(node.filepath, - !prep_mode, rsync_tmpfile); - } else { - ret = datafile_copy_backup(ds_data, node.filepath, 1); - } + ret = datafile_copy_backup(ds_data, node.filepath, 1); if (!ret) { msg("Failed to copy file %s", node.filepath); goto out; } - } else if (!prep_mode) { + } else { /* backup fake file into empty directory */ char path[FN_REFLEN]; - snprintf(path, sizeof(path), - "%s/db.opt", node.filepath); - if (!(ret = ds_data->backup_file_printf( - trim_dotslash(path), "%s", ""))) { + snprintf(path, sizeof(path), "%s/db.opt", node.filepath); + if (!(ret = ds_data->backup_file_printf(trim_dotslash(path), "%s", ""))) { msg("Failed to create file %s", path); goto out; } } } - - if (opt_rsync) { - std::stringstream cmd; - int err; - - if (buffer_pool_filename && file_exists(buffer_pool_filename)) { - fprintf(rsync_tmpfile, "%s\n", buffer_pool_filename); - rsync_list.insert(buffer_pool_filename); - } - if (file_exists("ib_lru_dump")) { - fprintf(rsync_tmpfile, "%s\n", "ib_lru_dump"); - rsync_list.insert("ib_lru_dump"); - } - - fclose(rsync_tmpfile); - rsync_tmpfile = NULL; - - cmd << "rsync -t . --files-from=" << rsync_tmpfile_name - << " " << xtrabackup_target_dir; - - msg("Starting rsync as: %s", cmd.str().c_str()); - if ((err = system(cmd.str().c_str()) && !prep_mode) != 0) { - msg("Error: rsync failed with error code %d", err); - ret = false; - goto out; - } - msg("rsync finished successfully."); - - if (!prep_mode && !opt_no_lock) { - char path[FN_REFLEN]; - char dst_path[FN_REFLEN]; - char *newline; - - /* Remove files that have been removed between first and - second passes. Cannot use "rsync --delete" because it - does not work with --files-from. 
*/ - snprintf(rsync_tmpfile_name, sizeof(rsync_tmpfile_name), - "%s/%s", opt_mysql_tmpdir, - "xtrabackup_rsyncfiles_pass1"); - - rsync_tmpfile = fopen(rsync_tmpfile_name, "r"); - if (rsync_tmpfile == NULL) { - msg("Error: can't open file %s", - rsync_tmpfile_name); - ret = false; - goto out; - } - - while (fgets(path, sizeof(path), rsync_tmpfile)) { - - newline = strchr(path, '\n'); - if (newline) { - *newline = 0; - } - if (rsync_list.count(path) < 1) { - snprintf(dst_path, sizeof(dst_path), - "%s/%s", xtrabackup_target_dir, - path); - msg("Removing %s", dst_path); - unlink(dst_path); - } - } - - fclose(rsync_tmpfile); - rsync_tmpfile = NULL; - } - } - - msg("Finished %s non-InnoDB tables and files", - prep_mode ? "a prep copy of" : "backing up"); - + msg("Finished backing up non-InnoDB tables and files"); out: datadir_iter_free(it); datadir_node_free(&node); - - if (rsync_tmpfile != NULL) { - fclose(rsync_tmpfile); - } - return(ret); } - -lsn_t get_current_lsn(MYSQL *connection) -{ - static const char lsn_prefix[] = "\nLog sequence number "; - lsn_t lsn = 0; - if (MYSQL_RES *res = xb_mysql_query(connection, - "SHOW ENGINE INNODB STATUS", - true, false)) { - if (MYSQL_ROW row = mysql_fetch_row(res)) { - const char *p= strstr(row[2], lsn_prefix); - DBUG_ASSERT(p); - if (p) { - p += sizeof lsn_prefix - 1; - lsn = lsn_t(strtoll(p, NULL, 10)); - } - } - mysql_free_result(res); - } - return lsn; -} - lsn_t server_lsn_after_lock; extern void backup_wait_for_lsn(lsn_t lsn); -/** Start --backup */ -bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta, - CorruptedPages &corrupted_pages) -{ - if (!opt_no_lock) { - if (opt_safe_slave_backup) { - if (!wait_for_safe_slave(mysql_connection)) { - return(false); - } - } - - if (!backup_files(ds_data, fil_path_to_mysql_datadir, true)) { - return(false); - } - - history_lock_time = time(NULL); - - if (!lock_tables(mysql_connection)) { - return(false); - } - server_lsn_after_lock = get_current_lsn(mysql_connection); - } - - if (!backup_files(ds_data, fil_path_to_mysql_datadir, false)) { - return(false); - } - - if (!backup_files_from_datadir(ds_data, fil_path_to_mysql_datadir, - "aws-kms-key") || - !backup_files_from_datadir(ds_data, - aria_log_dir_path, - "aria_log")) { - return false; - } - - if (has_rocksdb_plugin()) { - rocksdb_create_checkpoint(); - } - - msg("Waiting for log copy thread to read lsn %llu", (ulonglong)server_lsn_after_lock); - backup_wait_for_lsn(server_lsn_after_lock); - DBUG_EXECUTE_FOR_KEY("sleep_after_waiting_for_lsn", {}, - { - ulong milliseconds = strtoul(dbug_val, NULL, 10); - msg("sleep_after_waiting_for_lsn"); - my_sleep(milliseconds*1000UL); - }); - - corrupted_pages.backup_fix_ddl(ds_data, ds_meta); - - // There is no need to stop slave thread before coping non-Innodb data when - // --no-lock option is used because --no-lock option requires that no DDL or - // DML to non-transaction tables can occur. - if (opt_no_lock) { - if (opt_safe_slave_backup) { - if (!wait_for_safe_slave(mysql_connection)) { - return(false); - } - } - } - - if (opt_slave_info) { - lock_binlog_maybe(mysql_connection); - - if (!write_slave_info(ds_data, mysql_connection)) { - return(false); - } - } - - /* The only reason why Galera/binlog info is written before - wait_for_ibbackup_log_copy_finish() is that after that call the xtrabackup - binary will start streamig a temporary copy of REDO log to stdout and - thus, any streaming from innobackupex would interfere. The only way to - avoid that is to have a single process, i.e. 
merge innobackupex and - xtrabackup. */ - if (opt_galera_info) { - if (!write_galera_info(ds_data, mysql_connection)) { - return(false); - } - } - - if (opt_binlog_info == BINLOG_INFO_ON) { - - lock_binlog_maybe(mysql_connection); - write_binlog_info(ds_data, mysql_connection); - } - - if (!opt_no_lock) { - msg("Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS..."); - xb_mysql_query(mysql_connection, - "FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false); - } - - return(true); -} /** Release resources after backup_start() */ void backup_release() { - /* release all locks */ - if (!opt_no_lock) { - unlock_all(mysql_connection); - history_lock_time = 0; - } else { - history_lock_time = time(NULL) - history_lock_time; - } - if (opt_lock_ddl_per_table) { mdl_unlock_all(); } @@ -1534,7 +1376,7 @@ static const char *default_buffer_pool_file = "ib_buffer_pool"; bool backup_finish(ds_ctxt *ds_data) { /* Copy buffer pool dump or LRU dump */ - if (!opt_rsync && opt_galera_info) { + if (opt_galera_info) { if (buffer_pool_filename && file_exists(buffer_pool_filename)) { ds_data->copy_file(buffer_pool_filename, default_buffer_pool_file, 0); } @@ -1893,8 +1735,6 @@ copy_back() return(false); } - srv_max_n_threads = 1000; - /* copy undo tablespaces */ Copy_back_dst_dir dst_dir_buf; @@ -1922,7 +1762,8 @@ copy_back() dst_dir = dst_dir_buf.make(srv_log_group_home_dir); - /* --backup generates a single ib_logfile0, which we must copy. */ + /* --backup generates a single LOG_FILE_NAME, which we must copy + if it exists. */ ds_tmp = ds_create(dst_dir, DS_TYPE_LOCAL); if (!(ret = copy_or_move_file(ds_tmp, LOG_FILE_NAME, LOG_FILE_NAME, @@ -2155,8 +1996,6 @@ decrypt_decompress() bool ret; datadir_iter_t *it = NULL; - srv_max_n_threads = 1000; - /* cd to backup directory */ if (my_setwd(xtrabackup_target_dir, MYF(MY_WME))) { @@ -2169,8 +2008,6 @@ decrypt_decompress() it = datadir_iter_new(".", false); - ut_a(xtrabackup_parallel >= 0); - ret = run_data_threads(it, decrypt_decompress_thread_func, xtrabackup_parallel ? xtrabackup_parallel : 1); @@ -2192,9 +2029,9 @@ decrypt_decompress() Do not copy the Innodb files (ibdata1, redo log files), as this is done in a separate step. 
*/ -static bool backup_files_from_datadir(ds_ctxt_t *ds_data, - const char *dir_path, - const char *prefix) +bool backup_files_from_datadir(ds_ctxt_t *ds_data, + const char *dir_path, + const char *prefix) { os_file_dir_t dir = os_file_opendir(dir_path); if (dir == IF_WIN(INVALID_HANDLE_VALUE, nullptr)) return false; @@ -2218,10 +2055,6 @@ static bool backup_files_from_datadir(ds_ctxt_t *ds_data, pname = info.name; if (!starts_with(pname, prefix)) - /* For ES exchange the above line with the following code: - (!xtrabackup_prepare || !xtrabackup_incremental_dir || - !starts_with(pname, "aria_log"))) - */ continue; if (xtrabackup_prepare && xtrabackup_incremental_dir && @@ -2244,7 +2077,7 @@ static int rocksdb_remove_checkpoint_directory() return 0; } -static bool has_rocksdb_plugin() +bool has_rocksdb_plugin() { static bool first_time = true; static bool has_plugin= false; @@ -2390,7 +2223,7 @@ static void rocksdb_unlock_checkpoint() #define MARIADB_CHECKPOINT_DIR "mariabackup-checkpoint" static char rocksdb_checkpoint_dir[FN_REFLEN]; -static void rocksdb_create_checkpoint() +void rocksdb_create_checkpoint() { MYSQL_RES *result = xb_mysql_query(mysql_connection, "SELECT @@rocksdb_datadir,@@datadir", true, true); MYSQL_ROW row = mysql_fetch_row(result); @@ -2470,3 +2303,39 @@ static void rocksdb_copy_back(ds_ctxt *ds_data) { mkdirp(rocksdb_home_dir, 0777, MYF(0)); ds_data->copy_or_move_dir(ROCKSDB_BACKUP_DIR, rocksdb_home_dir, xtrabackup_copy_back, xtrabackup_copy_back); } + +void foreach_file_in_db_dirs( + const char *dir_path, std::function func) { + DBUG_ASSERT(dir_path); + + datadir_iter_t *it; + datadir_node_t node; + + datadir_node_init(&node); + it = datadir_iter_new(dir_path); + + while (datadir_iter_next(it, &node)) + if (!node.is_empty_dir && !func(node.filepath)) + break; + + datadir_iter_free(it); + datadir_node_free(&node); +} + +void foreach_file_in_datadir( + const char *dir_path, std::function func) +{ + DBUG_ASSERT(dir_path); + os_file_dir_t dir = os_file_opendir(dir_path); + os_file_stat_t info; + while (os_file_readdir_next_file(dir_path, dir, &info) == 0) { + if (info.type != OS_FILE_TYPE_FILE) + continue; + const char *pname = strrchr(info.name, IF_WIN('\\', '/')); + if (!pname) + pname = info.name; + if (!func(pname)) + break; + } + os_file_closedir(dir); +} diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h index b5aaf3121e9..43b75e19939 100644 --- a/extra/mariabackup/backup_copy.h +++ b/extra/mariabackup/backup_copy.h @@ -2,6 +2,7 @@ #ifndef XTRABACKUP_BACKUP_COPY_H #define XTRABACKUP_BACKUP_COPY_H +#include #include #include #include "datasink.h" @@ -21,8 +22,7 @@ bool equal_paths(const char *first, const char *second); /** Start --backup */ -bool backup_start(ds_ctxt *ds_data, ds_ctxt *ds_meta, - CorruptedPages &corrupted_pages); +bool backup_files(ds_ctxt *ds_data, const char *from); /** Release resources after backup_start() */ void backup_release(); /** Finish after backup_start() and backup_release() */ @@ -38,7 +38,25 @@ is_path_separator(char); bool directory_exists(const char *dir, bool create); -lsn_t -get_current_lsn(MYSQL *connection); +bool has_rocksdb_plugin(); +void rocksdb_create_checkpoint(); +void foreach_file_in_db_dirs( + const char *dir_path, std::function func); +void foreach_file_in_datadir( + const char *dir_path, std::function func); +bool ends_with(const char *str, const char *suffix); +bool starts_with(const char *str, const char *prefix); +void parse_db_table_from_file_path( + const char *filepath, char *dbname, 
char *tablename); +const char *trim_dotslash(const char *path); +bool backup_files_from_datadir(ds_ctxt_t *ds_data, + const char *dir_path, + const char *prefix); +bool is_system_table(const char *dbname, const char *tablename); +std::unique_ptr> + find_files(const char *dir_path, const char *prefix, const char *suffix); +bool file_exists(const char *filename); +bool +filename_matches(const char *filename, const char **ext_list); #endif diff --git a/extra/mariabackup/backup_debug.h b/extra/mariabackup/backup_debug.h index 777b4f4adeb..9286bc7b4e2 100644 --- a/extra/mariabackup/backup_debug.h +++ b/extra/mariabackup/backup_debug.h @@ -1,5 +1,6 @@ #pragma once #include "my_dbug.h" + #ifndef DBUG_OFF char *dbug_mariabackup_get_val(const char *event, fil_space_t::name_type key); /* @@ -14,11 +15,21 @@ To use this facility, you need to for the variable) 3. start mariabackup with --dbug=+d,debug_mariabackup_events */ -#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) \ - DBUG_EXECUTE_IF("mariabackup_inject_code", \ - { char *dbug_val= dbug_mariabackup_get_val(EVENT, KEY); \ - if (dbug_val) CODE }) +extern void dbug_mariabackup_event( + const char *event, const fil_space_t::name_type key, bool need_lock); +#define DBUG_MARIABACKUP_EVENT(A, B) \ + DBUG_EXECUTE_IF("mariabackup_events", \ + dbug_mariabackup_event(A,B,false);); +#define DBUG_MARIABACKUP_EVENT_LOCK(A, B) \ + DBUG_EXECUTE_IF("mariabackup_events", \ + dbug_mariabackup_event(A,B, true);); +#define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) \ + DBUG_EXECUTE_IF("mariabackup_inject_code", {\ + char *dbug_val = dbug_mariabackup_get_val(EVENT, KEY); \ + if (dbug_val && *dbug_val) CODE \ + }) #else +#define DBUG_MARIABACKUP_EVENT(A,B) +#define DBUG_MARIABACKUP_EVENT_LOCK(A,B) #define DBUG_EXECUTE_FOR_KEY(EVENT, KEY, CODE) #endif - diff --git a/extra/mariabackup/backup_mysql.cc b/extra/mariabackup/backup_mysql.cc index 5b98c630030..ab543b5ad0b 100644 --- a/extra/mariabackup/backup_mysql.cc +++ b/extra/mariabackup/backup_mysql.cc @@ -60,10 +60,11 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include "backup_copy.h" #include "backup_mysql.h" #include "mysqld.h" -#include "xb_plugin.h" +#include "encryption_plugin.h" #include #include #include "page0zip.h" +#include "backup_debug.h" char *tool_name; char tool_args[2048]; @@ -71,7 +72,7 @@ char tool_args[2048]; ulong mysql_server_version; /* server capabilities */ -bool have_backup_locks = false; +bool have_changed_page_bitmaps = false; bool have_lock_wait_timeout = false; bool have_galera_enabled = false; bool have_multi_threaded_slave = false; @@ -251,13 +252,14 @@ struct mysql_variable { static -void +uint read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars, bool vertical_result) { MYSQL_RES *mysql_result; MYSQL_ROW row; mysql_variable *var; + uint n_values=0; mysql_result = xb_mysql_query(connection, query, true); @@ -271,6 +273,7 @@ read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars, if (strcmp(var->name, name) == 0 && value != NULL) { *(var->value) = strdup(value); + n_values++; } } } @@ -287,6 +290,7 @@ read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars, if (strcmp(var->name, name) == 0 && value != NULL) { *(var->value) = strdup(value); + n_values++; } } ++i; @@ -295,6 +299,7 @@ read_mysql_variables(MYSQL *connection, const char *query, mysql_variable *vars, } mysql_free_result(mysql_result); + return n_values; } @@ -359,7 +364,6 @@ bool get_mysql_vars(MYSQL *connection) { char *gtid_mode_var= NULL; char 
*version_var= NULL; - char *have_backup_locks_var= NULL; char *log_bin_var= NULL; char *lock_wait_timeout_var= NULL; char *wsrep_on_var= NULL; @@ -384,7 +388,6 @@ bool get_mysql_vars(MYSQL *connection) bool ret= true; mysql_variable mysql_vars[]= { - {"have_backup_locks", &have_backup_locks_var}, {"log_bin", &log_bin_var}, {"lock_wait_timeout", &lock_wait_timeout_var}, {"gtid_mode", >id_mode_var}, @@ -409,11 +412,6 @@ bool get_mysql_vars(MYSQL *connection) read_mysql_variables(connection, "SHOW VARIABLES", mysql_vars, true); - if (have_backup_locks_var != NULL && !opt_no_backup_locks) - { - have_backup_locks= true; - } - if (opt_binlog_info == BINLOG_INFO_AUTO) { if (log_bin_var != NULL && !strcmp(log_bin_var, "ON")) @@ -867,11 +865,11 @@ static void stop_query_killer() /*********************************************************************//** -Function acquires either a backup tables lock, if supported -by the server, or a global read lock (FLUSH TABLES WITH READ LOCK) -otherwise. +Function acquires backup locks @returns true if lock acquired */ -bool lock_tables(MYSQL *connection) + +bool +lock_for_backup_stage_start(MYSQL *connection) { if (have_lock_wait_timeout || opt_lock_wait_timeout) { @@ -884,12 +882,6 @@ bool lock_tables(MYSQL *connection) xb_mysql_query(connection, buf, false); } - if (have_backup_locks) - { - msg("Executing LOCK TABLES FOR BACKUP..."); - xb_mysql_query(connection, "LOCK TABLES FOR BACKUP", false); - return (true); - } if (opt_lock_wait_timeout) { @@ -914,8 +906,6 @@ bool lock_tables(MYSQL *connection) xb_mysql_query(connection, "BACKUP STAGE START", true); DBUG_MARIABACKUP_EVENT("after_backup_stage_start", {}); - xb_mysql_query(connection, "BACKUP STAGE BLOCK_COMMIT", true); - DBUG_MARIABACKUP_EVENT("after_backup_stage_block_commit", {}); /* Set the maximum supported session value for lock_wait_timeout to prevent unnecessary timeouts when the global value is changed from the default */ @@ -931,24 +921,68 @@ bool lock_tables(MYSQL *connection) return (true); } -/*********************************************************************//** -If backup locks are used, execute LOCK BINLOG FOR BACKUP provided that we are -not in the --no-lock mode and the lock has not been acquired already. 
-@returns true if lock acquired */ bool -lock_binlog_maybe(MYSQL *connection) -{ - if (have_backup_locks && !opt_no_lock && !binlog_locked) { - msg("Executing LOCK BINLOG FOR BACKUP..."); - xb_mysql_query(connection, "LOCK BINLOG FOR BACKUP", false); - binlog_locked = true; - - return(true);
+lock_for_backup_stage_flush(MYSQL *connection) { + if (opt_kill_long_queries_timeout) { + start_query_killer(); } - - return(false); + xb_mysql_query(connection, "BACKUP STAGE FLUSH", true); + if (opt_kill_long_queries_timeout) { + stop_query_killer(); + } + return true; }
+bool +lock_for_backup_stage_block_ddl(MYSQL *connection) { + if (opt_kill_long_queries_timeout) { + start_query_killer(); + } + xb_mysql_query(connection, "BACKUP STAGE BLOCK_DDL", true); + DBUG_MARIABACKUP_EVENT("after_backup_stage_block_ddl", {}); + if (opt_kill_long_queries_timeout) { + stop_query_killer(); + } + return true; +} +
+bool +lock_for_backup_stage_commit(MYSQL *connection) { + if (opt_kill_long_queries_timeout) { + start_query_killer(); + } + xb_mysql_query(connection, "BACKUP STAGE BLOCK_COMMIT", true); + DBUG_MARIABACKUP_EVENT("after_backup_stage_block_commit", {}); + if (opt_kill_long_queries_timeout) { + stop_query_killer(); + } + return true; +} +
+bool backup_lock(MYSQL *con, const char *table_name) { + static const std::string backup_lock_prefix("BACKUP LOCK "); + std::string backup_lock_query = backup_lock_prefix + table_name; + xb_mysql_query(con, backup_lock_query.c_str(), true); + return true; +} +
+bool backup_unlock(MYSQL *con) { + xb_mysql_query(con, "BACKUP UNLOCK", true); + return true; +} +
+std::unordered_set<std::string> +get_tables_in_use(MYSQL *con) { + std::unordered_set<std::string> result; + MYSQL_RES *q_res = + xb_mysql_query(con, "SHOW OPEN TABLES WHERE In_use = 1", true); + while (MYSQL_ROW row = mysql_fetch_row(q_res)) { + auto tk = table_key(row[0], row[1]); + msg("Table %s is in use", tk.c_str()); + result.insert(std::move(tk)); + } + return result; +} /*********************************************************************//** Releases either global read lock acquired with FTWRL and the binlog @@ -1383,77 +1417,103 @@ write_slave_info(ds_ctxt *datasink, MYSQL *connection) /*********************************************************************//** -Retrieves MySQL Galera and -saves it in a file. It also prints it to stdout. */ +Retrieves MySQL Galera info and saves it in a file. It also prints it to stdout. +
+We should create the xtrabackup_galera_info file even when backup locks +are used because the donor's wsrep_gtid_domain_id is needed later by the joiner. +Note that at this stage wsrep_local_state_uuid and wsrep_last_committed +are inconsistent, but they are not used by the joiner. The joiner will rewrite this file +at the mariabackup --prepare phase and thus there is an extra file, donor_galera_info. +The information is needed to keep wsrep_gtid_domain_id and gtid_binlog_pos +the same across the cluster. If the joiner node has a different wsrep_gtid_domain_id, +we should still receive the effective domain id from the donor node, +and use it.
+*/ bool write_galera_info(ds_ctxt *datasink, MYSQL *connection) { - char *state_uuid = NULL, *state_uuid55 = NULL; - char *last_committed = NULL, *last_committed55 = NULL; - char *domain_id = NULL, *domain_id55 = NULL; - bool result; + char *state_uuid = NULL, *state_uuid55 = NULL; + char *last_committed = NULL, *last_committed55 = NULL; + char *domain_id = NULL, *domain_id55 = NULL; + bool result=true; + uint n_values=0; + char *wsrep_on = NULL, *wsrep_on55 = NULL; - mysql_variable status[] = { - {"Wsrep_local_state_uuid", &state_uuid}, - {"wsrep_local_state_uuid", &state_uuid55}, - {"Wsrep_last_committed", &last_committed}, - {"wsrep_last_committed", &last_committed55}, - {NULL, NULL} - }; + mysql_variable vars[] = { + {"Wsrep_on", &wsrep_on}, + {"wsrep_on", &wsrep_on55}, + {NULL, NULL} + }; - mysql_variable value[] = { - {"Wsrep_gtid_domain_id", &domain_id}, - {"wsrep_gtid_domain_id", &domain_id55}, - {NULL, NULL} - }; + mysql_variable status[] = { + {"Wsrep_local_state_uuid", &state_uuid}, + {"wsrep_local_state_uuid", &state_uuid55}, + {"Wsrep_last_committed", &last_committed}, + {"wsrep_last_committed", &last_committed55}, + {NULL, NULL} + }; - /* When backup locks are supported by the server, we should skip - creating xtrabackup_galera_info file on the backup stage, because - wsrep_local_state_uuid and wsrep_last_committed will be inconsistent - without blocking commits. The state file will be created on the prepare - stage using the WSREP recovery procedure. */ - if (have_backup_locks) { - return(true); - } + mysql_variable value[] = { + {"Wsrep_gtid_domain_id", &domain_id}, + {"wsrep_gtid_domain_id", &domain_id55}, + {NULL, NULL} + }; - read_mysql_variables(connection, "SHOW STATUS", status, true); + n_values= read_mysql_variables(connection, "SHOW VARIABLES", vars, true); - if ((state_uuid == NULL && state_uuid55 == NULL) - || (last_committed == NULL && last_committed55 == NULL)) { - msg("Warning: failed to get master wsrep state from SHOW STATUS."); - result = true; - goto cleanup; - } + if (n_values == 0 || (wsrep_on == NULL && wsrep_on55 == NULL)) + { + msg("Server is not Galera node thus --galera-info does not " + "have any effect."); + result = true; + goto cleanup; + } - read_mysql_variables(connection, "SHOW VARIABLES LIKE 'wsrep%'", value, true); + read_mysql_variables(connection, "SHOW STATUS", status, true); - if (domain_id == NULL && domain_id55 == NULL) { - msg("Warning: failed to get master wsrep state from SHOW VARIABLES."); - result = true; - goto cleanup; - } + if ((state_uuid == NULL && state_uuid55 == NULL) + || (last_committed == NULL && last_committed55 == NULL)) + { + msg("Warning: failed to get master wsrep state from SHOW STATUS."); + result = true; + goto cleanup; + } - result = datasink->backup_file_printf(XTRABACKUP_GALERA_INFO, - "%s:%s %s\n", state_uuid ? state_uuid : state_uuid55, - last_committed ? last_committed : last_committed55, - domain_id ? domain_id : domain_id55); + n_values= read_mysql_variables(connection, "SHOW VARIABLES LIKE 'wsrep%'", value, true); - if (result) - { - result= datasink->backup_file_printf(XTRABACKUP_DONOR_GALERA_INFO, - "%s:%s %s\n", state_uuid ? state_uuid : state_uuid55, - last_committed ? last_committed : last_committed55, - domain_id ? 
domain_id : domain_id55); - } - if (result) - { - write_current_binlog_file(datasink, connection); - } + if (n_values == 0 || (domain_id == NULL && domain_id55 == NULL)) + { + msg("Warning: failed to get master wsrep state from SHOW VARIABLES."); + result = true; + goto cleanup; + } + + result= datasink->backup_file_printf(XTRABACKUP_GALERA_INFO, + "%s:%s %s\n", state_uuid ? state_uuid : state_uuid55, + last_committed ? last_committed : last_committed55, + domain_id ? domain_id : domain_id55); + + if (result) + { + result= datasink->backup_file_printf(XTRABACKUP_DONOR_GALERA_INFO, + "%s:%s %s\n", state_uuid ? state_uuid : state_uuid55, + last_committed ? last_committed : last_committed55, + domain_id ? domain_id : domain_id55); + } + + if (result) + write_current_binlog_file(datasink, connection); + + if (result) + msg("Writing Galera info succeeded with %s:%s %s", + state_uuid ? state_uuid : state_uuid55, + last_committed ? last_committed : last_committed55, + domain_id ? domain_id : domain_id55); cleanup: - free_mysql_variables(status); + free_mysql_variables(status); - return(result); + return(result); } @@ -1496,8 +1556,6 @@ write_current_binlog_file(ds_ctxt *datasink, MYSQL *connection) if (gtid_exists) { size_t log_bin_dir_length; - lock_binlog_maybe(connection); - xb_mysql_query(connection, "FLUSH BINARY LOGS", false); read_mysql_variables(connection, "SHOW MASTER STATUS", @@ -1856,13 +1914,13 @@ bool write_backup_config_file(ds_ctxt *datasink) srv_log_file_size, srv_page_size, srv_undo_dir, - srv_undo_tablespaces, + (uint) srv_undo_tablespaces, page_zip_level, innobase_buffer_pool_filename ? "innodb_buffer_pool_filename=" : "", innobase_buffer_pool_filename ? innobase_buffer_pool_filename : "", - xb_plugin_get_config()); + encryption_plugin_get_config()); return rc; } @@ -1987,3 +2045,23 @@ mdl_unlock_all() mysql_close(mdl_con); spaceid_to_tablename.clear(); } + +ulonglong get_current_lsn(MYSQL *connection) +{ + static const char lsn_prefix[] = "\nLog sequence number "; + ulonglong lsn = 0; + if (MYSQL_RES *res = xb_mysql_query(connection, + "SHOW ENGINE INNODB STATUS", + true, false)) { + if (MYSQL_ROW row = mysql_fetch_row(res)) { + const char *p= strstr(row[2], lsn_prefix); + DBUG_ASSERT(p); + if (p) { + p += sizeof lsn_prefix - 1; + lsn = lsn_t(strtoll(p, NULL, 10)); + } + } + mysql_free_result(res); + } + return lsn; +} diff --git a/extra/mariabackup/backup_mysql.h b/extra/mariabackup/backup_mysql.h index 039934be02d..c87efd21c11 100644 --- a/extra/mariabackup/backup_mysql.h +++ b/extra/mariabackup/backup_mysql.h @@ -2,12 +2,15 @@ #define XTRABACKUP_BACKUP_MYSQL_H #include +#include +#include +#include "datasink.h" /* MariaDB version */ extern ulong mysql_server_version; /* server capabilities */ -extern bool have_backup_locks; +extern bool have_changed_page_bitmaps; extern bool have_lock_wait_timeout; extern bool have_galera_enabled; extern bool have_multi_threaded_slave; @@ -71,7 +74,21 @@ bool lock_binlog_maybe(MYSQL *connection); bool -lock_tables(MYSQL *connection); +lock_for_backup_stage_start(MYSQL *connection); + +bool +lock_for_backup_stage_flush(MYSQL *connection); + +bool +lock_for_backup_stage_block_ddl(MYSQL *connection); + +bool +lock_for_backup_stage_commit(MYSQL *connection); + +bool backup_lock(MYSQL *con, const char *table_name); +bool backup_unlock(MYSQL *con); + +std::unordered_set get_tables_in_use(MYSQL *con); bool wait_for_safe_slave(MYSQL *connection); @@ -82,5 +99,6 @@ write_galera_info(ds_ctxt *datasink, MYSQL *connection); bool 
write_slave_info(ds_ctxt *datasink, MYSQL *connection); +ulonglong get_current_lsn(MYSQL *connection); #endif diff --git a/extra/mariabackup/common_engine.cc b/extra/mariabackup/common_engine.cc new file mode 100644 index 00000000000..a4a8706243a --- /dev/null +++ b/extra/mariabackup/common_engine.cc @@ -0,0 +1,512 @@ +#include "common_engine.h" +#include "backup_copy.h" +#include "xtrabackup.h" +#include "common.h" +#include "backup_debug.h" + +#include +#include +#include +#include + +namespace common_engine { + +class Table { +public: + Table(std::string &db, std::string &table, std::string &fs_name) : + m_db(std::move(db)), m_table(std::move(table)), + m_fs_name(std::move(fs_name)) {} + virtual ~Table() {} + void add_file_name(const char *file_name) { m_fnames.push_back(file_name); } + virtual bool copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, + bool finalize, unsigned thread_num); + std::string &get_db() { return m_db; } + std::string &get_table() { return m_table; } + std::string &get_version() { return m_version; } + +protected: + std::string m_db; + std::string m_table; + std::string m_fs_name; + std::string m_version; + std::vector m_fnames; +}; + +bool +Table::copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, bool, unsigned thread_num) { + static const size_t buf_size = 10 * 1024 * 1024; + std::unique_ptr buf; + bool result = false; + File frm_file = -1; + std::vector files; + bool locked = false; + std::string full_tname("`"); + full_tname.append(m_db).append("`.`").append(m_table).append("`"); + + if (!no_lock && !backup_lock(con, full_tname.c_str())) { + msg(thread_num, "Error on executing BACKUP LOCK for table %s", + full_tname.c_str()); + goto exit; + } + else + locked = !no_lock; + + if ((frm_file = mysql_file_open(key_file_frm, (m_fs_name + ".frm").c_str(), + O_RDONLY | O_SHARE, MYF(0))) < 0 && !m_fnames.empty() && + !ends_with(m_fnames[0].c_str(), ".ARZ") && + !ends_with(m_fnames[0].c_str(), ".ARM")) { + // Don't treat it as error, as the table can be dropped after it + // was added to queue for copying + result = true; + goto exit; + } + + for (const auto &fname : m_fnames) { + File file = mysql_file_open(0, fname.c_str(),O_RDONLY | O_SHARE, MYF(0)); + if (file < 0) { + msg(thread_num, "Error on file %s open during %s table copy", + fname.c_str(), full_tname.c_str()); + goto exit; + } + files.push_back(file); + } + + if (locked && !backup_unlock(con)) { + msg(thread_num, "Error on BACKUP UNLOCK for table %s", full_tname.c_str()); + locked = false; + goto exit; + } + + locked = false; + + buf.reset(new uchar[buf_size]); + + for (size_t i = 0; i < m_fnames.size(); ++i) { + ds_file_t *dst_file = nullptr; + size_t bytes_read; + size_t copied_size = 0; + MY_STAT stat_info; + + if (my_fstat(files[i], &stat_info, MYF(0))) { + msg(thread_num, "error: failed to get stat info for file %s of " + "table %s", m_fnames[i].c_str(), full_tname.c_str()); + goto exit; + } + + const char *dst_path = + (xtrabackup_copy_back || xtrabackup_move_back) ? 
+ m_fnames[i].c_str() : trim_dotslash(m_fnames[i].c_str()); + + dst_file = ds_open(ds, dst_path, &stat_info, false); + if (!dst_file) { + msg(thread_num, "error: cannot open destination stream for %s, table %s", + dst_path, full_tname.c_str()); + goto exit; + } + + while ((bytes_read = my_read(files[i], buf.get(), buf_size, MY_WME))) { + if (bytes_read == size_t(-1)) { + msg(thread_num, "error: file %s read for table %s", + m_fnames[i].c_str(), full_tname.c_str()); + ds_close(dst_file); + goto exit; + } + xtrabackup_io_throttling(); + if (ds_write(dst_file, buf.get(), bytes_read)) { + msg(thread_num, "error: file %s write for table %s", + dst_path, full_tname.c_str()); + ds_close(dst_file); + goto exit; + } + copied_size += bytes_read; + } + mysql_file_close(files[i], MYF(MY_WME)); + files[i] = -1; + ds_close(dst_file); + msg(thread_num, "Copied file %s for table %s, %zu bytes", + m_fnames[i].c_str(), full_tname.c_str(), copied_size); + } + + result = true; + +#ifndef DBUG_OFF + { + std::string sql_name(m_db); + sql_name.append("/").append(m_table); + DBUG_MARIABACKUP_EVENT_LOCK("after_ce_table_copy", fil_space_t::name_type(sql_name.data(), sql_name.size())); + } +#endif // DBUG_OFF +exit: + if (frm_file >= 0) { + m_version = ::read_table_version_id(frm_file); + mysql_file_close(frm_file, MYF(MY_WME)); + } + if (locked && !backup_unlock(con)) { + msg(thread_num, "Error on BACKUP UNLOCK for table %s", full_tname.c_str()); + result = false; + } + for (auto file : files) + if (file >= 0) + mysql_file_close(file, MYF(MY_WME)); + return result; +} + +// Append-only tables +class LogTable : public Table { + public: + LogTable(std::string &db, std::string &table, std::string &fs_name) : + Table(db, table, fs_name) {} + + virtual ~LogTable() { (void)close(); } + bool + copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, bool finalize, + unsigned thread_num) override; + bool close(); + private: + bool open(ds_ctxt_t *ds, unsigned thread_num); + std::vector m_src; + std::vector m_dst; +}; + +bool +LogTable::open(ds_ctxt_t *ds, unsigned thread_num) { + DBUG_ASSERT(m_src.empty()); + DBUG_ASSERT(m_dst.empty()); + + std::string full_tname("`"); + full_tname.append(m_db).append("`.`").append(m_table).append("`"); + + for (const auto &fname : m_fnames) { + File file = mysql_file_open(0, fname.c_str(),O_RDONLY | O_SHARE, MYF(0)); + if (file < 0) { + msg(thread_num, "Error on file %s open during %s log table copy", + fname.c_str(), full_tname.c_str()); + return false; + } + m_src.push_back(file); + + MY_STAT stat_info; + if (my_fstat(file, &stat_info, MYF(0))) { + msg(thread_num, "error: failed to get stat info for file %s of " + "log table %s", fname.c_str(), full_tname.c_str()); + return false; + } + const char *dst_path = + (xtrabackup_copy_back || xtrabackup_move_back) ? 
+ fname.c_str() : trim_dotslash(fname.c_str()); + ds_file_t *dst_file = ds_open(ds, dst_path, &stat_info, false); + if (!dst_file) { + msg(thread_num, "error: cannot open destination stream for %s, " + "log table %s", dst_path, full_tname.c_str()); + return false; + } + m_dst.push_back(dst_file); + } + + File frm_file; + if ((frm_file = mysql_file_open(key_file_frm, (m_fs_name + ".frm").c_str(), + O_RDONLY | O_SHARE, MYF(0))) < 0 && !m_fnames.empty() && + !ends_with(m_fnames[0].c_str(), ".ARZ") && + !ends_with(m_fnames[0].c_str(), ".ARM")) { + msg(thread_num, "Error on .frm file open for log table %s", + full_tname.c_str()); + return false; + } + + m_version = ::read_table_version_id(frm_file); + mysql_file_close(frm_file, MYF(MY_WME)); + + return true; +} + +bool LogTable::close() { + while (!m_src.empty()) { + auto f = m_src.back(); + m_src.pop_back(); + mysql_file_close(f, MYF(MY_WME)); + } + while (!m_dst.empty()) { + auto f = m_dst.back(); + m_dst.pop_back(); + ds_close(f); + } + return true; +} + +bool +LogTable::copy(ds_ctxt_t *ds, MYSQL *con, bool no_lock, bool finalize, + unsigned thread_num) { + static const size_t buf_size = 10 * 1024 * 1024; + DBUG_ASSERT(ds); + DBUG_ASSERT(con); + if (m_src.empty() && !open(ds, thread_num)) { + close(); + return false; + } + DBUG_ASSERT(m_src.size() == m_dst.size()); + + std::unique_ptr buf(new uchar[buf_size]); + for (size_t i = 0; i < m_src.size(); ++i) { + // .CSM can be rewritten (see write_meta_file() usage in ha_tina.cc) + if (!finalize && ends_with(m_fnames[i].c_str(), ".CSM")) + continue; + size_t bytes_read; + size_t copied_size = 0; + while ((bytes_read = my_read(m_src[i], buf.get(), buf_size, MY_WME))) { + if (bytes_read == size_t(-1)) { + msg(thread_num, "error: file %s read for log table %s", + m_fnames[i].c_str(), + std::string("`").append(m_db).append("`.`"). + append(m_table).append("`").c_str()); + close(); + return false; + } + xtrabackup_io_throttling(); + if (ds_write(m_dst[i], buf.get(), bytes_read)) { + msg(thread_num, "error: file %s write for log table %s", + m_fnames[i].c_str(), std::string("`").append(m_db).append("`.`"). + append(m_table).append("`").c_str()); + close(); + return false; + } + copied_size += bytes_read; + } + msg(thread_num, "Copied file %s for log table %s, %zu bytes", + m_fnames[i].c_str(), std::string("`").append(m_db).append("`.`"). 
+ append(m_table).append("`").c_str(), copied_size); + } + + return true; +} + +class BackupImpl { + public: + BackupImpl( + const char *datadir_path, ds_ctxt_t *datasink, + std::vector &con_pool, ThreadPool &thread_pool) : + m_datadir_path(datadir_path), m_ds(datasink), m_con_pool(con_pool), + m_process_table_jobs(thread_pool) {} + ~BackupImpl() { } + bool scan( + const std::unordered_set &exclude_tables, + std::unordered_set *out_processed_tables, + bool no_lock, bool collect_log_and_stats); + void set_post_copy_table_hook(const post_copy_table_hook_t &hook) { + m_table_post_copy_hook = hook; + } + bool copy_log_tables(bool finalize); + bool copy_stats_tables(); + bool wait_for_finish(); + bool close_log_tables(); + private: + + void process_table_job(Table *table, bool no_lock, bool delete_table, + bool finalize, unsigned thread_num); + + const char *m_datadir_path; + ds_ctxt_t *m_ds; + std::vector &m_con_pool; + TasksGroup m_process_table_jobs; + + post_copy_table_hook_t m_table_post_copy_hook; + std::unordered_map> m_log_tables; + std::unordered_map> m_stats_tables; +}; + +void BackupImpl::process_table_job(Table *table, bool no_lock, + bool delete_table, bool finalize, unsigned thread_num) { + int result = 0; + + if (!m_process_table_jobs.get_result()) + goto exit; + + if (!table->copy(m_ds, m_con_pool[thread_num], no_lock, finalize, thread_num)) + goto exit; + + if (m_table_post_copy_hook) + m_table_post_copy_hook(table->get_db(), table->get_table(), + table->get_version()); + + result = 1; + +exit: + if (delete_table) + delete table; + m_process_table_jobs.finish_task(result); +} + +bool BackupImpl::scan(const std::unordered_set &exclude_tables, + std::unordered_set *out_processed_tables, bool no_lock, + bool collect_log_and_stats) { + + msg("Start scanning common engine tables, need backup locks: %d, " + "collect log and stat tables: %d", no_lock, collect_log_and_stats); + + std::unordered_map> found_tables; + + foreach_file_in_db_dirs(m_datadir_path, + [&](const char *file_path)->bool { + + static const char *ext_list[] = + {".MYD", ".MYI", ".MRG", ".ARM", ".ARZ", ".CSM", ".CSV", NULL}; + + bool is_aria = ends_with(file_path, ".MAD") || ends_with(file_path, ".MAI"); + + if (!collect_log_and_stats && is_aria) + return true; + + if (!is_aria && !filename_matches(file_path, ext_list)) + return true; + + if (check_if_skip_table(file_path)) { + msg("Skipping %s.", file_path); + return true; + } + + auto db_table_fs = convert_filepath_to_tablename(file_path); + auto tk = + table_key(std::get<0>(db_table_fs), std::get<1>(db_table_fs)); + + // log and stats tables are only collected in this function, + // so there is no need to filter out them with exclude_tables. 
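+ // Scheduling summary: when collect_log_and_stats is set, log tables and
+ // non-Aria statistics tables are queued into m_log_tables / m_stats_tables
+ // and are expected to be copied later under the BLOCK_DDL / BLOCK_COMMIT
+ // stages (see copy_log_tables() / copy_stats_tables()); otherwise they are
+ // skipped here. Remaining Aria data files are not copied by this scanner,
+ // and every other matching file becomes a regular Table copy job unless it
+ // is listed in exclude_tables.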
+ if (collect_log_and_stats) { + if (is_log_table(std::get<0>(db_table_fs).c_str(), + std::get<1>(db_table_fs).c_str())) { + auto table_it = m_log_tables.find(tk); + if (table_it == m_log_tables.end()) { + msg("Log table found: %s", tk.c_str()); + table_it = m_log_tables.emplace(tk, + std::unique_ptr<LogTable>(new LogTable(std::get<0>(db_table_fs), + std::get<1>(db_table_fs), std::get<2>(db_table_fs)))).first; + } + msg("Collect log table file: %s", file_path); + table_it->second->add_file_name(file_path); + return true; + }
+ // Aria can handle statistics tables + else if (is_stats_table(std::get<0>(db_table_fs).c_str(), + std::get<1>(db_table_fs).c_str()) && !is_aria) { + auto table_it = m_stats_tables.find(tk); + if (table_it == m_stats_tables.end()) { + msg("Stats table found: %s", tk.c_str()); + table_it = m_stats_tables.emplace(tk, + std::unique_ptr<Table>(new Table(std::get<0>(db_table_fs), + std::get<1>(db_table_fs), std::get<2>(db_table_fs)))).first; + } + msg("Collect stats table file: %s", file_path); + table_it->second->add_file_name(file_path); + return true; + }
+ } else if (is_log_table(std::get<0>(db_table_fs).c_str(), + std::get<1>(db_table_fs).c_str()) || + is_stats_table(std::get<0>(db_table_fs).c_str(), + std::get<1>(db_table_fs).c_str())) + return true; + + if (is_aria) + return true; + + if (exclude_tables.count(tk)) { + msg("Skip table %s as it is in the exclude list", tk.c_str()); + return true; + } +
+ auto table_it = found_tables.find(tk); + if (table_it == found_tables.end()) { + table_it = found_tables.emplace(tk, + std::unique_ptr<Table>
(new Table(std::get<0>(db_table_fs), + std::get<1>(db_table_fs), std::get<2>(db_table_fs)))).first; + } + + table_it->second->add_file_name(file_path); + + return true; + }); + + for (auto &table_it : found_tables) { + m_process_table_jobs.push_task( + std::bind(&BackupImpl::process_table_job, this, table_it.second.release(), + no_lock, true, false, std::placeholders::_1)); + if (out_processed_tables) + out_processed_tables->insert(table_it.first); + } + + msg("Stop scanning common engine tables"); + return true; +} + +bool BackupImpl::copy_log_tables(bool finalize) { + for (auto &table_it : m_log_tables) { + // Do not execute BACKUP LOCK for log tables as it's supposed + // that they must be copied on BLOCK_DDL and BLOCK_COMMIT locks. + m_process_table_jobs.push_task( + std::bind(&BackupImpl::process_table_job, this, table_it.second.get(), + true, false, finalize, std::placeholders::_1)); + } + return true; +} + +bool BackupImpl::copy_stats_tables() { + for (auto &table_it : m_stats_tables) { + // Do not execute BACKUP LOCK for stats tables as it's supposed + // that they must be copied on BLOCK_DDL and BLOCK_COMMIT locks. + // Delete stats table object after copy (see process_table_job()) + m_process_table_jobs.push_task( + std::bind(&BackupImpl::process_table_job, this, table_it.second.release(), + true, true, false, std::placeholders::_1)); + } + m_stats_tables.clear(); + return true; +} + +bool BackupImpl::wait_for_finish() { + /* Wait for threads to exit */ + return m_process_table_jobs.wait_for_finish(); +} + +bool BackupImpl::close_log_tables() { + bool result = wait_for_finish(); + for (auto &table_it : m_log_tables) + table_it.second->close(); + return result; +} + +Backup::Backup(const char *datadir_path, ds_ctxt_t *datasink, + std::vector &con_pool, ThreadPool &thread_pool) : + m_backup_impl( + new BackupImpl(datadir_path, datasink, con_pool, + thread_pool)) { } + +Backup::~Backup() { + delete m_backup_impl; +} + +bool Backup::scan( + const std::unordered_set &exclude_tables, + std::unordered_set *out_processed_tables, + bool no_lock, bool collect_log_and_stats) { + return m_backup_impl->scan(exclude_tables, out_processed_tables, no_lock, + collect_log_and_stats); +} + +bool Backup::copy_log_tables(bool finalize) { + return m_backup_impl->copy_log_tables(finalize); +} + +bool Backup::copy_stats_tables() { + return m_backup_impl->copy_stats_tables(); +} + +bool Backup::wait_for_finish() { + return m_backup_impl->wait_for_finish(); +} + +bool Backup::close_log_tables() { + return m_backup_impl->close_log_tables(); +} + +void Backup::set_post_copy_table_hook(const post_copy_table_hook_t &hook) { + m_backup_impl->set_post_copy_table_hook(hook); +} + +} // namespace common_engine diff --git a/extra/mariabackup/common_engine.h b/extra/mariabackup/common_engine.h new file mode 100644 index 00000000000..6f5d8062e50 --- /dev/null +++ b/extra/mariabackup/common_engine.h @@ -0,0 +1,39 @@ +#pragma once +#include "my_global.h" +#include "backup_mysql.h" +#include "datasink.h" +#include "thread_pool.h" +#include "xtrabackup.h" + +#include +#include +#include + +namespace common_engine { + +class BackupImpl; + +class Backup { + public: + Backup(const char *datadir_path, ds_ctxt_t *datasink, + std::vector &con_pool, ThreadPool &thread_pool); + ~Backup(); + Backup (Backup &&other) = delete; + Backup & operator= (Backup &&other) = delete; + Backup(const Backup &) = delete; + Backup & operator= (const Backup &) = delete; + bool scan( + const std::unordered_set &exclude_tables, + 
std::unordered_set *out_processed_tables, + bool no_lock, bool collect_log_and_stats); + bool copy_log_tables(bool finalize); + bool copy_stats_tables(); + bool wait_for_finish(); + bool close_log_tables(); + void set_post_copy_table_hook(const post_copy_table_hook_t &hook); + private: + BackupImpl *m_backup_impl; +}; + +} // namespace common_engine + diff --git a/extra/mariabackup/datasink.cc b/extra/mariabackup/datasink.cc index a576526d7ff..132ff3fcfb6 100644 --- a/extra/mariabackup/datasink.cc +++ b/extra/mariabackup/datasink.cc @@ -80,11 +80,11 @@ ds_create(const char *root, ds_type_t type) /************************************************************************ Open a datasink file */ ds_file_t * -ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat) +ds_open(ds_ctxt_t *ctxt, const char *path, const MY_STAT *stat, bool rewrite) { ds_file_t *file; - file = ctxt->datasink->open(ctxt, path, stat); + file = ctxt->datasink->open(ctxt, path, stat, rewrite); if (file != NULL) { file->datasink = ctxt->datasink; } @@ -104,6 +104,30 @@ ds_write(ds_file_t *file, const void *buf, size_t len) return file->datasink->write(file, (const uchar *)buf, len); } +int ds_seek_set(ds_file_t *file, my_off_t offset) { + DBUG_ASSERT(file); + DBUG_ASSERT(file->datasink); + if (file->datasink->seek_set) + return file->datasink->seek_set(file, offset); + return 0; +} + +int ds_rename(ds_ctxt_t *ctxt, const char *old_path, const char *new_path) { + DBUG_ASSERT(ctxt); + DBUG_ASSERT(ctxt->datasink); + if (ctxt->datasink->rename) + return ctxt->datasink->rename(ctxt, old_path, new_path); + return 0; +} + +int ds_remove(ds_ctxt_t *ctxt, const char *path) { + DBUG_ASSERT(ctxt); + DBUG_ASSERT(ctxt->datasink); + if (ctxt->datasink->remove) + return ctxt->datasink->mremove(ctxt, path); + return 0; +} + /************************************************************************ Close a datasink file. @return 0 on success, 1, on error. */ diff --git a/extra/mariabackup/datasink.h b/extra/mariabackup/datasink.h index 57468e0c9c7..98cbe5252ac 100644 --- a/extra/mariabackup/datasink.h +++ b/extra/mariabackup/datasink.h @@ -43,7 +43,8 @@ typedef struct ds_ctxt { */ bool copy_file(const char *src_file_path, const char *dst_file_path, - uint thread_n); + uint thread_n, + bool rewrite = false); bool move_file(const char *src_file_path, const char *dst_file_path, @@ -76,10 +77,15 @@ typedef struct { struct datasink_struct { ds_ctxt_t *(*init)(const char *root); - ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat); + ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, + const MY_STAT *stat, bool rewrite); int (*write)(ds_file_t *file, const unsigned char *buf, size_t len); + int (*seek_set)(ds_file_t *file, my_off_t offset); int (*close)(ds_file_t *file); int (*remove)(const char *path); + // TODO: consider to return bool from "rename" and "remove" + int (*rename)(ds_ctxt_t *ctxt, const char *old_path, const char *new_path); + int (*mremove)(ds_ctxt_t *ctxt, const char *path); void (*deinit)(ds_ctxt_t *ctxt); }; @@ -106,12 +112,17 @@ ds_ctxt_t *ds_create(const char *root, ds_type_t type); /************************************************************************ Open a datasink file */ -ds_file_t *ds_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat); +ds_file_t *ds_open( + ds_ctxt_t *ctxt, const char *path, const MY_STAT *stat, bool rewrite = false); /************************************************************************ Write to a datasink file. @return 0 on success, 1 on error. 
*/ int ds_write(ds_file_t *file, const void *buf, size_t len); +int ds_seek_set(ds_file_t *file, my_off_t offset); + +int ds_rename(ds_ctxt_t *ctxt, const char *old_path, const char *new_path); +int ds_remove(ds_ctxt_t *ctxt, const char *path); /************************************************************************ Close a datasink file. diff --git a/extra/mariabackup/ddl_log.cc b/extra/mariabackup/ddl_log.cc new file mode 100644 index 00000000000..6af34172dcb --- /dev/null +++ b/extra/mariabackup/ddl_log.cc @@ -0,0 +1,553 @@ +#include "ddl_log.h" +#include "common.h" +#include "my_sys.h" +#include "sql_table.h" +#include "backup_copy.h" +#include "xtrabackup.h" +#include +#include +#include +#include + +namespace ddl_log { + +struct Entry { + enum Type { + CREATE, + ALTER, + RENAME, + REPAIR, + OPTIMIZE, + DROP, + TRUNCATE, + CHANGE_INDEX, + BULK_INSERT + }; + Type type; + std::string date; + std::string engine; + bool partitioned; + std::string db; + std::string table; + std::string id; + std::string new_engine; + bool new_partitioned; + std::string new_db; + std::string new_table; + std::string new_id; +}; + +typedef std::vector> entries_t; +typedef std::function)> store_entry_func_t; + +const char *aria_engine_name = "Aria"; +static const char *frm_ext = ".frm"; +static const char *database_keyword = "DATABASE"; + +const std::unordered_map> engine_exts = +{ + {"Aria", {".MAD", ".MAI"}}, + {"MyISAM", {".MYD", ".MYI"}}, + {"MRG_MyISAM", {".MRG"}}, + {"ARCHIVE", {".ARM", ".ARZ"}}, + {"CSV", {".CSM", ".CSV"}} +}; + +static inline bool known_engine(const std::string &engine) { + return engine_exts.count(engine); +} + +// TODO: add error messages +size_t parse(const uchar *buf, size_t buf_size, bool &error_flag, + store_entry_func_t &store_entry_func) { + DBUG_ASSERT(buf); + static constexpr char token_delimiter = '\t'; + static constexpr char line_delimiter = '\n'; + enum { + TOKEN_FIRST = 0, + TOKEN_DATE = TOKEN_FIRST, + TOKEN_TYPE, + TOKEN_ENGINE, + TOKEN_PARTITIONED, + TOKEN_DB, + TOKEN_TABLE, + TOKEN_ID, + TOKEN_MANDATORY = TOKEN_ID, + TOKEN_NEW_ENGINE, + TOKEN_NEW_PARTITIONED, + TOKEN_NEW_DB, + TOKEN_NEW_TABLE, + TOKEN_NEW_ID, + TOKEN_LAST = TOKEN_NEW_ID + }; + const size_t string_offsets[TOKEN_LAST + 1] = { + offsetof(Entry, date), + offsetof(Entry, type), // not a string, be careful + offsetof(Entry, engine), + offsetof(Entry, partitioned), // not a string, be careful + offsetof(Entry, db), + offsetof(Entry, table), + offsetof(Entry, id), + offsetof(Entry, new_engine), + offsetof(Entry, new_partitioned), // not a string, be careful + offsetof(Entry, new_db), + offsetof(Entry, new_table), + offsetof(Entry, new_id) + }; + const std::unordered_map str_to_type = { + {"CREATE", Entry::CREATE}, + {"ALTER", Entry::ALTER}, + {"RENAME", Entry::RENAME}, + // TODO: fix to use uppercase-only + {"repair", Entry::REPAIR}, + {"optimize", Entry::OPTIMIZE}, + {"DROP", Entry::DROP}, + {"TRUNCATE", Entry::TRUNCATE}, + {"CHANGE_INDEX", Entry::CHANGE_INDEX}, + {"BULK_INSERT", Entry::BULK_INSERT} + }; + + const uchar *new_line = buf; + const uchar *token_start = buf; + unsigned token_num = TOKEN_FIRST; + + error_flag = false; + + std::unique_ptr entry(new Entry()); + + for (const uchar *ptr = buf; ptr < buf + buf_size; ++ptr) { + + if (*ptr != token_delimiter && *ptr != line_delimiter) + continue; + + if (token_start != ptr) { + std::string token(token_start, ptr); + + if (token_num == TOKEN_TYPE) { + const auto type_it = str_to_type.find(token); + if (type_it == str_to_type.end()) { + error_flag = 
true; + goto exit; + } + entry->type = type_it->second; + } + else if (token_num == TOKEN_PARTITIONED) { + entry->partitioned = token[0] - '0'; + } + else if (token_num == TOKEN_NEW_PARTITIONED) { + entry->new_partitioned = token[0] - '0'; + } + else if (token_num <= TOKEN_LAST) { + DBUG_ASSERT(token_num != TOKEN_TYPE); + DBUG_ASSERT(token_num != TOKEN_PARTITIONED); + DBUG_ASSERT(token_num != TOKEN_NEW_PARTITIONED); + reinterpret_cast + (reinterpret_cast(entry.get()) + string_offsets[token_num])-> + assign(std::move(token)); + } + else { + error_flag = true; + goto exit; + } + } + token_start = ptr + 1; + + if (*ptr == line_delimiter) { + if (token_num < TOKEN_MANDATORY) { + error_flag = true; + goto exit; + } + if (!store_entry_func(std::move(entry))) { + error_flag = true; + goto exit; + } + entry.reset(new Entry()); + token_num = TOKEN_FIRST; + new_line = ptr + 1; + } else + ++token_num; + } + +exit: + return new_line - buf; +} + +bool parse(const char *file_path, store_entry_func_t store_entry_func) { + DBUG_ASSERT(file_path); + DBUG_ASSERT(store_entry_func); + File file= -1; + bool result = true; + uchar buf[1024]; + size_t bytes_read = 0; + size_t buf_read_offset = 0; + + if ((file= my_open(file_path, O_RDONLY | O_SHARE | O_NOFOLLOW | O_CLOEXEC, + MYF(MY_WME))) < 0) { + msg("DDL log file %s open failed: %d", file_path, my_errno); + result = false; + goto exit; + } + + while((bytes_read = my_read( + file, &buf[buf_read_offset], sizeof(buf) - buf_read_offset, MY_WME)) > 0) { + if (bytes_read == size_t(-1)) { + msg("DDL log file %s read error: %d", file_path, my_errno); + result = false; + break; + } + bytes_read += buf_read_offset; + bool parse_error_flag = false; + size_t bytes_parsed = parse( + buf, bytes_read, parse_error_flag, store_entry_func); + if (parse_error_flag) { + result = false; + break; + } + size_t rest_size = bytes_read - bytes_parsed; + if (rest_size) + memcpy(buf, buf + bytes_parsed, rest_size); + buf_read_offset = rest_size; + } + +exit: + if (file >= 0) + my_close(file, MYF(MY_WME)); + return result; +}; + + +static +bool process_database( + const char *datadir_path, + ds_ctxt_t *ds, + const Entry &entry, + std::unordered_set &dropped_databases) { + + if (entry.type == Entry::Type::CREATE || + entry.type == Entry::Type::ALTER) { + std::string opt_file(datadir_path); + opt_file.append("/").append(entry.db).append("/db.opt"); + if (!ds->copy_file(opt_file.c_str(), opt_file.c_str(), 0, true)) { + msg("Failed to re-copy %s.", opt_file.c_str()); + return false; + } + if (entry.type == Entry::Type::CREATE) + dropped_databases.erase(entry.db); + return true; + } + + DBUG_ASSERT(entry.type == Entry::Type::DROP); + + std::string db_path(datadir_path); + db_path.append("/").append(entry.db); + const char *dst_path = convert_dst(db_path.c_str()); + if (!ds_remove(ds, dst_path)) { + dropped_databases.insert(entry.db); + return true; + } + return false; +} + +static +std::unique_ptr> + find_table_files( + const char *dir_path, + const std::string &db, + const std::string &table) { + + std::unique_ptr> + result(new std::vector()); + + std::string prefix = convert_tablename_to_filepath(dir_path, db, table); + foreach_file_in_db_dirs(dir_path, [&](const char *file_name)->bool { + if (!strncmp(file_name, prefix.c_str(), prefix.size())) { + DBUG_ASSERT(strlen(file_name) >= prefix.size()); + if (file_name[prefix.size()] == '.' 
|| + !strncmp(file_name + prefix.size(), "#P#", strlen("#P#"))) + result->push_back(std::string(file_name)); + } + return true; + }); + + return result; +} + +static +bool process_remove( + const char *datadir_path, + ds_ctxt_t *ds, + const Entry &entry, + bool remove_frm) { + + if (check_if_skip_table( + std::string(entry.db).append("/").append(entry.table).c_str())) + return true; + + auto ext_it = engine_exts.find(entry.engine); + if (ext_it == engine_exts.end()) + return true; + + std::string file_preffix = convert_tablename_to_filepath(datadir_path, + entry.db, entry.table); + const char *dst_preffix = convert_dst(file_preffix.c_str()); + + for (const char *ext : ext_it->second) { + std::string old_name(dst_preffix); + if (!entry.partitioned) + old_name.append(ext); + else + old_name.append("#P#*"); + if (ds_remove(ds, old_name.c_str())) { + msg("Failed to remove %s.", old_name.c_str()); + return false; + } + } + + if (remove_frm) { + std::string old_frm_name(dst_preffix); + old_frm_name.append(frm_ext); + if (ds_remove(ds, old_frm_name.c_str())) { + msg("Failed to remove %s.", old_frm_name.c_str()); + return false; + } + } + return true; + +} + +static +bool process_recopy( + const char *datadir_path, + ds_ctxt_t *ds, + const Entry &entry, + const tables_t &tables) { + + if (check_if_skip_table( + std::string(entry.db).append("/").append(entry.table).c_str())) + return true; + + const std::string &new_table_id = + entry.new_id.empty() ? entry.id : entry.new_id; + DBUG_ASSERT(!new_table_id.empty()); + const std::string &new_table = + entry.new_table.empty() ? entry.table : entry.new_table; + DBUG_ASSERT(!new_table.empty()); + const std::string &new_db = + entry.new_db.empty() ? entry.db : entry.new_db; + DBUG_ASSERT(!new_db.empty()); + const std::string &new_engine = + entry.new_engine.empty() ? entry.engine : entry.new_engine; + DBUG_ASSERT(!new_engine.empty()); + + if (entry.type != Entry::Type::BULK_INSERT) { + auto table_it = tables.find(table_key(new_db, new_table)); + if (table_it != tables.end() && + table_it->second == new_table_id) + return true; + } + + if (!entry.new_engine.empty() && + entry.engine != entry.new_engine && + !known_engine(entry.new_engine)) { + return process_remove(datadir_path, ds, entry, false); + } + + if ((entry.partitioned || entry.new_partitioned) && + !process_remove(datadir_path, ds, entry, false)) + return false; + + if (entry.partitioned || entry.new_partitioned) { + auto files = find_table_files(datadir_path, new_db, new_table); + if (!files.get()) + return true; + for (const auto &file : *files) { + const char *dst_path = convert_dst(file.c_str()); + if (!ds->copy_file(file.c_str(), dst_path, 0, true)) { + msg("Failed to re-copy %s.", file.c_str()); + return false; + } + } + return true; + } + + auto ext_it = engine_exts.find(new_engine); + if (ext_it == engine_exts.end()) + return false; + + for (const char *ext : ext_it->second) { + std::string file_name = + convert_tablename_to_filepath(datadir_path, new_db, new_table). + append(ext); + const char *dst_path = convert_dst(file_name.c_str()); + if (file_exists(file_name.c_str()) && + !ds->copy_file(file_name.c_str(), dst_path, 0, true)) { + msg("Failed to re-copy %s.", file_name.c_str()); + return false; + } + } + + std::string frm_file = + convert_tablename_to_filepath(datadir_path, new_db, new_table). 
+ append(frm_ext); + const char *frm_dst_path = convert_dst(frm_file.c_str()); + if (file_exists(frm_file.c_str()) && + !ds->copy_file(frm_file.c_str(), frm_dst_path, 0, true)) { + msg("Failed to re-copy %s.", frm_file.c_str()); + return false; + } + + return true; +} + +static +bool process_rename( + const char *datadir_path, + ds_ctxt_t *ds, + const Entry &entry) { + + if (check_if_skip_table( + std::string(entry.db).append("/").append(entry.table).c_str())) + return true; + + DBUG_ASSERT(entry.db != "partition"); + + auto ext_it = engine_exts.find(entry.engine); + if (ext_it == engine_exts.end()) + return false; + + std::string new_preffix = convert_tablename_to_filepath(datadir_path, + entry.new_db, entry.new_table); + const char *dst_path = convert_dst(new_preffix.c_str()); + + std::string old_preffix = convert_tablename_to_filepath(datadir_path, + entry.db, entry.table); + const char *src_path = convert_dst(old_preffix.c_str()); + + for (const char *ext : ext_it->second) { + std::string old_name(src_path); + old_name.append(ext); + std::string new_name(dst_path); + new_name.append(ext); + if (ds_rename(ds, old_name.c_str(), new_name.c_str())) { + msg("Failed to rename %s to %s.", + old_name.c_str(), new_name.c_str()); + return false; + } + } + + std::string new_frm_file = new_preffix + frm_ext; + const char *new_frm_dst = convert_dst(new_frm_file.c_str()); + if (file_exists(new_frm_file.c_str()) && + !ds->copy_file(new_frm_file.c_str(), new_frm_dst, 0, true)) { + msg("Failed to re-copy %s.", new_frm_file.c_str()); + return false; + } + +// TODO: return this code if .frm is copied not under BLOCK_DDL +/* + std::string old_frm_name(src_path); + old_frm_name.append(frm_ext); + std::string new_frm_name(dst_path); + new_frm_name.append(frm_ext); + if (ds_rename(ds, old_frm_name.c_str(), new_frm_name.c_str())) { + msg("Failed to rename %s to %s.", + old_frm_name.c_str(), new_frm_name.c_str()); + return false; + } +*/ + return true; +} + +bool backup( + const char *datadir_path, + ds_ctxt_t *ds, + const tables_t &tables) { + DBUG_ASSERT(datadir_path); + DBUG_ASSERT(ds); + char ddl_log_path[FN_REFLEN]; + fn_format(ddl_log_path, "ddl", datadir_path, ".log", 0); + std::vector> entries; + + std::unordered_set processed_tables; + std::unordered_set dropped_databases; + + bool parsing_result = + parse(ddl_log_path, [&](std::unique_ptr entry)->bool { + + if (entry->engine == database_keyword) + return process_database(datadir_path, ds, *entry, dropped_databases); + + if (!known_engine(entry->engine) && !known_engine(entry->new_engine)) + return true; + + if (entry->type == Entry::Type::CREATE || + (entry->type == Entry::Type::ALTER && + !entry->new_engine.empty() && + entry->engine != entry->new_engine)) { + if (!process_recopy(datadir_path, ds, *entry, tables)) + return false; + processed_tables.insert(table_key(entry->db, entry->table)); + if (entry->type == Entry::Type::ALTER) + processed_tables.insert(table_key(entry->new_db, entry->new_table)); + return true; + } + + if (entry->type == Entry::Type::DROP) { + if (!process_remove(datadir_path, ds, *entry, true)) + return false; + processed_tables.insert(table_key(entry->db, entry->table)); + return true; + } + if (entry->type == Entry::Type::RENAME) { + if (entry->partitioned) { + if (!process_remove(datadir_path, ds, *entry, true)) + return false; + Entry recopy_entry { + entry->type, + {}, + entry->new_engine.empty() ? 
entry->engine : entry->new_engine, + true, + entry->new_db, + entry->new_table, + entry->new_id, + {}, true, {}, {}, {} + }; + if (!process_recopy(datadir_path, ds, recopy_entry, tables)) + return false; + } + else if (!process_rename(datadir_path, ds, *entry)) + return false; + processed_tables.insert(table_key(entry->db, entry->table)); + processed_tables.insert(table_key(entry->new_db, entry->new_table)); + return true; + } + + entries.push_back(std::move(entry)); + return true; + + }); + + if (!parsing_result) + return false; + + + while (!entries.empty()) { + auto entry = std::move(entries.back()); + entries.pop_back(); + auto tk = table_key( + entry->new_db.empty() ? entry->db : entry->new_db, + entry->new_table.empty() ? entry->table : entry->new_table); + if (dropped_databases.count(entry->db) || + dropped_databases.count(entry->new_db)) + continue; + if (processed_tables.count(tk)) + continue; + processed_tables.insert(std::move(tk)); + if (!process_recopy(datadir_path, ds, *entry, tables)) + return false; + } + + return true; +} + +} // namespace ddl_log diff --git a/extra/mariabackup/ddl_log.h b/extra/mariabackup/ddl_log.h new file mode 100644 index 00000000000..5cac3e5dcfc --- /dev/null +++ b/extra/mariabackup/ddl_log.h @@ -0,0 +1,15 @@ +#pragma once +#include "my_global.h" +#include "datasink.h" +#include "aria_backup_client.h" +#include +#include +#include +#include + +namespace ddl_log { + +typedef std::unordered_map tables_t; +bool backup(const char *datadir_path, ds_ctxt_t *ds, const tables_t &tables); + +} // namespace ddl_log diff --git a/extra/mariabackup/ds_buffer.cc b/extra/mariabackup/ds_buffer.cc index d6a420951cb..bc1d466350a 100644 --- a/extra/mariabackup/ds_buffer.cc +++ b/extra/mariabackup/ds_buffer.cc @@ -44,7 +44,7 @@ typedef struct { static ds_ctxt_t *buffer_init(const char *root); static ds_file_t *buffer_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); + const MY_STAT *mystat, bool rewrite); static int buffer_write(ds_file_t *file, const uchar *buf, size_t len); static int buffer_close(ds_file_t *file); static void buffer_deinit(ds_ctxt_t *ctxt); @@ -53,8 +53,11 @@ datasink_t datasink_buffer = { &buffer_init, &buffer_open, &buffer_write, + nullptr, &buffer_close, &dummy_remove, + nullptr, + nullptr, &buffer_deinit }; @@ -84,8 +87,10 @@ buffer_init(const char *root) } static ds_file_t * -buffer_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat) +buffer_open(ds_ctxt_t *ctxt, const char *path, + const MY_STAT *mystat, bool rewrite) { + DBUG_ASSERT(rewrite == false); ds_buffer_ctxt_t *buffer_ctxt; ds_ctxt_t *pipe_ctxt; ds_file_t *dst_file; diff --git a/extra/mariabackup/ds_compress.cc b/extra/mariabackup/ds_compress.cc index f7a9b7a1fbd..0cb52e978db 100644 --- a/extra/mariabackup/ds_compress.cc +++ b/extra/mariabackup/ds_compress.cc @@ -65,7 +65,7 @@ extern ulonglong xtrabackup_compress_chunk_size; static ds_ctxt_t *compress_init(const char *root); static ds_file_t *compress_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); + const MY_STAT *mystat, bool rewrite); static int compress_write(ds_file_t *file, const uchar *buf, size_t len); static int compress_close(ds_file_t *file); static void compress_deinit(ds_ctxt_t *ctxt); @@ -74,8 +74,11 @@ datasink_t datasink_compress = { &compress_init, &compress_open, &compress_write, + nullptr, &compress_close, &dummy_remove, + nullptr, + nullptr, &compress_deinit }; @@ -116,8 +119,10 @@ compress_init(const char *root) static ds_file_t * -compress_open(ds_ctxt_t *ctxt, const char *path, MY_STAT 
*mystat) +compress_open(ds_ctxt_t *ctxt, const char *path, + const MY_STAT *mystat, bool rewrite) { + DBUG_ASSERT(rewrite == false); ds_compress_ctxt_t *comp_ctxt; ds_ctxt_t *dest_ctxt; ds_file_t *dest_file; diff --git a/extra/mariabackup/ds_local.cc b/extra/mariabackup/ds_local.cc index f86612b951a..ff2021fc035 100644 --- a/extra/mariabackup/ds_local.cc +++ b/extra/mariabackup/ds_local.cc @@ -42,8 +42,9 @@ typedef struct { static ds_ctxt_t *local_init(const char *root); static ds_file_t *local_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); + const MY_STAT *mystat, bool rewrite); static int local_write(ds_file_t *file, const uchar *buf, size_t len); +static int local_seek_set(ds_file_t *file, my_off_t offset); static int local_close(ds_file_t *file); static void local_deinit(ds_ctxt_t *ctxt); @@ -52,13 +53,20 @@ static int local_remove(const char *path) return unlink(path); } +static int local_rename( + ds_ctxt_t *ctxt, const char *old_path, const char *new_path); +static int local_mremove(ds_ctxt_t *ctxt, const char *path); + extern "C" { datasink_t datasink_local = { &local_init, &local_open, &local_write, + &local_seek_set, &local_close, &local_remove, + &local_rename, + &local_mremove, &local_deinit }; } @@ -89,7 +97,7 @@ local_init(const char *root) static ds_file_t * local_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat __attribute__((unused))) + const MY_STAT *mystat __attribute__((unused)), bool rewrite) { char fullpath[FN_REFLEN]; char dirpath[FN_REFLEN]; @@ -111,8 +119,10 @@ local_open(ds_ctxt_t *ctxt, const char *path, return NULL; } - fd = my_create(fullpath, 0, O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, - MYF(MY_WME)); + // TODO: check in Windows and set the corresponding flags on fail + fd = my_create(fullpath, 0, + O_WRONLY | O_BINARY | (rewrite ? O_TRUNC : O_EXCL) | O_NOFOLLOW, + MYF(MY_WME)); if (fd < 0) { return NULL; } @@ -194,8 +204,8 @@ static void init_ibd_data(ds_local_file_t *local_file, const uchar *buf, size_t return; } - auto flags = mach_read_from_4(&buf[FIL_PAGE_DATA + FSP_SPACE_FLAGS]); - auto ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); + uint32_t flags = mach_read_from_4(&buf[FIL_PAGE_DATA + FSP_SPACE_FLAGS]); + uint32_t ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); local_file->pagesize= ssize == 0 ? UNIV_PAGE_SIZE_ORIG : ((UNIV_ZIP_SIZE_MIN >> 1) << ssize); local_file->compressed = fil_space_t::full_crc32(flags) ? fil_space_t::is_compressed(flags) @@ -239,6 +249,15 @@ local_write(ds_file_t *file, const uchar *buf, size_t len) return 1; } +static +int +local_seek_set(ds_file_t *file, my_off_t offset) { + ds_local_file_t *local_file= (ds_local_file_t *)file->ptr; + if (my_seek(local_file->fd, offset, SEEK_SET, MYF(0)) == MY_FILEPOS_ERROR) + return 1; + return 0; +} + /* Set EOF at file's current position.*/ static int set_eof(File fd) { @@ -276,3 +295,77 @@ local_deinit(ds_ctxt_t *ctxt) my_free(ctxt->root); my_free(ctxt); } + + +static int local_rename( + ds_ctxt_t *ctxt, const char *old_path, const char *new_path) { + char full_old_path[FN_REFLEN]; + char full_new_path[FN_REFLEN]; + fn_format(full_old_path, old_path, ctxt->root, "", MYF(MY_RELATIVE_PATH)); + fn_format(full_new_path, new_path, ctxt->root, "", MYF(MY_RELATIVE_PATH)); + // Ignore errors as .frm files can me copied separately. 
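/*
  Editor's sketch: local_open() above now takes a `rewrite` flag. The first
  copy of a file uses O_EXCL so that creation fails if the target already
  exists; when the same file must be written again (re-copy after a DDL
  event) O_TRUNC is used instead so the earlier copy is overwritten.
  Hypothetical helper for illustration only.
*/
#include <fcntl.h>

static inline int sketch_create_flags(bool rewrite) {
  return O_WRONLY | (rewrite ? O_TRUNC    /* reopen and overwrite the old copy */
                             : O_EXCL);   /* refuse to clobber an existing file */
}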
+ // TODO: return error processing here after the corresponding changes in + // xtrabackup.cc + (void)my_rename(full_old_path, full_new_path, MYF(0)); +// if (my_rename(full_old_path, full_new_path, MYF(0))) { +// msg("Failed to rename file %s to %s", old_path, new_path); +// return 1; +// } + return 0; +} + +// It's ok if destination does not contain the file or folder +static int local_mremove(ds_ctxt_t *ctxt, const char *path) { + char full_path[FN_REFLEN]; + fn_format(full_path, path, ctxt->root, "", MYF(MY_RELATIVE_PATH)); + size_t full_path_len = strlen(full_path); + if (full_path[full_path_len - 1] == '*') { + full_path[full_path_len - 1] = '\0'; + char *preffix = strrchr(full_path, '/'); + const char *full_path_dir = full_path; + size_t preffix_len; + if (preffix) { + preffix_len = (full_path_len - 1) - (preffix - full_path); + *(preffix++) = '\0'; + } + else { + preffix = full_path; + preffix_len = full_path_len - 1; + full_path_dir= IF_WIN(".\\", "./"); + } + if (!preffix_len) + return 0; + MY_DIR *dir= my_dir(full_path_dir, 0); + if (!dir) + return 0; + for (size_t i = 0; i < dir->number_of_files; ++i) { + char full_fpath[FN_REFLEN]; + if (strncmp(dir->dir_entry[i].name, preffix, preffix_len)) + continue; + fn_format(full_fpath, dir->dir_entry[i].name, + full_path_dir, "", MYF(MY_RELATIVE_PATH)); + (void)my_delete(full_fpath, MYF(0)); + } + my_dirend(dir); + } + else { + MY_STAT stat; + if (!my_stat(full_path, &stat, MYF(0))) + return 0; + MY_DIR *dir= my_dir(full_path, 0); + if (!dir) { + // TODO: check for error here if necessary + (void)my_delete(full_path, MYF(0)); + return 0; + } + for (size_t i = 0; i < dir->number_of_files; ++i) { + char full_fpath[FN_REFLEN]; + fn_format(full_fpath, dir->dir_entry[i].name, + full_path, "", MYF(MY_RELATIVE_PATH)); + (void)my_delete(full_fpath, MYF(0)); + } + my_dirend(dir); + (void)my_rmtree(full_path, MYF(0)); + } + return 0; +} diff --git a/extra/mariabackup/ds_stdout.cc b/extra/mariabackup/ds_stdout.cc index a9639ff7739..3fc0873b6ca 100644 --- a/extra/mariabackup/ds_stdout.cc +++ b/extra/mariabackup/ds_stdout.cc @@ -30,7 +30,7 @@ typedef struct { static ds_ctxt_t *stdout_init(const char *root); static ds_file_t *stdout_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); + const MY_STAT *mystat, bool rewrite); static int stdout_write(ds_file_t *file, const uchar *buf, size_t len); static int stdout_close(ds_file_t *file); static void stdout_deinit(ds_ctxt_t *ctxt); @@ -39,8 +39,11 @@ datasink_t datasink_stdout = { &stdout_init, &stdout_open, &stdout_write, + nullptr, &stdout_close, &dummy_remove, + nullptr, + nullptr, &stdout_deinit }; @@ -61,8 +64,9 @@ static ds_file_t * stdout_open(ds_ctxt_t *ctxt __attribute__((unused)), const char *path __attribute__((unused)), - MY_STAT *mystat __attribute__((unused))) + const MY_STAT *mystat __attribute__((unused)), bool rewrite) { + DBUG_ASSERT(rewrite == false); ds_stdout_file_t *stdout_file; ds_file_t *file; size_t pathlen; diff --git a/extra/mariabackup/ds_tmpfile.cc b/extra/mariabackup/ds_tmpfile.cc index 80b9d3bb4d0..6bafee25971 100644 --- a/extra/mariabackup/ds_tmpfile.cc +++ b/extra/mariabackup/ds_tmpfile.cc @@ -41,7 +41,7 @@ typedef struct { static ds_ctxt_t *tmpfile_init(const char *root); static ds_file_t *tmpfile_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); + const MY_STAT *mystat, bool rewrite); static int tmpfile_write(ds_file_t *file, const uchar *buf, size_t len); static int tmpfile_close(ds_file_t *file); static void tmpfile_deinit(ds_ctxt_t *ctxt); @@ -50,8 
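/*
  Editor's sketch of what local_mremove() above implements: a trailing '*'
  means "delete every file in the directory whose name starts with the given
  prefix", otherwise the named file (or directory tree) is removed, ignoring
  errors. This stand-alone illustration uses std::filesystem instead of the
  mysys my_dir()/my_delete() calls, so it is not the patch code.
*/
#include <filesystem>
#include <string>
#include <system_error>

static void sketch_mremove(const std::string &path) {
  namespace fs = std::filesystem;
  std::error_code ec;                            // swallow errors, like the patch
  if (!path.empty() && path.back() == '*') {
    fs::path pattern(path.substr(0, path.size() - 1));
    fs::path dir = pattern.parent_path().empty() ? fs::path(".")
                                                 : pattern.parent_path();
    std::string prefix = pattern.filename().string();
    if (prefix.empty())
      return;
    for (const auto &de : fs::directory_iterator(dir, ec))
      if (de.path().filename().string().compare(0, prefix.size(), prefix) == 0)
        fs::remove(de.path(), ec);
  } else {
    fs::remove_all(path, ec);                    // plain file or whole directory
  }
}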
+50,11 @@ datasink_t datasink_tmpfile = { &tmpfile_init, &tmpfile_open, &tmpfile_write, + nullptr, &tmpfile_close, &dummy_remove, + nullptr, + nullptr, &tmpfile_deinit }; @@ -80,8 +83,9 @@ tmpfile_init(const char *root) static ds_file_t * tmpfile_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat) + const MY_STAT *mystat, bool rewrite) { + DBUG_ASSERT(rewrite == false); ds_tmpfile_ctxt_t *tmpfile_ctxt; char tmp_path[FN_REFLEN]; ds_tmp_file_t *tmp_file; diff --git a/extra/mariabackup/ds_xbstream.cc b/extra/mariabackup/ds_xbstream.cc index 3bf8bd086c2..96e0cf7aea3 100644 --- a/extra/mariabackup/ds_xbstream.cc +++ b/extra/mariabackup/ds_xbstream.cc @@ -40,24 +40,31 @@ General streaming interface */ static ds_ctxt_t *xbstream_init(const char *root); static ds_file_t *xbstream_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); + const MY_STAT *mystat, bool rewrite); static int xbstream_write(ds_file_t *file, const uchar *buf, size_t len); +static int xbstream_seek_set(ds_file_t *file, my_off_t offset); static int xbstream_close(ds_file_t *file); static void xbstream_deinit(ds_ctxt_t *ctxt); +static int xbstream_rename( + ds_ctxt_t *ctxt, const char *old_path, const char *new_path); +static int xbstream_mremove(ds_ctxt_t *ctxt, const char *path); + datasink_t datasink_xbstream = { &xbstream_init, &xbstream_open, &xbstream_write, + &xbstream_seek_set, &xbstream_close, &dummy_remove, + &xbstream_rename, + &xbstream_mremove, &xbstream_deinit }; static ssize_t -my_xbstream_write_callback(xb_wstream_file_t *f __attribute__((unused)), - void *userdata, const void *buf, size_t len) +my_xbstream_write_callback(void *userdata, const void *buf, size_t len) { ds_stream_ctxt_t *stream_ctxt; @@ -89,7 +96,7 @@ xbstream_init(const char *root __attribute__((unused))) goto err; } - xbstream = xb_stream_write_new(); + xbstream = xb_stream_write_new(my_xbstream_write_callback, stream_ctxt); if (xbstream == NULL) { msg("xb_stream_write_new() failed."); goto err; @@ -108,7 +115,8 @@ err: static ds_file_t * -xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat) +xbstream_open(ds_ctxt_t *ctxt, const char *path, + const MY_STAT *mystat, bool rewrite) { ds_file_t *file; ds_stream_file_t *stream_file; @@ -144,9 +152,7 @@ xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat) xbstream = stream_ctxt->xbstream; - xbstream_file = xb_stream_write_open(xbstream, path, mystat, - stream_ctxt, - my_xbstream_write_callback); + xbstream_file = xb_stream_write_open(xbstream, path, mystat, rewrite); if (xbstream_file == NULL) { msg("xb_stream_write_open() failed."); @@ -190,6 +196,45 @@ xbstream_write(ds_file_t *file, const uchar *buf, size_t len) return 0; } +static +int +xbstream_seek_set(ds_file_t *file, my_off_t offset) +{ + ds_stream_file_t *stream_file; + xb_wstream_file_t *xbstream_file; + + + stream_file = (ds_stream_file_t *) file->ptr; + + xbstream_file = stream_file->xbstream_file; + + if (xb_stream_write_seek_set(xbstream_file, offset)) { + msg("xb_stream_write_seek_set() failed."); + return 1; + } + + return 0; +} + +static +int +xbstream_mremove(ds_ctxt_t *ctxt, const char *path) { + ds_stream_ctxt_t *stream_ctxt = + reinterpret_cast(ctxt->ptr); + xb_wstream_t *xbstream = stream_ctxt->xbstream; + return xb_stream_write_remove(xbstream, path); +} + +static +int +xbstream_rename( + ds_ctxt_t *ctxt, const char *old_path, const char *new_path) { + ds_stream_ctxt_t *stream_ctxt = + reinterpret_cast(ctxt->ptr); + xb_wstream_t *xbstream = stream_ctxt->xbstream; + return 
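/*
  Editor's note: the initializer lists above show that the datasink_t vtable
  gained three optional operations (seek_set, rename, mremove). Sinks that
  cannot support an operation keep nullptr in the slot, so generic callers
  have to test it before dispatching. The struct and field names below are an
  illustrative sketch; the real definition lives in datasink.h, which is not
  part of this hunk.
*/
#include <cstddef>

struct sketch_sink {
  int (*seek_set)(void *file, unsigned long long offset);                 // optional
  int (*rename)(void *ctxt, const char *old_path, const char *new_path);  // optional
  int (*mremove)(void *ctxt, const char *path);                           // optional
};

static inline int sketch_ds_rename(const sketch_sink &s, void *ctxt,
                                   const char *from, const char *to) {
  return s.rename ? s.rename(ctxt, from, to) : 1;   // 1 = unsupported/failed
}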
xb_stream_write_rename(xbstream, old_path, new_path); +} + static int xbstream_close(ds_file_t *file) diff --git a/extra/mariabackup/xb_plugin.cc b/extra/mariabackup/encryption_plugin.cc similarity index 83% rename from extra/mariabackup/xb_plugin.cc rename to extra/mariabackup/encryption_plugin.cc index 7470d376eaa..d71ceaac7f4 100644 --- a/extra/mariabackup/xb_plugin.cc +++ b/extra/mariabackup/encryption_plugin.cc @@ -1,4 +1,4 @@ -/* Copyright (c) 2017, 2022, MariaDB Corporation. +/* Copyright (c) 2017, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -17,18 +17,18 @@ #include #include #include -#include +#include #include #include #include #include #include -#include +#include extern struct st_maria_plugin *mysql_optional_plugins[]; extern struct st_maria_plugin *mysql_mandatory_plugins[]; -static void xb_plugin_init(int argc, char **argv); +static void encryption_plugin_init(int argc, char **argv); extern char *xb_plugin_load; extern char *xb_plugin_dir; @@ -42,7 +42,7 @@ const char *QUERY_PLUGIN = " OR (plugin_type = 'DAEMON' AND plugin_name LIKE 'provider\\_%')" " AND plugin_status='ACTIVE'"; -std::string xb_plugin_config; +std::string encryption_plugin_config; static void add_to_plugin_load_list(const char *plugin_def) { @@ -52,16 +52,16 @@ static void add_to_plugin_load_list(const char *plugin_def) static char XTRABACKUP_EXE[] = "xtrabackup"; /* - Read "plugin-load" value from backup-my.cnf during prepare phase. + Read "plugin-load" value (encryption plugin) from backup-my.cnf during + prepare phase. The value is stored during backup phase. */ -static std::string get_plugin_from_cnf(const char *dir) +static std::string get_encryption_plugin_from_cnf() { - std::string path = dir + std::string("/backup-my.cnf"); - FILE *f = fopen(path.c_str(), "r"); + FILE *f = fopen("backup-my.cnf", "r"); if (!f) { - die("Can't open %s for reading", path.c_str()); + die("Can't open backup-my.cnf for reading"); } char line[512]; std::string plugin_load; @@ -80,7 +80,7 @@ static std::string get_plugin_from_cnf(const char *dir) } -void xb_plugin_backup_init(MYSQL *mysql) +void encryption_plugin_backup_init(MYSQL *mysql) { MYSQL_RES *result; MYSQL_ROW row; @@ -163,7 +163,7 @@ void xb_plugin_backup_init(MYSQL *mysql) mysql_free_result(result); } - xb_plugin_config = oss.str(); + encryption_plugin_config = oss.str(); argc = 0; argv[argc++] = XTRABACKUP_EXE; @@ -175,23 +175,23 @@ void xb_plugin_backup_init(MYSQL *mysql) } argv[argc] = 0; - xb_plugin_init(argc, argv); + encryption_plugin_init(argc, argv); } -const char *xb_plugin_get_config() +const char *encryption_plugin_get_config() { - return xb_plugin_config.c_str(); + return encryption_plugin_config.c_str(); } extern int finalize_encryption_plugin(st_plugin_int *plugin); -void xb_plugin_prepare_init(int argc, char **argv, const char *dir) +void encryption_plugin_prepare_init(int argc, char **argv) { - std::string plugin_load= get_plugin_from_cnf(dir ? 
dir : "."); + std::string plugin_load= get_encryption_plugin_from_cnf(); if (plugin_load.size()) { - msg("Loading plugins from %s", plugin_load.c_str()); + msg("Loading encryption plugin from %s", plugin_load.c_str()); } else { @@ -211,19 +211,19 @@ void xb_plugin_prepare_init(int argc, char **argv, const char *dir) new_argv[0] = XTRABACKUP_EXE; memcpy(&new_argv[1], argv, argc*sizeof(char *)); - xb_plugin_init(argc+1, new_argv); + encryption_plugin_init(argc+1, new_argv); delete[] new_argv; } -static void xb_plugin_init(int argc, char **argv) +static void encryption_plugin_init(int argc, char **argv) { /* Patch optional and mandatory plugins, we only need to load the one in xb_plugin_load. */ mysql_optional_plugins[0] = mysql_mandatory_plugins[0] = 0; plugin_maturity = MariaDB_PLUGIN_MATURITY_UNKNOWN; /* mariabackup accepts all plugins */ - msg("Loading plugins"); + msg("Loading encryption plugin"); for (int i= 1; i < argc; i++) - msg("\t Plugin parameter : '%s'", argv[i]); + msg("\t Encryption plugin parameter : '%s'", argv[i]); plugin_init(&argc, argv, PLUGIN_INIT_SKIP_PLUGIN_TABLE); } diff --git a/extra/mariabackup/encryption_plugin.h b/extra/mariabackup/encryption_plugin.h new file mode 100644 index 00000000000..16d74790254 --- /dev/null +++ b/extra/mariabackup/encryption_plugin.h @@ -0,0 +1,7 @@ +#include +#include +extern void encryption_plugin_backup_init(MYSQL *mysql); +extern const char* encryption_plugin_get_config(); +extern void encryption_plugin_prepare_init(int argc, char **argv); + +//extern void encryption_plugin_init(int argc, char **argv); diff --git a/extra/mariabackup/innobackupex.cc b/extra/mariabackup/innobackupex.cc index b925b41552d..2de57a14c85 100644 --- a/extra/mariabackup/innobackupex.cc +++ b/extra/mariabackup/innobackupex.cc @@ -78,10 +78,8 @@ my_bool opt_ibx_galera_info = FALSE; my_bool opt_ibx_slave_info = FALSE; my_bool opt_ibx_no_lock = FALSE; my_bool opt_ibx_safe_slave_backup = FALSE; -my_bool opt_ibx_rsync = FALSE; my_bool opt_ibx_force_non_empty_dirs = FALSE; my_bool opt_ibx_noversioncheck = FALSE; -my_bool opt_ibx_no_backup_locks = FALSE; my_bool opt_ibx_decompress = FALSE; char *opt_ibx_incremental_history_name = NULL; @@ -268,8 +266,10 @@ static struct my_option ibx_long_options[] = (uchar *) &opt_ibx_incremental, (uchar *) &opt_ibx_incremental, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"no-lock", OPT_NO_LOCK, "Use this option to disable table lock " - "with \"FLUSH TABLES WITH READ LOCK\". Use it only if ALL your " + {"no-lock", OPT_NO_LOCK, "This option should not be used as " + "mariadb-backup now is using BACKUP LOCKS, which minimizes the " + "lock time. ALTER TABLE can run in parallel with BACKUP LOCKS." + "Use the --no-lock option it only if ALL your " "tables are InnoDB and you DO NOT CARE about the binary log " "position of the backup. This option shouldn't be used if there " "are any DDL statements being executed or if any updates are " @@ -297,15 +297,6 @@ static struct my_option ibx_long_options[] = (uchar *) &opt_ibx_safe_slave_backup, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"rsync", OPT_RSYNC, "Uses the rsync utility to optimize local file " - "transfers. When this option is specified, innobackupex uses rsync " - "to copy all non-InnoDB files instead of spawning a separate cp for " - "each file, which can be much faster for servers with a large number " - "of databases or tables. 
This option cannot be used together with " - "--stream.", - (uchar *) &opt_ibx_rsync, (uchar *) &opt_ibx_rsync, - 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"force-non-empty-directories", OPT_FORCE_NON_EMPTY_DIRS, "This " "option, when specified, makes --copy-back or --move-back transfer " "files to non-empty directories. Note that no existing files will be " @@ -330,13 +321,9 @@ static struct my_option ibx_long_options[] = (uchar *) &opt_ibx_noversioncheck, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"no-backup-locks", OPT_NO_BACKUP_LOCKS, "This option controls if " - "backup locks should be used instead of FLUSH TABLES WITH READ LOCK " - "on the backup stage. The option has no effect when backup locks are " - "not supported by the server. This option is enabled by default, " - "disable with --no-backup-locks.", - (uchar *) &opt_ibx_no_backup_locks, - (uchar *) &opt_ibx_no_backup_locks, + {"no-backup-locks", OPT_NO_BACKUP_LOCKS, + "Old disabled option which has no effect anymore.", + (uchar *) 0, (uchar*) 0, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"decompress", OPT_DECOMPRESS, "Decompresses all files with the .qp " @@ -402,11 +389,10 @@ static struct my_option ibx_long_options[] = REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE, - "This option specifies which types of queries are allowed to complete " - "before innobackupex will issue the global lock. Default is all.", - (uchar*) &opt_ibx_lock_wait_query_type, - (uchar*) &opt_ibx_lock_wait_query_type, &query_type_typelib, - GET_ENUM, REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0}, + "Old disabled option which has no effect anymore (not needed " + "with BACKUP LOCKS)", + (uchar*) 0, (uchar*) 0, &query_type_typelib, GET_ENUM, + REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0}, {"kill-long-query-type", OPT_KILL_LONG_QUERY_TYPE, "This option specifies which types of queries should be killed to " @@ -447,32 +433,32 @@ static struct my_option ibx_long_options[] = REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"kill-long-queries-timeout", OPT_KILL_LONG_QUERIES_TIMEOUT, - "This option specifies the number of seconds innobackupex waits " - "between starting FLUSH TABLES WITH READ LOCK and killing those " - "queries that block it. Default is 0 seconds, which means " - "innobackupex will not attempt to kill any queries.", - (uchar*) &opt_ibx_kill_long_queries_timeout, - (uchar*) &opt_ibx_kill_long_queries_timeout, 0, GET_UINT, + "Old disabled option which has no effect anymore (not needed " + "with BACKUP LOCKS)", + (uchar*) 0, (uchar*) 0, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ftwrl-wait-timeout", OPT_LOCK_WAIT_TIMEOUT, - "This option specifies time in seconds that innobackupex should wait " - "for queries that would block FTWRL before running it. If there are " - "still such queries when the timeout expires, innobackupex terminates " - "with an error. Default is 0, in which case innobackupex does not " - "wait for queries to complete and starts FTWRL immediately.", - (uchar*) &opt_ibx_lock_wait_timeout, - (uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT, + "Alias for startup-wait-timeout", + (uchar*) &opt_ibx_lock_wait_timeout, + (uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT, + REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + + {"startup-wait-timeout", OPT_LOCK_WAIT_TIMEOUT, + "This option specifies time in seconds that mariadb-backup should wait for " + "BACKUP STAGE START to complete. BACKUP STAGE START has to wait until all " + "currently running queries using explicite LOCK TABLES has ended. 
" + "If there are still such queries when the timeout expires, mariadb-backup " + "terminates with an error. Default is 0, in which case mariadb-backup waits " + "indefinitely for BACKUP STAGE START to finish", + (uchar*) &opt_ibx_lock_wait_timeout, + (uchar*) &opt_ibx_lock_wait_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ftwrl-wait-threshold", OPT_LOCK_WAIT_THRESHOLD, - "This option specifies the query run time threshold which is used by " - "innobackupex to detect long-running queries with a non-zero value " - "of --ftwrl-wait-timeout. FTWRL is not started until such " - "long-running queries exist. This option has no effect if " - "--ftwrl-wait-timeout is 0. Default value is 60 seconds.", - (uchar*) &opt_ibx_lock_wait_threshold, - (uchar*) &opt_ibx_lock_wait_threshold, 0, GET_UINT, + "Old disabled option which has no effect anymore (not needed " + "with BACKUP LOCKS)", + (uchar*) 0, (uchar*) 0, 0, GET_UINT, REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT, @@ -864,10 +850,8 @@ ibx_init() opt_slave_info = opt_ibx_slave_info; opt_no_lock = opt_ibx_no_lock; opt_safe_slave_backup = opt_ibx_safe_slave_backup; - opt_rsync = opt_ibx_rsync; opt_force_non_empty_dirs = opt_ibx_force_non_empty_dirs; opt_noversioncheck = opt_ibx_noversioncheck; - opt_no_backup_locks = opt_ibx_no_backup_locks; opt_decompress = opt_ibx_decompress; opt_incremental_history_name = opt_ibx_incremental_history_name; diff --git a/extra/mariabackup/thread_pool.cc b/extra/mariabackup/thread_pool.cc new file mode 100644 index 00000000000..e18581f4b24 --- /dev/null +++ b/extra/mariabackup/thread_pool.cc @@ -0,0 +1,50 @@ +#include "thread_pool.h" +#include "common.h" + +bool ThreadPool::start(size_t threads_count) { + if (!m_stopped) + return false; + m_stopped = false; + for (unsigned i = 0; i < threads_count; ++i) + m_threads.emplace_back(&ThreadPool::thread_func, this, i); + return true; +} + +void ThreadPool::stop() { + if (m_stopped) + return; + m_stop = true; + m_cv.notify_all(); + for (auto &t : m_threads) + t.join(); + m_stopped = true; +}; + +void ThreadPool::push(ThreadPool::job_t &&j) { + std::unique_lock lock(m_mutex); + m_jobs.push(j); + lock.unlock(); + m_cv.notify_one(); +} + +void ThreadPool::thread_func(unsigned thread_num) { + if (my_thread_init()) + die("Can't init mysql thread"); + std::unique_lock lock(m_mutex); + while(true) { + if (m_stop) + goto exit; + while (!m_jobs.empty()) { + if (m_stop) + goto exit; + job_t j = std::move(m_jobs.front()); + m_jobs.pop(); + lock.unlock(); + j(thread_num); + lock.lock(); + } + m_cv.wait(lock, [&] { return m_stop || !m_jobs.empty(); }); + } +exit: + my_thread_end(); +} diff --git a/extra/mariabackup/thread_pool.h b/extra/mariabackup/thread_pool.h new file mode 100644 index 00000000000..10ad74c6220 --- /dev/null +++ b/extra/mariabackup/thread_pool.h @@ -0,0 +1,62 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include "trx0sys.h" + +class ThreadPool { +public: + typedef std::function job_t; + + ThreadPool() { m_stop = false; m_stopped = true; } + ThreadPool (ThreadPool &&other) = delete; + ThreadPool & operator= (ThreadPool &&other) = delete; + ThreadPool(const ThreadPool &) = delete; + ThreadPool & operator= (const ThreadPool &) = delete; + + bool start(size_t threads_count); + void stop(); + void push(job_t &&j); + size_t threads_count() const { return m_threads.size(); } +private: + void thread_func(unsigned thread_num); + std::mutex m_mutex; + std::condition_variable 
m_cv; + std::queue m_jobs; + std::atomic m_stop; + std::atomic m_stopped; + std::vector m_threads; +}; + +class TasksGroup { +public: + TasksGroup(ThreadPool &thread_pool) : m_thread_pool(thread_pool) { + m_tasks_count = 0; + m_tasks_result = 1; + } + void push_task(ThreadPool::job_t &&j) { + ++m_tasks_count; + m_thread_pool.push(std::forward(j)); + } + void finish_task(int res) { + --m_tasks_count; + m_tasks_result.fetch_and(res); + } + int get_result() const { return m_tasks_result; } + bool is_finished() const { + return !m_tasks_count; + } + bool wait_for_finish() { + while (!is_finished()) + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + return get_result(); + } +private: + ThreadPool &m_thread_pool; + std::atomic m_tasks_count; + std::atomic m_tasks_result; +}; diff --git a/extra/mariabackup/write_filt.cc b/extra/mariabackup/write_filt.cc index 052cea26ef6..13f19ca6b6a 100644 --- a/extra/mariabackup/write_filt.cc +++ b/extra/mariabackup/write_filt.cc @@ -144,6 +144,18 @@ wf_incremental_process(xb_write_filt_ctxt_t *ctxt, ds_file_t *dstfile) return false; } + /* Check whether TRX_SYS page has been changed */ + if (mach_read_from_4(page + FIL_PAGE_SPACE_ID) + == TRX_SYS_SPACE + && mach_read_from_4(page + FIL_PAGE_OFFSET) + == TRX_SYS_PAGE_NO) { + msg(cursor->thread_n, + "--incremental backup is impossible if " + "the server had been restarted with " + "different innodb_undo_tablespaces."); + return false; + } + /* updated page */ if (cp->npages == page_size / 4) { /* flush buffer */ diff --git a/extra/mariabackup/wsrep.cc b/extra/mariabackup/wsrep.cc index acaf5c50e7c..15463a85476 100644 --- a/extra/mariabackup/wsrep.cc +++ b/extra/mariabackup/wsrep.cc @@ -55,6 +55,9 @@ permission notice: #define XB_GALERA_INFO_FILENAME "xtrabackup_galera_info" #define XB_GALERA_DONOR_INFO_FILENAME "donor_galera_info" +/* backup copy of galera info file as sent by donor */ +#define XB_GALERA_INFO_FILENAME_SST "xtrabackup_galera_info_SST" + /*********************************************************************** Store Galera checkpoint info in the 'xtrabackup_galera_info' file, if that information is present in the trx system header. Otherwise, do nothing. 
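/*
  Editor's sketch: intended usage of the ThreadPool / TasksGroup pair declared
  above, based only on the interfaces visible in this patch. Every queued job
  must call finish_task() with its result; wait_for_finish() waits until the
  task counter drops to zero and AND-s the results, so it returns true only
  if every task reported success.
*/
#include "thread_pool.h"   // header added by this patch

static bool sketch_run_jobs(ThreadPool &pool /* pool.start(n) already called */) {
  TasksGroup group(pool);
  for (unsigned i = 0; i < 8; ++i)
    group.push_task([&group, i](unsigned thread_num) {
      int ok = 1;
      /* ... perform one unit of work for item `i` on worker `thread_num`,
             setting ok = 0 on failure ... */
      (void) i; (void) thread_num;
      group.finish_task(ok);        // mandatory, otherwise wait_for_finish() spins
    });
  return group.wait_for_finish();
}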
*/ @@ -68,19 +71,45 @@ xb_write_galera_info(bool incremental_prepare) long long seqno; MY_STAT statinfo; - /* Do not overwrite an existing file to be compatible with - servers with older server versions */ - if (!incremental_prepare && - my_stat(XB_GALERA_INFO_FILENAME, &statinfo, MYF(0)) != NULL) { + xid.null(); + /* try to read last wsrep XID from innodb rsegs, we will use it + instead of galera info file received from donor + */ + if (!trx_rseg_read_wsrep_checkpoint(xid)) { + /* no worries yet, SST may have brought in galera info file + from some old MariaDB version, which does not support + wsrep XID storing in innodb rsegs + */ return; } - xid.null(); + /* if SST brought in galera info file, copy it as *_SST file + this will not be used, saved just for future reference + */ + if (my_stat(XB_GALERA_INFO_FILENAME, &statinfo, MYF(0)) != NULL) { + FILE* fp_in = fopen(XB_GALERA_INFO_FILENAME, "r"); + FILE* fp_out = fopen(XB_GALERA_INFO_FILENAME_SST, "w"); - if (!trx_rseg_read_wsrep_checkpoint(xid)) { - - return; + char buf[BUFSIZ] = {'\0'}; + size_t size; + while ((size = fread(buf, 1, BUFSIZ, fp_in))) { + if (fwrite(buf, 1, size, fp_out) != strlen(buf)) { + die( + "could not write to " + XB_GALERA_INFO_FILENAME_SST + ", errno = %d\n", + errno); + } + } + if (!feof(fp_in)) { + die( + XB_GALERA_INFO_FILENAME_SST + " not fully copied\n" + ); + } + fclose(fp_out); + fclose(fp_in); } wsrep_uuid_t uuid; @@ -97,7 +126,6 @@ xb_write_galera_info(bool incremental_prepare) "could not create " XB_GALERA_INFO_FILENAME ", errno = %d\n", errno); - exit(EXIT_FAILURE); } seqno = wsrep_xid_seqno(&xid); diff --git a/extra/mariabackup/xb_plugin.h b/extra/mariabackup/xb_plugin.h deleted file mode 100644 index fea24b6b052..00000000000 --- a/extra/mariabackup/xb_plugin.h +++ /dev/null @@ -1,5 +0,0 @@ -#include -#include -extern void xb_plugin_backup_init(MYSQL *mysql); -extern const char* xb_plugin_get_config(); -extern void xb_plugin_prepare_init(int argc, char **argv, const char *dir); diff --git a/extra/mariabackup/xbstream.cc b/extra/mariabackup/xbstream.cc index 3a3ba55b8b2..5a54caceb72 100644 --- a/extra/mariabackup/xbstream.cc +++ b/extra/mariabackup/xbstream.cc @@ -255,7 +255,7 @@ mode_create(int argc, char **argv) return 1; } - stream = xb_stream_write_new(); + stream = xb_stream_write_new(nullptr, nullptr); if (stream == NULL) { msg("%s: xb_stream_write_new() failed.", my_progname); return 1; @@ -280,7 +280,7 @@ mode_create(int argc, char **argv) goto err; } - file = xb_stream_write_open(stream, filepath, &mystat, NULL, NULL); + file = xb_stream_write_open(stream, filepath, &mystat, false); if (file == NULL) { goto err; } @@ -307,7 +307,8 @@ err: static file_entry_t * -file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen) +file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen, + uchar chunk_flags) { file_entry_t *entry; ds_file_t *file; @@ -324,7 +325,8 @@ file_entry_new(extract_ctxt_t *ctxt, const char *path, uint pathlen) } entry->pathlen = pathlen; - file = ds_open(ctxt->ds_ctxt, path, NULL); + file = ds_open(ctxt->ds_ctxt, path, NULL, + chunk_flags == XB_STREAM_FLAG_REWRITE); if (file == NULL) { msg("%s: failed to create file.", my_progname); @@ -405,10 +407,50 @@ extract_worker_thread_func(void *arg) (uchar *) chunk.path, chunk.pathlen); + if (entry && (chunk.type == XB_CHUNK_TYPE_REMOVE || + chunk.type == XB_CHUNK_TYPE_RENAME)) { + msg("%s: rename and remove chunks can not be applied to opened file: %s", + my_progname, chunk.path); + 
pthread_mutex_unlock(ctxt->mutex); + break; + } + + if (chunk.type == XB_CHUNK_TYPE_REMOVE) { + if (ds_remove(ctxt->ds_ctxt, chunk.path)) { + msg("%s: error on file removing: %s", my_progname, chunk.path); + pthread_mutex_unlock(ctxt->mutex); + res = XB_STREAM_READ_ERROR; + break; + } + pthread_mutex_unlock(ctxt->mutex); + continue; + } + + if (chunk.type == XB_CHUNK_TYPE_RENAME) { + if (my_hash_search(ctxt->filehash, + reinterpret_cast(chunk.data), chunk.length)) { + msg("%s: rename chunks can not be applied to opened file: %s", + my_progname, reinterpret_cast(chunk.data)); + pthread_mutex_unlock(ctxt->mutex); + break; + } + if (ds_rename(ctxt->ds_ctxt, chunk.path, + reinterpret_cast(chunk.data))) { + msg("%s: error on file renaming: %s to %s", my_progname, + reinterpret_cast(chunk.data), chunk.path); + pthread_mutex_unlock(ctxt->mutex); + res = XB_STREAM_READ_ERROR; + break; + } + pthread_mutex_unlock(ctxt->mutex); + continue; + } + if (entry == NULL) { entry = file_entry_new(ctxt, chunk.path, - chunk.pathlen); + chunk.pathlen, + chunk.flags); if (entry == NULL) { pthread_mutex_unlock(ctxt->mutex); break; @@ -425,6 +467,18 @@ extract_worker_thread_func(void *arg) pthread_mutex_unlock(ctxt->mutex); + if (chunk.type == XB_CHUNK_TYPE_SEEK) { + if (ds_seek_set(entry->file, chunk.offset)) { + msg("%s: my_seek() failed.", my_progname); + pthread_mutex_unlock(&entry->mutex); + res = XB_STREAM_READ_ERROR; + break; + } + entry->offset = chunk.offset; + pthread_mutex_unlock(&entry->mutex); + continue; + } + res = xb_stream_validate_checksum(&chunk); if (res != XB_STREAM_READ_CHUNK) { diff --git a/extra/mariabackup/xbstream.h b/extra/mariabackup/xbstream.h index 1b36ec249b6..c8b2997d402 100644 --- a/extra/mariabackup/xbstream.h +++ b/extra/mariabackup/xbstream.h @@ -29,6 +29,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA /* Chunk flags */ /* Chunk can be ignored if unknown version/format */ #define XB_STREAM_FLAG_IGNORABLE 0x01 +#define XB_STREAM_FLAG_REWRITE 0x02 /* Magic + flags + type + path len */ #define CHUNK_HEADER_CONSTANT_LEN ((sizeof(XB_STREAM_CHUNK_MAGIC) - 1) + \ @@ -48,18 +49,21 @@ typedef enum { /************************************************************************ Write interface. 
*/ -typedef ssize_t xb_stream_write_callback(xb_wstream_file_t *file, +typedef ssize_t xb_stream_write_callback( void *userdata, const void *buf, size_t len); -xb_wstream_t *xb_stream_write_new(void); - +xb_wstream_t *xb_stream_write_new( + xb_stream_write_callback *write_callback, void *user_data); xb_wstream_file_t *xb_stream_write_open(xb_wstream_t *stream, const char *path, - MY_STAT *mystat, void *userdata, - xb_stream_write_callback *onwrite); + const MY_STAT *mystat, bool rewrite); int xb_stream_write_data(xb_wstream_file_t *file, const void *buf, size_t len); - +int xb_stream_write_seek_set(xb_wstream_file_t *file, my_off_t offset); +int xb_stream_write_remove(xb_wstream_t *stream, const char *path); +int +xb_stream_write_rename( + xb_wstream_t *stream, const char *old_path, const char *new_path); int xb_stream_write_close(xb_wstream_file_t *file); int xb_stream_write_done(xb_wstream_t *stream); @@ -76,6 +80,9 @@ typedef enum { typedef enum { XB_CHUNK_TYPE_UNKNOWN = '\0', XB_CHUNK_TYPE_PAYLOAD = 'P', + XB_CHUNK_TYPE_RENAME = 'R', + XB_CHUNK_TYPE_REMOVE = 'D', + XB_CHUNK_TYPE_SEEK = 'S', XB_CHUNK_TYPE_EOF = 'E' } xb_chunk_type_t; diff --git a/extra/mariabackup/xbstream_read.cc b/extra/mariabackup/xbstream_read.cc index b54a98157ea..d82176ad8be 100644 --- a/extra/mariabackup/xbstream_read.cc +++ b/extra/mariabackup/xbstream_read.cc @@ -59,6 +59,9 @@ validate_chunk_type(uchar code) { switch ((xb_chunk_type_t) code) { case XB_CHUNK_TYPE_PAYLOAD: + case XB_CHUNK_TYPE_RENAME: + case XB_CHUNK_TYPE_REMOVE: + case XB_CHUNK_TYPE_SEEK: case XB_CHUNK_TYPE_EOF: return (xb_chunk_type_t) code; default: @@ -159,57 +162,91 @@ xb_stream_read_chunk(xb_rstream_t *stream, xb_rstream_chunk_t *chunk) } chunk->path[pathlen] = '\0'; - if (chunk->type == XB_CHUNK_TYPE_EOF) { + if (chunk->type == XB_CHUNK_TYPE_EOF || + chunk->type == XB_CHUNK_TYPE_REMOVE) { return XB_STREAM_READ_CHUNK; } - /* Payload length */ - F_READ(tmpbuf, 16); - ullval = uint8korr(tmpbuf); - if (ullval > (ulonglong) SIZE_T_MAX) { - msg("xb_stream_read_chunk(): chunk length is too large at " - "offset 0x%llx: 0x%llx.", (ulonglong) stream->offset, - ullval); - goto err; - } - chunk->length = (size_t) ullval; - stream->offset += 8; - - /* Payload offset */ - ullval = uint8korr(tmpbuf + 8); - if (ullval > (ulonglong) MY_OFF_T_MAX) { - msg("xb_stream_read_chunk(): chunk offset is too large at " - "offset 0x%llx: 0x%llx.", (ulonglong) stream->offset, - ullval); - goto err; - } - chunk->offset = (my_off_t) ullval; - stream->offset += 8; - - /* Reallocate the buffer if needed */ - if (chunk->length > chunk->buflen) { - chunk->data = my_realloc(PSI_NOT_INSTRUMENTED, chunk->data, chunk->length, - MYF(MY_WME | MY_ALLOW_ZERO_PTR)); - if (chunk->data == NULL) { - msg("xb_stream_read_chunk(): failed to increase buffer " - "to %lu bytes.", (ulong) chunk->length); + if (chunk->type == XB_CHUNK_TYPE_RENAME) { + F_READ(tmpbuf, 4); + size_t new_pathlen = uint4korr(tmpbuf); + if (new_pathlen >= FN_REFLEN) { + msg("xb_stream_read_chunk(): path length (%lu) for new name of 'rename'" + " chunk is too large", (ulong) new_pathlen); goto err; } - chunk->buflen = chunk->length; + chunk->length = new_pathlen; + stream->offset +=4; + } + else if (chunk->type == XB_CHUNK_TYPE_SEEK) { + F_READ(tmpbuf, 8); + chunk->offset = uint8korr(tmpbuf); + stream->offset += 8; + return XB_STREAM_READ_CHUNK; + } + else { + /* Payload length */ + F_READ(tmpbuf, 16); + ullval = uint8korr(tmpbuf); + if (ullval > (ulonglong) SIZE_T_MAX) { + msg("xb_stream_read_chunk(): chunk length is 
too large at " + "offset 0x%llx: 0x%llx.", (ulonglong) stream->offset, + ullval); + goto err; + } + chunk->length = (size_t) ullval; + stream->offset += 8; + + /* Payload offset */ + ullval = uint8korr(tmpbuf + 8); + if (ullval > (ulonglong) MY_OFF_T_MAX) { + msg("xb_stream_read_chunk(): chunk offset is too large at " + "offset 0x%llx: 0x%llx.", (ulonglong) stream->offset, + ullval); + goto err; + } + chunk->offset = (my_off_t) ullval; + stream->offset += 8; } - /* Checksum */ - F_READ(tmpbuf, 4); - chunk->checksum = uint4korr(tmpbuf); - chunk->checksum_offset = stream->offset; + /* Reallocate the buffer if needed, take into account trailing '\0' for + new file name in the case of XB_CHUNK_TYPE_RENAME */ + if (chunk->length + 1 > chunk->buflen) { + chunk->data = my_realloc(PSI_NOT_INSTRUMENTED, chunk->data, + chunk->length + 1, MYF(MY_WME | MY_ALLOW_ZERO_PTR)); + if (chunk->data == NULL) { + msg("xb_stream_read_chunk(): failed to increase buffer " + "to %lu bytes.", (ulong) chunk->length + 1); + goto err; + } + chunk->buflen = chunk->length + 1; + } - /* Payload */ - if (chunk->length > 0) { + if (chunk->type == XB_CHUNK_TYPE_RENAME) { + if (chunk->length == 0) { + msg("xb_stream_read_chunk(): failed to read new name for file to rename " + ": %s", chunk->path); + goto err; + } F_READ(chunk->data, chunk->length); stream->offset += chunk->length; + reinterpret_cast(chunk->data)[chunk->length] = '\0'; + ++chunk->length; } + else { + /* Checksum */ + F_READ(tmpbuf, 4); + chunk->checksum = uint4korr(tmpbuf); + chunk->checksum_offset = stream->offset; - stream->offset += 4; + /* Payload */ + if (chunk->length > 0) { + F_READ(chunk->data, chunk->length); + stream->offset += chunk->length; + } + + stream->offset += 4; + } return XB_STREAM_READ_CHUNK; diff --git a/extra/mariabackup/xbstream_write.cc b/extra/mariabackup/xbstream_write.cc index 5801e867aac..926e091becb 100644 --- a/extra/mariabackup/xbstream_write.cc +++ b/extra/mariabackup/xbstream_write.cc @@ -21,6 +21,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include #include #include +#include #include "common.h" #include "xbstream.h" @@ -29,6 +30,8 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA struct xb_wstream_struct { pthread_mutex_t mutex; + xb_stream_write_callback *write; + void *user_data; }; struct xb_wstream_file_struct { @@ -39,8 +42,7 @@ struct xb_wstream_file_struct { char *chunk_ptr; size_t chunk_free; my_off_t offset; - void *userdata; - xb_stream_write_callback *write; + bool rewrite; }; static int xb_stream_flush(xb_wstream_file_t *file); @@ -50,7 +52,7 @@ static int xb_stream_write_eof(xb_wstream_file_t *file); static ssize_t -xb_stream_default_write_callback(xb_wstream_file_t *file __attribute__((unused)), +xb_stream_default_write_callback( void *userdata __attribute__((unused)), const void *buf, size_t len) { @@ -60,21 +62,31 @@ xb_stream_default_write_callback(xb_wstream_file_t *file __attribute__((unused)) } xb_wstream_t * -xb_stream_write_new(void) +xb_stream_write_new( + xb_stream_write_callback *write_callback, void *user_data) { xb_wstream_t *stream; stream = (xb_wstream_t *) my_malloc(PSI_NOT_INSTRUMENTED, sizeof(xb_wstream_t), MYF(MY_FAE)); pthread_mutex_init(&stream->mutex, NULL); + if (write_callback) { +#ifdef _WIN32 + setmode(fileno(stdout), _O_BINARY); +#endif + stream->write = write_callback; + stream->user_data = user_data; + } + else { + stream->write = xb_stream_default_write_callback; + stream->user_data = user_data; + } return stream;; 
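/*
  Editor's summary of the xbstream chunk layout handled by the reader above.
  Every chunk starts with the magic, one flag byte, one type byte, a 4-byte
  path length and the path; what follows depends on the type. Sketch for
  orientation only; xbstream_read.cc / xbstream_write.cc in this patch are the
  authoritative definition.
*/
enum class sketch_chunk_type : char {
  payload = 'P',  // 8-byte length, 8-byte offset, 4-byte checksum, then the payload
  rename  = 'R',  // 4-byte new-path length, then the new path (no checksum)
  remove  = 'D',  // nothing after the common header
  seek    = 'S',  // 8-byte absolute offset to apply before the next payload
  eof     = 'E'   // nothing after the common header
};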
} xb_wstream_file_t * xb_stream_write_open(xb_wstream_t *stream, const char *path, - MY_STAT *mystat __attribute__((unused)), - void *userdata, - xb_stream_write_callback *onwrite) + const MY_STAT *mystat __attribute__((unused)), bool rewrite) { xb_wstream_file_t *file; size_t path_len; @@ -109,16 +121,7 @@ xb_stream_write_open(xb_wstream_t *stream, const char *path, file->offset = 0; file->chunk_ptr = file->chunk; file->chunk_free = XB_STREAM_MIN_CHUNK_SIZE; - if (onwrite) { -#ifdef _WIN32 - setmode(fileno(stdout), _O_BINARY); -#endif - file->userdata = userdata; - file->write = onwrite; - } else { - file->userdata = NULL; - file->write = xb_stream_default_write_callback; - } + file->rewrite = rewrite; return file; } @@ -202,7 +205,8 @@ xb_stream_write_chunk(xb_wstream_file_t *file, const void *buf, size_t len) memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1); ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1; - *ptr++ = 0; /* Chunk flags */ + *ptr++ = + file->rewrite ? XB_STREAM_FLAG_REWRITE : 0; /* Chunk flags */ *ptr++ = (uchar) XB_CHUNK_TYPE_PAYLOAD; /* Chunk type */ @@ -227,11 +231,11 @@ xb_stream_write_chunk(xb_wstream_file_t *file, const void *buf, size_t len) xb_ad(ptr <= tmpbuf + sizeof(tmpbuf)); - if (file->write(file, file->userdata, tmpbuf, ptr-tmpbuf) == -1) + if (stream->write(stream->user_data, tmpbuf, ptr-tmpbuf) == -1) goto err; - if (file->write(file, file->userdata, buf, len) == -1) /* Payload */ + if (stream->write(stream->user_data, buf, len) == -1) /* Payload */ goto err; file->offset+= len; @@ -247,6 +251,38 @@ err: return 1; } +int xb_stream_write_seek_set(xb_wstream_file_t *file, my_off_t offset) +{ + /* Chunk magic + flags + chunk type + path_len + path + offset */ + uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 + + FN_REFLEN + 8]; + int error = 0; + xb_wstream_t *stream = file->stream; + uchar *ptr = tmpbuf; + /* Chunk magic */ + memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1); + ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1; + *ptr++ = 0; /* Chunk flags */ + *ptr++ = (uchar) XB_CHUNK_TYPE_SEEK; /* Chunk type */ + int4store(ptr, file->path_len); /* Path length */ + ptr += 4; + memcpy(ptr, file->path, file->path_len); /* Path */ + ptr += file->path_len; + int8store(ptr, static_cast(offset)); /* Offset */ + ptr += 8; + if (xb_stream_flush(file)) + return 1; + pthread_mutex_lock(&stream->mutex); + if (stream->write(stream->user_data, tmpbuf, ptr-tmpbuf) == -1) + error = 1; + if (!error) + file->offset = offset; + pthread_mutex_unlock(&stream->mutex); + if (xb_stream_flush(file)) + return 1; + return error; +} + static int xb_stream_write_eof(xb_wstream_file_t *file) @@ -278,7 +314,7 @@ xb_stream_write_eof(xb_wstream_file_t *file) xb_ad(ptr <= tmpbuf + sizeof(tmpbuf)); - if (file->write(file, file->userdata, tmpbuf, + if (stream->write(stream->user_data, tmpbuf, (ulonglong) (ptr - tmpbuf)) == -1) goto err; @@ -291,3 +327,77 @@ err: return 1; } + + +int +xb_stream_write_remove(xb_wstream_t *stream, const char *path) { + /* Chunk magic + flags + chunk type + path_len + path */ + uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + 4 + FN_REFLEN]; + uchar *ptr = tmpbuf; + /* Chunk magic */ + memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1); + ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1; + + *ptr++ = 0; /* Chunk flags */ + + *ptr++ = (uchar) XB_CHUNK_TYPE_REMOVE; /* Chunk type */ + size_t path_len = strlen(path); + int4store(ptr, path_len); /* Path length */ + ptr += 4; + + memcpy(ptr, path, path_len); /* 
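/*
  Editor's note: the flag byte written above is how a re-copied file travels
  through the stream. The writer marks every payload chunk of such a file with
  XB_STREAM_FLAG_REWRITE, and the extractor (file_entry_new() in xbstream.cc
  in this patch) turns that flag back into the `rewrite` argument of ds_open().
  Minimal sketch of the mapping; the patch itself compares the flag byte for
  equality rather than masking it.
*/
static inline bool sketch_chunk_wants_rewrite(unsigned char chunk_flags) {
  const unsigned char k_rewrite_flag = 0x02;   // XB_STREAM_FLAG_REWRITE
  return (chunk_flags & k_rewrite_flag) != 0;
}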
Path */ + ptr += path_len; + + xb_ad(ptr <= tmpbuf + sizeof(tmpbuf)); + + pthread_mutex_lock(&stream->mutex); + + ssize_t result = stream->write(stream->user_data, tmpbuf, + (ulonglong) (ptr - tmpbuf)); + + pthread_mutex_unlock(&stream->mutex); + + return result < 0; + +} + +int +xb_stream_write_rename( + xb_wstream_t *stream, const char *old_path, const char *new_path) { + /* Chunk magic + flags + chunk type + path_len + path + path_len + path*/ + uchar tmpbuf[sizeof(XB_STREAM_CHUNK_MAGIC) - 1 + 1 + 1 + + 4 + FN_REFLEN + 4 + FN_REFLEN]; + uchar *ptr = tmpbuf; + /* Chunk magic */ + memcpy(ptr, XB_STREAM_CHUNK_MAGIC, sizeof(XB_STREAM_CHUNK_MAGIC) - 1); + ptr += sizeof(XB_STREAM_CHUNK_MAGIC) - 1; + + *ptr++ = 0; /* Chunk flags */ + + *ptr++ = (uchar) XB_CHUNK_TYPE_RENAME; /* Chunk type */ + size_t path_len = strlen(old_path); + int4store(ptr, path_len); /* Path length */ + ptr += 4; + + memcpy(ptr, old_path, path_len); /* Path */ + ptr += path_len; + + path_len = strlen(new_path); + int4store(ptr, path_len); /* Path length */ + ptr += 4; + + memcpy(ptr, new_path, path_len); /* Path */ + ptr += path_len; + + xb_ad(ptr <= tmpbuf + sizeof(tmpbuf)); + + pthread_mutex_lock(&stream->mutex); + + ssize_t result = stream->write(stream->user_data, tmpbuf, + (ulonglong) (ptr - tmpbuf)); + + pthread_mutex_unlock(&stream->mutex); + + return result < 0; +} + diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 29abc931002..5facdefb193 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -69,6 +69,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA # include #endif +#include "aria_backup_client.h" #include #include @@ -81,6 +82,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include #include #include "ha_innodb.h" +#include "fts0types.h" #include #include @@ -96,7 +98,6 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include "xb_regex.h" #include "fil_cur.h" #include "write_filt.h" -#include "xtrabackup.h" #include "ds_buffer.h" #include "ds_tmpfile.h" #include "xbstream.h" @@ -106,12 +107,17 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA #include "backup_mysql.h" #include "backup_copy.h" #include "backup_mysql.h" -#include "xb_plugin.h" +#include "encryption_plugin.h" #include #include #include #include #include +#include +#include "ddl_log.h" +#include "common_engine.h" +#include "lex_string.h" +#include "sql_table.h" #include "backup_debug.h" #define MB_CORRUPTED_PAGES_FILE "innodb_corrupted_pages" @@ -124,6 +130,9 @@ int sd_notifyf() { return 0; } int sys_var_init(); +extern const char* fts_common_tables[]; +extern const fts_index_selector_t fts_index_selector[]; + /* === xtrabackup specific options === */ #define DEFAULT_TARGET_DIR "./xtrabackup_backupfiles/" char xtrabackup_real_target_dir[FN_REFLEN] = DEFAULT_TARGET_DIR; @@ -138,8 +147,8 @@ my_bool xtrabackup_decrypt_decompress; my_bool xtrabackup_print_param; my_bool xtrabackup_mysqld_args; my_bool xtrabackup_help; - my_bool xtrabackup_export; +my_bool ignored_option; longlong xtrabackup_use_memory; @@ -192,10 +201,12 @@ struct xb_filter_entry_t{ xb_filter_entry_t *name_hash; }; +lsn_t checkpoint_lsn_start; +lsn_t checkpoint_no_start; /** whether log_copying_thread() is active; protected by recv_sys.mutex */ static bool log_copying_running; -int xtrabackup_parallel; +uint xtrabackup_parallel; char *xtrabackup_stream_str = NULL; xb_stream_fmt_t xtrabackup_stream_fmt = XB_STREAM_FMT_NONE; @@ -252,7 +263,7 @@ recv_sys.mutex. 
*/ static std::set fail_undo_ids; longlong innobase_page_size = (1LL << 14); /* 16KB */ -char* innobase_buffer_pool_filename = NULL; +char *innobase_buffer_pool_filename = NULL; /* The default values for the following char* start-up parameters are determined in innobase_init below: */ @@ -358,10 +369,8 @@ my_bool opt_galera_info = FALSE; my_bool opt_slave_info = FALSE; my_bool opt_no_lock = FALSE; my_bool opt_safe_slave_backup = FALSE; -my_bool opt_rsync = FALSE; my_bool opt_force_non_empty_dirs = FALSE; my_bool opt_noversioncheck = FALSE; -my_bool opt_no_backup_locks = FALSE; my_bool opt_decompress = FALSE; my_bool opt_remove_original; my_bool opt_log_innodb_page_corruption; @@ -697,8 +706,190 @@ typedef void (*process_single_tablespace_func_t)(const char *dirname, uint32_t defer_space_id); static dberr_t enumerate_ibd_files(process_single_tablespace_func_t callback); +const char *convert_dst(const char *dst) { + return + (xtrabackup_copy_back || xtrabackup_move_back) ? + dst : trim_dotslash(dst); +} + +std::string convert_tablename_to_filepath( + const char *data_dir_path, const std::string &db, const std::string &table) { + char dbbuff[FN_REFLEN]; + char tbbuff[FN_REFLEN]; + (void)tablename_to_filename(db.c_str(), dbbuff, sizeof(dbbuff)); + (void)tablename_to_filename(table.c_str(), tbbuff, sizeof(tbbuff)); + std::string result(data_dir_path); + result.append(1, FN_LIBCHAR).append(dbbuff). + append(1, FN_LIBCHAR).append(tbbuff); + return result; +} + +std::tuple +convert_filepath_to_tablename(const char *filepath) { + char db_name_orig[FN_REFLEN]; + char table_name_orig[FN_REFLEN]; + parse_db_table_from_file_path(filepath, db_name_orig, table_name_orig); + if (!db_name_orig[0] || !table_name_orig[0]) + return std::make_tuple("", "", ""); + char db_name_conv[FN_REFLEN]; + char table_name_conv[FN_REFLEN]; + filename_to_tablename(db_name_orig, db_name_conv, sizeof(db_name_conv)); + filename_to_tablename( + table_name_orig, table_name_conv, sizeof(table_name_conv)); + if (!db_name_conv[0] || !table_name_conv[0]) + return std::make_tuple("", "", ""); + return std::make_tuple(db_name_conv, table_name_conv, + std::string(db_name_orig).append("/").append(table_name_orig)); +} + +std::string get_table_version_from_image(const std::vector &frm_image) { + DBUG_ASSERT(frm_image.size() >= 64); + + if (!strncmp((char*) frm_image.data(), "TYPE=VIEW\n", 10)) + return {}; + + if (!is_binary_frm_header(frm_image.data())) + return {}; + + /* Length of the MariaDB extra2 segment in the form file. 
*/ + uint len = uint2korr(frm_image.data() + 4); + const uchar *extra2= frm_image.data() + 64; + + if (*extra2 == '/') // old frm had '/' there + return {}; + + const uchar *e2end= extra2 + len; + while (extra2 + 3 <= e2end) + { + uchar type= *extra2++; + size_t length= *extra2++; + if (!length) + { + if (extra2 + 2 >= e2end) + return {}; + length= uint2korr(extra2); + extra2+= 2; + if (length < 256) + return {}; + } + if (extra2 + length > e2end) + return {}; + if (type == EXTRA2_TABLEDEF_VERSION) { + char buff[MY_UUID_STRING_LENGTH]; + my_uuid2str(extra2, buff, 1); + return std::string(buff, buff + MY_UUID_STRING_LENGTH); + } + extra2+= length; + } + + return {}; +} + +std::pair + get_table_engine_from_image(const std::vector &frm_image) { + + DBUG_ASSERT(frm_image.size() >= 64); + + if (!strncmp((char*) frm_image.data(), "TYPE=VIEW\n", 10)) + return std::make_pair(false, DB_TYPE_UNKNOWN); + + if (!is_binary_frm_header(frm_image.data())) + return std::make_pair(false, DB_TYPE_UNKNOWN); + + legacy_db_type dbt = (legacy_db_type)frm_image[3]; + + if (dbt >= DB_TYPE_FIRST_DYNAMIC) + return std::make_pair(false, DB_TYPE_UNKNOWN); + + if (dbt != DB_TYPE_PARTITION_DB) + return std::make_pair(false, dbt); + + dbt = (legacy_db_type)frm_image[61]; + return std::make_pair(true, + dbt < DB_TYPE_FIRST_DYNAMIC ? dbt : DB_TYPE_UNKNOWN); +} + +std::vector read_frm_image(File file) { + std::vector frm_image; + MY_STAT state; + + if (mysql_file_fstat(file, &state, MYF(MY_WME))) + return frm_image; + + frm_image.resize((size_t)state.st_size, 0); + + if (mysql_file_read( + file, frm_image.data(), (size_t)state.st_size, MYF(MY_NABP))) + frm_image.clear(); + + return frm_image; +} + +std::string read_table_version_id(File file) { + auto frm_image = read_frm_image(file); + if (frm_image.empty()) + return {}; + return get_table_version_from_image(frm_image); +} + +bool is_log_table(const char *dbname, const char *tablename) { + DBUG_ASSERT(dbname); + DBUG_ASSERT(tablename); + + LEX_CSTRING lex_db; + LEX_CSTRING lex_table; + lex_db.str = dbname; + lex_db.length = strlen(dbname); + lex_table.str = tablename; + lex_table.length = strlen(tablename); + + if (!lex_string_eq(&MYSQL_SCHEMA_NAME, &lex_db)) + return false; + + if (lex_string_eq(&GENERAL_LOG_NAME, &lex_table)) + return true; + + if (lex_string_eq(&SLOW_LOG_NAME, &lex_table)) + return true; + + return false; +} + +bool is_stats_table(const char *dbname, const char *tablename) { + DBUG_ASSERT(dbname); + DBUG_ASSERT(tablename); + + LEX_CSTRING lex_db; + LEX_CSTRING lex_table; + lex_db.str = dbname; + lex_db.length = strlen(dbname); + lex_table.str = tablename; + lex_table.length = strlen(tablename); + + if (!lex_string_eq(&MYSQL_SCHEMA_NAME, &lex_db)) + return false; + + CHARSET_INFO *ci= system_charset_info; + + return (lex_table.length > 4 && + /* one of mysql.*_stat tables, but not mysql.innodb* tables*/ + ((my_tolower(ci, lex_table.str[lex_table.length-5]) == 's' && + my_tolower(ci, lex_table.str[lex_table.length-4]) == 't' && + my_tolower(ci, lex_table.str[lex_table.length-3]) == 'a' && + my_tolower(ci, lex_table.str[lex_table.length-2]) == 't' && + my_tolower(ci, lex_table.str[lex_table.length-1]) == 's') && + !(my_tolower(ci, lex_table.str[0]) == 'i' && + my_tolower(ci, lex_table.str[1]) == 'n' && + my_tolower(ci, lex_table.str[2]) == 'n' && + my_tolower(ci, lex_table.str[3]) == 'o'))); +} + /* ======== Datafiles iterator ======== */ struct datafiles_iter_t { + datafiles_iter_t() : space(fil_system.space_list.end()), node(nullptr), started(FALSE) { + 
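/*
  Editor's sketch of the "extra2" walk performed by
  get_table_version_from_image() above: the segment is a sequence of
  (type, length, value) records where a zero length byte means the real
  length follows in the next two little-endian bytes and must be >= 256.
  This stand-alone helper just returns the raw bytes of a requested record
  type; it is illustrative and uses no mysys macros.
*/
#include <cstddef>
#include <vector>

static std::vector<unsigned char>
sketch_find_extra2_record(const unsigned char *extra2, size_t len,
                          unsigned char wanted_type)
{
  const unsigned char *end = extra2 + len;
  while (extra2 + 3 <= end) {
    unsigned char type = *extra2++;
    size_t field_len = *extra2++;
    if (field_len == 0) {                       // extended two-byte length
      if (extra2 + 2 >= end)
        return {};
      field_len = extra2[0] | (static_cast<size_t>(extra2[1]) << 8);
      extra2 += 2;
      if (field_len < 256)
        return {};                              // would have fitted in one byte
    }
    if (extra2 + field_len > end)
      return {};
    if (type == wanted_type)
      return std::vector<unsigned char>(extra2, extra2 + field_len);
    extra2 += field_len;
  }
  return {};                                    // record not present
}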
} + ~datafiles_iter_t() { + } space_list_t::iterator space = fil_system.space_list.end(); fil_node_t *node = nullptr; bool started = false; @@ -778,8 +969,6 @@ static void *dbug_execute_in_new_connection(void *arg) return nullptr; } -static pthread_t dbug_alter_thread; - /* Execute query from a new connection, in own thread. @@ -790,8 +979,9 @@ Execute query from a new connection, in own thread. otherwise query should return error. @param expected_errno - if not 0, and query finished with error, expected mysql_errno() +@return created thread id */ -static void dbug_start_query_thread( +static pthread_t dbug_start_query_thread( const char *query, const char *wait_state, int expected_err, @@ -803,12 +993,14 @@ static void dbug_start_query_thread( par->expect_err = expected_err; par->expect_errno = expected_errno; par->con = xb_mysql_connect(); - - mysql_thread_create(0, &dbug_alter_thread, nullptr, + if (mysql_set_server_option(par->con, MYSQL_OPTION_MULTI_STATEMENTS_ON)) + die("Can't set multistatement option for query: %s", query); + pthread_t result_thread; + mysql_thread_create(0, &result_thread, nullptr, dbug_execute_in_new_connection, par); if (!wait_state) - return; + return result_thread; char q[256]; snprintf(q, sizeof(q), @@ -830,7 +1022,11 @@ static void dbug_start_query_thread( end: msg("query '%s' on connection %lu reached state '%s'", query, mysql_thread_id(par->con), wait_state); + return result_thread; } + +static pthread_t dbug_alter_thread; +static pthread_t dbug_emulate_ddl_on_intermediate_table_thread; #endif void mdl_lock_all() @@ -953,6 +1149,31 @@ static void backup_file_op(uint32_t space_id, int type, } } +static bool check_if_fts_table(const char *file_name) { + const char *table_name_start = strrchr(file_name, '/'); + if (table_name_start) + ++table_name_start; + else + table_name_start = file_name; + + if (!starts_with(table_name_start,"FTS_")) + return false; + + const char *table_name_end = strrchr(table_name_start, '.'); + if (!table_name_end) + table_name_end = table_name_start + strlen(table_name_start); + ptrdiff_t table_name_len = table_name_end - table_name_end; + + for (const char **suffix = fts_common_tables; *suffix; ++suffix) + if (!strncmp(table_name_start, *suffix, table_name_len)) + return true; + for (size_t i = 0; fts_index_selector[i].suffix; ++i) + if (!strncmp(table_name_start, fts_index_selector[i].suffix, + table_name_len)) + return true; + + return false; +} /* This callback is called if DDL operation is detected, @@ -986,8 +1207,9 @@ static void backup_file_op_fail(uint32_t space_id, int type, break; case FILE_DELETE: fail = !check_if_skip_table( - filename_to_spacename(name, len).c_str()); - msg("DDL tracking : delete %u \"%.*s\"", space_id, int(len), name); + filename_to_spacename(name, len).c_str()) + && !check_if_fts_table(reinterpret_cast(name)); + msg("DDL tracking : delete %u \"%.*s\"", space_id, int(len), name); break; default: ut_ad(0); @@ -1115,6 +1337,7 @@ enum options_xtrabackup OPT_INNODB_DATA_FILE_BUFFERING, OPT_INNODB_DATA_FILE_WRITE_THROUGH, OPT_INNODB_LOG_FILE_SIZE, + OPT_INNODB_LOG_FILES_IN_GROUP, OPT_INNODB_OPEN_FILES, OPT_XTRA_DEBUG_SYNC, OPT_INNODB_CHECKSUM_ALGORITHM, @@ -1132,9 +1355,9 @@ enum options_xtrabackup OPT_NO_LOCK, OPT_SAFE_SLAVE_BACKUP, OPT_RSYNC, + OPT_NO_BACKUP_LOCKS, OPT_FORCE_NON_EMPTY_DIRS, OPT_NO_VERSION_CHECK, - OPT_NO_BACKUP_LOCKS, OPT_DECOMPRESS, OPT_INCREMENTAL_HISTORY_NAME, OPT_INCREMENTAL_HISTORY_UUID, @@ -1346,8 +1569,10 @@ struct my_option xb_client_options[]= { 0, 0, 0, 0, 0, 0}, {"no-lock", 
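/*
  Editor's note on check_if_fts_table() above: DDL tracking must not treat the
  deletion of InnoDB's auxiliary full-text tables as an error. Their file
  names start with "FTS_"; the patch additionally consults fts_common_tables[]
  and fts_index_selector[] for the known suffixes. A minimal, purely
  illustrative version of the name test:
*/
#include <cstring>

static bool sketch_is_fts_aux_name(const char *file_name) {
  const char *name = std::strrchr(file_name, '/');
  name = name ? name + 1 : file_name;            // strip the directory part
  return std::strncmp(name, "FTS_", 4) == 0;     // e.g. FTS_<id>_<suffix>.ibd
}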
OPT_NO_LOCK, - "Use this option to disable table lock " - "with \"FLUSH TABLES WITH READ LOCK\". Use it only if ALL your " + "This option should not be used as " + "mariadb-backup now is using BACKUP LOCKS, which minimizes the " + "lock time. ALTER TABLE can run in parallel with BACKUP LOCKS." + "Use the --no-lock option it only if ALL your " "tables are InnoDB and you DO NOT CARE about the binary log " "position of the backup. This option shouldn't be used if there " "are any DDL statements being executed or if any updates are " @@ -1376,14 +1601,12 @@ struct my_option xb_client_options[]= { GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"rsync", OPT_RSYNC, - "Uses the rsync utility to optimize local file " - "transfers. When this option is specified, " XB_TOOL_NAME " uses rsync " - "to copy all non-InnoDB files instead of spawning a separate cp for " - "each file, which can be much faster for servers with a large number " - "of databases or tables. This option cannot be used together with " - "--stream.", - (uchar *) &opt_rsync, (uchar *) &opt_rsync, 0, GET_BOOL, NO_ARG, 0, 0, 0, - 0, 0, 0}, + "Obsolete depricated option", + &ignored_option, &ignored_option, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, + + {"no-backup-locks", OPT_NO_BACKUP_LOCKS, + "Obsolete depricated option", + &ignored_option, &ignored_option, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"force-non-empty-directories", OPT_FORCE_NON_EMPTY_DIRS, "This " @@ -1401,15 +1624,6 @@ struct my_option xb_client_options[]= { (uchar *) &opt_noversioncheck, (uchar *) &opt_noversioncheck, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"no-backup-locks", OPT_NO_BACKUP_LOCKS, - "This option controls if " - "backup locks should be used instead of FLUSH TABLES WITH READ LOCK " - "on the backup stage. The option has no effect when backup locks are " - "not supported by the server. This option is enabled by default, " - "disable with --no-backup-locks.", - (uchar *) &opt_no_backup_locks, (uchar *) &opt_no_backup_locks, 0, - GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, - {"decompress", OPT_DECOMPRESS, "Decompresses all files with the .qp " "extension in a backup previously made with the --compress option. " @@ -1486,11 +1700,10 @@ struct my_option xb_client_options[]= { (uchar *) &opt_remove_original, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"ftwrl-wait-query-type", OPT_LOCK_WAIT_QUERY_TYPE, - "This option specifies which types of queries are allowed to complete " - "before " XB_TOOL_NAME " will issue the global lock. Default is all.", - (uchar *) &opt_lock_wait_query_type, (uchar *) &opt_lock_wait_query_type, - &query_type_typelib, GET_ENUM, REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, - 0}, + "Old disabled option which has no effect anymore (not needed " + "with BACKUP LOCKS)", + (uchar*) 0, (uchar*) 0, &query_type_typelib, GET_ENUM, + REQUIRED_ARG, QUERY_TYPE_ALL, 0, 0, 0, 0, 0}, {"kill-long-query-type", OPT_KILL_LONG_QUERY_TYPE, "This option specifies which types of queries should be killed to " @@ -1507,32 +1720,31 @@ struct my_option xb_client_options[]= { NULL, NULL, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, {"kill-long-queries-timeout", OPT_KILL_LONG_QUERIES_TIMEOUT, - "This option specifies the number of seconds " XB_TOOL_NAME " waits " - "between starting FLUSH TABLES WITH READ LOCK and killing those " - "queries that block it. 
Default is 0 seconds, which means " - XB_TOOL_NAME " will not attempt to kill any queries.", - (uchar *) &opt_kill_long_queries_timeout, - (uchar *) &opt_kill_long_queries_timeout, 0, GET_UINT, REQUIRED_ARG, 0, 0, + "Old disabled option which has no effect anymore (not needed " + "with BACKUP LOCKS)", + (uchar*) 0, (uchar*) 0, 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ftwrl-wait-timeout", OPT_LOCK_WAIT_TIMEOUT, - "This option specifies time in seconds that " XB_TOOL_NAME " should wait " - "for queries that would block FTWRL before running it. If there are " - "still such queries when the timeout expires, " XB_TOOL_NAME " terminates " - "with an error. Default is 0, in which case " XB_TOOL_NAME " does not " - "wait for queries to complete and starts FTWRL immediately.", - (uchar *) &opt_lock_wait_timeout, (uchar *) &opt_lock_wait_timeout, 0, - GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + "Alias for startup-wait-timeout", + (uchar*) &opt_lock_wait_timeout, (uchar*) &opt_lock_wait_timeout, + 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, + + {"startup-wait-timeout", OPT_LOCK_WAIT_TIMEOUT, + "This option specifies time in seconds that mariadb-backup should wait for " + "BACKUP STAGE START to complete. BACKUP STAGE START has to wait until all " + "currently running queries using explicit LOCK TABLES have ended. " + "If there are still such queries when the timeout expires, mariadb-backup " + "terminates with an error. Default is 0, in which case mariadb-backup waits " + "indefinitely for BACKUP STAGE START to finish.", + (uchar*) &opt_lock_wait_timeout, (uchar*) &opt_lock_wait_timeout, + 0, GET_UINT, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"ftwrl-wait-threshold", OPT_LOCK_WAIT_THRESHOLD, - "This option specifies the query run time threshold which is used by " - XB_TOOL_NAME " to detect long-running queries with a non-zero value " - "of --ftwrl-wait-timeout. FTWRL is not started until such " - "long-running queries exist. This option has no effect if " - "--ftwrl-wait-timeout is 0. Default value is 60 seconds.", - (uchar *) &opt_lock_wait_threshold, (uchar *) &opt_lock_wait_threshold, 0, - GET_UINT, REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, - + "Old disabled option which has no effect anymore (not needed " + "with BACKUP LOCKS)", + (uchar*) 0, (uchar*) 0, 0, GET_UINT, + REQUIRED_ARG, 60, 0, 0, 0, 0, 0}, {"safe-slave-backup-timeout", OPT_SAFE_SLAVE_BACKUP_TIMEOUT, "How many seconds --safe-slave-backup should wait for " @@ -1598,7 +1810,7 @@ struct my_option xb_server_options[] = {"parallel", OPT_XTRA_PARALLEL, "Number of threads to use for parallel datafiles transfer. " "The default value is 1.", - (G_PTR*) &xtrabackup_parallel, (G_PTR*) &xtrabackup_parallel, 0, GET_INT, + (G_PTR*) &xtrabackup_parallel, (G_PTR*) &xtrabackup_parallel, 0, GET_UINT, REQUIRED_ARG, 1, 1, INT_MAX, 0, 0, 0}, {"extended_validation", OPT_XTRA_EXTENDED_VALIDATION, @@ -1767,10 +1979,10 @@ struct my_option xb_server_options[] = 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0}, {"plugin-dir", OPT_PLUGIN_DIR, - "Server plugin directory. Used to load plugins during 'prepare' phase." - "Has no effect in the 'backup' phase (plugin directory during backup is the same as server's)", - &xb_plugin_dir, &xb_plugin_dir, - 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, + "Server plugin directory. Used to load plugins during 'prepare' phase." 
+ "Has no effect in the 'backup' phase (plugin directory during backup is the same as server's)", + &xb_plugin_dir, &xb_plugin_dir, + 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0 }, {"aria_log_dir_path", OPT_ARIA_LOG_DIR_PATH, "Path to individual files and their sizes.", @@ -1898,7 +2110,7 @@ static int prepare_export() IF_WIN("\"","") "\"%s\" --mysqld \"%s\"" " --defaults-extra-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=." " --innodb --innodb-fast-shutdown=0 --loose-partition" - " --innodb-buffer-pool-size=%llu" + " --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu" " --console --skip-log-error --skip-log-bin --bootstrap %s< " BOOTSTRAP_FILENAME IF_WIN("\"",""), mariabackup_exe, @@ -1912,7 +2124,7 @@ static int prepare_export() IF_WIN("\"","") "\"%s\" --mysqld" " --defaults-file=./backup-my.cnf --defaults-group-suffix=%s --datadir=." " --innodb --innodb-fast-shutdown=0 --loose-partition" - " --innodb-buffer-pool-size=%llu" + " --innodb_purge_rseg_truncate_frequency=1 --innodb-buffer-pool-size=%llu" " --console --log-error= --skip-log-bin --bootstrap %s< " BOOTSTRAP_FILENAME IF_WIN("\"",""), mariabackup_exe, @@ -1961,7 +2173,8 @@ static void usage(void) puts("Open source backup tool for InnoDB and XtraDB\n\ \n\ Copyright (C) 2009-2015 Percona LLC and/or its affiliates.\n\ -Portions Copyright (C) 2000, 2011, MySQL AB & Innobase Oy. All Rights Reserved.\n\ +Portions Copyright (C) 2000, 2011, MySQL AB & Innobase Oy.\n\ +Portions Copyright (C) 2017-2023 MariaDB Corporation / MariaDB Plc.\n\ \n\ This program is free software; you can redistribute it and/or\n\ modify it under the terms of the GNU General Public License\n\ @@ -2129,6 +2342,11 @@ xb_get_one_option(const struct my_option *opt, } } break; + case OPT_RSYNC: + case OPT_NO_BACKUP_LOCKS: + if (my_handle_options_init_variables) + fprintf(stderr, "Obsolete option: %s. Ignored\n", opt->name); + break; #define MYSQL_CLIENT #include "sslopt-case.h" #undef MYSQL_CLIENT @@ -3345,50 +3563,22 @@ To use this facility, you need to 3. start mariabackup with --dbug=+d,debug_mariabackup_events */ void dbug_mariabackup_event(const char *event, - const fil_space_t::name_type key) + const fil_space_t::name_type key, + bool need_lock) { + static std::mutex dbug_mariabackup_event_mutex; char *sql = dbug_mariabackup_get_val(event, key); if (sql && *sql) { msg("dbug_mariabackup_event : executing '%s'", sql); - xb_mysql_query(mysql_connection, sql, false, true); - } + if (need_lock) { + std::lock_guard lock(dbug_mariabackup_event_mutex); + xb_mysql_query(mysql_connection, sql, false, true); + } else + xb_mysql_query(mysql_connection, sql, false, true); + } } #endif // DBUG_OFF -/** Datafiles copying thread.*/ -static void data_copy_thread_func(data_thread_ctxt_t *ctxt) /* thread context */ -{ - uint num = ctxt->num; - fil_node_t* node; - ut_ad(ctxt->corrupted_pages); - - /* - Initialize mysys thread-specific memory so we can - use mysys functions in this thread. - */ - my_thread_init(); - - while ((node = datafiles_iter_next(ctxt->it)) != NULL) { - DBUG_MARIABACKUP_EVENT("before_copy", node->space->name()); - DBUG_EXECUTE_FOR_KEY("wait_innodb_redo_before_copy", - node->space->name(), - backup_wait_for_lsn(get_current_lsn(mysql_connection));); - /* copy the datafile */ - if (xtrabackup_copy_datafile(ctxt->datasinks->m_data, - ctxt->datasinks->m_meta, node, num, NULL, - xtrabackup_incremental ? 
wf_incremental : wf_write_through, - *ctxt->corrupted_pages)) - die("failed to copy datafile."); - - DBUG_MARIABACKUP_EVENT("after_copy", node->space->name()); - } - - pthread_mutex_lock(ctxt->count_mutex); - (*ctxt->count)--; - pthread_mutex_unlock(ctxt->count_mutex); - - my_thread_end(); -} /************************************************************************ Initialize the appropriate datasink(s). Both local backups and streaming in the @@ -3543,6 +3733,11 @@ static void xb_load_single_table_tablespace(const char *dirname, } if (file->open_read_only(true) != DB_SUCCESS) { + // Ignore FTS tables, as they can be removed for intermediate tables, + // this code must be executed under stronger or equal to BLOCK_DDL lock, + // so there must not be errors for non-intermediate FTS tables. + if (check_if_fts_table(filname)) + return; die("Can't open datafile %s", name); } @@ -4548,7 +4743,6 @@ bool Backup_datasinks::backup_low() if (failed_ids.size() > 0) { return false; } - if (!xtrabackup_incremental) { safe_strcpy(metadata_type, sizeof(metadata_type), "full-backuped"); @@ -4587,16 +4781,441 @@ bool Backup_datasinks::backup_low() return true; } +class InnodbDataCopier { +public: + InnodbDataCopier(Backup_datasinks &backup_datasinks, + CorruptedPages &corrupted_pages, + ThreadPool &thread_pool) : + m_backup_datasinks(backup_datasinks), + m_corrupted_pages(corrupted_pages), + m_tasks(thread_pool) {} + + ~InnodbDataCopier() { + DBUG_ASSERT(m_tasks.is_finished()); + } + + bool start() { + DBUG_ASSERT(m_tasks.is_finished()); + m_tasks.push_task( + std::bind(&InnodbDataCopier::scan_job, this, std::placeholders::_1)); + return true; + } + + bool wait_for_finish() { + return m_tasks.wait_for_finish(); + } + +private: + void scan_job(unsigned thread_num) { + datafiles_iter_t it; + fil_node_t* node; + while ((node = datafiles_iter_next(&it)) != nullptr) { + m_tasks.push_task( + std::bind(&InnodbDataCopier::copy_job, this, node, + std::placeholders::_1)); + } + m_tasks.finish_task(1); + } + + void copy_job(fil_node_t *node, unsigned thread_num) { + DBUG_ASSERT(node); + // TODO: this came from the old code, where it was not thread-safe + // too, use separate mysql connection per thread here + DBUG_MARIABACKUP_EVENT("before_copy", node->space->name()); + DBUG_EXECUTE_FOR_KEY("wait_innodb_redo_before_copy", + node->space->name(), + backup_wait_for_lsn( + get_current_lsn(mysql_connection));); + /* copy the datafile */ + if(xtrabackup_copy_datafile(m_backup_datasinks.m_data, + m_backup_datasinks.m_meta, + node, thread_num, NULL, + xtrabackup_incremental + ? 
wf_incremental : wf_write_through, + m_corrupted_pages)) + die("mariabackup: Error: failed to copy datafile."); + // TODO: this came from the old code, where it was not thread-safe + // too, use separate mysql connection per thread here + DBUG_MARIABACKUP_EVENT("after_copy", node->space->name()); + m_tasks.finish_task(1); + } + + Backup_datasinks &m_backup_datasinks; + CorruptedPages &m_corrupted_pages; + TasksGroup m_tasks; +}; + + +class BackupStages { + + public: + + BackupStages(ds_ctxt_t *ds_data) : + m_bs_con(nullptr), + m_aria_backup(fil_path_to_mysql_datadir, + aria_log_dir_path, + ds_data, m_con_pool, m_thread_pool), + m_common_backup(fil_path_to_mysql_datadir, ds_data, m_con_pool, + m_thread_pool) {} + + ~BackupStages() { destroy(); } + + bool init() { + if ((m_bs_con = xb_mysql_connect()) == nullptr) + return false; + + while(m_con_pool.size() < xtrabackup_parallel) { + MYSQL *con = xb_mysql_connect(); + if (con == nullptr) + return false; + m_con_pool.push_back(con); + } + + if (!m_thread_pool.start(xtrabackup_parallel)) + return false; + if (!m_aria_backup.init()) + return false; + m_aria_backup.set_post_copy_table_hook( + std::bind(&BackupStages::store_table_version, this, + std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)); + m_common_backup.set_post_copy_table_hook( + std::bind(&BackupStages::store_table_version, this, + std::placeholders::_1, std::placeholders::_2, std::placeholders::_3)); + return true; + } + + void destroy() { + m_thread_pool.stop(); + while (!m_con_pool.empty()) { + MYSQL *con = m_con_pool.back(); + m_con_pool.pop_back(); + mysql_close(con); + } + if (m_bs_con) + mysql_close(m_bs_con); + m_bs_con = nullptr; + } + + bool stage_start(Backup_datasinks &backup_datasinks, + CorruptedPages &corrupted_pages) { + msg("BACKUP STAGE START"); + if (!opt_no_lock) { + if (opt_safe_slave_backup) { + if (!wait_for_safe_slave(mysql_connection)) { + return(false); + } + } + + history_lock_time = time(NULL); + + if (!lock_for_backup_stage_start(m_bs_con)) { + msg("Error on BACKUP STAGE START query execution"); + return(false); + } + } + + InnodbDataCopier innodb_data_copier(backup_datasinks, + corrupted_pages, + m_thread_pool); + // Start InnoDB data files copy in background + if (!innodb_data_copier.start()) { + msg("Error on starting InnoDB data files backup"); + return false; + } + // Start online non-stats-log Aria tables copying in background + if (!m_aria_backup.start(opt_no_lock)) { + msg("Error on starting Aria data files backup"); + innodb_data_copier.wait_for_finish(); + return false; + } + + // Wait for all innodb data files copy finish + if(!innodb_data_copier.wait_for_finish()) { + msg("InnoDB data files backup process is finished with error"); + return false; + } + // Wait for online non-stats-log Aria tables copy finish + if (!m_aria_backup.wait_for_finish()) { + msg("Aria data files backup process is finished with error"); + return false; + } + + DBUG_MARIABACKUP_EVENT_LOCK("after_aria_background", {}); + + return true; + } + + bool stage_flush() { + msg("BACKUP STAGE FLUSH"); + if (!opt_no_lock && !lock_for_backup_stage_flush(m_bs_con)) { + msg("Error on BACKUP STAGE FLUSH query execution"); + return false; + } + auto tables_in_use = get_tables_in_use(mysql_connection); + // Copy non-stats-log non-in-use tables of non-InnoDB-Aria-RocksDB engines + // in background + if (!m_common_backup.scan(tables_in_use, + &m_copied_common_tables, opt_no_lock, true)) { + msg("Error on scan data directory for common engines"); + return false; + } + 
// Copy Aria offline non-stats-log non-in-use tables in background + if (!m_aria_backup.copy_offline_tables(&tables_in_use, opt_no_lock, + false)) { + msg("Error on start Aria tables backup"); + return false; + } + + if (!m_aria_backup.copy_log_tail()) { + msg("Error on Aria log tail copy"); + return false; + }; + + // Wait for Aria tables copy finish + if (!m_aria_backup.wait_for_finish()) { + msg("Aria data files backup process is finished with error"); + return false; + } + // Wait for non-InnoDB-Aria-RocksDB engines copy finish + if (!m_common_backup.wait_for_finish()) { + msg("Data files backup process is finished with error"); + return false; + } + + DBUG_EXECUTE_IF("emulate_ddl_on_intermediate_table", + dbug_emulate_ddl_on_intermediate_table_thread = + dbug_start_query_thread( + "SET debug_sync='copy_data_between_tables_after_set_backup_lock " + "SIGNAL copy_started';" + "SET debug_sync='copy_data_between_tables_before_reset_backup_lock " + "SIGNAL before_backup_lock_reset WAIT_FOR backup_lock_reset';" + "SET debug_sync='alter_table_after_temp_table_drop " + "SIGNAL temp_table_dropped';" + "SET SESSION lock_wait_timeout = 1;" + "ALTER TABLE test.t1 ADD COLUMN col1_copy INT, ALGORITHM = COPY;", + NULL, 0, 0); + xb_mysql_query(mysql_connection, + "SET debug_sync='now WAIT_FOR copy_started'", false, true); + ); + + return true; + } + + bool stage_block_ddl(Backup_datasinks &backup_datasinks, + CorruptedPages &corrupted_pages) { + if (!opt_no_lock) { + if (!lock_for_backup_stage_block_ddl(m_bs_con)) { + msg("BACKUP STAGE BLOCK_DDL"); + return false; + } + if (have_galera_enabled) + { + xb_mysql_query(mysql_connection, "SET SESSION wsrep_sync_wait=0", false); + } + } + + ulonglong server_lsn_after_lock = get_current_lsn(mysql_connection); + + // Copy the rest of non-stats-log non-InnoDB-Aria-RocksDB tables + // Do not execute BACKUP LOCK under BLOCK_DDL stage + if (!m_common_backup.scan(m_copied_common_tables, &m_copied_common_tables, + true, false)) { + msg("Error on scan data directory for common engines"); + return false; + } + // Copy log tables tail + if (!m_common_backup.copy_log_tables(false)) { + msg("Error on copy system tables"); + return false; + } + + // Copy the rest of non-stats Aria tables in background + if (!m_aria_backup.copy_offline_tables(nullptr, true, false)) { + msg("Error on start Aria tables backup"); + return false; + } + + // Copy .frm, .trn and other files + if (!backup_files(backup_datasinks.m_data, + fil_path_to_mysql_datadir)) { + msg("Backup files error"); + return false; + } + + msg("Waiting for log copy thread to read lsn %llu", + server_lsn_after_lock); + backup_wait_for_lsn(server_lsn_after_lock); + corrupted_pages.backup_fix_ddl(backup_datasinks.m_data, + backup_datasinks.m_meta); + + if (!m_aria_backup.copy_log_tail()) { + msg("Error on Aria log tail copy"); + return false; + } + + // Wait for Aria tables copy finish + if (!m_aria_backup.wait_for_finish()) { + msg("Aria data files backup process is finished with error"); + return false; + } + // Wait for non-InnoDB-Aria-RocksDB engines copy finish + if (!m_common_backup.wait_for_finish()) { + msg("Data files backup process is finished with error"); + return false; + } + + ddl_log::backup(fil_path_to_mysql_datadir, + backup_datasinks.m_data, m_tables); + + DBUG_MARIABACKUP_EVENT_LOCK("after_stage_block_ddl", {}); + + return true; + } + + bool stage_block_commit(Backup_datasinks &backup_datasinks) { + msg("BACKUP STAGE BLOCK_COMMIT"); + if (!opt_no_lock && !lock_for_backup_stage_commit(m_bs_con)) { + 
msg("Error on BACKUP STAGE BLOCK_COMMIT query execution"); + return false; + } + + // Copy log tables tail + if (!m_common_backup.copy_log_tables(true)) { + msg("Error on copy log tables"); + return false; + } + + // Copy stats tables + if (!m_common_backup.copy_stats_tables()) { + msg("Error on copy stats tables"); + return false; + } + + // Copy system Aria files + if (!m_aria_backup.finalize()) { + msg("Error on finalize Aria tables backup"); + return false; + } + + if (!m_common_backup.wait_for_finish()) { + msg("Error on finish common engines backup"); + return false; + } + + if (!m_common_backup.close_log_tables()) { + msg("Error on close log tables"); + return false; + } + + if (!backup_files_from_datadir(backup_datasinks.m_data, + fil_path_to_mysql_datadir, + "aws-kms-key")) { + msg("Error on root data dir files backup"); + return false; + } + + if (has_rocksdb_plugin()) { + rocksdb_create_checkpoint(); + } + + // There is no need to stop slave thread before coping non-Innodb data when + // --no-lock option is used because --no-lock option requires that no DDL or + // DML to non-transaction tables can occur. + if (opt_no_lock) { + if (opt_safe_slave_backup) { + if (!wait_for_safe_slave(mysql_connection)) { + return(false); + } + } + } + + if (opt_slave_info) { + if (!write_slave_info(backup_datasinks.m_data, + mysql_connection)) { + return(false); + } + } + + /* The only reason why Galera/binlog info is written before + wait_for_ibbackup_log_copy_finish() is that after that call the xtrabackup + binary will start streamig a temporary copy of REDO log to stdout and + thus, any streaming from innobackupex would interfere. The only way to + avoid that is to have a single process, i.e. merge innobackupex and + xtrabackup. */ + if (opt_galera_info) { + if (!write_galera_info(backup_datasinks.m_data, + mysql_connection)) { + return(false); + } + } + + bool with_binlogs = opt_binlog_info == BINLOG_INFO_ON; + + if (with_binlogs || opt_galera_info) { + if (!write_binlog_info(backup_datasinks.m_data, + mysql_connection)) { + return(false); + } + } + + if (!opt_no_lock) { + msg("Executing FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS..."); + xb_mysql_query(mysql_connection, + "FLUSH NO_WRITE_TO_BINLOG ENGINE LOGS", false); + } + + return backup_datasinks.backup_low(); + } + + bool stage_end(Backup_datasinks &backup_datasinks) { + msg("BACKUP STAGE END"); + /* release all locks */ + if (!opt_no_lock) { + unlock_all(m_bs_con); + history_lock_time = 0; + } else { + history_lock_time = time(NULL) - history_lock_time; + } + backup_release(); + DBUG_EXECUTE_IF("check_mdl_lock_works", + pthread_join(dbug_alter_thread, nullptr); + ); + + DBUG_EXECUTE_IF("emulate_ddl_on_intermediate_table", + pthread_join( + dbug_emulate_ddl_on_intermediate_table_thread, + nullptr); + ); + + backup_finish(backup_datasinks.m_data); + return true; + } + + void store_table_version( + std::string db, std::string table, std::string table_version) { + auto tk = table_key(db, table); + std::lock_guard lock(m_tables_mutex); + m_tables[std::move(tk)] = std::move(table_version); + } + + private: + Backup_datasinks *backup_datasinks; + MYSQL *m_bs_con; + ThreadPool m_thread_pool; + std::vector m_con_pool; + std::mutex m_tables_mutex; + ddl_log::tables_t m_tables; + aria::Backup m_aria_backup; + common_engine::Backup m_common_backup; + std::unordered_set m_copied_common_tables; +}; + /** Implement --backup @return whether the operation succeeded */ static bool xtrabackup_backup_func() { MY_STAT stat_info; - uint i; - uint count; - 
pthread_mutex_t count_mutex; CorruptedPages corrupted_pages; - data_thread_ctxt_t *data_threads; Backup_datasinks backup_datasinks; pthread_cond_init(&scanned_lsn_cond, NULL); @@ -4612,7 +5231,7 @@ static bool xtrabackup_backup_func() return(false); } msg("cd to %s", mysql_real_data_home); - xb_plugin_backup_init(mysql_connection); + encryption_plugin_backup_init(mysql_connection); msg("open files limit requested %lu, set to %lu", xb_open_files_limit, xb_set_max_open_files(xb_open_files_limit)); @@ -4653,22 +5272,6 @@ fail: return(false); } - if (srv_buf_pool_size >= 1000 * 1024 * 1024) { - /* Here we still have srv_pool_size counted - in kilobytes (in 4.0 this was in bytes) - srv_boot() converts the value to - pages; if buffer pool is less than 1000 MB, - assume fewer threads. */ - srv_max_n_threads = 50000; - - } else if (srv_buf_pool_size >= 8 * 1024 * 1024) { - - srv_max_n_threads = 10000; - } else { - srv_max_n_threads = 1000; /* saves several MB of memory, - especially in 64-bit - computers */ - } srv_thread_pool_init(); /* Reset the system variables in the recovery module. */ trx_pool_init(); @@ -4688,9 +5291,10 @@ fail: } /* get current checkpoint_lsn */ { + log_sys.latch.wr_lock(SRW_LOCK_CALL); mysql_mutex_lock(&recv_sys.mutex); - dberr_t err = recv_sys.find_checkpoint(); + log_sys.latch.wr_unlock(); if (err != DB_SUCCESS) { msg("Error: cannot read redo log header"); @@ -4797,67 +5401,35 @@ fail: mdl_lock_all(); DBUG_EXECUTE_IF("check_mdl_lock_works", - dbug_start_query_thread("ALTER TABLE test.t ADD COLUMN mdl_lock_column int", + dbug_alter_thread = + dbug_start_query_thread("ALTER TABLE test.t ADD COLUMN mdl_lock_column int", "Waiting for table metadata lock", 0, 0);); } - datafiles_iter_t it; + BackupStages stages(backup_datasinks.m_data); - /* Create data copying threads */ - data_threads = (data_thread_ctxt_t *) - malloc(sizeof(data_thread_ctxt_t) * xtrabackup_parallel); - count = xtrabackup_parallel; - pthread_mutex_init(&count_mutex, NULL); - - for (i = 0; i < (uint) xtrabackup_parallel; i++) { - data_threads[i].it = ⁢ - data_threads[i].num = i+1; - data_threads[i].count = &count; - data_threads[i].count_mutex = &count_mutex; - data_threads[i].corrupted_pages = &corrupted_pages; - data_threads[i].datasinks= &backup_datasinks; - std::thread(data_copy_thread_func, data_threads + i).detach(); - } - - /* Wait for threads to exit */ - while (1) { - std::this_thread::sleep_for(std::chrono::seconds(1)); - pthread_mutex_lock(&count_mutex); - bool stop = count == 0; - pthread_mutex_unlock(&count_mutex); - if (stop) { - break; - } - } - - pthread_mutex_destroy(&count_mutex); - free(data_threads); - - DBUG_ASSERT(backup_datasinks.m_data); - DBUG_ASSERT(backup_datasinks.m_meta); - bool ok = backup_start(backup_datasinks.m_data, - backup_datasinks.m_meta, corrupted_pages); - - if (ok) { - ok = backup_datasinks.backup_low(); - - backup_release(); - - DBUG_EXECUTE_IF("check_mdl_lock_works", - pthread_join(dbug_alter_thread, nullptr);); - - if (ok) { - backup_finish(backup_datasinks.m_data); - } - } - - if (opt_log_innodb_page_corruption) - ok = corrupted_pages.print_to_file(backup_datasinks.m_data, - MB_CORRUPTED_PAGES_FILE); - - if (!ok) { + if (!stages.init()) + goto fail; + + if (!stages.stage_start(backup_datasinks, corrupted_pages)) + goto fail; + + if (!stages.stage_flush()) + goto fail; + + if (!stages.stage_block_ddl(backup_datasinks, corrupted_pages)) + goto fail; + + if (!stages.stage_block_commit(backup_datasinks)) + goto fail; + + if (!stages.stage_end(backup_datasinks)) + 
goto fail; + + if (opt_log_innodb_page_corruption + && !corrupted_pages.print_to_file(backup_datasinks.m_data, + MB_CORRUPTED_PAGES_FILE)) goto fail; - } backup_datasinks.destroy(); @@ -4920,6 +5492,12 @@ void CorruptedPages::backup_fix_ddl(ds_ctxt *ds_data, ds_ctxt *ds_meta) DBUG_MARIABACKUP_EVENT("backup_fix_ddl", {}); + DBUG_EXECUTE_IF("emulate_ddl_on_intermediate_table", + xb_mysql_query(mysql_connection, + "SET debug_sync='now SIGNAL backup_lock_reset " + "WAIT_FOR temp_table_dropped'", false, true); + ); + for (space_id_to_name_t::iterator iter = ddl_tracker.tables_in_backup.begin(); iter != ddl_tracker.tables_in_backup.end(); iter++) { @@ -5064,6 +5642,7 @@ void CorruptedPages::backup_fix_ddl(ds_ctxt *ds_data, ds_ctxt *ds_meta) } } + /* ================= prepare ================= */ /*********************************************************************** @@ -5550,7 +6129,7 @@ std::string change_extension(std::string filename, std::string new_ext) { } -static void rename_file(const char *from,const char *to) { +void rename_file(const char *from,const char *to) { msg("Renaming %s to %s\n", from, to); if (my_rename(from, to, MY_WME)) { die("Can't rename %s to %s errno %d", from, to, errno); @@ -5572,7 +6151,7 @@ typedef ibool (*handle_datadir_entry_func_t)( void* arg); /*! /dev/null 2>&1") == 0)); - if (!have_rsync) - { - msg("Error: rsync executable not found, cannot run backup with --rsync\n"); - return false; - } - } - n_mixed_options = 0; if (opt_decompress) { @@ -6475,6 +7040,7 @@ xb_init() if (opt_check_privileges && !check_all_privileges()) { return(false); } + history_start_time = time(NULL); } diff --git a/extra/mariabackup/xtrabackup.h b/extra/mariabackup/xtrabackup.h index 14036b7a92c..38d7e5fdd03 100644 --- a/extra/mariabackup/xtrabackup.h +++ b/extra/mariabackup/xtrabackup.h @@ -26,6 +26,13 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA #include "xbstream.h" #include "fil0fil.h" #include +#include "handler.h" + +#include +#include +#include +#include + #define XB_TOOL_NAME "mariadb-backup" #define XB_HISTORY_TABLE "mysql.mariadb_backup_history" @@ -110,7 +117,7 @@ extern my_bool xtrabackup_decrypt_decompress; extern char *innobase_data_file_path; extern longlong innobase_page_size; -extern int xtrabackup_parallel; +extern uint xtrabackup_parallel; extern my_bool xb_close_files; extern const char *xtrabackup_compress_alg; @@ -129,7 +136,6 @@ extern my_bool opt_galera_info; extern my_bool opt_slave_info; extern my_bool opt_no_lock; extern my_bool opt_safe_slave_backup; -extern my_bool opt_rsync; extern my_bool opt_force_non_empty_dirs; extern my_bool opt_noversioncheck; extern my_bool opt_no_backup_locks; @@ -286,15 +292,40 @@ fil_file_readdir_next_file( os_file_stat_t* info); /*!< in/out: buffer where the info is returned */ -#ifndef DBUG_OFF -#include -extern void dbug_mariabackup_event(const char *event, - const fil_space_t::name_type key); +const char *convert_dst(const char *dst); -#define DBUG_MARIABACKUP_EVENT(A, B) \ - DBUG_EXECUTE_IF("mariabackup_events", dbug_mariabackup_event(A, B);) -#else -#define DBUG_MARIABACKUP_EVENT(A, B) /* empty */ -#endif // DBUG_OFF +std::string get_table_version_from_image(const std::vector &frm_image); +std::pair + get_table_engine_from_image(const std::vector &frm_image); +std::string read_table_version_id(File file); +std::string convert_tablename_to_filepath( + const char *data_dir_path, const std::string &db, const std::string &table); + +std::tuple +convert_filepath_to_tablename(const char 
*filepath); + +typedef std::string table_key_t; + +inline table_key_t table_key(const std::string &db, const std::string &table) { + return std::string(db).append(".").append(table); +}; + +inline table_key_t table_key(const char *db, const char *table) { + return std::string(db).append(".").append(table); +}; + +typedef std::function + post_copy_table_hook_t; + +my_bool +check_if_skip_table( +/******************/ + const char* name); /*!< in: path to the table */ + +bool is_log_table(const char *dbname, const char *tablename); +bool is_stats_table(const char *dbname, const char *tablename); + +extern my_bool xtrabackup_copy_back; +extern my_bool xtrabackup_move_back; #endif /* XB_XTRABACKUP_H */ diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt index 271e76b8c45..e3f8da21f76 100644 --- a/extra/wolfssl/CMakeLists.txt +++ b/extra/wolfssl/CMakeLists.txt @@ -1,86 +1,57 @@ IF(MSVC_INTEL) PROJECT(wolfssl C ASM_MASM) ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64") - PROJECT(wolfssl C ASM) + PROJECT(wolfssl C ASM) ELSE() PROJECT(wolfssl C) ENDIF() IF(CMAKE_SIZEOF_VOID_P MATCHES 8) -IF(MSVC_INTEL) +IF(MSVC_INTEL AND NOT (CMAKE_C_COMPILER_ID MATCHES Clang)) SET(WOLFSSL_INTELASM ON) - SET(WOLFSSL_X86_64_BUILD 1) SET(HAVE_INTEL_RDSEED 1) SET(HAVE_INTEL_RDRAND 1) -ELSEIF(CMAKE_ASM_COMPILER_ID MATCHES "Clang" AND CMAKE_VERSION VERSION_LESS 3.16) - - # WolfSSL 5.5.4 bug workaround below does not work, due to some CMake bug ELSEIF(CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64|amd64") - SET(WOLFSSL_X86_64_BUILD 1) IF(CMAKE_C_COMPILER_ID MATCHES GNU AND CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9) MESSAGE_ONCE(NO_INTEL_ASSEMBLY "Disable Intel assembly for WolfSSL - compiler is too old") + ELSEIF(WITH_MSAN) + MESSAGE_ONCE(MSAN_CANT_HANDLE_IT "Disable Intel assembly for WolfSSL - MSAN can't handle it") ELSE() - IF(WITH_MSAN) - MESSAGE_ONCE(MSAN_CANT_HANDLE_IT - "Disable Intel assembly for WolfSSL - MSAN can't handle it") - ELSE() - MY_CHECK_C_COMPILER_FLAG(-maes) - MY_CHECK_C_COMPILER_FLAG(-msse4) - MY_CHECK_C_COMPILER_FLAG(-mpclmul) - IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul) - SET(WOLFSSL_INTELASM ON) + MY_CHECK_C_COMPILER_FLAG(-maes) + MY_CHECK_C_COMPILER_FLAG(-msse4) + MY_CHECK_C_COMPILER_FLAG(-mpclmul) + IF(have_C__maes AND have_C__msse4 AND have_C__mpclmul) + SET(WOLFSSL_INTELASM ON) + MY_CHECK_C_COMPILER_FLAG(-mrdrnd) + MY_CHECK_C_COMPILER_FLAG(-mrdseed) + IF(have_C__mrdrnd) + SET(HAVE_INTEL_RDRAND ON) + ENDIF() + IF(have_C__mrdseed) + SET(HAVE_INTEL_RDSEED ON) ENDIF() - ENDIF() - MY_CHECK_C_COMPILER_FLAG(-mrdrnd) - MY_CHECK_C_COMPILER_FLAG(-mrdseed) - IF(have_C__mrdrnd) - SET(HAVE_INTEL_RDRAND ON) - ENDIF() - IF(have_C__mrdseed) - SET(HAVE_INTEL_RDSEED ON) ENDIF() ENDIF() ENDIF() ENDIF() SET(WOLFSSL_SRCDIR ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/src) -ADD_DEFINITIONS(${SSL_DEFINES}) - -SET(WOLFSSL_SOURCES - ${WOLFSSL_SRCDIR}/crl.c - ${WOLFSSL_SRCDIR}/internal.c - ${WOLFSSL_SRCDIR}/keys.c - ${WOLFSSL_SRCDIR}/tls.c - ${WOLFSSL_SRCDIR}/wolfio.c - ${WOLFSSL_SRCDIR}/ocsp.c - ${WOLFSSL_SRCDIR}/ssl.c - ${WOLFSSL_SRCDIR}/tls13.c) - -ADD_DEFINITIONS(-DWOLFSSL_LIB -DBUILDING_WOLFSSL) - -INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl) -IF(MSVC) - # size_t to long truncation warning - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -wd4267 -wd4334 -wd4028 -wd4244") -ENDIF() - -ADD_CONVENIENCE_LIBRARY(wolfssl ${WOLFSSL_SOURCES}) - -# Workaround linker crash with older Ubuntu binutils -# e.g aborting at ../../bfd/merge.c line 873 in _bfd_merged_section_offset -IF(CMAKE_SYSTEM_NAME 
MATCHES "Linux") - STRING(REPLACE "-g " "-g1 " CMAKE_C_FLAGS_RELWITHDEBINFO - ${CMAKE_C_FLAGS_RELWITHDEBINFO}) - STRING(REPLACE "-g " "-g1 " CMAKE_C_FLAGS_DEBUG - ${CMAKE_C_FLAGS_DEBUG}) - STRING(REPLACE "-ggdb3 " " " CMAKE_C_FLAGS_RELWITHDEBINFO - ${CMAKE_C_FLAGS_RELWITHDEBINFO}) - STRING(REPLACE "-ggdb3 " " " CMAKE_C_FLAGS_DEBUG - ${CMAKE_C_FLAGS_DEBUG}) -ENDIF() - SET(WOLFCRYPT_SRCDIR ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl/wolfcrypt/src) -SET(WOLFCRYPT_SOURCES +ADD_DEFINITIONS(${SSL_DEFINES}) +ADD_DEFINITIONS(-DWOLFSSL_LIB -DBUILDING_WOLFSSL) +ADD_DEFINITIONS(-DWOLFSSL_SP_4096) +INCLUDE_DIRECTORIES(BEFORE ${CMAKE_CURRENT_SOURCE_DIR}/wolfssl) +INCLUDE_DIRECTORIES(${SSL_INCLUDE_DIRS}) + +add_library(wolfssl STATIC +${WOLFSSL_SRCDIR}/crl.c +${WOLFSSL_SRCDIR}/internal.c +${WOLFSSL_SRCDIR}/keys.c +${WOLFSSL_SRCDIR}/tls.c +${WOLFSSL_SRCDIR}/wolfio.c +${WOLFSSL_SRCDIR}/ocsp.c +${WOLFSSL_SRCDIR}/ssl.c +${WOLFSSL_SRCDIR}/tls13.c ${WOLFCRYPT_SRCDIR}/aes.c ${WOLFCRYPT_SRCDIR}/arc4.c ${WOLFCRYPT_SRCDIR}/asn.c @@ -110,69 +81,56 @@ ${WOLFCRYPT_SRCDIR}/wc_encrypt.c ${WOLFCRYPT_SRCDIR}/hash.c ${WOLFCRYPT_SRCDIR}/wolfmath.c ${WOLFCRYPT_SRCDIR}/kdf.c +${WOLFCRYPT_SRCDIR}/sp_int.c +${WOLFCRYPT_SRCDIR}/sp_c32.c +${WOLFCRYPT_SRCDIR}/sp_c64.c ) -# Use fastmath large number math library. -IF(NOT (MSVC AND CMAKE_C_COMPILER_ID MATCHES Clang)) - # Can't use clang-cl with WOLFSSL_FASTMATH - # due to https://bugs.llvm.org/show_bug.cgi?id=25305 - SET(WOLFSSL_FASTMATH 1) -ENDIF() - -IF(WOLFSSL_FASTMATH) - SET(USE_FAST_MATH 1) - SET(TFM_TIMING_RESISTANT 1) - # FP_MAX_BITS is set high solely to satisfy ssl_8k_key.test - # WolfSSL will use more stack space with it - SET(FP_MAX_BITS 16384) - SET(WOLFCRYPT_SOURCES ${WOLFCRYPT_SOURCES} ${WOLFCRYPT_SRCDIR}/tfm.c) - IF((CMAKE_SIZEOF_VOID_P MATCHES 4) AND (CMAKE_SYSTEM_PROCESSOR MATCHES "86") - AND (NOT MSVC)) - # Workaround https://github.com/wolfSSL/wolfssl/issues/4245 - # On 32bit Intel, to satisfy inline assembly's wish for free registers - # 1. use -fomit-frame-pointer - # 2. 
With GCC 4, additionally use -fno-PIC, which works on x86 - # (modern GCC has PIC optimizations, that make it unnecessary) - # The following assumes GCC or Clang - SET(TFM_COMPILE_FLAGS "-fomit-frame-pointer") - IF(CMAKE_C_COMPILER_VERSION VERSION_LESS "5") - SET(TFM_COMPILE_FLAGS "${TFM_COMPILE_FLAGS} -fno-PIC") - ENDIF() - SET_SOURCE_FILES_PROPERTIES(${WOLFCRYPT_SRCDIR}/tfm.c - PROPERTIES COMPILE_FLAGS ${TFM_COMPILE_FLAGS}) - ENDIF() -ELSE() - SET(WOLFSSL_SP_MATH_ALL 1) - SET(WOLFCRYPT_SOURCES ${WOLFCRYPT_SOURCES} ${WOLFCRYPT_SRCDIR}/sp_int.c) -ENDIF() - -IF(WOLFSSL_X86_64_BUILD) - LIST(APPEND WOLFCRYPT_SOURCES ${WOLFCRYPT_SRCDIR}/cpuid.c) - IF(MSVC) - SET(WOLFSSL_AESNI 1) - LIST(APPEND WOLFCRYPT_SOURCES - ${WOLFCRYPT_SRCDIR}/aes_asm.asm - ${WOLFCRYPT_SRCDIR}/aes_gcm_asm.asm) - IF(CMAKE_C_COMPILER_ID MATCHES Clang) - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -maes -msse4.2 -mpclmul -mrdrnd -mrdseed") - ENDIF() - ELSEIF(WOLFSSL_INTELASM) - SET(WOLFSSL_AESNI 1) - SET(USE_INTEL_SPEEDUP 1) - LIST(APPEND WOLFCRYPT_SOURCES +# Optimizations, assembly +if(WOLFSSL_INTELASM) + set(WOLFSSL_X86_64_BUILD 1) + set(WOLFSSL_SP_X86_64 1) + set(WOLFSSL_SP_X86_64_ASM 1) + set(WOLFSSL_AESNI 1) + target_sources(wolfssl PRIVATE + ${WOLFCRYPT_SRCDIR}/cpuid.c + ${WOLFCRYPT_SRCDIR}/sp_x86_64.c + ) + if(MSVC_INTEL) + target_sources(wolfssl PRIVATE + ${WOLFCRYPT_SRCDIR}/aes_asm.asm + ${WOLFCRYPT_SRCDIR}/aes_gcm_asm.asm + ${WOLFCRYPT_SRCDIR}/sp_x86_64_asm.asm + ) + target_compile_options(wolfssl PRIVATE + $<$:-maes -msse4.2 -mpclmul -mrdrnd -mrdseed> + $<$:/Zi> + ) + else() + set(USE_INTEL_SPEEDUP 1) + target_sources(wolfssl PRIVATE ${WOLFCRYPT_SRCDIR}/aes_asm.S ${WOLFCRYPT_SRCDIR}/aes_gcm_asm.S ${WOLFCRYPT_SRCDIR}/chacha_asm.S ${WOLFCRYPT_SRCDIR}/poly1305_asm.S ${WOLFCRYPT_SRCDIR}/sha512_asm.S - ${WOLFCRYPT_SRCDIR}/sha256_asm.S) - ADD_DEFINITIONS(-maes -msse4.2 -mpclmul) - # WolfSSL 5.5.4 bug - user_settings.h not included into aes_asm.S - SET_PROPERTY(SOURCE ${WOLFCRYPT_SRCDIR}/aes_asm.S APPEND PROPERTY COMPILE_OPTIONS "-DWOLFSSL_X86_64_BUILD") - ENDIF() -ENDIF() + ${WOLFCRYPT_SRCDIR}/sha256_asm.S + ${WOLFCRYPT_SRCDIR}/sp_x86_64_asm.S + ) + target_compile_options(wolfssl PRIVATE -maes -msse4.2 -mpclmul) + # Workaround 5.5.4 bug (user_settings.h not included into aes_asm.S) + set_property(SOURCE ${WOLFCRYPT_SRCDIR}/aes_asm.S APPEND PROPERTY COMPILE_OPTIONS "-DWOLFSSL_X86_64_BUILD") + endif() +endif() + +# Silence some warnings +if(MSVC) + # truncation warnings + target_compile_options(wolfssl PRIVATE $<$:/wd4244>) + if(CMAKE_C_COMPILER_ID MATCHES Clang) + target_compile_options(wolfssl PRIVATE $<$:-Wno-incompatible-function-pointer-types>) + endif() +endif() CONFIGURE_FILE(user_settings.h.in user_settings.h) -INCLUDE_DIRECTORIES(${SSL_INCLUDE_DIRS}) -ADD_CONVENIENCE_LIBRARY(wolfcrypt ${WOLFCRYPT_SOURCES}) diff --git a/extra/wolfssl/user_settings.h.in b/extra/wolfssl/user_settings.h.in index baa64fcdfbe..489118b33b4 100644 --- a/extra/wolfssl/user_settings.h.in +++ b/extra/wolfssl/user_settings.h.in @@ -21,6 +21,7 @@ #define HAVE_AESGCM #define HAVE_CHACHA #define HAVE_POLY1305 +#define HAVE_THREAD_LS #define WOLFSSL_AES_COUNTER #define NO_WOLFSSL_STUB #define OPENSSL_ALL @@ -51,20 +52,19 @@ #define NO_RABBIT #define NO_RC4 -/* - FP_MAX_BITS is set high solely to satisfy ssl_8k_key.test - WolfSSL will use more stack space with it, with fastmath -*/ -#cmakedefine FP_MAX_BITS 16384 #define RSA_MAX_SIZE 8192 +#define WOLFSSL_SP_MATH_ALL +#define WOLFSSL_HAVE_SP_RSA +#ifndef WOLFSSL_SP_4096 +#define WOLFSSL_SP_4096 +#endif + 
#cmakedefine WOLFSSL_AESNI -#cmakedefine USE_FAST_MATH -#cmakedefine TFM_TIMING_RESISTANT #cmakedefine HAVE_INTEL_RDSEED #cmakedefine HAVE_INTEL_RDRAND #cmakedefine USE_INTEL_SPEEDUP -#cmakedefine USE_FAST_MATH #cmakedefine WOLFSSL_X86_64_BUILD -#cmakedefine WOLFSSL_SP_MATH_ALL +#cmakedefine WOLFSSL_SP_X86_64 +#cmakedefine WOLFSSL_SP_X86_64_ASM #endif /* WOLFSSL_USER_SETTINGS_H */ diff --git a/include/my_base.h b/include/my_base.h index a8014f819b4..b214ad045f3 100644 --- a/include/my_base.h +++ b/include/my_base.h @@ -49,6 +49,7 @@ #define HA_OPEN_MERGE_TABLE 2048U #define HA_OPEN_FOR_CREATE 4096U #define HA_OPEN_FOR_DROP (1U << 13) /* Open part of drop */ +#define HA_OPEN_GLOBAL_TMP_TABLE (1U << 14) /* TMP table used by replication */ /* Allow opening even if table is incompatible as this is for ALTER TABLE which @@ -377,6 +378,12 @@ enum ha_base_keytype { #define HA_CREATE_INTERNAL_TABLE 256U #define HA_PRESERVE_INSERT_ORDER 512U #define HA_CREATE_NO_ROLLBACK 1024U +/* + A temporary table that can be used by different threads, e.g. replication + threads. This flag ensures that memory is not allocated with THREAD_SPECIFIC, + as we do for other temporary tables. +*/ +#define HA_CREATE_GLOBAL_TMP_TABLE 2048U /* Flags used by start_bulk_insert */ diff --git a/include/my_bitmap.h b/include/my_bitmap.h index f88a6fe8d9d..d54670653f3 100644 --- a/include/my_bitmap.h +++ b/include/my_bitmap.h @@ -22,14 +22,15 @@ #include #include -typedef uint32 my_bitmap_map; +typedef ulonglong my_bitmap_map; typedef struct st_bitmap { my_bitmap_map *bitmap; my_bitmap_map *last_word_ptr; - my_bitmap_map last_word_mask; + my_bitmap_map last_bit_mask; uint32 n_bits; /* number of bits occupied by the above */ + my_bool bitmap_allocated; } MY_BITMAP; #ifdef __cplusplus @@ -39,7 +40,7 @@ extern "C" { /* Reset memory. 
Faster then doing a full bzero */ #define my_bitmap_clear(A) ((A)->bitmap= 0) -extern void create_last_word_mask(MY_BITMAP *map); +extern void create_last_bit_mask(MY_BITMAP *map); extern my_bool my_bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits); extern my_bool bitmap_is_clear_all(const MY_BITMAP *map); extern my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size); @@ -53,12 +54,12 @@ extern my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit); extern my_bool bitmap_fast_test_and_clear(MY_BITMAP *map, uint bitmap_bit); extern my_bool bitmap_union_is_set_all(const MY_BITMAP *map1, const MY_BITMAP *map2); -extern my_bool bitmap_exists_intersection(const MY_BITMAP **bitmap_array, +extern my_bool bitmap_exists_intersection(MY_BITMAP **bitmap_array, uint bitmap_count, uint start_bit, uint end_bit); extern uint bitmap_set_next(MY_BITMAP *map); -extern uint bitmap_get_first(const MY_BITMAP *map); +extern uint bitmap_get_first_clear(const MY_BITMAP *map); extern uint bitmap_get_first_set(const MY_BITMAP *map); extern uint bitmap_bits_set(const MY_BITMAP *map); extern uint bitmap_get_next_set(const MY_BITMAP *map, uint bitmap_bit); @@ -71,54 +72,70 @@ extern void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2); extern void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2); extern void bitmap_invert(MY_BITMAP *map); extern void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2); +/* Functions to export/import bitmaps to an architecture independent format */ +extern void bitmap_export(uchar *to, MY_BITMAP *map); +extern void bitmap_import(MY_BITMAP *map, uchar *from); + +#define my_bitmap_map_bytes sizeof(my_bitmap_map) +#define my_bitmap_map_bits (my_bitmap_map_bytes*8) +/* Size in bytes to store 'bits' number of bits */ +#define bitmap_buffer_size(bits) (MY_ALIGN((bits), my_bitmap_map_bits)/8) +#define my_bitmap_buffer_size(map) bitmap_buffer_size((map)->n_bits) +#define no_bytes_in_export_map(map) (((map)->n_bits + 7)/8) +#define no_words_in_map(map) (((map)->n_bits + (my_bitmap_map_bits-1))/my_bitmap_map_bits) /* Fast, not thread safe, bitmap functions */ -#define bitmap_buffer_size(bits) (((bits)+31)/32)*4 -#define no_bytes_in_map(map) (((map)->n_bits + 7)/8) -#define no_words_in_map(map) (((map)->n_bits + 31)/32) -#define bytes_word_aligned(bytes) (4*((bytes + 3)/4)) -/* The following functions must be compatible with create_last_word_mask()! */ +/* The following functions must be compatible with create_last_bit_mask()! 
*/ static inline void bitmap_set_bit(MY_BITMAP *map,uint bit) { - uchar *b= (uchar*) map->bitmap + bit / 8; DBUG_ASSERT(bit < map->n_bits); - *b= (uchar) (*b | 1U << (bit & 7)); + map->bitmap[bit/my_bitmap_map_bits]|= + (1ULL << (bit & (my_bitmap_map_bits-1))); } static inline void bitmap_flip_bit(MY_BITMAP *map,uint bit) { - uchar *b= (uchar*) map->bitmap + bit / 8; DBUG_ASSERT(bit < map->n_bits); - *b= (uchar) (*b ^ 1U << (bit & 7)); + map->bitmap[bit/my_bitmap_map_bits]^= + (1ULL << (bit & (my_bitmap_map_bits-1))); } static inline void bitmap_clear_bit(MY_BITMAP *map,uint bit) { - uchar *b= (uchar*) map->bitmap + bit / 8; DBUG_ASSERT(bit < map->n_bits); - *b= (uchar) (*b & ~(1U << (bit & 7))); + map->bitmap[bit/my_bitmap_map_bits]&= + ~(1ULL << (bit & (my_bitmap_map_bits-1))); } static inline uint bitmap_is_set(const MY_BITMAP *map,uint bit) { - const uchar *b= (const uchar*) map->bitmap + bit / 8; DBUG_ASSERT(bit < map->n_bits); - return !!(*b & (1U << (bit & 7))); + return (!!(map->bitmap[bit/my_bitmap_map_bits] & + (1ULL << (bit & (my_bitmap_map_bits-1))))); } +/* Return true if bitmaps are equal */ static inline my_bool bitmap_cmp(const MY_BITMAP *map1, const MY_BITMAP *map2) { - if (memcmp(map1->bitmap, map2->bitmap, 4*(no_words_in_map(map1)-1)) != 0) - return FALSE; - return ((*map1->last_word_ptr | map1->last_word_mask) == - (*map2->last_word_ptr | map2->last_word_mask)); + DBUG_ASSERT(map1->n_bits == map2->n_bits); + return (memcmp(map1->bitmap, map2->bitmap, + my_bitmap_buffer_size(map1)) == 0); } #define bitmap_clear_all(MAP) \ - { memset((MAP)->bitmap, 0, 4*no_words_in_map((MAP))); } -#define bitmap_set_all(MAP) \ - (memset((MAP)->bitmap, 0xFF, 4*no_words_in_map((MAP)))) + { memset((MAP)->bitmap, 0, my_bitmap_buffer_size(MAP)); } + +static inline void +bitmap_set_all(const MY_BITMAP *map) +{ + if (map->n_bits) + { + memset(map->bitmap, 0xFF, my_bitmap_map_bytes * (no_words_in_map(map)-1)); + DBUG_ASSERT(map->bitmap + no_words_in_map(map)-1 == map->last_word_ptr); + *map->last_word_ptr= ~map->last_bit_mask; + } +} #ifdef __cplusplus } diff --git a/include/my_global.h b/include/my_global.h index 952e65c9728..e6cbb933401 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -974,6 +974,7 @@ typedef struct st_mysql_lex_string LEX_STRING; #define SOCKET_ECONNRESET WSAECONNRESET #define SOCKET_ENFILE ENFILE #define SOCKET_EMFILE EMFILE +#define SOCKET_CLOSED EIO #else /* Unix */ #define socket_errno errno #define closesocket(A) close(A) @@ -983,6 +984,7 @@ typedef struct st_mysql_lex_string LEX_STRING; #define SOCKET_EADDRINUSE EADDRINUSE #define SOCKET_ETIMEDOUT ETIMEDOUT #define SOCKET_ECONNRESET ECONNRESET +#define SOCKET_CLOSED EIO #define SOCKET_ENFILE ENFILE #define SOCKET_EMFILE EMFILE #endif diff --git a/include/mysql_com.h b/include/mysql_com.h index 9775436fd37..06f7fb0ec7d 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -457,6 +457,7 @@ typedef struct st_net { my_bool thread_specific_malloc; unsigned char compress; my_bool pkt_nr_can_be_reset; + my_bool using_proxy_protocol; /* Pointer to query object in query cache, do not equal NULL (0) for queries in cache that have not stored its results yet diff --git a/include/violite.h b/include/violite.h index b823e62b2e1..f1e5c95a648 100644 --- a/include/violite.h +++ b/include/violite.h @@ -41,6 +41,13 @@ enum enum_vio_type VIO_TYPE_SSL /* see also vio_type_names[] */ }; + +enum enum_vio_state +{ + VIO_STATE_NOT_INITIALIZED, VIO_STATE_ACTIVE, VIO_STATE_SHUTDOWN, + VIO_STATE_CLOSED +}; + #define 
FIRST_VIO_TYPE VIO_CLOSED #define LAST_VIO_TYPE VIO_TYPE_SSL @@ -244,6 +251,7 @@ struct st_vio struct sockaddr_storage local; /* Local internet address */ struct sockaddr_storage remote; /* Remote internet address */ enum enum_vio_type type; /* Type of connection */ + enum enum_vio_state state; /* State of the connection */ const char *desc; /* String description */ char *read_buffer; /* buffer for vio_read_buff */ char *read_pos; /* start of unfetched data in the diff --git a/libmysqld/embedded_priv.h b/libmysqld/embedded_priv.h index 2262706217e..345c3ebd2d5 100644 --- a/libmysqld/embedded_priv.h +++ b/libmysqld/embedded_priv.h @@ -23,6 +23,8 @@ void init_embedded_mysql(MYSQL *mysql, ulong client_flag); void *create_embedded_thd(ulong client_flag); int check_embedded_connection(MYSQL *mysql, const char *db); void free_old_query(MYSQL *mysql); +THD *embedded_get_current_thd(); +void embedded_set_current_thd(THD *thd); extern MYSQL_METHODS embedded_methods; /* This one is used by embedded library to gather returning data */ diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 0be844fbd20..cf958c4e9a0 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -111,7 +111,7 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, MYSQL_STMT *stmt) { my_bool result= 1; - THD *thd=(THD *) mysql->thd; + THD *thd=(THD *) mysql->thd, *old_current_thd= current_thd; NET *net= &mysql->net; my_bool stmt_skip= stmt ? stmt->state != MYSQL_STMT_INIT_DONE : FALSE; @@ -122,6 +122,8 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, else { free_embedded_thd(mysql); + if (old_current_thd == thd) + old_current_thd= 0; thd= 0; } } @@ -179,6 +181,8 @@ emb_advanced_command(MYSQL *mysql, enum enum_server_command command, end: thd->reset_globals(); + if (old_current_thd) + old_current_thd->store_globals(); return result; } @@ -265,6 +269,7 @@ static my_bool emb_read_prepare_result(MYSQL *mysql, MYSQL_STMT *stmt) mysql->server_status|= SERVER_STATUS_IN_TRANS; stmt->fields= mysql->fields; + free_root(&stmt->mem_root, MYF(0)); stmt->mem_root= res->alloc; mysql->fields= NULL; my_free(res); @@ -374,6 +379,7 @@ int emb_read_binary_rows(MYSQL_STMT *stmt) set_stmt_errmsg(stmt, &stmt->mysql->net); return 1; } + free_root(&stmt->result.alloc, MYF(0)); stmt->result= *data; my_free(data); set_stmt_errmsg(stmt, &stmt->mysql->net); @@ -432,12 +438,15 @@ int emb_unbuffered_fetch(MYSQL *mysql, char **row) static void free_embedded_thd(MYSQL *mysql) { - THD *thd= (THD*)mysql->thd; + THD *thd= (THD*)mysql->thd, *org_current_thd= current_thd; server_threads.erase(thd); thd->clear_data_list(); thd->store_globals(); delete thd; - set_current_thd(nullptr); + if (thd == org_current_thd) + set_current_thd(nullptr); + else + set_current_thd(org_current_thd); mysql->thd=0; } @@ -727,6 +736,17 @@ void *create_embedded_thd(ulong client_flag) } +THD *embedded_get_current_thd() +{ + return current_thd; +} + +void embedded_set_current_thd(THD *thd) +{ + set_current_thd(thd); +} + + #ifdef NO_EMBEDDED_ACCESS_CHECKS static void emb_transfer_connect_attrs(MYSQL *mysql) diff --git a/libmysqld/libmysqld.c b/libmysqld/libmysqld.c index 14cca6e073f..444c1cfbca3 100644 --- a/libmysqld/libmysqld.c +++ b/libmysqld/libmysqld.c @@ -78,7 +78,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, uint port, const char *unix_socket,ulong client_flag) { char name_buff[USERNAME_LENGTH]; - + THD *org_current_thd= embedded_get_current_thd(); DBUG_ENTER("mysql_real_connect"); 
DBUG_PRINT("enter",("host: %s db: %s user: %s (libmysqld)", host ? host : "(Null)", @@ -200,6 +200,7 @@ mysql_real_connect(MYSQL *mysql,const char *host, const char *user, } } } + embedded_set_current_thd(org_current_thd); DBUG_PRINT("exit",("Mysql handler: %p", mysql)); DBUG_RETURN(mysql); @@ -216,6 +217,7 @@ error: mysql_close(mysql); mysql->free_me=free_me; } + embedded_set_current_thd(org_current_thd); DBUG_RETURN(0); } diff --git a/mysql-test/include/aria_log_control_load.inc b/mysql-test/include/aria_log_control_load.inc new file mode 100644 index 00000000000..34db3aeb4f5 --- /dev/null +++ b/mysql-test/include/aria_log_control_load.inc @@ -0,0 +1,11 @@ +# +# This file loads aria_log_control file into a user variable @aria_log_control. +# Set $ARIA_DATADIR before including this file +# + +--disable_query_log +--copy_file $ARIA_DATADIR/aria_log_control $MYSQLTEST_VARDIR/aria_log_control_tmp +--chmod 0777 $MYSQLTEST_VARDIR/aria_log_control_tmp +--eval SET @aria_log_control=(SELECT LOAD_FILE('$MYSQLTEST_VARDIR/aria_log_control_tmp')) +--remove_file $MYSQLTEST_VARDIR/aria_log_control_tmp +--enable_query_log diff --git a/mysql-test/include/check-testcase.test b/mysql-test/include/check-testcase.test index 1038ff30c11..8623964df25 100644 --- a/mysql-test/include/check-testcase.test +++ b/mysql-test/include/check-testcase.test @@ -32,7 +32,6 @@ if ($tmp) --echo Relay_Master_Log_File # --echo Slave_IO_Running No --echo Slave_SQL_Running No - --echo Replicate_Rewrite_DB # --echo Replicate_Do_DB # --echo Replicate_Ignore_DB # --echo Replicate_Do_Table # @@ -74,13 +73,22 @@ if ($tmp) --echo Slave_DDL_Groups # --echo Slave_Non_Transactional_Groups # --echo Slave_Transactional_Groups # + --echo Replicate_Rewrite_DB # } if (!$tmp) { # Note: after WL#5177, fields 13-18 shall not be filtered-out. - --replace_column 4 # 5 # 6 # 7 # 8 # 9 # 10 # 13 # 14 # 15 # 16 # 17 # 18 # 19 # 23 # 24 # 25 # 26 # 27 # 41 # 42 # 43 # 45 # 52 # 53 # 54 # + --replace_column 4 # 5 # 6 # 7 # 8 # 9 # 10 # 13 # 14 # 15 # 16 # 17 # 18 # 22 # 23 # 24 # 25 # 26 # 40 # 41 # 42 # 44 # 51 # 52 # 53 # 54 # query_vertical SHOW SLAVE STATUS; } +# +# Note, we must never, _ever_, add extra rows to this output of SHOW SLAVE +# STATUS, except at the very end, as this breaks backwards compatibility +# with applications or scripts that parse the output. This also means that +# we cannot add _any_ new rows in a GA version if a different row was +# already added in a later MariaDB version, as this would make it impossible +# to merge the change up while preserving the order of rows. 
+# # # Ensure that we don't get warnings from mysql.proc (used by check_mysqld) diff --git a/mysql-test/include/deadlock.inc b/mysql-test/include/deadlock.inc index abf217aea75..362d456e3f2 100644 --- a/mysql-test/include/deadlock.inc +++ b/mysql-test/include/deadlock.inc @@ -103,7 +103,6 @@ connection con2; # The following query should hang because con1 is locking the record update t2 set a=2 where b = 0; -select * from t2; --send update t1 set x=2 where id = 0; --sleep 2 diff --git a/mysql-test/include/innodb_rollback_on_timeout.inc b/mysql-test/include/innodb_rollback_on_timeout.inc index 274bbe12566..883b0820589 100644 --- a/mysql-test/include/innodb_rollback_on_timeout.inc +++ b/mysql-test/include/innodb_rollback_on_timeout.inc @@ -22,7 +22,6 @@ select * from t1; connection con1; begin work; insert into t1 values (5); -select * from t1; # Lock wait timeout set to 2 seconds in -master.opt; this # statement will time out; in 5.0.13+, it will not roll back transaction. --error ER_LOCK_WAIT_TIMEOUT diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index 3105f7f9077..1c23b356c4d 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -9,9 +9,6 @@ # # The environment variables SEARCH_FILE and SEARCH_PATTERN must be set # before sourcing this routine. -# SEARCH_TYPE can also be set to either NULL(default) or _gm_ -# NULL is equivalent of using m/SEARCH_PATTERN/gs -# _gm_ is equivalent of using m/SEARCH_RANGE/gm # # Optionally, SEARCH_RANGE can be set to the max number of bytes of the file # to search. If negative, it will search that many bytes at the end of the @@ -25,6 +22,7 @@ # Supported formats: # - (default) : "FOUND n /pattern/ in FILE " or "NOT FOUND ..." # - "matches" : Each match is printed, on a separate line +# - "count" : "FOUND n matches in FILE" or "NOT FOUND ..." (omit pattern) # # In case of # - SEARCH_FILE and/or SEARCH_PATTERN is not set @@ -51,15 +49,12 @@ # Created: 2011-11-11 mleich # ---error 0,1 perl; use strict; die "SEARCH_FILE not set" unless $ENV{SEARCH_FILE}; my @search_files= glob($ENV{SEARCH_FILE}); my $search_pattern= $ENV{SEARCH_PATTERN} or die "SEARCH_PATTERN not set"; my $search_range= $ENV{SEARCH_RANGE}; - my $silent= $ENV{SEARCH_SILENT}; - my $search_result= 0; my $content; foreach my $search_file (@search_files) { open(FILE, '<', $search_file) || die("Can't open file $search_file: $!"); @@ -83,48 +78,23 @@ perl; close(FILE); $content.= $file_content; } - my @matches; - if (not defined($ENV{SEARCH_TYPE})) - { - @matches=($content =~ /$search_pattern/gs); - } - elsif($ENV{SEARCH_TYPE} == "_gm_") - { - @matches=($content =~ /$search_pattern/gm); - } - my $res; - if (@matches) - { - $res="FOUND " . scalar(@matches); - $search_result= 1; - } - else - { - $res= "NOT FOUND"; - } + my @matches= ($content =~ /$search_pattern/gs); + my $res=@matches ? "FOUND " . scalar(@matches) : "NOT FOUND"; + $ENV{SEARCH_FILE} =~ s{^.*?([^/\\]+)$}{$1}; - if (!$silent || $search_result) - { - if ($ENV{SEARCH_OUTPUT} eq "matches") - { - foreach (@matches) - { - print $_ . "\n"; - } - } - else - { - print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n"; + if ($ENV{SEARCH_OUTPUT} eq "matches") { + foreach (@matches) { + print $_ . 
"\n"; } } - die "$ENV{SEARCH_ABORT}\n" - if $ENV{SEARCH_ABORT} && $res =~ /^$ENV{SEARCH_ABORT}/; - exit($search_result != 1); + elsif ($ENV{SEARCH_OUTPUT} eq "count") + { + print "$res matches in $ENV{SEARCH_FILE}\n"; + } + elsif ($ENV{SEARCH_ABORT} and $res =~ /^$ENV{SEARCH_ABORT}/) { + die "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n"; + } else { + print "$res /$search_pattern/ in $ENV{SEARCH_FILE}\n"; + } EOF - -let $SEARCH_RESULT= 1; # Found pattern -if ($errno) -{ - let $SEARCH_RESULT= 0; # Did not find pattern -} diff --git a/mysql-test/include/wait_for_pattern_in_file.inc b/mysql-test/include/wait_for_pattern_in_file.inc index 52226acd2da..a551761012f 100644 --- a/mysql-test/include/wait_for_pattern_in_file.inc +++ b/mysql-test/include/wait_for_pattern_in_file.inc @@ -25,23 +25,23 @@ if (!$_timeout) } let $_timeout_counter=`SELECT $_timeout * 10`; -let SEARCH_SILENT=1; - +let SEARCH_ABORT=NOT FOUND; let $_continue= 1; +disable_abort_on_error; while ($_continue) { source include/search_pattern_in_file.inc; - if ($SEARCH_RESULT) + if (!$errno) { # Found match let $_continue= 0; } - if (!$SEARCH_RESULT) + if ($errno) { dec $_timeout_counter; if ($_timeout_counter == 1) { - let $SEARCH_SILENT= 0; + enable_abort_on_error; } if (!$_timeout_counter) { @@ -49,8 +49,7 @@ while ($_continue) } } } - -let SEARCH_SILENT=0; +enable_abort_on_error; --source include/end_include_file.inc --let $keep_include_silent=$wait_save_keep_include_silent diff --git a/mysql-test/include/wait_until_connected_again.inc b/mysql-test/include/wait_until_connected_again.inc index deb6ca13e8b..2b20c780b69 100644 --- a/mysql-test/include/wait_until_connected_again.inc +++ b/mysql-test/include/wait_until_connected_again.inc @@ -11,7 +11,7 @@ let $counter= 5000; let $mysql_errno= 9999; while ($mysql_errno) { - --error 0,ER_ACCESS_DENIED_ERROR,ER_SERVER_SHUTDOWN,ER_CONNECTION_KILLED,ER_LOCK_WAIT_TIMEOUT,2002,2006,2013,HA_ERR_NO_ENCRYPTION + --error 0,ER_ACCESS_DENIED_ERROR,ER_SERVER_SHUTDOWN,ER_CONNECTION_KILLED,ER_LOCK_WAIT_TIMEOUT,2002,2006,2013,HA_ERR_NO_ENCRYPTION,2026 select 1; dec $counter; diff --git a/mysql-test/main/alter_table.result b/mysql-test/main/alter_table.result index 3353fc9b139..ec1ab658cf4 100644 --- a/mysql-test/main/alter_table.result +++ b/mysql-test/main/alter_table.result @@ -3102,6 +3102,14 @@ CREATE TEMPORARY TABLE t2 LIKE t1; DROP TEMPORARY TABLE t2; DROP TABLE t1; # +# MDEV-33313 Incorrect error message for "ALTER TABLE ... DROP CONSTRAINT ..., DROP col, DROP col" +# +create table t2(id int primary key) engine=innodb; +create table t1(id int primary key, t2_id int, constraint t1_fk_t2_id foreign key(t2_id) references t2(id)) engine=innodb; +alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id; +ERROR 42000: Can't DROP COLUMN `t2_id`; check that it exists +drop table t1, t2; +# # End of 10.6 tests # # diff --git a/mysql-test/main/alter_table.test b/mysql-test/main/alter_table.test index 306707c9d28..81de8d9bd1c 100644 --- a/mysql-test/main/alter_table.test +++ b/mysql-test/main/alter_table.test @@ -2392,6 +2392,15 @@ CREATE TEMPORARY TABLE t2 LIKE t1; DROP TEMPORARY TABLE t2; DROP TABLE t1; +--echo # +--echo # MDEV-33313 Incorrect error message for "ALTER TABLE ... 
DROP CONSTRAINT ..., DROP col, DROP col" +--echo # +create table t2(id int primary key) engine=innodb; +create table t1(id int primary key, t2_id int, constraint t1_fk_t2_id foreign key(t2_id) references t2(id)) engine=innodb; +--error ER_CANT_DROP_FIELD_OR_KEY +alter table t1 drop constraint t1_fk_t2_id, drop t2_id, drop t2_id; +drop table t1, t2; + --echo # --echo # End of 10.6 tests --echo # diff --git a/mysql-test/main/bootstrap.test b/mysql-test/main/bootstrap.test index 76d5ed008a6..68fbc00181d 100644 --- a/mysql-test/main/bootstrap.test +++ b/mysql-test/main/bootstrap.test @@ -74,7 +74,7 @@ SELECT 'bug' as '' FROM INFORMATION_SCHEMA.ENGINES WHERE engine='innodb' --echo # MDEV-13063 Server crashes in intern_plugin_lock or assertion `plugin_ptr->ref_count == 1' fails in plugin_init --echo # --error 1 ---exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE +--exec $MYSQLD_BOOTSTRAP_CMD --myisam_recover_options=NONE 2>/dev/null --echo # --echo # MDEV-19349 mysql_install_db: segfault at tmp_file_prefix check diff --git a/mysql-test/main/ctype_collate.result b/mysql-test/main/ctype_collate.result index 8624cff4eef..9f2eca8e726 100644 --- a/mysql-test/main/ctype_collate.result +++ b/mysql-test/main/ctype_collate.result @@ -780,3 +780,27 @@ string # # End of 10.2 tests # +# +# MDEV-33318 ORDER BY COLLATE improperly applied to non-character columns +# +set names utf8; +create table t1 (ts datetime); +insert t1 values ('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'), +('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'), +('2024-01-26 21:37:58'), ('2024-01-26 21:37:58'), +('2024-01-26 21:37:58'), ('2024-01-26 21:38:02'), +('2024-01-26 21:38:02'), ('2024-01-26 21:38:02'); +select * from t1 order by ts collate utf8_bin; +ts +2024-01-26 21:37:54 +2024-01-26 21:37:54 +2024-01-26 21:37:54 +2024-01-26 21:37:54 +2024-01-26 21:37:58 +2024-01-26 21:37:58 +2024-01-26 21:37:58 +2024-01-26 21:38:02 +2024-01-26 21:38:02 +2024-01-26 21:38:02 +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/main/ctype_collate.test b/mysql-test/main/ctype_collate.test index 2366b130e7c..96ad216dd20 100644 --- a/mysql-test/main/ctype_collate.test +++ b/mysql-test/main/ctype_collate.test @@ -357,3 +357,18 @@ SELECT COLUMN_GET(COLUMN_CREATE(0, 'string'),0 AS CHAR CHARACTER SET latin1 COLL --echo # --echo # End of 10.2 tests --echo # + +--echo # +--echo # MDEV-33318 ORDER BY COLLATE improperly applied to non-character columns +--echo # +set names utf8; +create table t1 (ts datetime); +insert t1 values ('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'), + ('2024-01-26 21:37:54'), ('2024-01-26 21:37:54'), + ('2024-01-26 21:37:58'), ('2024-01-26 21:37:58'), + ('2024-01-26 21:37:58'), ('2024-01-26 21:38:02'), + ('2024-01-26 21:38:02'), ('2024-01-26 21:38:02'); +select * from t1 order by ts collate utf8_bin; +drop table t1; + +--echo # End of 10.6 tests diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result index f4ca4089e55..0cfc98a1b37 100644 --- a/mysql-test/main/ctype_ucs.result +++ b/mysql-test/main/ctype_ucs.result @@ -6517,5 +6517,25 @@ SELECT 1 COLLATE latin1_swedish_ci; ERROR 42000: COLLATION 'latin1_swedish_ci' is not valid for CHARACTER SET 'ucs2' SET NAMES utf8; # +# MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion +# +SET NAMES utf8mb3, @@collation_connection=ucs2_general_ci; +CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET ucs2; +INSERT INTO t1 VALUES ('a'),('A'); +CREATE OR REPLACE VIEW v1 AS +SELECT COUNT(*) AS cnt, GROUP_CONCAT(c) AS c1 FROM t1 GROUP BY c; 
+SELECT * FROM v1; +cnt c1 +2 a,A +SELECT HEX(c1) FROM v1; +HEX(c1) +0061002C0041 +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select count(0) AS `cnt`,group_concat(`t1`.`c` separator ',') AS `c1` from `t1` group by `t1`.`c` utf8mb3 ucs2_general_ci +DROP VIEW v1; +DROP TABLE t1; +SET NAMES utf8mb3; +# # End of 10.5 tests # diff --git a/mysql-test/main/ctype_ucs.test b/mysql-test/main/ctype_ucs.test index 993f9be5c54..93ddcb86964 100644 --- a/mysql-test/main/ctype_ucs.test +++ b/mysql-test/main/ctype_ucs.test @@ -1193,6 +1193,23 @@ SELECT HEX(1 COLLATE ucs2_bin); SELECT 1 COLLATE latin1_swedish_ci; SET NAMES utf8; +--echo # +--echo # MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion +--echo # + +SET NAMES utf8mb3, @@collation_connection=ucs2_general_ci; +CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET ucs2; +INSERT INTO t1 VALUES ('a'),('A'); +CREATE OR REPLACE VIEW v1 AS + SELECT COUNT(*) AS cnt, GROUP_CONCAT(c) AS c1 FROM t1 GROUP BY c; +SELECT * FROM v1; +SELECT HEX(c1) FROM v1; +SHOW CREATE VIEW v1; +DROP VIEW v1; +DROP TABLE t1; +SET NAMES utf8mb3; + + --echo # --echo # End of 10.5 tests --echo # diff --git a/mysql-test/main/deadlock_innodb.result b/mysql-test/main/deadlock_innodb.result index 09958bf0413..9236f21c22b 100644 --- a/mysql-test/main/deadlock_innodb.result +++ b/mysql-test/main/deadlock_innodb.result @@ -89,11 +89,6 @@ id x 300 300 connection con2; update t2 set a=2 where b = 0; -select * from t2; -b a -0 2 -1 20 -2 30 update t1 set x=2 where id = 0; connection con1; update t1 set x=1 where id = 0; diff --git a/mysql-test/main/derived_view.result b/mysql-test/main/derived_view.result index 3cd38a62d92..5ceb04cb75d 100644 --- a/mysql-test/main/derived_view.result +++ b/mysql-test/main/derived_view.result @@ -4390,6 +4390,38 @@ dim1 dim2 dim3 p SUM(p) DROP VIEW v; # End of 10.4 tests # +# MDEV-31277: 2-nd execution of PS to select from materialized view +# specified as left join whose inner table is mergeable +# derived containing a constant column +# +create table t1 ( +Election int(10) unsigned NOT NULL +) engine=MyISAM; +insert into t1 (Election) values (1), (4); +create table t2 ( +VoteID int(10), +ElectionID int(10), +UserID int(10) +); +insert into t2 (ElectionID, UserID) values (2, 30), (3, 30); +create view v1 as select * from t1 +left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T +on T.ElectionID = t1.Election +limit 9; +prepare stmt1 from "select * from v1"; +execute stmt1; +Election Voted ElectionID +1 NULL NULL +4 NULL NULL +execute stmt1; +Election Voted ElectionID +1 NULL NULL +4 NULL NULL +deallocate prepare stmt1; +drop view v1; +drop table t1, t2; +# End of 10.5 tests +# # MDEV-31143: view with ORDER BY used in query with rownum() in WHERE # create table t1 (id int primary key); diff --git a/mysql-test/main/derived_view.test b/mysql-test/main/derived_view.test index b7c9c91480a..1cfc0cbfa30 100644 --- a/mysql-test/main/derived_view.test +++ b/mysql-test/main/derived_view.test @@ -2841,6 +2841,42 @@ DROP VIEW v; --echo # End of 10.4 tests +--echo # +--echo # MDEV-31277: 2-nd execution of PS to select from materialized view +--echo # specified as left join whose inner table is mergeable +--echo # derived containing a constant column +--echo # + +create table t1 ( + Election int(10) unsigned NOT NULL +) engine=MyISAM; + +insert into t1 (Election) values (1), (4); + +create table t2 ( + VoteID int(10), + 
ElectionID int(10), + UserID int(10) +); + +insert into t2 (ElectionID, UserID) values (2, 30), (3, 30); +create view v1 as select * from t1 + left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T + on T.ElectionID = t1.Election +limit 9; + +prepare stmt1 from "select * from v1"; + +execute stmt1; +execute stmt1; + +deallocate prepare stmt1; + +drop view v1; +drop table t1, t2; + +--echo # End of 10.5 tests + --echo # --echo # MDEV-31143: view with ORDER BY used in query with rownum() in WHERE --echo # diff --git a/mysql-test/main/empty_string_literal.result b/mysql-test/main/empty_string_literal.result index 732e8e6d557..71b0869cb2a 100644 --- a/mysql-test/main/empty_string_literal.result +++ b/mysql-test/main/empty_string_literal.result @@ -64,7 +64,7 @@ SET sql_mode=@mode; # Test litteral concat # SELECT 'a' 'b'; -a +ab ab SELECT 'a' ''; a @@ -76,13 +76,13 @@ SELECT '' ''; NULL NULL SELECT '' 'b' 'c'; -b +bc bc SELECT '' '' 'c'; c c SELECT 'a' '' 'c'; -a +ac ac SELECT 'a' '' ''; a @@ -208,3 +208,22 @@ t1 CREATE TABLE `t1` ( KEY `a` (`a`,`b`) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1; +# +# MDEV-33460 select '123' 'x'; unexpected result +# +SELECT ''; +NULL +NULL +SELECT '' 'b' 'c'; +bc +bc +SELECT '' '' 'c'; +c +c +SELECT 'a' '' 'c'; +ac +ac +SELECT 'a' '' ''; +a +a +# End of 10.5 test diff --git a/mysql-test/main/empty_string_literal.test b/mysql-test/main/empty_string_literal.test index 9174a7714a2..3320841fb42 100644 --- a/mysql-test/main/empty_string_literal.test +++ b/mysql-test/main/empty_string_literal.test @@ -25,3 +25,15 @@ flush tables; update t1 set a = 2; show create table t1; drop table t1; + +--echo # +--echo # MDEV-33460 select '123' 'x'; unexpected result +--echo # + +SELECT ''; +SELECT '' 'b' 'c'; +SELECT '' '' 'c'; +SELECT 'a' '' 'c'; +SELECT 'a' '' ''; + +--echo # End of 10.5 test diff --git a/mysql-test/main/func_extract.result b/mysql-test/main/func_extract.result index bebb8c717f6..dc71f6ae27a 100644 --- a/mysql-test/main/func_extract.result +++ b/mysql-test/main/func_extract.result @@ -590,3 +590,885 @@ Warning 1292 Truncated incorrect time value: '01:02:03/' Warning 1292 Truncated incorrect time value: '01:02:03/' Warning 1292 Truncated incorrect INTERVAL DAY TO SECOND value: '01:02:03/' DROP TABLE t1; +# +# Start of 10.5 tests +# +# +# MDEV-33496 Out of range error in AVG(YEAR(datetime)) due to a wrong data type +# +CREATE FUNCTION select01() RETURNS TEXT RETURN 'SELECT ?, CAST(? AS UNSIGNED), CAST(? 
AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?'; +CREATE FUNCTION select02() RETURNS TEXT RETURN 'SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)'; +CREATE TABLE t1 (a DATETIME(6)); +INSERT INTO t1 VALUES ('2001-12-31 10:20:30.999999'); +CREATE FUNCTION params(expr TEXT, count INT) RETURNS TEXT +BEGIN +RETURN CONCAT(expr, REPEAT(CONCAT(', ', expr), count-1)); +END; +$$ +CREATE PROCEDURE show_drop() +BEGIN +SELECT TABLE_NAME, COLUMN_TYPE, COLUMN_NAME +FROM INFORMATION_SCHEMA.COLUMNS +WHERE TABLE_SCHEMA='test' + AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm', +'t1e_ps','t1f_ps','t2e_ps','t2f_ps') +ORDER BY LEFT(TABLE_NAME, 2), ORDINAL_POSITION, TABLE_NAME; +FOR rec IN (SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES +WHERE TABLE_SCHEMA='test' + AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm', +'t1e_ps','t1f_ps','t2e_ps','t2f_ps')) +DO +EXECUTE IMMEDIATE CONCAT('DROP TABLE ', rec.TABLE_NAME); +END FOR; +END; +$$ +CREATE PROCEDURE p1(unit VARCHAR(32)) +BEGIN +DECLARE do_extract BOOL DEFAULT unit NOT IN('DAYOFYEAR'); +DECLARE query01 TEXT DEFAULT +CONCAT('CREATE TABLE t2 AS ', select01(), ' FROM t1'); +DECLARE query02 TEXT DEFAULT +CONCAT('CREATE TABLE t2 AS ', select02(), ' FROM t1'); +IF (do_extract) +THEN +EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)')); +EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)')); +END IF; +EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1f_nm'),'?', CONCAT(unit,'(a)')); +EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2f_nm'),'?', CONCAT(unit,'(a)')); +END; +$$ + + +# EXTRACT(YEAR FROM expr) and YEAR(expr) are equivalent +CALL p1('YEAR'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(YEAR FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), YEAR(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(5) EXTRACT(YEAR FROM a) +t1e_ps int(5) ? +t1f_nm int(5) YEAR(a) +t1f_ps int(5) ? +t1e_nm int(4) unsigned CAST(EXTRACT(YEAR FROM a) AS UNSIGNED) +t1e_ps int(4) unsigned CAST(? AS UNSIGNED) +t1f_nm int(4) unsigned CAST(YEAR(a) AS UNSIGNED) +t1f_ps int(4) unsigned CAST(? AS UNSIGNED) +t1e_nm int(5) CAST(EXTRACT(YEAR FROM a) AS SIGNED) +t1e_ps int(5) CAST(? AS SIGNED) +t1f_nm int(5) CAST(YEAR(a) AS SIGNED) +t1f_ps int(5) CAST(? AS SIGNED) +t1e_nm int(5) ABS(EXTRACT(YEAR FROM a)) +t1e_ps int(5) ABS(?) +t1f_nm int(5) ABS(YEAR(a)) +t1f_ps int(5) ABS(?) +t1e_nm int(5) ROUND(EXTRACT(YEAR FROM a)) +t1e_ps int(5) ROUND(?) +t1f_nm int(5) ROUND(YEAR(a)) +t1f_ps int(5) ROUND(?) +t1e_nm int(5) -EXTRACT(YEAR FROM a) +t1e_ps int(5) -? +t1f_nm int(5) -YEAR(a) +t1f_ps int(5) -? +t1e_nm int(6) ROUND(EXTRACT(YEAR FROM a),-1) +t1e_ps int(6) ROUND(?,-1) +t1f_nm int(6) ROUND(YEAR(a),-1) +t1f_ps int(6) ROUND(?,-1) +t1e_nm int(6) EXTRACT(YEAR FROM a)+0 +t1e_ps int(6) ?+0 +t1f_nm int(6) YEAR(a)+0 +t1f_ps int(6) ?+0 +t1e_nm decimal(6,1) EXTRACT(YEAR FROM a)+0.0 +t1e_ps decimal(6,1) ?+0.0 +t1f_nm decimal(6,1) YEAR(a)+0.0 +t1f_ps decimal(6,1) ?+0.0 +t1e_nm varchar(4) CONCAT(EXTRACT(YEAR FROM a)) +t1e_ps varchar(4) CONCAT(?) +t1f_nm varchar(4) CONCAT(YEAR(a)) +t1f_ps varchar(4) CONCAT(?) +t1e_nm int(5) LEAST(EXTRACT(YEAR FROM a),EXTRACT(YEAR FROM a)) +t1e_ps int(5) LEAST(?,?) +t1f_nm int(5) LEAST(YEAR(a),YEAR(a)) +t1f_ps int(5) LEAST(?,?) +t1e_nm int(5) COALESCE(EXTRACT(YEAR FROM a)) +t1e_ps int(5) COALESCE(?) +t1f_nm int(5) COALESCE(YEAR(a)) +t1f_ps int(5) COALESCE(?) +t1e_nm int(5) COALESCE(EXTRACT(YEAR FROM a),CAST(1 AS SIGNED)) +t1e_ps int(5) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(5) COALESCE(YEAR(a),CAST(1 AS SIGNED)) +t1f_ps int(5) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(4,0) COALESCE(EXTRACT(YEAR FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(4,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(4,0) COALESCE(YEAR(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(4,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(5) @a:=EXTRACT(YEAR FROM a) +t1e_ps int(5) @a:=? +t1f_nm int(5) @a:=YEAR(a) +t1f_ps int(5) @a:=? 
+t2e_nm decimal(8,4) AVG(EXTRACT(YEAR FROM a)) +t2e_ps decimal(8,4) AVG(?) +t2f_nm decimal(8,4) AVG(YEAR(a)) +t2f_ps decimal(8,4) AVG(?) +t2e_nm bigint(5) MIN(EXTRACT(YEAR FROM a)) +t2e_ps bigint(5) MIN(?) +t2f_nm bigint(5) MIN(YEAR(a)) +t2f_ps bigint(5) MIN(?) +t2e_nm bigint(5) MAX(EXTRACT(YEAR FROM a)) +t2e_ps bigint(5) MAX(?) +t2f_nm bigint(5) MAX(YEAR(a)) +t2f_ps bigint(5) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(YEAR FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(YEAR(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(QUARTER FROM expr) and QUARTER(expr) are equavalent +CALL p1('QUARTER'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(QUARTER FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'), QUARTER(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(2) EXTRACT(QUARTER FROM a) +t1e_ps int(2) ? +t1f_nm int(2) QUARTER(a) +t1f_ps int(2) ? +t1e_nm int(1) unsigned CAST(EXTRACT(QUARTER FROM a) AS UNSIGNED) +t1e_ps int(1) unsigned CAST(? AS UNSIGNED) +t1f_nm int(1) unsigned CAST(QUARTER(a) AS UNSIGNED) +t1f_ps int(1) unsigned CAST(? AS UNSIGNED) +t1e_nm int(2) CAST(EXTRACT(QUARTER FROM a) AS SIGNED) +t1e_ps int(2) CAST(? AS SIGNED) +t1f_nm int(2) CAST(QUARTER(a) AS SIGNED) +t1f_ps int(2) CAST(? AS SIGNED) +t1e_nm int(2) ABS(EXTRACT(QUARTER FROM a)) +t1e_ps int(2) ABS(?) +t1f_nm int(2) ABS(QUARTER(a)) +t1f_ps int(2) ABS(?) +t1e_nm int(2) ROUND(EXTRACT(QUARTER FROM a)) +t1e_ps int(2) ROUND(?) +t1f_nm int(2) ROUND(QUARTER(a)) +t1f_ps int(2) ROUND(?) +t1e_nm int(2) -EXTRACT(QUARTER FROM a) +t1e_ps int(2) -? +t1f_nm int(2) -QUARTER(a) +t1f_ps int(2) -? +t1e_nm int(3) ROUND(EXTRACT(QUARTER FROM a),-1) +t1e_ps int(3) ROUND(?,-1) +t1f_nm int(3) ROUND(QUARTER(a),-1) +t1f_ps int(3) ROUND(?,-1) +t1e_nm int(3) EXTRACT(QUARTER FROM a)+0 +t1e_ps int(3) ?+0 +t1f_nm int(3) QUARTER(a)+0 +t1f_ps int(3) ?+0 +t1e_nm decimal(3,1) EXTRACT(QUARTER FROM a)+0.0 +t1e_ps decimal(3,1) ?+0.0 +t1f_nm decimal(3,1) QUARTER(a)+0.0 +t1f_ps decimal(3,1) ?+0.0 +t1e_nm varchar(1) CONCAT(EXTRACT(QUARTER FROM a)) +t1e_ps varchar(1) CONCAT(?) +t1f_nm varchar(1) CONCAT(QUARTER(a)) +t1f_ps varchar(1) CONCAT(?) +t1e_nm int(2) LEAST(EXTRACT(QUARTER FROM a),EXTRACT(QUARTER FROM a)) +t1e_ps int(2) LEAST(?,?) +t1f_nm int(2) LEAST(QUARTER(a),QUARTER(a)) +t1f_ps int(2) LEAST(?,?) +t1e_nm int(2) COALESCE(EXTRACT(QUARTER FROM a)) +t1e_ps int(2) COALESCE(?) +t1f_nm int(2) COALESCE(QUARTER(a)) +t1f_ps int(2) COALESCE(?) 
+t1e_nm int(2) COALESCE(EXTRACT(QUARTER FROM a),CAST(1 AS SIGNED)) +t1e_ps int(2) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(2) COALESCE(QUARTER(a),CAST(1 AS SIGNED)) +t1f_ps int(2) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(1,0) COALESCE(EXTRACT(QUARTER FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(1,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(1,0) COALESCE(QUARTER(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(1,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(2) @a:=EXTRACT(QUARTER FROM a) +t1e_ps int(2) @a:=? +t1f_nm int(2) @a:=QUARTER(a) +t1f_ps int(2) @a:=? +t2e_nm decimal(5,4) AVG(EXTRACT(QUARTER FROM a)) +t2e_ps decimal(5,4) AVG(?) +t2f_nm decimal(5,4) AVG(QUARTER(a)) +t2f_ps decimal(5,4) AVG(?) +t2e_nm bigint(2) MIN(EXTRACT(QUARTER FROM a)) +t2e_ps bigint(2) MIN(?) +t2f_nm bigint(2) MIN(QUARTER(a)) +t2f_ps bigint(2) MIN(?) +t2e_nm bigint(2) MAX(EXTRACT(QUARTER FROM a)) +t2e_ps bigint(2) MAX(?) +t2f_nm bigint(2) MAX(QUARTER(a)) +t2f_ps bigint(2) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(QUARTER FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(QUARTER(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(MONTH FROM expr) and MONTH(expr) are equavalent +CALL p1('MONTH'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MONTH FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'), MONTH(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(3) EXTRACT(MONTH FROM a) +t1e_ps int(3) ? +t1f_nm int(3) MONTH(a) +t1f_ps int(3) ? +t1e_nm int(2) unsigned CAST(EXTRACT(MONTH FROM a) AS UNSIGNED) +t1e_ps int(2) unsigned CAST(? AS UNSIGNED) +t1f_nm int(2) unsigned CAST(MONTH(a) AS UNSIGNED) +t1f_ps int(2) unsigned CAST(? AS UNSIGNED) +t1e_nm int(3) CAST(EXTRACT(MONTH FROM a) AS SIGNED) +t1e_ps int(3) CAST(? AS SIGNED) +t1f_nm int(3) CAST(MONTH(a) AS SIGNED) +t1f_ps int(3) CAST(? AS SIGNED) +t1e_nm int(3) ABS(EXTRACT(MONTH FROM a)) +t1e_ps int(3) ABS(?) +t1f_nm int(3) ABS(MONTH(a)) +t1f_ps int(3) ABS(?) +t1e_nm int(3) ROUND(EXTRACT(MONTH FROM a)) +t1e_ps int(3) ROUND(?) +t1f_nm int(3) ROUND(MONTH(a)) +t1f_ps int(3) ROUND(?) +t1e_nm int(3) -EXTRACT(MONTH FROM a) +t1e_ps int(3) -? +t1f_nm int(3) -MONTH(a) +t1f_ps int(3) -? +t1e_nm int(4) ROUND(EXTRACT(MONTH FROM a),-1) +t1e_ps int(4) ROUND(?,-1) +t1f_nm int(4) ROUND(MONTH(a),-1) +t1f_ps int(4) ROUND(?,-1) +t1e_nm int(4) EXTRACT(MONTH FROM a)+0 +t1e_ps int(4) ?+0 +t1f_nm int(4) MONTH(a)+0 +t1f_ps int(4) ?+0 +t1e_nm decimal(4,1) EXTRACT(MONTH FROM a)+0.0 +t1e_ps decimal(4,1) ?+0.0 +t1f_nm decimal(4,1) MONTH(a)+0.0 +t1f_ps decimal(4,1) ?+0.0 +t1e_nm varchar(2) CONCAT(EXTRACT(MONTH FROM a)) +t1e_ps varchar(2) CONCAT(?) +t1f_nm varchar(2) CONCAT(MONTH(a)) +t1f_ps varchar(2) CONCAT(?) +t1e_nm int(3) LEAST(EXTRACT(MONTH FROM a),EXTRACT(MONTH FROM a)) +t1e_ps int(3) LEAST(?,?) +t1f_nm int(3) LEAST(MONTH(a),MONTH(a)) +t1f_ps int(3) LEAST(?,?) +t1e_nm int(3) COALESCE(EXTRACT(MONTH FROM a)) +t1e_ps int(3) COALESCE(?) +t1f_nm int(3) COALESCE(MONTH(a)) +t1f_ps int(3) COALESCE(?) +t1e_nm int(3) COALESCE(EXTRACT(MONTH FROM a),CAST(1 AS SIGNED)) +t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(3) COALESCE(MONTH(a),CAST(1 AS SIGNED)) +t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(2,0) COALESCE(EXTRACT(MONTH FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(2,0) COALESCE(MONTH(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(3) @a:=EXTRACT(MONTH FROM a) +t1e_ps int(3) @a:=? 
+t1f_nm int(3) @a:=MONTH(a) +t1f_ps int(3) @a:=? +t2e_nm decimal(6,4) AVG(EXTRACT(MONTH FROM a)) +t2e_ps decimal(6,4) AVG(?) +t2f_nm decimal(6,4) AVG(MONTH(a)) +t2f_ps decimal(6,4) AVG(?) +t2e_nm bigint(3) MIN(EXTRACT(MONTH FROM a)) +t2e_ps bigint(3) MIN(?) +t2f_nm bigint(3) MIN(MONTH(a)) +t2f_ps bigint(3) MIN(?) +t2e_nm bigint(3) MAX(EXTRACT(MONTH FROM a)) +t2e_ps bigint(3) MAX(?) +t2f_nm bigint(3) MAX(MONTH(a)) +t2f_ps bigint(3) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(MONTH FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(MONTH(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(WEEK FROM expr) and WEEK(expr) are equavalent +CALL p1('WEEK'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(WEEK FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'), WEEK(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(3) EXTRACT(WEEK FROM a) +t1e_ps int(3) ? 
+t1f_nm int(3) WEEK(a) +t1f_ps int(3) ? +t1e_nm int(2) unsigned CAST(EXTRACT(WEEK FROM a) AS UNSIGNED) +t1e_ps int(2) unsigned CAST(? AS UNSIGNED) +t1f_nm int(2) unsigned CAST(WEEK(a) AS UNSIGNED) +t1f_ps int(2) unsigned CAST(? AS UNSIGNED) +t1e_nm int(3) CAST(EXTRACT(WEEK FROM a) AS SIGNED) +t1e_ps int(3) CAST(? AS SIGNED) +t1f_nm int(3) CAST(WEEK(a) AS SIGNED) +t1f_ps int(3) CAST(? AS SIGNED) +t1e_nm int(3) ABS(EXTRACT(WEEK FROM a)) +t1e_ps int(3) ABS(?) +t1f_nm int(3) ABS(WEEK(a)) +t1f_ps int(3) ABS(?) +t1e_nm int(3) ROUND(EXTRACT(WEEK FROM a)) +t1e_ps int(3) ROUND(?) +t1f_nm int(3) ROUND(WEEK(a)) +t1f_ps int(3) ROUND(?) +t1e_nm int(3) -EXTRACT(WEEK FROM a) +t1e_ps int(3) -? +t1f_nm int(3) -WEEK(a) +t1f_ps int(3) -? +t1e_nm int(4) ROUND(EXTRACT(WEEK FROM a),-1) +t1e_ps int(4) ROUND(?,-1) +t1f_nm int(4) ROUND(WEEK(a),-1) +t1f_ps int(4) ROUND(?,-1) +t1e_nm int(4) EXTRACT(WEEK FROM a)+0 +t1e_ps int(4) ?+0 +t1f_nm int(4) WEEK(a)+0 +t1f_ps int(4) ?+0 +t1e_nm decimal(4,1) EXTRACT(WEEK FROM a)+0.0 +t1e_ps decimal(4,1) ?+0.0 +t1f_nm decimal(4,1) WEEK(a)+0.0 +t1f_ps decimal(4,1) ?+0.0 +t1e_nm varchar(2) CONCAT(EXTRACT(WEEK FROM a)) +t1e_ps varchar(2) CONCAT(?) +t1f_nm varchar(2) CONCAT(WEEK(a)) +t1f_ps varchar(2) CONCAT(?) +t1e_nm int(3) LEAST(EXTRACT(WEEK FROM a),EXTRACT(WEEK FROM a)) +t1e_ps int(3) LEAST(?,?) +t1f_nm int(3) LEAST(WEEK(a),WEEK(a)) +t1f_ps int(3) LEAST(?,?) +t1e_nm int(3) COALESCE(EXTRACT(WEEK FROM a)) +t1e_ps int(3) COALESCE(?) +t1f_nm int(3) COALESCE(WEEK(a)) +t1f_ps int(3) COALESCE(?) +t1e_nm int(3) COALESCE(EXTRACT(WEEK FROM a),CAST(1 AS SIGNED)) +t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(3) COALESCE(WEEK(a),CAST(1 AS SIGNED)) +t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(2,0) COALESCE(EXTRACT(WEEK FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(2,0) COALESCE(WEEK(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(3) @a:=EXTRACT(WEEK FROM a) +t1e_ps int(3) @a:=? +t1f_nm int(3) @a:=WEEK(a) +t1f_ps int(3) @a:=? +t2e_nm decimal(6,4) AVG(EXTRACT(WEEK FROM a)) +t2e_ps decimal(6,4) AVG(?) +t2f_nm decimal(6,4) AVG(WEEK(a)) +t2f_ps decimal(6,4) AVG(?) +t2e_nm bigint(3) MIN(EXTRACT(WEEK FROM a)) +t2e_ps bigint(3) MIN(?) +t2f_nm bigint(3) MIN(WEEK(a)) +t2f_ps bigint(3) MIN(?) +t2e_nm bigint(3) MAX(EXTRACT(WEEK FROM a)) +t2e_ps bigint(3) MAX(?) +t2f_nm bigint(3) MAX(WEEK(a)) +t2f_ps bigint(3) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(WEEK FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(WEEK(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(DAY FROM expr) returns hours/24 and includes the sign for TIME +# DAY(expr) returns the DD part of CAST(expr AS DATETIME) +CALL p1('DAY'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(DAY FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'), DAY(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(3) EXTRACT(DAY FROM a) +t1e_ps int(3) ? +t1f_nm int(3) DAY(a) +t1f_ps int(3) ? +t1e_nm bigint(20) unsigned CAST(EXTRACT(DAY FROM a) AS UNSIGNED) +t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED) +t1f_nm int(2) unsigned CAST(DAY(a) AS UNSIGNED) +t1f_ps int(2) unsigned CAST(? AS UNSIGNED) +t1e_nm int(3) CAST(EXTRACT(DAY FROM a) AS SIGNED) +t1e_ps int(3) CAST(? AS SIGNED) +t1f_nm int(3) CAST(DAY(a) AS SIGNED) +t1f_ps int(3) CAST(? AS SIGNED) +t1e_nm int(3) ABS(EXTRACT(DAY FROM a)) +t1e_ps int(3) ABS(?) +t1f_nm int(3) ABS(DAY(a)) +t1f_ps int(3) ABS(?) +t1e_nm int(3) ROUND(EXTRACT(DAY FROM a)) +t1e_ps int(3) ROUND(?) +t1f_nm int(3) ROUND(DAY(a)) +t1f_ps int(3) ROUND(?) +t1e_nm int(4) -EXTRACT(DAY FROM a) +t1e_ps int(4) -? +t1f_nm int(3) -DAY(a) +t1f_ps int(3) -? 
+t1e_nm int(4) ROUND(EXTRACT(DAY FROM a),-1) +t1e_ps int(4) ROUND(?,-1) +t1f_nm int(4) ROUND(DAY(a),-1) +t1f_ps int(4) ROUND(?,-1) +t1e_nm int(4) EXTRACT(DAY FROM a)+0 +t1e_ps int(4) ?+0 +t1f_nm int(4) DAY(a)+0 +t1f_ps int(4) ?+0 +t1e_nm decimal(4,1) EXTRACT(DAY FROM a)+0.0 +t1e_ps decimal(4,1) ?+0.0 +t1f_nm decimal(4,1) DAY(a)+0.0 +t1f_ps decimal(4,1) ?+0.0 +t1e_nm varchar(3) CONCAT(EXTRACT(DAY FROM a)) +t1e_ps varchar(3) CONCAT(?) +t1f_nm varchar(2) CONCAT(DAY(a)) +t1f_ps varchar(2) CONCAT(?) +t1e_nm int(3) LEAST(EXTRACT(DAY FROM a),EXTRACT(DAY FROM a)) +t1e_ps int(3) LEAST(?,?) +t1f_nm int(3) LEAST(DAY(a),DAY(a)) +t1f_ps int(3) LEAST(?,?) +t1e_nm int(3) COALESCE(EXTRACT(DAY FROM a)) +t1e_ps int(3) COALESCE(?) +t1f_nm int(3) COALESCE(DAY(a)) +t1f_ps int(3) COALESCE(?) +t1e_nm int(3) COALESCE(EXTRACT(DAY FROM a),CAST(1 AS SIGNED)) +t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(3) COALESCE(DAY(a),CAST(1 AS SIGNED)) +t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(2,0) COALESCE(EXTRACT(DAY FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(2,0) COALESCE(DAY(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(3) @a:=EXTRACT(DAY FROM a) +t1e_ps int(3) @a:=? +t1f_nm int(3) @a:=DAY(a) +t1f_ps int(3) @a:=? +t2e_nm decimal(6,4) AVG(EXTRACT(DAY FROM a)) +t2e_ps decimal(6,4) AVG(?) +t2f_nm decimal(6,4) AVG(DAY(a)) +t2f_ps decimal(6,4) AVG(?) +t2e_nm bigint(3) MIN(EXTRACT(DAY FROM a)) +t2e_ps bigint(3) MIN(?) +t2f_nm bigint(3) MIN(DAY(a)) +t2f_ps bigint(3) MIN(?) +t2e_nm bigint(3) MAX(EXTRACT(DAY FROM a)) +t2e_ps bigint(3) MAX(?) +t2f_nm bigint(3) MAX(DAY(a)) +t2f_ps bigint(3) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(DAY FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(DAY(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(HOUR FROM expr) returns hours%24 and includes the sign for TIME +# HOUR(expr) returns the hh part of CAST(expr AS DATETIME) +CALL p1('HOUR'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(HOUR FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'), HOUR(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(3) EXTRACT(HOUR FROM a) +t1e_ps int(3) ? +t1f_nm int(3) HOUR(a) +t1f_ps int(3) ? +t1e_nm bigint(20) unsigned CAST(EXTRACT(HOUR FROM a) AS UNSIGNED) +t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED) +t1f_nm int(2) unsigned CAST(HOUR(a) AS UNSIGNED) +t1f_ps int(2) unsigned CAST(? AS UNSIGNED) +t1e_nm int(3) CAST(EXTRACT(HOUR FROM a) AS SIGNED) +t1e_ps int(3) CAST(? AS SIGNED) +t1f_nm int(3) CAST(HOUR(a) AS SIGNED) +t1f_ps int(3) CAST(? AS SIGNED) +t1e_nm int(3) ABS(EXTRACT(HOUR FROM a)) +t1e_ps int(3) ABS(?) +t1f_nm int(3) ABS(HOUR(a)) +t1f_ps int(3) ABS(?) +t1e_nm int(3) ROUND(EXTRACT(HOUR FROM a)) +t1e_ps int(3) ROUND(?) +t1f_nm int(3) ROUND(HOUR(a)) +t1f_ps int(3) ROUND(?) +t1e_nm int(4) -EXTRACT(HOUR FROM a) +t1e_ps int(4) -? +t1f_nm int(3) -HOUR(a) +t1f_ps int(3) -? +t1e_nm int(4) ROUND(EXTRACT(HOUR FROM a),-1) +t1e_ps int(4) ROUND(?,-1) +t1f_nm int(4) ROUND(HOUR(a),-1) +t1f_ps int(4) ROUND(?,-1) +t1e_nm int(4) EXTRACT(HOUR FROM a)+0 +t1e_ps int(4) ?+0 +t1f_nm int(4) HOUR(a)+0 +t1f_ps int(4) ?+0 +t1e_nm decimal(4,1) EXTRACT(HOUR FROM a)+0.0 +t1e_ps decimal(4,1) ?+0.0 +t1f_nm decimal(4,1) HOUR(a)+0.0 +t1f_ps decimal(4,1) ?+0.0 +t1e_nm varchar(3) CONCAT(EXTRACT(HOUR FROM a)) +t1e_ps varchar(3) CONCAT(?) +t1f_nm varchar(2) CONCAT(HOUR(a)) +t1f_ps varchar(2) CONCAT(?) +t1e_nm int(3) LEAST(EXTRACT(HOUR FROM a),EXTRACT(HOUR FROM a)) +t1e_ps int(3) LEAST(?,?) +t1f_nm int(3) LEAST(HOUR(a),HOUR(a)) +t1f_ps int(3) LEAST(?,?) +t1e_nm int(3) COALESCE(EXTRACT(HOUR FROM a)) +t1e_ps int(3) COALESCE(?) +t1f_nm int(3) COALESCE(HOUR(a)) +t1f_ps int(3) COALESCE(?) +t1e_nm int(3) COALESCE(EXTRACT(HOUR FROM a),CAST(1 AS SIGNED)) +t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(3) COALESCE(HOUR(a),CAST(1 AS SIGNED)) +t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(2,0) COALESCE(EXTRACT(HOUR FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(2,0) COALESCE(HOUR(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(3) @a:=EXTRACT(HOUR FROM a) +t1e_ps int(3) @a:=? +t1f_nm int(3) @a:=HOUR(a) +t1f_ps int(3) @a:=? 
+t2e_nm decimal(6,4) AVG(EXTRACT(HOUR FROM a)) +t2e_ps decimal(6,4) AVG(?) +t2f_nm decimal(6,4) AVG(HOUR(a)) +t2f_ps decimal(6,4) AVG(?) +t2e_nm bigint(3) MIN(EXTRACT(HOUR FROM a)) +t2e_ps bigint(3) MIN(?) +t2f_nm bigint(3) MIN(HOUR(a)) +t2f_ps bigint(3) MIN(?) +t2e_nm bigint(3) MAX(EXTRACT(HOUR FROM a)) +t2e_ps bigint(3) MAX(?) +t2f_nm bigint(3) MAX(HOUR(a)) +t2f_ps bigint(3) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(HOUR FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(HOUR(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(MINUTE FROM expr) includes the sign for TIME +# MINUTE(expr) returns the absolute value +CALL p1('MINUTE'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MINUTE FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'), MINUTE(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(3) EXTRACT(MINUTE FROM a) +t1e_ps int(3) ? +t1f_nm int(3) MINUTE(a) +t1f_ps int(3) ? +t1e_nm bigint(20) unsigned CAST(EXTRACT(MINUTE FROM a) AS UNSIGNED) +t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED) +t1f_nm int(2) unsigned CAST(MINUTE(a) AS UNSIGNED) +t1f_ps int(2) unsigned CAST(? AS UNSIGNED) +t1e_nm int(3) CAST(EXTRACT(MINUTE FROM a) AS SIGNED) +t1e_ps int(3) CAST(? AS SIGNED) +t1f_nm int(3) CAST(MINUTE(a) AS SIGNED) +t1f_ps int(3) CAST(? AS SIGNED) +t1e_nm int(3) ABS(EXTRACT(MINUTE FROM a)) +t1e_ps int(3) ABS(?) +t1f_nm int(3) ABS(MINUTE(a)) +t1f_ps int(3) ABS(?) +t1e_nm int(3) ROUND(EXTRACT(MINUTE FROM a)) +t1e_ps int(3) ROUND(?) +t1f_nm int(3) ROUND(MINUTE(a)) +t1f_ps int(3) ROUND(?) +t1e_nm int(4) -EXTRACT(MINUTE FROM a) +t1e_ps int(4) -? +t1f_nm int(3) -MINUTE(a) +t1f_ps int(3) -? +t1e_nm int(4) ROUND(EXTRACT(MINUTE FROM a),-1) +t1e_ps int(4) ROUND(?,-1) +t1f_nm int(4) ROUND(MINUTE(a),-1) +t1f_ps int(4) ROUND(?,-1) +t1e_nm int(4) EXTRACT(MINUTE FROM a)+0 +t1e_ps int(4) ?+0 +t1f_nm int(4) MINUTE(a)+0 +t1f_ps int(4) ?+0 +t1e_nm decimal(4,1) EXTRACT(MINUTE FROM a)+0.0 +t1e_ps decimal(4,1) ?+0.0 +t1f_nm decimal(4,1) MINUTE(a)+0.0 +t1f_ps decimal(4,1) ?+0.0 +t1e_nm varchar(3) CONCAT(EXTRACT(MINUTE FROM a)) +t1e_ps varchar(3) CONCAT(?) +t1f_nm varchar(2) CONCAT(MINUTE(a)) +t1f_ps varchar(2) CONCAT(?) +t1e_nm int(3) LEAST(EXTRACT(MINUTE FROM a),EXTRACT(MINUTE FROM a)) +t1e_ps int(3) LEAST(?,?) +t1f_nm int(3) LEAST(MINUTE(a),MINUTE(a)) +t1f_ps int(3) LEAST(?,?) +t1e_nm int(3) COALESCE(EXTRACT(MINUTE FROM a)) +t1e_ps int(3) COALESCE(?) +t1f_nm int(3) COALESCE(MINUTE(a)) +t1f_ps int(3) COALESCE(?) 
+t1e_nm int(3) COALESCE(EXTRACT(MINUTE FROM a),CAST(1 AS SIGNED)) +t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(3) COALESCE(MINUTE(a),CAST(1 AS SIGNED)) +t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(2,0) COALESCE(EXTRACT(MINUTE FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(2,0) COALESCE(MINUTE(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(3) @a:=EXTRACT(MINUTE FROM a) +t1e_ps int(3) @a:=? +t1f_nm int(3) @a:=MINUTE(a) +t1f_ps int(3) @a:=? +t2e_nm decimal(6,4) AVG(EXTRACT(MINUTE FROM a)) +t2e_ps decimal(6,4) AVG(?) +t2f_nm decimal(6,4) AVG(MINUTE(a)) +t2f_ps decimal(6,4) AVG(?) +t2e_nm bigint(3) MIN(EXTRACT(MINUTE FROM a)) +t2e_ps bigint(3) MIN(?) +t2f_nm bigint(3) MIN(MINUTE(a)) +t2f_ps bigint(3) MIN(?) +t2e_nm bigint(3) MAX(EXTRACT(MINUTE FROM a)) +t2e_ps bigint(3) MAX(?) +t2f_nm bigint(3) MAX(MINUTE(a)) +t2f_ps bigint(3) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(MINUTE FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(MINUTE(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(SECONDS FROM expr) includes the sign for TIME +# SECONDS(expr) returns the absolute value +CALL p1('SECOND'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(SECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), SECOND(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(3) EXTRACT(SECOND FROM a) +t1e_ps int(3) ? +t1f_nm int(3) SECOND(a) +t1f_ps int(3) ? +t1e_nm bigint(20) unsigned CAST(EXTRACT(SECOND FROM a) AS UNSIGNED) +t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED) +t1f_nm int(2) unsigned CAST(SECOND(a) AS UNSIGNED) +t1f_ps int(2) unsigned CAST(? AS UNSIGNED) +t1e_nm int(3) CAST(EXTRACT(SECOND FROM a) AS SIGNED) +t1e_ps int(3) CAST(? AS SIGNED) +t1f_nm int(3) CAST(SECOND(a) AS SIGNED) +t1f_ps int(3) CAST(? AS SIGNED) +t1e_nm int(3) ABS(EXTRACT(SECOND FROM a)) +t1e_ps int(3) ABS(?) +t1f_nm int(3) ABS(SECOND(a)) +t1f_ps int(3) ABS(?) +t1e_nm int(3) ROUND(EXTRACT(SECOND FROM a)) +t1e_ps int(3) ROUND(?) +t1f_nm int(3) ROUND(SECOND(a)) +t1f_ps int(3) ROUND(?) +t1e_nm int(4) -EXTRACT(SECOND FROM a) +t1e_ps int(4) -? +t1f_nm int(3) -SECOND(a) +t1f_ps int(3) -? +t1e_nm int(4) ROUND(EXTRACT(SECOND FROM a),-1) +t1e_ps int(4) ROUND(?,-1) +t1f_nm int(4) ROUND(SECOND(a),-1) +t1f_ps int(4) ROUND(?,-1) +t1e_nm int(4) EXTRACT(SECOND FROM a)+0 +t1e_ps int(4) ?+0 +t1f_nm int(4) SECOND(a)+0 +t1f_ps int(4) ?+0 +t1e_nm decimal(4,1) EXTRACT(SECOND FROM a)+0.0 +t1e_ps decimal(4,1) ?+0.0 +t1f_nm decimal(4,1) SECOND(a)+0.0 +t1f_ps decimal(4,1) ?+0.0 +t1e_nm varchar(3) CONCAT(EXTRACT(SECOND FROM a)) +t1e_ps varchar(3) CONCAT(?) +t1f_nm varchar(2) CONCAT(SECOND(a)) +t1f_ps varchar(2) CONCAT(?) +t1e_nm int(3) LEAST(EXTRACT(SECOND FROM a),EXTRACT(SECOND FROM a)) +t1e_ps int(3) LEAST(?,?) +t1f_nm int(3) LEAST(SECOND(a),SECOND(a)) +t1f_ps int(3) LEAST(?,?) +t1e_nm int(3) COALESCE(EXTRACT(SECOND FROM a)) +t1e_ps int(3) COALESCE(?) +t1f_nm int(3) COALESCE(SECOND(a)) +t1f_ps int(3) COALESCE(?) 
+t1e_nm int(3) COALESCE(EXTRACT(SECOND FROM a),CAST(1 AS SIGNED)) +t1e_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(3) COALESCE(SECOND(a),CAST(1 AS SIGNED)) +t1f_ps int(3) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(2,0) COALESCE(EXTRACT(SECOND FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(2,0) COALESCE(SECOND(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(2,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(3) @a:=EXTRACT(SECOND FROM a) +t1e_ps int(3) @a:=? +t1f_nm int(3) @a:=SECOND(a) +t1f_ps int(3) @a:=? +t2e_nm decimal(6,4) AVG(EXTRACT(SECOND FROM a)) +t2e_ps decimal(6,4) AVG(?) +t2f_nm decimal(6,4) AVG(SECOND(a)) +t2f_ps decimal(6,4) AVG(?) +t2e_nm bigint(3) MIN(EXTRACT(SECOND FROM a)) +t2e_ps bigint(3) MIN(?) +t2f_nm bigint(3) MIN(SECOND(a)) +t2f_ps bigint(3) MIN(?) +t2e_nm bigint(3) MAX(EXTRACT(SECOND FROM a)) +t2e_ps bigint(3) MAX(?) +t2f_nm bigint(3) MAX(SECOND(a)) +t2f_ps bigint(3) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(SECOND FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(SECOND(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# EXTRACT(MICROSECONDS FROM expr) includes the sign for TIME +# MICROSECONDS(expr) returns the absolute value +CALL p1('MICROSECOND'); +EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' 
USING MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'), EXTRACT(MICROSECOND FROM TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'), MICROSECOND(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1e_nm int(7) EXTRACT(MICROSECOND FROM a) +t1e_ps int(7) ? +t1f_nm int(7) MICROSECOND(a) +t1f_ps int(7) ? +t1e_nm bigint(20) unsigned CAST(EXTRACT(MICROSECOND FROM a) AS UNSIGNED) +t1e_ps bigint(20) unsigned CAST(? AS UNSIGNED) +t1f_nm int(6) unsigned CAST(MICROSECOND(a) AS UNSIGNED) +t1f_ps int(6) unsigned CAST(? AS UNSIGNED) +t1e_nm int(7) CAST(EXTRACT(MICROSECOND FROM a) AS SIGNED) +t1e_ps int(7) CAST(? AS SIGNED) +t1f_nm int(7) CAST(MICROSECOND(a) AS SIGNED) +t1f_ps int(7) CAST(? AS SIGNED) +t1e_nm int(7) ABS(EXTRACT(MICROSECOND FROM a)) +t1e_ps int(7) ABS(?) +t1f_nm int(7) ABS(MICROSECOND(a)) +t1f_ps int(7) ABS(?) +t1e_nm int(7) ROUND(EXTRACT(MICROSECOND FROM a)) +t1e_ps int(7) ROUND(?) +t1f_nm int(7) ROUND(MICROSECOND(a)) +t1f_ps int(7) ROUND(?) +t1e_nm int(8) -EXTRACT(MICROSECOND FROM a) +t1e_ps int(8) -? +t1f_nm int(7) -MICROSECOND(a) +t1f_ps int(7) -? +t1e_nm int(8) ROUND(EXTRACT(MICROSECOND FROM a),-1) +t1e_ps int(8) ROUND(?,-1) +t1f_nm int(8) ROUND(MICROSECOND(a),-1) +t1f_ps int(8) ROUND(?,-1) +t1e_nm int(8) EXTRACT(MICROSECOND FROM a)+0 +t1e_ps int(8) ?+0 +t1f_nm int(8) MICROSECOND(a)+0 +t1f_ps int(8) ?+0 +t1e_nm decimal(8,1) EXTRACT(MICROSECOND FROM a)+0.0 +t1e_ps decimal(8,1) ?+0.0 +t1f_nm decimal(8,1) MICROSECOND(a)+0.0 +t1f_ps decimal(8,1) ?+0.0 +t1e_nm varchar(7) CONCAT(EXTRACT(MICROSECOND FROM a)) +t1e_ps varchar(7) CONCAT(?) +t1f_nm varchar(6) CONCAT(MICROSECOND(a)) +t1f_ps varchar(6) CONCAT(?) +t1e_nm int(7) LEAST(EXTRACT(MICROSECOND FROM a),EXTRACT(MICROSECOND FROM a)) +t1e_ps int(7) LEAST(?,?) +t1f_nm int(7) LEAST(MICROSECOND(a),MICROSECOND(a)) +t1f_ps int(7) LEAST(?,?) +t1e_nm int(7) COALESCE(EXTRACT(MICROSECOND FROM a)) +t1e_ps int(7) COALESCE(?) +t1f_nm int(7) COALESCE(MICROSECOND(a)) +t1f_ps int(7) COALESCE(?) 
+t1e_nm int(7) COALESCE(EXTRACT(MICROSECOND FROM a),CAST(1 AS SIGNED)) +t1e_ps int(7) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm int(7) COALESCE(MICROSECOND(a),CAST(1 AS SIGNED)) +t1f_ps int(7) COALESCE(?,CAST(1 AS SIGNED)) +t1e_nm decimal(6,0) COALESCE(EXTRACT(MICROSECOND FROM a),CAST(1 AS UNSIGNED)) +t1e_ps decimal(6,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm decimal(6,0) COALESCE(MICROSECOND(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(6,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1e_nm int(7) @a:=EXTRACT(MICROSECOND FROM a) +t1e_ps int(7) @a:=? +t1f_nm int(7) @a:=MICROSECOND(a) +t1f_ps int(7) @a:=? +t2e_nm decimal(10,4) AVG(EXTRACT(MICROSECOND FROM a)) +t2e_ps decimal(10,4) AVG(?) +t2f_nm decimal(10,4) AVG(MICROSECOND(a)) +t2f_ps decimal(10,4) AVG(?) +t2e_nm bigint(7) MIN(EXTRACT(MICROSECOND FROM a)) +t2e_ps bigint(7) MIN(?) +t2f_nm bigint(7) MIN(MICROSECOND(a)) +t2f_ps bigint(7) MIN(?) +t2e_nm bigint(7) MAX(EXTRACT(MICROSECOND FROM a)) +t2e_ps bigint(7) MAX(?) +t2f_nm bigint(7) MAX(MICROSECOND(a)) +t2f_ps bigint(7) MAX(?) +t2e_nm mediumtext GROUP_CONCAT(EXTRACT(MICROSECOND FROM a)) +t2e_ps mediumtext GROUP_CONCAT(?) +t2f_nm mediumtext GROUP_CONCAT(MICROSECOND(a)) +t2f_ps mediumtext GROUP_CONCAT(?) + + +# DAYOFYEAR +CALL p1('DAYOFYEAR'); +EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?' USING DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'); +EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?)' USING DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'), DAYOFYEAR(TIMESTAMP'2001-12-13 10:20:30.999999'); +CALL show_drop; +TABLE_NAME COLUMN_TYPE COLUMN_NAME +t1f_nm int(4) DAYOFYEAR(a) +t1f_ps int(4) ? +t1f_nm int(3) unsigned CAST(DAYOFYEAR(a) AS UNSIGNED) +t1f_ps int(3) unsigned CAST(? AS UNSIGNED) +t1f_nm int(4) CAST(DAYOFYEAR(a) AS SIGNED) +t1f_ps int(4) CAST(? AS SIGNED) +t1f_nm int(4) ABS(DAYOFYEAR(a)) +t1f_ps int(4) ABS(?) +t1f_nm int(4) ROUND(DAYOFYEAR(a)) +t1f_ps int(4) ROUND(?) +t1f_nm int(4) -DAYOFYEAR(a) +t1f_ps int(4) -? +t1f_nm int(5) ROUND(DAYOFYEAR(a),-1) +t1f_ps int(5) ROUND(?,-1) +t1f_nm int(5) DAYOFYEAR(a)+0 +t1f_ps int(5) ?+0 +t1f_nm decimal(5,1) DAYOFYEAR(a)+0.0 +t1f_ps decimal(5,1) ?+0.0 +t1f_nm varchar(3) CONCAT(DAYOFYEAR(a)) +t1f_ps varchar(3) CONCAT(?) +t1f_nm int(4) LEAST(DAYOFYEAR(a),DAYOFYEAR(a)) +t1f_ps int(4) LEAST(?,?) +t1f_nm int(4) COALESCE(DAYOFYEAR(a)) +t1f_ps int(4) COALESCE(?) 
+t1f_nm int(4) COALESCE(DAYOFYEAR(a),CAST(1 AS SIGNED)) +t1f_ps int(4) COALESCE(?,CAST(1 AS SIGNED)) +t1f_nm decimal(3,0) COALESCE(DAYOFYEAR(a),CAST(1 AS UNSIGNED)) +t1f_ps decimal(3,0) COALESCE(?,CAST(1 AS UNSIGNED)) +t1f_nm int(4) @a:=DAYOFYEAR(a) +t1f_ps int(4) @a:=? +t2f_nm decimal(7,4) AVG(DAYOFYEAR(a)) +t2f_ps decimal(7,4) AVG(?) +t2f_nm bigint(4) MIN(DAYOFYEAR(a)) +t2f_ps bigint(4) MIN(?) +t2f_nm bigint(4) MAX(DAYOFYEAR(a)) +t2f_ps bigint(4) MAX(?) +t2f_nm mediumtext GROUP_CONCAT(DAYOFYEAR(a)) +t2f_ps mediumtext GROUP_CONCAT(?) +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE show_drop; +DROP FUNCTION params; +DROP FUNCTION select01; +DROP FUNCTION select02; +# +# End of 10.5 tests +# diff --git a/mysql-test/main/func_extract.test b/mysql-test/main/func_extract.test index 97920f1872f..dd808443f58 100644 --- a/mysql-test/main/func_extract.test +++ b/mysql-test/main/func_extract.test @@ -263,3 +263,254 @@ SELECT FROM t1; DROP TABLE t1; --enable_view_protocol + + +--echo # +--echo # Start of 10.5 tests +--echo # + +--echo # +--echo # MDEV-33496 Out of range error in AVG(YEAR(datetime)) due to a wrong data type +--echo # + +let select01=SELECT ?, CAST(? AS UNSIGNED), CAST(? AS SIGNED), ABS(?), ROUND(?), -?, ROUND(?,-1), ?+0, ?+0.0, CONCAT(?), LEAST(?,?), COALESCE(?), COALESCE(?,CAST(1 AS SIGNED)), COALESCE(?,CAST(1 AS UNSIGNED)), @a:=?; +let pcount01=16; +let select02=SELECT AVG(?), MIN(?), MAX(?), GROUP_CONCAT(?); +let pcount02=4; +let ts=TIMESTAMP'2001-12-13 10:20:30.999999'; + +eval CREATE FUNCTION select01() RETURNS TEXT RETURN '$select01'; +eval CREATE FUNCTION select02() RETURNS TEXT RETURN '$select02'; + +CREATE TABLE t1 (a DATETIME(6)); +INSERT INTO t1 VALUES ('2001-12-31 10:20:30.999999'); + +DELIMITER $$; +CREATE FUNCTION params(expr TEXT, count INT) RETURNS TEXT +BEGIN + RETURN CONCAT(expr, REPEAT(CONCAT(', ', expr), count-1)); +END; +$$ +CREATE PROCEDURE show_drop() +BEGIN + SELECT TABLE_NAME, COLUMN_TYPE, COLUMN_NAME + FROM INFORMATION_SCHEMA.COLUMNS + WHERE TABLE_SCHEMA='test' + AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm', + 't1e_ps','t1f_ps','t2e_ps','t2f_ps') + ORDER BY LEFT(TABLE_NAME, 2), ORDINAL_POSITION, TABLE_NAME; + + FOR rec IN (SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES + WHERE TABLE_SCHEMA='test' + AND TABLE_NAME IN ('t1e_nm','t2e_nm','t1f_nm','t2f_nm', + 't1e_ps','t1f_ps','t2e_ps','t2f_ps')) + DO + EXECUTE IMMEDIATE CONCAT('DROP TABLE ', rec.TABLE_NAME); + END FOR; +END; +$$ +CREATE PROCEDURE p1(unit VARCHAR(32)) +BEGIN + DECLARE do_extract BOOL DEFAULT unit NOT IN('DAYOFYEAR'); + + DECLARE query01 TEXT DEFAULT + CONCAT('CREATE TABLE t2 AS ', select01(), ' FROM t1'); + + DECLARE query02 TEXT DEFAULT + CONCAT('CREATE TABLE t2 AS ', select02(), ' FROM t1'); + + IF (do_extract) + THEN + EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)')); + EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2e_nm'),'?', CONCAT('EXTRACT(',unit,' FROM a)')); + END IF; + EXECUTE IMMEDIATE REPLACE(REPLACE(query01,'t2','t1f_nm'),'?', CONCAT(unit,'(a)')); + EXECUTE IMMEDIATE REPLACE(REPLACE(query02,'t2','t2f_nm'),'?', CONCAT(unit,'(a)')); +END; +$$ +DELIMITER ;$$ + + +--echo +--echo +--echo # EXTRACT(YEAR FROM expr) and YEAR(expr) are equivalent + +CALL p1('YEAR'); +let extr=EXTRACT(YEAR FROM $ts); +let func=YEAR($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) 
AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + + +--echo +--echo +--echo # EXTRACT(QUARTER FROM expr) and QUARTER(expr) are equavalent + +CALL p1('QUARTER'); +let extr=EXTRACT(QUARTER FROM $ts); +let func=QUARTER($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(MONTH FROM expr) and MONTH(expr) are equavalent + +CALL p1('MONTH'); +let extr=EXTRACT(MONTH FROM $ts); +let func=MONTH($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(WEEK FROM expr) and WEEK(expr) are equavalent + +CALL p1('WEEK'); +let extr=EXTRACT(WEEK FROM $ts); +let func=WEEK($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(DAY FROM expr) returns hours/24 and includes the sign for TIME +--echo # DAY(expr) returns the DD part of CAST(expr AS DATETIME) + +CALL p1('DAY'); +let extr=EXTRACT(DAY FROM $ts); +let func=DAY($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(HOUR FROM expr) returns hours%24 and includes the sign for TIME +--echo # HOUR(expr) returns the hh part of CAST(expr AS DATETIME) + +CALL p1('HOUR'); +let extr=EXTRACT(HOUR FROM $ts); +let func=HOUR($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", 
$pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(MINUTE FROM expr) includes the sign for TIME +--echo # MINUTE(expr) returns the absolute value + +CALL p1('MINUTE'); +let extr=EXTRACT(MINUTE FROM $ts); +let func=MINUTE($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(SECONDS FROM expr) includes the sign for TIME +--echo # SECONDS(expr) returns the absolute value + +CALL p1('SECOND'); +let extr=EXTRACT(SECOND FROM $ts); +let func=SECOND($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # EXTRACT(MICROSECONDS FROM expr) includes the sign for TIME +--echo # MICROSECONDS(expr) returns the absolute value + +CALL p1('MICROSECOND'); +let extr=EXTRACT(MICROSECOND FROM $ts); +let func=MICROSECOND($ts); +let extr01=`SELECT params("$extr", $pcount01) AS p`; +let func01=`SELECT params("$func", $pcount01) AS p`; +let extr02=`SELECT params("$extr", $pcount02) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1e_ps AS $select01' USING $extr01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2e_ps AS $select02' USING $extr02; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + +--echo +--echo +--echo # DAYOFYEAR + +CALL p1('DAYOFYEAR'); +let func=DAYOFYEAR($ts); +let func01=`SELECT params("$func", $pcount01) AS p`; +let func02=`SELECT params("$func", $pcount02) AS p`; +eval EXECUTE IMMEDIATE 'CREATE TABLE t1f_ps AS $select01' USING $func01; +eval EXECUTE IMMEDIATE 'CREATE TABLE t2f_ps AS $select02' USING $func02; +CALL show_drop; + + +DROP TABLE t1; +DROP PROCEDURE p1; +DROP PROCEDURE show_drop; +DROP FUNCTION params; + +DROP FUNCTION select01; +DROP FUNCTION select02; + +--echo # +--echo # End of 10.5 tests +--echo # diff --git a/mysql-test/main/func_gconcat.result b/mysql-test/main/func_gconcat.result index fea25124941..f817a1176ea 100644 --- a/mysql-test/main/func_gconcat.result +++ b/mysql-test/main/func_gconcat.result @@ -1443,3 +1443,98 @@ drop table t1; # # End of 10.3 tests # +# +# MDEV-31276: Execution of PS from grouping query with join +# and GROUP_CONCAT set 
function +# +create table t1 (a int, b varchar(20)) engine=myisam; +create table t2 (a int, c varchar(20)) engine=myisam; +insert into t1 values (1,"aaaaaaaaaa"),(2,"bbbbbbbbbb"); +insert into t2 values (1,"cccccccccc"),(2,"dddddddddd"); +insert into t2 values (1,"eeeeeee"),(2,"fffffff"); +set group_concat_max_len=5; +select count(*), group_concat(t1.b,t2.c) +from t1 join t2 on t1.a=t2.a group by t1.a; +count(*) group_concat(t1.b,t2.c) +2 aaaaa +2 bbbbb +Warnings: +Warning 1260 Row 1 was cut by GROUP_CONCAT() +Warning 1260 Row 2 was cut by GROUP_CONCAT() +explain select count(*), group_concat(t1.b,t2.c) +from t1 join t2 on t1.a=t2.a group by t1.a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using temporary; Using filesort +1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where; Using join buffer (flat, BNL join) +prepare stmt from "select count(*), group_concat(t1.b,t2.c) +from t1 join t2 on t1.a=t2.a group by t1.a"; +execute stmt; +count(*) group_concat(t1.b,t2.c) +2 aaaaa +2 bbbbb +Warnings: +Warning 1260 Row 1 was cut by GROUP_CONCAT() +Warning 1260 Row 2 was cut by GROUP_CONCAT() +execute stmt; +count(*) group_concat(t1.b,t2.c) +2 aaaaa +2 bbbbb +Warnings: +Warning 1260 Row 1 was cut by GROUP_CONCAT() +Warning 1260 Row 2 was cut by GROUP_CONCAT() +deallocate prepare stmt; +set join_cache_level=0; +select count(*), group_concat(t1.b,t2.c) +from t1 join t2 on t1.a=t2.a group by t1.a; +count(*) group_concat(t1.b,t2.c) +2 aaaaa +2 bbbbb +Warnings: +Warning 1260 Row 1 was cut by GROUP_CONCAT() +Warning 1260 Row 2 was cut by GROUP_CONCAT() +explain select count(*), group_concat(t1.b,t2.c) +from t1 join t2 on t1.a=t2.a group by t1.a; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 Using filesort +1 SIMPLE t2 ALL NULL NULL NULL NULL 4 Using where +prepare stmt from "select count(*), group_concat(t1.b,t2.c) +from t1 join t2 on t1.a=t2.a group by t1.a"; +execute stmt; +count(*) group_concat(t1.b,t2.c) +2 aaaaa +2 bbbbb +Warnings: +Warning 1260 Row 1 was cut by GROUP_CONCAT() +Warning 1260 Row 2 was cut by GROUP_CONCAT() +execute stmt; +count(*) group_concat(t1.b,t2.c) +2 aaaaa +2 bbbbb +Warnings: +Warning 1260 Row 1 was cut by GROUP_CONCAT() +Warning 1260 Row 2 was cut by GROUP_CONCAT() +deallocate prepare stmt; +set join_cache_level=default; +set group_concat_max_len=default; +drop table t1,t2; +# +# MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion +# +SET NAMES utf8, @@collation_connection=latin1_swedish_ci; +CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET latin1; +INSERT INTO t1 VALUES ('a'),('A'); +CREATE OR REPLACE VIEW v1 AS +SELECT GROUP_CONCAT(c SEPARATOR 'ß') AS c1 FROM t1 GROUP BY c; +SELECT * FROM v1; +c1 +aßA +SELECT HEX(c1) FROM v1; +HEX(c1) +61DF41 +SHOW CREATE VIEW v1; +View Create View character_set_client collation_connection +v1 CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW `v1` AS select group_concat(`t1`.`c` separator 'ß') AS `c1` from `t1` group by `t1`.`c` utf8mb3 latin1_swedish_ci +DROP VIEW v1; +DROP TABLE t1; +SET NAMES latin1; +# End of 10.5 tests diff --git a/mysql-test/main/func_gconcat.test b/mysql-test/main/func_gconcat.test index cc5236a18be..c9787ce4471 100644 --- a/mysql-test/main/func_gconcat.test +++ b/mysql-test/main/func_gconcat.test @@ -1066,3 +1066,59 @@ drop table t1; --echo # --echo # End of 10.3 tests --echo # + +--echo # +--echo # MDEV-31276: Execution of PS from grouping query 
with join +--echo # and GROUP_CONCAT set function +--echo # + +create table t1 (a int, b varchar(20)) engine=myisam; +create table t2 (a int, c varchar(20)) engine=myisam; +insert into t1 values (1,"aaaaaaaaaa"),(2,"bbbbbbbbbb"); +insert into t2 values (1,"cccccccccc"),(2,"dddddddddd"); +insert into t2 values (1,"eeeeeee"),(2,"fffffff"); + +let $q= +select count(*), group_concat(t1.b,t2.c) + from t1 join t2 on t1.a=t2.a group by t1.a; + +set group_concat_max_len=5; + +eval $q; +eval explain $q; +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +set join_cache_level=0; + +eval $q; +eval explain $q; +eval prepare stmt from "$q"; +execute stmt; +execute stmt; +deallocate prepare stmt; + +set join_cache_level=default; +set group_concat_max_len=default; + +drop table t1,t2; + +--echo # +--echo # MDEV-33772 Bad SEPARATOR value in GROUP_CONCAT on character set conversion +--echo # + +SET NAMES utf8, @@collation_connection=latin1_swedish_ci; +CREATE TABLE t1 (c VARCHAR(10)) CHARACTER SET latin1; +INSERT INTO t1 VALUES ('a'),('A'); +CREATE OR REPLACE VIEW v1 AS + SELECT GROUP_CONCAT(c SEPARATOR 'ß') AS c1 FROM t1 GROUP BY c; +SELECT * FROM v1; +SELECT HEX(c1) FROM v1; +SHOW CREATE VIEW v1; +DROP VIEW v1; +DROP TABLE t1; +SET NAMES latin1; + +--echo # End of 10.5 tests diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index a209c243ef9..451fada51c3 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -272,7 +272,7 @@ create table t1 as select json_object('id', 87, 'name', 'carrot') as f; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `f` varchar(32) DEFAULT NULL + `f` varchar(46) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci select * from t1; f diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 17e20aa89fa..f1f4d141179 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -87,15 +87,12 @@ select json_extract('[10, 20, [30, 40], 1, 10]', '$[1]') as exp; select json_extract('[10, 20, [30, 40], 1, 10]', '$[1]', '$[25]') as exp; select json_extract( '[{"a": [3, 4]}, {"b": 2}]', '$[0].a', '$[1].a') as exp; -#enable after MDEV-32454 fix ---disable_view_protocol select json_insert('{"a":1, "b":{"c":1}, "d":[1, 2]}', '$.b.k1', 'word') as exp; select json_insert('{"a":1, "b":{"c":1}, "d":[1, 2]}', '$.d[3]', 3) as exp; select json_insert('{"a":1, "b":{"c":1}, "d":[1, 2]}', '$.a[2]', 2) as exp; select json_insert('{"a":1, "b":{"c":1}, "d":[1, 2]}', '$.b.c', 'word') as exp; select json_set('{ "a": 1, "b": [2, 3]}', '$.a', 10, '$.c', '[true, false]') as exp; ---enable_view_protocol select json_replace('{ "a": 1, "b": [2, 3]}', '$.a', 10, '$.c', '[true, false]') as exp; select json_replace('{ "a": 1, "b": [2, 3]}', '$.a', 10, '$.b', '[true, false]') as exp; @@ -137,14 +134,11 @@ select json_merge('a','b'); select json_merge('{"a":"b"}','{"c":"d"}'); SELECT JSON_MERGE('[1, 2]', '{"id": 47}'); -#enable after MDEV-32454 fix ---disable_view_protocol select json_type('{"k1":123, "k2":345}'); select json_type('[123, "k2", 345]'); select json_type("true"); select json_type('123'); select json_type('123.12'); ---enable_view_protocol select json_keys('{"a":{"c":1, "d":2}, "b":2}'); select json_keys('{"a":{"c":1, "d":2}, "b":2}', "$.a"); @@ -173,11 +167,8 @@ select json_search( json_col, 'all', 'foot' ) as ex from t1; drop table t1; -#enable after MDEV-32454 fix ---disable_view_protocol select 
json_unquote('"abc"'); select json_unquote('abc'); ---enable_view_protocol # # MDEV-13703 Illegal mix of collations for operation 'json_object' on using JSON_UNQUOTE as an argument. # @@ -188,13 +179,9 @@ select json_object('foo', json_unquote(json_object('bar', c)),'qux', c) as fld f drop table t1; -#enable after MDEV-32454 fix ---disable_view_protocol select json_object("a", json_object("b", "abcd")); select json_object("a", '{"b": "abcd"}'); select json_object("a", json_compact('{"b": "abcd"}')); ---enable_view_protocol - select json_compact(NULL); select json_depth(json_compact(NULL)); @@ -270,11 +257,8 @@ select json_merge('{"a":{"u":12, "x":"b"}}', '{"a":{"x":"c"}}') as ex ; select json_merge('{"a":{"u":12, "x":"b", "r":1}}', '{"a":{"x":"c", "r":2}}') as ex ; select json_compact('{"a":1, "b":[1,2,3], "c":{"aa":"v1", "bb": "v2"}}') as ex; -#enable after MDEV-32454 fix ---disable_view_protocol select json_loose('{"a":1, "b":[1,2,3], "c":{"aa":"v1", "bb": "v2"}}') as ex; select json_detailed('{"a":1, "b":[1,2,3], "c":{"aa":"v1", "bb": "v2"}}') as ex; ---enable_view_protocol # # MDEV-11856 json_search doesn't search for values with double quotes character (") @@ -469,12 +453,9 @@ drop table t1; --echo # MDEV-16750 JSON_SET mishandles unicode every second pair of arguments. --echo # -#enable after MDEV-32454 fix ---disable_view_protocol SELECT JSON_SET('{}', '$.a', _utf8 0xC3B6) as exp; SELECT JSON_SET('{}', '$.a', _utf8 0xC3B6, '$.b', _utf8 0xC3B6) as exp; SELECT JSON_SET('{}', '$.a', _utf8 X'C3B6', '$.x', 1, '$.b', _utf8 X'C3B6') as exp; ---enable_view_protocol --echo # --echo # MDEV-17121 JSON_ARRAY_APPEND diff --git a/mysql-test/main/func_regexp.result b/mysql-test/main/func_regexp.result index b883c8188df..e0a4702c095 100644 --- a/mysql-test/main/func_regexp.result +++ b/mysql-test/main/func_regexp.result @@ -110,7 +110,7 @@ R2 R3 deallocate prepare stmt1; drop table t1; -End of 4.1 tests +# End of 4.1 tests SELECT 1 REGEXP NULL; 1 REGEXP NULL NULL @@ -126,7 +126,7 @@ NULL SELECT "ABC" REGEXP BINARY NULL; "ABC" REGEXP BINARY NULL NULL -End of 5.0 tests +# End of 5.0 tests CREATE TABLE t1(a INT, b CHAR(4)); INSERT INTO t1 VALUES (1, '6.1'), (1, '7.0'), (1, '8.0'); PREPARE stmt1 FROM "SELECT a FROM t1 WHERE a=1 AND '7.0' REGEXP b LIMIT 1"; @@ -144,7 +144,7 @@ a 1 DEALLOCATE PREPARE stmt1; DROP TABLE t1; -End of 5.1 tests +# End of 5.1 tests SELECT ' ' REGEXP '[[:blank:]]'; ' ' REGEXP '[[:blank:]]' 1 @@ -163,3 +163,19 @@ SELECT '\t' REGEXP '[[:space:]]'; SELECT REGEXP_INSTR('111222333',2); REGEXP_INSTR('111222333',2) 4 +# End of 10.3 tests +# +# MDEV-33344 REGEXP empty string inconsistent +# +create table t1 (x char(5)); +insert t1 values (''), ('x'); +select 'foo' regexp x from t1 order by x asc; +'foo' regexp x +1 +0 +select 'foo' regexp x from t1 order by x desc; +'foo' regexp x +0 +1 +drop table t1; +# End of 10.5 tests diff --git a/mysql-test/main/func_regexp.test b/mysql-test/main/func_regexp.test index 6d5186269a5..b9e2ef197d6 100644 --- a/mysql-test/main/func_regexp.test +++ b/mysql-test/main/func_regexp.test @@ -55,7 +55,7 @@ execute stmt1 using @a; deallocate prepare stmt1; drop table t1; ---echo End of 4.1 tests +--echo # End of 4.1 tests # @@ -74,7 +74,7 @@ SELECT NULL REGEXP BINARY NULL; SELECT 'A' REGEXP BINARY NULL; SELECT "ABC" REGEXP BINARY NULL; ---echo End of 5.0 tests +--echo # End of 5.0 tests # @@ -91,7 +91,7 @@ DEALLOCATE PREPARE stmt1; DROP TABLE t1; ---echo End of 5.1 tests +--echo # End of 5.1 tests # # MDEV-5820 MySQL Bug #54805 definitions in regex/my_regex.h 
conflict with /usr/include/regex.h @@ -110,3 +110,15 @@ SELECT '\t' REGEXP '[[:space:]]'; --echo # SELECT REGEXP_INSTR('111222333',2); +--echo # End of 10.3 tests + +--echo # +--echo # MDEV-33344 REGEXP empty string inconsistent +--echo # +create table t1 (x char(5)); +insert t1 values (''), ('x'); +select 'foo' regexp x from t1 order by x asc; +select 'foo' regexp x from t1 order by x desc; +drop table t1; + +--echo # End of 10.5 tests diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result index a54089e46f6..32b56fea165 100644 --- a/mysql-test/main/func_str.result +++ b/mysql-test/main/func_str.result @@ -7,7 +7,7 @@ select 'hello',"'hello'",'""hello""','''h''e''l''l''o''',"hel""lo",'hel\'lo'; hello 'hello' ""hello"" 'h'e'l'l'o' hel"lo hel'lo hello 'hello' ""hello"" 'h'e'l'l'o' hel"lo hel'lo select 'hello' 'monty'; -hello +hellomonty hellomonty select length('\n\t\r\b\0\_\%\\'); length('\n\t\r\b\0\_\%\\') diff --git a/mysql-test/main/func_time.result b/mysql-test/main/func_time.result index ff9b91ad1d5..4fdcfe4dfea 100644 --- a/mysql-test/main/func_time.result +++ b/mysql-test/main/func_time.result @@ -3142,7 +3142,7 @@ Catalog Database Table Table_alias Column Column_alias Type Length Max length Is def test t1 t1 a a 12 26 26 Y 128 6 63 def EXTRACT(YEAR FROM a) 3 4 4 Y 32896 0 63 def EXTRACT(YEAR_MONTH FROM a) 3 6 6 Y 32896 0 63 -def EXTRACT(QUARTER FROM a) 3 2 1 Y 32896 0 63 +def EXTRACT(QUARTER FROM a) 3 1 1 Y 32896 0 63 def EXTRACT(MONTH FROM a) 3 2 2 Y 32896 0 63 def EXTRACT(WEEK FROM a) 3 2 2 Y 32896 0 63 def EXTRACT(DAY FROM a) 3 3 2 Y 32896 0 63 @@ -3230,11 +3230,11 @@ SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `a` datetime(6) DEFAULT NULL, - `EXTRACT(YEAR FROM a)` int(4) DEFAULT NULL, - `EXTRACT(YEAR_MONTH FROM a)` int(6) DEFAULT NULL, + `EXTRACT(YEAR FROM a)` int(5) DEFAULT NULL, + `EXTRACT(YEAR_MONTH FROM a)` int(7) DEFAULT NULL, `EXTRACT(QUARTER FROM a)` int(2) DEFAULT NULL, - `EXTRACT(MONTH FROM a)` int(2) DEFAULT NULL, - `EXTRACT(WEEK FROM a)` int(2) DEFAULT NULL, + `EXTRACT(MONTH FROM a)` int(3) DEFAULT NULL, + `EXTRACT(WEEK FROM a)` int(3) DEFAULT NULL, `EXTRACT(DAY FROM a)` int(3) DEFAULT NULL, `EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL, `EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL, @@ -3281,7 +3281,7 @@ Catalog Database Table Table_alias Column Column_alias Type Length Max length Is def test t1 t1 a a 11 17 17 Y 128 6 63 def EXTRACT(YEAR FROM a) 3 4 1 Y 32896 0 63 def EXTRACT(YEAR_MONTH FROM a) 3 6 1 Y 32896 0 63 -def EXTRACT(QUARTER FROM a) 3 2 1 Y 32896 0 63 +def EXTRACT(QUARTER FROM a) 3 1 1 Y 32896 0 63 def EXTRACT(MONTH FROM a) 3 2 1 Y 32896 0 63 def EXTRACT(WEEK FROM a) 3 2 9 Y 32896 0 63 def EXTRACT(DAY FROM a) 3 3 3 Y 32896 0 63 @@ -3411,11 +3411,11 @@ SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `a` time(6) DEFAULT NULL, - `EXTRACT(YEAR FROM a)` int(4) DEFAULT NULL, - `EXTRACT(YEAR_MONTH FROM a)` int(6) DEFAULT NULL, + `EXTRACT(YEAR FROM a)` int(5) DEFAULT NULL, + `EXTRACT(YEAR_MONTH FROM a)` int(7) DEFAULT NULL, `EXTRACT(QUARTER FROM a)` int(2) DEFAULT NULL, - `EXTRACT(MONTH FROM a)` int(2) DEFAULT NULL, - `EXTRACT(WEEK FROM a)` int(2) DEFAULT NULL, + `EXTRACT(MONTH FROM a)` int(3) DEFAULT NULL, + `EXTRACT(WEEK FROM a)` int(3) DEFAULT NULL, `EXTRACT(DAY FROM a)` int(3) DEFAULT NULL, `EXTRACT(DAY_HOUR FROM a)` int(5) DEFAULT NULL, `EXTRACT(DAY_MINUTE FROM a)` int(7) DEFAULT NULL, diff --git a/mysql-test/main/join.result b/mysql-test/main/join.result index a97bfbdc944..9a3d5ccc5e8 100644 --- 
a/mysql-test/main/join.result +++ b/mysql-test/main/join.result @@ -894,7 +894,7 @@ show status like 'Last_query_cost'; Variable_name Value Last_query_cost 0.011600 select 'The cost of accessing t1 (dont care if it changes' '^'; -The cost of accessing t1 (dont care if it changes +The cost of accessing t1 (dont care if it changes^ The cost of accessing t1 (dont care if it changes^ select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z; Z @@ -3435,6 +3435,62 @@ SELECT COUNT(*) FROM t1 LEFT JOIN (t2 LEFT JOIN t3 ON t2.b = t3.c) ON t1.a = t2. COUNT(*) 2 DROP TABLE t1, t2, t3; +# +# MDEV-30975: Wrong result with cross Join given join order +# +CREATE TABLE `t1` ( +`t1_seq` INT NOT NULL, +`c1` VARCHAR(10) NOT NULL , +PRIMARY KEY (`t1_seq`) USING BTREE +); +CREATE TABLE `t2` ( +`t2_seq` INT NOT NULL, +`t1_seq` INT NOT NULL, +`c2` VARCHAR(10) NOT NULL , +PRIMARY KEY (`t2_seq`, `t1_seq`) USING BTREE +); +INSERT INTO t1 VALUES(1, 'A'); +INSERT INTO t2 VALUES(1, 1, 'T2-1-1'); +INSERT INTO t2 VALUES(2, 1, 'T2-1-2'); +INSERT INTO t2 VALUES(3, 1, 'T2-1-3'); +SELECT LPAD(@rownum := @rownum + 1, 8, 0) AS str_num +, t1.t1_seq +, t2.t2_seq +, t1.c1 +, t2.c2 +FROM t1 +INNER JOIN t2 ON (t1.t1_seq = t2.t1_seq) +CROSS JOIN ( SELECT @rownum := 0 ) X; +str_num t1_seq t2_seq c1 c2 +00000001 1 1 A T2-1-1 +00000002 1 2 A T2-1-2 +00000003 1 3 A T2-1-3 +SELECT STRAIGHT_JOIN LPAD(@rownum := @rownum + 1, 8, 0) AS str_num +, t1.t1_seq +, t2.t2_seq +, t1.c1 +, t2.c2 +FROM t1 +INNER JOIN t2 ON (t1.t1_seq = t2.t1_seq) +CROSS JOIN ( SELECT @rownum := 0 ) X; +str_num t1_seq t2_seq c1 c2 +00000001 1 1 A T2-1-1 +00000002 1 2 A T2-1-2 +00000003 1 3 A T2-1-3 +SELECT STRAIGHT_JOIN * FROM t1 JOIN t2 ON (t1.t1_seq = t2.t1_seq) JOIN (SELECT @a := 0) x; +t1_seq c1 t2_seq t1_seq c2 @a := 0 +1 A 1 1 T2-1-1 0 +1 A 2 1 T2-1-2 0 +1 A 3 1 T2-1-3 0 +SELECT * FROM t1 JOIN t2 ON (t1.t1_seq = t2.t1_seq) JOIN (SELECT @a := 0) x; +t1_seq c1 t2_seq t1_seq c2 @a := 0 +1 A 1 1 T2-1-1 0 +1 A 2 1 T2-1-2 0 +1 A 3 1 T2-1-3 0 +SELECT STRAIGHT_JOIN c1 FROM t1 JOIN (SELECT @a := 0) x; +c1 +A +DROP TABLE t1, t2; # End of 10.5 tests # # MDEV-31449: Assertion s->table->opt_range_condition_rows <= s->found_records diff --git a/mysql-test/main/join.test b/mysql-test/main/join.test index 4168325046f..7a8c6a09732 100644 --- a/mysql-test/main/join.test +++ b/mysql-test/main/join.test @@ -1842,6 +1842,52 @@ SELECT * FROM t1 LEFT JOIN (t2 LEFT JOIN t3 ON t2.b = t3.c) ON t1.a = t2.b; SELECT COUNT(*) FROM t1 LEFT JOIN (t2 LEFT JOIN t3 ON t2.b = t3.c) ON t1.a = t2.b; DROP TABLE t1, t2, t3; +--echo # +--echo # MDEV-30975: Wrong result with cross Join given join order +--echo # + +CREATE TABLE `t1` ( + `t1_seq` INT NOT NULL, + `c1` VARCHAR(10) NOT NULL , + PRIMARY KEY (`t1_seq`) USING BTREE +); + +CREATE TABLE `t2` ( + `t2_seq` INT NOT NULL, + `t1_seq` INT NOT NULL, + `c2` VARCHAR(10) NOT NULL , + PRIMARY KEY (`t2_seq`, `t1_seq`) USING BTREE +); + +INSERT INTO t1 VALUES(1, 'A'); +INSERT INTO t2 VALUES(1, 1, 'T2-1-1'); +INSERT INTO t2 VALUES(2, 1, 'T2-1-2'); +INSERT INTO t2 VALUES(3, 1, 'T2-1-3'); + +SELECT LPAD(@rownum := @rownum + 1, 8, 0) AS str_num + , t1.t1_seq + , t2.t2_seq + , t1.c1 + , t2.c2 + FROM t1 + INNER JOIN t2 ON (t1.t1_seq = t2.t1_seq) + CROSS JOIN ( SELECT @rownum := 0 ) X; + +SELECT STRAIGHT_JOIN LPAD(@rownum := @rownum + 1, 8, 0) AS str_num + , t1.t1_seq + , t2.t2_seq + , t1.c1 + , t2.c2 + FROM t1 + INNER JOIN t2 ON (t1.t1_seq = t2.t1_seq) + CROSS JOIN ( SELECT @rownum := 0 ) X; + +SELECT STRAIGHT_JOIN * FROM t1 JOIN t2 ON (t1.t1_seq 
= t2.t1_seq) JOIN (SELECT @a := 0) x; +SELECT * FROM t1 JOIN t2 ON (t1.t1_seq = t2.t1_seq) JOIN (SELECT @a := 0) x; +SELECT STRAIGHT_JOIN c1 FROM t1 JOIN (SELECT @a := 0) x; + +DROP TABLE t1, t2; + --echo # End of 10.5 tests --echo # diff --git a/mysql-test/main/join_outer.test b/mysql-test/main/join_outer.test index 72fffd4fadf..a9292abd5bb 100644 --- a/mysql-test/main/join_outer.test +++ b/mysql-test/main/join_outer.test @@ -685,15 +685,9 @@ create table t1 (a int, b varchar(20)); create table t2 (a int, c varchar(20)); insert into t1 values (1,"aaaaaaaaaa"),(2,"bbbbbbbbbb"); insert into t2 values (1,"cccccccccc"),(2,"dddddddddd"); -#Enable after fix MDEV-31276 ---disable_ps2_protocol select group_concat(t1.b,t2.c) from t1 left join t2 using(a) group by t1.a; ---enable_ps2_protocol select group_concat(t1.b,t2.c) from t1 inner join t2 using(a) group by t1.a; -#Enable after fix MDEV-31276 ---disable_ps2_protocol select group_concat(t1.b,t2.c) from t1 left join t2 using(a) group by a; ---enable_ps2_protocol select group_concat(t1.b,t2.c) from t1 inner join t2 using(a) group by a; drop table t1, t2; set group_concat_max_len=default; @@ -2372,11 +2366,8 @@ create view v1 as select * from t1 left join ( select 'Y' AS Voted, ElectionID from t2 ) AS T on T.ElectionID = t1.Election limit 9; -#enable after fix MDEV-31277 ---disable_ps2_protocol # limit X causes merge algorithm select as opposed to temp table select * from v1; ---enable_ps2_protocol drop table t1, t2; drop view v1; @@ -2391,10 +2382,7 @@ create view v10 as select *, 'U' as u from t10 left join (select 'Y' as y, t20.b create table t30 (c int); insert into t30 values (1),(3); create view v20 as select * from t30 left join (select 'X' as x, v10.u, v10.y, v10.b from v10) dt2 on t30.c=dt2.b limit 6; -#check after fix MDEV-31277 ---disable_ps2_protocol select * from v20 limit 9; ---enable_ps2_protocol drop view v10, v20; drop table t10, t20, t30; @@ -2408,8 +2396,6 @@ insert into t3 values (3),(1); create table t1 (a int); insert into t1 values (1),(2),(7),(1); -#check after fix MDEV-31277 ---disable_ps2_protocol select * from ( select * from @@ -2422,7 +2408,6 @@ select * from on dt1.a=dt2.b limit 9 ) dt; ---enable_ps2_protocol ## Same as dt3 above create view v3(x,c) as select * from (select 'X' as x, t3.c from t3) dt3; @@ -2436,10 +2421,7 @@ create view v0(y,b,x,c) as select * from v2 left join v3 on v2.b=v3.c; # Same as above select statement create view v1 as select 'Z' as z, t1.a, v0.* from t1 left join v0 on t1.a=v0.b limit 9; -#check after fix MDEV-31277 ---disable_ps2_protocol select * from v1; ---enable_ps2_protocol set statement join_cache_level=0 for select * from v1; diff --git a/mysql-test/main/kill_processlist-6619.result b/mysql-test/main/kill_processlist-6619.result index 7dd42790cc7..25831a1f63b 100644 --- a/mysql-test/main/kill_processlist-6619.result +++ b/mysql-test/main/kill_processlist-6619.result @@ -1,4 +1,8 @@ +SET DEBUG_SYNC='dispatch_command_end SIGNAL ready WAIT_FOR go'; +select 1; connect con1,localhost,root,,; +SET DEBUG_SYNC='now wait_for ready'; +SET DEBUG_SYNC='now signal go'; SHOW PROCESSLIST; Id User Host db Command Time State Info Progress # root # test Sleep # # NULL 0.000 @@ -6,6 +10,8 @@ Id User Host db Command Time State Info Progress SET DEBUG_SYNC='before_execute_sql_command SIGNAL ready WAIT_FOR go'; SHOW PROCESSLIST; connection default; +1 +1 SET DEBUG_SYNC='now WAIT_FOR ready'; KILL QUERY con_id; SET DEBUG_SYNC='now SIGNAL go'; diff --git a/mysql-test/main/kill_processlist-6619.test 
b/mysql-test/main/kill_processlist-6619.test index c272e68a877..7330c79acd8 100644 --- a/mysql-test/main/kill_processlist-6619.test +++ b/mysql-test/main/kill_processlist-6619.test @@ -4,7 +4,14 @@ --source include/not_embedded.inc --source include/have_debug_sync.inc +# This is to ensure that the following SHOW PROCESSLIST does not show the query +SET DEBUG_SYNC='dispatch_command_end SIGNAL ready WAIT_FOR go'; +--send select 1 + --connect (con1,localhost,root,,) +SET DEBUG_SYNC='now wait_for ready'; +SET DEBUG_SYNC='now signal go'; + --let $con_id = `SELECT CONNECTION_ID()` --replace_result Execute Query --replace_column 1 # 3 # 6 # 7 # @@ -12,6 +19,8 @@ SHOW PROCESSLIST; SET DEBUG_SYNC='before_execute_sql_command SIGNAL ready WAIT_FOR go'; send SHOW PROCESSLIST; --connection default +--reap + # We must wait for the SHOW PROCESSLIST query to have started before sending # the kill. Otherwise, the KILL may be lost since it is reset at the start of # query execution. diff --git a/mysql-test/main/lock_sync.result b/mysql-test/main/lock_sync.result index af3fbe8a784..321f5dff06d 100644 --- a/mysql-test/main/lock_sync.result +++ b/mysql-test/main/lock_sync.result @@ -884,6 +884,6 @@ set debug_sync= 'now wait_for s1'; SELECT * FROM ( SELECT * FROM v1 ) sq; COMMIT; DROP VIEW v1; -DROP FUNCTION f; +DROP FUNCTION IF EXISTS f; DROP TABLE t1, t2; set debug_sync= 'reset'; diff --git a/mysql-test/main/lock_sync.test b/mysql-test/main/lock_sync.test index 0d15f8459b6..51247a51067 100644 --- a/mysql-test/main/lock_sync.test +++ b/mysql-test/main/lock_sync.test @@ -1219,6 +1219,6 @@ COMMIT; --disconnect con2 --connection default DROP VIEW v1; -DROP FUNCTION f; +DROP FUNCTION IF EXISTS f; DROP TABLE t1, t2; set debug_sync= 'reset'; diff --git a/mysql-test/main/mysql-interactive.test b/mysql-test/main/mysql-interactive.test index 2015e9d667d..0051d8e58c6 100644 --- a/mysql-test/main/mysql-interactive.test +++ b/mysql-test/main/mysql-interactive.test @@ -3,6 +3,8 @@ --echo # source include/not_embedded.inc; source include/not_windows.inc; +# this would need an instrumented ncurses library +source include/not_msan.inc; error 0,1; exec $MYSQL -V|grep -q readline; diff --git a/mysql-test/main/mysql_client_test.result b/mysql-test/main/mysql_client_test.result index e32053e50f3..59e82d7afc3 100644 --- a/mysql-test/main/mysql_client_test.result +++ b/mysql-test/main/mysql_client_test.result @@ -261,3 +261,4 @@ SET @@global.character_set_server= @save_character_set_server; SET @@global.collation_server= @save_collation_server; SET @@global.character_set_client= @save_character_set_client; SET @@global.collation_connection= @save_collation_connection; +FOUND 1 /Aborted connection.*'u' host: '192.0.2.1' real ip: '(localhost|::1)'/ in mysqld.1.err diff --git a/mysql-test/main/mysql_client_test.test b/mysql-test/main/mysql_client_test.test index bcb246a8a06..0de459bd316 100644 --- a/mysql-test/main/mysql_client_test.test +++ b/mysql-test/main/mysql_client_test.test @@ -57,3 +57,9 @@ SET @@global.character_set_server= @save_character_set_server; SET @@global.collation_server= @save_collation_server; SET @@global.character_set_client= @save_character_set_client; SET @@global.collation_connection= @save_collation_connection; + +# Search for "real ip" in Aborted message +# This is indicator for abort of the proxied connections. 
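The pattern set up on the following lines is matched against the server error log to confirm that an aborted proxied connection reports both the client address taken from the proxy header and the real peer address. As a rough sketch of the kind of log line the regexp is expected to match (the exact message wording is an assumption and is not part of this patch; only the quoted SEARCH_PATTERN is authoritative):

  [Warning] Aborted connection 10 to db: 'test' user: 'u' host: '192.0.2.1' real ip: 'localhost' (...)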
+let SEARCH_FILE=$MYSQLTEST_VARDIR/log/mysqld.1.err; +let SEARCH_PATTERN= Aborted connection.*'u' host: '192.0.2.1' real ip: '(localhost|::1)'; +source include/search_pattern_in_file.inc; diff --git a/mysql-test/main/mysqld--help.result b/mysql-test/main/mysqld--help.result index 74e80cba71b..90f75e75f55 100644 --- a/mysql-test/main/mysqld--help.result +++ b/mysql-test/main/mysqld--help.result @@ -1926,7 +1926,7 @@ slave-run-triggers-for-rbr NO slave-skip-errors OFF slave-sql-verify-checksum TRUE slave-transaction-retries 10 -slave-transaction-retry-errors 1158,1159,1160,1161,1205,1213,1429,2013,12701 +slave-transaction-retry-errors 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701 slave-transaction-retry-interval 0 slave-type-conversions slow-launch-time 2 diff --git a/mysql-test/main/mysqld_option_err.result b/mysql-test/main/mysqld_option_err.result index 4afcc5e0cb1..e2c7b0bd213 100644 --- a/mysql-test/main/mysqld_option_err.result +++ b/mysql-test/main/mysqld_option_err.result @@ -3,6 +3,14 @@ Test bad binlog format. Test bad default storage engine. Test non-numeric value passed to number option. Test that bad value for plugin enum option is rejected correctly. +Test to see if multiple unknown options will be displayed in the error output +unknown option '--nonexistentoption' +unknown option '--alsononexistent' +unknown variable 'nonexistentvariable=1' +Test to see if multiple ambiguous options and invalid arguments will be displayed in the error output +Error while setting value 'invalid_value' to 'sql_mode' +ambiguous option '--character' (character-set-client-handshake, character_sets_dir) +option '--bootstrap' cannot take an argument Test that --help --verbose works Test that --not-known-option --help --verbose gives error Done. diff --git a/mysql-test/main/mysqld_option_err.test b/mysql-test/main/mysqld_option_err.test index e9655fd4bfe..ad4df61b0f8 100644 --- a/mysql-test/main/mysqld_option_err.test +++ b/mysql-test/main/mysqld_option_err.test @@ -46,6 +46,18 @@ mkdir $MYSQLTEST_VARDIR/tmp/mysqld_option_err; --error 7 --exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --plugin-dir=$MYSQLTEST_VARDIR/plugins --plugin-load=example=ha_example.so --plugin-example-enum-var=noexist >>$MYSQLTEST_VARDIR/tmp/mysqld_option_err/mysqltest.log 2>&1 +--echo Test to see if multiple unknown options will be displayed in the error output +# Remove the noise to make the test robust +--replace_regex /^((?!nonexistent).)*$// /.*unknown/unknown/ +--error 7 +--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --nonexistentoption --alsononexistent --nonexistentvariable=1 2>&1 + +--echo Test to see if multiple ambiguous options and invalid arguments will be displayed in the error output +# Remove the noise to make the test robust +--replace_regex /^((?!('sql_mode'|'--character'|'--bootstrap')).)*$// /.*Error while setting value/Error while setting value/ /.*ambiguous option/ambiguous option/ /.*option '--bootstrap'/option '--bootstrap'/ +--error 1 +--exec $MYSQLD_BOOTSTRAP_CMD --skip-networking --datadir=$MYSQLTEST_VARDIR/tmp/mysqld_option_err --skip-grant-tables --getopt-prefix-matching --sql-mode=invalid_value --character --bootstrap=partstoob 2>&1 + # # Test that an wrong option with --help --verbose gives an error # diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index e24eb1e838b..f1fa39faae5 100644 --- a/mysql-test/main/ps.result +++ 
b/mysql-test/main/ps.result @@ -5926,5 +5926,55 @@ a b 2 30 DROP TABLE t1, t2; # +# MDEV-33549: Incorrect handling of UPDATE in PS mode in case a table's colum declared as NOT NULL +# +CREATE TABLE t1 (a INT, b INT DEFAULT NULL); +INSERT INTO t1 VALUES (20, 30); +EXECUTE IMMEDIATE 'UPDATE t1 SET b=?' USING DEFAULT; +SELECT * FROM t1; +a b +20 NULL +# Run twice the same update in PS mode to check +# that no memory relating issues taken place. +PREPARE stmt FROM 'UPDATE t1 SET b=?'; +EXECUTE stmt USING DEFAULT; +EXECUTE stmt USING DEFAULT; +# Clean up +DEALLOCATE PREPARE stmt; +DROP TABLE t1; +# The same test for multi-table update +CREATE TABLE t1 (a INT, b INT DEFAULT NULL); +CREATE TABLE t2 (a INT, c INT DEFAULT NULL); +INSERT INTO t1 VALUES (20, 30); +INSERT INTO t2 VALUES (20, 30); +EXECUTE IMMEDIATE 'UPDATE t1,t2 SET b=? WHERE t1.a=t2.a' USING DEFAULT; +SELECT * FROM t1; +a b +20 NULL +# Run twice the same multi-table update in PS mode to check +# that no memory relating issues taken place. +PREPARE stmt FROM 'UPDATE t1,t2 SET b=? WHERE t1.a=t2.a'; +EXECUTE stmt USING DEFAULT; +EXECUTE stmt USING DEFAULT; +DEALLOCATE PREPARE stmt; +# Clean up +DROP TABLE t1; +# This time checks that a default value for table's column +# represented by a function call is handled correctly on UPDATE in PS mode +CREATE TABLE t1 (a INT, b INT DEFAULT MOD(a, 3)); +INSERT INTO t1 VALUES (20, 30); +EXECUTE IMMEDIATE 'UPDATE t1, t2 SET b=? WHERE t1.a=t2.a' USING DEFAULT; +SELECT * FROM t1; +a b +20 2 +# Run twice the same multi-table update in PS mode to check +# that no memory relating issues taken place. +PREPARE stmt FROM 'UPDATE t1, t2 SET b=? WHERE t1.a=t2.a'; +EXECUTE stmt USING DEFAULT; +EXECUTE stmt USING DEFAULT; +# Clean up +DEALLOCATE PREPARE stmt; +DROP TABLE t1, t2; +# # End of 10.4 tests # diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test index 30df8dcec32..a360d0efd0f 100644 --- a/mysql-test/main/ps.test +++ b/mysql-test/main/ps.test @@ -5367,6 +5367,60 @@ SELECT * FROM t2; # Cleanup DROP TABLE t1, t2; +--echo # +--echo # MDEV-33549: Incorrect handling of UPDATE in PS mode in case a table's colum declared as NOT NULL +--echo # + +CREATE TABLE t1 (a INT, b INT DEFAULT NULL); +INSERT INTO t1 VALUES (20, 30); +EXECUTE IMMEDIATE 'UPDATE t1 SET b=?' USING DEFAULT; +SELECT * FROM t1; + +--echo # Run twice the same update in PS mode to check +--echo # that no memory relating issues taken place. +PREPARE stmt FROM 'UPDATE t1 SET b=?'; +EXECUTE stmt USING DEFAULT; +EXECUTE stmt USING DEFAULT; + +--echo # Clean up +DEALLOCATE PREPARE stmt; +DROP TABLE t1; + +--echo # The same test for multi-table update +CREATE TABLE t1 (a INT, b INT DEFAULT NULL); +CREATE TABLE t2 (a INT, c INT DEFAULT NULL); + +INSERT INTO t1 VALUES (20, 30); +INSERT INTO t2 VALUES (20, 30); + +EXECUTE IMMEDIATE 'UPDATE t1,t2 SET b=? WHERE t1.a=t2.a' USING DEFAULT; +SELECT * FROM t1; +--echo # Run twice the same multi-table update in PS mode to check +--echo # that no memory relating issues taken place. +PREPARE stmt FROM 'UPDATE t1,t2 SET b=? WHERE t1.a=t2.a'; +EXECUTE stmt USING DEFAULT; +EXECUTE stmt USING DEFAULT; +DEALLOCATE PREPARE stmt; +--echo # Clean up +DROP TABLE t1; + +--echo # This time checks that a default value for table's column +--echo # represented by a function call is handled correctly on UPDATE in PS mode +CREATE TABLE t1 (a INT, b INT DEFAULT MOD(a, 3)); +INSERT INTO t1 VALUES (20, 30); +EXECUTE IMMEDIATE 'UPDATE t1, t2 SET b=? 
WHERE t1.a=t2.a' USING DEFAULT; +SELECT * FROM t1; + +--echo # Run twice the same multi-table update in PS mode to check +--echo # that no memory relating issues taken place. +PREPARE stmt FROM 'UPDATE t1, t2 SET b=? WHERE t1.a=t2.a'; +EXECUTE stmt USING DEFAULT; +EXECUTE stmt USING DEFAULT; + +--echo # Clean up +DEALLOCATE PREPARE stmt; +DROP TABLE t1, t2; + --echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/main/ps_2myisam.result b/mysql-test/main/ps_2myisam.result index 256665ce4cb..ec365498833 100644 --- a/mysql-test/main/ps_2myisam.result +++ b/mysql-test/main/ps_2myisam.result @@ -1798,7 +1798,7 @@ t5 CREATE TABLE `t5` ( `param09` longtext DEFAULT NULL, `const10` bigint(17) DEFAULT NULL, `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, + `const11` int(5) DEFAULT NULL, `param11` bigint(20) DEFAULT NULL, `const12` binary(0) DEFAULT NULL, `param12` bigint(20) DEFAULT NULL, @@ -1828,7 +1828,7 @@ def test t5 t5 const09 const09 12 19 19 Y 128 0 63 def test t5 t5 param09 param09 252 4294967295 19 Y 16 0 8 def test t5 t5 const10 const10 8 17 9 Y 32768 0 63 def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 +def test t5 t5 const11 const11 3 5 4 Y 32768 0 63 def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 def test t5 t5 const12 const12 254 0 0 Y 128 0 63 def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 diff --git a/mysql-test/main/ps_3innodb.result b/mysql-test/main/ps_3innodb.result index 675587e020a..1c98a59cff5 100644 --- a/mysql-test/main/ps_3innodb.result +++ b/mysql-test/main/ps_3innodb.result @@ -1781,7 +1781,7 @@ t5 CREATE TABLE `t5` ( `param09` longtext DEFAULT NULL, `const10` bigint(17) DEFAULT NULL, `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, + `const11` int(5) DEFAULT NULL, `param11` bigint(20) DEFAULT NULL, `const12` binary(0) DEFAULT NULL, `param12` bigint(20) DEFAULT NULL, @@ -1811,7 +1811,7 @@ def test t5 t5 const09 const09 12 19 19 Y 128 0 63 def test t5 t5 param09 param09 252 4294967295 19 Y 16 0 8 def test t5 t5 const10 const10 8 17 9 Y 32768 0 63 def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 +def test t5 t5 const11 const11 3 5 4 Y 32768 0 63 def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 def test t5 t5 const12 const12 254 0 0 Y 128 0 63 def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 diff --git a/mysql-test/main/ps_4heap.result b/mysql-test/main/ps_4heap.result index dcde7613bfe..db182536e93 100644 --- a/mysql-test/main/ps_4heap.result +++ b/mysql-test/main/ps_4heap.result @@ -1782,7 +1782,7 @@ t5 CREATE TABLE `t5` ( `param09` longtext DEFAULT NULL, `const10` bigint(17) DEFAULT NULL, `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, + `const11` int(5) DEFAULT NULL, `param11` bigint(20) DEFAULT NULL, `const12` binary(0) DEFAULT NULL, `param12` bigint(20) DEFAULT NULL, @@ -1812,7 +1812,7 @@ def test t5 t5 const09 const09 12 19 19 Y 128 0 63 def test t5 t5 param09 param09 252 4294967295 19 Y 16 0 8 def test t5 t5 const10 const10 8 17 9 Y 32768 0 63 def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 +def test t5 t5 const11 const11 3 5 4 Y 32768 0 63 def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 def test t5 t5 const12 const12 254 0 0 Y 128 0 63 def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 diff --git a/mysql-test/main/ps_5merge.result b/mysql-test/main/ps_5merge.result index c9d33dbb1ae..963a3a60359 100644 --- 
a/mysql-test/main/ps_5merge.result +++ b/mysql-test/main/ps_5merge.result @@ -1719,7 +1719,7 @@ t5 CREATE TABLE `t5` ( `param09` longtext DEFAULT NULL, `const10` bigint(17) DEFAULT NULL, `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, + `const11` int(5) DEFAULT NULL, `param11` bigint(20) DEFAULT NULL, `const12` binary(0) DEFAULT NULL, `param12` bigint(20) DEFAULT NULL, @@ -1749,7 +1749,7 @@ def test t5 t5 const09 const09 12 19 19 Y 128 0 63 def test t5 t5 param09 param09 252 4294967295 19 Y 16 0 8 def test t5 t5 const10 const10 8 17 9 Y 32768 0 63 def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 +def test t5 t5 const11 const11 3 5 4 Y 32768 0 63 def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 def test t5 t5 const12 const12 254 0 0 Y 128 0 63 def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 @@ -5087,7 +5087,7 @@ t5 CREATE TABLE `t5` ( `param09` longtext DEFAULT NULL, `const10` bigint(17) DEFAULT NULL, `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, + `const11` int(5) DEFAULT NULL, `param11` bigint(20) DEFAULT NULL, `const12` binary(0) DEFAULT NULL, `param12` bigint(20) DEFAULT NULL, @@ -5117,7 +5117,7 @@ def test t5 t5 const09 const09 12 19 19 Y 128 0 63 def test t5 t5 param09 param09 252 4294967295 19 Y 16 0 8 def test t5 t5 const10 const10 8 17 9 Y 32768 0 63 def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 +def test t5 t5 const11 const11 3 5 4 Y 32768 0 63 def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 def test t5 t5 const12 const12 254 0 0 Y 128 0 63 def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 diff --git a/mysql-test/main/rpl_mysqldump_slave.result b/mysql-test/main/rpl_mysqldump_slave.result index 0d0378abd5b..190e3c26e80 100644 --- a/mysql-test/main/rpl_mysqldump_slave.result +++ b/mysql-test/main/rpl_mysqldump_slave.result @@ -219,4 +219,8 @@ connection master; -- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=BINLOG_START; connection slave; include/start_slave.inc +connection master; +connection slave; +connection master; +FOUND 1 matches in MDEV-33212.sql include/rpl_end.inc diff --git a/mysql-test/main/rpl_mysqldump_slave.test b/mysql-test/main/rpl_mysqldump_slave.test index 9dbee604520..75bb85dbe4b 100644 --- a/mysql-test/main/rpl_mysqldump_slave.test +++ b/mysql-test/main/rpl_mysqldump_slave.test @@ -198,4 +198,20 @@ if ($postdump_first_binary_log_filename != $postdump_binlog_filename) connection slave; --source include/start_slave.inc +# MDEV-33212: mysqldump uses MASTER_LOG_POS with dump-slave +# The bug was that the MASTER_LOG_POS was wrong. So check that it is correct. 
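In other words, once the slave is in sync, the Position reported by SHOW MASTER STATUS on the master should be exactly the value that mysqldump --dump-slave writes into its CHANGE MASTER clause. A minimal sketch of the dump line being checked (the file name follows the shape shown in the result file above; the concrete position is what the search below asserts, so it is left as a placeholder):

  -- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=<Position from SHOW MASTER STATUS on the master>;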
+--connection master +--let $pos= query_get_value(SHOW MASTER STATUS, Position, 1) +--sync_slave_with_master +--connection master +--exec $MYSQL_DUMP_SLAVE --compact --dump-slave test >$MYSQLTEST_VARDIR/tmp/MDEV-33212.sql +--let SEARCH_RANGE=500000000 +--let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/MDEV-33212.sql +--let SEARCH_PATTERN= MASTER_LOG_POS=$pos +--let SEARCH_OUTPUT=count +--source include/search_pattern_in_file.inc + +--remove_file $MYSQLTEST_VARDIR/tmp/MDEV-33212.sql + + --source include/rpl_end.inc diff --git a/mysql-test/main/show_explain_json.result b/mysql-test/main/show_explain_json.result index 4a21528e41d..92741b8aead 100644 --- a/mysql-test/main/show_explain_json.result +++ b/mysql-test/main/show_explain_json.result @@ -563,9 +563,12 @@ SET debug_dbug=@old_debug; # Try to do SHOW EXPLAIN for a query that runs a SET command: # I've found experimentally that select_id==2 here... # -set @show_explain_probe_select_id=2; +create table t2 (a int); +insert into t2 values (1),(2); +set @show_explain_probe_select_id=3; SET debug_dbug='+d,show_explain_probe_join_exec_start'; -set @foo= (select max(a) from t0 where sin(a) >0); +set @foo= (select max(a) from t2 +where a + (select max(a) from t0 where t0.a>t2.a) < 10000); connection default; show explain format=JSON for $thr2; ERROR HY000: Target is not executing an operation with a query plan @@ -573,6 +576,7 @@ kill query $thr2; connection con1; ERROR 70100: Query execution was interrupted SET debug_dbug=@old_debug; +drop table t2; # # Attempt SHOW EXPLAIN for an UPDATE # diff --git a/mysql-test/main/show_explain_json.test b/mysql-test/main/show_explain_json.test index 321fa46be87..9cbfa1fd686 100644 --- a/mysql-test/main/show_explain_json.test +++ b/mysql-test/main/show_explain_json.test @@ -281,9 +281,16 @@ SET debug_dbug=@old_debug; --echo # Try to do SHOW EXPLAIN for a query that runs a SET command: --echo # I've found experimentally that select_id==2 here... --echo # -set @show_explain_probe_select_id=2; + +create table t2 (a int); +insert into t2 values (1),(2); +set @show_explain_probe_select_id=3; # Stop in the subquery. SET debug_dbug='+d,show_explain_probe_join_exec_start'; -send set @foo= (select max(a) from t0 where sin(a) >0); +# t2 has 2 rows so we will stop in the subquery twice: +# - first one to serve the SHOW ANALYZE request +# - second one when waiting to be KILLed. 
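The probe id refers to the nesting of the statement sent just below: going by the earlier "select_id==2" observation for the old single-subquery statement, the outer SET presumably owns select_id 1, the subquery over t2 gets select_id 2, and the innermost subquery over t0 gets select_id 3, which is why the probe is set to 3 and, with two rows in t2, fires twice. A skeleton of that nesting with the assumed select_id of each block (the real statement follows):

  set @foo= ( SELECT MAX(a) FROM t2               # select_id=2
              WHERE ... ( SELECT MAX(a) FROM t0   # select_id=3, probe stops here
                          WHERE t0.a > t2.a ) ... );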
+send set @foo= (select max(a) from t2 + where a + (select max(a) from t0 where t0.a>t2.a) < 10000); connection default; --source include/wait_condition.inc --error ER_TARGET_NOT_EXPLAINABLE @@ -293,6 +300,7 @@ connection con1; --error ER_QUERY_INTERRUPTED reap; SET debug_dbug=@old_debug; +drop table t2; --echo # --echo # Attempt SHOW EXPLAIN for an UPDATE diff --git a/mysql-test/main/subselect_sj_mat.test b/mysql-test/main/subselect_sj_mat.test index a6262323991..e411c29dc10 100644 --- a/mysql-test/main/subselect_sj_mat.test +++ b/mysql-test/main/subselect_sj_mat.test @@ -524,8 +524,6 @@ where a1 in (select substring(b1,1,512) from t2_512 where b1 > '0'); # group_concat with a blob argument - depends on # the variable group_concat_max_len, and # convert_blob_length == max_len*collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB -#Check after fix MDEV-31276 ---disable_ps2_protocol explain extended select left(a1,7), left(a2,7) from t1_512 where a1 in (select group_concat(b1) from t2_512 group by b2); @@ -543,7 +541,6 @@ where a1 in (select group_concat(b1) from t2_512 group by b2); select left(a1,7), left(a2,7) from t1_512 where a1 in (select group_concat(b1) from t2_512 group by b2); ---enable_ps2_protocol drop table t1_512, t2_512, t3_512; @@ -609,8 +606,6 @@ where a1 in (select substring(b1,1,1024) from t2_1024 where b1 > '0'); # group_concat with a blob argument - depends on # the variable group_concat_max_len, and # convert_blob_length == max_len*collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB -#Check after fix MDEV-31276 ---disable_ps2_protocol explain extended select left(a1,7), left(a2,7) from t1_1024 where a1 in (select group_concat(b1) from t2_1024 group by b2); @@ -628,7 +623,6 @@ where a1 in (select group_concat(b1) from t2_1024 group by b2); select left(a1,7), left(a2,7) from t1_1024 where a1 in (select group_concat(b1) from t2_1024 group by b2); ---enable_ps2_protocol drop table t1_1024, t2_1024, t3_1024; @@ -694,8 +688,6 @@ where a1 in (select substring(b1,1,1025) from t2_1025 where b1 > '0'); # group_concat with a blob argument - depends on # the variable group_concat_max_len, and # convert_blob_length == max_len*collation->mbmaxlen > CONVERT_IF_BIGGER_TO_BLOB -#Check after fix MDEV-31276 ---disable_ps2_protocol explain extended select left(a1,7), left(a2,7) from t1_1025 where a1 in (select group_concat(b1) from t2_1025 group by b2); @@ -713,7 +705,6 @@ where a1 in (select group_concat(b1) from t2_1025 group by b2); select left(a1,7), left(a2,7) from t1_1025 where a1 in (select group_concat(b1) from t2_1025 group by b2); ---enable_ps2_protocol drop table t1_1025, t2_1025, t3_1025; diff --git a/mysql-test/main/system_mysql_db_fix50030.result b/mysql-test/main/system_mysql_db_fix50030.result index 644d2b8a23f..10607be829a 100644 --- a/mysql-test/main/system_mysql_db_fix50030.result +++ b/mysql-test/main/system_mysql_db_fix50030.result @@ -151,7 +151,7 @@ columns_priv CREATE TABLE `columns_priv` ( show create table procs_priv; Table Create Table procs_priv CREATE TABLE `procs_priv` ( - `Host` char(60) NOT NULL DEFAULT '', + `Host` char(255) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `User` char(128) NOT NULL DEFAULT '', `Routine_name` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', diff --git a/mysql-test/main/system_mysql_db_fix50117.result b/mysql-test/main/system_mysql_db_fix50117.result index 0a7cf2a9531..391c4e5b3f3 100644 --- a/mysql-test/main/system_mysql_db_fix50117.result +++ b/mysql-test/main/system_mysql_db_fix50117.result @@ 
-131,7 +131,7 @@ columns_priv CREATE TABLE `columns_priv` ( show create table procs_priv; Table Create Table procs_priv CREATE TABLE `procs_priv` ( - `Host` char(60) NOT NULL DEFAULT '', + `Host` char(255) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `User` char(128) NOT NULL DEFAULT '', `Routine_name` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', diff --git a/mysql-test/main/system_mysql_db_fix50568.result b/mysql-test/main/system_mysql_db_fix50568.result index 0598f0a5fc4..8f3a1ff1f31 100644 --- a/mysql-test/main/system_mysql_db_fix50568.result +++ b/mysql-test/main/system_mysql_db_fix50568.result @@ -152,7 +152,7 @@ columns_priv CREATE TABLE `columns_priv` ( show create table procs_priv; Table Create Table procs_priv CREATE TABLE `procs_priv` ( - `Host` char(60) NOT NULL DEFAULT '', + `Host` char(255) NOT NULL DEFAULT '', `Db` char(64) NOT NULL DEFAULT '', `User` char(128) NOT NULL DEFAULT '', `Routine_name` char(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci NOT NULL DEFAULT '', diff --git a/mysql-test/main/type_decimal.result b/mysql-test/main/type_decimal.result index b2853f18f1c..e219124a8cf 100644 --- a/mysql-test/main/type_decimal.result +++ b/mysql-test/main/type_decimal.result @@ -1323,3 +1323,176 @@ SET sql_mode=DEFAULT; # # End of 10.4 tests # +# +# Start of 10.11 tests +# +# +# MDEV-33442 REPAIR TABLE corrupts UUIDs +# +CREATE PROCEDURE show_table() +BEGIN +SHOW CREATE TABLE t1; +SELECT VERSION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; +SELECT * FROM t1 ORDER BY a; +END; +$$ +# Upgrade using REPAIR +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a +123.45 +123.46 +123.47 +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a +123.45 +123.46 +123.47 +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! 
+CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a +123.45 +123.46 +123.47 +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair status OK +# Expect old decimal, as it does not implicitly upgrade to new decimal +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a +123.45 +123.46 +123.47 +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a +123.45 +123.46 +123.47 +DROP TABLE t1; +# Upgrade using ALTER, adding a table COMMENT +# Upgrade a 10.11.4 table using ALTER, adding a table COMMENT +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a +123.45 +123.46 +123.47 +# ALTER..INPLACE should fail - the FRM file is too old and needs upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test10'; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 COMMENT 'test11'; +# Expect old decimal, as it does not implicitly upgrade to new decimal +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci COMMENT='test11' +VERSION +10 +a +123.45 +123.46 +123.47 +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test12'; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci COMMENT='test12' +VERSION +10 +a +123.45 +123.46 +123.47 +DROP TABLE t1; +# Upgrade using ALTER, adding a column DEFAULT +# Upgrade a 10.11.4 table using ALTER, adding a table COMMENT +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a +123.45 +123.46 +123.47 +# ALTER..INPLACE should fail - the FRM file is too old and needs upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a DECIMAL(10,2) DEFAULT 10; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. 
Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 MODIFY a DECIMAL(10,2) DEFAULT 11; +# Expect new decimal, as we explicitly redefined the data type +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2) DEFAULT 11.00 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a +123.45 +123.46 +123.47 +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a DECIMAL(10,2) DEFAULT 12; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` decimal(10,2) DEFAULT 12.00 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a +123.45 +123.46 +123.47 +DROP TABLE t1; +DROP PROCEDURE show_table; +# +# End of 10.11 tests +# diff --git a/mysql-test/main/type_decimal.test b/mysql-test/main/type_decimal.test index 9e294410a38..5f02d16e4ad 100644 --- a/mysql-test/main/type_decimal.test +++ b/mysql-test/main/type_decimal.test @@ -826,3 +826,93 @@ SET sql_mode=DEFAULT; --echo # --echo # End of 10.4 tests --echo # + +--echo # +--echo # Start of 10.11 tests +--echo # + +--echo # +--echo # MDEV-33442 REPAIR TABLE corrupts UUIDs +--echo # + +DELIMITER $$; +CREATE PROCEDURE show_table() +BEGIN + SHOW CREATE TABLE t1; + SELECT VERSION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; + SELECT * FROM t1 ORDER BY a; +END; +$$ +DELIMITER ;$$ + +--echo # Upgrade using REPAIR + +--copy_file std_data/old_decimal/t1dec102.frm $MYSQLD_DATADIR/test/t1.frm +--copy_file std_data/old_decimal/t1dec102.MYD $MYSQLD_DATADIR/test/t1.MYD +--copy_file std_data/old_decimal/t1dec102.MYI $MYSQLD_DATADIR/test/t1.MYI +CALL show_table; + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table; + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table; + +REPAIR TABLE t1; +--echo # Expect old decimal, as it does not implicitly upgrade to new decimal +CALL show_table; + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table; +DROP TABLE t1; + + +--echo # Upgrade using ALTER, adding a table COMMENT + +--echo # Upgrade a 10.11.4 table using ALTER, adding a table COMMENT +--copy_file std_data/old_decimal/t1dec102.frm $MYSQLD_DATADIR/test/t1.frm +--copy_file std_data/old_decimal/t1dec102.MYD $MYSQLD_DATADIR/test/t1.MYD +--copy_file std_data/old_decimal/t1dec102.MYI $MYSQLD_DATADIR/test/t1.MYI +CALL show_table; + +--echo # ALTER..INPLACE should fail - the FRM file is too old and needs upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test10'; +ALTER IGNORE TABLE t1 COMMENT 'test11'; +-- echo # Expect old decimal, as it does not implicitly upgrade to new decimal +CALL show_table; + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test12'; +CALL show_table; + +DROP TABLE t1; + + +--echo # Upgrade using ALTER, adding a column DEFAULT + +--echo # Upgrade a 10.11.4 table using ALTER, adding a table COMMENT +--copy_file std_data/old_decimal/t1dec102.frm $MYSQLD_DATADIR/test/t1.frm +--copy_file std_data/old_decimal/t1dec102.MYD $MYSQLD_DATADIR/test/t1.MYD +--copy_file std_data/old_decimal/t1dec102.MYI $MYSQLD_DATADIR/test/t1.MYI +CALL show_table; + +--echo # ALTER..INPLACE should fail - the FRM file is too old and needs upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a DECIMAL(10,2) DEFAULT 10; +ALTER IGNORE TABLE t1 MODIFY a DECIMAL(10,2) DEFAULT 11; +--echo # Expect new decimal, as we explicitly redefined the data type +CALL show_table; + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 
ALGORITHM=INPLACE, MODIFY a DECIMAL(10,2) DEFAULT 12; +CALL show_table; + +DROP TABLE t1; + +DROP PROCEDURE show_table; + + +--echo # +--echo # End of 10.11 tests +--echo # diff --git a/mysql-test/main/type_json.result b/mysql-test/main/type_json.result index 431a7f138f6..91686704ebd 100644 --- a/mysql-test/main/type_json.result +++ b/mysql-test/main/type_json.result @@ -155,7 +155,7 @@ DROP TABLE t1; # SELECT json_object('a', (SELECT json_objectagg(b, c) FROM (SELECT 'b','c') d)) AS j FROM DUAL; Catalog Database Table Table_alias Column Column_alias Type Length Max length Is_null Flags Decimals Charsetnr -def j 250 (format=json) 9437283 16 Y 0 39 33 +def j 250 (format=json) 9437310 16 Y 0 39 33 j {"a": {"b":"c"}} # diff --git a/mysql-test/main/type_varchar_mysql41.result b/mysql-test/main/type_varchar_mysql41.result index cc7f663d709..eb8bafe0f36 100644 --- a/mysql-test/main/type_varchar_mysql41.result +++ b/mysql-test/main/type_varchar_mysql41.result @@ -111,3 +111,152 @@ t2 CREATE TABLE `t2` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1old; DROP PROCEDURE p1; +# +# Start of 10.11 tests +# +# +# MDEV-33442 REPAIR TABLE corrupts UUIDs +# +CREATE PROCEDURE show_table() +BEGIN +SHOW CREATE TABLE t1; +SELECT VERSION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; +SELECT * FROM t1 ORDER BY a,b; +END; +$$ +# Upgrade using REPAIR +TRUNCATE TABLE t1; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255)/*old*/ DEFAULT NULL, + `b` varchar(255)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a b +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255)/*old*/ DEFAULT NULL, + `b` varchar(255)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a b +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255)/*old*/ DEFAULT NULL, + `b` varchar(255)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a b +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair status OK +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255) DEFAULT NULL, + `b` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255) DEFAULT NULL, + `b` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +DROP TABLE t1; +# Upgrade using ALTER, adding a table COMMENT +TRUNCATE TABLE t1; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255)/*old*/ DEFAULT NULL, + `b` varchar(255)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a b +# ALTER..INPLACE should fail - the old columns need upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test10'; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. 
Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 COMMENT 'test11'; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255) DEFAULT NULL, + `b` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci COMMENT='test11' +VERSION +10 +a b +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test12'; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255) DEFAULT NULL, + `b` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci COMMENT='test12' +VERSION +10 +a b +DROP TABLE t1; +# Upgrade using ALTER, adding a column DEFAULT +TRUNCATE TABLE t1; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255)/*old*/ DEFAULT NULL, + `b` varchar(255)/*old*/ DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +9 +a b +# ALTER..INPLACE should fail - the old columns need upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a VARBINARY(255) DEFAULT 'a10'; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 MODIFY a VARBINARY(255) DEFAULT 'a11'; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255) DEFAULT 'a11', + `b` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 MODIFY a VARBINARY(255) DEFAULT 'a12'; +CALL show_table; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` varbinary(255) DEFAULT 'a12', + `b` varchar(255) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +DROP TABLE t1; +DROP PROCEDURE show_table; +# +# End of 10.11 tests +# diff --git a/mysql-test/main/type_varchar_mysql41.test b/mysql-test/main/type_varchar_mysql41.test index 5624e9edaaa..c963d3a3bb4 100644 --- a/mysql-test/main/type_varchar_mysql41.test +++ b/mysql-test/main/type_varchar_mysql41.test @@ -57,3 +57,88 @@ CALL p1('a'); DROP TABLE t1old; DROP PROCEDURE p1; + +--echo # +--echo # Start of 10.11 tests +--echo # + +--echo # +--echo # MDEV-33442 REPAIR TABLE corrupts UUIDs +--echo # + +DELIMITER $$; +CREATE PROCEDURE show_table() +BEGIN + SHOW CREATE TABLE t1; + SELECT VERSION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; + SELECT * FROM t1 ORDER BY a,b; +END; +$$ +DELIMITER ;$$ + + +--echo # Upgrade using REPAIR + +--copy_file $MYSQL_TEST_DIR/std_data/bug19371.frm $MYSQLD_DATADIR/test/t1.frm +TRUNCATE TABLE t1; +CALL show_table; + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table; + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table; + +REPAIR TABLE t1; +CALL show_table; + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table; + +DROP TABLE t1; + + +--echo # Upgrade using ALTER, adding a table COMMENT + +--copy_file $MYSQL_TEST_DIR/std_data/bug19371.frm $MYSQLD_DATADIR/test/t1.frm +TRUNCATE TABLE t1; +CALL show_table; + +--echo # ALTER..INPLACE should fail - the old columns need upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test10'; +ALTER IGNORE TABLE t1 COMMENT 'test11'; +CALL show_table; + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test12'; +CALL show_table; + +DROP TABLE t1; + + +--echo # Upgrade using ALTER, adding a column DEFAULT + +--copy_file $MYSQL_TEST_DIR/std_data/bug19371.frm $MYSQLD_DATADIR/test/t1.frm +TRUNCATE TABLE t1; +CALL show_table; + 
+--echo # ALTER..INPLACE should fail - the old columns need upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a VARBINARY(255) DEFAULT 'a10'; +ALTER IGNORE TABLE t1 MODIFY a VARBINARY(255) DEFAULT 'a11'; +CALL show_table; + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 MODIFY a VARBINARY(255) DEFAULT 'a12'; +CALL show_table; + +DROP TABLE t1; + + +DROP PROCEDURE show_table; + + +--echo # +--echo # End of 10.11 tests +--echo # diff --git a/mysql-test/mariadb-test-run.pl b/mysql-test/mariadb-test-run.pl index 9f248d44b37..833e352b355 100755 --- a/mysql-test/mariadb-test-run.pl +++ b/mysql-test/mariadb-test-run.pl @@ -4489,6 +4489,14 @@ sub extract_warning_lines ($$) { qr/Slave I\/0: Master command COM_BINLOG_DUMP failed/, qr/Error reading packet/, qr/Lost connection to MariaDB server at 'reading initial communication packet'/, + qr/Could not read packet:.* state: [2-3] /, + qr/Could not read packet:.* errno: 104 /, + qr/Could not read packet:.* errno: 0 .* length: 0/, + qr/Could not write packet:.* errno: 32 /, + qr/Could not write packet:.* errno: 104 /, + qr/Semisync ack receiver got error 1158/, + qr/Semisync ack receiver got hangup/, + qr/Connection was killed/, qr/Failed on request_dump/, qr/Slave: Can't drop database.* database doesn't exist/, qr/Slave: Operation DROP USER failed for 'create_rout_db'/, @@ -4544,6 +4552,7 @@ sub extract_warning_lines ($$) { qr/WSREP: Failed to guess base node address/, qr/WSREP: Guessing address for incoming client/, + qr/InnoDB: Difficult to find free blocks in the buffer pool*/, # for UBSAN qr/decimal\.c.*: runtime error: signed integer overflow/, # Disable test for UBSAN on dynamically loaded objects diff --git a/mysql-test/suite/binlog/include/print_optional_metadata.inc b/mysql-test/suite/binlog/include/print_optional_metadata.inc index 739903ab190..47feede80ec 100644 --- a/mysql-test/suite/binlog/include/print_optional_metadata.inc +++ b/mysql-test/suite/binlog/include/print_optional_metadata.inc @@ -16,10 +16,9 @@ if ($stop_position) --exec $MYSQL_BINLOG -F --print-table-metadata $_stop_position_opt $binlog_file > $output_file ---let SEARCH_PATTERN= # (?:Columns\(| {8}).* +--let SEARCH_PATTERN= (?m-s:# (?:Columns\(| {8}).*) --let SEARCH_FILE= $output_file --let SEARCH_OUTPUT=matches ---let SEARCH_TYPE="_gm_" --source include/search_pattern_in_file.inc if ($print_primary_key) diff --git a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result index fde3703645a..99721813159 100644 --- a/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result +++ b/mysql-test/suite/binlog_encryption/rpl_gtid_basic.result @@ -69,8 +69,6 @@ INSERT INTO t2 VALUES (5, "i1a"); connection server_4; CHANGE MASTER TO master_host = '127.0.0.1', master_port = MASTER_PORT, MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead include/start_slave.inc SELECT * FROM t1 ORDER BY a; a b @@ -91,8 +89,6 @@ connection server_2; include/stop_slave.inc CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_4, MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. 
Please use master_demote_to_slave=1 instead include/start_slave.inc connection server_4; UPDATE t2 SET b="j1a" WHERE a=5; @@ -121,8 +117,6 @@ include/save_master_gtid.inc connection server_3; CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_4, MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead include/start_slave.inc include/sync_with_master_gtid.inc SELECT * FROM t2 ORDER BY a; diff --git a/mysql-test/suite/binlog_encryption/rpl_packet.result b/mysql-test/suite/binlog_encryption/rpl_packet.result index 4a2a5d70d39..bb6269607fe 100644 --- a/mysql-test/suite/binlog_encryption/rpl_packet.result +++ b/mysql-test/suite/binlog_encryption/rpl_packet.result @@ -2,6 +2,8 @@ include/master-slave.inc [connection master] call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); +call mtr.add_suppression("Could not write packet:"); +call mtr.add_suppression("Got a packet bigger than 'max_allowed_packet' bytes"); drop database if exists DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; create database DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; connection master; diff --git a/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result index 0c810d2a3f4..b0a4fa59c69 100644 --- a/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result +++ b/mysql-test/suite/binlog_encryption/rpl_parallel_stop_slave.result @@ -37,7 +37,9 @@ connection con_temp1; BEGIN; INSERT INTO t2 VALUES (21); connection server_2; -START SLAVE; +START SLAVE IO_THREAD; +include/wait_for_slave_param.inc [Read_Master_Log_Pos] +START SLAVE SQL_THREAD; connection con_temp2; SET @old_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; diff --git a/mysql-test/suite/compat/oracle/r/empty_string_literal.result b/mysql-test/suite/compat/oracle/r/empty_string_literal.result index 4af576e90c0..1260c7aa75d 100644 --- a/mysql-test/suite/compat/oracle/r/empty_string_literal.result +++ b/mysql-test/suite/compat/oracle/r/empty_string_literal.result @@ -64,7 +64,7 @@ SET sql_mode=@mode; # Test litteral concat # SELECT 'a' 'b'; -a +ab ab SELECT 'a' ''; a @@ -76,13 +76,13 @@ SELECT '' ''; NULL NULL SELECT '' 'b' 'c'; -b +bc bc SELECT '' '' 'c'; c c SELECT 'a' '' 'c'; -a +ac ac SELECT 'a' '' ''; a diff --git a/mysql-test/suite/encryption/t/encrypt_and_grep.test b/mysql-test/suite/encryption/t/encrypt_and_grep.test index 485a3eb2ec8..648ad80780c 100644 --- a/mysql-test/suite/encryption/t/encrypt_and_grep.test +++ b/mysql-test/suite/encryption/t/encrypt_and_grep.test @@ -22,8 +22,9 @@ insert t2 values (repeat('tempsecret', 12)); insert t3 values (repeat('dummysecret', 12)); --echo # Wait max 10 min for key encryption threads to encrypt all spaces +--let $tables_count= `select count(*) + @@global.innodb_undo_tablespaces from information_schema.tables where engine = 'InnoDB'` --let $wait_timeout= 600 ---let $wait_condition=SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +--let $wait_condition=SELECT COUNT(*) = $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 --source include/wait_condition.inc --sorted_result @@ 
-93,8 +94,9 @@ UNLOCK TABLES; SET GLOBAL innodb_encrypt_tables = on; --echo # Wait max 10 min for key encryption threads to encrypt all spaces +--let $tables_count= `select count(*) + @@global.innodb_undo_tablespaces from information_schema.tables where engine = 'InnoDB'` --let $wait_timeout= 600 ---let $wait_condition=SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +--let $wait_condition=SELECT COUNT(*) = $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 --source include/wait_condition.inc --sorted_result diff --git a/mysql-test/suite/engines/funcs/r/rpl_change_master.result b/mysql-test/suite/engines/funcs/r/rpl_change_master.result index 88801b07bba..48cec72d917 100644 --- a/mysql-test/suite/engines/funcs/r/rpl_change_master.result +++ b/mysql-test/suite/engines/funcs/r/rpl_change_master.result @@ -26,9 +26,4 @@ connection master; CHANGE MASTER TO MASTER_USER='root', MASTER_SSL=0, MASTER_SSL_CA='', MASTER_SSL_CERT='', MASTER_SSL_KEY='', MASTER_SSL_CRL='', MASTER_SSL_CRLPATH=''; CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='', MASTER_SSL=0; -"Usage of CURRENT_POS in CHANGE MASTER MASTER_USE_GTID is dreprecated. -CHANGE MASTER TO MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead -CHANGE MASTER TO MASTER_USE_GTID=SLAVE_POS; include/rpl_end.inc diff --git a/mysql-test/suite/engines/funcs/r/rpl_get_lock.result b/mysql-test/suite/engines/funcs/r/rpl_get_lock.result index b852546e1bf..cbb02a32648 100644 --- a/mysql-test/suite/engines/funcs/r/rpl_get_lock.result +++ b/mysql-test/suite/engines/funcs/r/rpl_get_lock.result @@ -1,6 +1,6 @@ include/master-slave.inc [connection master] -CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +SET GLOBAL LOG_WARNINGS=4; create table t1(n int); insert into t1 values(get_lock("lock",2)); disconnect master; @@ -35,4 +35,5 @@ NULL connection master1; drop table t1; connection slave; +connection default; include/rpl_end.inc diff --git a/mysql-test/suite/galera/r/MDEV-33136.result b/mysql-test/suite/galera/r/MDEV-33136.result new file mode 100644 index 00000000000..36159fa05cd --- /dev/null +++ b/mysql-test/suite/galera/r/MDEV-33136.result @@ -0,0 +1,21 @@ +connection node_2; +connection node_1; +connect node_1a,127.0.0.1,root,,test,$NODE_MYPORT_1; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +connection node_1a; +TRUNCATE TABLE t1; +SET SESSION wsrep_retry_autocommit = 0; +SET DEBUG_SYNC = 'dict_stats_mdl_acquired SIGNAL may_toi WAIT_FOR bf_abort'; +INSERT INTO t1 VALUES (1); +connection node_1; +SET DEBUG_SYNC = 'now WAIT_FOR may_toi'; +TRUNCATE TABLE t1; +connection node_1a; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction +connection node_1; +SET DEBUG_SYNC = 'RESET'; +DROP TABLE t1; +disconnect node_1a; +disconnect node_2; +disconnect node_1; diff --git a/mysql-test/suite/galera/r/galera_bf_abort_mariabackup.result b/mysql-test/suite/galera/r/galera_bf_abort_mariabackup.result index 88c200ee933..fa0568035a6 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_mariabackup.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_mariabackup.result @@ -12,9 +12,9 @@ connection node_1; connection node_2; Starting server ... 
connection node_1; -# Both should return FOUND 2 as we have bootstrap and SST -FOUND 2 /Desyncing and pausing the provider/ in mysqld.1.err -FOUND 2 /Resuming and resyncing the provider/ in mysqld.1.err +# Both should return NOT FOUND as we have mariabackup with backup locks +NOT FOUND /Desyncing and pausing the provider/ in mysqld.1.err +NOT FOUND /Resuming and resyncing the provider/ in mysqld.1.err connection node_1; SET GLOBAL wsrep_mode = "BF_ABORT_MARIABACKUP"; # Restart node_2, force SST. @@ -25,9 +25,9 @@ connection node_2; Starting server ... connection node_2; connection node_1; -# Both should return FOUND 3 as we have 1 new SST -FOUND 3 /Desyncing and pausing the provider/ in mysqld.1.err -FOUND 3 /Resuming and resyncing the provider/ in mysqld.1.err +# Both should return NOT FOUND as we have mariabackup with backup locks +NOT FOUND /Desyncing and pausing the provider/ in mysqld.1.err +NOT FOUND /Resuming and resyncing the provider/ in mysqld.1.err SET GLOBAL wsrep_mode = ""; DROP TABLE t; # Case 2: MariaBackup backup from node_2 @@ -46,11 +46,13 @@ SET GLOBAL wsrep_mode = "BF_ABORT_MARIABACKUP"; SELECT @@wsrep_mode; @@wsrep_mode BF_ABORT_MARIABACKUP -# Both should return FOUND 1 as node should not desync -FOUND 1 /Desyncing and pausing the provider/ in mysqld.2.err -FOUND 1 /Resuming and resyncing the provider/ in mysqld.2.err -# Should return FOUND 1 because only last backup does not desync -FOUND 1 /Server not desynched from group because WSREP_MODE_BF_MARIABACKUP used./ in mysqld.2.err +# Both should return FOUND 2 because both backups do desync but on different points +FOUND 2 /Desyncing and pausing the provider/ in mysqld.2.err +FOUND 2 /Resuming and resyncing the provider/ in mysqld.2.err +# Should return FOUND 1 as server did not desync at BLOCK_DDL +FOUND 1 /Server not desynched from group at BLOCK_DDL because WSREP_MODE_BF_MARIABACKUP is used./ in mysqld.2.err +# Should return FOUND 1 as server did desync and pause at BLOCK_COMMIT +FOUND 1 /Server desynched from group during BACKUP STAGE BLOCK_COMMIT./ in mysqld.2.err SET GLOBAL wsrep_mode = ""; connection node_1; DROP TABLE t; diff --git a/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result b/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result index a15b0c7df69..bc61cfb4d6f 100644 --- a/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result +++ b/mysql-test/suite/galera/r/galera_parallel_apply_lock_table.result @@ -10,7 +10,7 @@ INSERT INTO t1 VALUES (1); INSERT INTO t2 VALUES (1); connection node_2a; SET SESSION wsrep_sync_wait=0; -SELECT COUNT(*) AS EXPECT_1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE (STATE LIKE '%committing%' or STATE = 'Waiting for certification'); +SELECT COUNT(*) AS EXPECT_1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE (STATE LIKE 'Commit' or STATE = 'Waiting for certification'); EXPECT_1 1 SELECT COUNT(*) AS EXPECT_1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Waiting for table metadata lock%'; diff --git a/mysql-test/suite/galera/r/galera_query_cache_invalidate.result b/mysql-test/suite/galera/r/galera_query_cache_invalidate.result index 4a6b61e4d80..fc23c0f1caf 100644 --- a/mysql-test/suite/galera/r/galera_query_cache_invalidate.result +++ b/mysql-test/suite/galera/r/galera_query_cache_invalidate.result @@ -8,8 +8,6 @@ connection node_4; call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node."); connection node_3; CHANGE MASTER TO master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_1, 
master_use_gtid=current_pos;; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead START SLAVE; include/wait_for_slave_to_start.inc connection node_1; diff --git a/mysql-test/suite/galera/t/MDEV-33136.test b/mysql-test/suite/galera/t/MDEV-33136.test new file mode 100644 index 00000000000..12765ef6dfb --- /dev/null +++ b/mysql-test/suite/galera/t/MDEV-33136.test @@ -0,0 +1,44 @@ +# +# MDEV-33136: Properly BF-abort user transactions with explicit locks +# +# User transactions may acquire explicit MDL locks from InnoDB level +# when persistent statistics is re-read for a table. +# If such a transaction would be subject to BF-abort, it was improperly +# detected as a system transaction and wouldn't get aborted. +# +# The fix: Check if a transaction holding explicit MDL locks is a user +# transaction in the MDL conflict handling code. + +--source include/galera_cluster.inc +--source include/have_debug_sync.inc +--source include/have_debug.inc + +--connect node_1a,127.0.0.1,root,,test,$NODE_MYPORT_1 + +--connection node_1 +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; + +--connection node_1a +TRUNCATE TABLE t1; +# TRUNCATE forces the next statement to re-read statistics from persistent storage, +# which will acquire MDL locks on the statistics tables in InnoDB. +SET SESSION wsrep_retry_autocommit = 0; +SET DEBUG_SYNC = 'dict_stats_mdl_acquired SIGNAL may_toi WAIT_FOR bf_abort'; +--send + INSERT INTO t1 VALUES (1); + +--connection node_1 +SET DEBUG_SYNC = 'now WAIT_FOR may_toi'; +TRUNCATE TABLE t1; + +--connection node_1a +# Local INSERT gets aborted. +--error ER_LOCK_DEADLOCK +--reap + +# Cleanup +--connection node_1 +SET DEBUG_SYNC = 'RESET'; +DROP TABLE t1; +--disconnect node_1a +--source include/galera_end.inc diff --git a/mysql-test/suite/galera/t/galera_bf_abort_mariabackup.test b/mysql-test/suite/galera/t/galera_bf_abort_mariabackup.test index 34c3f5d3621..ed16ac3926c 100644 --- a/mysql-test/suite/galera/t/galera_bf_abort_mariabackup.test +++ b/mysql-test/suite/galera/t/galera_bf_abort_mariabackup.test @@ -13,7 +13,7 @@ CREATE TABLE t(i INT NOT NULL PRIMARY KEY) ENGINE INNODB; INSERT INTO t VALUES(1); # -# In default settings donor should desync +# In default settings donor should not desync # --echo # Restart node_2, force SST. 
--connection node_2 @@ -37,7 +37,7 @@ let $restart_noprint=2; --connection node_1 let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err; ---echo # Both should return FOUND 2 as we have bootstrap and SST +--echo # Both should return NOT FOUND as we have mariabackup with backup locks let SEARCH_PATTERN = Desyncing and pausing the provider; --source include/search_pattern_in_file.inc let SEARCH_PATTERN = Resuming and resyncing the provider; @@ -76,7 +76,7 @@ let $restart_noprint=2; --connection node_1 let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err; ---echo # Both should return FOUND 3 as we have 1 new SST +--echo # Both should return NOT FOUND as we have mariabackup with backup locks let SEARCH_PATTERN = Desyncing and pausing the provider; --source include/search_pattern_in_file.inc let SEARCH_PATTERN = Resuming and resyncing the provider; @@ -117,13 +117,16 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup2; --enable_result_log let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.2.err; ---echo # Both should return FOUND 1 as node should not desync +--echo # Both should return FOUND 2 because both backups do desync but on different points let SEARCH_PATTERN = Desyncing and pausing the provider; --source include/search_pattern_in_file.inc let SEARCH_PATTERN = Resuming and resyncing the provider; --source include/search_pattern_in_file.inc ---echo # Should return FOUND 1 because only last backup does not desync -let SEARCH_PATTERN = Server not desynched from group because WSREP_MODE_BF_MARIABACKUP used.; +--echo # Should return FOUND 1 as server did not desync at BLOCK_DDL +let SEARCH_PATTERN = Server not desynched from group at BLOCK_DDL because WSREP_MODE_BF_MARIABACKUP is used.; +--source include/search_pattern_in_file.inc +--echo # Should return FOUND 1 as server did desync and pause at BLOCK_COMMIT +let SEARCH_PATTERN = Server desynched from group during BACKUP STAGE BLOCK_COMMIT.; --source include/search_pattern_in_file.inc SET GLOBAL wsrep_mode = ""; diff --git a/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test b/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test index 2e9f05cb4af..b49253efc02 100644 --- a/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test +++ b/mysql-test/suite/galera/t/galera_parallel_apply_lock_table.test @@ -32,7 +32,7 @@ INSERT INTO t2 VALUES (1); --connection node_2a --sleep 1 SET SESSION wsrep_sync_wait=0; -SELECT COUNT(*) AS EXPECT_1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE (STATE LIKE '%committing%' or STATE = 'Waiting for certification'); +SELECT COUNT(*) AS EXPECT_1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE (STATE LIKE 'Commit' or STATE = 'Waiting for certification'); SELECT COUNT(*) AS EXPECT_1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE '%Waiting for table metadata lock%'; SELECT COUNT(*) AS EXPECT_0 FROM t1; SELECT COUNT(*) AS EXPECT_0 FROM t2; diff --git a/mysql-test/suite/galera/t/galera_parallel_simple.test b/mysql-test/suite/galera/t/galera_parallel_simple.test index d49dc0ae876..89adaf8cbd2 100644 --- a/mysql-test/suite/galera/t/galera_parallel_simple.test +++ b/mysql-test/suite/galera/t/galera_parallel_simple.test @@ -51,7 +51,7 @@ SET SESSION wsrep_sync_wait = 0; --let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%'; --source include/wait_condition.inc ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'committing%'; +--let $wait_condition = SELECT COUNT(*) = 1 FROM 
INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Commit'; --source include/wait_condition.inc UNLOCK TABLES; diff --git a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result index 63ea5b1d418..505afb9b923 100644 --- a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result +++ b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result @@ -96,11 +96,8 @@ a b c DROP TABLE t1; CREATE TABLE t1 (a INT, b INT, c INT GENERATED ALWAYS AS(a+b)); INSERT INTO t1(a, b) VALUES (1, 1), (2, 2), (3, 3), (4, 4); -connection con1; -# disable purge -BEGIN; -SELECT * FROM t0; -a +connect stop_purge,localhost,root,,; +START TRANSACTION WITH CONSISTENT SNAPSHOT; connection default; DELETE FROM t1 WHERE a = 1; UPDATE t1 SET a = 2, b = 2 WHERE a = 5; @@ -109,10 +106,11 @@ SET DEBUG_SYNC= 'inplace_after_index_build SIGNAL uncommitted WAIT_FOR purged'; ALTER TABLE t1 ADD INDEX idx (c), ALGORITHM=INPLACE, LOCK=NONE; connection con1; SET DEBUG_SYNC= 'now WAIT_FOR uncommitted'; +BEGIN; DELETE FROM t1 WHERE a = 3; UPDATE t1 SET a = 7, b = 7 WHERE a = 4; INSERT INTO t1(a, b) VALUES (8, 8); -# enable purge +disconnect stop_purge; COMMIT; # wait for purge to process the deleted/updated records. InnoDB 2 transactions not purged diff --git a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test index 09fba0285c7..7966953535c 100644 --- a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test +++ b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test @@ -131,9 +131,8 @@ CREATE TABLE t1 (a INT, b INT, c INT GENERATED ALWAYS AS(a+b)); INSERT INTO t1(a, b) VALUES (1, 1), (2, 2), (3, 3), (4, 4); -connection con1; ---echo # disable purge -BEGIN; SELECT * FROM t0; +connect (stop_purge,localhost,root,,); +START TRANSACTION WITH CONSISTENT SNAPSHOT; connection default; DELETE FROM t1 WHERE a = 1; @@ -148,13 +147,14 @@ send ALTER TABLE t1 ADD INDEX idx (c), ALGORITHM=INPLACE, LOCK=NONE; connection con1; SET DEBUG_SYNC= 'now WAIT_FOR uncommitted'; +BEGIN; DELETE FROM t1 WHERE a = 3; UPDATE t1 SET a = 7, b = 7 WHERE a = 4; INSERT INTO t1(a, b) VALUES (8, 8); ---echo # enable purge +disconnect stop_purge; COMMIT; --echo # wait for purge to process the deleted/updated records. 
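The innodb_virtual_debug_purge hunks above replace the old way of holding back purge (an open read view created by BEGIN / SELECT on con1) with a dedicated stop_purge connection that runs START TRANSACTION WITH CONSISTENT SNAPSHOT and is simply disconnected once purge may resume. A minimal sketch of that pattern on its own, assuming a throw-away InnoDB table t (the table and surrounding statements are illustrative, not taken from the patch):

--source include/have_innodb.inc
CREATE TABLE t (a INT PRIMARY KEY) ENGINE=InnoDB;
INSERT INTO t VALUES (1),(2);
# Any open read view pins the row history that purge would otherwise remove.
connect (stop_purge,localhost,root,,);
START TRANSACTION WITH CONSISTENT SNAPSHOT;
connection default;
# This row stays delete-marked for as long as stop_purge holds its read view.
DELETE FROM t WHERE a = 1;
# Closing the connection releases the read view, so purge can clean up again.
disconnect stop_purge;
DROP TABLE t;

Making the blocker a separate, named connection keeps the intent visible in the test and avoids relying on con1 happening to hold an old enough read view.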
diff --git a/mysql-test/suite/innodb/r/alter_copy.result b/mysql-test/suite/innodb/r/alter_copy.result index 8c9e5966b2e..72ae28e9652 100644 --- a/mysql-test/suite/innodb/r/alter_copy.result +++ b/mysql-test/suite/innodb/r/alter_copy.result @@ -51,7 +51,7 @@ ADD INDEX(a,b,d), ADD INDEX(a,d,b), ADD INDEX(b,c,d), ADD INDEX(b,d,c), ALGORITHM=COPY; connection default; SET DEBUG_SYNC='now WAIT_FOR hung'; -# restart: --innodb-force-recovery=3 --debug_dbug=+d,recv_ran_out_of_buffer +# restart: --innodb-force-recovery=3 disconnect hang; FTS_INDEX_1.ibd FTS_INDEX_2.ibd diff --git a/mysql-test/suite/innodb/r/autoinc_debug.result b/mysql-test/suite/innodb/r/autoinc_debug.result index b3b7a469ada..59740e43163 100644 --- a/mysql-test/suite/innodb/r/autoinc_debug.result +++ b/mysql-test/suite/innodb/r/autoinc_debug.result @@ -105,3 +105,60 @@ t1 CREATE TABLE `t1` ( ) ENGINE=InnoDB AUTO_INCREMENT=6 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1; SET DEBUG_SYNC='RESET'; +# +# MDEV-33593: Auto increment deadlock error causes ASSERT in subsequent save point +# +CREATE TABLE t1(col1 INT PRIMARY KEY AUTO_INCREMENT, col2 INT) ENGINE=InnoDB; +CREATE TABLE t2(col1 INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1(col2) values(100); +connect con1, localhost, root,,; +START TRANSACTION; +# T1: Acquiring Row X lock on table t2 +INSERT INTO t2 values(100); +connect con2, localhost, root,,; +START TRANSACTION; +# T2: Wait for (T1) row lock on t2 after acquiring GAP Lock on t1 +UPDATE t1 SET col2 = 20 where col1 = 10; +SET DEBUG_SYNC='lock_wait_before_suspend SIGNAL t2_waiting'; +INSERT INTO t2 values(100); +connection default; +SET DEBUG_SYNC='now WAIT_FOR t2_waiting'; +# T3: Wait for (T2) II row Lock on t1 after acquiring Auto Increment Lock on t1 +SET DEBUG_SYNC='lock_wait_before_suspend SIGNAL t3_waiting'; +INSERT INTO t1(col2) SELECT col2 from t1; +connection con1; +SAVEPOINT s1; +SET DEBUG_SYNC='now WAIT_FOR t3_waiting'; +# T1: Wait for (T3) auto increment lock on t1 causing T1 -> T3 -> T2 -> T1 deadlock +SET debug_dbug = '+d,innodb_deadlock_victim_self'; +INSERT INTO t1(col2) VALUES(200); +ERROR HY000: Failed to read auto-increment value from storage engine +# The transaction should have been rolled back +SELECT * FROM t1; +col1 col2 +1 100 +SELECT * FROM t2; +col1 +# Release the previous savepoint using the same name +SAVEPOINT s1; +COMMIT; +connection con2; +COMMIT; +connection default; +COMMIT; +disconnect con1; +disconnect con2; +# Cleanup +SELECT * FROM t1; +col1 col2 +1 100 +2 100 +DROP TABLE t1; +SELECT * FROM t2; +col1 +100 +DROP TABLE t2; +SET DEBUG_SYNC='RESET'; +# +# End of 10.5 tests +# diff --git a/mysql-test/suite/innodb/r/innodb-lru-force-no-free-page.result b/mysql-test/suite/innodb/r/innodb-lru-force-no-free-page.result deleted file mode 100644 index 09e53b59d9d..00000000000 --- a/mysql-test/suite/innodb/r/innodb-lru-force-no-free-page.result +++ /dev/null @@ -1,10 +0,0 @@ -call mtr.add_suppression("InnoDB: Difficult to find free blocks in the buffer pool"); -SET @saved_debug = @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,ib_lru_force_no_free_page"; -CREATE TABLE t1 (j LONGBLOB) ENGINE = InnoDB; -BEGIN; -INSERT INTO t1 VALUES (repeat('abcdefghijklmnopqrstuvwxyz',200)); -COMMIT; -SET debug_dbug = @saved_debug; -DROP TABLE t1; -FOUND 1 /InnoDB: Difficult to find free blocks / in mysqld.1.err diff --git a/mysql-test/suite/innodb/r/innodb_bug30113362.result b/mysql-test/suite/innodb/r/innodb_bug30113362.result index cb0f5091650..5d30c5c03dd 100644 --- 
a/mysql-test/suite/innodb/r/innodb_bug30113362.result +++ b/mysql-test/suite/innodb/r/innodb_bug30113362.result @@ -37,7 +37,7 @@ test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK SELECT CLUST_INDEX_SIZE FROM information_schema.INNODB_SYS_TABLESTATS WHERE NAME = 'test/t1'; CLUST_INDEX_SIZE -1856 +1792 connection con2; DELETE FROM t1 WHERE a00 = 'cnm'; COMMIT; @@ -80,7 +80,7 @@ test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK SELECT CLUST_INDEX_SIZE FROM information_schema.INNODB_SYS_TABLESTATS WHERE NAME = 'test/t1'; CLUST_INDEX_SIZE -1856 +1792 DELETE FROM t1 WHERE a00 = 'dpn'; COMMIT; INSERT INTO t1 SET a00 = 'dpn'; @@ -117,6 +117,6 @@ test.t1 analyze status Engine-independent statistics collected test.t1 analyze status OK SELECT CLUST_INDEX_SIZE FROM information_schema.INNODB_SYS_TABLESTATS WHERE NAME = 'test/t1'; CLUST_INDEX_SIZE -1856 +1792 SET DEBUG_SYNC = 'RESET'; DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/innodb_mysql.result b/mysql-test/suite/innodb/r/innodb_mysql.result index 1b892e69495..84ff19bc5c1 100644 --- a/mysql-test/suite/innodb/r/innodb_mysql.result +++ b/mysql-test/suite/innodb/r/innodb_mysql.result @@ -431,10 +431,6 @@ a connection con1; begin work; insert into t1 values (5); -select * from t1; -a -1 -5 insert into t1 values (2); ERROR HY000: Lock wait timeout exceeded; try restarting transaction select * from t1; @@ -509,10 +505,6 @@ a connection con1; begin work; insert into t1 values (5); -select * from t1; -a -1 -5 insert into t1 values (2); ERROR HY000: Lock wait timeout exceeded; try restarting transaction select * from t1; @@ -1217,10 +1209,6 @@ a connection con1; begin work; insert into t1 values (5); -select * from t1; -a -1 -5 insert into t1 values (2); ERROR HY000: Lock wait timeout exceeded; try restarting transaction select * from t1; diff --git a/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result b/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result index c53707d5997..4fa95934446 100644 --- a/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result +++ b/mysql-test/suite/innodb/r/innodb_skip_innodb_is_tables.result @@ -86,7 +86,6 @@ buffer_flush_n_to_flush_by_age buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NUL buffer_flush_adaptive_avg_time buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Avg time (ms) spent for adaptive flushing recently. buffer_flush_adaptive_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of adaptive flushes passed during the recent Avg period. buffer_LRU_get_free_loops buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Total loops in LRU get free. -buffer_LRU_get_free_waits buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Total sleep waits in LRU get free. 
buffer_flush_avg_page_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Average number of pages at which flushing is happening buffer_flush_lsn_avg_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Average redo generation rate buffer_flush_pct_for_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Percent of IO capacity used to avoid max dirty page limit @@ -106,7 +105,6 @@ buffer_LRU_batch_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NUL buffer_LRU_batch_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_member Pages scanned per LRU batch call buffer_LRU_batch_flush_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Total pages flushed as part of LRU batches buffer_LRU_batch_evict_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Total pages evicted as part of LRU batches -buffer_LRU_single_flush_failure_count Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of times attempt to flush a single page from LRU failed buffer_LRU_get_free_search Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of searches performed for a clean page buffer_LRU_search_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_owner Total pages scanned as part of LRU search buffer_LRU_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_member Number of times LRU search is performed diff --git a/mysql-test/suite/innodb/r/innodb_stats_fetch.result b/mysql-test/suite/innodb/r/innodb_stats_fetch.result index 2a68bc4c070..d3c81ba6723 100644 --- a/mysql-test/suite/innodb/r/innodb_stats_fetch.result +++ b/mysql-test/suite/innodb/r/innodb_stats_fetch.result @@ -174,3 +174,10 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_VIRTUAL LIMIT ROWS EXAMINED 5; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN LIMIT ROWS EXAMINED 5; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS LIMIT ROWS EXAMINED 5; DROP TABLE t1; +# +# MDEV-33462 Disallow LOCK=NONE operation on statistics table +# +ALTER TABLE mysql.innodb_table_stats FORCE, LOCK=NONE; +ERROR 0A000: LOCK=NONE is not supported. Reason: innodb_table_stats. Try LOCK=SHARED +ALTER TABLE mysql.innodb_index_stats FORCE, LOCK=NONE; +ERROR 0A000: LOCK=NONE is not supported. Reason: innodb_index_stats. 
Try LOCK=SHARED diff --git a/mysql-test/suite/innodb/r/innodb_timeout_rollback.result b/mysql-test/suite/innodb/r/innodb_timeout_rollback.result index 30db5a21a4d..e8fc89e55ba 100644 --- a/mysql-test/suite/innodb/r/innodb_timeout_rollback.result +++ b/mysql-test/suite/innodb/r/innodb_timeout_rollback.result @@ -17,10 +17,6 @@ a connection con1; begin work; insert into t1 values (5); -select * from t1; -a -1 -5 insert into t1 values (2); ERROR HY000: Lock wait timeout exceeded; try restarting transaction select * from t1; diff --git a/mysql-test/suite/innodb/r/instant_alter_crash.result b/mysql-test/suite/innodb/r/instant_alter_crash.result index e423afe10a8..565f7e4b34e 100644 --- a/mysql-test/suite/innodb/r/instant_alter_crash.result +++ b/mysql-test/suite/innodb/r/instant_alter_crash.result @@ -202,27 +202,3 @@ Table Op Msg_type Msg_text test.t2 check status OK DROP TABLE t1,t2; db.opt -# -# MDEV-26198 Assertion `0' failed in row_log_table_apply_op during -# ADD PRIMARY KEY or OPTIMIZE TABLE -# -CREATE TABLE t1(f1 year default null, f2 year default null, -f3 text, f4 year default null, f5 year default null, -f6 year default null, f7 year default null, -f8 year default null)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; -INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); -ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; -set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; -ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ADD COLUMN f10 YEAR DEFAULT NULL, ALGORITHM=INPLACE; -connect con1,localhost,root,,,; -SET DEBUG_SYNC="now WAIT_FOR con1_insert"; -INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); -INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); -SET DEBUG_SYNC="now SIGNAL con1_finish"; -connection default; -disconnect con1; -SET DEBUG_SYNC=RESET; -CHECK TABLE t1; -Table Op Msg_type Msg_text -test.t1 check status OK -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/instant_alter_debug,redundant.rdiff b/mysql-test/suite/innodb/r/instant_alter_debug,redundant.rdiff index f442e406ce4..cf72c37bde8 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug,redundant.rdiff +++ b/mysql-test/suite/innodb/r/instant_alter_debug,redundant.rdiff @@ -2,7 +2,7 @@ FROM information_schema.global_status WHERE variable_name = 'innodb_instant_alter_column'; instants --35 -+36 +-37 ++38 SET GLOBAL innodb_stats_persistent = @save_stats_persistent; # End of 10.6 tests diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result index f2f9d1ed0f2..f48baeb5495 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug.result +++ b/mysql-test/suite/innodb/r/instant_alter_debug.result @@ -481,10 +481,59 @@ SET DEBUG_SYNC="now WAIT_FOR try_insert"; INSERT INTO t1 VALUES(1, 2); ERROR HY000: Lock wait timeout exceeded; try restarting transaction SET DEBUG_SYNC="now SIGNAL alter_progress"; -disconnect con1; connection default; DROP TABLE t1; +# +# MDEV-26198 Assertion `0' failed in row_log_table_apply_op during +# ADD PRIMARY KEY or OPTIMIZE TABLE +# +CREATE TABLE t1(f1 year default null, f2 year default null, +f3 text, f4 year default null, f5 year default null, +f6 year default null, f7 year default null, +f8 year default null)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); +ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; +set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; +ALTER TABLE t1 ADD COLUMN f10 YEAR DEFAULT NULL, FORCE, ALGORITHM=INPLACE; +connection con1; +SET 
DEBUG_SYNC="now WAIT_FOR con1_insert"; +INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); +INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); +SET DEBUG_SYNC="now SIGNAL con1_finish"; +connection default; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +# +# MDEV-19044 Alter table corrupts while applying the +# modification log +# +CREATE TABLE t1 ( +f1 INT, +f2 INT, +f3 char(19) CHARACTER SET utf8mb3, +f4 VARCHAR(500), +f5 TEXT)ENGINE=InnoDB; +INSERT INTO t1 VALUES(3, 1, REPEAT('a', 2), REPEAT("b", 20),'a'); +ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL, ALGORITHM=INSTANT; +INSERT INTO t1 VALUES(1, 2, REPEAT('InnoDB', 2), +REPEAT("MariaDB", 20), REPEAT('a', 8000), 12); +INSERT INTO t1 VALUES(1, 2, REPEAT('MYSQL', 2), +REPEAT("MariaDB", 20), REPEAT('a', 8000), 12); +SET DEBUG_SYNC='innodb_inplace_alter_table_enter SIGNAL con1_begin WAIT_FOR con1_update'; +ALTER TABLE t1 MODIFY COLUMN f2 INT NOT NULL, FORCE, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC='now WAIT_FOR con1_begin'; +UPDATE t1 SET f2=204 order by f1 limit 2; +SET DEBUG_SYNC='now SIGNAL con1_update'; +connection default; +disconnect con1; SET DEBUG_SYNC=reset; +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; # End of 10.4 tests # # MDEV-22867 Assertion instant.n_core_fields == n_core_fields @@ -529,6 +578,6 @@ SELECT variable_value-@old_instant instants FROM information_schema.global_status WHERE variable_name = 'innodb_instant_alter_column'; instants -35 +37 SET GLOBAL innodb_stats_persistent = @save_stats_persistent; # End of 10.6 tests diff --git a/mysql-test/suite/innodb/r/lock_insert_into_empty.result b/mysql-test/suite/innodb/r/lock_insert_into_empty.result index c1dea2fc9af..97369b58aef 100644 --- a/mysql-test/suite/innodb/r/lock_insert_into_empty.result +++ b/mysql-test/suite/innodb/r/lock_insert_into_empty.result @@ -47,6 +47,9 @@ CREATE TABLE t1 (k INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 SET k=1; START TRANSACTION; INSERT INTO t1 SET k=2; +SELECT count(*) > 0 FROM mysql.innodb_index_stats lock in share mode; +count(*) > 0 +1 connect con1,localhost,root,,test; SET innodb_lock_wait_timeout=0; CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB @@ -54,4 +57,6 @@ AS SELECT k FROM t1; ERROR HY000: Lock wait timeout exceeded; try restarting transaction disconnect con1; connection default; +SET innodb_lock_wait_timeout=default; DROP TABLE t1; +DROP TABLE IF EXISTS t2; diff --git a/mysql-test/suite/innodb/r/lock_isolation.result b/mysql-test/suite/innodb/r/lock_isolation.result new file mode 100644 index 00000000000..88a2ad9326e --- /dev/null +++ b/mysql-test/suite/innodb/r/lock_isolation.result @@ -0,0 +1,108 @@ +# +# MDEV-26642 Weird SELECT view when a record is +# modified to the same value by two transactions +# MDEV-32898 Phantom rows caused by updates of PRIMARY KEY +# +CREATE TABLE t(a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t VALUES (1,1),(2,2); +BEGIN; +SELECT * FROM t LOCK IN SHARE MODE; +a b +1 1 +2 2 +connect con_weird,localhost,root; +BEGIN; +SELECT * FROM t; +a b +1 1 +2 2 +connect consistent,localhost,root; +SET innodb_snapshot_isolation=ON; +BEGIN; +SELECT * FROM t; +a b +1 1 +2 2 +connection default; +UPDATE t SET a=3 WHERE b=2; +COMMIT; +connection consistent; +UPDATE t SET b=3; +ERROR HY000: Record has changed since last read in table 't' +SELECT * FROM t; +a b +1 1 +3 2 +COMMIT; +connection con_weird; +UPDATE t SET b=3; +SELECT * FROM t; +a b +1 3 +2 2 +3 3 +COMMIT; +connection default; +SELECT * FROM t; +a b 
+1 3 +3 3 +DROP TABLE t; +# +# MDEV-26643 Inconsistent behaviors of UPDATE under +# READ UNCOMMITTED and READ COMMITTED isolation level +# +CREATE TABLE t(a INT, b INT) ENGINE=InnoDB; +INSERT INTO t VALUES(NULL, 1), (2, 2); +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +BEGIN; +UPDATE t SET a = 10; +connection consistent; +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +UPDATE t SET b = 20 WHERE a; +connection default; +COMMIT; +connection consistent; +SELECT * FROM t; +a b +10 20 +10 20 +connection default; +TRUNCATE TABLE t; +INSERT INTO t VALUES(NULL, 1), (2, 2); +BEGIN; +UPDATE t SET a = 10; +connection consistent; +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +UPDATE t SET b = 20 WHERE a; +connection default; +COMMIT; +connection consistent; +SELECT * FROM t; +a b +10 20 +10 20 +disconnect consistent; +connection default; +TRUNCATE TABLE t; +INSERT INTO t VALUES(NULL, 1), (2, 2); +BEGIN; +UPDATE t SET a = 10; +connection con_weird; +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +UPDATE t SET b = 20 WHERE a; +connection default; +SELECT * FROM t; +a b +10 1 +10 2 +COMMIT; +connection con_weird; +COMMIT; +disconnect con_weird; +connection default; +SELECT * FROM t; +a b +10 1 +10 20 +DROP TABLE t; diff --git a/mysql-test/suite/innodb/r/log_upgrade_101_flags.result b/mysql-test/suite/innodb/r/log_upgrade_101_flags.result new file mode 100644 index 00000000000..2458c51bf23 --- /dev/null +++ b/mysql-test/suite/innodb/r/log_upgrade_101_flags.result @@ -0,0 +1,12 @@ +call mtr.add_suppression("InnoDB: The change buffer is corrupted"); +call mtr.add_suppression("InnoDB: Tablespace size stored in header is 768 pages, but the sum of data file sizes is 384 pages"); +call mtr.add_suppression("InnoDB: adjusting FSP_SPACE_FLAGS of file"); +# restart: --innodb-data-home-dir=MYSQLTEST_VARDIR/tmp/log_upgrade --innodb-log-group-home-dir=MYSQLTEST_VARDIR/tmp/log_upgrade --innodb-undo-directory=MYSQLTEST_VARDIR/tmp/log_upgrade --innodb-force-recovery=5 --innodb-log-file-size=4m --innodb_page_size=32k --innodb_buffer_pool_size=10M +SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES +WHERE engine = 'innodb' +AND support IN ('YES', 'DEFAULT', 'ENABLED'); +COUNT(*) +1 +FOUND 1 /InnoDB: Upgrading redo log:/ in mysqld.1.err +# restart +# End of 10.5 tests diff --git a/mysql-test/suite/innodb/r/monitor.result b/mysql-test/suite/innodb/r/monitor.result index e4b8714768e..ff750211ced 100644 --- a/mysql-test/suite/innodb/r/monitor.result +++ b/mysql-test/suite/innodb/r/monitor.result @@ -51,7 +51,6 @@ buffer_flush_n_to_flush_by_age disabled buffer_flush_adaptive_avg_time disabled buffer_flush_adaptive_avg_pass disabled buffer_LRU_get_free_loops disabled -buffer_LRU_get_free_waits disabled buffer_flush_avg_page_rate disabled buffer_flush_lsn_avg_rate disabled buffer_flush_pct_for_dirty disabled @@ -71,7 +70,6 @@ buffer_LRU_batch_num_scan disabled buffer_LRU_batch_scanned_per_call disabled buffer_LRU_batch_flush_total_pages enabled buffer_LRU_batch_evict_total_pages enabled -buffer_LRU_single_flush_failure_count disabled buffer_LRU_get_free_search disabled buffer_LRU_search_scanned disabled buffer_LRU_search_num_scan disabled diff --git a/mysql-test/suite/innodb/r/rename_table.result b/mysql-test/suite/innodb/r/rename_table.result index 0ed56005e21..a3bf59101b3 100644 --- a/mysql-test/suite/innodb/r/rename_table.result +++ b/mysql-test/suite/innodb/r/rename_table.result @@ -21,11 +21,17 @@ path DROP DATABASE abc_def; # restart DROP DATABASE abc_def2; -call mtr.add_suppression("InnoDB: (Operating system 
error|Error number \\d+ means|Cannot rename file)"); +call mtr.add_suppression("InnoDB: Cannot rename '.*t1.ibd' to '.*non_existing_db.*' because the target schema directory doesn't exist"); CREATE TABLE t1 (a INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES(100); RENAME TABLE t1 TO non_existing_db.t1; ERROR HY000: Error on rename of './test/t1' to './non_existing_db/t1' (errno: 168 "Unknown (generic) error from engine") -FOUND 1 /\[ERROR\] InnoDB: Cannot rename file '.*t1\.ibd' to '.*non_existing_db/ in mysqld.1.err +FOUND 1 /\[ERROR\] InnoDB: Cannot rename '.*t1\.ibd' to '.*non_existing_db/ in mysqld.1.err +SET GLOBAL innodb_fast_shutdown=2; +# restart +SELECT * FROM t1; +a +100 DROP TABLE t1; # # MDEV-25509 Atomic DDL: Assertion `err != DB_DUPLICATE_KEY' diff --git a/mysql-test/suite/innodb/t/alter_copy.test b/mysql-test/suite/innodb/t/alter_copy.test index b62f812f4b7..90f2171d10b 100644 --- a/mysql-test/suite/innodb/t/alter_copy.test +++ b/mysql-test/suite/innodb/t/alter_copy.test @@ -57,7 +57,7 @@ ALTER TABLE t ADD INDEX(b,c,d,a),ADD INDEX(b,c,a,d),ADD INDEX(b,a,c,d),ADD INDEX connection default; SET DEBUG_SYNC='now WAIT_FOR hung'; let $shutdown_timeout=0; ---let $restart_parameters= --innodb-force-recovery=3 --debug_dbug="+d,recv_ran_out_of_buffer" +--let $restart_parameters= --innodb-force-recovery=3 --source include/restart_mysqld.inc disconnect hang; let $shutdown_timeout=; diff --git a/mysql-test/suite/innodb/t/autoinc_debug.test b/mysql-test/suite/innodb/t/autoinc_debug.test index 7722b848c74..d38a70b3376 100644 --- a/mysql-test/suite/innodb/t/autoinc_debug.test +++ b/mysql-test/suite/innodb/t/autoinc_debug.test @@ -92,3 +92,69 @@ SELECT * FROM t1; SHOW CREATE TABLE t1; DROP TABLE t1; SET DEBUG_SYNC='RESET'; + +--echo # +--echo # MDEV-33593: Auto increment deadlock error causes ASSERT in subsequent save point +--echo # + +CREATE TABLE t1(col1 INT PRIMARY KEY AUTO_INCREMENT, col2 INT) ENGINE=InnoDB; +CREATE TABLE t2(col1 INT PRIMARY KEY) ENGINE=InnoDB; +INSERT INTO t1(col2) values(100); + +--connect(con1, localhost, root,,) +START TRANSACTION; +--echo # T1: Acquiring Row X lock on table t2 +INSERT INTO t2 values(100); + +--connect(con2, localhost, root,,) +START TRANSACTION; +--echo # T2: Wait for (T1) row lock on t2 after acquiring GAP Lock on t1 +UPDATE t1 SET col2 = 20 where col1 = 10; +SET DEBUG_SYNC='lock_wait_before_suspend SIGNAL t2_waiting'; +--send INSERT INTO t2 values(100) + +--connection default +SET DEBUG_SYNC='now WAIT_FOR t2_waiting'; +--echo # T3: Wait for (T2) II row Lock on t1 after acquiring Auto Increment Lock on t1 +SET DEBUG_SYNC='lock_wait_before_suspend SIGNAL t3_waiting'; +--send INSERT INTO t1(col2) SELECT col2 from t1 + +--connection con1 +SAVEPOINT s1; +SET DEBUG_SYNC='now WAIT_FOR t3_waiting'; +--echo # T1: Wait for (T3) auto increment lock on t1 causing T1 -> T3 -> T2 -> T1 deadlock +SET debug_dbug = '+d,innodb_deadlock_victim_self'; +--error ER_AUTOINC_READ_FAILED +INSERT INTO t1(col2) VALUES(200); + +--echo # The transaction should have been rolled back +SELECT * FROM t1; +SELECT * FROM t2; + +--echo # Release the previous savepoint using the same name +SAVEPOINT s1; +COMMIT; + +--connection con2 +--reap +COMMIT; + +--connection default +--reap +COMMIT; + +--disconnect con1 +--disconnect con2 + +--echo # Cleanup +SELECT * FROM t1; +DROP TABLE t1; + +SELECT * FROM t2; +DROP TABLE t2; + +SET DEBUG_SYNC='RESET'; + +--echo # +--echo # End of 10.5 tests +--echo # diff --git a/mysql-test/suite/innodb/t/innodb-lru-force-no-free-page.test 
b/mysql-test/suite/innodb/t/innodb-lru-force-no-free-page.test deleted file mode 100644 index d4f08b5afe6..00000000000 --- a/mysql-test/suite/innodb/t/innodb-lru-force-no-free-page.test +++ /dev/null @@ -1,24 +0,0 @@ ---source include/have_innodb.inc ---source include/have_debug.inc ---source include/not_embedded.inc - -call mtr.add_suppression("InnoDB: Difficult to find free blocks in the buffer pool"); - -SET @saved_debug = @@SESSION.debug_dbug; -SET SESSION debug_dbug="+d,ib_lru_force_no_free_page"; - -CREATE TABLE t1 (j LONGBLOB) ENGINE = InnoDB; -BEGIN; -INSERT INTO t1 VALUES (repeat('abcdefghijklmnopqrstuvwxyz',200)); -COMMIT; - -SET debug_dbug = @saved_debug; - -DROP TABLE t1; - -# -# There should be only one message -# -let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; ---let SEARCH_PATTERN=InnoDB: Difficult to find free blocks ---source include/search_pattern_in_file.inc diff --git a/mysql-test/suite/innodb/t/innodb_stats_fetch.test b/mysql-test/suite/innodb/t/innodb_stats_fetch.test index 99fc115af1d..d02cd3b8959 100644 --- a/mysql-test/suite/innodb/t/innodb_stats_fetch.test +++ b/mysql-test/suite/innodb/t/innodb_stats_fetch.test @@ -96,3 +96,11 @@ SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_VIRTUAL LIMIT ROWS EXAMINED 5; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN LIMIT ROWS EXAMINED 5; SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_FOREIGN_COLS LIMIT ROWS EXAMINED 5; DROP TABLE t1; + +--echo # +--echo # MDEV-33462 Disallow LOCK=NONE operation on statistics table +--echo # +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE mysql.innodb_table_stats FORCE, LOCK=NONE; +--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON +ALTER TABLE mysql.innodb_index_stats FORCE, LOCK=NONE; diff --git a/mysql-test/suite/innodb/t/instant_alter_crash.test b/mysql-test/suite/innodb/t/instant_alter_crash.test index f51f61e3c04..76b85b771f7 100644 --- a/mysql-test/suite/innodb/t/instant_alter_crash.test +++ b/mysql-test/suite/innodb/t/instant_alter_crash.test @@ -230,29 +230,3 @@ CHECK TABLE t2; DROP TABLE t1,t2; --list_files $MYSQLD_DATADIR/test - ---echo # ---echo # MDEV-26198 Assertion `0' failed in row_log_table_apply_op during ---echo # ADD PRIMARY KEY or OPTIMIZE TABLE ---echo # -CREATE TABLE t1(f1 year default null, f2 year default null, - f3 text, f4 year default null, f5 year default null, - f6 year default null, f7 year default null, - f8 year default null)ENGINE=InnoDB ROW_FORMAT=REDUNDANT; -INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); -ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; -set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; -send ALTER TABLE t1 ROW_FORMAT=REDUNDANT, ADD COLUMN f10 YEAR DEFAULT NULL, ALGORITHM=INPLACE; - -connect(con1,localhost,root,,,); -SET DEBUG_SYNC="now WAIT_FOR con1_insert"; -INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); -INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); -SET DEBUG_SYNC="now SIGNAL con1_finish"; - -connection default; -reap; -disconnect con1; -SET DEBUG_SYNC=RESET; -CHECK TABLE t1; -DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test index f11d0bd07e2..953f7663df8 100644 --- a/mysql-test/suite/innodb/t/instant_alter_debug.test +++ b/mysql-test/suite/innodb/t/instant_alter_debug.test @@ -551,11 +551,62 @@ SET DEBUG_SYNC="now WAIT_FOR try_insert"; --error ER_LOCK_WAIT_TIMEOUT INSERT INTO t1 VALUES(1, 2); SET DEBUG_SYNC="now SIGNAL alter_progress"; -disconnect con1; connection default; reap; DROP TABLE t1; 
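+# Note on the two cases added below: the MDEV-26198 test is the one removed
+# from instant_alter_crash.test above, now reusing the existing con1 connection
+# and dropping the explicit ROW_FORMAT=REDUNDANT, while MDEV-19044 adds a
+# similar scenario on a table with an instantly added column.  Both follow the
+# usual DEBUG_SYNC handshake (sketched here for orientation, not new test
+# logic): the ALTER's sync point SIGNALs con1 and WAIT_FORs a reply, con1 runs
+# its concurrent DML, then SIGNALs back so the inplace ALTER can resume and
+# apply the accumulated row log.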
+ +--echo # +--echo # MDEV-26198 Assertion `0' failed in row_log_table_apply_op during +--echo # ADD PRIMARY KEY or OPTIMIZE TABLE +--echo # +CREATE TABLE t1(f1 year default null, f2 year default null, + f3 text, f4 year default null, f5 year default null, + f6 year default null, f7 year default null, + f8 year default null)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1, 1, 1, 1, 1, 1, 1, 1); +ALTER TABLE t1 ADD COLUMN f9 year default null, ALGORITHM=INPLACE; +set DEBUG_SYNC="row_log_table_apply1_before SIGNAL con1_insert WAIT_FOR con1_finish"; +send ALTER TABLE t1 ADD COLUMN f10 YEAR DEFAULT NULL, FORCE, ALGORITHM=INPLACE; + +connection con1; +SET DEBUG_SYNC="now WAIT_FOR con1_insert"; +INSERT IGNORE INTO t1 (f3) VALUES ( 'b' ); +INSERT IGNORE INTO t1 (f3) VALUES ( 'l' ); +SET DEBUG_SYNC="now SIGNAL con1_finish"; + +connection default; +reap; +CHECK TABLE t1; +DROP TABLE t1; + +--echo # +--echo # MDEV-19044 Alter table corrupts while applying the +--echo # modification log +--echo # +CREATE TABLE t1 ( + f1 INT, + f2 INT, + f3 char(19) CHARACTER SET utf8mb3, + f4 VARCHAR(500), + f5 TEXT)ENGINE=InnoDB; +INSERT INTO t1 VALUES(3, 1, REPEAT('a', 2), REPEAT("b", 20),'a'); +ALTER TABLE t1 ADD COLUMN f6 INT NOT NULL, ALGORITHM=INSTANT; +INSERT INTO t1 VALUES(1, 2, REPEAT('InnoDB', 2), + REPEAT("MariaDB", 20), REPEAT('a', 8000), 12); +INSERT INTO t1 VALUES(1, 2, REPEAT('MYSQL', 2), + REPEAT("MariaDB", 20), REPEAT('a', 8000), 12); +SET DEBUG_SYNC='innodb_inplace_alter_table_enter SIGNAL con1_begin WAIT_FOR con1_update'; +send ALTER TABLE t1 MODIFY COLUMN f2 INT NOT NULL, FORCE, ALGORITHM=INPLACE; +connection con1; +SET DEBUG_SYNC='now WAIT_FOR con1_begin'; +UPDATE t1 SET f2=204 order by f1 limit 2; +SET DEBUG_SYNC='now SIGNAL con1_update'; +connection default; +reap; +disconnect con1; SET DEBUG_SYNC=reset; +CHECK TABLE t1; +DROP TABLE t1; --echo # End of 10.4 tests diff --git a/mysql-test/suite/innodb/t/lock_insert_into_empty.test b/mysql-test/suite/innodb/t/lock_insert_into_empty.test index 91d2bcdd3ba..42409e8cf35 100644 --- a/mysql-test/suite/innodb/t/lock_insert_into_empty.test +++ b/mysql-test/suite/innodb/t/lock_insert_into_empty.test @@ -51,6 +51,7 @@ CREATE TABLE t1 (k INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 SET k=1; START TRANSACTION; INSERT INTO t1 SET k=2; +SELECT count(*) > 0 FROM mysql.innodb_index_stats lock in share mode; --connect (con1,localhost,root,,test) SET innodb_lock_wait_timeout=0; @@ -59,5 +60,6 @@ CREATE TABLE t2 (pk INT AUTO_INCREMENT PRIMARY KEY) ENGINE=InnoDB AS SELECT k FROM t1; --disconnect con1 --connection default - +SET innodb_lock_wait_timeout=default; DROP TABLE t1; +DROP TABLE IF EXISTS t2; diff --git a/mysql-test/suite/innodb/t/lock_isolation.test b/mysql-test/suite/innodb/t/lock_isolation.test new file mode 100644 index 00000000000..30d2978f4f2 --- /dev/null +++ b/mysql-test/suite/innodb/t/lock_isolation.test @@ -0,0 +1,110 @@ +--source include/have_innodb.inc + +--echo # +--echo # MDEV-26642 Weird SELECT view when a record is +--echo # modified to the same value by two transactions +--echo # MDEV-32898 Phantom rows caused by updates of PRIMARY KEY +--echo # + +CREATE TABLE t(a INT PRIMARY KEY, b INT) ENGINE=InnoDB; +INSERT INTO t VALUES (1,1),(2,2); +BEGIN; SELECT * FROM t LOCK IN SHARE MODE; +--connect con_weird,localhost,root +BEGIN; +SELECT * FROM t; +--connect consistent,localhost,root +SET innodb_snapshot_isolation=ON; +BEGIN; +SELECT * FROM t; +--connection default +UPDATE t SET a=3 WHERE b=2; +COMMIT; +--connection consistent +--error ER_CHECKREAD 
+UPDATE t SET b=3; +SELECT * FROM t; +COMMIT; +--connection con_weird +UPDATE t SET b=3; +SELECT * FROM t; +COMMIT; +--connection default +SELECT * FROM t; +DROP TABLE t; + +--echo # +--echo # MDEV-26643 Inconsistent behaviors of UPDATE under +--echo # READ UNCOMMITTED and READ COMMITTED isolation level +--echo # + +CREATE TABLE t(a INT, b INT) ENGINE=InnoDB; +INSERT INTO t VALUES(NULL, 1), (2, 2); +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +BEGIN; UPDATE t SET a = 10; + +--connection consistent +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +--send UPDATE t SET b = 20 WHERE a + +--connection default +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = 'Updating' + and info = 'UPDATE t SET b = 20 WHERE a'; +--source include/wait_condition.inc + +COMMIT; + +--connection consistent +--reap +SELECT * FROM t; + +--connection default +TRUNCATE TABLE t; +INSERT INTO t VALUES(NULL, 1), (2, 2); +BEGIN; UPDATE t SET a = 10; + +--connection consistent +SET TRANSACTION ISOLATION LEVEL READ COMMITTED; +--send UPDATE t SET b = 20 WHERE a + +--connection default +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where info = 'UPDATE t SET b = 20 WHERE a'; +--source include/wait_condition.inc + +COMMIT; + +--connection consistent +--reap +SELECT * FROM t; +--disconnect consistent + +--connection default +TRUNCATE TABLE t; +INSERT INTO t VALUES(NULL, 1), (2, 2); +BEGIN; UPDATE t SET a = 10; + +--connection con_weird +SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED; +send UPDATE t SET b = 20 WHERE a; + +--connection default +let $wait_condition= + select count(*) = 1 from information_schema.processlist + where state = 'Updating' + and info = 'UPDATE t SET b = 20 WHERE a'; +--source include/wait_condition.inc + +SELECT * FROM t; +COMMIT; + +--connection con_weird +--reap +COMMIT; +--disconnect con_weird + +--connection default +SELECT * FROM t; +DROP TABLE t; diff --git a/mysql-test/suite/innodb/t/log_upgrade_101_flags.test b/mysql-test/suite/innodb/t/log_upgrade_101_flags.test new file mode 100644 index 00000000000..7b54e3ae9aa --- /dev/null +++ b/mysql-test/suite/innodb/t/log_upgrade_101_flags.test @@ -0,0 +1,107 @@ +--source include/have_innodb.inc +--source include/big_test.inc +--source include/not_embedded.inc +call mtr.add_suppression("InnoDB: The change buffer is corrupted"); +call mtr.add_suppression("InnoDB: Tablespace size stored in header is 768 pages, but the sum of data file sizes is 384 pages"); +call mtr.add_suppression("InnoDB: adjusting FSP_SPACE_FLAGS of file"); +--source include/shutdown_mysqld.inc +let bugdir= $MYSQLTEST_VARDIR/tmp/log_upgrade; +--mkdir $bugdir +--let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err +--let $dirs= --innodb-data-home-dir=$bugdir --innodb-log-group-home-dir=$bugdir --innodb-undo-directory=$bugdir + +# Test case similar to log_upgrade.test +perl; +do "$ENV{MTR_SUITE_DIR}/../innodb/include/crc32.pl"; +my $polynomial = 0x82f63b78; # CRC-32C + +die unless open OUT, ">", "$ENV{bugdir}/ibdata1"; +binmode OUT; + +my $head = pack("Nx[18]", 0); +# Add FSP_SPACE_FLAGS as 49152 (10.1.0...10.1.20), page_size = 32k +my $body = pack("x[8]Nx[4]Nx[2]Nx[32696]", 768, 49152, 97937874); +my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); +print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck); + +# Dummy change buffer header page (page 3). 
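+# (This and the remaining pages reuse the layout of page 0 above: a 4-byte
+#  checksum, 22 further FIL-header bytes packed into $head (page number, prev,
+#  next, LSN, page type), 12 zero bytes that the checksum calculation skips
+#  (flush LSN + space id), the page body, and an 8-byte trailer carrying the
+#  checksum again plus the low 32 bits of the LSN.  mycrc32($head) XOR
+#  mycrc32($body) should thus reproduce InnoDB's CRC-32C page checksum.)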
+die unless seek(OUT, 3 * 32768, 0); +## FIL_PAGE_OFFSET, FIL_PAGE_PREV, FIL_PAGE_NEXT, FIL_PAGE_TYPE +my $head = pack("NNNx[8]n", 3, 0xffffffff, 0xffffffff, 6); +my $body = pack("x[62]nnx[32656]", 2, 50); +my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); +print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck); + +# Dummy change buffer root page (page 4). +## FIL_PAGE_OFFSET, FIL_PAGE_PREV, FIL_PAGE_NEXT +my $head = pack("NNNx[10]", 4, 0xffffffff, 0xffffffff); +my $body = chr(0) x 32722; +my $ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); +print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck); + + +# Dictionary header page (page 7). +die unless seek(OUT, 7 * 32768, 0); +$head = pack("Nx[18]", 7); +$body = pack("x[32]Nx[8]Nx[32674]", 8, 9); +$ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); +print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck); + +# Empty SYS_TABLES page (page 8). +$head = pack("NNNx[8]n", 8, ~0, ~0, 17855); +$body = pack("nnx[31]Cx[20]", 2, 124, 1); +$body .= pack("nxnn", 0x801, 3, 116) . "infimum"; +$body .= pack("xnxnxx", 0x901, 0x803) . "supremum"; +$body .= pack("x[32632]nn", 116, 101); +$ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); +print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck); + +# Empty SYS_INDEXES page (page 9). +$head = pack("NNNx[8]n", 9, ~0, ~0, 17855); +$body = pack("nnx[31]Cx[20]", 2, 124, 3); +$body .= pack("nxnn", 0x801, 3, 116) . "infimum"; +$body .= pack("xnxnxx", 0x901, 0x803) . "supremum"; +$body .= pack("x[32632]nn", 116, 101); +$ck = mycrc32($head, 0, $polynomial) ^ mycrc32($body, 0, $polynomial); +print OUT pack("N",$ck).$head.pack("x[12]").$body.pack("Nx[4]",$ck); + +die unless seek(OUT, 768 * 16384 - 1, 0); +print OUT chr(0); +close OUT or die; + +die unless open OUT, ">", "$ENV{bugdir}/ib_logfile0"; +binmode OUT; +$_= pack("Nx[5]nx[5]", 1, 0x1286) . "BogoDB 4.3.2.1" . chr(0) x 478; +print OUT $_, pack("N", mycrc32($_, 0, $polynomial)); +# checkpoint page 1 and all-zero checkpoint 2 +$_= pack("x[13]nCNNx[484]", 0x1286, 12, 2, 0x80c); +print OUT $_, pack("N", mycrc32($_, 0, $polynomial)); +die unless seek(OUT, 0x1FFFFFFFF, 0); +print OUT chr(0); +close OUT or die; +die unless open OUT, ">", "$ENV{bugdir}/ib_logfile1"; +binmode OUT; +die unless seek(OUT, 0x800, 0); # the first 2048 bytes are unused! 
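+# (The block written next appears to be a single 512-byte log block in the
+#  pre-10.2 on-disk format: a 4-byte header number with the flush bit set,
+#  data length 12 and first-record offset 12 (i.e. only the 12-byte block
+#  header and no log records), zero-padded and terminated by its CRC-32C.)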
+$_= pack("Nnnx[500]", 0x80000944, 12, 12); +print OUT $_, pack("N", mycrc32($_, 0, $polynomial)); +die unless seek(OUT, 0x1FFFFFFFF, 0); +print OUT chr(0); +close OUT or die; +EOF + +--let $restart_parameters= $dirs --innodb-force-recovery=5 --innodb-log-file-size=4m --innodb_page_size=32k --innodb_buffer_pool_size=10M +--source include/start_mysqld.inc +SELECT COUNT(*) FROM INFORMATION_SCHEMA.ENGINES +WHERE engine = 'innodb' +AND support IN ('YES', 'DEFAULT', 'ENABLED'); +--source include/shutdown_mysqld.inc +--let SEARCH_PATTERN= InnoDB: Upgrading redo log: +--source include/search_pattern_in_file.inc +--let $restart_parameters= $dirs + +--remove_files_wildcard $bugdir +--rmdir $bugdir +--let $restart_parameters= +--source include/start_mysqld.inc + +--echo # End of 10.5 tests diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index 8a38a418877..ec02c726891 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -4,10 +4,6 @@ SET @save_stats_persistent = @@GLOBAL.innodb_stats_persistent; SET GLOBAL innodb_stats_persistent = 0; ---disable_query_log -call mtr.add_suppression("InnoDB: Difficult to find free blocks in the buffer pool"); ---enable_query_log - CREATE TABLE t1 ( a SERIAL, b CHAR(255) NOT NULL DEFAULT '', c BOOLEAN DEFAULT false, l LINESTRING NOT NULL DEFAULT ST_linefromtext('linestring(448 -689, diff --git a/mysql-test/suite/innodb/t/rename_table.test b/mysql-test/suite/innodb/t/rename_table.test index 654f8809b22..a61813429b3 100644 --- a/mysql-test/suite/innodb/t/rename_table.test +++ b/mysql-test/suite/innodb/t/rename_table.test @@ -32,17 +32,22 @@ DROP DATABASE abc_def; DROP DATABASE abc_def2; -call mtr.add_suppression("InnoDB: (Operating system error|Error number \\d+ means|Cannot rename file)"); +call mtr.add_suppression("InnoDB: Cannot rename '.*t1.ibd' to '.*non_existing_db.*' because the target schema directory doesn't exist"); CREATE TABLE t1 (a INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES(100); --replace_result "\\" "/" --error ER_ERROR_ON_RENAME RENAME TABLE t1 TO non_existing_db.t1; ---let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot rename file '.*t1\.ibd' to '.*non_existing_db +--let SEARCH_PATTERN= \[ERROR\] InnoDB: Cannot rename '.*t1\.ibd' to '.*non_existing_db let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; --source include/search_pattern_in_file.inc +SET GLOBAL innodb_fast_shutdown=2; +--source include/restart_mysqld.inc + +SELECT * FROM t1; # Cleanup DROP TABLE t1; diff --git a/mysql-test/suite/innodb_fts/r/foreign_key_update.result b/mysql-test/suite/innodb_fts/r/foreign_key_update.result index f2d47da78c5..87c21c0bfc5 100644 --- a/mysql-test/suite/innodb_fts/r/foreign_key_update.result +++ b/mysql-test/suite/innodb_fts/r/foreign_key_update.result @@ -32,3 +32,15 @@ database database DROP TABLE t1_fk; DROP TABLE t1; +# +# MDEV-32346 Assertion failure sym_node->table != NULL +# in pars_retrieve_table_def on UPDATE +# +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (a INT, b TEXT, FOREIGN KEY(a) REFERENCES t1(a), +FULLTEXT (b))ENGINE=InnoDB; +INSERT INTO t1 SET a=1; +ALTER TABLE t2 DISCARD TABLESPACE; +UPDATE t1 SET a=2; +ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test`.`t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`)) +DROP TABLE t2,t1; diff --git a/mysql-test/suite/innodb_fts/t/foreign_key_update.test b/mysql-test/suite/innodb_fts/t/foreign_key_update.test 
index 1f74e640088..8a64ac33d69 100644 --- a/mysql-test/suite/innodb_fts/t/foreign_key_update.test +++ b/mysql-test/suite/innodb_fts/t/foreign_key_update.test @@ -32,3 +32,16 @@ SELECT * FROM t1_fk WHERE MATCH(a) AGAINST('database'); DROP TABLE t1_fk; DROP TABLE t1; + +--echo # +--echo # MDEV-32346 Assertion failure sym_node->table != NULL +--echo # in pars_retrieve_table_def on UPDATE +--echo # +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t2 (a INT, b TEXT, FOREIGN KEY(a) REFERENCES t1(a), + FULLTEXT (b))ENGINE=InnoDB; +INSERT INTO t1 SET a=1; +ALTER TABLE t2 DISCARD TABLESPACE; +--error ER_ROW_IS_REFERENCED_2 +UPDATE t1 SET a=2; +DROP TABLE t2,t1; diff --git a/mysql-test/suite/innodb_zip/t/innochecksum_2.test b/mysql-test/suite/innodb_zip/t/innochecksum_2.test index 1743bd4a1eb..f6b0e5de252 100644 --- a/mysql-test/suite/innodb_zip/t/innochecksum_2.test +++ b/mysql-test/suite/innodb_zip/t/innochecksum_2.test @@ -9,11 +9,6 @@ --source include/not_embedded.inc -- source include/big_test.inc ---disable_query_log -# This warning occurs due to small buffer pool size(i.e. 8MB). It doesn't occur -# with --mysqld=--innodb_buffer_pool_size=10MB -call mtr.add_suppression("\\[Warning\\] InnoDB: Difficult to find free blocks in the buffer pool.*"); ---enable_query_log let MYSQLD_BASEDIR= `SELECT @@basedir`; let MYSQLD_DATADIR= `SELECT @@datadir`; let SEARCH_FILE= $MYSQLTEST_VARDIR/log/my_restart.err; diff --git a/mysql-test/suite/maria/alter.result b/mysql-test/suite/maria/alter.result index cc035426745..17164343163 100644 --- a/mysql-test/suite/maria/alter.result +++ b/mysql-test/suite/maria/alter.result @@ -193,3 +193,29 @@ ALTER TABLE t1 DISABLE KEYS; INSERT INTO t1 VALUES (1, 'Nine chars or more'); ALTER TABLE t1 ENABLE KEYS; DROP TABLE t1; +# +# MDEV-25923 Memory not freed or Assertion `old_flags == ((my_flags & +# 0x10000U) ? 1 : 0)' failed in my_realloc upon ALTER on Aria table +# with GIS column +# +CREATE TABLE t1 (pk INT PRIMARY KEY, a POINT DEFAULT ST_GEOMFROMTEXT('Point(1 1)')) ENGINE=Aria; +INSERT INTO t1 (pk) SELECT seq FROM seq_1_to_100; +SET @old_threads= @@SESSION.aria_repair_threads; +SET SESSION aria_repair_threads= 2; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +DROP TABLE t1; +# +# MDEV-33562: Assertion `(old_flags & 1) == ((my_flags & 0x10000U) ? +# 1 : 0)' failed in my_realloc from sort_get_next_record on INSERT +# +SET @old_mode= @@SESSION.sql_mode; +SET sql_mode=''; +CREATE TEMPORARY TABLE t (b TEXT, INDEX s(b(300))) ROW_FORMAT=DYNAMIC ENGINE=Aria; +INSERT INTO t VALUES (REPEAT ('a',10000000)); +Warnings: +Warning 1265 Data truncated for column 'b' at row 1 +CREATE TABLE ti LIKE t; +INSERT INTO ti SELECT * FROM t; +DROP TABLE t, ti; +SET SESSION aria_repair_threads= @old_threads; +SET SESSION sql_mode= @old_mode; diff --git a/mysql-test/suite/maria/alter.test b/mysql-test/suite/maria/alter.test index 525cd80f3d9..a68b5f2e0d7 100644 --- a/mysql-test/suite/maria/alter.test +++ b/mysql-test/suite/maria/alter.test @@ -203,3 +203,31 @@ ALTER TABLE t1 DISABLE KEYS; INSERT INTO t1 VALUES (1, 'Nine chars or more'); ALTER TABLE t1 ENABLE KEYS; DROP TABLE t1; + +--echo # +--echo # MDEV-25923 Memory not freed or Assertion `old_flags == ((my_flags & +--echo # 0x10000U) ? 
1 : 0)' failed in my_realloc upon ALTER on Aria table +--echo # with GIS column +--echo # + +CREATE TABLE t1 (pk INT PRIMARY KEY, a POINT DEFAULT ST_GEOMFROMTEXT('Point(1 1)')) ENGINE=Aria; +INSERT INTO t1 (pk) SELECT seq FROM seq_1_to_100; +SET @old_threads= @@SESSION.aria_repair_threads; +SET SESSION aria_repair_threads= 2; +ALTER TABLE t1 ROW_FORMAT=DYNAMIC; +DROP TABLE t1; + +--echo # +--echo # MDEV-33562: Assertion `(old_flags & 1) == ((my_flags & 0x10000U) ? +--echo # 1 : 0)' failed in my_realloc from sort_get_next_record on INSERT +--echo # + +SET @old_mode= @@SESSION.sql_mode; +SET sql_mode=''; +CREATE TEMPORARY TABLE t (b TEXT, INDEX s(b(300))) ROW_FORMAT=DYNAMIC ENGINE=Aria; +INSERT INTO t VALUES (REPEAT ('a',10000000)); +CREATE TABLE ti LIKE t; +INSERT INTO ti SELECT * FROM t; +DROP TABLE t, ti; +SET SESSION aria_repair_threads= @old_threads; +SET SESSION sql_mode= @old_mode; diff --git a/mysql-test/suite/maria/ps_maria.result b/mysql-test/suite/maria/ps_maria.result index 5c128a32dd1..3f806c43dbd 100644 --- a/mysql-test/suite/maria/ps_maria.result +++ b/mysql-test/suite/maria/ps_maria.result @@ -1798,7 +1798,7 @@ t5 CREATE TABLE `t5` ( `param09` longtext DEFAULT NULL, `const10` bigint(17) DEFAULT NULL, `param10` bigint(20) DEFAULT NULL, - `const11` int(4) DEFAULT NULL, + `const11` int(5) DEFAULT NULL, `param11` bigint(20) DEFAULT NULL, `const12` binary(0) DEFAULT NULL, `param12` bigint(20) DEFAULT NULL, @@ -1828,7 +1828,7 @@ def test t5 t5 const09 const09 12 19 19 Y 128 0 63 def test t5 t5 param09 param09 252 4294967295 19 Y 16 0 8 def test t5 t5 const10 const10 8 17 9 Y 32768 0 63 def test t5 t5 param10 param10 8 20 9 Y 32768 0 63 -def test t5 t5 const11 const11 3 4 4 Y 32768 0 63 +def test t5 t5 const11 const11 3 5 4 Y 32768 0 63 def test t5 t5 param11 param11 8 20 4 Y 32768 0 63 def test t5 t5 const12 const12 254 0 0 Y 128 0 63 def test t5 t5 param12 param12 8 20 0 Y 32768 0 63 diff --git a/mysql-test/suite/mariabackup/absolute_ibdata_paths.test b/mysql-test/suite/mariabackup/absolute_ibdata_paths.test index fa304f0bc55..f4d0e2a5bd5 100644 --- a/mysql-test/suite/mariabackup/absolute_ibdata_paths.test +++ b/mysql-test/suite/mariabackup/absolute_ibdata_paths.test @@ -30,7 +30,7 @@ let $_innodb_data_file_path=`select @@innodb_data_file_path`; let $_innodb_data_home_dir=`select @@innodb_data_home_dir`; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir; --enable_result_log exec $XTRABACKUP --prepare --target-dir=$targetdir; diff --git a/mysql-test/suite/mariabackup/alter_copy_race.result b/mysql-test/suite/mariabackup/alter_copy_race.result index 82202249f81..ae58ac28881 100644 --- a/mysql-test/suite/mariabackup/alter_copy_race.result +++ b/mysql-test/suite/mariabackup/alter_copy_race.result @@ -4,7 +4,7 @@ INSERT into t1 values(1); connect con2, localhost, root,,; connection con2; set lock_wait_timeout=1; -SET debug_sync='copy_data_between_tables_before_reset_backup_lock SIGNAL go WAIT_FOR after_backup_stage_block_commit'; +SET debug_sync='copy_data_between_tables_before_reset_backup_lock SIGNAL go WAIT_FOR after_backup_stage_block_ddl'; SET debug_sync='alter_table_after_temp_table_drop SIGNAL temp_table_dropped'; SET debug_sync='now WAIT_FOR after_backup_stage_start';ALTER TABLE test.t1 FORCE, algorithm=COPY;| connection default; diff --git a/mysql-test/suite/mariabackup/alter_copy_race.test 
b/mysql-test/suite/mariabackup/alter_copy_race.test index 553643bf667..cfabd76c513 100644 --- a/mysql-test/suite/mariabackup/alter_copy_race.test +++ b/mysql-test/suite/mariabackup/alter_copy_race.test @@ -18,7 +18,7 @@ INSERT into t1 values(1); connect con2, localhost, root,,; connection con2; set lock_wait_timeout=1; -SET debug_sync='copy_data_between_tables_before_reset_backup_lock SIGNAL go WAIT_FOR after_backup_stage_block_commit'; +SET debug_sync='copy_data_between_tables_before_reset_backup_lock SIGNAL go WAIT_FOR after_backup_stage_block_ddl'; SET debug_sync='alter_table_after_temp_table_drop SIGNAL temp_table_dropped'; DELIMITER |; send SET debug_sync='now WAIT_FOR after_backup_stage_start';ALTER TABLE test.t1 FORCE, algorithm=COPY;| @@ -27,7 +27,7 @@ connection default; # setup mariabackup events let after_backup_stage_start=SET debug_sync='now SIGNAL after_backup_stage_start WAIT_FOR go'; -let after_backup_stage_block_commit=SET debug_sync='now SIGNAL after_backup_stage_block_commit'; +let after_backup_stage_block_ddl=SET debug_sync='now SIGNAL after_backup_stage_block_ddl'; let backup_fix_ddl=SET debug_sync='now WAIT_FOR temp_table_dropped'; --disable_result_log exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,mariabackup_events; diff --git a/mysql-test/suite/mariabackup/apply-log-only-incr.test b/mysql-test/suite/mariabackup/apply-log-only-incr.test index 01b74af2211..1da2be090d6 100644 --- a/mysql-test/suite/mariabackup/apply-log-only-incr.test +++ b/mysql-test/suite/mariabackup/apply-log-only-incr.test @@ -19,7 +19,7 @@ dec $n; } --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir; --enable_result_log let $n=100; while ($n) { @@ -36,7 +36,7 @@ disconnect flush_log; connection default; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --ftwrl-wait-timeout=5 --ftwrl-wait-threshold=300 --ftwrl-wait-query-type=all --target-dir=$incremental_dir --incremental-basedir=$basedir ; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --ftwrl-wait-timeout=5 --ftwrl-wait-threshold=300 --ftwrl-wait-query-type=all --target-dir=$incremental_dir --incremental-basedir=$basedir ; exec $XTRABACKUP --prepare --verbose --target-dir=$basedir ; --enable_result_log diff --git a/mysql-test/suite/mariabackup/apply-log-only.test b/mysql-test/suite/mariabackup/apply-log-only.test index 7ffed0719f0..dbae77b7c9a 100644 --- a/mysql-test/suite/mariabackup/apply-log-only.test +++ b/mysql-test/suite/mariabackup/apply-log-only.test @@ -8,7 +8,7 @@ start transaction; INSERT INTO t VALUES(1); --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir; --enable_result_log exec $XTRABACKUP --prepare --target-dir=$basedir ; diff --git a/mysql-test/suite/mariabackup/aria_backup.opt b/mysql-test/suite/mariabackup/aria_backup.opt new file mode 100644 index 00000000000..3565e3f1023 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_backup.opt @@ -0,0 +1 @@ +--loose-partition --loose-aria-log-file-size=8388608 diff --git a/mysql-test/suite/mariabackup/aria_backup.result b/mysql-test/suite/mariabackup/aria_backup.result new file mode 100644 index 00000000000..e8c73f0ab1e --- /dev/null 
+++ b/mysql-test/suite/mariabackup/aria_backup.result @@ -0,0 +1,780 @@ +### +# Test for backup to stream +##### +### +# Test for mix of online/offline backup tables +##### +CREATE TABLE t_default(i INT PRIMARY KEY) +ENGINE ARIA; +INSERT INTO t_default VALUES (1); +CREATE TABLE t_tr_p_ch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_tr_p_ch VALUES (1); +CREATE TABLE t_tr_p_nch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=0; +INSERT INTO t_tr_p_nch VALUES (1); +CREATE TABLE t_p_ch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_p_ch VALUES (1); +CREATE TABLE t_p_nch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=PAGE PAGE_CHECKSUM=0; +INSERT INTO t_p_nch VALUES (1); +CREATE TABLE t_fixed(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=FIXED PAGE_CHECKSUM=1; +INSERT INTO t_fixed VALUES (1); +CREATE TABLE t_dyn(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=DYNAMIC PAGE_CHECKSUM=1; +INSERT INTO t_dyn VALUES (1); +# Test for partitioned table +CREATE TABLE t_part_online(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL = 1 PAGE_CHECKSUM = 1 +PARTITION BY RANGE( i ) ( +PARTITION p0 VALUES LESS THAN (10), +PARTITION p1 VALUES LESS THAN (20), +PARTITION p2 VALUES LESS THAN (30) +); +INSERT INTO t_part_online VALUES(5); +INSERT INTO t_part_online VALUES(15); +INSERT INTO t_part_online VALUES(25); +SELECT * FROM t_part_online; +i +5 +15 +25 +CREATE TABLE t_part_offline(i INT) +ENGINE ARIA TRANSACTIONAL = 0 PAGE_CHECKSUM = 0 +PARTITION BY RANGE( i ) ( +PARTITION p0 VALUES LESS THAN (10), +PARTITION p1 VALUES LESS THAN (20), +PARTITION p2 VALUES LESS THAN (30) +); +INSERT INTO t_part_offline VALUES(5); +INSERT INTO t_part_offline VALUES(15); +INSERT INTO t_part_offline VALUES(25); +# Test for filename to tablename mapping +CREATE TABLE `t 1 t-1`(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO `t 1 t-1` VALUES (1); +CREATE TABLE `t-part online`(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL = 1 PAGE_CHECKSUM = 1 +PARTITION BY RANGE( i ) ( +PARTITION p0 VALUES LESS THAN (10), +PARTITION p1 VALUES LESS THAN (20), +PARTITION p2 VALUES LESS THAN (30) +); +INSERT INTO `t-part online` VALUES(5); +INSERT INTO `t-part online` VALUES(15); +INSERT INTO `t-part online` VALUES(25); +### +# Test for redo log files backup; +##### +CREATE TABLE t_logs_1(i INT) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +CREATE TABLE t_logs_2 LIKE t_logs_1; +CREATE TABLE t_bulk_ins LIKE t_logs_1; +INSERT INTO t_logs_1 VALUES +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9); +# Generate several log files +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM 
t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +### +# Test for DML during backup for online backup +##### +CREATE TABLE t_dml_ins(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_ins VALUES(1); +CREATE TABLE t_dml_upd(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_upd VALUES(1); +CREATE TABLE t_dml_del(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_del VALUES(1); +### +# Test for DDL during backup for online backup +##### +CREATE DATABASE test_for_db_drop; +CREATE TABLE test_for_db_drop.t(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_db_create(i INT PRIMARY KEY) ENGINE ARIA; +SHOW DATABASES; +Database +information_schema +mtr +mysql +performance_schema +sys +test +test_for_db_drop +CREATE TABLE t_alter(i INT PRIMARY KEY) ENGINE ARIA; +INSERT INTO t_alter VALUES (1); +CREATE TABLE t_trunc(i INT PRIMARY KEY) ENGINE ARIA; +INSERT INTO t_trunc VALUES (1); +CREATE TABLE t_ch_i (i int(10), index(i) ) ENGINE=Aria; +INSERT INTO t_ch_i VALUES(1); +CREATE TABLE t_change_engine(i INT PRIMARY KEY) ENGINE InnoDB; +INSERT INTO t_change_engine VALUES (1); +CREATE TABLE t_rename(i INT PRIMARY KEY) ENGINE ARIA; +CREATE DATABASE test_for_rename; +CREATE TABLE t_rename_2(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_3(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_4(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_delete(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_delete_2(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_alter(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_create(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_part_create(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_part_add_part(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_part_change_eng(i INT PRIMARY KEY) ENGINE ARIA PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_change_eng_2(i INT PRIMARY KEY) ENGINE InnoDB PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_change_eng_3(i INT PRIMARY KEY) ENGINE Aria; +CREATE TABLE t_part_alter(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_alter_2(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 3; +CREATE TABLE t_part_drop(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_rename(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_rename_3(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_rm_part(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +SET SESSION debug_dbug="+d,maria_flush_whole_log"; +SET GLOBAL aria_checkpoint_interval=10000; +### Backup to stream +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart +### Result for DDL test +SHOW CREATE TABLE t_alter; +Table Create Table +t_alter CREATE TABLE `t_alter` ( + `i` int(11) NOT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_alter; +i c +1 NULL +SHOW CREATE TABLE t_change_engine; +Table Create Table +t_change_engine CREATE TABLE `t_change_engine` ( + `i` int(11) NOT NULL, + PRIMARY 
KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_change_engine; +i +1 +SELECT * FROM t_trunc; +i +1 +SELECT * FROM t_ch_i; +i +1 +SELECT * FROM t_rename_new; +i +SELECT * FROM test_for_rename.t_rename_new_2; +i +SELECT * FROM t_rename_new_new_3; +i +SELECT * FROM t_rename_new_4; +i +SELECT * FROM t_delete; +ERROR 42S02: Table 'test.t_delete' doesn't exist +SHOW CREATE TABLE t_delete_2; +Table Create Table +t_delete_2 CREATE TABLE `t_delete_2` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_rename_alter_2; +i c +SELECT * FROM t_rename_create; +d +SELECT * FROM t_rename_create_new; +i +SHOW CREATE TABLE t_part_create_2; +Table Create Table +t_part_create_2 CREATE TABLE `t_part_create_2` ( + `i` int(11) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_create_2; +i +SHOW CREATE TABLE t_part_add_part; +Table Create Table +t_part_add_part CREATE TABLE `t_part_add_part` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_add_part; +i +SHOW CREATE TABLE t_part_change_eng; +Table Create Table +t_part_change_eng CREATE TABLE `t_part_change_eng` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_change_eng; +i +SHOW CREATE TABLE t_part_change_eng_2; +Table Create Table +t_part_change_eng_2 CREATE TABLE `t_part_change_eng_2` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_change_eng_2; +i +SELECT * FROM t_part_alter; +i c +SHOW CREATE TABLE t_part_alter_2; +Table Create Table +t_part_alter_2 CREATE TABLE `t_part_alter_2` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_alter_2; +i +SELECT * FROM t_part_drop; +ERROR 42S02: Table 'test.t_part_drop' doesn't exist +SELECT * FROM t_part_rename; +ERROR 42S02: Table 'test.t_part_rename' doesn't exist +SELECT * FROM t_part_rename_2; +i +SELECT * FROM t_part_rename_3; +ERROR 42S02: Table 'test.t_part_rename_3' doesn't exist +SELECT * FROM test_for_rename.t_part_rename_4; +i +SHOW CREATE TABLE t_part_rm_part; +Table Create Table +t_part_rm_part CREATE TABLE `t_part_rm_part` ( + `i` int(11) NOT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_part_rm_part; +i c +SHOW DATABASES; +Database +information_schema +mtr +mysql +performance_schema +sys +test +test_for_db_create +test_for_rename +### Clean up for DDL test +DROP DATABASE test_for_db_create; +DROP TABLE t_db_create; +DROP TABLE t_change_engine; +DROP TABLE t_alter; +DROP TABLE t_trunc; +DROP TABLE t_ch_i; +DROP TABLE t_rename_new; +DROP TABLE t_rename_new_new_3; +DROP TABLE t_rename_new_4; +DROP TABLE t_delete_2; +DROP TABLE t_rename_alter_2; +DROP TABLE t_rename_create; +DROP TABLE t_rename_create_new; +DROP TABLE t_part_create; +DROP TABLE t_part_create_2; +DROP TABLE t_part_add_part; +DROP TABLE t_part_change_eng; +DROP TABLE t_part_change_eng_2; +DROP TABLE 
t_part_change_eng_3; +DROP TABLE t_part_alter; +DROP TABLE t_part_alter_2; +DROP TABLE t_part_rename_2; +DROP TABLE t_part_rm_part; +DROP DATABASE test_for_rename; +### Result for DML test +SELECT * FROM t_dml_ins; +i +1 +2 +SELECT * FROM t_dml_upd; +i +2 +SELECT * FROM t_dml_del; +i +### Clean up for DML test +DROP TABLE t_dml_ins; +DROP TABLE t_dml_upd; +DROP TABLE t_dml_del; +### Result for redo log files backup +# ok +# ok +# ok +### Cleanup for redo log files backup +DROP TABLE t_logs_1; +DROP TABLE t_logs_2; +DROP TABLE t_bulk_ins; +### Result for online/offline tables test +SELECT * FROM t_default; +i +1 +SELECT * FROM t_tr_p_ch; +i +1 +SELECT * FROM t_tr_p_nch; +i +1 +SELECT * FROM t_p_ch; +i +1 +SELECT * FROM t_p_nch; +i +1 +SELECT * FROM t_fixed; +i +1 +SELECT * FROM t_dyn; +i +1 +SELECT * FROM t_part_online; +i +5 +15 +25 +SELECT * FROM t_part_offline; +i +5 +15 +25 +SELECT * FROM `t 1 t-1`; +i +1 +SELECT * FROM `t-part online`; +i +5 +15 +25 +### Cleanup for online/offline tables test +DROP TABLE t_default; +DROP TABLE t_tr_p_ch; +DROP TABLE t_tr_p_nch; +DROP TABLE t_p_ch; +DROP TABLE t_p_nch; +DROP TABLE t_fixed; +DROP TABLE t_dyn; +DROP TABLE t_part_online; +DROP TABLE t_part_offline; +DROP TABLE `t 1 t-1`; +DROP TABLE `t-part online`; +### +# Test for backup to directory +##### +### +# Test for mix of online/offline backup tables +##### +CREATE TABLE t_default(i INT PRIMARY KEY) +ENGINE ARIA; +INSERT INTO t_default VALUES (1); +CREATE TABLE t_tr_p_ch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_tr_p_ch VALUES (1); +CREATE TABLE t_tr_p_nch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=0; +INSERT INTO t_tr_p_nch VALUES (1); +CREATE TABLE t_p_ch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_p_ch VALUES (1); +CREATE TABLE t_p_nch(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=PAGE PAGE_CHECKSUM=0; +INSERT INTO t_p_nch VALUES (1); +CREATE TABLE t_fixed(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=FIXED PAGE_CHECKSUM=1; +INSERT INTO t_fixed VALUES (1); +CREATE TABLE t_dyn(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=DYNAMIC PAGE_CHECKSUM=1; +INSERT INTO t_dyn VALUES (1); +# Test for partitioned table +CREATE TABLE t_part_online(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL = 1 PAGE_CHECKSUM = 1 +PARTITION BY RANGE( i ) ( +PARTITION p0 VALUES LESS THAN (10), +PARTITION p1 VALUES LESS THAN (20), +PARTITION p2 VALUES LESS THAN (30) +); +INSERT INTO t_part_online VALUES(5); +INSERT INTO t_part_online VALUES(15); +INSERT INTO t_part_online VALUES(25); +SELECT * FROM t_part_online; +i +5 +15 +25 +CREATE TABLE t_part_offline(i INT) +ENGINE ARIA TRANSACTIONAL = 0 PAGE_CHECKSUM = 0 +PARTITION BY RANGE( i ) ( +PARTITION p0 VALUES LESS THAN (10), +PARTITION p1 VALUES LESS THAN (20), +PARTITION p2 VALUES LESS THAN (30) +); +INSERT INTO t_part_offline VALUES(5); +INSERT INTO t_part_offline VALUES(15); +INSERT INTO t_part_offline VALUES(25); +# Test for filename to tablename mapping +CREATE TABLE `t 1 t-1`(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO `t 1 t-1` VALUES (1); +CREATE TABLE `t-part online`(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL = 1 PAGE_CHECKSUM = 1 +PARTITION BY RANGE( i ) ( +PARTITION p0 VALUES LESS THAN (10), +PARTITION p1 VALUES LESS THAN (20), +PARTITION p2 VALUES LESS THAN (30) +); +INSERT INTO `t-part online` VALUES(5); +INSERT INTO `t-part online` 
VALUES(15); +INSERT INTO `t-part online` VALUES(25); +### +# Test for redo log files backup; +##### +CREATE TABLE t_logs_1(i INT) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +CREATE TABLE t_logs_2 LIKE t_logs_1; +CREATE TABLE t_bulk_ins LIKE t_logs_1; +INSERT INTO t_logs_1 VALUES +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9), +(0), (1), (2), (3), (4), (5), (6), (7), (8), (9); +# Generate several log files +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +### +# Test for DML during backup for online backup +##### +CREATE TABLE t_dml_ins(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_ins VALUES(1); +CREATE TABLE t_dml_upd(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_upd VALUES(1); +CREATE TABLE t_dml_del(i INT PRIMARY KEY) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_del VALUES(1); +### +# Test for DDL during backup for online backup +##### +CREATE DATABASE test_for_db_drop; +CREATE TABLE test_for_db_drop.t(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_db_create(i INT PRIMARY KEY) ENGINE ARIA; +SHOW DATABASES; +Database +information_schema +mtr +mysql +performance_schema +sys +test +test_for_db_drop +CREATE TABLE t_alter(i INT PRIMARY KEY) ENGINE ARIA; +INSERT INTO t_alter VALUES (1); +CREATE TABLE t_trunc(i INT PRIMARY KEY) ENGINE ARIA; +INSERT INTO t_trunc VALUES (1); +CREATE TABLE t_ch_i (i int(10), index(i) ) ENGINE=Aria; +INSERT INTO t_ch_i VALUES(1); +CREATE TABLE t_change_engine(i INT PRIMARY KEY) ENGINE InnoDB; +INSERT INTO t_change_engine VALUES (1); +CREATE TABLE t_rename(i INT PRIMARY KEY) ENGINE ARIA; +CREATE DATABASE test_for_rename; +CREATE TABLE t_rename_2(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_3(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_4(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_delete(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_delete_2(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_alter(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_rename_create(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_part_create(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_part_add_part(i INT PRIMARY KEY) ENGINE ARIA; +CREATE TABLE t_part_change_eng(i INT PRIMARY KEY) ENGINE ARIA PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_change_eng_2(i INT PRIMARY KEY) ENGINE InnoDB PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_change_eng_3(i INT PRIMARY KEY) ENGINE Aria; +CREATE TABLE 
t_part_alter(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_alter_2(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 3; +CREATE TABLE t_part_drop(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_rename(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_rename_3(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_rm_part(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +SET SESSION debug_dbug="+d,maria_flush_whole_log"; +SET GLOBAL aria_checkpoint_interval=10000; +### Backup to dir +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart +### Result for DDL test +SHOW CREATE TABLE t_alter; +Table Create Table +t_alter CREATE TABLE `t_alter` ( + `i` int(11) NOT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_alter; +i c +1 NULL +SHOW CREATE TABLE t_change_engine; +Table Create Table +t_change_engine CREATE TABLE `t_change_engine` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_change_engine; +i +1 +SELECT * FROM t_trunc; +i +1 +SELECT * FROM t_ch_i; +i +1 +SELECT * FROM t_rename_new; +i +SELECT * FROM test_for_rename.t_rename_new_2; +i +SELECT * FROM t_rename_new_new_3; +i +SELECT * FROM t_rename_new_4; +i +SELECT * FROM t_delete; +ERROR 42S02: Table 'test.t_delete' doesn't exist +SHOW CREATE TABLE t_delete_2; +Table Create Table +t_delete_2 CREATE TABLE `t_delete_2` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_rename_alter_2; +i c +SELECT * FROM t_rename_create; +d +SELECT * FROM t_rename_create_new; +i +SHOW CREATE TABLE t_part_create_2; +Table Create Table +t_part_create_2 CREATE TABLE `t_part_create_2` ( + `i` int(11) DEFAULT NULL +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_create_2; +i +SHOW CREATE TABLE t_part_add_part; +Table Create Table +t_part_add_part CREATE TABLE `t_part_add_part` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_add_part; +i +SHOW CREATE TABLE t_part_change_eng; +Table Create Table +t_part_change_eng CREATE TABLE `t_part_change_eng` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_change_eng; +i +SHOW CREATE TABLE t_part_change_eng_2; +Table Create Table +t_part_change_eng_2 CREATE TABLE `t_part_change_eng_2` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_change_eng_2; +i +SELECT * FROM t_part_alter; +i c +SHOW CREATE TABLE t_part_alter_2; +Table Create Table +t_part_alter_2 CREATE TABLE `t_part_alter_2` ( + `i` int(11) NOT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci + PARTITION BY HASH (`i`) +PARTITIONS 2 +SELECT * FROM t_part_alter_2; +i +SELECT * FROM t_part_drop; +ERROR 42S02: Table 'test.t_part_drop' doesn't exist +SELECT * FROM t_part_rename; 
+ERROR 42S02: Table 'test.t_part_rename' doesn't exist +SELECT * FROM t_part_rename_2; +i +SELECT * FROM t_part_rename_3; +ERROR 42S02: Table 'test.t_part_rename_3' doesn't exist +SELECT * FROM test_for_rename.t_part_rename_4; +i +SHOW CREATE TABLE t_part_rm_part; +Table Create Table +t_part_rm_part CREATE TABLE `t_part_rm_part` ( + `i` int(11) NOT NULL, + `c` int(11) DEFAULT NULL, + PRIMARY KEY (`i`) +) ENGINE=Aria DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci PAGE_CHECKSUM=1 +SELECT * FROM t_part_rm_part; +i c +SHOW DATABASES; +Database +information_schema +mtr +mysql +performance_schema +sys +test +test_for_db_create +test_for_rename +### Clean up for DDL test +DROP DATABASE test_for_db_create; +DROP TABLE t_db_create; +DROP TABLE t_change_engine; +DROP TABLE t_alter; +DROP TABLE t_trunc; +DROP TABLE t_ch_i; +DROP TABLE t_rename_new; +DROP TABLE t_rename_new_new_3; +DROP TABLE t_rename_new_4; +DROP TABLE t_delete_2; +DROP TABLE t_rename_alter_2; +DROP TABLE t_rename_create; +DROP TABLE t_rename_create_new; +DROP TABLE t_part_create; +DROP TABLE t_part_create_2; +DROP TABLE t_part_add_part; +DROP TABLE t_part_change_eng; +DROP TABLE t_part_change_eng_2; +DROP TABLE t_part_change_eng_3; +DROP TABLE t_part_alter; +DROP TABLE t_part_alter_2; +DROP TABLE t_part_rename_2; +DROP TABLE t_part_rm_part; +DROP DATABASE test_for_rename; +### Result for DML test +SELECT * FROM t_dml_ins; +i +1 +2 +SELECT * FROM t_dml_upd; +i +2 +SELECT * FROM t_dml_del; +i +### Clean up for DML test +DROP TABLE t_dml_ins; +DROP TABLE t_dml_upd; +DROP TABLE t_dml_del; +### Result for redo log files backup +# ok +# ok +# ok +### Cleanup for redo log files backup +DROP TABLE t_logs_1; +DROP TABLE t_logs_2; +DROP TABLE t_bulk_ins; +### Result for online/offline tables test +SELECT * FROM t_default; +i +1 +SELECT * FROM t_tr_p_ch; +i +1 +SELECT * FROM t_tr_p_nch; +i +1 +SELECT * FROM t_p_ch; +i +1 +SELECT * FROM t_p_nch; +i +1 +SELECT * FROM t_fixed; +i +1 +SELECT * FROM t_dyn; +i +1 +SELECT * FROM t_part_online; +i +5 +15 +25 +SELECT * FROM t_part_offline; +i +5 +15 +25 +SELECT * FROM `t 1 t-1`; +i +1 +SELECT * FROM `t-part online`; +i +5 +15 +25 +### Cleanup for online/offline tables test +DROP TABLE t_default; +DROP TABLE t_tr_p_ch; +DROP TABLE t_tr_p_nch; +DROP TABLE t_p_ch; +DROP TABLE t_p_nch; +DROP TABLE t_fixed; +DROP TABLE t_dyn; +DROP TABLE t_part_online; +DROP TABLE t_part_offline; +DROP TABLE `t 1 t-1`; +DROP TABLE `t-part online`; diff --git a/mysql-test/suite/mariabackup/aria_backup.test b/mysql-test/suite/mariabackup/aria_backup.test new file mode 100644 index 00000000000..b844518bdf1 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_backup.test @@ -0,0 +1,425 @@ +--source include/have_aria.inc +--source include/have_partition.inc +--source include/have_debug.inc +--source include/big_test.inc +# This test timeouts with msan +--source include/not_msan.inc + +--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup +--let $backup_stream=2 +--let $backup_dir=1 +--let $backup_variant=$backup_stream + +while ($backup_variant) { +if ($backup_variant == $backup_stream) { +--echo ### +--echo # Test for backup to stream +--echo ##### +} +if ($backup_variant == $backup_dir) { +--echo ### +--echo # Test for backup to directory +--echo ##### +} + +--echo ### +--echo # Test for mix of online/offline backup tables +--echo ##### + +CREATE TABLE t_default(i INT PRIMARY KEY) + ENGINE ARIA; +INSERT INTO t_default VALUES (1); + +CREATE TABLE t_tr_p_ch(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE 
PAGE_CHECKSUM=1; +INSERT INTO t_tr_p_ch VALUES (1); + +CREATE TABLE t_tr_p_nch(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=0; +INSERT INTO t_tr_p_nch VALUES (1); + +CREATE TABLE t_p_ch(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_p_ch VALUES (1); + +CREATE TABLE t_p_nch(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=PAGE PAGE_CHECKSUM=0; +INSERT INTO t_p_nch VALUES (1); + +CREATE TABLE t_fixed(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=FIXED PAGE_CHECKSUM=1; +INSERT INTO t_fixed VALUES (1); + +CREATE TABLE t_dyn(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=0 ROW_FORMAT=DYNAMIC PAGE_CHECKSUM=1; +INSERT INTO t_dyn VALUES (1); + +--echo # Test for partitioned table +CREATE TABLE t_part_online(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL = 1 PAGE_CHECKSUM = 1 + PARTITION BY RANGE( i ) ( + PARTITION p0 VALUES LESS THAN (10), + PARTITION p1 VALUES LESS THAN (20), + PARTITION p2 VALUES LESS THAN (30) + ); + +INSERT INTO t_part_online VALUES(5); +INSERT INTO t_part_online VALUES(15); +INSERT INTO t_part_online VALUES(25); +SELECT * FROM t_part_online; + +CREATE TABLE t_part_offline(i INT) + ENGINE ARIA TRANSACTIONAL = 0 PAGE_CHECKSUM = 0 + PARTITION BY RANGE( i ) ( + PARTITION p0 VALUES LESS THAN (10), + PARTITION p1 VALUES LESS THAN (20), + PARTITION p2 VALUES LESS THAN (30) + ); + +INSERT INTO t_part_offline VALUES(5); +INSERT INTO t_part_offline VALUES(15); +INSERT INTO t_part_offline VALUES(25); + +--echo # Test for filename to tablename mapping +CREATE TABLE `t 1 t-1`(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO `t 1 t-1` VALUES (1); + +CREATE TABLE `t-part online`(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL = 1 PAGE_CHECKSUM = 1 + PARTITION BY RANGE( i ) ( + PARTITION p0 VALUES LESS THAN (10), + PARTITION p1 VALUES LESS THAN (20), + PARTITION p2 VALUES LESS THAN (30) + ); + +INSERT INTO `t-part online` VALUES(5); +INSERT INTO `t-part online` VALUES(15); +INSERT INTO `t-part online` VALUES(25); + + +--echo ### +--echo # Test for redo log files backup; +--echo ##### +CREATE TABLE t_logs_1(i INT) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +CREATE TABLE t_logs_2 LIKE t_logs_1; +CREATE TABLE t_bulk_ins LIKE t_logs_1; +INSERT INTO t_logs_1 VALUES + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9), + (0), (1), (2), (3), (4), (5), (6), (7), (8), (9); +--echo # Generate several log files +--let $i = 0 +while ($i < 14) { +INSERT INTO t_logs_1 SELECT * FROM t_logs_1; +--inc $i +} + +--echo ### +--echo # Test for DML during backup for online backup +--echo ##### +CREATE TABLE t_dml_ins(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_ins VALUES(1); +--let after_aria_table_copy_test_t_dml_ins=INSERT INTO test.t_dml_ins VALUES(2) +CREATE TABLE t_dml_upd(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_upd VALUES(1); +--let after_aria_table_copy_test_t_dml_upd=UPDATE test.t_dml_upd SET i = 2 +CREATE TABLE 
t_dml_del(i INT PRIMARY KEY) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +INSERT INTO t_dml_del VALUES(1); +--let after_aria_table_copy_test_t_dml_del=DELETE FROM test.t_dml_del + +--echo ### +--echo # Test for DDL during backup for online backup +--echo ##### +CREATE DATABASE test_for_db_drop; +CREATE TABLE test_for_db_drop.t(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_for_db_drop_t=DROP DATABASE test_for_db_drop +CREATE TABLE t_db_create(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_db_create=CREATE DATABASE test_for_db_create +--sorted_result +SHOW DATABASES; + +CREATE TABLE t_alter(i INT PRIMARY KEY) ENGINE ARIA; +INSERT INTO t_alter VALUES (1); +--let after_aria_table_copy_test_t_alter=ALTER TABLE test.t_alter ADD COLUMN c INT + +CREATE TABLE t_trunc(i INT PRIMARY KEY) ENGINE ARIA; +INSERT INTO t_trunc VALUES (1); +--let after_aria_table_copy_test_t_trunc=TRUNCATE TABLE test.t_trunc + +CREATE TABLE t_ch_i (i int(10), index(i) ) ENGINE=Aria; +INSERT INTO t_ch_i VALUES(1); +--let after_aria_table_copy_test_t_ch_i=ALTER TABLE test.t_ch_i DISABLE KEYS + +CREATE TABLE t_change_engine(i INT PRIMARY KEY) ENGINE InnoDB; +INSERT INTO t_change_engine VALUES (1); +--let after_aria_background=begin not atomic ALTER TABLE test.t_change_engine ENGINE = ARIA; INSERT INTO test.t_logs_1 SELECT * FROM test.t_logs_1; INSERT INTO test.t_bulk_ins SELECT * FROM test.t_logs_1; INSERT INTO test.t_logs_2 SET i = 1; end + +CREATE TABLE t_rename(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_rename=RENAME TABLE test.t_rename TO test.t_rename_new +CREATE DATABASE test_for_rename; +CREATE TABLE t_rename_2(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_rename_2=RENAME TABLE test.t_rename_2 TO test_for_rename.t_rename_new_2 + +CREATE TABLE t_rename_3(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_rename_3=begin not atomic RENAME TABLE test.t_rename_3 TO test.t_rename_new_3; RENAME TABLE test.t_rename_new_3 TO test.t_rename_new_new_3; end + +CREATE TABLE t_rename_4(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_rename_4=begin not atomic RENAME TABLE test.t_rename_4 TO test.t_rename_new_4; RENAME TABLE test.t_rename_new_4 TO test.t_rename_new_new_4; RENAME TABLE test.t_rename_new_new_4 TO test.t_rename_new_4; end + +CREATE TABLE t_delete(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_delete=DROP TABLE test.t_delete + +CREATE TABLE t_delete_2(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_delete_2=ALTER TABLE test.t_delete_2 ENGINE=Innodb + +CREATE TABLE t_rename_alter(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_rename_alter=begin not atomic RENAME TABLE test.t_rename_alter TO test.t_rename_alter_2; ALTER TABLE test.t_rename_alter_2 ADD COLUMN c INT; end + +CREATE TABLE t_rename_create(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_rename_create=begin not atomic RENAME TABLE test.t_rename_create TO test.t_rename_create_new; CREATE TABLE test.t_rename_create(d INT PRIMARY KEY) ENGINE ARIA; end + +CREATE TABLE t_part_create(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_part_create=create table test.t_part_create_2 (i int) engine=Aria PARTITION BY HASH(i) PARTITIONS 2 + +CREATE TABLE t_part_add_part(i INT PRIMARY KEY) ENGINE ARIA; +--let after_aria_table_copy_test_t_part_add_part=alter table test.t_part_add_part PARTITION BY HASH(i) PARTITIONS 2 + +CREATE TABLE 
t_part_change_eng(i INT PRIMARY KEY) ENGINE ARIA PARTITION BY HASH(i) PARTITIONS 2; +--let after_aria_table_copy_test_t_part_change_eng=alter table test.t_part_change_eng ENGINE=InnoDB + +CREATE TABLE t_part_change_eng_2(i INT PRIMARY KEY) ENGINE InnoDB PARTITION BY HASH(i) PARTITIONS 2; +CREATE TABLE t_part_change_eng_3(i INT PRIMARY KEY) ENGINE Aria; +--let after_aria_table_copy_test_t_part_change_eng_3=alter table test.t_part_change_eng_2 ENGINE=Aria + +CREATE TABLE t_part_alter(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +--let after_aria_table_copy_test_t_part_alter=alter table test.t_part_alter ADD COLUMN c INT + +CREATE TABLE t_part_alter_2(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 3; +--let after_aria_table_copy_test_t_part_alter_2=alter table test.t_part_alter_2 COALESCE PARTITION 1 + +CREATE TABLE t_part_drop(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +--let after_aria_table_copy_test_t_part_drop=DROP table test.t_part_drop + +CREATE TABLE t_part_rename(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +--let after_aria_table_copy_test_t_part_rename=RENAME TABLE test.t_part_rename TO test.t_part_rename_2 + +CREATE TABLE t_part_rename_3(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +--let after_aria_table_copy_test_t_part_rename_3=RENAME TABLE test.t_part_rename_3 TO test_for_rename.t_part_rename_4 + +CREATE TABLE t_part_rm_part(i INT PRIMARY KEY) ENGINE Aria PARTITION BY HASH(i) PARTITIONS 2; +--let after_aria_table_copy_test_t_part_rm_part=begin not atomic ALTER TABLE test.t_part_rm_part REMOVE PARTITIONING; ALTER TABLE test.t_part_rm_part ADD COLUMN c INT; end + +SET SESSION debug_dbug="+d,maria_flush_whole_log"; +SET GLOBAL aria_checkpoint_interval=10000; + +--mkdir $targetdir + +if ($backup_variant == $backup_stream) { +--echo ### Backup to stream +--let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.xb +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log; +--disable_result_log +exec $XBSTREAM -x -C $targetdir < $streamfile; +--enable_result_log +} + +if ($backup_variant == $backup_dir) { +--echo ### Backup to dir +--disable_result_log +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events +--enable_result_log +} + +--let $t_logs_1_records_count_before_backup=`SELECT COUNT(*) FROM t_logs_1` +--let $t_logs_2_records_count_before_backup=`SELECT COUNT(*) FROM t_logs_2` +--let $t_bulk_ins_records_count_before_backup=`SELECT COUNT(*) FROM t_bulk_ins` + +--echo # xtrabackup prepare +--disable_result_log +--exec $XTRABACKUP --prepare --target-dir=$targetdir +--source include/restart_and_restore.inc +--enable_result_log + +--echo ### Result for DDL test +SHOW CREATE TABLE t_alter; +SELECT * FROM t_alter; +SHOW CREATE TABLE t_change_engine; +SELECT * FROM t_change_engine; +SELECT * FROM t_trunc; +SELECT * FROM t_ch_i; +SELECT * FROM t_rename_new; +SELECT * FROM test_for_rename.t_rename_new_2; +SELECT * FROM t_rename_new_new_3; +SELECT * FROM t_rename_new_4; +--error ER_NO_SUCH_TABLE +SELECT * FROM t_delete; +SHOW CREATE TABLE t_delete_2; +SELECT * FROM t_rename_alter_2; +SELECT * FROM t_rename_create; +SELECT * FROM t_rename_create_new; +SHOW CREATE TABLE t_part_create_2; +SELECT * FROM t_part_create_2; +SHOW CREATE TABLE t_part_add_part; +SELECT * FROM 
t_part_add_part; +SHOW CREATE TABLE t_part_change_eng; +SELECT * FROM t_part_change_eng; +SHOW CREATE TABLE t_part_change_eng_2; +SELECT * FROM t_part_change_eng_2; +SELECT * FROM t_part_alter; +SHOW CREATE TABLE t_part_alter_2; +SELECT * FROM t_part_alter_2; +--error ER_NO_SUCH_TABLE +SELECT * FROM t_part_drop; +--error ER_NO_SUCH_TABLE +SELECT * FROM t_part_rename; +SELECT * FROM t_part_rename_2; +--error ER_NO_SUCH_TABLE +SELECT * FROM t_part_rename_3; +SELECT * FROM test_for_rename.t_part_rename_4; +SHOW CREATE TABLE t_part_rm_part; +SELECT * FROM t_part_rm_part; +--sorted_result +SHOW DATABASES; + +--echo ### Clean up for DDL test +DROP DATABASE test_for_db_create; +DROP TABLE t_db_create; +DROP TABLE t_change_engine; +DROP TABLE t_alter; +DROP TABLE t_trunc; +DROP TABLE t_ch_i; +DROP TABLE t_rename_new; +DROP TABLE t_rename_new_new_3; +DROP TABLE t_rename_new_4; +DROP TABLE t_delete_2; +DROP TABLE t_rename_alter_2; +DROP TABLE t_rename_create; +DROP TABLE t_rename_create_new; +DROP TABLE t_part_create; +DROP TABLE t_part_create_2; +DROP TABLE t_part_add_part; +DROP TABLE t_part_change_eng; +DROP TABLE t_part_change_eng_2; +DROP TABLE t_part_change_eng_3; +DROP TABLE t_part_alter; +DROP TABLE t_part_alter_2; +DROP TABLE t_part_rename_2; +DROP TABLE t_part_rm_part; +DROP DATABASE test_for_rename; +--let after_aria_table_copy_test_for_db_drop_t= +--let after_aria_table_copy_test_t_db_create= +--let after_aria_table_copy_test_t_alter= +--let after_aria_background= +--let after_aria_table_copy_test_t_trunc= +--let after_aria_table_copy_test_t_ch_i= +--let after_aria_table_copy_test_t_rename= +--let after_aria_table_copy_test_t_rename_2= +--let after_aria_table_copy_test_t_rename_3= +--let after_aria_table_copy_test_t_rename_4= +--let after_aria_table_copy_test_t_delete= +--let after_aria_table_copy_test_t_delete_2= +--let after_aria_table_copy_test_t_rename_alter= +--let after_aria_table_copy_test_t_rename_create= +--let after_aria_table_copy_test_t_part_create= +--let after_aria_table_copy_test_t_part_add_part= +--let after_aria_table_copy_test_t_part_change_eng= +--let after_aria_table_copy_test_t_part_change_eng_3= +--let after_aria_table_copy_test_t_part_alter= +--let after_aria_table_copy_test_t_part_alter_2= +--let after_aria_table_copy_test_t_part_drop= +--let after_aria_table_copy_test_t_part_rename= +--let after_aria_table_copy_test_t_part_rename_3= +--let after_aria_table_copy_test_t_part_rm_part= + +--echo ### Result for DML test +SELECT * FROM t_dml_ins; +SELECT * FROM t_dml_upd; +SELECT * FROM t_dml_del; + +--echo ### Clean up for DML test +DROP TABLE t_dml_ins; +DROP TABLE t_dml_upd; +DROP TABLE t_dml_del; +--let after_aria_table_copy_test_t_dml_ins= +--let after_aria_table_copy_test_t_dml_upd= +--let after_aria_table_copy_test_t_dml_del= + +--echo ### Result for redo log files backup +--let $t_logs_1_records_count_after_backup=`SELECT COUNT(*) FROM t_logs_1` +--let $t_logs_2_records_count_after_backup=`SELECT COUNT(*) FROM t_logs_2` +--let $t_bulk_ins_records_count_after_backup=`SELECT COUNT(*) FROM t_bulk_ins` +if ($t_logs_1_records_count_after_backup == $t_logs_1_records_count_before_backup) { +--echo # ok +} +if ($t_logs_1_records_count_after_backup != $t_logs_1_records_count_before_backup) { +--echo # failed +} +if ($t_logs_2_records_count_after_backup == $t_logs_2_records_count_before_backup) { +--echo # ok +} +if ($t_logs_2_records_count_after_backup != $t_logs_2_records_count_before_backup) { +--echo # failed +} +if ($t_bulk_ins_records_count_after_backup == 
$t_bulk_ins_records_count_before_backup) { +--echo # ok +} +if ($t_bulk_ins_records_count_after_backup != $t_bulk_ins_records_count_before_backup) { +--echo # failed +} + +--echo ### Cleanup for redo log files backup +DROP TABLE t_logs_1; +DROP TABLE t_logs_2; +DROP TABLE t_bulk_ins; +--let $t_logs_1_records_count_before_backup= +--let $t_logs_1_records_count_after_backup= +--let $t_logs_2_records_count_before_backup= +--let $t_logs_2_records_count_after_backup= +--let $t_bulk_ins_records_count_before_backup= +--let $t_bulk_ins_records_count_after_backup= + +--echo ### Result for online/offline tables test +SELECT * FROM t_default; +SELECT * FROM t_tr_p_ch; +SELECT * FROM t_tr_p_nch; +SELECT * FROM t_p_ch; +SELECT * FROM t_p_nch; +SELECT * FROM t_fixed; +SELECT * FROM t_dyn; +SELECT * FROM t_part_online; +SELECT * FROM t_part_offline; +SELECT * FROM `t 1 t-1`; +SELECT * FROM `t-part online`; + +--echo ### Cleanup for online/offline tables test +DROP TABLE t_default; +DROP TABLE t_tr_p_ch; +DROP TABLE t_tr_p_nch; +DROP TABLE t_p_ch; +DROP TABLE t_p_nch; +DROP TABLE t_fixed; +DROP TABLE t_dyn; +DROP TABLE t_part_online; +DROP TABLE t_part_offline; +DROP TABLE `t 1 t-1`; +DROP TABLE `t-part online`; + +if ($backup_variant == $backup_stream) { +--remove_file $streamfile +} +--rmdir $targetdir +--dec $backup_variant +} + diff --git a/mysql-test/suite/mariabackup/aria_log.opt b/mysql-test/suite/mariabackup/aria_log.opt new file mode 100644 index 00000000000..f226499f5dd --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log.opt @@ -0,0 +1 @@ +--loose-aria-log-file-size=8388608 diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path.result b/mysql-test/suite/mariabackup/aria_log_dir_path.result index 1a877321bbe..ead4b836682 100644 --- a/mysql-test/suite/mariabackup/aria_log_dir_path.result +++ b/mysql-test/suite/mariabackup/aria_log_dir_path.result @@ -35,7 +35,6 @@ DROP TABLE t1; SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; SHOW ENGINE aria logs; Type Name Status -Aria aria_log.00000001 free Aria aria_log.00000002 in use # Restarting mariadbd with default parameters # restart diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path.test b/mysql-test/suite/mariabackup/aria_log_dir_path.test index 0178cd4eae5..40bc39446bf 100644 --- a/mysql-test/suite/mariabackup/aria_log_dir_path.test +++ b/mysql-test/suite/mariabackup/aria_log_dir_path.test @@ -48,7 +48,6 @@ SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; --replace_regex /Size +[0-9]+ ; .+aria_log/aria_log/ SHOW ENGINE aria logs; - --echo # mariadb-backup --backup --disable_result_log --mkdir $targetdir @@ -61,7 +60,6 @@ SHOW ENGINE aria logs; --exec $XTRABACKUP --prepare --target-dir=$targetdir --enable_result_log - --echo # shutdown server --disable_result_log --source include/shutdown_mysqld.inc @@ -70,12 +68,14 @@ SHOW ENGINE aria logs; --echo # remove aria-log-dir-path --rmdir $ARIA_LOGDIR_FS + --echo # mariadb-backup --copy-back --let $mariadb_backup_parameters=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --copy-back --datadir=$datadir --target-dir=$targetdir --parallel=2 --throttle=1 --aria-log-dir-path=$ARIA_LOGDIR_MARIADB --replace_result $MYSQL_TEST_DIR MYSQL_TEST_DIR $MYSQLTEST_VARDIR MYSQLTEST_VARDIR --exec echo "# with parameters: $mariadb_backup_parameters" --exec $XTRABACKUP $mariadb_backup_parameters + --echo # starting server --let $restart_parameters=$server_parameters --source include/start_mysqld.inc @@ -91,7 +91,7 @@ DROP TABLE t1; --echo # Testing aria log files after 
--copy-back SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; --file_exists $ARIA_LOGDIR_FS/aria_log_control ---file_exists $ARIA_LOGDIR_FS/aria_log.00000001 +#--file_exists $ARIA_LOGDIR_FS/aria_log.00000001 --file_exists $ARIA_LOGDIR_FS/aria_log.00000002 --error 1 --file_exists $ARIA_LOGDIR_FS/aria_log.00000003 diff --git a/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result index 7fef26096e0..736bc5564e8 100644 --- a/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result +++ b/mysql-test/suite/mariabackup/aria_log_dir_path_rel.result @@ -35,7 +35,6 @@ DROP TABLE t1; SET @@global.aria_checkpoint_interval=DEFAULT /*Force checkpoint*/; SHOW ENGINE aria logs; Type Name Status -Aria aria_log.00000001 free Aria aria_log.00000002 in use # Restarting mariadbd with default parameters # restart diff --git a/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.opt b/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.opt new file mode 100644 index 00000000000..7c3ebe422c3 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.opt @@ -0,0 +1,2 @@ +--loose-aria-log-file-size=8388608 +--loose-restart-for-aria_log_rotate_during_backup="This is needed to recreate datadir, to have Aria start logs from aria_log.00000001" diff --git a/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.result b/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.result new file mode 100644 index 00000000000..0691bce8554 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.result @@ -0,0 +1,58 @@ +SHOW VARIABLES LIKE 'aria_log_file_size'; +Variable_name Value +aria_log_file_size 8388608 +CREATE PROCEDURE display_aria_log_control(ctrl BLOB) +BEGIN +SELECT HEX(REVERSE(SUBSTRING(ctrl, 42, 4))) AS last_logno; +END; +$$ +CREATE PROCEDURE populate_t1() +BEGIN +FOR id IN 0..9 DO +INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); +END FOR; +END; +$$ +CREATE TABLE test.t1(id INT, txt LONGTEXT) ENGINE=Aria; +# MYSQLD_DATADIR/aria_log_control before --backup +CALL display_aria_log_control(@aria_log_control); +last_logno +00000001 +# Running --backup +# MYSQLD_DATADIR/aria_log_control after --backup +CALL display_aria_log_control(@aria_log_control); +last_logno +00000002 +# targetdir/aria_log_control after --backup +CALL display_aria_log_control(@aria_log_control); +last_logno +00000001 +# Running --prepare +# targetdir/aria_log_control after --prepare +CALL display_aria_log_control(@aria_log_control); +last_logno +00000002 +# shutdown server +# remove datadir +# xtrabackup move back +# restart +# MYSQLD_DATADIR/aria_log_control after --copy-back +CALL display_aria_log_control(@aria_log_control); +last_logno +00000002 +# Checking that after --restore all t1 data is there +SELECT id, LENGTH(txt) FROM t1 ORDER BY id; +id LENGTH(txt) +0 1048576 +1 1048576 +2 1048576 +3 1048576 +4 1048576 +5 1048576 +6 1048576 +7 1048576 +8 1048576 +9 1048576 +DROP TABLE t1; +DROP PROCEDURE populate_t1; +DROP PROCEDURE display_aria_log_control; diff --git a/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.test b/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.test new file mode 100644 index 00000000000..172ade338d5 --- /dev/null +++ b/mysql-test/suite/mariabackup/aria_log_rotate_during_backup.test @@ -0,0 +1,82 @@ +--source include/have_debug.inc +--source include/have_aria.inc + +SHOW VARIABLES LIKE 'aria_log_file_size'; + +--let $MYSQLD_DATADIR= `select 
@@datadir` +--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup +mkdir $targetdir; + + +DELIMITER $$; +CREATE PROCEDURE display_aria_log_control(ctrl BLOB) +BEGIN + SELECT HEX(REVERSE(SUBSTRING(ctrl, 42, 4))) AS last_logno; +END; +$$ +DELIMITER ;$$ + +DELIMITER $$; +CREATE PROCEDURE populate_t1() +BEGIN + FOR id IN 0..9 DO + INSERT INTO test.t1 (id, txt) VALUES (id, REPEAT(id,1024*1024)); + END FOR; +END; +$$ +DELIMITER ;$$ + + +CREATE TABLE test.t1(id INT, txt LONGTEXT) ENGINE=Aria; + +--echo # MYSQLD_DATADIR/aria_log_control before --backup +--let ARIA_DATADIR=$MYSQLD_DATADIR +--source include/aria_log_control_load.inc +CALL display_aria_log_control(@aria_log_control); + + +--echo # Running --backup +--let after_scanning_log_files=CALL test.populate_t1 +--disable_result_log +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events 2>&1 +--let after_scanning_log_files= +--enable_result_log + +--echo # MYSQLD_DATADIR/aria_log_control after --backup +--let ARIA_DATADIR=$MYSQLD_DATADIR +--source include/aria_log_control_load.inc +CALL display_aria_log_control(@aria_log_control); + +--echo # targetdir/aria_log_control after --backup +--let ARIA_DATADIR=$targetdir +--source include/aria_log_control_load.inc +CALL display_aria_log_control(@aria_log_control); + + +--echo # Running --prepare +--disable_result_log +--exec $XTRABACKUP --prepare --target-dir=$targetdir +--enable_result_log + +--echo # targetdir/aria_log_control after --prepare +--let ARIA_DATADIR=$targetdir +--source include/aria_log_control_load.inc +CALL display_aria_log_control(@aria_log_control); + + +--disable_result_log +--source include/restart_and_restore.inc +--enable_result_log + +--echo # MYSQLD_DATADIR/aria_log_control after --copy-back +--let ARIA_DATADIR=$MYSQLD_DATADIR +--source include/aria_log_control_load.inc +CALL display_aria_log_control(@aria_log_control); + +--echo # Checking that after --restore all t1 data is there +SELECT id, LENGTH(txt) FROM t1 ORDER BY id; +DROP TABLE t1; +rmdir $targetdir; + +DROP PROCEDURE populate_t1; +DROP PROCEDURE display_aria_log_control; diff --git a/mysql-test/suite/mariabackup/auth_plugin_win.test b/mysql-test/suite/mariabackup/auth_plugin_win.test index 70ae74b7028..7c0ba047014 100644 --- a/mysql-test/suite/mariabackup/auth_plugin_win.test +++ b/mysql-test/suite/mariabackup/auth_plugin_win.test @@ -22,7 +22,7 @@ eval GRANT ALL PRIVILEGES ON *.* to '$USERNAME'; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf -u $USERNAME --backup --protocol=pipe --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf -u $USERNAME --backup --parallel=10 --protocol=pipe --target-dir=$targetdir; --enable_result_log --replace_result $USERNAME USERNAME eval DROP USER '$USERNAME'; diff --git a/mysql-test/suite/mariabackup/backup_grants.result b/mysql-test/suite/mariabackup/backup_grants.result index 6bd6c9f42cd..ae5b42700d1 100644 --- a/mysql-test/suite/mariabackup/backup_grants.result +++ b/mysql-test/suite/mariabackup/backup_grants.result @@ -6,8 +6,6 @@ GRANT RELOAD, PROCESS on *.* to backup@localhost; FOUND 1 /missing required privilege REPLICA MONITOR/ in backup.log GRANT REPLICA MONITOR ON *.* TO backup@localhost; REVOKE REPLICA MONITOR ON *.* FROM backup@localhost; -FOUND 1 /missing required privilege CONNECTION ADMIN/ in backup.log -GRANT CONNECTION ADMIN ON *.* TO backup@localhost; FOUND 1 /missing required privilege 
REPLICATION SLAVE ADMIN/ in backup.log FOUND 1 /missing required privilege REPLICA MONITOR/ in backup.log GRANT REPLICATION SLAVE ADMIN ON *.* TO backup@localhost; diff --git a/mysql-test/suite/mariabackup/backup_grants.test b/mysql-test/suite/mariabackup/backup_grants.test index 18db3489a94..b4713d231be 100644 --- a/mysql-test/suite/mariabackup/backup_grants.test +++ b/mysql-test/suite/mariabackup/backup_grants.test @@ -3,14 +3,14 @@ CREATE user backup@localhost; # backup possible for unprivileges user, with --no-lock --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup -ubackup --no-lock --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 -ubackup --no-lock --target-dir=$targetdir; --enable_result_log rmdir $targetdir; # backup fails without --no-lock, because of FTWRL --disable_result_log error 1; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup -ubackup --target-dir=$targetdir > $MYSQLTEST_VARDIR/tmp/backup.log 2>&1; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 -ubackup --target-dir=$targetdir > $MYSQLTEST_VARDIR/tmp/backup.log 2>&1; --enable_result_log let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/backup.log; @@ -23,7 +23,7 @@ let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/backup.log; # backup succeeds with RELOAD privilege GRANT RELOAD, PROCESS on *.* to backup@localhost; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup -ubackup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 -ubackup --target-dir=$targetdir; --enable_result_log rmdir $targetdir; @@ -45,24 +45,6 @@ exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup -ubackup --sl rmdir $targetdir; REVOKE REPLICA MONITOR ON *.* FROM backup@localhost; -# TODO need a query that would delay a BACKUP STAGE START/ BACKUP STAGE BLOCK_COMMIT longer than the kill-long-queries-timeout -#--send SELECT SLEEP(9) kill_me - -# kill-long-query-type=(not empty) requires CONNECTION ADMIN ---disable_result_log ---exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup -ubackup --kill-long-query-type=ALL --kill-long-queries-timeout=4 --target-dir=$targetdir > $MYSQLTEST_VARDIR/tmp/backup.log 2>&1; ---enable_result_log -rmdir $targetdir; - ---let SEARCH_PATTERN= missing required privilege CONNECTION ADMIN ---source include/search_pattern_in_file.inc - -GRANT CONNECTION ADMIN ON *.* TO backup@localhost; ---disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup -ubackup --kill-long-query-type=all --kill-long-queries-timeout=1 --target-dir=$targetdir; ---enable_result_log -rmdir $targetdir; - # --safe-slave-backup requires REPLICATION SLAVE ADMIN, and REPLICA MONITOR --disable_result_log error 1; diff --git a/mysql-test/suite/mariabackup/backup_ssl.test b/mysql-test/suite/mariabackup/backup_ssl.test index e858c834d29..b38073cf19f 100644 --- a/mysql-test/suite/mariabackup/backup_ssl.test +++ b/mysql-test/suite/mariabackup/backup_ssl.test @@ -3,7 +3,7 @@ FLUSH PRIVILEGES; echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --user=backup_user --password=x --ssl --backup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --user=backup_user --password=x --ssl --backup --parallel=10 --target-dir=$targetdir; --enable_result_log echo # xtrabackup 
prepare; diff --git a/mysql-test/suite/mariabackup/binlog.test b/mysql-test/suite/mariabackup/binlog.test index 9d62e5f8d6b..d02d135ebbd 100644 --- a/mysql-test/suite/mariabackup/binlog.test +++ b/mysql-test/suite/mariabackup/binlog.test @@ -9,7 +9,7 @@ INSERT INTO t VALUES(1); SHOW VARIABLES like 'log_bin'; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir; --enable_result_log exec $XTRABACKUP --prepare --binlog-info=1 --target-dir=$basedir ; diff --git a/mysql-test/suite/mariabackup/compress_qpress.test b/mysql-test/suite/mariabackup/compress_qpress.test index c7762f8e55e..263fc55eb67 100644 --- a/mysql-test/suite/mariabackup/compress_qpress.test +++ b/mysql-test/suite/mariabackup/compress_qpress.test @@ -4,7 +4,7 @@ echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --compress --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --compress --target-dir=$targetdir; --enable_result_log INSERT INTO t VALUES(2); diff --git a/mysql-test/suite/mariabackup/create_during_backup.test b/mysql-test/suite/mariabackup/create_during_backup.test index 985a5a3e53a..16d47a648f8 100644 --- a/mysql-test/suite/mariabackup/create_during_backup.test +++ b/mysql-test/suite/mariabackup/create_during_backup.test @@ -7,7 +7,7 @@ mkdir $targetdir; echo # xtrabackup backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,mariabackup_events; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events; --enable_result_log --let after_load_tables= diff --git a/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.test b/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.test index f01028b6494..aa7d6de2739 100644 --- a/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.test +++ b/mysql-test/suite/mariabackup/create_with_data_directory_during_backup.test @@ -8,7 +8,7 @@ mkdir $table_data_dir; echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,mariabackup_events; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events; --enable_result_log --source include/shutdown_mysqld.inc echo # xtrabackup prepare; diff --git a/mysql-test/suite/mariabackup/data_directory.test b/mysql-test/suite/mariabackup/data_directory.test index ffb3ab3073c..96d76ba0253 100644 --- a/mysql-test/suite/mariabackup/data_directory.test +++ b/mysql-test/suite/mariabackup/data_directory.test @@ -7,7 +7,7 @@ INSERT INTO t VALUES(1); echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir; --enable_result_log --source include/shutdown_mysqld.inc echo # xtrabackup prepare; @@ -21,6 +21,7 @@ rmdir $table_data_dir; SELECT * FROM t; DROP TABLE t; rmdir $targetdir; +rmdir $table_data_dir; 
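Aside for the data_directory hunks above: these tests back up InnoDB tables whose files live outside the server datadir, which is why the scripts create and later remove $table_data_dir around the backup/restore cycle. A minimal sketch of such a table follows; the table name t_ddir and the $MYSQLTEST_VARDIR/ddir location are hypothetical stand-ins, not part of the patch.
# Hypothetical sketch only: with DATA DIRECTORY the .ibd file is created under the
# given directory rather than the datadir, so the test must manage that directory
# itself before the backup and after the restore.
--let $table_data_dir=$MYSQLTEST_VARDIR/ddir
--mkdir $table_data_dir
eval CREATE TABLE t_ddir(i INT) ENGINE=InnoDB DATA DIRECTORY='$table_data_dir';
INSERT INTO t_ddir VALUES(1);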
--echo # --echo # MDEV-18200 MariaBackup full backup failed with InnoDB: Failing assertion: success @@ -32,8 +33,8 @@ chmod 0000 $DATADIR/ibdata1; exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; --enable_result_log chmod 0755 $DATADIR/ibdata1; -rmdir $table_data_dir; rmdir $targetdir; + --echo # --echo # End of 10.4 tests --echo # diff --git a/mysql-test/suite/mariabackup/ddl_for_common_engine.result b/mysql-test/suite/mariabackup/ddl_for_common_engine.result new file mode 100644 index 00000000000..27a2f288107 --- /dev/null +++ b/mysql-test/suite/mariabackup/ddl_for_common_engine.result @@ -0,0 +1,67 @@ +CREATE TABLE t1 (a INT NOT NULL) ENGINE=CSV; +CREATE TABLE t2 (a INT NOT NULL) ENGINE=CSV; +CREATE TABLE t3 (a INT NOT NULL) ENGINE=CSV; +### Backup to dir +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t4; +a +SELECT * FROM t2; +ERROR 42S02: Table 'test.t2' doesn't exist +SELECT * FROM t3; +ERROR 42S02: Table 'test.t3' doesn't exist +SELECT * FROM t5; +a +SELECT * FROM t1; +a +DROP TABLE t4, t5, t1; +CREATE TABLE t1_m1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t1_m2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t1 (a INT NOT NULL) ENGINE=MERGE UNION=(t1_m1, t1_m2) INSERT_METHOD=LAST; +CREATE TABLE t2_m1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t2_m2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t2 (a INT NOT NULL) ENGINE=MERGE UNION=(t2_m1, t2_m2) INSERT_METHOD=LAST; +CREATE TABLE t3_m1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t3_m2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t3 (a INT NOT NULL) ENGINE=MERGE UNION=(t3_m1, t3_m2) INSERT_METHOD=LAST; +### Backup to dir +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t4; +a +SELECT * FROM t2; +ERROR 42S02: Table 'test.t2' doesn't exist +SELECT * FROM t3; +ERROR 42S02: Table 'test.t3' doesn't exist +SELECT * FROM t5; +a +SELECT * FROM t1; +a +DROP TABLE t4, t5, t1; +DROP TABLE t1_m1, t1_m2, t2_m1, t2_m2, t3_m1, t3_m2; +CREATE TABLE t1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t3 (a INT NOT NULL) ENGINE=MyISAM; +### Backup to dir +# xtrabackup prepare +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t4; +a +SELECT * FROM t2; +ERROR 42S02: Table 'test.t2' doesn't exist +SELECT * FROM t3; +ERROR 42S02: Table 'test.t3' doesn't exist +SELECT * FROM t5; +a +SELECT * FROM t1; +a +DROP TABLE t4, t5, t1; diff --git a/mysql-test/suite/mariabackup/ddl_for_common_engine.test b/mysql-test/suite/mariabackup/ddl_for_common_engine.test new file mode 100644 index 00000000000..045c2320edb --- /dev/null +++ b/mysql-test/suite/mariabackup/ddl_for_common_engine.test @@ -0,0 +1,79 @@ +# This test is just to ensure the DDL processing works for common engines like +# MyISAM, ARCHIVE, CSV etc. The more complex test for different cases is +# implemented in aria_backup.test. 
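The DDL-during-backup coverage in this file (and in aria_backup.test above) appears to rely on the suite's mariabackup_events debug hook: a statement is stored in a variable named after a backup event, and a debug build of mariabackup run with --dbug=+d,mariabackup_events picks the stored statement up (via the environment) and executes it when that event is reached. A minimal sketch of the pattern, using a hypothetical table test.t_example, might look like this:
# Illustrative sketch only; t_example is hypothetical and $targetdir is assumed to
# point at an empty backup directory, as in the surrounding tests.
CREATE TABLE t_example (a INT NOT NULL) ENGINE=MyISAM;
--let after_ce_table_copy_test_t_example=RENAME TABLE test.t_example TO test.t_example_renamed
--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,mariabackup_events
--let after_ce_table_copy_test_t_example=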
+--source include/have_archive.inc +--source include/have_csv.inc +--source include/have_debug.inc + +--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup + +--let $e_myisam = 1 +--let $e_merge = 2 +--let $e_csv = 3 +--let $e_archive = 4 +# 'rename' is not logged in $e_archive; re-enable it here once that is fixed +--let $e_var = $e_csv + +while ($e_var) { +if ($e_var == $e_csv) { +--let $engine = CSV +} +if ($e_var == $e_archive) { +--let $engine = ARCHIVE +} +if ($e_var == $e_merge) { +--let $engine = MERGE +} +if ($e_var == $e_myisam) { +--let $engine = MyISAM +} + +if ($e_var == $e_merge) { +CREATE TABLE t1_m1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t1_m2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t1 (a INT NOT NULL) ENGINE=MERGE UNION=(t1_m1, t1_m2) INSERT_METHOD=LAST; +CREATE TABLE t2_m1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t2_m2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t2 (a INT NOT NULL) ENGINE=MERGE UNION=(t2_m1, t2_m2) INSERT_METHOD=LAST; +CREATE TABLE t3_m1 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t3_m2 (a INT NOT NULL) ENGINE=MyISAM; +CREATE TABLE t3 (a INT NOT NULL) ENGINE=MERGE UNION=(t3_m1, t3_m2) INSERT_METHOD=LAST; +} +if ($e_var != $e_merge) { +eval CREATE TABLE t1 (a INT NOT NULL) ENGINE=$engine; +eval CREATE TABLE t2 (a INT NOT NULL) ENGINE=$engine; +eval CREATE TABLE t3 (a INT NOT NULL) ENGINE=$engine; +} + +--let after_ce_table_copy_test_t1=begin not atomic CREATE TABLE test.t4 LIKE test.t1; DROP TABLE test.t2; RENAME TABLE test.t3 TO test.t5; end + +--mkdir $targetdir +--echo ### Backup to dir +--disable_result_log +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events +--enable_result_log + +--echo # xtrabackup prepare +--disable_result_log +--exec $XTRABACKUP --prepare --target-dir=$targetdir +--source include/restart_and_restore.inc +--enable_result_log +--rmdir $targetdir + +SELECT * FROM t4; +--error ER_NO_SUCH_TABLE +SELECT * FROM t2; +--error ER_NO_SUCH_TABLE +SELECT * FROM t3; +SELECT * FROM t5; +SELECT * FROM t1; + +DROP TABLE t4, t5, t1; + +if ($e_var == $e_merge) { +DROP TABLE t1_m1, t1_m2, t2_m1, t2_m2, t3_m1, t3_m2; +} +--let after_ce_table_copy_test_t1= +--dec $e_var +} + diff --git a/mysql-test/suite/mariabackup/disabled.def b/mysql-test/suite/mariabackup/disabled.def index d272540cec8..f8a341814da 100644 --- a/mysql-test/suite/mariabackup/disabled.def +++ b/mysql-test/suite/mariabackup/disabled.def @@ -1 +1,3 @@ log_page_corruption : MDEV-26210 +mariabackup.xb_compressed_encrypted : MDEV-26154 (error 194 "Tablespace is missing for a table") +innodb_ddl_on_intermediate_table : MENT-1213 diff --git a/mysql-test/suite/mariabackup/encrypted_page_compressed.test b/mysql-test/suite/mariabackup/encrypted_page_compressed.test index 54fffb7d08f..245fcc31c0d 100644 --- a/mysql-test/suite/mariabackup/encrypted_page_compressed.test +++ b/mysql-test/suite/mariabackup/encrypted_page_compressed.test @@ -37,7 +37,7 @@ echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; let $backuplog=$MYSQLTEST_VARDIR/tmp/backup.log; --error 1 -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --core-file > $backuplog; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --core-file > $backuplog; --enable_result_log --let SEARCH_PATTERN=Database page corruption detected.* diff --git a/mysql-test/suite/mariabackup/encrypted_page_corruption.test 
b/mysql-test/suite/mariabackup/encrypted_page_corruption.test index 1beb020b463..9ba958c68a0 100644 --- a/mysql-test/suite/mariabackup/encrypted_page_corruption.test +++ b/mysql-test/suite/mariabackup/encrypted_page_corruption.test @@ -65,7 +65,7 @@ if (`select @@innodb_checksum_algorithm LIKE '%full_crc32'`) } --disable_result_log --error $expect_error -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --extended-validation --target-dir=$targetdir --core-file > $backuplog; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --extended-validation --target-dir=$targetdir --core-file > $backuplog; --enable_result_log @@ -77,7 +77,7 @@ rmdir $targetdir; # Due to very constructed nature of the "corruption" (faking checksums), the "corruption" won't be found without --extended-validation --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir; --enable_result_log drop table t1; diff --git a/mysql-test/suite/mariabackup/extra_lsndir.test b/mysql-test/suite/mariabackup/extra_lsndir.test index 092ee34c6cc..f880edbef9e 100644 --- a/mysql-test/suite/mariabackup/extra_lsndir.test +++ b/mysql-test/suite/mariabackup/extra_lsndir.test @@ -2,7 +2,7 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; let $extra_lsndir=$MYSQLTEST_VARDIR/tmp/extra_lsndir; mkdir $extra_lsndir; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --extra-lsndir=$extra_lsndir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --extra-lsndir=$extra_lsndir; --enable_result_log list_files $extra_lsndir; rmdir $extra_lsndir; diff --git a/mysql-test/suite/mariabackup/full_backup.test b/mysql-test/suite/mariabackup/full_backup.test index c6a21112b60..385f3b8785d 100644 --- a/mysql-test/suite/mariabackup/full_backup.test +++ b/mysql-test/suite/mariabackup/full_backup.test @@ -7,7 +7,7 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --let $backup_log=$MYSQLTEST_VARDIR/tmp/backup.log --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir > $backup_log 2>&1; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --parallel=10 > $backup_log 2>&1; --enable_result_log # The following warning must not appear after MDEV-27343 fix diff --git a/mysql-test/suite/mariabackup/huge_lsn.test b/mysql-test/suite/mariabackup/huge_lsn.test index 8850e9d8954..0da67744457 100644 --- a/mysql-test/suite/mariabackup/huge_lsn.test +++ b/mysql-test/suite/mariabackup/huge_lsn.test @@ -79,7 +79,7 @@ INSERT INTO t VALUES(1); echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir; --enable_result_log SET GLOBAL innodb_flush_log_at_trx_commit=1; INSERT INTO t VALUES(2); diff --git a/mysql-test/suite/mariabackup/incremental_encrypted.test b/mysql-test/suite/mariabackup/incremental_encrypted.test index d5570f20006..ddf7e492a56 100644 --- a/mysql-test/suite/mariabackup/incremental_encrypted.test +++ b/mysql-test/suite/mariabackup/incremental_encrypted.test @@ -18,7 +18,7 @@ INSERT INTO t VALUES(1); echo # Create full backup , 
modify table, then create incremental/differential backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir; --enable_result_log SET GLOBAL innodb_flush_log_at_trx_commit = 1; @@ -26,7 +26,7 @@ INSERT INTO t VALUES(2); SELECT * FROM t; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$incremental_dir --incremental-basedir=$basedir; echo # Prepare full backup, apply incremental one; exec $XTRABACKUP --prepare --target-dir=$basedir; exec $XTRABACKUP --prepare --target-dir=$basedir --incremental-dir=$incremental_dir; diff --git a/mysql-test/suite/mariabackup/innodb_ddl_on_intermediate_table.result b/mysql-test/suite/mariabackup/innodb_ddl_on_intermediate_table.result new file mode 100644 index 00000000000..46fdfe77145 --- /dev/null +++ b/mysql-test/suite/mariabackup/innodb_ddl_on_intermediate_table.result @@ -0,0 +1,5 @@ +CREATE TABLE IF NOT EXISTS t1 ( col1 INT, col_text TEXT ) ENGINE = InnoDB; +ALTER TABLE t1 ADD FULLTEXT KEY `ftidx1` ( col_text ); +# xtrabackup backup +SET debug_sync='RESET'; +DROP TABLE t1; diff --git a/mysql-test/suite/mariabackup/innodb_ddl_on_intermediate_table.test b/mysql-test/suite/mariabackup/innodb_ddl_on_intermediate_table.test new file mode 100644 index 00000000000..d4c4d70d1a5 --- /dev/null +++ b/mysql-test/suite/mariabackup/innodb_ddl_on_intermediate_table.test @@ -0,0 +1,18 @@ +--source include/have_debug.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup +--mkdir $targetdir + +CREATE TABLE IF NOT EXISTS t1 ( col1 INT, col_text TEXT ) ENGINE = InnoDB; +ALTER TABLE t1 ADD FULLTEXT KEY `ftidx1` ( col_text ); + +echo # xtrabackup backup; +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events,emulate_ddl_on_intermediate_table; +--enable_result_log + +SET debug_sync='RESET'; +rmdir $targetdir; +DROP TABLE t1; diff --git a/mysql-test/suite/mariabackup/lock_ddl_per_table.test b/mysql-test/suite/mariabackup/lock_ddl_per_table.test index 18c207718b5..98e7c5eaf6f 100644 --- a/mysql-test/suite/mariabackup/lock_ddl_per_table.test +++ b/mysql-test/suite/mariabackup/lock_ddl_per_table.test @@ -16,7 +16,7 @@ CREATE TABLE `bobby``tables` (id INT, name VARCHAR(50), purchased DATE) ENGINE I set global innodb_log_checkpoint_now = 1; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --lock-ddl-per-table=1 --dbug=+d,check_mdl_lock_works; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --lock-ddl-per-table=1 --dbug=+d,check_mdl_lock_works; --enable_result_log DROP TABLE t; DROP TABLE `bobby``tables`; diff --git a/mysql-test/suite/mariabackup/log_checksum_mismatch.test b/mysql-test/suite/mariabackup/log_checksum_mismatch.test index c8baf66e917..6cf4b3547e5 100644 --- a/mysql-test/suite/mariabackup/log_checksum_mismatch.test +++ b/mysql-test/suite/mariabackup/log_checksum_mismatch.test @@ -7,7 +7,7 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; let $backuplog=$MYSQLTEST_VARDIR/tmp/backup.log; --disable_result_log -exec $XTRABACKUP 
--defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,log_intermittent_checksum_mismatch --core-file > $backuplog; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,log_intermittent_checksum_mismatch --core-file > $backuplog; --enable_result_log --let SEARCH_RANGE = 10000000 diff --git a/mysql-test/suite/mariabackup/log_file_unexpected_large_number_in_name.result b/mysql-test/suite/mariabackup/log_file_unexpected_large_number_in_name.result new file mode 100644 index 00000000000..51b4dfc5536 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_file_unexpected_large_number_in_name.result @@ -0,0 +1,20 @@ +# +# Start of 10.5 tests +# +# +# MENT-1587 mariabackup failing due to aria log file copy +# +CREATE TABLE t1(i INT PRIMARY KEY) ENGINE=ARIA; +INSERT INTO t1 VALUES (10); +# Prepare full backup +# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM t1; +i +10 +DROP TABLE t1; +# +# End of 10.5 tests +# diff --git a/mysql-test/suite/mariabackup/log_file_unexpected_large_number_in_name.test b/mysql-test/suite/mariabackup/log_file_unexpected_large_number_in_name.test new file mode 100644 index 00000000000..7fef9d61549 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_file_unexpected_large_number_in_name.test @@ -0,0 +1,47 @@ +--let $MYSQLD_DATADIR=`select @@datadir` + +--echo # +--echo # Start of 10.5 tests +--echo # + +--echo # +--echo # MENT-1587 mariabackup failing due to aria log file copy +--echo # + + +--let $basedir=$MYSQLTEST_VARDIR/tmp/backup +--let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1 + +CREATE TABLE t1(i INT PRIMARY KEY) ENGINE=ARIA; +INSERT INTO t1 VALUES (10); + +# +# Add a log file with a number outside of last_log_number +# specified in aria_log_control. +# The actual file number written in the header is 4. +# Let's rename it to 100 for test purposes. +# Hopefully 100 should be enough. +# +--copy_file suite/mariabackup/std_data/ment1587_aria_log.00000004 $MYSQLD_DATADIR/aria_log.00000100 + +--disable_result_log +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir +--enable_result_log + +--disable_result_log +--echo # Prepare full backup +--exec $XTRABACKUP --prepare --target-dir=$basedir +--enable_result_log + +--let $targetdir=$basedir +--source include/restart_and_restore.inc +--enable_result_log +--rmdir $basedir + +SELECT * FROM t1; +DROP TABLE t1; + + +--echo # +--echo # End of 10.5 tests +--echo # diff --git a/mysql-test/suite/mariabackup/log_tables.result b/mysql-test/suite/mariabackup/log_tables.result new file mode 100644 index 00000000000..840efc718e9 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_tables.result @@ -0,0 +1,24 @@ +CREATE TABLE t(i INT) +ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; +SET GLOBAL general_log = 1; +SET GLOBAL log_output = 'TABLE'; +INSERT INTO t VALUES (1); +SELECT * FROM mysql.general_log +WHERE argument LIKE "INSERT INTO %" AND +(command_type = "Query" OR command_type = "Execute") ; +event_time user_host thread_id server_id command_type argument +TIMESTAMP USER_HOST THREAD_ID 1 Query INSERT INTO t VALUES (1) +# Insert new row into general_log table after it has been copied on BLOCK_DDL. +# Backup to dir. +# Xtrabackup prepare. 
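Aside to the MENT-1587 test above: the last_log_number it manipulates is the same 4-byte counter that display_aria_log_control decodes earlier in this patch. As a standalone illustration (hypothetical, and assuming the FILE privilege plus a datadir readable by LOAD_FILE), the same decoding can be applied to the control file directly:
# Hypothetical sketch: SUBSTRING offsets are 1-based, so this reads the 4 bytes at
# offset 42 of aria_log_control; REVERSE is applied because the counter appears to
# be stored little-endian, so reversing the bytes yields a readable hex number.
SET @ctrl = LOAD_FILE(CONCAT(@@datadir, 'aria_log_control'));
SELECT HEX(REVERSE(SUBSTRING(@ctrl, 42, 4))) AS last_logno;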
+# shutdown server +# remove datadir +# xtrabackup move back +# restart +SELECT * FROM mysql.general_log +WHERE argument LIKE "INSERT INTO %" AND +(command_type = "Query" OR command_type = "Execute") ; +event_time user_host thread_id server_id command_type argument +TIMESTAMP USER_HOST THREAD_ID 1 Query INSERT INTO t VALUES (1) +TIMESTAMP USER_HOST THREAD_ID 1 Query INSERT INTO test.t VALUES (2) +DROP TABLE t; diff --git a/mysql-test/suite/mariabackup/log_tables.test b/mysql-test/suite/mariabackup/log_tables.test new file mode 100644 index 00000000000..fe540a1ca91 --- /dev/null +++ b/mysql-test/suite/mariabackup/log_tables.test @@ -0,0 +1,49 @@ +# Test for copying log tables tail +--source include/have_aria.inc +--source include/have_debug.inc + +--let $targetdir=$MYSQLTEST_VARDIR/tmp/backup + +CREATE TABLE t(i INT) + ENGINE ARIA TRANSACTIONAL=1 ROW_FORMAT=PAGE PAGE_CHECKSUM=1; + +--let $general_log_old = `SELECT @@global.general_log` +--let $log_output_old = `SELECT @@global.log_output` + +SET GLOBAL general_log = 1; +SET GLOBAL log_output = 'TABLE'; + +INSERT INTO t VALUES (1); + +--replace_column 1 TIMESTAMP 2 USER_HOST 3 THREAD_ID 5 Query +--sorted_result +SELECT * FROM mysql.general_log + WHERE argument LIKE "INSERT INTO %" AND + (command_type = "Query" OR command_type = "Execute") ; + +--echo # Insert new row into general_log table after it has been copied on BLOCK_DDL. +--let after_stage_block_ddl=INSERT INTO test.t VALUES (2) + +--echo # Backup to dir. +--disable_result_log +--exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events +--enable_result_log + +--echo # Xtrabackup prepare. +--disable_result_log +--exec $XTRABACKUP --prepare --target-dir=$targetdir +--source include/restart_and_restore.inc +--enable_result_log + +--replace_column 1 TIMESTAMP 2 USER_HOST 3 THREAD_ID 5 Query +--sorted_result +SELECT * FROM mysql.general_log + WHERE argument LIKE "INSERT INTO %" AND + (command_type = "Query" OR command_type = "Execute") ; + +--rmdir $targetdir +DROP TABLE t; +--disable_query_log +--eval SET GLOBAL general_log = $general_log_old +--eval SET GLOBAL log_output = $log_output_old +--enable_query_log diff --git a/mysql-test/suite/mariabackup/mdev-14447.test b/mysql-test/suite/mariabackup/mdev-14447.test index 79a0d075897..74ae1378ac6 100644 --- a/mysql-test/suite/mariabackup/mdev-14447.test +++ b/mysql-test/suite/mariabackup/mdev-14447.test @@ -11,7 +11,7 @@ CREATE TABLE t(a varchar(40) PRIMARY KEY, b varchar(40), c varchar(40), d varcha echo # Create full backup , modify table, then create incremental/differential backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir; --enable_result_log SET debug_dbug='+d,skip_page_checksum',foreign_key_checks=0,unique_checks=0; diff --git a/mysql-test/suite/mariabackup/missing_ibd.test b/mysql-test/suite/mariabackup/missing_ibd.test index f406a555b4a..76d5a4ff281 100644 --- a/mysql-test/suite/mariabackup/missing_ibd.test +++ b/mysql-test/suite/mariabackup/missing_ibd.test @@ -24,7 +24,7 @@ call mtr.add_suppression('InnoDB: Ignoring tablespace for test/t1 because it cou echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP 
--defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir; --enable_result_log rmdir $targetdir; diff --git a/mysql-test/suite/mariabackup/nolock_ddl_during_backup_end.test b/mysql-test/suite/mariabackup/nolock_ddl_during_backup_end.test index f6bc51bd9a6..c75c063ab2a 100644 --- a/mysql-test/suite/mariabackup/nolock_ddl_during_backup_end.test +++ b/mysql-test/suite/mariabackup/nolock_ddl_during_backup_end.test @@ -9,6 +9,6 @@ CREATE TABLE t1(i int) ENGINE=INNODB; echo # xtrabackup backup; --disable_result_log error 1; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --no-lock --dbug=+d,mariabackup_events; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --no-lock --dbug=+d,mariabackup_events; --enable_result_log rmdir $targetdir; diff --git a/mysql-test/suite/mariabackup/partial.test b/mysql-test/suite/mariabackup/partial.test index 85808749b62..af6da274102 100644 --- a/mysql-test/suite/mariabackup/partial.test +++ b/mysql-test/suite/mariabackup/partial.test @@ -14,7 +14,7 @@ echo # xtrabackup backup; let targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables=test.*1" --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 "--tables=test.*1" --target-dir=$targetdir; --enable_result_log list_files $targetdir/test *.ibd; list_files $targetdir/test *.new; diff --git a/mysql-test/suite/mariabackup/partial_exclude.test b/mysql-test/suite/mariabackup/partial_exclude.test index 6a1ae13b512..973e7a4f328 100644 --- a/mysql-test/suite/mariabackup/partial_exclude.test +++ b/mysql-test/suite/mariabackup/partial_exclude.test @@ -28,7 +28,7 @@ echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables-exclude=test.*2" "--databases-exclude=db2" --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 "--tables-exclude=test.*2" "--databases-exclude=db2" --target-dir=$targetdir; --enable_result_log COMMIT; diff --git a/mysql-test/suite/mariabackup/partition_datadir.test b/mysql-test/suite/mariabackup/partition_datadir.test index 36520d331bf..078055a5a1a 100644 --- a/mysql-test/suite/mariabackup/partition_datadir.test +++ b/mysql-test/suite/mariabackup/partition_datadir.test @@ -14,7 +14,7 @@ PARTITION BY RANGE (i) PARTITION p3 VALUES LESS THAN (400) DATA DIRECTORY = '$MYSQLTEST_VARDIR/partitdata', PARTITION p4 VALUES LESS THAN MAXVALUE); INSERT INTO t VALUES (1), (101), (201), (301), (401); -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir; exec $XTRABACKUP --prepare --target-dir=$targetdir; DROP TABLE t; rmdir $MYSQLTEST_VARDIR/partitdata; diff --git a/mysql-test/suite/mariabackup/partition_partial.test b/mysql-test/suite/mariabackup/partition_partial.test index 7ccc42c036c..30e31a9d43e 100644 --- a/mysql-test/suite/mariabackup/partition_partial.test +++ b/mysql-test/suite/mariabackup/partition_partial.test @@ -16,7 +16,7 @@ echo # xtrabackup backup; let $targetdir=$MYSQLTEST_VARDIR/tmp/backup; --disable_result_log -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables=test.t1" --target-dir=$targetdir; +exec 
$XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 "--tables=test.t1" --target-dir=$targetdir;
 --enable_result_log
 INSERT INTO t1 VALUES (1), (101), (201), (301);
diff --git a/mysql-test/suite/mariabackup/rename_during_backup.result b/mysql-test/suite/mariabackup/rename_during_backup.result
index e071b6b2e21..ba509efe0cb 100644
--- a/mysql-test/suite/mariabackup/rename_during_backup.result
+++ b/mysql-test/suite/mariabackup/rename_during_backup.result
@@ -61,3 +61,15 @@ SELECT * from t6;
 i
 5
 DROP TABLE t6;
+#
+# MDEV-33011 mariabackup --backup: FATAL ERROR: ... Can't open datafile cool_down/t3
+#
+# Simulate zero initialized page to defer tablespace load after rename log is found
+SET @save_dbug = @@SESSION.debug_dbug;
+SET DEBUG_DBUG="+d,checkpoint_after_file_create";
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1);
+# RENAME that fails after redo log entry is written and flushed
+RENAME TABLE t1 TO non_existing_db.t1;
+ERROR HY000: Error on rename of './test/t1' to './non_existing_db/t1' (errno: 168 "Unknown (generic) error from engine")
+DROP TABLE t1;
diff --git a/mysql-test/suite/mariabackup/rename_during_backup.test b/mysql-test/suite/mariabackup/rename_during_backup.test
index d8e40b28941..44036691b6d 100644
--- a/mysql-test/suite/mariabackup/rename_during_backup.test
+++ b/mysql-test/suite/mariabackup/rename_during_backup.test
@@ -92,4 +92,31 @@ SELECT * from t6;
 DROP TABLE t6;
 rmdir $targetdir;
+--echo #
+--echo # MDEV-33011 mariabackup --backup: FATAL ERROR: ... Can't open datafile cool_down/t3
+--echo #
+--disable_query_log
+call mtr.add_suppression("InnoDB: Cannot rename '.*t1.ibd' to '.*non_existing_db.*' because the target schema directory doesn't exist");
+--enable_query_log
+
+mkdir $targetdir;
+
+--echo # Simulate zero initialized page to defer tablespace load after rename log is found
+SET @save_dbug = @@SESSION.debug_dbug;
+SET DEBUG_DBUG="+d,checkpoint_after_file_create";
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+INSERT INTO t1 VALUES(1);
+
+--echo # RENAME that fails after redo log entry is written and flushed
+--replace_result "\\" "/"
+--error ER_ERROR_ON_RENAME
+RENAME TABLE t1 TO non_existing_db.t1;
+
+--disable_result_log
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --prepare --target-dir=$targetdir;
+--enable_result_log
+
+DROP TABLE t1;
+rmdir $targetdir;
diff --git a/mysql-test/suite/mariabackup/rename_during_mdl_lock.test b/mysql-test/suite/mariabackup/rename_during_mdl_lock.test
index 212b7aabd69..b14b04a5e26 100644
--- a/mysql-test/suite/mariabackup/rename_during_mdl_lock.test
+++ b/mysql-test/suite/mariabackup/rename_during_mdl_lock.test
@@ -3,7 +3,7 @@ let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 mkdir $targetdir;
 CREATE TABLE t1(i int) ENGINE INNODB;
 set global innodb_log_checkpoint_now = 1;
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --lock-ddl-per-table --dbug=+d,rename_during_mdl_lock_table;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --lock-ddl-per-table --dbug=+d,rename_during_mdl_lock_table;
 echo # xtrabackup prepare;
 --disable_result_log
diff --git a/mysql-test/suite/mariabackup/small_ibd.test b/mysql-test/suite/mariabackup/small_ibd.test
index e8175fce7c9..bb476b8771e 100644
--- a/mysql-test/suite/mariabackup/small_ibd.test
+++ b/mysql-test/suite/mariabackup/small_ibd.test
@@ -13,7 +13,7 @@ echo #backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir;
 --enable_result_log
 remove_file $_datadir/test/small.ibd;
 rmdir $targetdir;
diff --git a/mysql-test/suite/mariabackup/std_data/ment1587_aria_log.00000004 b/mysql-test/suite/mariabackup/std_data/ment1587_aria_log.00000004
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/mysql-test/suite/mariabackup/system_versioning.test b/mysql-test/suite/mariabackup/system_versioning.test
index 1ced00b4588..04a5f72ac2c 100644
--- a/mysql-test/suite/mariabackup/system_versioning.test
+++ b/mysql-test/suite/mariabackup/system_versioning.test
@@ -5,7 +5,7 @@ update t set a=2;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir;
 --enable_result_log
 insert into t values (3);
@@ -32,7 +32,7 @@ insert into t values (1);
 update t set a=2;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir;
 --enable_result_log
 insert into t values (3);
diff --git a/mysql-test/suite/mariabackup/truncate_during_backup.test b/mysql-test/suite/mariabackup/truncate_during_backup.test
index 46ee244dfb0..8928fc4eb07 100644
--- a/mysql-test/suite/mariabackup/truncate_during_backup.test
+++ b/mysql-test/suite/mariabackup/truncate_during_backup.test
@@ -7,7 +7,7 @@ CREATE TABLE t1 ENGINE=InnoDB SELECT 1;
 --let after_load_tablespaces=TRUNCATE test.t1
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --dbug=+d,mariabackup_events;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --dbug=+d,mariabackup_events;
 --enable_result_log
 --let after_load_tablespaces=
diff --git a/mysql-test/suite/mariabackup/undo_space_id.test b/mysql-test/suite/mariabackup/undo_space_id.test
index 2c56492fd8e..ea762608eb6 100644
--- a/mysql-test/suite/mariabackup/undo_space_id.test
+++ b/mysql-test/suite/mariabackup/undo_space_id.test
@@ -11,7 +11,7 @@ INSERT INTO t1 VALUES(1);
 --echo # xtrabackup backup
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir;
 --enable_result_log
 --echo # Display undo log files from target directory
 list_files $basedir undo*;
diff --git a/mysql-test/suite/mariabackup/unencrypted_page_compressed.test b/mysql-test/suite/mariabackup/unencrypted_page_compressed.test
index 700c4dd2034..31e8323b8b6 100644
--- a/mysql-test/suite/mariabackup/unencrypted_page_compressed.test
+++ b/mysql-test/suite/mariabackup/unencrypted_page_compressed.test
@@ -38,7 +38,7 @@ echo # xtrabackup backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 let $backuplog=$MYSQLTEST_VARDIR/tmp/backup.log;
 --error 1
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir --core-file > $backuplog;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir --core-file > $backuplog;
 --enable_result_log
 --let SEARCH_PATTERN=Database page corruption detected.*
diff --git a/mysql-test/suite/mariabackup/unsupported_redo.test b/mysql-test/suite/mariabackup/unsupported_redo.test
index 97e1cad222d..38cceb2f687 100644
--- a/mysql-test/suite/mariabackup/unsupported_redo.test
+++ b/mysql-test/suite/mariabackup/unsupported_redo.test
@@ -15,7 +15,7 @@ ALTER TABLE t1 FORCE, ALGORITHM=INPLACE;
 echo # No longer fails during full backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir;
 --enable_result_log
 DROP TABLE t1;
@@ -29,13 +29,13 @@ INSERT INTO t1(a) select 1 union select 2 union select 3;
 --echo # Create full backup , modify table, then fails during creation of
 --echo # incremental/differential backup
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$basedir;
 --enable_result_log
 ALTER TABLE t1 FORCE, ALGORITHM=INPLACE;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$incremental_dir --incremental-basedir=$basedir;
 --enable_result_log
 DROP TABLE t1;
@@ -58,7 +58,7 @@ ALTER TABLE t21 FORCE, ALGORITHM=INPLACE;
 --echo # unsupported redo log for the table t21.
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables-exclude=test.t21" --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 "--tables-exclude=test.t21" --target-dir=$targetdir;
 --enable_result_log
 --list_files $targetdir/test *.ibd
 --list_files $targetdir/test *.new
diff --git a/mysql-test/suite/mariabackup/xb_aws_key_management.test b/mysql-test/suite/mariabackup/xb_aws_key_management.test
index c8a12f6ed08..a2e407d3b22 100644
--- a/mysql-test/suite/mariabackup/xb_aws_key_management.test
+++ b/mysql-test/suite/mariabackup/xb_aws_key_management.test
@@ -10,7 +10,7 @@ INSERT INTO t VALUES('foobar1');
 echo # xtrabackup backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir;
 exec $XTRABACKUP --prepare --target-dir=$targetdir;
 -- source include/restart_and_restore.inc
 --enable_result_log
diff --git a/mysql-test/suite/mariabackup/xb_file_key_management.test b/mysql-test/suite/mariabackup/xb_file_key_management.test
index 4d27b2dfa95..eca69c976ff 100644
--- a/mysql-test/suite/mariabackup/xb_file_key_management.test
+++ b/mysql-test/suite/mariabackup/xb_file_key_management.test
@@ -8,7 +8,7 @@ DELETE FROM t LIMIT 1;
 echo # xtrabackup backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir;
 --enable_result_log
 --let SEARCH_RANGE = 10000000
diff --git a/mysql-test/suite/mariabackup/xb_history.test b/mysql-test/suite/mariabackup/xb_history.test
index f9374a1aaab..e05b7721dc9 100644
--- a/mysql-test/suite/mariabackup/xb_history.test
+++ b/mysql-test/suite/mariabackup/xb_history.test
@@ -6,7 +6,7 @@ DROP TABLE IF EXISTS mysql.mariadb_backup_history;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --history=foo --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --history=foo --backup --parallel=10 --target-dir=$targetdir;
 --enable_result_log
 rmdir $targetdir;
diff --git a/mysql-test/suite/mariabackup/xb_page_compress.test b/mysql-test/suite/mariabackup/xb_page_compress.test
index 7e806e6de22..e2819e264c1 100644
--- a/mysql-test/suite/mariabackup/xb_page_compress.test
+++ b/mysql-test/suite/mariabackup/xb_page_compress.test
@@ -27,7 +27,7 @@ echo # xtrabackup backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables=test.*1" --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 "--tables=test.*1" --target-dir=$targetdir;
 echo # xtrabackup prepare;
 exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group-suffix=.1 --prepare --export --target-dir=$targetdir;
 --enable_result_log
diff --git a/mysql-test/suite/mariabackup/xb_partition.test b/mysql-test/suite/mariabackup/xb_partition.test
index 1c8eeaa19e6..13ce8fa2b39 100644
--- a/mysql-test/suite/mariabackup/xb_partition.test
+++ b/mysql-test/suite/mariabackup/xb_partition.test
@@ -39,7 +39,7 @@ INSERT INTO isam_p VALUES (1), (101), (201), (301);
 let $targetdir=$MYSQLTEST_VARDIR/tmp;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir/full;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir/full;
 --enable_result_log
 DROP TABLE t1;
diff --git a/mysql-test/suite/mariabackup/xb_rocksdb.test b/mysql-test/suite/mariabackup/xb_rocksdb.test
index e41f3b2bf7e..6c23fe3d1ea 100644
--- a/mysql-test/suite/mariabackup/xb_rocksdb.test
+++ b/mysql-test/suite/mariabackup/xb_rocksdb.test
@@ -8,9 +8,9 @@ echo # xtrabackup backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 let $stream=$MYSQLTEST_VARDIR/tmp/backup.xb;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir $backup_extra_param;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir $backup_extra_param;
 --enable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --stream=xbstream > $stream 2>$MYSQLTEST_VARDIR/tmp/backup_stream.log;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --stream=xbstream > $stream 2>$MYSQLTEST_VARDIR/tmp/backup_stream.log;
 INSERT INTO t VALUES(2);
diff --git a/mysql-test/suite/mariabackup/xb_rocksdb_datadir.test b/mysql-test/suite/mariabackup/xb_rocksdb_datadir.test
index c2e90d9075b..2a0b2a4666e 100644
--- a/mysql-test/suite/mariabackup/xb_rocksdb_datadir.test
+++ b/mysql-test/suite/mariabackup/xb_rocksdb_datadir.test
@@ -9,7 +9,7 @@ INSERT INTO t VALUES(1);
 echo # xtrabackup backup;
 let $targetdir=$MYSQLTEST_VARDIR/tmp/backup;
 --disable_result_log
-exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$targetdir;
+exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --target-dir=$targetdir;
 --enable_result_log
 INSERT INTO t VALUES(2);
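# Note on the recurring change in the mariabackup test hunks above: the --parallel option
# of mariabackup/xtrabackup sets the number of threads used to copy data files during
# --backup, and adding --parallel=10 presumably makes these tests exercise the parallel
# copy code path. A minimal standalone sketch of the same invocation outside mtr follows;
# the config path, target directory and thread count are illustrative assumptions only,
# not part of this patch.
#
#   mariadb-backup --defaults-file=/etc/my.cnf --backup --parallel=10 --target-dir=/backups/full
#   mariadb-backup --prepare --target-dir=/backups/full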
diff --git a/mysql-test/suite/mariabackup/xbstream.test b/mysql-test/suite/mariabackup/xbstream.test index 212ac598064..8429a3b587d 100644 --- a/mysql-test/suite/mariabackup/xbstream.test +++ b/mysql-test/suite/mariabackup/xbstream.test @@ -8,7 +8,7 @@ mkdir $targetdir; let $streamfile=$MYSQLTEST_VARDIR/tmp/backup.xb; echo # xtrabackup backup to stream; -exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --databases-exclude=foobar --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log; +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --parallel=10 --databases-exclude=foobar --stream=xbstream > $streamfile 2>$targetdir/backup_stream.log; echo # xbstream extract; --disable_result_log exec $XBSTREAM -x -C $targetdir < $streamfile; diff --git a/mysql-test/suite/multi_source/info_logs.result b/mysql-test/suite/multi_source/info_logs.result index a35a20bdbf7..6f3fd7e7e68 100644 --- a/mysql-test/suite/multi_source/info_logs.result +++ b/mysql-test/suite/multi_source/info_logs.result @@ -94,17 +94,17 @@ MASTER 2.2 # EOF # show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos - Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 7 0 60.000 -MASTER 2.2 Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 7 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error 
Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos + Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 7 0 60.000 +MASTER 2.2 Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000002 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 7 0 60.000 include/wait_for_slave_to_start.inc set default_master_connection = 'MASTER 2.2'; include/wait_for_slave_to_start.inc set default_master_connection = ''; show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos - Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 6 0 60.000 -MASTER 2.2 Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 6 0 60.000 +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path 
Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos + Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_1 60 master-bin.000001 relay.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 1 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 6 0 60.000 +MASTER 2.2 Slave has read all relay log; waiting for more updates Waiting for master to send event 127.0.0.1 root MYPORT_2 60 master-bin.000001 relay-master@00202@002e2.000004 master-bin.000001 Yes Yes 0 0 None 0 No 0 No 0 0 2 No optimistic 0 NULL Slave has read all relay log; waiting for more updates 0 0 0 0 1073741824 6 0 60.000 # # List of files matching '*info*' pattern # after slave server restart diff --git a/mysql-test/suite/multi_source/multi_source_slave_alias_replica.result b/mysql-test/suite/multi_source/multi_source_slave_alias_replica.result index 355919def5a..ce6efc26b84 100644 --- a/mysql-test/suite/multi_source/multi_source_slave_alias_replica.result +++ b/mysql-test/suite/multi_source/multi_source_slave_alias_replica.result @@ -34,7 +34,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running Yes Slave_SQL_Running Yes -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -76,6 +75,7 @@ Slave_SQL_Running_State Slave has read all relay log; waiting for more updates Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 @@ -96,7 +96,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running Yes Slave_SQL_Running Yes -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -138,6 +137,7 @@ Slave_SQL_Running_State Slave has read all relay log; waiting for more updates Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 diff --git a/mysql-test/suite/multi_source/reset_slave.result b/mysql-test/suite/multi_source/reset_slave.result index 2e9ce5e896f..6ff1f5a9d23 100644 --- a/mysql-test/suite/multi_source/reset_slave.result +++ b/mysql-test/suite/multi_source/reset_slave.result @@ -13,15 +13,15 @@ insert into t1 values (1),(2); connection slave; stop slave 'master1'; show slave 'master1' status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key 
Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups - 127.0.0.1 root MYPORT_1 60 master-bin.000001 mysqld-relay-bin-master1.000002 master-bin.000001 No No 0 0 None 0 No NULL No 0 0 1 Slave_Pos 0-1-3 optimistic 0 NULL 2 1 0 +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB + 127.0.0.1 root MYPORT_1 60 master-bin.000001 mysqld-relay-bin-master1.000002 master-bin.000001 No No 0 0 None 0 No NULL No 0 0 1 Slave_Pos 0-1-3 optimistic 0 NULL 2 1 0 mysqld-relay-bin-master1.000001 mysqld-relay-bin-master1.000002 mysqld-relay-bin-master1.index reset slave 'master1'; show slave 'master1' status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups - 127.0.0.1 root MYPORT_1 60 4 No No 0 0 0 None 0 No NULL No 0 0 1 Slave_Pos optimistic 0 NULL 2 1 0 +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error 
Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB + 127.0.0.1 root MYPORT_1 60 4 No No 0 0 0 None 0 No NULL No 0 0 1 Slave_Pos optimistic 0 NULL 2 1 0 reset slave 'master1' all; show slave 'master1' status; ERROR HY000: There is no master connection 'master1' diff --git a/mysql-test/suite/multi_source/simple.result b/mysql-test/suite/multi_source/simple.result index 65c25b88e44..61932184b66 100644 --- a/mysql-test/suite/multi_source/simple.result +++ b/mysql-test/suite/multi_source/simple.result @@ -32,7 +32,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running Yes Slave_SQL_Running Yes -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -74,6 +73,7 @@ Slave_SQL_Running_State Slave has read all relay log; waiting for more updates Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 @@ -94,7 +94,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running Yes Slave_SQL_Running Yes -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -136,6 +135,7 @@ Slave_SQL_Running_State Slave has read all relay log; waiting for more updates Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 @@ -221,7 +221,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running No Slave_SQL_Running No -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -263,6 +262,7 @@ Slave_SQL_Running_State Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB reset slave 'slave1'; show all slaves status; Connection_name slave1 @@ -279,7 +279,6 @@ Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running No Slave_SQL_Running No -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -321,6 +320,7 @@ Slave_SQL_Running_State Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 @@ -341,7 +341,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running Yes Slave_SQL_Running Yes -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -383,6 +382,7 @@ Slave_SQL_Running_State Slave has read all relay log; waiting for more updates Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 @@ -405,7 +405,6 @@ Relay_Log_Pos Relay_Master_Log_File master-bin.000001 Slave_IO_Running Yes Slave_SQL_Running Yes -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -447,6 +446,7 @@ Slave_SQL_Running_State Slave has read all relay log; waiting for more updates Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 @@ -471,7 +471,6 @@ Relay_Log_Pos Relay_Master_Log_File 
master-bin.000001 Slave_IO_Running No Slave_SQL_Running No -Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table @@ -513,6 +512,7 @@ Slave_SQL_Running_State Slave_DDL_Groups 0 Slave_Non_Transactional_Groups 0 Slave_Transactional_Groups 0 +Replicate_Rewrite_DB Retried_transactions 0 Max_relay_log_size 1073741824 Executed_log_entries 7 diff --git a/mysql-test/suite/multi_source/syntax.result b/mysql-test/suite/multi_source/syntax.result index 3c7c91c35c8..6b214fe3644 100644 --- a/mysql-test/suite/multi_source/syntax.result +++ b/mysql-test/suite/multi_source/syntax.result @@ -1,11 +1,11 @@ include/master-slave.inc [connection master] show slave status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB show slave '' status; -Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups 
Slave_Non_Transactional_Groups Slave_Transactional_Groups +Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB show all slaves status; -Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Rewrite_DB Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos +Connection_name Slave_SQL_State Slave_IO_State Master_Host Master_User Master_Port Connect_Retry Master_Log_File Read_Master_Log_Pos Relay_Log_File Relay_Log_Pos Relay_Master_Log_File Slave_IO_Running Slave_SQL_Running Replicate_Do_DB Replicate_Ignore_DB Replicate_Do_Table Replicate_Ignore_Table Replicate_Wild_Do_Table Replicate_Wild_Ignore_Table Last_Errno Last_Error Skip_Counter Exec_Master_Log_Pos Relay_Log_Space Until_Condition Until_Log_File Until_Log_Pos Master_SSL_Allowed Master_SSL_CA_File Master_SSL_CA_Path Master_SSL_Cert Master_SSL_Cipher Master_SSL_Key Seconds_Behind_Master Master_SSL_Verify_Server_Cert Last_IO_Errno Last_IO_Error Last_SQL_Errno Last_SQL_Error Replicate_Ignore_Server_Ids Master_Server_Id Master_SSL_Crl Master_SSL_Crlpath Using_Gtid Gtid_IO_Pos Replicate_Do_Domain_Ids Replicate_Ignore_Domain_Ids Parallel_Mode SQL_Delay SQL_Remaining_Delay Slave_SQL_Running_State Slave_DDL_Groups Slave_Non_Transactional_Groups Slave_Transactional_Groups Replicate_Rewrite_DB Retried_transactions Max_relay_log_size Executed_log_entries Slave_received_heartbeats Slave_heartbeat_period Gtid_Slave_Pos # # Check error handling # diff --git a/mysql-test/suite/perfschema/r/alter_table_progress.result b/mysql-test/suite/perfschema/r/alter_table_progress.result index 31cc60927f6..e09e836f698 100644 --- 
a/mysql-test/suite/perfschema/r/alter_table_progress.result +++ b/mysql-test/suite/perfschema/r/alter_table_progress.result @@ -80,11 +80,10 @@ stage/sql/Unlocking tables NULL NULL stage/sql/Rename result table NULL NULL stage/sql/End of update loop NULL NULL stage/sql/Query end NULL NULL -stage/sql/Commit NULL NULL stage/sql/closing tables NULL NULL stage/sql/Unlocking tables NULL NULL stage/sql/closing tables NULL NULL -stage/sql/Commit implicit NULL NULL +stage/sql/Query end NULL NULL stage/sql/Starting cleanup NULL NULL stage/sql/Freeing items NULL NULL stage/sql/Reset for next command NULL NULL diff --git a/mysql-test/suite/perfschema/r/event_aggregate.result b/mysql-test/suite/perfschema/r/event_aggregate.result index 805378f5850..abc3b205518 100644 --- a/mysql-test/suite/perfschema/r/event_aggregate.result +++ b/mysql-test/suite/perfschema/r/event_aggregate.result @@ -251,35 +251,35 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 4 localhost stage/sql/closing tables 11 localhost stage/sql/init 3 localhost stage/sql/Opening tables 7 -localhost stage/sql/starting 6 +localhost stage/sql/starting 7 execute dump_stages_global; event_name count_star stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -383,7 +383,7 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 0 user2 localhost stage/sql/closing tables 0 user2 localhost stage/sql/init 0 @@ -395,7 +395,7 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 0 user2 stage/sql/closing tables 0 user2 stage/sql/init 0 @@ -407,21 +407,21 @@ localhost stage/sql/checking permissions 4 localhost stage/sql/closing tables 11 localhost stage/sql/init 3 localhost stage/sql/Opening tables 7 -localhost stage/sql/starting 6 +localhost stage/sql/starting 7 execute dump_stages_global; event_name count_star stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star 
user1 localhost statement/com/Error 0 @@ -551,45 +551,45 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 -localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -712,12 +712,12 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 0 user3 localhost stage/sql/closing tables 0 user3 localhost stage/sql/init 0 @@ -729,12 +729,12 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 0 user3 stage/sql/closing tables 0 user3 stage/sql/init 0 @@ -746,21 +746,21 @@ localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 -localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute 
dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -909,55 +909,55 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1099,17 +1099,17 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 0 user4 localhost stage/sql/closing tables 0 user4 localhost stage/sql/init 0 @@ -1121,17 +1121,17 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking 
permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 0 user4 stage/sql/closing tables 0 user4 stage/sql/init 0 @@ -1143,21 +1143,21 @@ localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1325,65 +1325,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 24 +localhost stage/sql/starting 28 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star 
user1 localhost statement/com/Error 0 @@ -1538,65 +1538,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 25 +localhost stage/sql/starting 29 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1751,65 +1751,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 
-user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 26 +localhost stage/sql/starting 30 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1963,65 +1963,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 
localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 27 +localhost stage/sql/starting 31 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2174,65 +2174,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2386,65 +2386,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 
localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2597,65 +2597,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 
user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2808,65 +2808,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening 
tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3019,65 +3019,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3230,65 +3230,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing 
tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3441,65 +3441,65 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 
stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3674,43 +3674,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3907,21 +3907,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4125,14 +4125,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name 
count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4343,7 +4343,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4554,7 +4554,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4765,7 +4765,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4976,7 +4976,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5187,7 +5187,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5398,7 +5398,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5609,7 +5609,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5820,7 +5820,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -6031,7 +6031,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -6242,7 +6242,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -6453,7 +6453,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -6636,7 +6636,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; 
user host event_name count_star
execute dump_statements_user;
@@ -6763,7 +6763,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -6862,7 +6862,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a.result
index 30948c2e611..eb2ac32f0c0 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_a.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a.result
@@ -235,28 +235,28 @@ user1 stage/sql/checking permissions 4
user1 stage/sql/closing tables 11
user1 stage/sql/init 3
user1 stage/sql/Opening tables 7
-user1 stage/sql/starting 6
+user1 stage/sql/starting 7
execute dump_stages_host;
host event_name count_star
localhost stage/sql/checking permissions 4
localhost stage/sql/closing tables 11
localhost stage/sql/init 3
localhost stage/sql/Opening tables 7
-localhost stage/sql/starting 6
+localhost stage/sql/starting 7
execute dump_stages_global;
event_name count_star
stage/sql/checking permissions 4
stage/sql/closing tables 11
stage/sql/init 3
stage/sql/Opening tables 7
-stage/sql/starting 6
+stage/sql/starting 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/checking permissions 4
stage/sql/closing tables 11
stage/sql/init 3
stage/sql/Opening tables 7
-stage/sql/starting 6
+stage/sql/starting 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -350,7 +350,7 @@ user1 stage/sql/checking permissions 4
user1 stage/sql/closing tables 11
user1 stage/sql/init 3
user1 stage/sql/Opening tables 7
-user1 stage/sql/starting 6
+user1 stage/sql/starting 7
user2 stage/sql/checking permissions 0
user2 stage/sql/closing tables 0
user2 stage/sql/init 0
@@ -362,21 +362,21 @@ localhost stage/sql/checking permissions 4
localhost stage/sql/closing tables 11
localhost stage/sql/init 3
localhost stage/sql/Opening tables 7
-localhost stage/sql/starting 6
+localhost stage/sql/starting 7
execute dump_stages_global;
event_name count_star
stage/sql/checking permissions 4
stage/sql/closing tables 11
stage/sql/init 3
stage/sql/Opening tables 7
-stage/sql/starting 6
+stage/sql/starting 7
execute dump_stages_history;
event_name count(event_name)
stage/sql/checking permissions 4
stage/sql/closing tables 11
stage/sql/init 3
stage/sql/Opening tables 7
-stage/sql/starting 6
+stage/sql/starting 7
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -489,33 +489,33 @@ user1 stage/sql/checking permissions 4
user1 stage/sql/closing tables 11
user1 stage/sql/init 3
user1 stage/sql/Opening tables 7
-user1 stage/sql/starting 6
+user1 stage/sql/starting 7
user2 stage/sql/checking permissions 4
user2 stage/sql/closing tables 10
user2 stage/sql/init 3
user2 stage/sql/Opening tables 6
-user2 stage/sql/starting 6
+user2 stage/sql/starting 7
execute dump_stages_host;
host event_name count_star
localhost stage/sql/checking permissions 8
localhost stage/sql/closing tables 21
localhost stage/sql/init 6
localhost stage/sql/Opening tables 13
-localhost stage/sql/starting 12
+localhost
stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -619,12 +619,12 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 0 user3 stage/sql/closing tables 0 user3 stage/sql/init 0 @@ -636,21 +636,21 @@ localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 -localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -773,38 +773,38 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -918,17 +918,17 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 
stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 0 user4 stage/sql/closing tables 0 user4 stage/sql/init 0 @@ -940,21 +940,21 @@ localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1087,43 +1087,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 24 +localhost stage/sql/starting 28 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1243,43 +1243,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 25 +localhost stage/sql/starting 29 
execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1399,43 +1399,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 26 +localhost stage/sql/starting 30 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1554,43 +1554,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 27 +localhost stage/sql/starting 31 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name 
count_star execute dump_statements_user; @@ -1708,43 +1708,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1863,43 +1863,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2017,43 +2017,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking 
permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2171,43 +2171,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2325,43 +2325,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing 
tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2479,43 +2479,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2633,43 +2633,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 
stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2787,43 +2787,43 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2963,21 +2963,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3124,14 +3124,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3285,7 +3285,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3439,7 +3439,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3593,7 +3593,7 @@ stage/sql/checking 
permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3747,7 +3747,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -3901,7 +3901,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4055,7 +4055,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4209,7 +4209,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4363,7 +4363,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4517,7 +4517,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4671,7 +4671,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4825,7 +4825,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -4979,7 +4979,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -5105,7 +5105,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
@@ -5203,7 +5203,7 @@ stage/sql/checking permissions 16
stage/sql/closing tables 41
stage/sql/init 12
stage/sql/Opening tables 25
-stage/sql/starting 28
+stage/sql/starting 32
execute dump_statements_account;
user host event_name count_star
execute dump_statements_user;
diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result
index 956ea6c6488..8dbe393eda9 100644
--- a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result
+++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_h.result
@@ -205,7 +205,7 @@ user1
stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -214,14 +214,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -306,7 +306,7 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 0 user2 stage/sql/closing tables 0 user2 stage/sql/init 0 @@ -320,14 +320,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -431,12 +431,12 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -445,14 +445,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -547,12 +547,12 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 0 user3 stage/sql/closing tables 0 user3 stage/sql/init 0 @@ -566,14 +566,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -687,17 +687,17 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 
stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -706,14 +706,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -818,17 +818,17 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 0 user4 stage/sql/closing tables 0 user4 stage/sql/init 0 @@ -842,14 +842,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -973,22 +973,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -997,14 +997,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1115,22 +1115,22 @@ user1 stage/sql/checking permissions 
4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1139,14 +1139,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1257,22 +1257,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1281,14 +1281,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1398,22 +1398,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1422,14 +1422,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 
stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1538,22 +1538,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1562,14 +1562,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1679,22 +1679,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1703,14 +1703,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1819,22 +1819,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening 
tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1843,14 +1843,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1959,22 +1959,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1983,14 +1983,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2099,22 +2099,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2123,14 +2123,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2239,22 +2239,22 @@ user1 stage/sql/checking permissions 4 user1 
stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2263,14 +2263,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2379,22 +2379,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2403,14 +2403,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2519,22 +2519,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2543,14 +2543,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 
stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2683,14 +2683,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2823,14 +2823,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2970,7 +2970,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3110,7 +3110,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3250,7 +3250,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3390,7 +3390,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3530,7 +3530,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3670,7 +3670,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3810,7 +3810,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3950,7 +3950,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4090,7 +4090,7 @@ 
stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4230,7 +4230,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4370,7 +4370,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4510,7 +4510,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4622,7 +4622,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4706,7 +4706,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result index fbaa9a8d83f..f44c3bf9a1f 100644 --- a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result +++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u.result @@ -219,21 +219,21 @@ localhost stage/sql/checking permissions 4 localhost stage/sql/closing tables 11 localhost stage/sql/init 3 localhost stage/sql/Opening tables 7 -localhost stage/sql/starting 6 +localhost stage/sql/starting 7 execute dump_stages_global; event_name count_star stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -317,21 +317,21 @@ localhost stage/sql/checking permissions 4 localhost stage/sql/closing tables 11 localhost stage/sql/init 3 localhost stage/sql/Opening tables 7 -localhost stage/sql/starting 6 +localhost stage/sql/starting 7 execute dump_stages_global; event_name count_star stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -427,21 +427,21 @@ localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 
-localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -526,21 +526,21 @@ localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 -localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -637,21 +637,21 @@ localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -737,21 +737,21 @@ localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -849,21 +849,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 24 +localhost stage/sql/starting 28 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ 
-948,21 +948,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 25 +localhost stage/sql/starting 29 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1047,21 +1047,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 26 +localhost stage/sql/starting 30 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1145,21 +1145,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 27 +localhost stage/sql/starting 31 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1242,21 +1242,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1340,21 +1340,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 
stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1437,21 +1437,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1534,21 +1534,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1631,21 +1631,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1728,21 +1728,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1825,21 +1825,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 
stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1922,21 +1922,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2019,21 +2019,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2123,14 +2123,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2227,7 +2227,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2324,7 +2324,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2421,7 +2421,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2518,7 +2518,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2615,7 +2615,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 
stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2712,7 +2712,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2809,7 +2809,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2906,7 +2906,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3003,7 +3003,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3100,7 +3100,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3197,7 +3197,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3294,7 +3294,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3391,7 +3391,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -3488,7 +3488,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result index af535623e9a..887c74d35d9 100644 --- a/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result +++ b/mysql-test/suite/perfschema/r/event_aggregate_no_a_no_u_no_h.result @@ -191,14 +191,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -275,14 +275,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 
-stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -371,14 +371,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -456,14 +456,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -553,14 +553,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -639,14 +639,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -737,14 +737,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -822,14 +822,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -907,14 +907,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 
stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -991,14 +991,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1074,14 +1074,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1158,14 +1158,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1241,14 +1241,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1324,14 +1324,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1407,14 +1407,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1490,14 +1490,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute 
dump_statements_user; @@ -1573,14 +1573,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1656,14 +1656,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1739,14 +1739,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1822,14 +1822,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1912,7 +1912,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -1995,7 +1995,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2078,7 +2078,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2161,7 +2161,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2244,7 +2244,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2327,7 +2327,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name 
count_star execute dump_statements_user; @@ -2410,7 +2410,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2493,7 +2493,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2576,7 +2576,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2659,7 +2659,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2742,7 +2742,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2825,7 +2825,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2908,7 +2908,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -2991,7 +2991,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_h.result index 1209d37667e..b9a885f62e7 100644 --- a/mysql-test/suite/perfschema/r/event_aggregate_no_h.result +++ b/mysql-test/suite/perfschema/r/event_aggregate_no_h.result @@ -221,14 +221,14 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -237,14 +237,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost 
statement/com/Error 0 @@ -339,7 +339,7 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 0 user2 localhost stage/sql/closing tables 0 user2 localhost stage/sql/init 0 @@ -351,7 +351,7 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 0 user2 stage/sql/closing tables 0 user2 stage/sql/init 0 @@ -365,14 +365,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -493,24 +493,24 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -519,14 +519,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -640,12 +640,12 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 0 user3 localhost stage/sql/closing tables 0 user3 localhost stage/sql/init 0 @@ -657,12 +657,12 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 
stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 0 user3 stage/sql/closing tables 0 user3 stage/sql/init 0 @@ -676,14 +676,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -823,34 +823,34 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -859,14 +859,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -999,17 +999,17 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 0 user4 localhost stage/sql/closing tables 0 user4 
localhost stage/sql/init 0 @@ -1021,17 +1021,17 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 0 user4 stage/sql/closing tables 0 user4 stage/sql/init 0 @@ -1045,14 +1045,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1211,44 +1211,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 6 +user1 stage/sql/starting 7 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1257,14 +1257,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star user1 localhost 
statement/com/Error 0 @@ -1410,44 +1410,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 6 +user2 stage/sql/starting 7 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1456,14 +1456,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1609,44 +1609,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 
stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 6 +user3 stage/sql/starting 7 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1655,14 +1655,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1807,44 +1807,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 6 +user4 stage/sql/starting 7 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -1853,14 +1853,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star user1 
localhost statement/com/Error 0 @@ -2004,44 +2004,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2050,14 +2050,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2202,44 +2202,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 
3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2248,14 +2248,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2399,44 +2399,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2445,14 +2445,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star 
user1 localhost statement/com/Error 0 @@ -2596,44 +2596,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2642,14 +2642,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2793,44 +2793,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 
stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -2839,14 +2839,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2990,44 +2990,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -3036,14 +3036,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host 
event_name count_star user1 localhost statement/com/Error 0 @@ -3187,44 +3187,44 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -3233,14 +3233,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3406,22 +3406,22 @@ user1 stage/sql/checking permissions 4 user1 stage/sql/closing tables 11 user1 stage/sql/init 3 user1 stage/sql/Opening tables 7 -user1 stage/sql/starting 7 +user1 stage/sql/starting 8 user2 stage/sql/checking permissions 4 user2 stage/sql/closing tables 10 user2 stage/sql/init 3 user2 stage/sql/Opening tables 6 -user2 stage/sql/starting 7 +user2 stage/sql/starting 8 user3 stage/sql/checking permissions 4 user3 stage/sql/closing tables 10 user3 stage/sql/init 3 user3 stage/sql/Opening tables 6 -user3 stage/sql/starting 7 +user3 stage/sql/starting 8 user4 stage/sql/checking permissions 4 user4 stage/sql/closing tables 10 user4 stage/sql/init 3 user4 stage/sql/Opening tables 6 -user4 stage/sql/starting 7 +user4 stage/sql/starting 8 execute dump_stages_host; host event_name count_star execute dump_stages_global; @@ -3430,14 +3430,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 
stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3627,14 +3627,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3824,14 +3824,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4028,7 +4028,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4225,7 +4225,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4422,7 +4422,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4619,7 +4619,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4816,7 +4816,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5013,7 +5013,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5210,7 +5210,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5407,7 +5407,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5604,7 +5604,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 
stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5801,7 +5801,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -5998,7 +5998,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -6167,7 +6167,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -6280,7 +6280,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -6365,7 +6365,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_u.result b/mysql-test/suite/perfschema/r/event_aggregate_no_u.result index 39da9783c96..da99aa54a2b 100644 --- a/mysql-test/suite/perfschema/r/event_aggregate_no_u.result +++ b/mysql-test/suite/perfschema/r/event_aggregate_no_u.result @@ -233,7 +233,7 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -242,21 +242,21 @@ localhost stage/sql/checking permissions 4 localhost stage/sql/closing tables 11 localhost stage/sql/init 3 localhost stage/sql/Opening tables 7 -localhost stage/sql/starting 6 +localhost stage/sql/starting 7 execute dump_stages_global; event_name count_star stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -348,7 +348,7 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 0 user2 localhost stage/sql/closing tables 0 user2 localhost stage/sql/init 0 @@ -362,21 +362,21 @@ localhost stage/sql/checking permissions 4 localhost stage/sql/closing tables 11 localhost stage/sql/init 3 localhost stage/sql/Opening tables 7 -localhost stage/sql/starting 6 +localhost stage/sql/starting 7 
execute dump_stages_global; event_name count_star stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -487,12 +487,12 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -501,21 +501,21 @@ localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 -localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -617,12 +617,12 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 0 user3 localhost stage/sql/closing tables 0 user3 localhost stage/sql/init 0 @@ -636,21 +636,21 @@ localhost stage/sql/checking permissions 8 localhost stage/sql/closing tables 21 localhost stage/sql/init 6 localhost stage/sql/Opening tables 13 -localhost stage/sql/starting 12 +localhost stage/sql/starting 14 execute dump_stages_global; event_name count_star stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -771,17 +771,17 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 
user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -790,21 +790,21 @@ localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -916,17 +916,17 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 0 user4 localhost stage/sql/closing tables 0 user4 localhost stage/sql/init 0 @@ -940,21 +940,21 @@ localhost stage/sql/checking permissions 12 localhost stage/sql/closing tables 31 localhost stage/sql/init 9 localhost stage/sql/Opening tables 19 -localhost stage/sql/starting 18 +localhost stage/sql/starting 21 execute dump_stages_global; event_name count_star stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1085,22 +1085,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 
localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1109,21 +1109,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 24 +localhost stage/sql/starting 28 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1241,22 +1241,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1265,21 +1265,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 25 +localhost stage/sql/starting 29 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1397,22 +1397,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 
localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1421,21 +1421,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 26 +localhost stage/sql/starting 30 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1552,22 +1552,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1576,21 +1576,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 27 +localhost stage/sql/starting 31 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1706,22 +1706,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 
user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1730,21 +1730,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1861,22 +1861,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1885,21 +1885,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2015,22 +2015,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 
user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2039,21 +2039,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2169,22 +2169,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2193,21 +2193,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2323,22 +2323,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost 
stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2347,21 +2347,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2477,22 +2477,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2501,21 +2501,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ 
-2631,22 +2631,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2655,21 +2655,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2809,21 +2809,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2963,21 +2963,21 @@ localhost stage/sql/checking permissions 16 localhost stage/sql/closing tables 41 localhost stage/sql/init 12 localhost stage/sql/Opening tables 25 -localhost stage/sql/starting 28 +localhost stage/sql/starting 32 execute dump_stages_global; event_name count_star stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3124,14 +3124,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute 
dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3285,7 +3285,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3439,7 +3439,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3593,7 +3593,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3747,7 +3747,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3901,7 +3901,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4055,7 +4055,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4209,7 +4209,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4363,7 +4363,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4517,7 +4517,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4671,7 +4671,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4825,7 +4825,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4951,7 +4951,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 
execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -5049,7 +5049,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -5147,7 +5147,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; diff --git a/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result b/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result index 818c61e0630..064b6e45e5c 100644 --- a/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result +++ b/mysql-test/suite/perfschema/r/event_aggregate_no_u_no_h.result @@ -203,7 +203,7 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -214,14 +214,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -304,7 +304,7 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 0 user2 localhost stage/sql/closing tables 0 user2 localhost stage/sql/init 0 @@ -320,14 +320,14 @@ stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 4 stage/sql/closing tables 11 stage/sql/init 3 stage/sql/Opening tables 7 -stage/sql/starting 6 +stage/sql/starting 7 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -429,12 +429,12 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -445,14 +445,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 
stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -545,12 +545,12 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 0 user3 localhost stage/sql/closing tables 0 user3 localhost stage/sql/init 0 @@ -566,14 +566,14 @@ stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 8 stage/sql/closing tables 21 stage/sql/init 6 stage/sql/Opening tables 13 -stage/sql/starting 12 +stage/sql/starting 14 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -685,17 +685,17 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -706,14 +706,14 @@ stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -816,17 +816,17 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 0 user4 localhost stage/sql/closing tables 0 user4 localhost stage/sql/init 0 @@ -842,14 +842,14 @@ stage/sql/checking 
permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 12 stage/sql/closing tables 31 stage/sql/init 9 stage/sql/Opening tables 19 -stage/sql/starting 18 +stage/sql/starting 21 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -971,22 +971,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 6 +user1 localhost stage/sql/starting 7 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -997,14 +997,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 24 +stage/sql/starting 28 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1113,22 +1113,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 6 +user2 localhost stage/sql/starting 7 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1139,14 +1139,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 25 +stage/sql/starting 29 execute dump_statements_account; user host event_name 
count_star user1 localhost statement/com/Error 0 @@ -1255,22 +1255,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 6 +user3 localhost stage/sql/starting 7 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1281,14 +1281,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 26 +stage/sql/starting 30 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1396,22 +1396,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 6 +user4 localhost stage/sql/starting 7 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1422,14 +1422,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 27 +stage/sql/starting 31 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1536,22 +1536,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing 
tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1562,14 +1562,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1677,22 +1677,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1703,14 +1703,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1817,22 +1817,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost 
stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1843,14 +1843,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -1957,22 +1957,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -1983,14 +1983,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2097,22 +2097,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2123,14 +2123,14 @@ stage/sql/checking permissions 16 
stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2237,22 +2237,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2263,14 +2263,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2377,22 +2377,22 @@ user1 localhost stage/sql/checking permissions 4 user1 localhost stage/sql/closing tables 11 user1 localhost stage/sql/init 3 user1 localhost stage/sql/Opening tables 7 -user1 localhost stage/sql/starting 7 +user1 localhost stage/sql/starting 8 user2 localhost stage/sql/checking permissions 4 user2 localhost stage/sql/closing tables 10 user2 localhost stage/sql/init 3 user2 localhost stage/sql/Opening tables 6 -user2 localhost stage/sql/starting 7 +user2 localhost stage/sql/starting 8 user3 localhost stage/sql/checking permissions 4 user3 localhost stage/sql/closing tables 10 user3 localhost stage/sql/init 3 user3 localhost stage/sql/Opening tables 6 -user3 localhost stage/sql/starting 7 +user3 localhost stage/sql/starting 8 user4 localhost stage/sql/checking permissions 4 user4 localhost stage/sql/closing tables 10 user4 localhost stage/sql/init 3 user4 localhost stage/sql/Opening tables 6 -user4 localhost stage/sql/starting 7 +user4 localhost stage/sql/starting 8 execute dump_stages_user; user event_name count_star execute dump_stages_host; @@ -2403,14 +2403,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 
localhost statement/com/Error 0 @@ -2543,14 +2543,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2683,14 +2683,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2823,14 +2823,14 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_stages_history; event_name count(event_name) stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -2970,7 +2970,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3110,7 +3110,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3250,7 +3250,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3390,7 +3390,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3530,7 +3530,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3670,7 +3670,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3810,7 +3810,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -3950,7 +3950,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 
stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4090,7 +4090,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4230,7 +4230,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4370,7 +4370,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star user1 localhost statement/com/Error 0 @@ -4482,7 +4482,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4566,7 +4566,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; @@ -4650,7 +4650,7 @@ stage/sql/checking permissions 16 stage/sql/closing tables 41 stage/sql/init 12 stage/sql/Opening tables 25 -stage/sql/starting 28 +stage/sql/starting 32 execute dump_statements_account; user host event_name count_star execute dump_statements_user; diff --git a/mysql-test/suite/perfschema/r/nesting.result b/mysql-test/suite/perfschema/r/nesting.result index 9e18e5ac272..f17a6462432 100644 --- a/mysql-test/suite/perfschema/r/nesting.result +++ b/mysql-test/suite/perfschema/r/nesting.result @@ -127,10 +127,10 @@ relative_event_id relative_end_event_id event_name comment nesting_event_type re 10 10 stage/sql/Optimizing (stage) STATEMENT 0 11 11 stage/sql/Executing (stage) STATEMENT 0 12 12 stage/sql/End of update loop (stage) STATEMENT 0 -13 13 stage/sql/Query end (stage) STATEMENT 0 -14 15 stage/sql/Commit (stage) STATEMENT 0 -15 15 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 14 -16 16 stage/sql/closing tables (stage) STATEMENT 0 +13 14 stage/sql/Query end (stage) STATEMENT 0 +14 14 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 13 +15 15 stage/sql/closing tables (stage) STATEMENT 0 +16 16 stage/sql/Query end (stage) STATEMENT 0 17 17 stage/sql/Starting cleanup (stage) STATEMENT 0 18 18 stage/sql/Freeing items (stage) STATEMENT 0 19 19 wait/io/socket/sql/client_connection send STATEMENT 0 @@ -151,10 +151,10 @@ relative_event_id relative_end_event_id event_name comment nesting_event_type re 34 34 stage/sql/Optimizing (stage) STATEMENT 24 35 35 stage/sql/Executing (stage) STATEMENT 24 36 36 stage/sql/End of update loop (stage) STATEMENT 24 -37 37 stage/sql/Query end (stage) STATEMENT 24 -38 39 stage/sql/Commit (stage) STATEMENT 24 -39 39 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 38 -40 40 stage/sql/closing tables (stage) STATEMENT 24 +37 38 stage/sql/Query end (stage) STATEMENT 24 +38 38 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 37 +39 39 stage/sql/closing tables (stage) STATEMENT 24 +40 40 
stage/sql/Query end (stage) STATEMENT 24 41 41 stage/sql/Starting cleanup (stage) STATEMENT 24 42 42 stage/sql/Freeing items (stage) STATEMENT 24 43 43 wait/io/socket/sql/client_connection send STATEMENT 24 @@ -175,10 +175,10 @@ relative_event_id relative_end_event_id event_name comment nesting_event_type re 58 58 stage/sql/Optimizing (stage) STATEMENT 48 59 59 stage/sql/Executing (stage) STATEMENT 48 60 60 stage/sql/End of update loop (stage) STATEMENT 48 -61 61 stage/sql/Query end (stage) STATEMENT 48 -62 63 stage/sql/Commit (stage) STATEMENT 48 -63 63 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 62 -64 64 stage/sql/closing tables (stage) STATEMENT 48 +61 62 stage/sql/Query end (stage) STATEMENT 48 +62 62 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 61 +63 63 stage/sql/closing tables (stage) STATEMENT 48 +64 64 stage/sql/Query end (stage) STATEMENT 48 65 65 stage/sql/Starting cleanup (stage) STATEMENT 48 66 66 stage/sql/Freeing items (stage) STATEMENT 48 67 67 wait/io/socket/sql/client_connection send STATEMENT 48 @@ -202,10 +202,10 @@ select "With a third part to make things complete" as payload NULL NULL 83 83 stage/sql/Optimizing (stage) STATEMENT 72 84 84 stage/sql/Executing (stage) STATEMENT 72 85 85 stage/sql/End of update loop (stage) STATEMENT 72 -86 86 stage/sql/Query end (stage) STATEMENT 72 -87 88 stage/sql/Commit (stage) STATEMENT 72 -88 88 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 87 -89 89 stage/sql/closing tables (stage) STATEMENT 72 +86 87 stage/sql/Query end (stage) STATEMENT 72 +87 87 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 86 +88 88 stage/sql/closing tables (stage) STATEMENT 72 +89 89 stage/sql/Query end (stage) STATEMENT 72 90 90 stage/sql/Starting cleanup (stage) STATEMENT 72 91 92 stage/sql/Freeing items (stage) STATEMENT 72 92 92 wait/io/socket/sql/client_connection send STAGE 91 @@ -221,10 +221,10 @@ select "With a third part to make things complete" as payload NULL NULL 101 101 stage/sql/Optimizing (stage) STATEMENT 93 102 102 stage/sql/Executing (stage) STATEMENT 93 103 103 stage/sql/End of update loop (stage) STATEMENT 93 -104 104 stage/sql/Query end (stage) STATEMENT 93 -105 106 stage/sql/Commit (stage) STATEMENT 93 -106 106 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 105 -107 107 stage/sql/closing tables (stage) STATEMENT 93 +104 105 stage/sql/Query end (stage) STATEMENT 93 +105 105 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 104 +106 106 stage/sql/closing tables (stage) STATEMENT 93 +107 107 stage/sql/Query end (stage) STATEMENT 93 108 108 stage/sql/Starting cleanup (stage) STATEMENT 93 109 110 stage/sql/Freeing items (stage) STATEMENT 93 110 110 wait/io/socket/sql/client_connection send STAGE 109 @@ -238,10 +238,10 @@ select "With a third part to make things complete" as payload NULL NULL 118 118 stage/sql/Optimizing (stage) STATEMENT 111 119 119 stage/sql/Executing (stage) STATEMENT 111 120 120 stage/sql/End of update loop (stage) STATEMENT 111 -121 121 stage/sql/Query end (stage) STATEMENT 111 -122 123 stage/sql/Commit (stage) STATEMENT 111 -123 123 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 122 -124 124 stage/sql/closing tables (stage) STATEMENT 111 +121 122 stage/sql/Query end (stage) STATEMENT 111 +122 122 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 121 +123 123 stage/sql/closing tables (stage) STATEMENT 111 +124 124 stage/sql/Query end (stage) STATEMENT 111 125 125 stage/sql/Starting cleanup (stage) STATEMENT 111 126 126 stage/sql/Freeing items (stage) STATEMENT 111 127 127 
wait/io/socket/sql/client_connection send STATEMENT 111 @@ -262,10 +262,10 @@ select "With a third part to make things complete" as payload NULL NULL 142 142 stage/sql/Optimizing (stage) STATEMENT 132 143 143 stage/sql/Executing (stage) STATEMENT 132 144 144 stage/sql/End of update loop (stage) STATEMENT 132 -145 145 stage/sql/Query end (stage) STATEMENT 132 -146 147 stage/sql/Commit (stage) STATEMENT 132 -147 147 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 146 -148 148 stage/sql/closing tables (stage) STATEMENT 132 +145 146 stage/sql/Query end (stage) STATEMENT 132 +146 146 wait/synch/mutex/sql/THD::LOCK_thd_kill lock STAGE 145 +147 147 stage/sql/closing tables (stage) STATEMENT 132 +148 148 stage/sql/Query end (stage) STATEMENT 132 149 149 stage/sql/Starting cleanup (stage) STATEMENT 132 150 150 stage/sql/Freeing items (stage) STATEMENT 132 151 151 wait/io/socket/sql/client_connection send STATEMENT 132 diff --git a/mysql-test/suite/perfschema/r/stage_mdl_global.result b/mysql-test/suite/perfschema/r/stage_mdl_global.result index b9eda506700..48aca9e5529 100644 --- a/mysql-test/suite/perfschema/r/stage_mdl_global.result +++ b/mysql-test/suite/perfschema/r/stage_mdl_global.result @@ -10,10 +10,10 @@ username event_name nesting_event_type username event_name nesting_event_type user1 stage/sql/starting STATEMENT user1 stage/sql/starting STATEMENT +user1 stage/sql/starting STATEMENT user1 stage/sql/Query end STATEMENT -user1 stage/sql/Commit STATEMENT user1 stage/sql/closing tables STATEMENT -user1 stage/sql/Commit implicit STATEMENT +user1 stage/sql/Query end STATEMENT user1 stage/sql/Starting cleanup STATEMENT user1 stage/sql/Freeing items STATEMENT user1 stage/sql/Reset for next command STATEMENT diff --git a/mysql-test/suite/perfschema/r/stage_mdl_table.result b/mysql-test/suite/perfschema/r/stage_mdl_table.result index 5ba0ba04fca..b9b2dc1257d 100644 --- a/mysql-test/suite/perfschema/r/stage_mdl_table.result +++ b/mysql-test/suite/perfschema/r/stage_mdl_table.result @@ -19,10 +19,10 @@ username event_name nesting_event_type user1 stage/sql/Sending data STATEMENT user1 stage/sql/End of update loop STATEMENT user1 stage/sql/Query end STATEMENT -user1 stage/sql/Commit STATEMENT user1 stage/sql/closing tables STATEMENT user1 stage/sql/Unlocking tables STATEMENT user1 stage/sql/closing tables STATEMENT +user1 stage/sql/Query end STATEMENT user1 stage/sql/Starting cleanup STATEMENT user1 stage/sql/Freeing items STATEMENT user1 stage/sql/Reset for next command STATEMENT diff --git a/mysql-test/suite/perfschema/r/threads_history.result b/mysql-test/suite/perfschema/r/threads_history.result index aaf2cd09e31..364a5f6f9e3 100644 --- a/mysql-test/suite/perfschema/r/threads_history.result +++ b/mysql-test/suite/perfschema/r/threads_history.result @@ -167,16 +167,16 @@ stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command EVENT_NAME stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command @@ -189,29 +189,29 @@ stage/sql/Optimizing stage/sql/Executing stage/sql/End of update loop stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query 
end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command @@ -585,16 +585,16 @@ stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command EVENT_NAME stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command @@ -607,29 +607,29 @@ stage/sql/Optimizing stage/sql/Executing stage/sql/End of update loop stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command stage/sql/starting stage/sql/Query end -stage/sql/Commit stage/sql/closing tables +stage/sql/Query end stage/sql/Starting cleanup stage/sql/Freeing items stage/sql/Reset for next command diff --git a/mysql-test/suite/rpl/include/rpl_extra_col_master.test b/mysql-test/suite/rpl/include/rpl_extra_col_master.test index a7abe69db0a..3fef3cc1fd0 100644 --- a/mysql-test/suite/rpl/include/rpl_extra_col_master.test +++ b/mysql-test/suite/rpl/include/rpl_extra_col_master.test @@ -59,6 +59,10 @@ #VARCHAR(M) # +--disable_query_log +call mtr.add_suppression("Could not read packet:.* errno: 11"); +--enable_query_log + --let $_saved_conn= $CURRENT_CONNECTION let $binformat = `SHOW VARIABLES LIKE '%binlog_format%'`; diff --git a/mysql-test/suite/rpl/r/rpl_auditing.result b/mysql-test/suite/rpl/r/rpl_auditing.result new file mode 100644 index 00000000000..1861beb4e8f --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_auditing.result @@ -0,0 +1,64 @@ +include/master-slave.inc +[connection master] +drop table if exists t1; +connection slave; +reset master; +CREATE TABLE IF NOT EXISTS mysql.server_audit_filters ( +filtername char(80) COLLATE utf8_bin NOT NULL DEFAULT '', +rule longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT 'true' CHECK (json_valid(rule)), +CONSTRAINT c_filtername UNIQUE (filtername) +) ENGINE=Aria; +CREATE TABLE IF NOT EXISTS mysql.server_audit_users (host char(60) COLLATE utf8_bin NOT NULL DEFAULT '', +user char(80) COLLATE utf8_bin NOT NULL DEFAULT '', +filtername char(80) NOT NULL DEFAULT '', +CONSTRAINT c_host_user UNIQUE (host, user) +) ENGINE=Aria; +INSERT INTO mysql.server_audit_filters VALUES ('ignore_sys', '{"ignore_tables" : "mysql.*"}'); +INSERT INTO mysql.server_audit_users VALUES ('%','','ignore_sys'); +INSERT INTO mysql.server_audit_users VALUES ('%','root','ignore_sys'); +install plugin server_audit soname 'server_audit2'; +set global server_audit_logging=on; 
+connection master; +create table t1 (a int); +insert into t1 values (1); +truncate t1; +drop table t1; +connection slave; +set global server_audit_logging=off; +truncate mysql.server_audit_filters; +truncate mysql.server_audit_users; +INSERT INTO mysql.server_audit_filters VALUES ('no_logging','false'); +INSERT INTO mysql.server_audit_users VALUES ('%','','no_logging'); +set global server_audit_logging=on; +connection master; +create table t1 (a int); +insert into t1 values (1); +truncate t1; +drop table t1; +connection slave; +set global server_audit_logging=off; +uninstall plugin server_audit; +Warnings: +Warning 1620 Plugin is busy and will be uninstalled on shutdown +truncate mysql.server_audit_filters; +truncate mysql.server_audit_users; +TIME,HOSTNAME,,,0,0,AUDIT_CONFIG,,file_path=server_audit.log,0 +TIME,HOSTNAME,,,0,0,AUDIT_CONFIG,,rotate_size=1000000,0 +TIME,HOSTNAME,,,0,0,AUDIT_CONFIG,,file_rotations=9,0 +TIME,HOSTNAME,root,localhost,ID,0,AUDIT_CONFIG,test,logging=ON,0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_logging=on',0 +TIME,HOSTNAME,,,ID,ID,CREATE,test,t1, +TIME,HOSTNAME,,,ID,ID,WRITE,test,t1, +TIME,HOSTNAME,,,ID,ID,CREATE,test,t1, +TIME,HOSTNAME,,,ID,ID,DROP,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select master_pos_wait(\'master-bin.#', POS, 300, \'\')',0 +TIME,HOSTNAME,root,localhost,ID,0,AUDIT_CONFIG,test,logging=OFF,0 +TIME,HOSTNAME,,,0,0,AUDIT_CONFIG,,file_path=server_audit.log,0 +TIME,HOSTNAME,,,0,0,AUDIT_CONFIG,,rotate_size=1000000,0 +TIME,HOSTNAME,,,0,0,AUDIT_CONFIG,,file_rotations=9,0 +TIME,HOSTNAME,root,localhost,ID,0,AUDIT_CONFIG,test,logging=ON,0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'set global server_audit_logging=on',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,test,'select master_pos_wait(\'master-bin.#', POS, 300, \'\')',0 +TIME,HOSTNAME,root,localhost,ID,0,AUDIT_CONFIG,test,logging=OFF,0 +connection master; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result b/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result index af79b482b2f..20dd9076a5d 100644 --- a/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result +++ b/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result @@ -1,6 +1,7 @@ include/master-slave.inc [connection master] connection master; +SET @org_log_warnings=@@GLOBAL.LOG_WARNINGS; SET GLOBAL LOG_WARNINGS=2; connection slave; include/stop_slave.inc @@ -41,11 +42,11 @@ connection master; include/wait_for_pattern_in_file.inc FOUND 1 /using_gtid\(1\), gtid\(\'0-1-2,10-1-1\'\).*/ in mysqld.1.err "===== Clean up =====" +SET GLOBAL LOG_WARNINGS=@org_log_warnings; connection slave; include/stop_slave.inc CHANGE MASTER TO MASTER_USE_GTID=no; include/start_slave.inc connection master; DROP TABLE t; -SET GLOBAL LOG_WARNINGS=default; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_change_master.result b/mysql-test/suite/rpl/r/rpl_change_master.result index 88801b07bba..48cec72d917 100644 --- a/mysql-test/suite/rpl/r/rpl_change_master.result +++ b/mysql-test/suite/rpl/r/rpl_change_master.result @@ -26,9 +26,4 @@ connection master; CHANGE MASTER TO MASTER_USER='root', MASTER_SSL=0, MASTER_SSL_CA='', MASTER_SSL_CERT='', MASTER_SSL_KEY='', MASTER_SSL_CRL='', MASTER_SSL_CRLPATH=''; CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='', MASTER_SSL=0; -"Usage of CURRENT_POS in CHANGE MASTER MASTER_USE_GTID is dreprecated. 
-CHANGE MASTER TO MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead -CHANGE MASTER TO MASTER_USE_GTID=SLAVE_POS; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_domain_id_filter_master_crash.result b/mysql-test/suite/rpl/r/rpl_domain_id_filter_master_crash.result index a54ff99b591..cd13590b61f 100644 --- a/mysql-test/suite/rpl/r/rpl_domain_id_filter_master_crash.result +++ b/mysql-test/suite/rpl/r/rpl_domain_id_filter_master_crash.result @@ -1,9 +1,6 @@ include/master-slave.inc [connection master] connection master; -call mtr.add_suppression("mysqld: Table '.*gtid_slave_pos' is marked as crashed and should be repaired"); -call mtr.add_suppression("Checking table: './mysql/gtid_slave_pos'"); -call mtr.add_suppression("mysql.gtid_slave_pos: 1 client is using or hasn't closed the table properly"); SET @@session.gtid_domain_id= 0; create table ti (a int auto_increment primary key) engine=innodb; create table tm (a int auto_increment primary key) engine=myisam; diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result index be98b7e3dcd..145b269aac9 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_innodb.result @@ -657,7 +657,7 @@ START SLAVE; STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-103. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-104. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos CREATE TABLE t15 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5) ) ENGINE='InnoDB'; @@ -697,7 +697,7 @@ Last_SQL_Error = 'Error 'Unknown column 'c7' in 't15'' on query. Default databas STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-104. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-105. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos *** Drop t15 *** DROP TABLE t15; @@ -716,7 +716,7 @@ START SLAVE; STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-105. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-106. 
If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos CREATE TABLE t16 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5) ) ENGINE='InnoDB'; @@ -756,7 +756,7 @@ Last_SQL_Error = 'Error 'Key column 'c6' doesn't exist in table' on query. Defau STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-106. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-107. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos *** Drop t16 *** DROP TABLE t16; @@ -775,7 +775,7 @@ START SLAVE; STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-107. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-108. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos CREATE TABLE t17 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5) ) ENGINE='InnoDB'; diff --git a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result index 53b20b188ba..dae497d5f29 100644 --- a/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result +++ b/mysql-test/suite/rpl/r/rpl_extra_col_master_myisam.result @@ -657,7 +657,7 @@ START SLAVE; STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-103. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-104. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos CREATE TABLE t15 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5) ) ENGINE='MyISAM'; @@ -697,7 +697,7 @@ Last_SQL_Error = 'Error 'Unknown column 'c7' in 't15'' on query. Default databas STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-104. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-105. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos *** Drop t15 *** DROP TABLE t15; @@ -716,7 +716,7 @@ START SLAVE; STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-105. 
If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-106. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos CREATE TABLE t16 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5) ) ENGINE='MyISAM'; @@ -756,7 +756,7 @@ Last_SQL_Error = 'Error 'Key column 'c6' doesn't exist in table' on query. Defau STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-106. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-107. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos *** Drop t16 *** DROP TABLE t16; @@ -775,7 +775,7 @@ START SLAVE; STOP SLAVE; include/reset_slave.inc Warnings: -Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-107. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos +Warning 1948 Specified value for @@gtid_slave_pos contains no value for replication domain 0. This conflicts with the binary log which contains GTID 0-2-108. If MASTER_GTID_POS=CURRENT_POS is used, the binlog position will override the new value of @@gtid_slave_pos CREATE TABLE t17 (c1 INT PRIMARY KEY, c4 BLOB, c5 CHAR(5) ) ENGINE='MyISAM'; diff --git a/mysql-test/suite/rpl/r/rpl_get_lock.result b/mysql-test/suite/rpl/r/rpl_get_lock.result index b852546e1bf..cbb02a32648 100644 --- a/mysql-test/suite/rpl/r/rpl_get_lock.result +++ b/mysql-test/suite/rpl/r/rpl_get_lock.result @@ -1,6 +1,6 @@ include/master-slave.inc [connection master] -CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +SET GLOBAL LOG_WARNINGS=4; create table t1(n int); insert into t1 values(get_lock("lock",2)); disconnect master; @@ -35,4 +35,5 @@ NULL connection master1; drop table t1; connection slave; +connection default; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_basic.result b/mysql-test/suite/rpl/r/rpl_gtid_basic.result index a7da70108b0..afc700a72c5 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_basic.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_basic.result @@ -69,8 +69,6 @@ INSERT INTO t2 VALUES (5, "i1a"); connection server_4; CHANGE MASTER TO master_host = '127.0.0.1', master_port = MASTER_PORT, MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead include/start_slave.inc SELECT * FROM t1 ORDER BY a; a b @@ -91,8 +89,6 @@ connection server_2; include/stop_slave.inc CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_4, MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. 
Please use master_demote_to_slave=1 instead include/start_slave.inc connection server_4; UPDATE t2 SET b="j1a" WHERE a=5; @@ -121,8 +117,6 @@ include/save_master_gtid.inc connection server_3; CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_4, MASTER_USE_GTID=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead include/start_slave.inc include/sync_with_master_gtid.inc SELECT * FROM t2 ORDER BY a; diff --git a/mysql-test/suite/rpl/r/rpl_gtid_crash.result b/mysql-test/suite/rpl/r/rpl_gtid_crash.result index 179461adb8a..c47beacfb36 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_crash.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_crash.result @@ -4,6 +4,7 @@ connection server_1; call mtr.add_suppression("Checking table:"); call mtr.add_suppression("client is using or hasn't closed the table properly"); call mtr.add_suppression("Table .* is marked as crashed and should be repaired"); +call mtr.add_suppression("Could not read packet:.* errno: 11"); flush tables; ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE=InnoDB; diff --git a/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result b/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result index a7cb710cc07..4c35d42d90a 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_errorhandling.result @@ -75,8 +75,6 @@ INSERT INTO t1 VALUES (2); SET sql_log_bin = 1; INSERT INTO t1 VALUES (3); CHANGE MASTER TO master_use_gtid=current_pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead BEGIN; SET GLOBAL gtid_slave_pos = "100-100-100"; ERROR 25000: You are not allowed to execute this command in a transaction diff --git a/mysql-test/suite/rpl/r/rpl_gtid_grouping.result b/mysql-test/suite/rpl/r/rpl_gtid_grouping.result index ad7d6116c49..1b4d86dd8e2 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_grouping.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_grouping.result @@ -50,5 +50,4 @@ CHANGE MASTER TO MASTER_USE_GTID=no; include/start_slave.inc connection master; DROP TABLE t; -SET GLOBAL LOG_WARNINGS=default; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_gtid_mdev4820.result b/mysql-test/suite/rpl/r/rpl_gtid_mdev4820.result index cea5aaaeacd..665fc536df6 100644 --- a/mysql-test/suite/rpl/r/rpl_gtid_mdev4820.result +++ b/mysql-test/suite/rpl/r/rpl_gtid_mdev4820.result @@ -45,8 +45,6 @@ SET GLOBAL gtid_slave_pos= '0-2-10'; connection server_1; CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_2, master_user= 'root', master_use_gtid=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead START SLAVE; connection server_2; INSERT INTO t1 VALUES (11); @@ -76,8 +74,6 @@ connection server_2; INSERT INTO t1 VALUES (22); CHANGE MASTER TO master_host = '127.0.0.1', master_port = SERVER_MYPORT_1, master_user= 'root', master_use_gtid=CURRENT_POS; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead START SLAVE; SET sql_log_bin= 0; CALL mtr.add_suppression("which is not in the master's binlog. 
Since the master's binlog contains GTIDs with higher sequence numbers, it probably means that the slave has diverged"); diff --git a/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result b/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result index dc45c0b9ab3..a89010cd432 100644 --- a/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result +++ b/mysql-test/suite/rpl/r/rpl_heartbeat_debug.result @@ -9,8 +9,6 @@ Variable_name Slave_heartbeat_period Value 60.000 SET @saved_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug="+d,simulate_slave_heartbeat_network_error"; -CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); -CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); include/start_slave.inc connection master; drop table if exists t1; diff --git a/mysql-test/suite/rpl/r/rpl_packet.result b/mysql-test/suite/rpl/r/rpl_packet.result index 4a2a5d70d39..bb6269607fe 100644 --- a/mysql-test/suite/rpl/r/rpl_packet.result +++ b/mysql-test/suite/rpl/r/rpl_packet.result @@ -2,6 +2,8 @@ include/master-slave.inc [connection master] call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); +call mtr.add_suppression("Could not write packet:"); +call mtr.add_suppression("Got a packet bigger than 'max_allowed_packet' bytes"); drop database if exists DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; create database DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; connection master; diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result index eff939512e2..0b317c4682b 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic.result @@ -626,6 +626,7 @@ include/save_master_gtid.inc connection server_2; include/sync_with_master_gtid.inc connection server_2; +SET @org_log_warnings=@@GLOBAL.LOG_WARNINGS; set global log_warnings=2; BEGIN; INSERT INTO t1 SET a=1; @@ -651,7 +652,7 @@ connection server_2; include/sync_with_master_gtid.inc connection server_2; include/stop_slave.inc -set global log_warnings=default; +set global log_warnings=@org_log_warnings; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; include/start_slave.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa.result index 4136f1885db..90c3e5db614 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa.result @@ -32,7 +32,6 @@ include/diff_tables.inc [master:t0, slave:t0] include/diff_tables.inc [master:t1, slave:t1] connection slave; include/stop_slave.inc -set global log_warnings=default; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; include/start_slave.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa_lsu_off.result b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa_lsu_off.result index 4136f1885db..90c3e5db614 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa_lsu_off.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_optimistic_xa_lsu_off.result @@ -32,7 +32,6 @@ include/diff_tables.inc [master:t0, slave:t0] include/diff_tables.inc [master:t1, slave:t1] connection 
slave; include/stop_slave.inc -set global log_warnings=default; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; include/start_slave.inc diff --git a/mysql-test/suite/rpl/r/rpl_parallel_retry.result b/mysql-test/suite/rpl/r/rpl_parallel_retry.result index 2cc4044a2cd..4c7effd737a 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_retry.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_retry.result @@ -339,6 +339,28 @@ connection server_1; DROP TABLE t1, t2, t3, t4; DROP function foo; connection server_2; +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=4; +connection server_1; +CREATE TABLE t1 (a INT, b VARCHAR(123)) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1, 'asdf'); +UPDATE t1 SET b='zxf1' WHERE a=1; +UPDATE t1 SET b='\n' WHERE a=1; +connection server_2; +SET @old_dbug=@@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,write_row_inject_sleep_before_ha_write_row"; +include/start_slave.inc +connection server_1; +connection server_2; +connection server_1; +DROP TABLE t1; +connection server_2; +include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +include/start_slave.inc connection server_1; CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; INSERT INTO t1 VALUES(100, 100); diff --git a/mysql-test/suite/rpl/r/rpl_parallel_sbm.result b/mysql-test/suite/rpl/r/rpl_parallel_sbm.result index 7990a663f04..e349353ac59 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_sbm.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_sbm.result @@ -27,12 +27,19 @@ connection slave; # delaying a transaction; then when the reciprocal START SLAVE occurs, # if the event is still to be delayed, SBM should resume accordingly include/stop_slave.inc +# Lock t1 on slave to ensure the event can't finish (and thereby update +# Seconds_Behind_Master) so slow running servers don't accidentally +# catch up to the master before checking SBM. +connection server_2; +LOCK TABLES t1 WRITE; include/start_slave.inc connection slave; # Waiting for replica to resume the delay for the transaction # Sleeping 1s to increment SBM # Ensuring Seconds_Behind_Master increases after sleeping.. 
# ..done +connection server_2; +UNLOCK TABLES; include/sync_with_master_gtid.inc # # Pt 2) If the worker threads have not entered an idle state, ensure diff --git a/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result b/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result index 0c810d2a3f4..b0a4fa59c69 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_stop_slave.result @@ -37,7 +37,9 @@ connection con_temp1; BEGIN; INSERT INTO t2 VALUES (21); connection server_2; -START SLAVE; +START SLAVE IO_THREAD; +include/wait_for_slave_param.inc [Read_Master_Log_Pos] +START SLAVE SQL_THREAD; connection con_temp2; SET @old_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug="+d,rpl_parallel_wait_for_done_trigger"; diff --git a/mysql-test/suite/rpl/r/rpl_parallel_temptable.result b/mysql-test/suite/rpl/r/rpl_parallel_temptable.result index e9bff03bd41..0f7af25303e 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_temptable.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_temptable.result @@ -202,6 +202,24 @@ a b include/stop_slave.inc SET GLOBAL slave_parallel_mode=@old_mode; include/start_slave.inc +*** MDEV33426: Memory allocation accounting incorrect for replicated temptable +connection server_1; +CREATE TEMPORARY TABLE t5 (a int) ENGINE=Aria; +CREATE TEMPORARY TABLE t6 (a int) ENGINE=Heap; +INSERT INTO t5 VALUES (1); +INSERT INTO t6 VALUES (2); +connection server_2; +include/stop_slave.inc +connection server_1; +INSERT INTO t1 SELECT a+40, 5 FROM t5; +INSERT INTO t1 SELECT a+40, 6 FROM t6; +DROP TABLE t5, t6; +connection server_2; +include/start_slave.inc +SELECT * FROM t1 WHERE a>=40 ORDER BY a; +a b +41 5 +42 6 connection server_2; include/stop_slave.inc SET GLOBAL slave_parallel_threads=@old_parallel_threads; diff --git a/mysql-test/suite/rpl/r/rpl_perfschema_connect_config.result b/mysql-test/suite/rpl/r/rpl_perfschema_connect_config.result index 27cb29d3968..4ace84ffac4 100644 --- a/mysql-test/suite/rpl/r/rpl_perfschema_connect_config.result +++ b/mysql-test/suite/rpl/r/rpl_perfschema_connect_config.result @@ -87,8 +87,6 @@ include/assert.inc [Value returned by SSS and PS table for Using_Gtid should be change master to master_user = 'root', master_use_gtid= CURRENT_POS; -Warnings: -Warning #### 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead include/assert.inc [Value returned by SSS and PS table for Using_Gtid should be same.] 
# 3) Test for Auto_position= SLAVE_POS diff --git a/mysql-test/suite/rpl/r/rpl_row_find_row_debug.result b/mysql-test/suite/rpl/r/rpl_row_find_row_debug.result index f1a0059a04f..c2bb256a750 100644 --- a/mysql-test/suite/rpl/r/rpl_row_find_row_debug.result +++ b/mysql-test/suite/rpl/r/rpl_row_find_row_debug.result @@ -20,6 +20,5 @@ FOUND 1 /The slave is applying a ROW event on behalf of an UPDATE statement on t FOUND 1 /The slave is applying a ROW event on behalf of a DELETE statement on table t1 and is currently taking a considerable amount/ in mysqld.2.err include/stop_slave.inc SET @@GLOBAL.debug_dbug = @saved_dbug; -SET GLOBAL log_warnings = 2; include/start_slave.inc include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result new file mode 100644 index 00000000000..08f601447d5 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result @@ -0,0 +1,32 @@ +include/master-slave.inc +[connection master] +connection master; +call mtr.add_suppression("Got an error reading communication packets"); +set @save_bgc_count= @@global.binlog_commit_wait_count; +set @save_bgc_usec= @@global.binlog_commit_wait_usec; +set @save_debug_dbug= @@global.debug_dbug; +set @@global.binlog_commit_wait_count=3; +set @@global.binlog_commit_wait_usec=10000000; +set @@global.debug_dbug="+d,testing_cond_var_per_thd"; +# Ensure semi-sync is on +connection slave; +connection master; +# Create three transactions to binlog group commit together +connection master; +create table t1 (a int); +connection server_1; +create table t2 (a int); +connection default; +create table t3 (a int); +connection master; +connection server_1; +connection default; +include/assert_grep.inc [Check that there is no 'Thread awaiting semi-sync ACK was awoken before its ACK' warning in error log.] +# +# Cleanup +connection master; +set @@global.binlog_commit_wait_count=@save_bgc_count; +set @@global.binlog_commit_wait_usec=@save_bgc_usec; +set @@global.debug_dbug=@save_debug_dbug; +drop table t1, t2, t3; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_fail_over.result b/mysql-test/suite/rpl/r/rpl_semi_sync_fail_over.result index 1c94c239fc6..ca7c802bfbe 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_fail_over.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_fail_over.result @@ -50,6 +50,8 @@ SELECT @@GLOBAL.gtid_current_pos; 0-1-4 # restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 connection server_1; +# Ensuring variable rpl_semi_sync_slave_enabled is ON.. +# Ensuring status rpl_semi_sync_slave_status is OFF.. include/assert.inc [Table t1 should have 1 rows.] FOUND 1 /truncated binlog file:.*master.*000001/ in mysqld.1.err disconnect conn_client; @@ -128,6 +130,8 @@ SELECT @@GLOBAL.gtid_current_pos; 0-2-7 # restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 connection server_2; +# Ensuring variable rpl_semi_sync_slave_enabled is ON.. +# Ensuring status rpl_semi_sync_slave_status is OFF.. include/assert.inc [Table t1 should have 3 rows.] FOUND 1 /truncated binlog file:.*slave.*000002.* to remove transactions starting from GTID 0-1-6/ in mysqld.2.err disconnect conn_client; @@ -207,6 +211,8 @@ SELECT @@GLOBAL.gtid_current_pos; 0-1-9 # restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 connection server_1; +# Ensuring variable rpl_semi_sync_slave_enabled is ON.. +# Ensuring status rpl_semi_sync_slave_status is OFF.. include/assert.inc [Table t1 should have 6 rows.] 
FOUND 1 /truncated binlog file:.*master.*000002.* to remove transactions starting from GTID 0-1-9/ in mysqld.1.err disconnect conn_client; diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result b/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result index ebd67c947f0..a8e158a25e4 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result @@ -25,7 +25,7 @@ call mtr.add_suppression("Failed to kill the active semi-sync connection"); set @sav_enabled_server_3= @@GLOBAL.rpl_semi_sync_slave_enabled; set @sav_server_3_dbug= @@GLOBAL.debug_dbug; connection server_1; -CREATE TABLE t1 (a int); +CREATE TABLE t1 (a int) engine=innodb; connection server_2; connection server_3; connect server_1_con2, localhost, root,,; @@ -34,8 +34,8 @@ connect server_1_con2, localhost, root,,; ############################# # # Test Case 1) If both replicas simulate a delay that is within the -# allowed timeout, the primary should delay killing the suspended thread -# until an ACK is received (Rpl_semi_sync_master_yes_tx should be 1). +# allowed timeout, the primary should delay killing the Ack_thread +# until an ACK is received. # connection server_1; #-- @@ -78,8 +78,6 @@ SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply"; #-- #-- Test begins connection server_1_con2; -#-- Give enough time after timeout/ack received to query yes_tx/no_tx -SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait"; connection server_1; #-- Begin semi-sync transaction INSERT INTO t1 VALUES (1); @@ -88,14 +86,7 @@ connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; connection server_1; -#-- Ensure either ACK was received (yes_tx=1) or timeout (no_tx=1) -show status like 'Rpl_semi_sync_master_yes_tx'; -Variable_name Value -Rpl_semi_sync_master_yes_tx 1 -show status like 'Rpl_semi_sync_master_no_tx'; -Variable_name Value -Rpl_semi_sync_master_no_tx 0 -connection server_1_con2; +ERROR HY000: Lost connection to server during query # Check logs to ensure shutdown was delayed FOUND 1 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err # Validate slave data is in correct state @@ -144,8 +135,8 @@ COUNT(*)=0 1 # # Test Case 2) If both replicas simulate an error before sending an ACK, -# the primary should delay killing the suspended thread until the -# timeout is reached (Rpl_semi_sync_master_no_tx should be 1). +# the primary should delay killing the Ack_thread until the +# timeout is reached. 
# connection server_1; #-- @@ -188,8 +179,6 @@ SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_ #-- #-- Test begins connection server_1_con2; -#-- Give enough time after timeout/ack received to query yes_tx/no_tx -SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait"; connection server_1; #-- Begin semi-sync transaction INSERT INTO t1 VALUES (1); @@ -198,14 +187,7 @@ connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; connection server_1; -#-- Ensure either ACK was received (yes_tx=1) or timeout (no_tx=1) -show status like 'Rpl_semi_sync_master_yes_tx'; -Variable_name Value -Rpl_semi_sync_master_yes_tx 0 -show status like 'Rpl_semi_sync_master_no_tx'; -Variable_name Value -Rpl_semi_sync_master_no_tx 1 -connection server_1_con2; +ERROR HY000: Lost connection to server during query # Check logs to ensure shutdown was delayed FOUND 2 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err # Validate slave data is in correct state @@ -267,8 +249,8 @@ COUNT(*)=0 # # Test Case 3) If one replica simulates a delay within the allowed # timeout and the other simulates an error before sending an ACK, the -# primary should delay killing the suspended thread until it receives an -# ACK from the delayed slave (Rpl_semi_sync_master_yes_tx should be 1). +# primary should delay killing the Ack_thread until it receives an +# ACK from the delayed slave. # connection server_1; #-- @@ -311,8 +293,6 @@ SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply"; #-- #-- Test begins connection server_1_con2; -#-- Give enough time after timeout/ack received to query yes_tx/no_tx -SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait"; connection server_1; #-- Begin semi-sync transaction INSERT INTO t1 VALUES (1); @@ -321,14 +301,7 @@ connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; connection server_1; -#-- Ensure either ACK was received (yes_tx=1) or timeout (no_tx=1) -show status like 'Rpl_semi_sync_master_yes_tx'; -Variable_name Value -Rpl_semi_sync_master_yes_tx 1 -show status like 'Rpl_semi_sync_master_no_tx'; -Variable_name Value -Rpl_semi_sync_master_no_tx 0 -connection server_1_con2; +ERROR HY000: Lost connection to server during query # Check logs to ensure shutdown was delayed FOUND 3 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err # Validate slave data is in correct state @@ -391,8 +364,7 @@ COUNT(*)=0 # active semi-sync connection in-tact. The slave should notice this, and # not issue a `QUIT` command to the primary, which would otherwise be # sent to kill an active connection. This test case validates that the -# slave does not send a `QUIT` in this case (Rpl_semi_sync_master_yes_tx -# should be 1 because server_3 will send the ACK within a valid timeout). +# slave does not send a `QUIT` in this case. 
# connection server_1; #-- @@ -435,8 +407,6 @@ SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply"; #-- #-- Test begins connection server_1_con2; -#-- Give enough time after timeout/ack received to query yes_tx/no_tx -SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait"; connection server_1; #-- Begin semi-sync transaction INSERT INTO t1 VALUES (1); @@ -445,14 +415,7 @@ connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; connection server_1; -#-- Ensure either ACK was received (yes_tx=1) or timeout (no_tx=1) -show status like 'Rpl_semi_sync_master_yes_tx'; -Variable_name Value -Rpl_semi_sync_master_yes_tx 1 -show status like 'Rpl_semi_sync_master_no_tx'; -Variable_name Value -Rpl_semi_sync_master_no_tx 0 -connection server_1_con2; +ERROR HY000: Lost connection to server during query # Check logs to ensure shutdown was delayed FOUND 4 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err # Validate slave data is in correct state @@ -506,16 +469,60 @@ Rpl_semi_sync_slave_status OFF SELECT COUNT(*)=0 from t1; COUNT(*)=0 1 +# +# Test Case 5) If a waiting-for-ACK user thread is killed (disconnected) +# during SHUTDOWN WAIT FOR ALL SLAVES, ensure the primary will still +# await the ACK from the replica before killing the Ack_receiver thread +# +connection server_1; +insert into t1 values (1); +include/save_master_gtid.inc +connection server_2; +include/sync_with_master_gtid.inc +include/stop_slave.inc +SET GLOBAL rpl_semi_sync_slave_enabled= 1; +include/start_slave.inc +connection server_1; +SET GLOBAL rpl_semi_sync_master_enabled= 1; +SET GLOBAL rpl_semi_sync_master_timeout= 2000; +show status like 'Rpl_semi_sync_master_status'; +Variable_name Value +Rpl_semi_sync_master_status ON +show status like 'Rpl_semi_sync_master_clients'; +Variable_name Value +Rpl_semi_sync_master_clients 1 +connection server_2; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,simulate_delay_semisync_slave_reply"; +connect con1, localhost, root,,; +connect con2, localhost, root,,; +connection con1; +insert into t1 values (2); +connection server_1; +# Wait for thd to begin semi-sync wait.. 
+# ..done +disconnect con1; +connection default; +connection con2; +SHUTDOWN WAIT FOR ALL SLAVES; +connection server_2; +include/assert_grep.inc [Ensure the primary waited for the ACK of the killed thread] +connection default; +connection server_1; +connection server_2; +include/stop_slave.inc +connection server_3; +include/stop_slave.inc +connection default; +connection server_1; ############################# # Cleanup ############################# connection server_2; -include/stop_slave.inc SET @@GLOBAL.rpl_semi_sync_slave_enabled = @sav_enabled_server_2; SET @@GLOBAL.debug_dbug= @sav_server_2_dbug; include/start_slave.inc connection server_3; -include/stop_slave.inc SET @@GLOBAL.rpl_semi_sync_slave_enabled = @sav_enabled_server_3; SET @@GLOBAL.debug_dbug= @sav_server_3_dbug; include/start_slave.inc diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_slave_enabled_consistent.result b/mysql-test/suite/rpl/r/rpl_semi_sync_slave_enabled_consistent.result index 99c3124957f..4195acb931d 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_slave_enabled_consistent.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_slave_enabled_consistent.result @@ -2,6 +2,9 @@ include/master-slave.inc [connection master] call mtr.add_suppression("Replication event checksum verification failed"); call mtr.add_suppression("could not queue event from master"); +call mtr.add_suppression("Semisync ack receiver.*error reading communication packets"); +call mtr.add_suppression("Semisync ack receiver got hangup"); +connection slave; # # Set up a semisync connection connection master; diff --git a/mysql-test/suite/rpl/r/rpl_show_slave_status.result b/mysql-test/suite/rpl/r/rpl_show_slave_status.result new file mode 100644 index 00000000000..e32b2f554ce --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_show_slave_status.result @@ -0,0 +1,75 @@ +include/master-slave.inc +[connection master] +* +* The purpose of this test is to prevent incorrect additions to SHOW +* SLAVE STATUS, which has happened several times in the past. +* +* We must never, _ever_, add extra rows to this output of SHOW SLAVE +* STATUS, except at the very end, as this breaks backwards compatibility +* with applications or scripts that parse the output. This also means that +* we cannot add _any_ new rows in a GA version if a different row was +* already added in a later MariaDB version, as this would make it impossible +* to merge the change up while preserving the order of rows. 
+* +connection slave; +SHOW SLAVE STATUS; +Slave_IO_State # +Master_Host # +Master_User # +Master_Port # +Connect_Retry # +Master_Log_File # +Read_Master_Log_Pos # +Relay_Log_File # +Relay_Log_Pos # +Relay_Master_Log_File # +Slave_IO_Running # +Slave_SQL_Running # +Replicate_Do_DB # +Replicate_Ignore_DB # +Replicate_Do_Table # +Replicate_Ignore_Table # +Replicate_Wild_Do_Table # +Replicate_Wild_Ignore_Table # +Last_Errno # +Last_Error # +Skip_Counter # +Exec_Master_Log_Pos # +Relay_Log_Space # +Until_Condition # +Until_Log_File # +Until_Log_Pos # +Master_SSL_Allowed # +Master_SSL_CA_File # +Master_SSL_CA_Path # +Master_SSL_Cert # +Master_SSL_Cipher # +Master_SSL_Key # +Seconds_Behind_Master # +Master_SSL_Verify_Server_Cert # +Last_IO_Errno # +Last_IO_Error # +Last_SQL_Errno # +Last_SQL_Error # +Replicate_Ignore_Server_Ids # +Master_Server_Id # +Master_SSL_Crl # +Master_SSL_Crlpath # +Using_Gtid # +Gtid_IO_Pos # +Replicate_Do_Domain_Ids # +Replicate_Ignore_Domain_Ids # +Parallel_Mode # +SQL_Delay # +SQL_Remaining_Delay # +Slave_SQL_Running_State # +Slave_DDL_Groups # +Slave_Non_Transactional_Groups # +Slave_Transactional_Groups # +Replicate_Rewrite_DB # +* +* When modifying this test after adding a column to SHOW SLAVE STATUS, +* _only_ additions at the end are allowed, the column number of existing +* columns must _not_ change! +* +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_start_alter_options.result b/mysql-test/suite/rpl/r/rpl_start_alter_options.result index 4c6135aaf5e..30854b12be1 100644 --- a/mysql-test/suite/rpl/r/rpl_start_alter_options.result +++ b/mysql-test/suite/rpl/r/rpl_start_alter_options.result @@ -3,8 +3,6 @@ include/master-slave.inc connection slave; stop slave; change master to master_use_gtid= current_pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead SET GLOBAL slave_parallel_threads=4; set global slave_parallel_mode=optimistic; set global gtid_strict_mode=1; @@ -96,8 +94,6 @@ include/start_slave.inc connection slave; stop slave; change master to master_use_gtid= current_pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead SET GLOBAL slave_parallel_threads=4; set global slave_parallel_mode=optimistic; set global gtid_strict_mode=1; @@ -189,8 +185,6 @@ include/start_slave.inc connection slave; stop slave; change master to master_use_gtid= current_pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead SET GLOBAL slave_parallel_threads=4; set global slave_parallel_mode=optimistic; set global gtid_strict_mode=1; @@ -321,8 +315,6 @@ include/start_slave.inc connection slave; stop slave; change master to master_use_gtid= current_pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead SET GLOBAL slave_parallel_threads=4; set global slave_parallel_mode=optimistic; set global gtid_strict_mode=1; @@ -414,8 +406,6 @@ include/start_slave.inc connection slave; stop slave; change master to master_use_gtid= current_pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. 
Please use master_demote_to_slave=1 instead SET GLOBAL slave_parallel_threads=4; set global slave_parallel_mode=optimistic; set global gtid_strict_mode=1; diff --git a/mysql-test/suite/rpl/r/rpl_using_gtid_default.result b/mysql-test/suite/rpl/r/rpl_using_gtid_default.result index 3bdc9d47fbb..24a316de1bd 100644 --- a/mysql-test/suite/rpl/r/rpl_using_gtid_default.result +++ b/mysql-test/suite/rpl/r/rpl_using_gtid_default.result @@ -58,8 +58,6 @@ include/start_slave.inc # to its default of Slave_Pos after RESET SLAVE. include/stop_slave.inc CHANGE MASTER TO MASTER_USE_GTID=Current_Pos; -Warnings: -Warning 1681 'master_use_gtid=current_pos' is deprecated and will be removed in a future release. Please use master_demote_to_slave=1 instead include/start_slave.inc include/stop_slave.inc RESET SLAVE; diff --git a/mysql-test/suite/rpl/r/show_status_stop_slave_race-7126.result b/mysql-test/suite/rpl/r/show_status_stop_slave_race-7126.result index 999d9417b3f..35cb3fcdc13 100644 --- a/mysql-test/suite/rpl/r/show_status_stop_slave_race-7126.result +++ b/mysql-test/suite/rpl/r/show_status_stop_slave_race-7126.result @@ -1,6 +1,5 @@ include/master-slave.inc [connection master] -call mtr.add_suppression("Master is configured to log replication events"); connection slave; connection slave; include/wait_for_slave_to_stop.inc diff --git a/mysql-test/suite/rpl/t/rpl_auditing.test b/mysql-test/suite/rpl/t/rpl_auditing.test new file mode 100644 index 00000000000..3d3cee9a7a0 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_auditing.test @@ -0,0 +1,77 @@ +if (!$SERVER_AUDIT2_SO) { + skip No SERVER_AUDIT2 plugin; +} + +source include/master-slave.inc; + +--disable_warnings +drop table if exists t1; +sync_slave_with_master; +reset master; +--enable_warnings + +--disable_warnings +CREATE TABLE IF NOT EXISTS mysql.server_audit_filters ( + filtername char(80) COLLATE utf8_bin NOT NULL DEFAULT '', + rule longtext CHARACTER SET utf8mb4 COLLATE utf8mb4_bin NOT NULL DEFAULT 'true' CHECK (json_valid(rule)), + CONSTRAINT c_filtername UNIQUE (filtername) +) ENGINE=Aria; + +CREATE TABLE IF NOT EXISTS mysql.server_audit_users (host char(60) COLLATE utf8_bin NOT NULL DEFAULT '', + user char(80) COLLATE utf8_bin NOT NULL DEFAULT '', + filtername char(80) NOT NULL DEFAULT '', + CONSTRAINT c_host_user UNIQUE (host, user) +) ENGINE=Aria; +--enable_warnings + +INSERT INTO mysql.server_audit_filters VALUES ('ignore_sys', '{"ignore_tables" : "mysql.*"}'); +INSERT INTO mysql.server_audit_users VALUES ('%','','ignore_sys'); +INSERT INTO mysql.server_audit_users VALUES ('%','root','ignore_sys'); + +install plugin server_audit soname 'server_audit2'; +set global server_audit_logging=on; + +# this is done to make test deterministic +# so the above 'set' command is always logged before the 'create table t1' +-- disable_query_log +-- disable_result_log +select * from mysql.server_audit_filters; +select * from mysql.server_audit_users; +-- enable_result_log +-- enable_query_log + +connection master; +create table t1 (a int); +insert into t1 values (1); +truncate t1; +drop table t1; +sync_slave_with_master; + +set global server_audit_logging=off; + +truncate mysql.server_audit_filters; +truncate mysql.server_audit_users; +INSERT INTO mysql.server_audit_filters VALUES ('no_logging','false'); +INSERT INTO mysql.server_audit_users VALUES ('%','','no_logging'); + +set global server_audit_logging=on; + +connection master; +create table t1 (a int); +insert into t1 values (1); +truncate t1; +drop table t1; +sync_slave_with_master; + +set global 
server_audit_logging=off; +uninstall plugin server_audit; +truncate mysql.server_audit_filters; +truncate mysql.server_audit_users; +let $MYSQLD_DATADIR= `SELECT @@datadir`; +# replace the timestamp and the hostname with constant values +--replace_regex /[0-9]* [0-9][0-9]:[0-9][0-9]:[0-9][0-9]\,[^,]*\,/TIME,HOSTNAME,/ /\,[1-9][0-9]*\,/,1,/ /\,[1-9][0-9]*/,ID/ /000001\\', [0-9]*,/#', POS,/ +cat_file $MYSQLD_DATADIR/server_audit.log; +remove_file $MYSQLD_DATADIR/server_audit.log; + +connection master; +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test b/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test index 02b31c065f9..4f0eafc4020 100644 --- a/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test +++ b/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test @@ -39,6 +39,7 @@ --source include/master-slave.inc --connection master +SET @org_log_warnings=@@GLOBAL.LOG_WARNINGS; SET GLOBAL LOG_WARNINGS=2; --connection slave @@ -110,6 +111,7 @@ CHANGE MASTER TO MASTER_USE_GTID=slave_pos; --source include/wait_for_pattern_in_file.inc --echo "===== Clean up =====" +SET GLOBAL LOG_WARNINGS=@org_log_warnings; --connection slave --source include/stop_slave.inc CHANGE MASTER TO MASTER_USE_GTID=no; @@ -117,5 +119,4 @@ CHANGE MASTER TO MASTER_USE_GTID=no; --connection master DROP TABLE t; -SET GLOBAL LOG_WARNINGS=default; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_change_master.test b/mysql-test/suite/rpl/t/rpl_change_master.test index 2758f9d6e27..992e23906e5 100644 --- a/mysql-test/suite/rpl/t/rpl_change_master.test +++ b/mysql-test/suite/rpl/t/rpl_change_master.test @@ -109,9 +109,4 @@ CHANGE MASTER TO MASTER_USER='root', MASTER_SSL=0, MASTER_SSL_CA='', MASTER_SSL_ CHANGE MASTER TO MASTER_USER='root', MASTER_PASSWORD='', MASTER_SSL=0; -# MDEV-20122: Deprecate MASTER_USE_GTID=Current_Pos to favor new MASTER_DEMOTE_TO_SLAVE option ---echo "Usage of CURRENT_POS in CHANGE MASTER MASTER_USE_GTID is dreprecated. 
-CHANGE MASTER TO MASTER_USE_GTID=CURRENT_POS; -CHANGE MASTER TO MASTER_USE_GTID=SLAVE_POS; - --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_domain_id_filter_master_crash.test b/mysql-test/suite/rpl/t/rpl_domain_id_filter_master_crash.test index cdfdc098f5a..2ce273860fe 100644 --- a/mysql-test/suite/rpl/t/rpl_domain_id_filter_master_crash.test +++ b/mysql-test/suite/rpl/t/rpl_domain_id_filter_master_crash.test @@ -6,9 +6,12 @@ connection master; +--disable_query_log call mtr.add_suppression("mysqld: Table '.*gtid_slave_pos' is marked as crashed and should be repaired"); call mtr.add_suppression("Checking table: './mysql/gtid_slave_pos'"); call mtr.add_suppression("mysql.gtid_slave_pos: 1 client is using or hasn't closed the table properly"); +call mtr.add_suppression("Could not read packet:.* errno: 11"); +--enable_query_log SET @@session.gtid_domain_id= 0; create table ti (a int auto_increment primary key) engine=innodb; diff --git a/mysql-test/suite/rpl/t/rpl_dump_request_retry_warning.test b/mysql-test/suite/rpl/t/rpl_dump_request_retry_warning.test index 1ee043623ae..633071a89b3 100644 --- a/mysql-test/suite/rpl/t/rpl_dump_request_retry_warning.test +++ b/mysql-test/suite/rpl/t/rpl_dump_request_retry_warning.test @@ -30,6 +30,10 @@ --let $rpl_skip_start_slave=1 --source include/master-slave.inc +--disable_query_log +call mtr.add_suppression("Could not read packet:.* errno: 11"); +--enable_query_log + # Do an insert on master CREATE TABLE t1(a int); INSERT INTO t1 VALUES(1); diff --git a/mysql-test/suite/rpl/t/rpl_get_lock.test b/mysql-test/suite/rpl/t/rpl_get_lock.test index b5c08858055..c6f2f6ec83b 100644 --- a/mysql-test/suite/rpl/t/rpl_get_lock.test +++ b/mysql-test/suite/rpl/t/rpl_get_lock.test @@ -1,6 +1,17 @@ source include/master-slave.inc; +--disable_query_log CALL mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); +call mtr.add_suppression("Could not read packet:.* errno: 11 "); +# The following one comes from calling dirty_close on client side +call mtr.add_suppression("Could not read packet:.* errno: 2 "); +call mtr.add_suppression("Could not read packet:.* errno: 35 "); +--enable_query_log + +let $org_log_warnings=`select @@global.log_warnings`; + +# Test extended warnings +SET GLOBAL LOG_WARNINGS=4; create table t1(n int); # Use of get_lock gives a warning for unsafeness if binlog_format=statement @@ -41,6 +52,10 @@ connection master1; drop table t1; sync_slave_with_master; +connection default; +--disable_query_log +--eval SET GLOBAL LOG_WARNINGS=$org_log_warnings; +--enable_query_log --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_gtid_crash.test b/mysql-test/suite/rpl/t/rpl_gtid_crash.test index 283298318be..e1a57f4b725 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_crash.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_crash.test @@ -12,6 +12,7 @@ call mtr.add_suppression("Checking table:"); call mtr.add_suppression("client is using or hasn't closed the table properly"); call mtr.add_suppression("Table .* is marked as crashed and should be repaired"); +call mtr.add_suppression("Could not read packet:.* errno: 11"); flush tables; ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB; diff --git a/mysql-test/suite/rpl/t/rpl_gtid_crash_myisam.test b/mysql-test/suite/rpl/t/rpl_gtid_crash_myisam.test index faf388f5bed..e113e17b7ec 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_crash_myisam.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_crash_myisam.test @@ -5,6 +5,10 @@ --let 
$rpl_topology=1->2 --source include/rpl_init.inc +--disable_query_log +call mtr.add_suppression("Could not read packet:.* errno: 11"); +--enable_query_log + --echo *** Test crashing master with InnoDB disabled, the binlog gtid state should still be correctly recovered. *** --connection server_1 diff --git a/mysql-test/suite/rpl/t/rpl_gtid_grouping.test b/mysql-test/suite/rpl/t/rpl_gtid_grouping.test index 66448c4f96c..bb1057a6e4f 100644 --- a/mysql-test/suite/rpl/t/rpl_gtid_grouping.test +++ b/mysql-test/suite/rpl/t/rpl_gtid_grouping.test @@ -93,5 +93,4 @@ CHANGE MASTER TO MASTER_USE_GTID=no; --connection master DROP TABLE t; -SET GLOBAL LOG_WARNINGS=default; --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test b/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test index bd66a249ada..e593786655b 100644 --- a/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test +++ b/mysql-test/suite/rpl/t/rpl_heartbeat_debug.test @@ -3,6 +3,12 @@ --source include/have_debug.inc --source include/master-slave.inc +--disable_query_log +CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); +CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); +call mtr.add_suppression("Could not read packet:.* errno: 11"); +--enable_query_log + connection slave; --source include/stop_slave.inc set @restore_slave_net_timeout= @@global.slave_net_timeout; @@ -14,14 +20,13 @@ set @@global.slave_net_timeout= 10; ### Checking the range ### -# + # default period slave_net_timeout/2 # --query_vertical show status like 'Slave_heartbeat_period'; SET @saved_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug="+d,simulate_slave_heartbeat_network_error"; -CALL mtr.add_suppression('SET @master_heartbeat_period to master failed with error'); -CALL mtr.add_suppression('Master command COM_REGISTER_SLAVE failed: failed registering on master, reconnecting to try again'); + --source include/start_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_packet.test b/mysql-test/suite/rpl/t/rpl_packet.test index cbde486bcbb..f1814e61f9b 100644 --- a/mysql-test/suite/rpl/t/rpl_packet.test +++ b/mysql-test/suite/rpl/t/rpl_packet.test @@ -20,6 +20,9 @@ source include/master-slave.inc; call mtr.add_suppression("Slave I/O: Got a packet bigger than 'slave_max_allowed_packet' bytes, .*error.* 1153"); call mtr.add_suppression("Log entry on master is longer than slave_max_allowed_packet"); +call mtr.add_suppression("Could not write packet:"); +call mtr.add_suppression("Got a packet bigger than 'max_allowed_packet' bytes"); + let $db= DB_NAME_OF_MAX_LENGTH_AKA_NAME_LEN_64_BYTES_____________________; disable_warnings; eval drop database if exists $db; diff --git a/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test b/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test index f5e48282326..09da8c2df15 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_optimistic.test @@ -508,6 +508,7 @@ DELETE FROM t2; # The 1st of the following two trx:s a blocker on slave --connection server_2 +SET @org_log_warnings=@@GLOBAL.LOG_WARNINGS; set global log_warnings=2; BEGIN; INSERT INTO t1 SET a=1; @@ -555,7 +556,7 @@ DELETE FROM t2; # --connection server_2 --source include/stop_slave.inc -set global log_warnings=default; +set global log_warnings=@org_log_warnings; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; --source include/start_slave.inc 
diff --git a/mysql-test/suite/rpl/t/rpl_parallel_optimistic_xa.test b/mysql-test/suite/rpl/t/rpl_parallel_optimistic_xa.test index 35c22d1e92e..6e5cf4367a7 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_optimistic_xa.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_optimistic_xa.test @@ -206,7 +206,6 @@ while($i > 0) # --connection slave --source include/stop_slave.inc -set global log_warnings=default; SET GLOBAL slave_parallel_mode=@old_parallel_mode; SET GLOBAL slave_parallel_threads=@old_parallel_threads; --source include/start_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_retry.test b/mysql-test/suite/rpl/t/rpl_parallel_retry.test index fe6f40d2c85..1e87c85cd6c 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_retry.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_retry.test @@ -410,6 +410,44 @@ DROP function foo; --sync_slave_with_master server_2 +# +# MDEV-33303: slave_parallel_mode=optimistic should not report the mode's +# specific temporary errors. +# + +--connection server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET GLOBAL slave_parallel_threads=4; + +--connection server_1 +# The problem occurred in the code path for row-based updates in tables +# with no primary/unique key, where a scan is needed. +CREATE TABLE t1 (a INT, b VARCHAR(123)) ENGINE=InnoDB; +INSERT INTO t1 VALUES(1, 'asdf'); +UPDATE t1 SET b='zxf1' WHERE a=1; +UPDATE t1 SET b='\n' WHERE a=1; + +--connection server_2 +# Inject a small sleep in the code that makes the race easier to hit. +SET @old_dbug=@@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,write_row_inject_sleep_before_ha_write_row"; +--source include/start_slave.inc + +--connection server_1 +# Here, we would get errors in the slave's error log: +# [ERROR] mariadbd: Can't find record in 't1' +--sync_slave_with_master server_2 + +--connection server_1 +DROP TABLE t1; +--sync_slave_with_master server_2 +--source include/stop_slave.inc +SET GLOBAL debug_dbug=@old_dbug; +SET GLOBAL slave_parallel_threads=@old_parallel_threads; +--source include/start_slave.inc + + # # MDEV-12746 rpl.rpl_parallel_optimistic_nobinlog fails committing out of order at retry # diff --git a/mysql-test/suite/rpl/t/rpl_parallel_sbm.test b/mysql-test/suite/rpl/t/rpl_parallel_sbm.test index 58c0db15e47..9c502ff6cb9 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_sbm.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_sbm.test @@ -67,6 +67,13 @@ if (`SELECT $sbm_trx1_arrive > ($seconds_since_idling + 1)`) --echo # if the event is still to be delayed, SBM should resume accordingly --source include/stop_slave.inc + +--echo # Lock t1 on slave to ensure the event can't finish (and thereby update +--echo # Seconds_Behind_Master) so slow running servers don't accidentally +--echo # catch up to the master before checking SBM. 
+--connection server_2 +LOCK TABLES t1 WRITE; + --source include/start_slave.inc --connection slave @@ -86,6 +93,9 @@ if (`SELECT $sbm_trx1_after_1s_sleep <= $sbm_trx1_arrive`) } --echo # ..done +--connection server_2 +UNLOCK TABLES; + --source include/sync_with_master_gtid.inc --echo # diff --git a/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test b/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test index 35879e98e66..1610292ecb7 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_stop_slave.test @@ -57,6 +57,7 @@ COMMIT; INSERT INTO t3 VALUES(21, 21); INSERT INTO t3 VALUES(22, 22); --save_master_pos +--let $master_pos= query_get_value(SHOW MASTER STATUS, Position, 1) # Start a connection that will block the replicated transaction halfway. --connection con_temp1 @@ -64,7 +65,27 @@ BEGIN; INSERT INTO t2 VALUES (21); --connection server_2 -START SLAVE; + +# +# Parallel replication will complete any in-progress event group at STOP SLAVE, +# but only if the event group is already queued up for the worker thread. If +# the SQL driver thread is delayed in queueing up events, the parallel worker +# thread can abort the event group, leaving the non-transactional update to the +# MyISAM table that cannot be rolled back (MDEV-7432). If this happens the test +# would fail with duplicate key error after slave restart. +# +# To avoid this, we here wait for the IO thread to read all master events, and +# for the SQL driver thread to queue all the events for workers. This wait +# should be removed if/when MDEV-7432 is fixed. +# +START SLAVE IO_THREAD; +--let $slave_param= Read_Master_Log_Pos +--let $slave_param_value= $master_pos +--source include/wait_for_slave_param.inc +START SLAVE SQL_THREAD; +--let $wait_condition= SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE State LIKE '%Slave has read all relay log; waiting for more updates%' +--source include/wait_condition.inc + # Wait for the MyISAM change to be visible, after which replication will wait # for con_temp1 to roll back. --let $wait_condition= SELECT COUNT(*) = 1 FROM t1 WHERE a=20 diff --git a/mysql-test/suite/rpl/t/rpl_parallel_temptable.test b/mysql-test/suite/rpl/t/rpl_parallel_temptable.test index 3684763dad7..8bab4af2b43 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_temptable.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_temptable.test @@ -264,6 +264,30 @@ SET GLOBAL slave_parallel_mode=@old_mode; --source include/start_slave.inc +--echo *** MDEV33426: Memory allocation accounting incorrect for replicated temptable +--connection server_1 +CREATE TEMPORARY TABLE t5 (a int) ENGINE=Aria; +CREATE TEMPORARY TABLE t6 (a int) ENGINE=Heap; +INSERT INTO t5 VALUES (1); +INSERT INTO t6 VALUES (2); +--save_master_pos + +--connection server_2 +--sync_with_master +--source include/stop_slave.inc + +--connection server_1 +INSERT INTO t1 SELECT a+40, 5 FROM t5; +INSERT INTO t1 SELECT a+40, 6 FROM t6; +DROP TABLE t5, t6; + +--save_master_pos + +--connection server_2 +--source include/start_slave.inc +--sync_with_master +SELECT * FROM t1 WHERE a>=40 ORDER BY a; + # Clean up. 
--connection server_2 diff --git a/mysql-test/suite/rpl/t/rpl_row_find_row_debug.test b/mysql-test/suite/rpl/t/rpl_row_find_row_debug.test index e3edabe239d..f06b2982ec5 100644 --- a/mysql-test/suite/rpl/t/rpl_row_find_row_debug.test +++ b/mysql-test/suite/rpl/t/rpl_row_find_row_debug.test @@ -51,7 +51,9 @@ DROP TABLE t1; # cleanup --source include/stop_slave.inc SET @@GLOBAL.debug_dbug = @saved_dbug; +--disable_query_log --eval SET GLOBAL log_warnings = $log_warnings_save +--enable_query_log --source include/start_slave.inc --source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.cnf b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.cnf new file mode 100644 index 00000000000..e8e03e71ec8 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.cnf @@ -0,0 +1,10 @@ +!include ../my.cnf + +[mysqld.1] +log-warnings=9 +rpl_semi_sync_master_enabled=1 +rpl_semi_sync_master_wait_point=AFTER_COMMIT + +[mysqld.2] +log-warnings=9 +rpl_semi_sync_slave_enabled=1 diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test new file mode 100644 index 00000000000..65b6e8b4f7b --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test @@ -0,0 +1,77 @@ +# +# This test ensures that, when using semi-sync with the wait_point +# AFTER_COMMIT, each thread awaiting an ACK is only woken up when its ACK (or +# an ACK for a later commit in binlog) has been received from the slave. +# +# Prior to MDEV-33551, all threads would be woken up for each ACK received, +# leading to large slowdowns, as each thread would check if the ACK was for it +# in mutual exclusion from the others. +# +# To ensure this, a debug-build-only log warning is added into +# Repl_semi_sync_master::commit_trx() at wakeup time, which will complain if +# the awoken thread's binlog wait coordinates are after the coordinate of the +# last ACK coordinates. Then, we use binlog group commit to commit a series of +# transactions, such that each will await an ACK concurrently. After all +# transactions have been finished (i.e. ACKed and committed), we check the log +# for the expected absence of the added debug warning message. 
+# +# +# References: +# MDEV-33551: Semi-sync Wait Point AFTER_COMMIT Slow on Workloads with Heavy +# Concurrency +# +--source include/have_binlog_format_row.inc +--source include/have_debug.inc +--source include/master-slave.inc + +--connection master +call mtr.add_suppression("Got an error reading communication packets"); +set @save_bgc_count= @@global.binlog_commit_wait_count; +set @save_bgc_usec= @@global.binlog_commit_wait_usec; +set @save_debug_dbug= @@global.debug_dbug; +set @@global.binlog_commit_wait_count=3; +set @@global.binlog_commit_wait_usec=10000000; +set @@global.debug_dbug="+d,testing_cond_var_per_thd"; + +--echo # Ensure semi-sync is on +--connection slave +let $status_var= rpl_semi_sync_slave_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; + +--connection master +let $status_var= rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; + +--echo # Create three transactions to binlog group commit together +--connection master +--send create table t1 (a int) +--connection server_1 +--send create table t2 (a int) +--connection default +--send create table t3 (a int) + +--connection master +--reap +--connection server_1 +--reap +--connection default +--reap + +--let $assert_text= Check that there is no 'Thread awaiting semi-sync ACK was awoken before its ACK' warning in error log. +--let $assert_select=Thread awaiting semi-sync ACK was awoken before its ACK +--let $assert_file= $MYSQLTEST_VARDIR/log/mysqld.1.err +--let $assert_count= 0 +--let $assert_only_after=CURRENT_TEST +--source include/assert_grep.inc + +--echo # +--echo # Cleanup +--connection master +set @@global.binlog_commit_wait_count=@save_bgc_count; +set @@global.binlog_commit_wait_usec=@save_bgc_usec; +set @@global.debug_dbug=@save_debug_dbug; +drop table t1, t2, t3; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_crash.inc b/mysql-test/suite/rpl/t/rpl_semi_sync_crash.inc index 01b0d0e5050..b092d0018b0 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_crash.inc +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_crash.inc @@ -81,6 +81,20 @@ SELECT @@GLOBAL.gtid_current_pos; --enable_reconnect --source include/wait_until_connected_again.inc +--let $slave_semi_sync_enabled= query_get_value(SHOW VARIABLES LIKE 'Rpl_semi_sync_slave_enabled', Value, 1) +--echo # Ensuring variable rpl_semi_sync_slave_enabled is ON.. +if (`SELECT strcmp("ON", "$slave_semi_sync_enabled") != 0`) +{ + --die Slave started with rpl_semi_sync_slave_enabled=1 yet it is OFF in the variable output +} + +--let $slave_semi_sync_status= query_get_value(SHOW STATUS LIKE 'Rpl_semi_sync_slave_status', Value, 1) +--echo # Ensuring status rpl_semi_sync_slave_status is OFF.. +if (`SELECT strcmp("OFF", "$slave_semi_sync_status") != 0`) +{ + --die Slave started with skip-slave-start yet started with rpl_semi_sync_slave_status=ON +} + --let $assert_cond= COUNT(*) = $expected_rows_on_master FROM t1 --let $assert_text= Table t1 should have $expected_rows_on_master rows. 
--source include/assert.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_fail_over.test b/mysql-test/suite/rpl/t/rpl_semi_sync_fail_over.test index 17d7b50d614..e1e7a4f1de3 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_fail_over.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_fail_over.test @@ -10,6 +10,11 @@ --source include/have_binlog_format_row.inc --source include/master-slave.inc +--disable_query_log +call mtr.add_suppression("Could not read packet:.* errno: 11"); +flush tables; +--enable_query_log + # Initial slave --connection server_2 --source include/stop_slave.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.cnf b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.cnf index 2cf1b1786bd..5f69b557188 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.cnf +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.cnf @@ -1,13 +1,13 @@ !include ../my.cnf [mysqld.1] -log_warnings=9 +log_warnings=3 [mysqld.2] -log_warnings=9 +log_warnings=3 [mysqld.3] -log_warnings=9 +log_warnings=3 [ENV] SERVER_MYPORT_3= @mysqld.3.port diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc index a41ef2756d1..d20ef628327 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc @@ -64,9 +64,6 @@ show status like 'Rpl_semi_sync_master_clients'; --echo #-- Test begins --connection server_1_con2 ---echo #-- Give enough time after timeout/ack received to query yes_tx/no_tx -SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait"; - --write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect wait EOF @@ -82,15 +79,11 @@ let $status_var_value= 1; source include/wait_for_status_var.inc; --echo #-- Begin master shutdown ---send SHUTDOWN WAIT FOR ALL SLAVES +SHUTDOWN WAIT FOR ALL SLAVES; +--source include/wait_until_disconnected.inc --connection server_1 ---reap ---echo #-- Ensure either ACK was received (yes_tx=1) or timeout (no_tx=1) -show status like 'Rpl_semi_sync_master_yes_tx'; -show status like 'Rpl_semi_sync_master_no_tx'; - ---connection server_1_con2 +--error 2013 --reap --source include/wait_until_disconnected.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test index fca6de4c209..c321f2bf72f 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test @@ -5,7 +5,7 @@ # This test validates that data is consistent between a primary and replica # in semi-sync mode when the primary is issued `SHUTDOWN WAIT FOR SLAVES` # during an active communication. More specifically, the primary should not -# kill the connection until it is sure a replica has received all binlog +# kill the Ack_thread until it is sure a replica has received all binlog # data, i.e. once the primary receives the ACK. If a primary is issued a # shutdown before receiving an ACK, it should wait until either 1) the ACK is # received, or 2) the configured timeout (rpl_semi_sync_master_timeout) is @@ -15,23 +15,18 @@ # Using a topology consisting of one primary with two replicas, all in # semi-sync mode, we use DEBUG_DBUG to simulate an error or delay on the # replicas during an active communication while the primary is issued -# `SHUTDOWN WAIT FOR SLAVES`. 
We create four test cases to ensure the primary -# will correctly wait for the communication to finish, and use the semi-sync -# status variables Rpl_semi_sync_master_yes_tx and Rpl_semi_sync_master_no_tx -# to ensure the connection was not prematurely killed due to the shutdown. +# `SHUTDOWN WAIT FOR SLAVES`. We create four test cases to ensure the +# Ack_thread is not prematurely killed due to the shutdown. # Test Case 1) If both replicas simulate a delay that is within the allowed -# timeout, the primary should delay killing the suspended thread -# until an ACK is received (Rpl_semi_sync_master_yes_tx should -# be 1). +# timeout, the primary should delay killing the Ack_thread +# until an ACK is received. # Test Case 2) If both replicas simulate an error before sending an ACK, the -# primary should delay killing the suspended thread until the -# the timeout is reached (Rpl_semi_sync_master_no_tx should be -# 1). +# primary should delay killing the Ack_thread until the +# the timeout is reached. # Test Case 3) If one replica simulates a delay within the allowed timeout # and the other simulates an error before sending an ACK, the -# primary should delay killing the suspended thread until it -# receives an ACK from the delayed slave -# (Rpl_semi_sync_master_yes_tx should be 1). +# primary should delay killing the Ack_thread until it +# receives an ACK from the delayed slave. # Test Case 4) If a replica errors before sending an ACK, it will cause the # IO thread to stop and handle the error. During error handling, # if semi-sync is active, the replica will form a new connection @@ -41,9 +36,11 @@ # slave should notice this, and not issue a `QUIT` command to # the primary, which would otherwise be sent to kill an active # connection. This test case validates that the slave does not -# send a `QUIT` in this case (Rpl_semi_sync_master_yes_tx should -# be 1 because server_3 will send the ACK within a valid -# timeout). +# send a `QUIT` in this case. +# Test Case 5) If a waiting-for-ACK user thread is killed (disconnected) +# during SHUTDOWN WAIT FOR ALL SLAVES, ensure the primary will +# still await the ACK from the replica before killing the +# Ack_thread. # # References: # MDEV-11853: semisync thread can be killed after sync binlog but before ACK @@ -58,6 +55,7 @@ --echo # Note: Simulated slave delay is hardcoded to 800 milliseconds --echo # Note: Simulated master shutdown delay is hardcoded to 500 milliseconds +--source include/have_innodb.inc --source include/have_debug.inc --let $rpl_topology=1->2, 1->3 --source include/rpl_init.inc @@ -90,7 +88,7 @@ set @sav_enabled_server_3= @@GLOBAL.rpl_semi_sync_slave_enabled; set @sav_server_3_dbug= @@GLOBAL.debug_dbug; --connection server_1 -CREATE TABLE t1 (a int); +CREATE TABLE t1 (a int) engine=innodb; --save_master_pos --let i= 2 @@ -112,8 +110,8 @@ while (`SELECT $i <= $slave_last`) --echo # --echo # Test Case 1) If both replicas simulate a delay that is within the ---echo # allowed timeout, the primary should delay killing the suspended thread ---echo # until an ACK is received (Rpl_semi_sync_master_yes_tx should be 1). +--echo # allowed timeout, the primary should delay killing the Ack_thread +--echo # until an ACK is received. 
--echo # --let server_2_dbug= "+d,simulate_delay_semisync_slave_reply" --let server_3_dbug= "+d,simulate_delay_semisync_slave_reply" @@ -124,8 +122,8 @@ while (`SELECT $i <= $slave_last`) --echo # --echo # Test Case 2) If both replicas simulate an error before sending an ACK, ---echo # the primary should delay killing the suspended thread until the ---echo # timeout is reached (Rpl_semi_sync_master_no_tx should be 1). +--echo # the primary should delay killing the Ack_thread until the +--echo # timeout is reached. --echo # --let server_2_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" --let server_3_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" @@ -137,8 +135,8 @@ while (`SELECT $i <= $slave_last`) --echo # --echo # Test Case 3) If one replica simulates a delay within the allowed --echo # timeout and the other simulates an error before sending an ACK, the ---echo # primary should delay killing the suspended thread until it receives an ---echo # ACK from the delayed slave (Rpl_semi_sync_master_yes_tx should be 1). +--echo # primary should delay killing the Ack_thread until it receives an +--echo # ACK from the delayed slave. --echo # --let server_2_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" --let server_3_dbug= "+d,simulate_delay_semisync_slave_reply" @@ -156,8 +154,7 @@ while (`SELECT $i <= $slave_last`) --echo # active semi-sync connection in-tact. The slave should notice this, and --echo # not issue a `QUIT` command to the primary, which would otherwise be --echo # sent to kill an active connection. This test case validates that the ---echo # slave does not send a `QUIT` in this case (Rpl_semi_sync_master_yes_tx ---echo # should be 1 because server_3 will send the ACK within a valid timeout). +--echo # slave does not send a `QUIT` in this case. --echo # --let server_2_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" --let server_3_dbug= "+d,simulate_delay_semisync_slave_reply" @@ -166,18 +163,108 @@ while (`SELECT $i <= $slave_last`) --let server_3_expect_row_count= 1 --source rpl_semi_sync_shutdown_await_ack.inc +# +# Added with MDEV-33551 +# +--echo # +--echo # Test Case 5) If a waiting-for-ACK user thread is killed (disconnected) +--echo # during SHUTDOWN WAIT FOR ALL SLAVES, ensure the primary will still +--echo # await the ACK from the replica before killing the Ack_receiver thread +--echo # +--connection server_1 +insert into t1 values (1); +--source include/save_master_gtid.inc + +--connection server_2 +--source include/sync_with_master_gtid.inc +--source include/stop_slave.inc +SET GLOBAL rpl_semi_sync_slave_enabled= 1; +--source include/start_slave.inc + +--connection server_1 +SET GLOBAL rpl_semi_sync_master_enabled= 1; +SET GLOBAL rpl_semi_sync_master_timeout= 2000; + +--let $status_var= Rpl_semi_sync_master_clients +--let $status_var_value= 1 +source include/wait_for_status_var.inc; + +show status like 'Rpl_semi_sync_master_status'; +show status like 'Rpl_semi_sync_master_clients'; + +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET GLOBAL debug_dbug="+d,simulate_delay_semisync_slave_reply"; + +--connect(con1, localhost, root,,) +--connect(con2, localhost, root,,) + +--connection con1 +--send insert into t1 values (2) + +--connection server_1 +--echo # Wait for thd to begin semi-sync wait.. 
+--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = 'Waiting for semi-sync ACK from slave' +--source include/wait_condition.inc +--source include/wait_condition.inc +--echo # ..done + +--disconnect con1 + +--connection default +--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +wait +EOF + +--connection con2 +SHUTDOWN WAIT FOR ALL SLAVES; +--source include/wait_until_disconnected.inc + +# Run assert_grep on server_2 as it uses SQL commands for verification, but +# server_1 has gone away +--connection server_2 +--let $assert_text= Ensure the primary waited for the ACK of the killed thread +--let $assert_select= Delaying shutdown to await semi-sync ACK +--let $assert_file= $MYSQLTEST_VARDIR/log/mysqld.1.err +--let $assert_count= 5 +--let $assert_only_after=CURRENT_TEST +--source include/assert_grep.inc + +--connection default +--source include/wait_until_disconnected.inc + +--connection server_1 +--source include/wait_until_disconnected.inc + +--connection server_2 +--let $rpl_allow_error= 1 +source include/stop_slave.inc; +--connection server_3 +source include/stop_slave.inc; +--let $rpl_allow_error= + +--connection default +--append_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect +restart +EOF +--enable_reconnect +--source include/wait_until_connected_again.inc + +--connection server_1 +--enable_reconnect +--source include/wait_until_connected_again.inc + + --echo ############################# --echo # Cleanup --echo ############################# --connection server_2 -source include/stop_slave.inc; SET @@GLOBAL.rpl_semi_sync_slave_enabled = @sav_enabled_server_2; SET @@GLOBAL.debug_dbug= @sav_server_2_dbug; source include/start_slave.inc; --connection server_3 -source include/stop_slave.inc; SET @@GLOBAL.rpl_semi_sync_slave_enabled = @sav_enabled_server_3; SET @@GLOBAL.debug_dbug= @sav_server_3_dbug; source include/start_slave.inc; diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_slave_enabled_consistent.test b/mysql-test/suite/rpl/t/rpl_semi_sync_slave_enabled_consistent.test index 9e388ab4419..ca7e788780c 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_slave_enabled_consistent.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_slave_enabled_consistent.test @@ -14,6 +14,9 @@ call mtr.add_suppression("Replication event checksum verification failed"); call mtr.add_suppression("could not queue event from master"); +call mtr.add_suppression("Semisync ack receiver.*error reading communication packets"); +call mtr.add_suppression("Semisync ack receiver got hangup"); +--sync_slave_with_master --echo # --echo # Set up a semisync connection diff --git a/mysql-test/suite/rpl/t/rpl_show_slave_status.test b/mysql-test/suite/rpl/t/rpl_show_slave_status.test new file mode 100644 index 00000000000..f4bbb5faeab --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_show_slave_status.test @@ -0,0 +1,27 @@ +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--echo * +--echo * The purpose of this test is to prevent incorrect additions to SHOW +--echo * SLAVE STATUS, which has happened several times in the past. +--echo * +--echo * We must never, _ever_, add extra rows to this output of SHOW SLAVE +--echo * STATUS, except at the very end, as this breaks backwards compatibility +--echo * with applications or scripts that parse the output. 
This also means that +--echo * we cannot add _any_ new rows in a GA version if a different row was +--echo * already added in a later MariaDB version, as this would make it impossible +--echo * to merge the change up while preserving the order of rows. +--echo * + +--connection slave +--replace_column 1 # 2 # 3 # 4 # 5 # 6 # 7 # 8 # 9 # 10 # 11 # 12 # 13 # 14 # 15 # 16 # 17 # 18 # 19 # 20 # 21 # 22 # 23 # 24 # 25 # 26 # 27 # 28 # 29 # 30 # 31 # 32 # 33 # 34 # 35 # 36 # 37 # 38 # 39 # 40 # 41 # 42 # 43 # 44 # 45 # 46 # 47 # 48 # 49 # 50 # 51 # 52 # 53 # 54 # +query_vertical +SHOW SLAVE STATUS; + +--echo * +--echo * When modifying this test after adding a column to SHOW SLAVE STATUS, +--echo * _only_ additions at the end are allowed, the column number of existing +--echo * columns must _not_ change! +--echo * + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/show_status_stop_slave_race-7126.test b/mysql-test/suite/rpl/t/show_status_stop_slave_race-7126.test index cd0f8aad106..815b2537e93 100644 --- a/mysql-test/suite/rpl/t/show_status_stop_slave_race-7126.test +++ b/mysql-test/suite/rpl/t/show_status_stop_slave_race-7126.test @@ -3,7 +3,10 @@ # --source include/master-slave.inc +--disable_query_log call mtr.add_suppression("Master is configured to log replication events"); +call mtr.add_suppression("Could not read packet:.* errno: 11"); +--enable_query_log --connection slave diff --git a/mysql-test/suite/s3/partition.result b/mysql-test/suite/s3/partition.result index 9face3ec947..20530bd399a 100644 --- a/mysql-test/suite/s3/partition.result +++ b/mysql-test/suite/s3/partition.result @@ -55,8 +55,6 @@ ERROR HY000: Table 't2' is read only ALTER TABLE t2 ANALYZE PARTITION p3; Table Op Msg_type Msg_text s3.t2 analyze status Table 's3.t2' is read only -s3.t2 analyze status Engine-independent statistics collected -s3.t2 analyze status OK SELECT count(*) FROM t2; count(*) 6 diff --git a/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result b/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result index 72b4041599a..43988a81f4f 100644 --- a/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result +++ b/mysql-test/suite/sys_vars/r/slave_transaction_retry_errors.result @@ -1,20 +1,20 @@ select @@global.slave_transaction_retry_errors; @@global.slave_transaction_retry_errors -1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,5000,400 +1158,1159,1160,1161,1205,1213,1020,1429,2013,12701,10,20,5000,400 select @@session.slave_transaction_retry_errors; ERROR HY000: Variable 'slave_transaction_retry_errors' is a GLOBAL variable show global variables like 'slave_transaction_retry_errors'; Variable_name Value -slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,5000,400 +slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701,10,20,5000,400 show session variables like 'slave_transaction_retry_errors'; Variable_name Value -slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,5000,400 +slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701,10,20,5000,400 select * from information_schema.global_variables where variable_name='slave_transaction_retry_errors'; VARIABLE_NAME VARIABLE_VALUE -SLAVE_TRANSACTION_RETRY_ERRORS 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,5000,400 +SLAVE_TRANSACTION_RETRY_ERRORS 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701,10,20,5000,400 select * from information_schema.session_variables where 
variable_name='slave_transaction_retry_errors'; VARIABLE_NAME VARIABLE_VALUE -SLAVE_TRANSACTION_RETRY_ERRORS 1158,1159,1160,1161,1205,1213,1429,2013,12701,10,20,5000,400 +SLAVE_TRANSACTION_RETRY_ERRORS 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701,10,20,5000,400 set global slave_transaction_retry_errors=1; ERROR HY000: Variable 'slave_transaction_retry_errors' is a read only variable set session slave_transaction_retry_errors=1; diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 0a3185693e8..83e137a946d 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -1015,6 +1015,18 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST NULL READ_ONLY YES COMMAND_LINE_ARGUMENT REQUIRED +VARIABLE_NAME INNODB_LOG_SPIN_WAIT_DELAY +SESSION_VALUE NULL +DEFAULT_VALUE 0 +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE INT UNSIGNED +VARIABLE_COMMENT Delay between log buffer spin lock polls (0 to use a blocking latch) +NUMERIC_MIN_VALUE 0 +NUMERIC_MAX_VALUE 6000 +NUMERIC_BLOCK_SIZE 0 +ENUM_VALUE_LIST NULL +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_LRU_FLUSH_SIZE SESSION_VALUE NULL DEFAULT_VALUE 32 @@ -1387,6 +1399,18 @@ NUMERIC_BLOCK_SIZE 0 ENUM_VALUE_LIST NULL READ_ONLY NO COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME INNODB_SNAPSHOT_ISOLATION +SESSION_VALUE OFF +DEFAULT_VALUE OFF +VARIABLE_SCOPE SESSION +VARIABLE_TYPE BOOLEAN +VARIABLE_COMMENT Use snapshot isolation (write-write conflict detection). +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST OFF,ON +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME INNODB_SORT_BUFFER_SIZE SESSION_VALUE NULL DEFAULT_VALUE 1048576 diff --git a/mysql-test/suite/sysschema/t/fn_ps_thread_trx_info.test b/mysql-test/suite/sysschema/t/fn_ps_thread_trx_info.test index ddf462473ca..19ea013f500 100644 --- a/mysql-test/suite/sysschema/t/fn_ps_thread_trx_info.test +++ b/mysql-test/suite/sysschema/t/fn_ps_thread_trx_info.test @@ -78,7 +78,7 @@ SELECT JSON_CONTAINS(@json_doc, '"COMMIT"', '$[0].statements_executed[1].sql_tex SET @sys.ps_thread_trx_info.max_length = 100; # Should return an error JSON object ---replace_regex /Row 1[1-2] was/Row 1X was/ +--replace_regex /Row \d+ was/Row 1X was/ SELECT sys.ps_thread_trx_info(@ps_thread_id); # Setting the user variable back to NULL should reset to 65535 from sys_config, and no truncation diff --git a/mysys/lf_alloc-pin.c b/mysys/lf_alloc-pin.c index fc3f320a623..4dc41645530 100644 --- a/mysys/lf_alloc-pin.c +++ b/mysys/lf_alloc-pin.c @@ -291,7 +291,7 @@ static int harvest_pins(LF_PINS *el, struct st_harvester *hv) { for (i= 0; i < LF_PINBOX_PINS; i++) { - void *p= el->pin[i]; + void *p= my_atomic_loadptr((void **)&el->pin[i]); if (p) *hv->granary++= p; } @@ -316,7 +316,7 @@ static int match_pins(LF_PINS *el, void *addr) LF_PINS *el_end= el+LF_DYNARRAY_LEVEL_LENGTH; for (; el < el_end; el++) for (i= 0; i < LF_PINBOX_PINS; i++) - if (el->pin[i] == addr) + if (my_atomic_loadptr((void **)&el->pin[i]) == addr) return 1; return 0; } @@ -501,7 +501,8 @@ void *lf_alloc_new(LF_PINS *pins) { node= allocator->top; lf_pin(pins, 0, node); - } while (node != allocator->top && LF_BACKOFF()); + } while (node != my_atomic_loadptr((void **)(char *)&allocator->top) + && LF_BACKOFF()); if (!node) { node= (void *)my_malloc(key_memory_lf_node, allocator->element_size, diff --git a/mysys/my_bitmap.c b/mysys/my_bitmap.c index 9893c7e4a58..c9bbcc4b06e 100644 --- 
a/mysys/my_bitmap.c
+++ b/mysys/my_bitmap.c
@@ -13,17 +13,28 @@
   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
-  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335  USA */
+  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335
+  USA
+ */
 
 /*
-  Handling of uchar arrays as large bitmaps.
+  Handling of my_bitmap_map (ulonglong) arrays as large bitmaps.
 
   API limitations (or, rather asserted safety assumptions,
   to encourage correct programming)
 
-  * the internal size is a set of 32 bit words
+  * the internal storage is a set of 64 bit words
   * the number of bits specified in creation can be any number > 0
 
+  Implementation notes:
+  * MY_BITMAP includes a pointer, last_word_ptr, to the last word.
+    The implication is that if one copies bitmaps to another memory
+    location, one has to call create_last_bit_mask() on the bitmap to
+    fix the internal pointer.
+  * The unused part of the last word should always be 0.
+    This avoids special handling of the last word in several cases.
+    This is checked for most calls to bitmap functions.
+
   TODO:
   Make assembler thread safe versions of these using test-and-set instructions
 
@@ -31,117 +42,97 @@
   New version written and test program added and some changes to the interface
   was made by Mikael Ronstrom 2005, with assistance of Tomas Ulin and Mats
   Kindahl.
+  Updated to 64 bits and use my_find_first_bit() to speed up
+  bitmap_get_next_set() by Monty in 2024
 */
 
 #include "mysys_priv.h"
 #include
 #include
 #include
+#include
+
+
+/* Defines to check bitmaps */
+
+#define DBUG_ASSERT_BITMAP(M) \
+  DBUG_ASSERT((M)->bitmap); \
+  DBUG_ASSERT((M)->n_bits > 0); \
+  DBUG_ASSERT((M)->last_word_ptr == (M)->bitmap + no_words_in_map(M)-1); \
+  DBUG_ASSERT((*(M)->last_word_ptr & (M)->last_bit_mask) == 0);
+
+#define DBUG_ASSERT_BITMAP_AND_BIT(M,B) \
+  DBUG_ASSERT_BITMAP(M); \
+  DBUG_ASSERT((B) < (M)->n_bits);
+
+#define DBUG_ASSERT_DIFFERENT_BITMAPS(M,N) \
+  DBUG_ASSERT_BITMAP(M); \
+  DBUG_ASSERT_BITMAP(N);
+
+#define DBUG_ASSERT_IDENTICAL_BITMAPS(M,N) \
+  DBUG_ASSERT_BITMAP(M); \
+  DBUG_ASSERT_BITMAP(N); \
+  DBUG_ASSERT((M)->n_bits == (N)->n_bits);
 
 /*
-  Create a mask with the upper 'unused' bits set and the lower 'used'
-  bits clear. The bits within each byte is stored in big-endian order.
+  Create a mask for the usable bits on the LAST my_bitmap_map position for
+  a bitmap with 'bits' number of bits.
+
+  The lowest 'bits' bits are set to zero and the rest of the bits are set to 1.
+  For (bits & 63) == 0, 0 is returned, as in this case all bits in the
+  my_bitmap_map position are significant. (This example assumes the
+  storage is ulonglong).
+
+  For 'bits & 63' it will return values from the series
+  0, 0xfffffffffffffffe,.... 0x8000000000000000
 */
 
-static inline uchar invers_last_byte_mask(uint bits)
+static inline my_bitmap_map last_bit_mask(uint bits)
 {
-  return last_byte_mask(bits) ^ 255;
+  uint bits_in_last_map= (bits & (my_bitmap_map_bits-1));
+  return bits_in_last_map ? ~((1ULL << bits_in_last_map)-1) : 0ULL;
 }
 
 
-void create_last_word_mask(MY_BITMAP *map)
+/*
+  Get a mask of the bits that are to be considered as 'on' at the location
+  starting with 'bits'.
+  This function has _inv in its name as its usage is the inverse of
+  last_bit_mask().
+
+  For (bits & 63) it will return values from the series
+  0xffffffffffffffff, 0xfffffffffffffffe,....
0x8000000000000000 +*/ + +static inline my_bitmap_map first_bit_mask_inv(uint bits) { - unsigned char const mask= invers_last_byte_mask(map->n_bits); - - /* - The first bytes are to be set to zero since they represent real bits - in the bitvector. The last bytes are set to 0xFF since they represent - bytes not used by the bitvector. Finally the last byte contains bits - as set by the mask above. - */ - unsigned char *ptr= (unsigned char*)&map->last_word_mask; - - map->last_word_ptr= map->bitmap + no_words_in_map(map)-1; - switch (no_bytes_in_map(map) & 3) { - case 1: - map->last_word_mask= ~0U; - ptr[0]= mask; - return; - case 2: - map->last_word_mask= ~0U; - ptr[0]= 0; - ptr[1]= mask; - return; - case 3: - map->last_word_mask= 0U; - ptr[2]= mask; - ptr[3]= 0xFFU; - return; - case 0: - map->last_word_mask= 0U; - ptr[3]= mask; - return; - } + uint bits_in_last_map= (bits & (my_bitmap_map_bits-1)); + return ~((1ULL << bits_in_last_map)-1); } -static inline my_bitmap_map last_word_mask(uint bit) +/* + Update the bitmap's last_word_ptr and last_bit_mask + Also ensure that the last world is all zero to make it + easy to find the next set bit. + + Note that if n_bits is 0, then last_word_ptr will point to + bitmap (safely). The bitmap will not be usable for almost any operation. +*/ + +void create_last_bit_mask(MY_BITMAP *map) { - my_bitmap_map last_word_mask; - uint n_bits= bit + 1; - unsigned char const mask= invers_last_byte_mask(n_bits); - - /* - The first bytes are to be set to zero since they represent real bits - in the bitvector. The last bytes are set to 0xFF since they represent - bytes not used by the bitvector. Finally the last byte contains bits - as set by the mask above. - */ - unsigned char *ptr= (unsigned char*)&last_word_mask; - - switch ((n_bits + 7)/8 & 3) { - case 1: - last_word_mask= ~0U; - ptr[0]= mask; - break; - case 2: - last_word_mask= ~0U; - ptr[0]= 0; - ptr[1]= mask; - break; - case 3: - last_word_mask= 0U; - ptr[2]= mask; - ptr[3]= 0xFFU; - break; - case 0: - last_word_mask= 0U; - ptr[3]= mask; - break; - } - return last_word_mask; -} - - -static inline uint get_first_set(my_bitmap_map value, uint word_pos) -{ - uchar *byte_ptr= (uchar*)&value; - uchar byte_value; - uint byte_pos, bit_pos; - - DBUG_ASSERT(value); - for (byte_pos=0; ; byte_pos++, byte_ptr++) + my_bitmap_map mask= last_bit_mask(map->n_bits); + map->last_bit_mask= mask; + map->last_word_ptr= map->bitmap + MY_MAX(no_words_in_map(map),1) -1; + if (map->n_bits > 0) { - if ((byte_value= *byte_ptr)) - { - for (bit_pos=0; ; bit_pos++) - if (byte_value & (1 << bit_pos)) - return (word_pos*32) + (byte_pos*8) + bit_pos; - } + *map->last_word_ptr&= ~mask; /* Set not used bits to 0 */ + DBUG_ASSERT_BITMAP(map); } - return MY_BIT_NONE; /* Impossible */ } + /* Initialize a bitmap object. 
All bits will be set to zero */ @@ -149,17 +140,24 @@ static inline uint get_first_set(my_bitmap_map value, uint word_pos) my_bool my_bitmap_init(MY_BITMAP *map, my_bitmap_map *buf, uint n_bits) { DBUG_ENTER("my_bitmap_init"); + if (!buf) { uint size_in_bytes= bitmap_buffer_size(n_bits); if (!(buf= (my_bitmap_map*) my_malloc(key_memory_MY_BITMAP_bitmap, size_in_bytes, MYF(MY_WME)))) + { + map->bitmap= 0; DBUG_RETURN(1); + } + map->bitmap_allocated= 1; } + else + map->bitmap_allocated= 0; map->bitmap= buf; map->n_bits= n_bits; - create_last_word_mask(map); + create_last_bit_mask(map); bitmap_clear_all(map); DBUG_RETURN(0); } @@ -170,7 +168,8 @@ void my_bitmap_free(MY_BITMAP *map) DBUG_ENTER("my_bitmap_free"); if (map->bitmap) { - my_free(map->bitmap); + if (map->bitmap_allocated) + my_free(map->bitmap); map->bitmap=0; } DBUG_VOID_RETURN; @@ -192,11 +191,14 @@ void my_bitmap_free(MY_BITMAP *map) my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit) { - uchar *value= ((uchar*) map->bitmap) + (bitmap_bit / 8); - uchar bit= 1 << ((bitmap_bit) & 7); - uchar res= (*value) & bit; + my_bitmap_map *value, bit, res; + DBUG_ASSERT_BITMAP_AND_BIT(map, bitmap_bit); + + value= map->bitmap + (bitmap_bit/my_bitmap_map_bits); + bit= 1ULL << (bitmap_bit & (my_bitmap_map_bits-1)); + res= *value & bit; *value|= bit; - return res; + return MY_TEST(res); } @@ -215,8 +217,7 @@ my_bool bitmap_fast_test_and_set(MY_BITMAP *map, uint bitmap_bit) my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit) { - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(bitmap_bit < map->n_bits); + DBUG_ASSERT_BITMAP_AND_BIT(map, bitmap_bit); return bitmap_fast_test_and_set(map, bitmap_bit); } @@ -235,18 +236,20 @@ my_bool bitmap_test_and_set(MY_BITMAP *map, uint bitmap_bit) my_bool bitmap_fast_test_and_clear(MY_BITMAP *map, uint bitmap_bit) { - uchar *byte= (uchar*) map->bitmap + (bitmap_bit / 8); - uchar bit= 1 << ((bitmap_bit) & 7); - uchar res= (*byte) & bit; - *byte&= ~bit; - return res; + my_bitmap_map *value, bit, res; + DBUG_ASSERT_BITMAP_AND_BIT(map, bitmap_bit); + + value= map->bitmap + (bitmap_bit/my_bitmap_map_bits); + bit= 1ULL << (bitmap_bit & (my_bitmap_map_bits-1)); + res= *value & bit; + *value&= ~bit; + return MY_TEST(res); } my_bool bitmap_test_and_clear(MY_BITMAP *map, uint bitmap_bit) { - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(bitmap_bit < map->n_bits); + DBUG_ASSERT_BITMAP_AND_BIT(map, bitmap_bit); return bitmap_fast_test_and_clear(map, bitmap_bit); } @@ -254,8 +257,8 @@ my_bool bitmap_test_and_clear(MY_BITMAP *map, uint bitmap_bit) uint bitmap_set_next(MY_BITMAP *map) { uint bit_found; - DBUG_ASSERT(map->bitmap); - if ((bit_found= bitmap_get_first(map)) != MY_BIT_NONE) + DBUG_ASSERT_BITMAP(map); + if ((bit_found= bitmap_get_first_clear(map)) != MY_BIT_NONE) bitmap_set_bit(map, bit_found); return bit_found; } @@ -265,58 +268,69 @@ uint bitmap_set_next(MY_BITMAP *map) Set the specified number of bits in the bitmap buffer. 
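The test-and-set/test-and-clear rewrites above all use the same 64-bit addressing arithmetic: the word index is bitmap_bit / 64 and the in-word mask is 1ULL << (bitmap_bit & 63). A self-contained illustration of that arithmetic on a plain uint64_t array (this is not the MY_BITMAP API; the helper names are invented):

```cpp
#include <cassert>
#include <cstdint>

// Returns the previous state of the bit and sets it, mirroring the word/bit
// arithmetic of bitmap_fast_test_and_set() on 64-bit words.
static bool test_and_set_bit(uint64_t *words, unsigned bit)
{
  uint64_t *word= words + (bit / 64);        // which 64-bit word
  uint64_t mask= 1ULL << (bit & 63);         // which bit inside that word
  bool was_set= (*word & mask) != 0;
  *word|= mask;
  return was_set;
}

// Same idea for clearing, as in bitmap_fast_test_and_clear()
static bool test_and_clear_bit(uint64_t *words, unsigned bit)
{
  uint64_t *word= words + (bit / 64);
  uint64_t mask= 1ULL << (bit & 63);
  bool was_set= (*word & mask) != 0;
  *word&= ~mask;
  return was_set;
}

int main()
{
  uint64_t words[2]= {0, 0};                 // room for 128 bits
  assert(!test_and_set_bit(words, 70));      // bit 70 lives in words[1]
  assert(test_and_set_bit(words, 70));       // now reported as already set
  assert(test_and_clear_bit(words, 70));
  assert(words[1] == 0);
  return 0;
}
```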
@param map [IN] Bitmap - @param prefix_size [IN] Number of bits to be set + @param prefix_size [IN] Number of bits to be set or (uint) ~0 for all */ + void bitmap_set_prefix(MY_BITMAP *map, uint prefix_size) { - uint prefix_bytes, prefix_bits, d; - uchar *m= (uchar *)map->bitmap; - - DBUG_ASSERT(map->bitmap); + uint prefix, prefix_bits; + my_bitmap_map *value= map->bitmap; + DBUG_ASSERT_BITMAP(map); DBUG_ASSERT(prefix_size <= map->n_bits || prefix_size == (uint) ~0); set_if_smaller(prefix_size, map->n_bits); - if ((prefix_bytes= prefix_size / 8)) - memset(m, 0xff, prefix_bytes); - m+= prefix_bytes; - if ((prefix_bits= prefix_size & 7)) + + if ((prefix= prefix_size / my_bitmap_map_bits)) { - *(m++)= (1 << prefix_bits)-1; - // As the prefix bits are set, lets count this byte too as a prefix byte. - prefix_bytes ++; + my_bitmap_map *end= value+prefix; + do + { + *value++= ~(my_bitmap_map) 0; + } while (value < end); } - if ((d= no_bytes_in_map(map)-prefix_bytes)) - memset(m, 0, d); + if ((prefix_bits= prefix_size & (my_bitmap_map_bits-1))) + *value++= (1ULL << prefix_bits)-1; + while (value <= map->last_word_ptr) + *value++= 0; + DBUG_ASSERT_BITMAP(map); } +/** + Check if bitmap is a bitmap of prefix bits set in the beginning + + @param map bitmap + @param prefix_size number of bits that should be set. 0 is allowed. + + @return 1 Yes, prefix bits where set or prefix_size == 0. + @return 0 No +*/ + my_bool bitmap_is_prefix(const MY_BITMAP *map, uint prefix_size) { - uint prefix_mask= last_byte_mask(prefix_size); - uchar *m= (uchar*) map->bitmap; - uchar *end_prefix= m+(prefix_size-1)/8; - uchar *end; - DBUG_ASSERT(m); - DBUG_ASSERT(prefix_size <= map->n_bits); + my_bitmap_map *value= map->bitmap; + my_bitmap_map *end= value+ (prefix_size/my_bitmap_map_bits); + uint prefix_bits; /* Empty prefix is always true */ if (!prefix_size) return 1; - while (m < end_prefix) - if (*m++ != 0xff) + DBUG_ASSERT_BITMAP_AND_BIT(map, prefix_size-1); + + while (value < end) + if (*value++ != ~(my_bitmap_map) 0) return 0; - end= ((uchar*) map->bitmap) + no_bytes_in_map(map) - 1; - if (m == end) - return ((*m & last_byte_mask(map->n_bits)) == prefix_mask); - - if (*m != prefix_mask) - return 0; - - while (++m < end) - if (*m != 0) + if ((prefix_bits= prefix_size & (my_bitmap_map_bits-1))) + { + if (*value++ != (1ULL << prefix_bits)-1) return 0; - return ((*m & last_byte_mask(map->n_bits)) == 0); + } + end= map->last_word_ptr; + while (value <= end) + if (*value++ != 0) + return 0; + return 1; } @@ -324,10 +338,12 @@ my_bool bitmap_is_set_all(const MY_BITMAP *map) { my_bitmap_map *data_ptr= map->bitmap; my_bitmap_map *end= map->last_word_ptr; + DBUG_ASSERT_BITMAP(map); + for (; data_ptr < end; data_ptr++) - if (*data_ptr != 0xFFFFFFFF) + if (*data_ptr != ~(my_bitmap_map)0) return FALSE; - return (*data_ptr | map->last_word_mask) == 0xFFFFFFFF; + return (*data_ptr | map->last_bit_mask) == ~(my_bitmap_map)0; } @@ -335,61 +351,58 @@ my_bool bitmap_is_clear_all(const MY_BITMAP *map) { my_bitmap_map *data_ptr= map->bitmap; my_bitmap_map *end= map->last_word_ptr; + DBUG_ASSERT_BITMAP(map); - DBUG_ASSERT(map->n_bits > 0); - for (; data_ptr < end; data_ptr++) + for (; data_ptr <= end; data_ptr++) if (*data_ptr) return FALSE; - return (*data_ptr & ~map->last_word_mask) == 0; + return TRUE; } + /* Return TRUE if map1 is a subset of map2 */ my_bool bitmap_is_subset(const MY_BITMAP *map1, const MY_BITMAP *map2) { - my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end; + my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, 
*end= map1->last_word_ptr; + DBUG_ASSERT_IDENTICAL_BITMAPS(map1,map2); - DBUG_ASSERT(map1->bitmap && map2->bitmap); - DBUG_ASSERT(map1->n_bits==map2->n_bits); - - end= map1->last_word_ptr; - while (m1 < end) + while (m1 <= end) { if ((*m1++) & ~(*m2++)) return 0; } - /* here both maps have the same number of bits - see assert above */ - return ((*m1 & ~*m2 & ~map1->last_word_mask) ? 0 : 1); + return 1; } /* True if bitmaps has any common bits */ my_bool bitmap_is_overlapping(const MY_BITMAP *map1, const MY_BITMAP *map2) { - my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end; + my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end= map1->last_word_ptr; + DBUG_ASSERT_IDENTICAL_BITMAPS(map1,map2); - DBUG_ASSERT(map1->bitmap); - DBUG_ASSERT(map2->bitmap); - DBUG_ASSERT(map1->n_bits==map2->n_bits); - - end= map1->last_word_ptr; - while (m1 < end) + while (m1 <= end) { if ((*m1++) & (*m2++)) return 1; } - /* here both maps have the same number of bits - see assert above */ - return ((*m1 & *m2 & ~map1->last_word_mask) ? 1 : 0); + return 0; } +/* + Create intersection of two bitmaps + + @param map map1. Result is stored here + @param map2 map2 +*/ + void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2) { my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; uint len= no_words_in_map(map), len2 = no_words_in_map(map2); - - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(map2->bitmap); + DBUG_ASSERT_DIFFERENT_BITMAPS(map,map2); end= to+MY_MIN(len,len2); while (to < end) @@ -397,7 +410,7 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2) if (len2 <= len) { - to[-1]&= ~map2->last_word_mask; /* Clear last not relevant bits */ + to[-1]&= ~map2->last_bit_mask; /* Clear last not relevant bits */ end+= len-len2; while (to < end) *to++= 0; @@ -407,50 +420,51 @@ void bitmap_intersect(MY_BITMAP *map, const MY_BITMAP *map2) /* Check if there is some bit index between start_bit and end_bit, such that - this is bit is set for all bitmaps in bitmap_list. + this is at least on bit that set for all bitmaps in bitmap_list. SYNOPSIS bitmap_exists_intersection() bitmpap_array [in] a set of MY_BITMAPs - bitmap_count [in] number of elements in bitmpap_array + bitmap_count [in] number of elements in bitmap_array start_bit [in] beginning (inclusive) of the range of bits to search end_bit [in] end (inclusive) of the range of bits to search, must be no bigger than the bits of the shortest bitmap. - NOTES - This function assumes that for at least one of the bitmaps in bitmap_array all - bits outside the range [start_bit, end_bit] are 0. As a result is not - necessary to take care of the bits outside the range [start_bit, end_bit]. 
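bitmap_exists_intersection() limits the scan to [start_bit, end_bit] by masking the two boundary words: first_bit_mask_inv(start_bit) removes bits below the start, and ~last_bit_mask(end_bit+1) removes bits above the end. A standalone sketch of the same endpoint-masking idea over raw uint64_t arrays (function and variable names invented for the example):

```cpp
#include <cassert>
#include <cstdint>

// Is there at least one bit index in [start_bit, end_bit] that is set in
// every bitmap? Plain-array sketch of the masking used by
// bitmap_exists_intersection(); has_common_bit() is an invented name.
static bool has_common_bit(const uint64_t *const *maps, unsigned map_count,
                           unsigned start_bit, unsigned end_bit)
{
  unsigned start_word= start_bit / 64;
  unsigned end_word= end_bit / 64;
  uint64_t first_mask= ~0ULL << (start_bit & 63);        // drop bits < start_bit
  uint64_t last_mask= (end_bit & 63) == 63 ?
    ~0ULL : (1ULL << ((end_bit & 63) + 1)) - 1;          // drop bits > end_bit

  for (unsigned w= start_word; w <= end_word; w++)
  {
    uint64_t common= ~0ULL;
    if (w == start_word)
      common&= first_mask;
    if (w == end_word)
      common&= last_mask;
    for (unsigned m= 0; common && m < map_count; m++)
      common&= maps[m][w];
    if (common)
      return true;
  }
  return false;
}

int main()
{
  uint64_t a[2]= {0, 1ULL << 5};                         // only bit 69 set
  uint64_t b[2]= {~0ULL, 1ULL << 5};                     // bits 0..63 and 69
  const uint64_t *maps[2]= {a, b};
  assert(has_common_bit(maps, 2, 64, 127));              // both have bit 69
  assert(!has_common_bit(maps, 2, 0, 63));               // a has nothing there
  return 0;
}
```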
- RETURN TRUE if an intersecion exists FALSE no intersection */ -my_bool bitmap_exists_intersection(const MY_BITMAP **bitmap_array, +my_bool bitmap_exists_intersection(MY_BITMAP **bitmap_array, uint bitmap_count, uint start_bit, uint end_bit) { uint i, j, start_idx, end_idx; - my_bitmap_map cur_res; + my_bitmap_map cur_res, first_map; DBUG_ASSERT(bitmap_count); DBUG_ASSERT(end_bit >= start_bit); for (j= 0; j < bitmap_count; j++) - DBUG_ASSERT(end_bit < bitmap_array[j]->n_bits); + { + DBUG_ASSERT_BITMAP_AND_BIT(bitmap_array[j], end_bit); + } start_idx= start_bit/8/sizeof(my_bitmap_map); end_idx= end_bit/8/sizeof(my_bitmap_map); + first_map= first_bit_mask_inv(start_bit); + cur_res= first_map; for (i= start_idx; i < end_idx; i++) { - cur_res= ~0; for (j= 0; cur_res && j < bitmap_count; j++) cur_res &= bitmap_array[j]->bitmap[i]; if (cur_res) return TRUE; + cur_res= ~(my_bitmap_map) 0; } - cur_res= ~last_word_mask(end_bit); + cur_res= ~last_bit_mask(end_bit+1); + if (start_idx == end_idx) + cur_res&= first_map; for (j= 0; cur_res && j < bitmap_count; j++) cur_res &= bitmap_array[j]->bitmap[end_idx]; return cur_res != 0; @@ -461,60 +475,21 @@ my_bool bitmap_exists_intersection(const MY_BITMAP **bitmap_array, my_bool bitmap_union_is_set_all(const MY_BITMAP *map1, const MY_BITMAP *map2) { - my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end; + my_bitmap_map *m1= map1->bitmap, *m2= map2->bitmap, *end= map1->last_word_ptr; + DBUG_ASSERT_IDENTICAL_BITMAPS(map1,map2); - DBUG_ASSERT(map1->bitmap); - DBUG_ASSERT(map2->bitmap); - DBUG_ASSERT(map1->n_bits==map2->n_bits); - end= map1->last_word_ptr; while ( m1 < end) - if ((*m1++ | *m2++) != 0xFFFFFFFF) + if ((*m1++ | *m2++) != ~(my_bitmap_map)0) return FALSE; /* here both maps have the same number of bits - see assert above */ - return ((*m1 | *m2 | map1->last_word_mask) != 0xFFFFFFFF); -} - - - -/* - Set/clear all bits above a bit. - - SYNOPSIS - bitmap_set_above() - map RETURN The bitmap to change. - from_byte The bitmap buffer byte offset to start with. - use_bit The bit value (1/0) to use for all upper bits. - - NOTE - You can only set/clear full bytes. - The function is meant for the situation that you copy a smaller bitmap - to a bigger bitmap. Bitmap lengths are always multiple of eigth (the - size of a byte). Using 'from_byte' saves multiplication and division - by eight during parameter passing. - - RETURN - void -*/ - -void bitmap_set_above(MY_BITMAP *map, uint from_byte, uint use_bit) -{ - uchar use_byte= use_bit ? 
0xff : 0; - uchar *to= (uchar *)map->bitmap + from_byte; - uchar *end= (uchar *)map->bitmap + (map->n_bits+7)/8; - - while (to < end) - *to++= use_byte; + return ((*m1 | *m2 | map1->last_bit_mask) != ~(my_bitmap_map)0); } void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2) { - my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(map2->bitmap); - DBUG_ASSERT(map->n_bits==map2->n_bits); - - end= map->last_word_ptr; + my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr; + DBUG_ASSERT_IDENTICAL_BITMAPS(map,map2); while (to <= end) *to++ &= ~(*from++); @@ -523,12 +498,8 @@ void bitmap_subtract(MY_BITMAP *map, const MY_BITMAP *map2) void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2) { - my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; - - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(map2->bitmap); - DBUG_ASSERT(map->n_bits == map2->n_bits); - end= map->last_word_ptr; + my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr; + DBUG_ASSERT_IDENTICAL_BITMAPS(map,map2); while (to <= end) *to++ |= *from++; @@ -538,9 +509,8 @@ void bitmap_union(MY_BITMAP *map, const MY_BITMAP *map2) void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2) { my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end= map->last_word_ptr; - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(map2->bitmap); - DBUG_ASSERT(map->n_bits == map2->n_bits); + DBUG_ASSERT_IDENTICAL_BITMAPS(map,map2); + while (to <= end) *to++ ^= *from++; } @@ -548,13 +518,14 @@ void bitmap_xor(MY_BITMAP *map, const MY_BITMAP *map2) void bitmap_invert(MY_BITMAP *map) { - my_bitmap_map *to= map->bitmap, *end; + my_bitmap_map *to= map->bitmap, *end= map->last_word_ptr; + DBUG_ASSERT_BITMAP(map); - DBUG_ASSERT(map->bitmap); - end= map->last_word_ptr; + while (to < end) + *to++ ^= ~(my_bitmap_map)0; - while (to <= end) - *to++ ^= 0xFFFFFFFF; + *to ^= (~(my_bitmap_map)0 & ~map->last_bit_mask); + DBUG_ASSERT_BITMAP(map); } @@ -563,45 +534,54 @@ uint bitmap_bits_set(const MY_BITMAP *map) my_bitmap_map *data_ptr= map->bitmap; my_bitmap_map *end= map->last_word_ptr; uint res= 0; - DBUG_ASSERT(map->bitmap); + DBUG_ASSERT_BITMAP(map); - for (; data_ptr < end; data_ptr++) - res+= my_count_bits_uint32(*data_ptr); + for (; data_ptr <= end; data_ptr++) + res+= my_count_bits(*data_ptr); - /*Reset last bits to zero*/ - res+= my_count_bits_uint32(*map->last_word_ptr & ~map->last_word_mask); return res; } -void bitmap_copy(MY_BITMAP *map, const MY_BITMAP *map2) + +/** + Copy bitmaps + + @param map1 to-bitmap + @param map2 from-bitmap + + @notes + Code will work even of the bitmaps are of different size. + In this case, only up to to->n_bits will be copied. 
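bitmap_copy() above copies MY_MIN() of the two word counts, zero-fills any remaining destination words, and finally clears the destination bits above n_bits through last_bit_mask. A toy version of that size-mismatch handling on plain uint64_t arrays (copy_bits() and its layout are invented for the example):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Copy 'src' (src_bits wide) into 'dst' (dst_bits wide), both stored as
// arrays of 64-bit words. Only up to dst_bits bits survive; unused bits of
// the destination's last word are forced to zero, as bitmap_copy() does via
// last_word_ptr/last_bit_mask.
static void copy_bits(uint64_t *dst, unsigned dst_bits,
                      const uint64_t *src, unsigned src_bits)
{
  unsigned dst_words= (dst_bits + 63) / 64;
  unsigned src_words= (src_bits + 63) / 64;
  unsigned common= dst_words < src_words ? dst_words : src_words;

  memcpy(dst, src, common * sizeof(uint64_t));
  memset(dst + common, 0, (dst_words - common) * sizeof(uint64_t));

  unsigned tail= dst_bits & 63;
  if (tail)                                   // clear bits >= dst_bits
    dst[dst_words - 1]&= (1ULL << tail) - 1;
}

int main()
{
  uint64_t src[2]= {~0ULL, ~0ULL};            // 128 bits, all set
  uint64_t dst[2]= {0, 0};
  copy_bits(dst, 70, src, 128);               // destination only keeps 70 bits
  assert(dst[0] == ~0ULL);
  assert(dst[1] == (1ULL << 6) - 1);          // bits 64..69 kept, rest dropped
  return 0;
}
```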
+*/ + +void bitmap_copy(MY_BITMAP *map1, const MY_BITMAP *map2) { - my_bitmap_map *to= map->bitmap, *from= map2->bitmap, *end; + my_bitmap_map *to= map1->bitmap, *from= map2->bitmap; + uint map1_length= no_words_in_map(map1)*sizeof(my_bitmap_map); + uint map2_length= no_words_in_map(map2)*sizeof(my_bitmap_map); + uint length= MY_MIN(map1_length, map2_length); + DBUG_ASSERT_DIFFERENT_BITMAPS(map1,map2); - DBUG_ASSERT(map->bitmap); - DBUG_ASSERT(map2->bitmap); - DBUG_ASSERT(map->n_bits == map2->n_bits); - end= map->last_word_ptr; - - while (to <= end) - *to++ = *from++; + memcpy(to, from, length); + if (length < map1_length) + bzero(to + length, map1_length - length); + *map1->last_word_ptr&= ~map1->last_bit_mask; } +/* + Find first set bit in the bitmap +*/ + uint bitmap_get_first_set(const MY_BITMAP *map) { - uint i; my_bitmap_map *data_ptr= map->bitmap, *end= map->last_word_ptr; + DBUG_ASSERT_BITMAP(map); - DBUG_ASSERT(map->bitmap); - - for (i=0; data_ptr < end; data_ptr++, i++) + for (uint i=0; data_ptr <= end; data_ptr++, i++) if (*data_ptr) - goto found; - if (!(*data_ptr & ~map->last_word_mask)) - return MY_BIT_NONE; - -found: - return get_first_set(*data_ptr, i); + return my_find_first_bit(*data_ptr) + i * sizeof(my_bitmap_map)*8; + return MY_BIT_NONE; } @@ -616,80 +596,113 @@ found: uint bitmap_get_next_set(const MY_BITMAP *map, uint bitmap_bit) { - uint word_pos, byte_to_mask, i; - union { my_bitmap_map bitmap ; uchar bitmap_buff[sizeof(my_bitmap_map)]; } - first_word; - uchar *ptr= &first_word.bitmap_buff[0]; - my_bitmap_map *data_ptr, *end= map->last_word_ptr; - - DBUG_ASSERT(map->bitmap); + uint word_pos; + my_bitmap_map first_word, *data_ptr, *end= map->last_word_ptr; + DBUG_ASSERT_BITMAP(map); /* Look for the next bit */ bitmap_bit++; if (bitmap_bit >= map->n_bits) return MY_BIT_NONE; - word_pos= bitmap_bit / 32; + + word_pos= bitmap_bit / 64; data_ptr= map->bitmap + word_pos; - first_word.bitmap= *data_ptr; - /* Mask out previous bits from first_word */ - byte_to_mask= (bitmap_bit % 32) / 8; - for (i= 0; i < byte_to_mask; i++) - ptr[i]= 0; - ptr[byte_to_mask]&= 0xFFU << (bitmap_bit & 7); + first_word= *data_ptr & first_bit_mask_inv(bitmap_bit); - if (data_ptr == end) + if (first_word) { - if (first_word.bitmap & ~map->last_word_mask) - return get_first_set(first_word.bitmap, word_pos); - else - return MY_BIT_NONE; + /* Optimize common case when most bits are set */ + if (first_word & (1ULL << ((bitmap_bit & (my_bitmap_map_bits-1))))) + return bitmap_bit; + return my_find_first_bit(first_word) + (bitmap_bit & ~(my_bitmap_map_bits-1)); } - - if (first_word.bitmap) - return get_first_set(first_word.bitmap, word_pos); - for (data_ptr++, word_pos++; data_ptr < end; data_ptr++, word_pos++) + for (data_ptr++; data_ptr <= end; data_ptr++) + { + bitmap_bit+= 64; if (*data_ptr) - return get_first_set(*data_ptr, word_pos); - - if (!(*end & ~map->last_word_mask)) - return MY_BIT_NONE; - return get_first_set(*end, word_pos); -} - - -/* Get first free bit */ - -uint bitmap_get_first(const MY_BITMAP *map) -{ - uchar *byte_ptr; - uint i,j,k; - my_bitmap_map *data_ptr, *end= map->last_word_ptr; - - DBUG_ASSERT(map->bitmap); - data_ptr= map->bitmap; - *map->last_word_ptr|= map->last_word_mask; - - for (i=0; data_ptr < end; data_ptr++, i++) - if (*data_ptr != 0xFFFFFFFF) - goto found; - if ((*data_ptr | map->last_word_mask) == 0xFFFFFFFF) - return MY_BIT_NONE; - -found: - byte_ptr= (uchar*)data_ptr; - for (j=0; ; j++, byte_ptr++) - { - if (*byte_ptr != 0xFF) - { - for (k=0; ; k++) - { - if 
(!(*byte_ptr & (1 << k))) - return (i*32) + (j*8) + k; - } - } + return my_find_first_bit(*data_ptr) + (bitmap_bit & ~(my_bitmap_map_bits-1)); } - DBUG_ASSERT(0); - return MY_BIT_NONE; /* Impossible */ + return MY_BIT_NONE; } + + +/* Get first clear bit */ + +uint bitmap_get_first_clear(const MY_BITMAP *map) +{ + uint i; + my_bitmap_map *data_ptr= map->bitmap, *end= map->last_word_ptr; + DBUG_ASSERT_BITMAP(map); + + for (i= 0; data_ptr < end; data_ptr++, i++) + if (*data_ptr != ~(my_bitmap_map)0) + goto found; + if ((*data_ptr | map->last_bit_mask) == ~(my_bitmap_map)0) + return MY_BIT_NONE; +found: + /* find first zero bit by reverting all bits and find first bit */ + return my_find_first_bit(~*data_ptr) + i * sizeof(my_bitmap_map)*8; +} +/* + Functions to export/import bitmaps to an architecture independent format + (low_byte_first) +*/ + +#ifdef WORDS_BIGENDIAN +/* Big endian machines, like powerpc or s390x */ + +void bitmap_export(uchar *to, MY_BITMAP *map) +{ + my_bitmap_map *value; + uint length; + uchar buff[my_bitmap_map_bytes]; + + for (value= map->bitmap ; value < map->last_word_ptr ; value++) + { + int8store(to, *value); + to+= 8; + } + int8store(buff, *value); + + /* We want length & 7 to return a serie 8,2,3,4,5,6,7, 8,2,3,... */ + length= 1+ ((no_bytes_in_export_map(map) + 7) & 7); + memcpy(to, buff, length); +} + + +void bitmap_import(MY_BITMAP *map, uchar *from) +{ + my_bitmap_map *value; + uint length; + uchar buff[my_bitmap_map_bytes]; + + for (value= map->bitmap ; value < map->last_word_ptr ; value++) + { + *value= uint8korr(from); + from+= 8; + } + bzero(buff, sizeof(buff)); + + /* We want length & 7 to return a serie 8,2,3,4,5,6,7, 8,2,3,... */ + length= 1+ ((no_bytes_in_export_map(map) + 7) & 7); + memcpy(buff, from, length); + *value= uint8korr(buff) & ~map->last_bit_mask; +} + +#else + +/* Little endian machines, like intel and amd */ + +void bitmap_export(uchar *to, MY_BITMAP *map) +{ + memcpy(to, (uchar*) map->bitmap, no_bytes_in_export_map(map)); +} + +void bitmap_import(MY_BITMAP *map, uchar *from) +{ + memcpy((uchar*) map->bitmap, from, no_bytes_in_export_map(map)); + *map->last_word_ptr&= ~map->last_bit_mask; +} +#endif /* WORDS_BIGENDIAN */ diff --git a/mysys/my_getopt.c b/mysys/my_getopt.c index 0343e06a2f6..5fb84f1fe2f 100644 --- a/mysys/my_getopt.c +++ b/mysys/my_getopt.c @@ -172,6 +172,8 @@ static void validate_value(const char *key, const char *value, #define validate_value(key, value, filename) (void)filename #endif +#define SET_HO_ERROR_AND_CONTINUE(e) { ho_error= (e); continue; } + /** Handle command line options. Sort options. 
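The SET_HO_ERROR_AND_CONTINUE() macro introduced above switches handle_options() from returning on the first bad option to recording the error, finishing the scan (so every problem gets reported), and returning the recorded code afterwards. The same pattern in isolation, with invented stand-ins (parse_all, check_one) rather than the real my_getopt entry points:

```cpp
#include <cstdio>

// Pretend checker: flags anything that does not look like an option.
static int check_one(const char *arg)
{
  return arg[0] == '-' ? 0 : 1;
}

// Error-accumulation pattern: keep processing the remaining arguments so all
// diagnostics are printed, but still fail at the end if anything was wrong.
// As with ho_error, the most recently seen error code is the one returned.
static int parse_all(int argc, char **argv)
{
  int error= 0;
  for (int i= 1; i < argc; i++)
  {
    int err= check_one(argv[i]);
    if (err)
    {
      fprintf(stderr, "bad argument: %s\n", argv[i]);
      error= err;                              // remember it and keep going
      continue;
    }
    /* ... handle the recognised option here ... */
  }
  return error;                                // 0 only if everything parsed
}

int main(int argc, char **argv)
{
  return parse_all(argc, argv);
}
```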
@@ -241,7 +243,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, const char *UNINIT_VAR(prev_found); const struct my_option *optp; void *value; - int error, i; + int ho_error= 0, error, i; my_bool is_cmdline_arg= 1; DBUG_ENTER("handle_options"); @@ -255,7 +257,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, is_cmdline_arg= !is_file_marker(**argv); - for (pos= *argv, pos_end=pos+ *argc; pos != pos_end ; pos++) + for (pos= *argv, pos_end=pos+ *argc; pos < pos_end ; pos++) { char **first= pos; char *cur_arg= *pos; @@ -344,7 +346,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, my_progname, special_opt_prefix[i], opt_str, special_opt_prefix[i], prev_found); - DBUG_RETURN(EXIT_AMBIGUOUS_OPTION); + SET_HO_ERROR_AND_CONTINUE(EXIT_AMBIGUOUS_OPTION) } switch (i) { case OPT_SKIP: @@ -389,7 +391,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, "%s: unknown variable '%s'", my_progname, cur_arg); if (!option_is_loose) - DBUG_RETURN(EXIT_UNKNOWN_VARIABLE); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNKNOWN_VARIABLE) } else { @@ -399,7 +401,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, "%s: unknown option '--%s'", my_progname, cur_arg); if (!option_is_loose) - DBUG_RETURN(EXIT_UNKNOWN_OPTION); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNKNOWN_OPTION) } if (option_is_loose) { @@ -416,7 +418,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, my_getopt_error_reporter(ERROR_LEVEL, "%s: variable prefix '%s' is not unique", my_progname, opt_str); - DBUG_RETURN(EXIT_VAR_PREFIX_NOT_UNIQUE); + SET_HO_ERROR_AND_CONTINUE(EXIT_VAR_PREFIX_NOT_UNIQUE) } else { @@ -425,7 +427,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, "%s: ambiguous option '--%s' (%s, %s)", my_progname, opt_str, prev_found, optp->name); - DBUG_RETURN(EXIT_AMBIGUOUS_OPTION); + SET_HO_ERROR_AND_CONTINUE(EXIT_AMBIGUOUS_OPTION) } } if ((optp->var_type & GET_TYPE_MASK) == GET_DISABLED) @@ -439,14 +441,14 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, (*argc)--; continue; } - DBUG_RETURN(EXIT_OPTION_DISABLED); + SET_HO_ERROR_AND_CONTINUE(EXIT_OPTION_DISABLED) } error= 0; value= optp->var_type & GET_ASK_ADDR ? (*my_getopt_get_addr)(key_name, (uint)strlen(key_name), optp, &error) : optp->value; if (error) - DBUG_RETURN(error); + SET_HO_ERROR_AND_CONTINUE(error) if (optp->arg_type == NO_ARG) { @@ -461,7 +463,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, my_getopt_error_reporter(ERROR_LEVEL, "%s: option '--%s' cannot take an argument", my_progname, optp->name); - DBUG_RETURN(EXIT_NO_ARGUMENT_ALLOWED); + SET_HO_ERROR_AND_CONTINUE(EXIT_NO_ARGUMENT_ALLOWED) } if ((optp->var_type & GET_TYPE_MASK) == GET_BOOL) { @@ -490,7 +492,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, if (get_one_option(optp, *((my_bool*) value) ? 
enabled_my_option : disabled_my_option, filename)) - DBUG_RETURN(EXIT_ARGUMENT_INVALID); + SET_HO_ERROR_AND_CONTINUE(EXIT_ARGUMENT_INVALID) continue; } argument= optend; @@ -504,7 +506,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, "option '--%s' cannot take an argument", my_progname, optp->name); - DBUG_RETURN(EXIT_NO_ARGUMENT_ALLOWED); + SET_HO_ERROR_AND_CONTINUE(EXIT_NO_ARGUMENT_ALLOWED) } if (!(optp->var_type & GET_AUTO)) { @@ -514,7 +516,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, "unsupported by option '--%s'", my_progname, optp->name); if (!option_is_loose) - DBUG_RETURN(EXIT_ARGUMENT_INVALID); + SET_HO_ERROR_AND_CONTINUE(EXIT_ARGUMENT_INVALID) continue; } else @@ -533,7 +535,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, my_getopt_error_reporter(ERROR_LEVEL, "%s: option '--%s' requires an argument", my_progname, optp->name); - DBUG_RETURN(EXIT_ARGUMENT_REQUIRED); + SET_HO_ERROR_AND_CONTINUE(EXIT_ARGUMENT_REQUIRED) } argument= *pos; (*argc)--; @@ -558,14 +560,14 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, fprintf(stderr, "%s: ERROR: Option '-%c' used, but is disabled\n", my_progname, optp->id); - DBUG_RETURN(EXIT_OPTION_DISABLED); + SET_HO_ERROR_AND_CONTINUE(EXIT_OPTION_DISABLED) } if ((optp->var_type & GET_TYPE_MASK) == GET_BOOL && optp->arg_type == NO_ARG) { *((my_bool*) optp->value)= (my_bool) 1; if (get_one_option(optp, argument, filename)) - DBUG_RETURN(EXIT_UNSPECIFIED_ERROR); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNSPECIFIED_ERROR) continue; } else if (optp->arg_type == REQUIRED_ARG || @@ -585,7 +587,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, if (optp->var_type == GET_BOOL) *((my_bool*) optp->value)= (my_bool) 1; if (get_one_option(optp, argument, filename)) - DBUG_RETURN(EXIT_UNSPECIFIED_ERROR); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNSPECIFIED_ERROR) continue; } /* Check if there are more arguments after this one */ @@ -595,7 +597,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, my_getopt_error_reporter(ERROR_LEVEL, "%s: option '-%c' requires an argument", my_progname, optp->id); - DBUG_RETURN(EXIT_ARGUMENT_REQUIRED); + SET_HO_ERROR_AND_CONTINUE(EXIT_ARGUMENT_REQUIRED) } argument= *++pos; (*argc)--; @@ -603,10 +605,10 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, } } if ((error= setval(optp, optp->value, argument, - set_maximum_value,filename))) - DBUG_RETURN(error); + set_maximum_value,filename))) + SET_HO_ERROR_AND_CONTINUE(error) if (get_one_option(optp, argument, filename)) - DBUG_RETURN(EXIT_UNSPECIFIED_ERROR); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNSPECIFIED_ERROR) break; } } @@ -640,7 +642,7 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, my_getopt_error_reporter(ERROR_LEVEL, "%s: unknown option '-%c'", my_progname, *optend); - DBUG_RETURN(EXIT_UNKNOWN_OPTION); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNKNOWN_OPTION) } } } @@ -651,15 +653,17 @@ int handle_options(int *argc, char ***argv, const struct my_option *longopts, if ((!option_is_autoset) && ((error= setval(optp, value, argument, set_maximum_value,filename))) && !option_is_loose) - DBUG_RETURN(error); + SET_HO_ERROR_AND_CONTINUE(error) if (get_one_option(optp, argument, filename)) - DBUG_RETURN(EXIT_UNSPECIFIED_ERROR); + SET_HO_ERROR_AND_CONTINUE(EXIT_UNSPECIFIED_ERROR) (*argc)--; /* option handled (long), decrease argument count */ } else /* 
non-option found */ (*argv)[argvpos++]= cur_arg; } + if (ho_error) + DBUG_RETURN(ho_error); /* Destroy the first, already handled option, so that programs that look for arguments in 'argv', without checking 'argc', know when to stop. diff --git a/plugin/auth_gssapi/README.md b/plugin/auth_gssapi/README.md index ea8deaafa94..635982234c4 100644 --- a/plugin/auth_gssapi/README.md +++ b/plugin/auth_gssapi/README.md @@ -49,7 +49,7 @@ Usually nothing need to be done. MariaDB server should to run on a domain joine Creating service principal is not required here (but you can still do it using [_setspn_](https://technet.microsoft.com/en-us/library/cc731241.aspx) tool) -# Installing plugin +## Installing plugin - Start the server - On Unix, edit my the my.cnf/my.ini configuration file, set the parameter gssapi-keytab-path to point to previously @@ -72,7 +72,7 @@ configure alternative principal name with INSTALL SONAME 'auth_gssapi' ``` -#Creating users +## Creating users Now, you can create a user for GSSAPI/SSPI authentication. CREATE USER command, for Kerberos user would be like this (*long* form, see below for short one) @@ -94,7 +94,7 @@ CREATE USER usr1 IDENTIFIED WITH gssapi; If this syntax is used, realm part is *not* used for comparison thus 'usr1@EXAMPLE.COM', 'usr1@EXAMPLE.CO.UK' and 'mymachine\usr1' will all identify as 'usr1'. -#Login as GSSAPI user with command line clients +## Login as GSSAPI user with command line clients Using command line client, do @@ -102,7 +102,7 @@ Using command line client, do mysql --plugin-dir=/path/to/plugin-dir -u usr1 ``` -#Plugin variables +## Plugin variables - **gssapi-keytab-path** (Unix only) - Path to the server keytab file - **gssapi-principal-name** - name of the service principal. - **gssapi-mech-name** (Windows only) - Name of the SSPI package used by server. Can be either 'Kerberos' or 'Negotiate'. @@ -111,7 +111,7 @@ mysql --plugin-dir=/path/to/plugin-dir -u usr1 to allow non-domain environment (e.g if server does not run in domain environment). -#Implementation +## Implementation Overview of the protocol between client and server diff --git a/plugin/type_mysql_timestamp/plugin.cc b/plugin/type_mysql_timestamp/plugin.cc index a524c5c0124..6cbd76d8583 100644 --- a/plugin/type_mysql_timestamp/plugin.cc +++ b/plugin/type_mysql_timestamp/plugin.cc @@ -103,7 +103,19 @@ public: Field_mysql_timestampf(*name, rec, attr->unireg_check, share, attr->temporal_dec(MAX_DATETIME_WIDTH)); } - void Column_definition_implicit_upgrade(Column_definition *c) const override + const Type_handler *type_handler_for_implicit_upgrade() const override + { + /* + The derived method as of 10.11.8 does "return this;" anyway. + However, in the future this may change to return a + opt_mysql56_temporal_format dependent handler. + Here in this class we need to make sure to do "return this;" + not to depend on the derived method changes. 
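The comment above explains why the override is pinned here: even if the base implementation of type_handler_for_implicit_upgrade() later becomes dependent on opt_mysql56_temporal_format, this handler must keep returning itself. A generic sketch of that "freeze the behaviour by overriding" idea, using invented class names unrelated to the real Type_handler hierarchy:

```cpp
#include <cassert>

// If the base implementation may change in the future, a class that must keep
// today's behaviour overrides the hook and returns itself unconditionally.
struct Handler_base
{
  virtual ~Handler_base() {}
  // Today this returns 'this'; a future version might return a different
  // handler depending on some server option.
  virtual const Handler_base *handler_for_upgrade() const { return this; }
};

struct Handler_legacy : public Handler_base
{
  // Pinned: always keep this handler, whatever the base method does later.
  const Handler_base *handler_for_upgrade() const override { return this; }
};

int main()
{
  Handler_legacy legacy;
  const Handler_base *h= &legacy;
  assert(h->handler_for_upgrade() == &legacy);
  return 0;
}
```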
+ */ + return this; + } + void Column_definition_implicit_upgrade_to_this(Column_definition *old) + const override { /* Suppress the automatic upgrade depending on opt_mysql56_temporal_format, diff --git a/plugin/type_uuid/mysql-test/type_uuid/type_uuid_mariadb101104.result b/plugin/type_uuid/mysql-test/type_uuid/type_uuid_mariadb101104.result new file mode 100644 index 00000000000..b22e78519c6 --- /dev/null +++ b/plugin/type_uuid/mysql-test/type_uuid/type_uuid_mariadb101104.result @@ -0,0 +1,422 @@ +# +# Start of 10.11 tests +# +# +# MDEV-33442 REPAIR TABLE corrupts UUIDs +# +CREATE PROCEDURE show_table(long_version INT) +BEGIN +SHOW CREATE TABLE t1; +SELECT VERSION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; +IF long_version>0 THEN +SELECT * FROM t1 ORDER BY b; +ELSE +SELECT * FROM t1 ORDER BY a DESC LIMIT 5; +END IF; +END; +$$ +# Upgrade a 10.11.4 table using REPAIR +CALL show_table(1); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +00001234-5566-0777-0888-99aabbccddee 0 +10101234-5566-0777-8888-99aabbccddee 1 +00201234-5566-0777-c888-99aabbccddee 2 +10301234-5566-0777-e888-99aabbccddee 3 +00401234-5566-1777-0888-99aabbccddee 4 +10501234-5566-1777-8888-99aabbccddee 5 +00601234-5566-1777-c888-99aabbccddee 6 +10701234-5566-1777-e888-99aabbccddee 7 +00801234-5566-2777-0888-99aabbccddee 8 +10901234-5566-2777-8888-99aabbccddee 9 +01001234-5566-2777-c888-99aabbccddee 10 +11101234-5566-2777-e888-99aabbccddee 11 +01201234-5566-3777-0888-99aabbccddee 12 +11301234-5566-3777-8888-99aabbccddee 13 +01401234-5566-3777-c888-99aabbccddee 14 +11501234-5566-3777-e888-99aabbccddee 15 +01601234-5566-4777-0888-99aabbccddee 16 +11701234-5566-4777-8888-99aabbccddee 17 +01801234-5566-4777-c888-99aabbccddee 18 +11901234-5566-4777-e888-99aabbccddee 19 +02001234-5566-5777-0888-99aabbccddee 20 +12101234-5566-5777-8888-99aabbccddee 21 +02201234-5566-5777-c888-99aabbccddee 22 +12301234-5566-5777-e888-99aabbccddee 23 +02401234-5566-6777-0888-99aabbccddee 24 +12501234-5566-6777-8888-99aabbccddee 25 +02601234-5566-6777-c888-99aabbccddee 26 +12701234-5566-6777-e888-99aabbccddee 27 +02801234-5566-7777-0888-99aabbccddee 28 +12901234-5566-7777-8888-99aabbccddee 29 +03001234-5566-7777-c888-99aabbccddee 30 +13101234-5566-7777-e888-99aabbccddee 31 +03201234-5566-8777-0888-99aabbccddee 32 +13301234-5566-8777-8888-99aabbccddee 33 +03401234-5566-8777-c888-99aabbccddee 34 +13501234-5566-8777-e888-99aabbccddee 35 +03601234-5566-9777-0888-99aabbccddee 36 +13701234-5566-9777-8888-99aabbccddee 37 +03801234-5566-9777-c888-99aabbccddee 38 +13901234-5566-9777-e888-99aabbccddee 39 +04001234-5566-a777-0888-99aabbccddee 40 +14101234-5566-a777-8888-99aabbccddee 41 +04201234-5566-a777-c888-99aabbccddee 42 +14301234-5566-a777-e888-99aabbccddee 43 +04401234-5566-b777-0888-99aabbccddee 44 +14501234-5566-b777-8888-99aabbccddee 45 +04601234-5566-b777-c888-99aabbccddee 46 +14701234-5566-b777-e888-99aabbccddee 47 +04801234-5566-c777-0888-99aabbccddee 48 +14901234-5566-c777-8888-99aabbccddee 49 +05001234-5566-c777-c888-99aabbccddee 50 +15101234-5566-c777-e888-99aabbccddee 51 +05201234-5566-d777-0888-99aabbccddee 52 +15301234-5566-d777-8888-99aabbccddee 53 +05401234-5566-d777-c888-99aabbccddee 54 +15501234-5566-d777-e888-99aabbccddee 55 +05601234-5566-e777-0888-99aabbccddee 56 +15701234-5566-e777-8888-99aabbccddee 57 +05801234-5566-e777-c888-99aabbccddee 58 
+15901234-5566-e777-e888-99aabbccddee 59 +06001234-5566-f777-0888-99aabbccddee 60 +16101234-5566-f777-8888-99aabbccddee 61 +06201234-5566-f777-c888-99aabbccddee 62 +16301234-5566-f777-e888-99aabbccddee 63 +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +16301234-5566-f777-e888-99aabbccddee 63 +15901234-5566-e777-e888-99aabbccddee 59 +15501234-5566-d777-e888-99aabbccddee 55 +15101234-5566-c777-e888-99aabbccddee 51 +14701234-5566-b777-e888-99aabbccddee 47 +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check error Upgrade required. Please do "REPAIR TABLE `t1`" or dump/reload to fix it! +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +16301234-5566-f777-e888-99aabbccddee 63 +15901234-5566-e777-e888-99aabbccddee 59 +15501234-5566-d777-e888-99aabbccddee 55 +15101234-5566-c777-e888-99aabbccddee 51 +14701234-5566-b777-e888-99aabbccddee 47 +REPAIR TABLE t1; +Table Op Msg_type Msg_text +test.t1 repair Warning Incorrect uuid value: '03201234-5566-8777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 33 +test.t1 repair Warning Incorrect uuid value: '03601234-5566-9777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 37 +test.t1 repair Warning Incorrect uuid value: '04001234-5566-a777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 41 +test.t1 repair Warning Incorrect uuid value: '04401234-5566-b777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 45 +test.t1 repair Warning Incorrect uuid value: '04801234-5566-c777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 49 +test.t1 repair Warning Incorrect uuid value: '05201234-5566-d777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 53 +test.t1 repair Warning Incorrect uuid value: '05601234-5566-e777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 57 +test.t1 repair Warning Incorrect uuid value: '06001234-5566-f777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 61 +test.t1 repair status OK +CALL show_table(1); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +00001234-5566-0777-0888-99aabbccddee 0 +10101234-5566-0777-8888-99aabbccddee 1 +00201234-5566-0777-c888-99aabbccddee 2 +10301234-5566-0777-e888-99aabbccddee 3 +00401234-5566-1777-0888-99aabbccddee 4 +10501234-5566-1777-8888-99aabbccddee 5 +00601234-5566-1777-c888-99aabbccddee 6 +10701234-5566-1777-e888-99aabbccddee 7 +00801234-5566-2777-0888-99aabbccddee 8 +10901234-5566-2777-8888-99aabbccddee 9 +01001234-5566-2777-c888-99aabbccddee 10 +11101234-5566-2777-e888-99aabbccddee 11 +01201234-5566-3777-0888-99aabbccddee 12 +11301234-5566-3777-8888-99aabbccddee 13 +01401234-5566-3777-c888-99aabbccddee 14 +11501234-5566-3777-e888-99aabbccddee 15 +01601234-5566-4777-0888-99aabbccddee 16 +11701234-5566-4777-8888-99aabbccddee 17 +01801234-5566-4777-c888-99aabbccddee 18 +11901234-5566-4777-e888-99aabbccddee 19 +02001234-5566-5777-0888-99aabbccddee 20 +12101234-5566-5777-8888-99aabbccddee 21 +02201234-5566-5777-c888-99aabbccddee 22 
+12301234-5566-5777-e888-99aabbccddee 23 +02401234-5566-6777-0888-99aabbccddee 24 +12501234-5566-6777-8888-99aabbccddee 25 +02601234-5566-6777-c888-99aabbccddee 26 +12701234-5566-6777-e888-99aabbccddee 27 +02801234-5566-7777-0888-99aabbccddee 28 +12901234-5566-7777-8888-99aabbccddee 29 +03001234-5566-7777-c888-99aabbccddee 30 +13101234-5566-7777-e888-99aabbccddee 31 +NULL 32 +13301234-5566-8777-8888-99aabbccddee 33 +03401234-5566-8777-c888-99aabbccddee 34 +13501234-5566-8777-e888-99aabbccddee 35 +NULL 36 +13701234-5566-9777-8888-99aabbccddee 37 +03801234-5566-9777-c888-99aabbccddee 38 +13901234-5566-9777-e888-99aabbccddee 39 +NULL 40 +14101234-5566-a777-8888-99aabbccddee 41 +04201234-5566-a777-c888-99aabbccddee 42 +14301234-5566-a777-e888-99aabbccddee 43 +NULL 44 +14501234-5566-b777-8888-99aabbccddee 45 +04601234-5566-b777-c888-99aabbccddee 46 +14701234-5566-b777-e888-99aabbccddee 47 +NULL 48 +14901234-5566-c777-8888-99aabbccddee 49 +05001234-5566-c777-c888-99aabbccddee 50 +15101234-5566-c777-e888-99aabbccddee 51 +NULL 52 +15301234-5566-d777-8888-99aabbccddee 53 +05401234-5566-d777-c888-99aabbccddee 54 +15501234-5566-d777-e888-99aabbccddee 55 +NULL 56 +15701234-5566-e777-8888-99aabbccddee 57 +05801234-5566-e777-c888-99aabbccddee 58 +15901234-5566-e777-e888-99aabbccddee 59 +NULL 60 +16101234-5566-f777-8888-99aabbccddee 61 +06201234-5566-f777-c888-99aabbccddee 62 +16301234-5566-f777-e888-99aabbccddee 63 +CHECK TABLE t1 FOR UPGRADE; +Table Op Msg_type Msg_text +test.t1 check status OK +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +DROP TABLE t1; +# Upgrade a 10.11.4 table using ALTER, adding a table COMMENT +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +16301234-5566-f777-e888-99aabbccddee 63 +15901234-5566-e777-e888-99aabbccddee 59 +15501234-5566-d777-e888-99aabbccddee 55 +15101234-5566-c777-e888-99aabbccddee 51 +14701234-5566-b777-e888-99aabbccddee 47 +# ALTER..INPLACE should fail - the old column 'b UUID' needs upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test10'; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. 
Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 COMMENT 'test11'; +Warnings: +Warning 1292 Incorrect uuid value: '03201234-5566-8777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 33 +Warning 1292 Incorrect uuid value: '03601234-5566-9777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 37 +Warning 1292 Incorrect uuid value: '04001234-5566-a777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 41 +Warning 1292 Incorrect uuid value: '04401234-5566-b777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 45 +Warning 1292 Incorrect uuid value: '04801234-5566-c777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 49 +Warning 1292 Incorrect uuid value: '05201234-5566-d777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 53 +Warning 1292 Incorrect uuid value: '05601234-5566-e777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 57 +Warning 1292 Incorrect uuid value: '06001234-5566-f777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 61 +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci COMMENT='test11' +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test12'; +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci COMMENT='test12' +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +DROP TABLE t1; +# Upgrade a 10.11.4 table using ALTER, adding a DEFAULT for 'b INT' +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +16301234-5566-f777-e888-99aabbccddee 63 +15901234-5566-e777-e888-99aabbccddee 59 +15501234-5566-d777-e888-99aabbccddee 55 +15101234-5566-c777-e888-99aabbccddee 51 +14701234-5566-b777-e888-99aabbccddee 47 +# ALTER..INPLACE should fail - the old column 'b UUID' needs upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY b INT NOT NULL DEFAULT 10; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. 
Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 MODIFY b INT NOT NULL DEFAULT 11; +Warnings: +Warning 1292 Incorrect uuid value: '03201234-5566-8777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 33 +Warning 1292 Incorrect uuid value: '03601234-5566-9777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 37 +Warning 1292 Incorrect uuid value: '04001234-5566-a777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 41 +Warning 1292 Incorrect uuid value: '04401234-5566-b777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 45 +Warning 1292 Incorrect uuid value: '04801234-5566-c777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 49 +Warning 1292 Incorrect uuid value: '05201234-5566-d777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 53 +Warning 1292 Incorrect uuid value: '05601234-5566-e777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 57 +Warning 1292 Incorrect uuid value: '06001234-5566-f777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 61 +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL DEFAULT 11, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY b INT NOT NULL DEFAULT 12; +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL DEFAULT 12, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +DROP TABLE t1; +# Upgrade a 10.11.4 table using ALTER, adding a DEFAULT for 'a UUID' +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT NULL, + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +16301234-5566-f777-e888-99aabbccddee 63 +15901234-5566-e777-e888-99aabbccddee 59 +15501234-5566-d777-e888-99aabbccddee 55 +15101234-5566-c777-e888-99aabbccddee 51 +14701234-5566-b777-e888-99aabbccddee 47 +# ALTER..INPLACE should fail - the old column 'b UUID' needs upgrade +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a UUID DEFAULT '16301234-5566-f777-e888-99aabbccdd00'; +ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. 
Try ALGORITHM=COPY +ALTER IGNORE TABLE t1 MODIFY a UUID DEFAULT '16301234-5566-f777-e888-99aabbccdd01'; +Warnings: +Warning 1292 Incorrect uuid value: '03201234-5566-8777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 33 +Warning 1292 Incorrect uuid value: '03601234-5566-9777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 37 +Warning 1292 Incorrect uuid value: '04001234-5566-a777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 41 +Warning 1292 Incorrect uuid value: '04401234-5566-b777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 45 +Warning 1292 Incorrect uuid value: '04801234-5566-c777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 49 +Warning 1292 Incorrect uuid value: '05201234-5566-d777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 53 +Warning 1292 Incorrect uuid value: '05601234-5566-e777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 57 +Warning 1292 Incorrect uuid value: '06001234-5566-f777-0888-99aabbccddee' for column `test`.`t1`.`a` at row 61 +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT '16301234-5566-f777-e888-99aabbccdd01', + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +# Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 MODIFY a UUID DEFAULT '16301234-5566-f777-e888-99aabbccdd02'; +CALL show_table(0); +Table Create Table +t1 CREATE TABLE `t1` ( + `a` uuid DEFAULT '16301234-5566-f777-e888-99aabbccdd02', + `b` int(11) NOT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +VERSION +10 +a b +12301234-5566-5777-e888-99aabbccddee 23 +11901234-5566-4777-e888-99aabbccddee 19 +11501234-5566-3777-e888-99aabbccddee 15 +11101234-5566-2777-e888-99aabbccddee 11 +10701234-5566-1777-e888-99aabbccddee 7 +DROP TABLE t1; +DROP PROCEDURE show_table; +# +# End of 10.11 tests +# diff --git a/plugin/type_uuid/mysql-test/type_uuid/type_uuid_mariadb101104.test b/plugin/type_uuid/mysql-test/type_uuid/type_uuid_mariadb101104.test new file mode 100644 index 00000000000..6181f6efd94 --- /dev/null +++ b/plugin/type_uuid/mysql-test/type_uuid/type_uuid_mariadb101104.test @@ -0,0 +1,110 @@ +let $datadir= `select @@datadir`; + +--echo # +--echo # Start of 10.11 tests +--echo # + +--echo # +--echo # MDEV-33442 REPAIR TABLE corrupts UUIDs +--echo # + +DELIMITER $$; +CREATE PROCEDURE show_table(long_version INT) +BEGIN + SHOW CREATE TABLE t1; + SELECT VERSION FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME='t1' AND TABLE_SCHEMA='test'; + IF long_version>0 THEN + SELECT * FROM t1 ORDER BY b; + ELSE + SELECT * FROM t1 ORDER BY a DESC LIMIT 5; + END IF; +END; +$$ +DELIMITER ;$$ + + +--echo # Upgrade a 10.11.4 table using REPAIR + +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.frm $datadir/test/t1.frm +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYI $datadir/test/t1.MYI +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYD $datadir/test/t1.MYD +CALL show_table(1); + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table(0); + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table(0); + +REPAIR TABLE t1; +CALL show_table(1); + +CHECK TABLE t1 FOR UPGRADE; +CALL show_table(0); + +DROP TABLE t1; + +--echo # Upgrade a 10.11.4 table using ALTER, adding a table COMMENT + +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.frm 
$datadir/test/t1.frm +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYI $datadir/test/t1.MYI +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYD $datadir/test/t1.MYD +CALL show_table(0); + +--echo # ALTER..INPLACE should fail - the old column 'b UUID' needs upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test10'; +ALTER IGNORE TABLE t1 COMMENT 'test11'; +CALL show_table(0); + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, COMMENT 'test12'; +CALL show_table(0); + +DROP TABLE t1; + + +--echo # Upgrade a 10.11.4 table using ALTER, adding a DEFAULT for 'b INT' + +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.frm $datadir/test/t1.frm +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYI $datadir/test/t1.MYI +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYD $datadir/test/t1.MYD +CALL show_table(0); + +--echo # ALTER..INPLACE should fail - the old column 'b UUID' needs upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY b INT NOT NULL DEFAULT 10; +ALTER IGNORE TABLE t1 MODIFY b INT NOT NULL DEFAULT 11; +CALL show_table(0); + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY b INT NOT NULL DEFAULT 12; +CALL show_table(0); + +DROP TABLE t1; + + +--echo # Upgrade a 10.11.4 table using ALTER, adding a DEFAULT for 'a UUID' + +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.frm $datadir/test/t1.frm +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYI $datadir/test/t1.MYI +--copy_file $MTR_SUITE_DIR/std_data/mdev-29959.MYD $datadir/test/t1.MYD +CALL show_table(0); + +--echo # ALTER..INPLACE should fail - the old column 'b UUID' needs upgrade +--error ER_ALTER_OPERATION_NOT_SUPPORTED +ALTER IGNORE TABLE t1 ALGORITHM=INPLACE, MODIFY a UUID DEFAULT '16301234-5566-f777-e888-99aabbccdd00'; +ALTER IGNORE TABLE t1 MODIFY a UUID DEFAULT '16301234-5566-f777-e888-99aabbccdd01'; +CALL show_table(0); + +--echo # Now ALTER..INPLACE should work +ALTER IGNORE TABLE t1 MODIFY a UUID DEFAULT '16301234-5566-f777-e888-99aabbccdd02'; +CALL show_table(0); + +DROP TABLE t1; + +DROP PROCEDURE show_table; + +--echo # +--echo # End of 10.11 tests +--echo # diff --git a/plugin/type_uuid/plugin.cc b/plugin/type_uuid/plugin.cc index 499019e948c..a1bde5411e6 100644 --- a/plugin/type_uuid/plugin.cc +++ b/plugin/type_uuid/plugin.cc @@ -98,6 +98,14 @@ const Type_handler *Type_collection_uuid::find_in_array(const Type_handler *a, return NULL; } + +const Type_handler *Type_collection_uuid::type_handler_for_implicit_upgrade( + const Type_handler *from) const +{ + return Type_handler_uuid_new::singleton(); +} + + /*************************************************************************/ class Create_func_uuid : public Create_func_arg0 diff --git a/plugin/type_uuid/sql_type_uuid.h b/plugin/type_uuid/sql_type_uuid.h index 67d7471d4d2..05d6cef63c2 100644 --- a/plugin/type_uuid/sql_type_uuid.h +++ b/plugin/type_uuid/sql_type_uuid.h @@ -316,6 +316,9 @@ public: const override { return NULL; } + const Type_handler *type_handler_for_implicit_upgrade( + const Type_handler *from) const; + static Type_collection_uuid *singleton() { static Type_collection_uuid tc; diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index 64b92fb445e..73b72f6c822 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -773,19 +773,22 @@ if @have_innodb then end if // DELIMITER ; -# MDEV-4332 longer user names +# MDEV-4332 
longer user names, extended by MDEV-24312 to longer again. alter table user modify User char(128) binary not null default ''; alter table db modify User char(128) binary not null default ''; alter table tables_priv modify User char(128) binary not null default ''; alter table columns_priv modify User char(128) binary not null default ''; -alter table procs_priv modify User char(128) binary not null default ''; +alter table procs_priv modify User char(128) binary not null default '', modify Host char(255) binary DEFAULT ''; alter table proc modify definer varchar(384) collate utf8mb3_bin not null default ''; -alter table proxies_priv modify User char(128) COLLATE utf8mb3_bin not null default ''; +alter table proxies_priv modify User char(128) COLLATE utf8mb3_bin not null default '', modify Host char(255) binary DEFAULT ''; alter table proxies_priv modify Proxied_user char(128) COLLATE utf8mb3_bin not null default ''; alter table proxies_priv modify Grantor varchar(384) COLLATE utf8mb3_bin not null default ''; alter table servers modify Username char(128) not null default ''; alter table procs_priv modify Grantor varchar(384) COLLATE utf8mb3_bin not null default ''; alter table tables_priv modify Grantor varchar(384) COLLATE utf8mb3_bin not null default ''; +# MDEV-33726 longer names from MDEV-24312 extension +alter table if exists global_priv modify Host char(255) binary DEFAULT '', modify User char(128) binary not null default ''; +alter table if exists roles_mapping modify Host char(255) binary not null DEFAULT '', modify User char(128) binary not null default ''; # Activate the new, possible modified privilege tables # This should not be needed, but gives us some extra testing that the above diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 53172da7b96..bfd3b1d7b4e 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -1167,12 +1167,6 @@ if [ "$WSREP_SST_OPT_ROLE" = 'donor' ]; then iopts="--databases-exclude='lost+found'${iopts:+ }$iopts" - if [ ${FORCE_FTWRL:-0} -eq 1 ]; then - wsrep_log_info "Forcing FTWRL due to environment variable" \ - "FORCE_FTWRL equal to $FORCE_FTWRL" - iopts="--no-backup-locks${iopts:+ }$iopts" - fi - # if compression is enabled for backup files, then add the # appropriate options to the mariadb-backup command line: if [ "$compress" != 'none' ]; then diff --git a/sql/backup.cc b/sql/backup.cc index 5ce770c3c4c..f634a11f867 100644 --- a/sql/backup.cc +++ b/sql/backup.cc @@ -39,6 +39,7 @@ #ifdef WITH_WSREP #include "wsrep_server_state.h" #include "wsrep_mysqld.h" +#include "wsrep_sst.h" #endif /* WITH_WSREP */ static const char *stage_names[]= @@ -293,29 +294,40 @@ static bool backup_block_ddl(THD *thd) #ifdef WITH_WSREP DBUG_ASSERT(thd->wsrep_desynced_backup_stage == false); - /* - if user is specifically choosing to allow BF aborting for BACKUP STAGE BLOCK_DDL lock - holder, then do not desync and pause the node from cluster replication. - e.g. mariabackup uses BACKUP STATE BLOCK_DDL; and will be abortable by this. 
- But, If node is processing as SST donor or WSREP_MODE_BF_MARIABACKUP mode is not set, - we desync the node for BACKUP STAGE because applier threads - bypass backup MDL locks (see MDL_lock::can_grant_lock) - */ if (WSREP_NNULL(thd)) { Wsrep_server_state &server_state= Wsrep_server_state::instance(); - if (!wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP) || - server_state.state() == Wsrep_server_state::s_donor) + /* + If user is specifically choosing to allow BF aborting for + BACKUP STAGE BLOCK_DDL lock holder, then do not desync and + pause the node from cluster replication. e.g. mariabackup + uses BACKUP STATE BLOCK_DDL; and will be abortable by this. + */ + bool mariabackup= (server_state.state() == Wsrep_server_state::s_donor + && !strcmp(wsrep_sst_method, "mariabackup")); + bool allow_bf= wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP); + bool pause_and_desync= true; + + if ((allow_bf) || (mariabackup)) { - if (server_state.desync_and_pause().is_undefined()) { + pause_and_desync= false; + } + + if (pause_and_desync) + { + if (server_state.desync_and_pause().is_undefined()) DBUG_RETURN(1); - } + + WSREP_INFO("Server desynched from group during BACKUP STAGE BLOCK_DDL."); DEBUG_SYNC(thd, "wsrep_backup_stage_after_desync_and_pause"); thd->wsrep_desynced_backup_stage= true; } else - WSREP_INFO("Server not desynched from group because WSREP_MODE_BF_MARIABACKUP used."); + { + WSREP_INFO("Server not desynched from group at BLOCK_DDL because %s is used.", + allow_bf ? "WSREP_MODE_BF_MARIABACKUP" : wsrep_sst_method); + } } #endif /* WITH_WSREP */ @@ -399,6 +411,28 @@ static bool backup_block_commit(THD *thd) } thd->clear_error(); +#ifdef WITH_WSREP + if (WSREP_NNULL(thd) && !thd->wsrep_desynced_backup_stage) + { + Wsrep_server_state &server_state= Wsrep_server_state::instance(); + bool mariabackup= (server_state.state() == Wsrep_server_state::s_donor + && !strcmp(wsrep_sst_method, "mariabackup")); + + /* If this node is donor and mariabackup is not used + we desync and pause provider here if it is not yet done. 
+ */ + if (!mariabackup) + { + if (server_state.desync_and_pause().is_undefined()) + DBUG_RETURN(1); + + WSREP_INFO("Server desynched from group during BACKUP STAGE BLOCK_COMMIT."); + thd->wsrep_desynced_backup_stage= true; + DEBUG_SYNC(thd, "wsrep_backup_stage_commit_after_desync_and_pause"); + } + } +#endif /* WITH_WSREP */ + DBUG_RETURN(0); } diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index 362463a7f22..9d95b3c70fc 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -1152,7 +1152,7 @@ static bool debug_sync_eval_action(THD *thd, char *action_str, char *action_end) st_debug_sync_action *action= NULL; const char *errmsg; char *ptr; - char *token; + char *token= nullptr; uint token_length= 0; DBUG_ENTER("debug_sync_eval_action"); DBUG_ASSERT(thd); diff --git a/sql/field.h b/sql/field.h index d57a471a694..16cf689db9c 100644 --- a/sql/field.h +++ b/sql/field.h @@ -5754,7 +5754,8 @@ public: { List_iterator it(list); while (Create_field *f= it++) - f->type_handler()->Column_definition_implicit_upgrade(f); + f->type_handler()->type_handler_for_implicit_upgrade()-> + Column_definition_implicit_upgrade_to_this(f); } }; diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index f2545892d1b..030aaabfeac 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -398,6 +398,7 @@ void ha_partition::init_handler_variables() m_start_key.length= 0; m_myisam= FALSE; m_innodb= FALSE; + m_myisammrg= FALSE; m_extra_cache= FALSE; m_extra_cache_size= 0; m_extra_prepare_for_update= FALSE; @@ -3040,6 +3041,10 @@ bool ha_partition::create_handlers(MEM_ROOT *mem_root) DBUG_PRINT("info", ("InnoDB")); m_innodb= TRUE; } + else if (ha_legacy_type(hton0) == DB_TYPE_MRG_MYISAM) + { + m_myisammrg= TRUE; + } DBUG_RETURN(FALSE); } @@ -9316,8 +9321,9 @@ int ha_partition::extra(enum ha_extra_function operation) switch (operation) { /* Category 1), used by most handlers */ case HA_EXTRA_NO_KEYREAD: - DBUG_RETURN(loop_partitions(end_keyread_cb, NULL)); + DBUG_RETURN(loop_read_partitions(end_keyread_cb, NULL)); case HA_EXTRA_KEYREAD: + DBUG_RETURN(loop_read_partitions(extra_cb, &operation)); case HA_EXTRA_FLUSH: case HA_EXTRA_PREPARE_FOR_FORCED_CLOSE: DBUG_RETURN(loop_partitions(extra_cb, &operation)); @@ -9426,9 +9432,14 @@ int ha_partition::extra(enum ha_extra_function operation) } /* Category 9) Operations only used by MERGE */ case HA_EXTRA_ADD_CHILDREN_LIST: + if (!m_myisammrg) + DBUG_RETURN(0); DBUG_RETURN(loop_partitions(extra_cb, &operation)); case HA_EXTRA_ATTACH_CHILDREN: { + if (!m_myisammrg) + DBUG_RETURN(0); + int result; uint num_locks; handler **file; @@ -9447,8 +9458,9 @@ int ha_partition::extra(enum ha_extra_function operation) break; } case HA_EXTRA_IS_ATTACHED_CHILDREN: - DBUG_RETURN(loop_partitions(extra_cb, &operation)); case HA_EXTRA_DETACH_CHILDREN: + if (!m_myisammrg) + DBUG_RETURN(0); DBUG_RETURN(loop_partitions(extra_cb, &operation)); case HA_EXTRA_MARK_AS_LOG_TABLE: /* @@ -9533,7 +9545,7 @@ int ha_partition::extra_opt(enum ha_extra_function operation, ulong arg) switch (operation) { case HA_EXTRA_KEYREAD: - DBUG_RETURN(loop_partitions(start_keyread_cb, &arg)); + DBUG_RETURN(loop_read_partitions(start_keyread_cb, &arg)); case HA_EXTRA_CACHE: prepare_extra_cache(arg); DBUG_RETURN(0); @@ -9621,14 +9633,53 @@ int ha_partition::loop_extra_alter(enum ha_extra_function operation) */ int ha_partition::loop_partitions(handler_callback callback, void *param) +{ + int result= loop_partitions_over_map(&m_part_info->lock_partitions, + callback, param); + /* Add all used partitions to be called 
in reset(). */ + bitmap_union(&m_partitions_to_reset, &m_part_info->lock_partitions); + return result; +} + + +/* + Call callback(part, param) on read_partitions (the ones used by the query) +*/ + +int ha_partition::loop_read_partitions(handler_callback callback, void *param) +{ + /* + There is no need to record partitions on m_partitions_to_reset as + read_partitions were opened, etc - they will be reset anyway. + */ + return loop_partitions_over_map(&m_part_info->read_partitions, callback, + param); +} + + +/** + Call callback(part, param) on specified set of partitions + + @part_map The set of partitions to call callback for + @param callback a callback to call for each partition + @param param a void*-parameter passed to callback + + @return Operation status + @retval >0 Error code + @retval 0 Success +*/ + +int ha_partition::loop_partitions_over_map(const MY_BITMAP *part_map, + handler_callback callback, + void *param) { int result= 0, tmp; uint i; - DBUG_ENTER("ha_partition::loop_partitions"); + DBUG_ENTER("ha_partition::loop_partitions_over_map"); - for (i= bitmap_get_first_set(&m_part_info->lock_partitions); + for (i= bitmap_get_first_set(part_map); i < m_tot_parts; - i= bitmap_get_next_set(&m_part_info->lock_partitions, i)) + i= bitmap_get_next_set(part_map, i)) { /* This can be called after an error in ha_open. @@ -9638,8 +9689,6 @@ int ha_partition::loop_partitions(handler_callback callback, void *param) (tmp= callback(m_file[i], param))) result= tmp; } - /* Add all used partitions to be called in reset(). */ - bitmap_union(&m_partitions_to_reset, &m_part_info->lock_partitions); DBUG_RETURN(result); } diff --git a/sql/ha_partition.h b/sql/ha_partition.h index d450f96f4f9..f43dbc8e423 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -399,6 +399,7 @@ private: */ bool m_innodb; // Are all underlying handlers // InnoDB + bool m_myisammrg; // Are any of the handlers of type MERGE /* When calling extra(HA_EXTRA_CACHE) we do not pass this to the underlying handlers immediately. Instead we cache it and call the underlying @@ -988,6 +989,10 @@ private: handler *file, uint *n); static const uint NO_CURRENT_PART_ID= NOT_A_PARTITION_ID; int loop_partitions(handler_callback callback, void *param); + int loop_partitions_over_map(const MY_BITMAP *map, + handler_callback callback, + void *param); + int loop_read_partitions(handler_callback callback, void *param); int loop_extra_alter(enum ha_extra_function operations); void late_extra_cache(uint partition_id); void late_extra_no_cache(uint partition_id); diff --git a/sql/handler.cc b/sql/handler.cc index 7482258ed30..47b0720aff1 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -3515,6 +3515,17 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, DBUG_ASSERT(alloc_root_inited(&table->mem_root)); set_partitions_to_open(partitions_to_open); + internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE); + + if (!internal_tmp_table && (test_if_locked & HA_OPEN_TMP_TABLE) && + current_thd->slave_thread) + { + /* + This is a temporary table used by replication that is not attached + to a THD. Mark it as a global temporary table. + */ + test_if_locked|= HA_OPEN_GLOBAL_TMP_TABLE; + } if (unlikely((error=open(name,mode,test_if_locked)))) { @@ -3574,7 +3585,6 @@ int handler::ha_open(TABLE *table_arg, const char *name, int mode, /* Copy current optimizer costs. 
Needed in case clone() is used */ reset_statistics(); } - internal_tmp_table= MY_TEST(test_if_locked & HA_OPEN_INTERNAL_TABLE); DBUG_RETURN(error); } @@ -4929,7 +4939,8 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) KEY *keyinfo, *keyend; KEY_PART_INFO *keypart, *keypartend; - if (table->s->incompatible_version) + if (table->s->incompatible_version || + check_old_types()) return HA_ADMIN_NEEDS_ALTER; if (!table->s->mysql_version) @@ -4955,6 +4966,12 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) } } } + + /* + True VARCHAR appeared in MySQL-5.0.3. + If the FRM is older than 5.0.3, force alter even if the check_old_type() + call above did not find data types that want upgrade. + */ if (table->s->frm_version < FRM_VER_TRUE_VARCHAR) return HA_ADMIN_NEEDS_ALTER; @@ -4968,26 +4985,15 @@ int handler::ha_check_for_upgrade(HA_CHECK_OPT *check_opt) } -int handler::check_old_types() +bool handler::check_old_types() const { - Field** field; - - if (!table->s->mysql_version) + for (Field **field= table->field; (*field); field++) { - /* check for bad DECIMAL field */ - for (field= table->field; (*field); field++) - { - if ((*field)->type() == MYSQL_TYPE_NEWDECIMAL) - { - return HA_ADMIN_NEEDS_ALTER; - } - if ((*field)->type() == MYSQL_TYPE_VAR_STRING) - { - return HA_ADMIN_NEEDS_ALTER; - } - } + const Type_handler *th= (*field)->type_handler(); + if (th != th->type_handler_for_implicit_upgrade()) + return true; } - return 0; + return false; } @@ -5188,8 +5194,6 @@ int handler::ha_check(THD *thd, HA_CHECK_OPT *check_opt) if (table->s->mysql_version < MYSQL_VERSION_ID) { - if (unlikely((error= check_old_types()))) - return error; error= ha_check_for_upgrade(check_opt); if (unlikely(error && (error != HA_ADMIN_NEEDS_CHECK))) return error; @@ -5693,6 +5697,9 @@ handler::ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info_arg) { DBUG_ASSERT(m_lock_type == F_UNLCK); mark_trx_read_write(); + if ((info_arg->options & HA_LEX_CREATE_TMP_TABLE) && + current_thd->slave_thread) + info_arg->options|= HA_LEX_CREATE_GLOBAL_TMP_TABLE; int error= create(name, form, info_arg); if (!error && !(info_arg->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER))) diff --git a/sql/handler.h b/sql/handler.h index 44a38cf7507..9497d6f736a 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -497,6 +497,12 @@ enum chf_create_flags { #define HA_LEX_CREATE_SEQUENCE 16U #define HA_VERSIONED_TABLE 32U #define HA_SKIP_KEY_SORT 64U +/* + A temporary table that can be used by different threads, eg. replication + threads. This flag ensure that memory is not allocated with THREAD_SPECIFIC, + as we do for other temporary tables. 
+*/ +#define HA_LEX_CREATE_GLOBAL_TMP_TABLE 128U #define HA_MAX_REC_LENGTH 65535 @@ -3388,6 +3394,7 @@ public: */ MEM_UNDEFINED(&optimizer_where_cost, sizeof(optimizer_where_cost)); MEM_UNDEFINED(&optimizer_scan_setup_cost, sizeof(optimizer_scan_setup_cost)); + active_handler_stats.active= 0; } virtual ~handler(void) { @@ -4358,7 +4365,6 @@ public: } virtual void update_create_info(HA_CREATE_INFO *create_info) {} - int check_old_types(); virtual int assign_to_keycache(THD* thd, HA_CHECK_OPT* check_opt) { return HA_ADMIN_NOT_IMPLEMENTED; } virtual int preload_keys(THD* thd, HA_CHECK_OPT* check_opt) @@ -5052,9 +5058,12 @@ public: } inline void ha_handler_stats_disable() { - handler_stats= 0; - active_handler_stats.active= 0; - handler_stats_updated(); + if (handler_stats) + { + handler_stats= 0; + active_handler_stats.active= 0; + handler_stats_updated(); + } } private: @@ -5068,6 +5077,7 @@ private: } } + bool check_old_types() const; void mark_trx_read_write_internal(); bool check_table_binlog_row_based_internal(); diff --git a/sql/item.cc b/sql/item.cc index b6041effe4a..79343366321 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -27,6 +27,7 @@ #include "sp_rcontext.h" #include "sp_head.h" #include "sql_trigger.h" +#include "sql_parse.h" #include "sql_select.h" #include "sql_show.h" // append_identifier #include "sql_view.h" // VIEW_ANY_SQL @@ -495,7 +496,10 @@ void Item::print_parenthesised(String *str, enum_query_type query_type, bool need_parens= precedence() < parent_prec; if (need_parens) str->append('('); - print(str, query_type); + if (check_stack_overrun(current_thd, STACK_MIN_SIZE, NULL)) + str->append(STRING_WITH_LEN("")); + else + print(str, query_type); if (need_parens) str->append(')'); } @@ -5186,9 +5190,19 @@ bool Item_param::assign_default(Field *field) } if (m_default_field->default_value) - m_default_field->set_default(); - - return field_conv(field, m_default_field); + { + return m_default_field->default_value->expr->save_in_field(field, 0); + } + else if (m_default_field->is_null()) + { + field->set_null(); + return false; + } + else + { + field->set_notnull(); + return field_conv(field, m_default_field); + } } @@ -7017,6 +7031,7 @@ Item_basic_constant * Item_string::make_string_literal_concat(THD *thd, const LEX_CSTRING *str) { append(str->str, (uint32) str->length); + set_name(thd, &str_value); if (!(collation.repertoire & MY_REPERTOIRE_EXTENDED)) { // If the string has been pure ASCII so far, check the new part. diff --git a/sql/item.h b/sql/item.h index e7c8569694d..71e5a80843b 100644 --- a/sql/item.h +++ b/sql/item.h @@ -4149,6 +4149,12 @@ public: Item_param(THD *thd, const LEX_CSTRING *name_arg, uint pos_in_query_arg, uint len_in_query_arg); + void cleanup() override + { + m_default_field= NULL; + Item::cleanup(); + } + Type type() const override { // Don't pretend to be a constant unless value for this item is set. 
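
The upgrade detection introduced above reduces to one rule: a column needs to be rebuilt exactly when its current type handler is not the handler it would be implicitly upgraded to. The stand-alone sketch below models that dispatch with toy classes; TypeHandler, UuidLegacyHandler and the other names are illustrative only and are not the server's classes. The default implementation returns this, and only a legacy format (such as the pre-fix UUID layout, which the UUID plugin redirects via type_handler_for_implicit_upgrade()) points at its replacement.

// Toy model of the implicit-upgrade dispatch, not MariaDB's real classes.
#include <iostream>
#include <vector>

struct TypeHandler
{
  virtual ~TypeHandler()= default;
  virtual const char *name() const= 0;
  // By default a type is already in its latest format: it "upgrades" to itself.
  virtual const TypeHandler *type_handler_for_implicit_upgrade() const
  { return this; }
};

struct UuidCurrentHandler : TypeHandler
{
  const char *name() const override { return "uuid (current format)"; }
};

static UuidCurrentHandler uuid_current;

struct UuidLegacyHandler : TypeHandler
{
  const char *name() const override { return "uuid (legacy on-disk format)"; }
  // A legacy format redirects to the handler it must be rebuilt as.
  const TypeHandler *type_handler_for_implicit_upgrade() const override
  { return &uuid_current; }
};

static UuidLegacyHandler uuid_legacy;

struct Column { const char *name; const TypeHandler *th; };

// Same shape as the rewritten handler::check_old_types(): true means
// CHECK TABLE ... FOR UPGRADE should report that a REPAIR/ALTER is needed.
static bool needs_upgrade(const std::vector<Column> &cols)
{
  for (const Column &c : cols)
    if (c.th != c.th->type_handler_for_implicit_upgrade())
      return true;
  return false;
}

int main()
{
  std::vector<Column> t1{ {"a", &uuid_legacy}, {"b", &uuid_current} };
  for (const Column &c : t1)
    std::cout << c.name << " (" << c.th->name() << ") needs rebuild: "
              << (c.th != c.th->type_handler_for_implicit_upgrade()) << "\n";
  std::cout << "t1 needs upgrade: " << needs_upgrade(t1) << "\n"; // prints 1
  return 0;
}

Keeping the decision inside the type handler is what lets handler::check_old_types(), CHECK TABLE ... FOR UPGRADE and plugin-provided types share one code path instead of the old per-type special cases for NEWDECIMAL and VAR_STRING.
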
@@ -6084,6 +6090,8 @@ class Item_direct_view_ref :public Item_direct_ref if (!view->is_inner_table_of_outer_join() || !(null_ref_table= view->get_real_join_table())) null_ref_table= NO_NULL_TABLE; + if (null_ref_table && null_ref_table != NO_NULL_TABLE) + set_maybe_null(); } bool check_null_ref() diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index c1654bfd874..64419605a6f 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -6136,8 +6136,8 @@ bool Regexp_processor_pcre::compile(String *pattern, bool send_error) if (!stringcmp(pattern, &m_prev_pattern)) return false; cleanup(); - m_prev_pattern.copy(*pattern); } + m_prev_pattern.copy(*pattern); if (!(pattern= convert_if_needed(pattern, &pattern_converter))) return true; diff --git a/sql/item_func.cc b/sql/item_func.cc index 462055e806f..0d798847a5a 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -1917,6 +1917,18 @@ void Item_func_abs::fix_length_and_dec_int() set_handler(type_handler_long_or_longlong()); } +void Item_func_abs::fix_length_and_dec_sint_ge0() +{ + /* + We're converting slong_ge0 to slong/slonglong. + Add one character for the sign into max_length. + */ + max_length= args[0]->decimal_precision() + 1/*sign*/; + DBUG_ASSERT(!args[0]->unsigned_flag); + unsigned_flag= false; + set_handler(type_handler_long_or_longlong()); +} + void Item_func_abs::fix_length_and_dec_double() { @@ -2594,6 +2606,22 @@ void Item_func_round::fix_arg_int(const Type_handler *preferred, } +void Item_func_round::fix_arg_slong_ge0() +{ + DBUG_ASSERT(!args[0]->unsigned_flag); + DBUG_ASSERT(args[0]->decimals == 0); + Type_std_attributes::set(args[0]); + /* + We're converting the data type from slong_ge0 to slong/slonglong. + Add one character for the sign, + to change max_length notation from "max_length digits" to + "max_length-1 digits and the sign". + */ + max_length+= 1/*sign*/ + test_if_length_can_increase(); + set_handler(type_handler_long_or_longlong()); +} + + void Item_func_round::fix_arg_hex_hybrid() { DBUG_ASSERT(args[0]->decimals == 0); @@ -4794,7 +4822,9 @@ Item_func_set_user_var::fix_length_and_dec(THD *thd) if (args[0]->collation.derivation == DERIVATION_NUMERIC) { collation.set(DERIVATION_NUMERIC); - fix_length_and_charset(args[0]->max_char_length(), &my_charset_numeric); + uint sign_length= args[0]->type_handler() == &type_handler_slong_ge0 ? 
1: 0; + fix_length_and_charset(args[0]->max_char_length() + sign_length, + &my_charset_numeric); } else { diff --git a/sql/item_func.h b/sql/item_func.h index 1f185eff869..0f34f3f6c0e 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -89,7 +89,7 @@ public: static void wrong_param_count_error(const LEX_CSTRING &schema_name, const LEX_CSTRING &func_name); - table_map not_null_tables_cache; + table_map not_null_tables_cache= 0; enum Functype { UNKNOWN_FUNC,EQ_FUNC,EQUAL_FUNC,NE_FUNC,LT_FUNC,LE_FUNC, GE_FUNC,GT_FUNC,FT_FUNC, @@ -1263,6 +1263,24 @@ public: }; +class Item_long_ge0_func: public Item_int_func +{ +public: + Item_long_ge0_func(THD *thd): Item_int_func(thd) { } + Item_long_ge0_func(THD *thd, Item *a): Item_int_func(thd, a) {} + Item_long_ge0_func(THD *thd, Item *a, Item *b): Item_int_func(thd, a, b) {} + Item_long_ge0_func(THD *thd, Item *a, Item *b, Item *c): Item_int_func(thd, a, b, c) {} + Item_long_ge0_func(THD *thd, List &list): Item_int_func(thd, list) { } + Item_long_ge0_func(THD *thd, Item_long_ge0_func *item) :Item_int_func(thd, item) {} + const Type_handler *type_handler() const override + { + DBUG_ASSERT(!unsigned_flag); + return &type_handler_slong_ge0; + } + bool fix_length_and_dec(THD *) override { max_length= 10; return FALSE; } +}; + + class Item_func_hash: public Item_int_func { public: @@ -1407,6 +1425,13 @@ public: { fix_char_length(MAX_BIGINT_WIDTH); } + void fix_length_and_dec_sint_ge0() + { + uint32 digits= args[0]->decimal_precision(); + DBUG_ASSERT(digits > 0); + DBUG_ASSERT(digits <= MY_INT64_NUM_DECIMAL_DIGITS); + fix_char_length(digits + (unsigned_flag ? 0 : 1/*sign*/)); + } void fix_length_and_dec_generic() { uint32 char_length= MY_MIN(args[0]->max_char_length(), @@ -1823,6 +1848,7 @@ public: return name; } void fix_length_and_dec_int(); + void fix_length_and_dec_sint_ge0(); void fix_length_and_dec_double(); void fix_length_and_dec_decimal(); bool fix_length_and_dec(THD *thd) override; @@ -2152,6 +2178,7 @@ public: void fix_arg_int(const Type_handler *preferred, const Type_std_attributes *preferred_attributes, bool use_decimal_on_length_increase); + void fix_arg_slong_ge0(); void fix_arg_hex_hybrid(); void fix_arg_double(); void fix_arg_time(); diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index 97d7b89f511..f5d321b57e1 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -20,20 +20,14 @@ #include "item.h" #include "sql_parse.h" // For check_stack_overrun -/* - Allocating memory and *also* using it (reading and - writing from it) because some build instructions cause - compiler to optimize out stack_used_up. Since alloca() - here depends on stack_used_up, it doesnt get executed - correctly and causes json_debug_nonembedded to fail - ( --error ER_STACK_OVERRUN_NEED_MORE does not occur). 
-*/ -#define ALLOCATE_MEM_ON_STACK(A) do \ - { \ - uchar *array= (uchar*)alloca(A); \ - bzero(array, A); \ - my_checksum(0, array, A); \ - } while(0) +#ifndef DBUG_OFF +static int dbug_json_check_min_stack_requirement() +{ + my_error(ER_STACK_OVERRUN_NEED_MORE, MYF(ME_FATAL), + my_thread_stack_size, my_thread_stack_size, STACK_MIN_SIZE); + return 1; +} +#endif /* Compare ASCII string against the string with the specified @@ -152,11 +146,8 @@ int json_path_parts_compare( const json_path_step_t *temp_b= b; DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + return dbug_json_check_min_stack_requirement();); + if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) return 1; @@ -845,7 +836,7 @@ bool Item_func_json_unquote::fix_length_and_dec(THD *thd) { collation.set(&my_charset_utf8mb3_general_ci, DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); - max_length= args[0]->max_length; + max_length= args[0]->max_char_length() * collation.collation->mbmaxlen; set_maybe_null(); return FALSE; } @@ -1306,11 +1297,7 @@ static int check_contains(json_engine_t *js, json_engine_t *value) json_engine_t loc_js; bool set_js; DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + return dbug_json_check_min_stack_requirement();); if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) return 1; @@ -1912,7 +1899,23 @@ bool Item_func_json_array::fix_length_and_dec(THD *thd) return TRUE; for (n_arg=0 ; n_arg < arg_count ; n_arg++) - char_length+= static_cast(args[n_arg]->max_char_length()) + 4; + { + ulonglong arg_length; + Item *arg= args[n_arg]; + + if (arg->result_type() == STRING_RESULT && + !Type_handler_json_common::is_json_type_handler(arg->type_handler())) + arg_length= arg->max_char_length() * 2; /*escaping possible */ + else if (arg->type_handler()->is_bool_type()) + arg_length= 5; + else + arg_length= arg->max_char_length(); + + if (arg_length < 4) + arg_length= 4; /* can be 'null' */ + + char_length+= arg_length + 4; + } fix_char_length_ulonglong(char_length); tmp_val.set_charset(collation.collation); @@ -2309,13 +2312,8 @@ err_return: static int do_merge(String *str, json_engine_t *je1, json_engine_t *je2) { - DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + return dbug_json_check_min_stack_requirement();); if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) return 1; @@ -2654,11 +2652,7 @@ static int do_merge_patch(String *str, json_engine_t *je1, json_engine_t *je2, bool *empty_result) { DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + return dbug_json_check_min_stack_requirement();); if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) return 1; @@ -3141,8 +3135,12 @@ bool Item_func_json_insert::fix_length_and_dec(THD *thd) for (n_arg= 1; n_arg < arg_count; n_arg+= 2) { 
paths[n_arg/2].set_constant_flag(args[n_arg]->const_item()); - char_length+= - static_cast(args[n_arg+1]->max_char_length()) + 4; + /* + In the resulting JSON we can insert the property + name from the path, and the value itself. + */ + char_length+= args[n_arg/2]->max_char_length() + 6; + char_length+= args[n_arg/2+1]->max_char_length() + 4; } fix_char_length_ulonglong(char_length); @@ -3987,7 +3985,20 @@ bool Item_func_json_format::fix_length_and_dec(THD *thd) { decimals= 0; collation.set(args[0]->collation); - max_length= args[0]->max_length; + switch (fmt) + { + case COMPACT: + max_length= args[0]->max_length; + break; + case LOOSE: + max_length= args[0]->max_length * 2; + break; + case DETAILED: + max_length= MAX_BLOB_WIDTH; + break; + default: + DBUG_ASSERT(0); + }; set_maybe_null(); return FALSE; } @@ -4705,11 +4716,7 @@ int json_find_overlap_with_object(json_engine_t *js, json_engine_t *value, int check_overlaps(json_engine_t *js, json_engine_t *value, bool compare_whole) { DBUG_EXECUTE_IF("json_check_min_stack_requirement", - { - long arbitrary_var; - long stack_used_up= (available_stack_size(current_thd->thread_stack, &arbitrary_var)); - ALLOCATE_MEM_ON_STACK(my_thread_stack_size-stack_used_up-STACK_MIN_SIZE); - }); + return dbug_json_check_min_stack_requirement();); if (check_stack_overrun(current_thd, STACK_MIN_SIZE , NULL)) return 1; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 37999967c9b..80ab435f9ac 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -3878,7 +3878,9 @@ bool Item_func_set_collation::fix_length_and_dec(THD *thd) return true; collation.set(cl.collation().charset_info(), DERIVATION_EXPLICIT, args[0]->collation.repertoire); - max_length= args[0]->max_length; + ulonglong max_char_length= (ulonglong) args[0]->max_char_length(); + fix_char_length_ulonglong(max_char_length * collation.collation->mbmaxlen); + return FALSE; } diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 038130d7988..30931e4243b 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -6769,9 +6769,10 @@ exists_complementing_null_row(MY_BITMAP *keys_to_complement) return FALSE; } - return bitmap_exists_intersection((const MY_BITMAP**) null_bitmaps, + return bitmap_exists_intersection(null_bitmaps, count_null_keys, - (uint)highest_min_row, (uint)lowest_max_row); + (uint)highest_min_row, + (uint)lowest_max_row); } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index bcaf229dd15..275451ab271 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -1223,6 +1223,21 @@ bool Item_sum_hybrid::fix_length_and_dec_numeric(const Type_handler *handler) } +bool Item_sum_hybrid::fix_length_and_dec_sint_ge0() +{ + // We don't have Item_field's of "ge0" type handlers. + DBUG_ASSERT(args[0]->real_item()->type() != FIELD_ITEM); + Type_std_attributes::set(args[0]); + /* + We're converting from e.g. slong_ge0 to slonglong + and need to add one extra character for the sign. + */ + max_length++; + set_handler(&type_handler_slonglong); + return false; +} + + /** MAX(str_field) converts ENUM/SET to CHAR, and preserve all other types for Fields. 
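
Several of the hunks above apply the same length rule for the new "non-negative signed integer" handler (type_handler_slong_ge0): its max_length counts digits only, so whenever such an item is converted back to an ordinary signed type -- ABS(), ROUND(), MIN()/MAX(), or assignment to a user variable -- one extra character has to be reserved for a possible '-' sign. Below is a minimal sketch of that bookkeeping; Attrs and to_plain_signed are invented names standing in for the server's attribute classes.

#include <cassert>
#include <cstdint>

struct Attrs
{
  uint32_t max_length;  // display width in characters
  bool nonneg;          // models type_handler_slong_ge0: value is known >= 0
};

// Width after converting a "ge0" item to plain slong/slonglong (ABS, ROUND,
// MIN/MAX, SET @v:= ...): keep the digit count, add room for the sign that
// the ge0 representation never needed.
static Attrs to_plain_signed(Attrs a)
{
  if (a.nonneg)
  {
    a.max_length+= 1;  // '-' was not counted in the ge0 width
    a.nonneg= false;
  }
  return a;
}

int main()
{
  Attrs week{2, true};             // WEEK() yields 0..53: two digits, no sign
  Attrs as_var= to_plain_signed(week);
  assert(as_var.max_length == 3);  // 2 digits + 1 character for the sign
  (void) as_var;
  return 0;
}

This is why the conversions above add 1/*sign*/ to an existing width instead of recomputing the precision from scratch.
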
@@ -4061,6 +4076,7 @@ void Item_func_group_concat::cleanup() unique_filter= NULL; } } + row_count= 0; DBUG_ASSERT(tree == 0); } /* @@ -4586,7 +4602,7 @@ void Item_func_group_concat::print(String *str, enum_query_type query_type) if (sum_func() == GROUP_CONCAT_FUNC) { str->append(STRING_WITH_LEN(" separator \'")); - str->append_for_single_quote(separator->ptr(), separator->length()); + str->append_for_single_quote_opt_convert(*separator); str->append(STRING_WITH_LEN("\'")); } diff --git a/sql/item_sum.h b/sql/item_sum.h index 2dad0bc3582..bba93f6f4ab 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -1138,6 +1138,7 @@ public: { return Type_handler_hybrid_field_type::type_handler(); } bool fix_length_and_dec_generic(); bool fix_length_and_dec_numeric(const Type_handler *h); + bool fix_length_and_dec_sint_ge0(); bool fix_length_and_dec_string(); }; diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index db38e44d5b0..a53e89cc8e3 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -2942,7 +2942,7 @@ bool Item_extract::fix_length_and_dec(THD *thd) switch (int_type) { case INTERVAL_YEAR: set_date_length(4); break; // YYYY case INTERVAL_YEAR_MONTH: set_date_length(6); break; // YYYYMM - case INTERVAL_QUARTER: set_date_length(2); break; // 1..4 + case INTERVAL_QUARTER: set_date_length(1); break; // 1..4 case INTERVAL_MONTH: set_date_length(2); break; // MM case INTERVAL_WEEK: set_date_length(2); break; // 0..52 case INTERVAL_DAY: set_day_length(daylen); break; // DD diff --git a/sql/item_timefunc.h b/sql/item_timefunc.h index 083eb7ba8e7..5485f6c111b 100644 --- a/sql/item_timefunc.h +++ b/sql/item_timefunc.h @@ -30,23 +30,23 @@ bool get_interval_value(THD *thd, Item *args, interval_type int_type, INTERVAL *interval); -class Item_long_func_date_field: public Item_long_func +class Item_long_func_date_field: public Item_long_ge0_func { bool check_arguments() const override { return args[0]->check_type_can_return_date(func_name_cstring()); } public: Item_long_func_date_field(THD *thd, Item *a) - :Item_long_func(thd, a) { } + :Item_long_ge0_func(thd, a) { } }; -class Item_long_func_time_field: public Item_long_func +class Item_long_func_time_field: public Item_long_ge0_func { bool check_arguments() const override { return args[0]->check_type_can_return_time(func_name_cstring()); } public: Item_long_func_time_field(THD *thd, Item *a) - :Item_long_func(thd, a) { } + :Item_long_ge0_func(thd, a) { } }; @@ -186,10 +186,10 @@ public: }; -class Item_func_month :public Item_long_func +class Item_func_month :public Item_long_ge0_func { public: - Item_func_month(THD *thd, Item *a): Item_long_func(thd, a) + Item_func_month(THD *thd, Item *a): Item_long_ge0_func(thd, a) { } longlong val_int() override; LEX_CSTRING func_name_cstring() const override @@ -381,7 +381,7 @@ public: }; -class Item_func_week :public Item_long_func +class Item_func_week :public Item_long_ge0_func { bool check_arguments() const override { @@ -389,8 +389,8 @@ class Item_func_week :public Item_long_func (arg_count > 1 && args[1]->check_type_can_return_int(func_name_cstring())); } public: - Item_func_week(THD *thd, Item *a): Item_long_func(thd, a) {} - Item_func_week(THD *thd, Item *a, Item *b): Item_long_func(thd, a, b) {} + Item_func_week(THD *thd, Item *a): Item_long_ge0_func(thd, a) {} + Item_func_week(THD *thd, Item *a, Item *b): Item_long_ge0_func(thd, a, b) {} longlong val_int() override; LEX_CSTRING func_name_cstring() const override { @@ -1166,12 +1166,17 @@ class Item_extract :public Item_int_func, void 
set_date_length(uint32 length) { /* - Although DATE components (e.g. YEAR, YEAR_MONTH, QUARTER, MONTH, WEEK) - cannot have a sign, we should probably still add +1, - because all around the code we assume that max_length is sign inclusive. - Another options is to set unsigned_flag to "true". + DATE components (e.g. YEAR, YEAR_MONTH, QUARTER, MONTH, WEEK) + return non-negative values but historically EXTRACT for date + components always returned the signed int data type. + So do equivalent functions YEAR(), QUARTER(), MONTH(), WEEK(). + Let's set the data type to "signed int, but not negative", + so "this" produces better data types in VARCHAR and DECIMAL context + by using the fact that all of the max_length characters are spent + for digits (non of them are spent for the sign). */ - set_handler(handler_by_length(max_length= length, 10)); // QQ: see above + set_handler(&type_handler_slong_ge0); + fix_char_length(length); m_date_mode= date_mode_t(0); } void set_day_length(uint32 length) diff --git a/sql/log.cc b/sql/log.cc index 2fa398e593e..d78e8522b04 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -7024,8 +7024,8 @@ err: mysql_mutex_assert_not_owner(&LOCK_after_binlog_sync); mysql_mutex_assert_not_owner(&LOCK_commit_ordered); #ifdef HAVE_REPLICATION - if (repl_semisync_master.report_binlog_update(thd, log_file_name, - file->pos_in_file)) + if (repl_semisync_master.report_binlog_update( + thd, thd, log_file_name, file->pos_in_file)) { sql_print_error("Failed to run 'after_flush' hooks"); error= 1; @@ -8619,9 +8619,19 @@ MYSQL_BIN_LOG::trx_group_commit_leader(group_commit_entry *leader) for (current= queue; current != NULL; current= current->next) { #ifdef HAVE_REPLICATION + /* + The thread which will await the ACK from the replica can change + depending on the wait-point. If AFTER_COMMIT, then the user thread + will perform the wait. If AFTER_SYNC, the binlog group commit leader + will perform the wait on behalf of the user thread. + */ + THD *waiter_thd= (repl_semisync_master.wait_point() == + SEMI_SYNC_MASTER_WAIT_POINT_AFTER_STORAGE_COMMIT) + ? current->thd + : leader->thd; if (likely(!current->error) && unlikely(repl_semisync_master. - report_binlog_update(current->thd, + report_binlog_update(current->thd, waiter_thd, current->cache_mngr-> last_commit_pos_file, current->cache_mngr-> @@ -11172,7 +11182,7 @@ Recovery_context::Recovery_context() : prev_event_pos(0), last_gtid_standalone(false), last_gtid_valid(false), last_gtid_no2pc(false), last_gtid_engines(0), - do_truncate(repl_semisync_slave.get_slave_enabled()), + do_truncate(global_rpl_semi_sync_slave_enabled), truncate_validated(false), truncate_reset_done(false), truncate_set_in_1st(false), id_binlog(MAX_binlog_id), checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF), gtid_maybe_to_truncate(NULL) diff --git a/sql/log.h b/sql/log.h index ae813f7588d..bcccd3e433e 100644 --- a/sql/log.h +++ b/sql/log.h @@ -426,6 +426,7 @@ struct wait_for_commit; class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG { +#ifdef HAVE_PSI_INTERFACE /** The instrumentation key to use for @ LOCK_index. */ PSI_mutex_key m_key_LOCK_index; /** The instrumentation key to use for @ COND_relay_log_updated */ @@ -440,6 +441,16 @@ class MYSQL_BIN_LOG: public TC_LOG, private MYSQL_LOG PSI_cond_key m_key_COND_queue_busy; /** The instrumentation key to use for LOCK_binlog_end_pos. 
*/ PSI_mutex_key m_key_LOCK_binlog_end_pos; +#else + static constexpr PSI_mutex_key m_key_LOCK_index= 0; + static constexpr PSI_cond_key m_key_relay_log_update= 0; + static constexpr PSI_cond_key m_key_bin_log_update= 0; + static constexpr PSI_file_key m_key_file_log= 0, m_key_file_log_cache= 0; + static constexpr PSI_file_key m_key_file_log_index= 0; + static constexpr PSI_file_key m_key_file_log_index_cache= 0; + static constexpr PSI_cond_key m_key_COND_queue_busy= 0; + static constexpr PSI_mutex_key m_key_LOCK_binlog_end_pos= 0; +#endif struct group_commit_entry { diff --git a/sql/log_event.cc b/sql/log_event.cc index b3c2390f168..fab8e6626bb 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -3101,21 +3101,16 @@ Rows_log_event::Rows_log_event(const uchar *buf, uint event_len, /* if my_bitmap_init fails, caught in is_valid() */ if (likely(!my_bitmap_init(&m_cols, - m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, - m_width))) + m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, + m_width))) { DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); - memcpy(m_cols.bitmap, ptr_after_width, (m_width + 7) / 8); - create_last_word_mask(&m_cols); + bitmap_import(&m_cols, ptr_after_width); + DBUG_DUMP("m_cols", (uchar*) ptr_after_width, no_bytes_in_export_map(&m_cols)); ptr_after_width+= (m_width + 7) / 8; - DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols)); } else - { - // Needed because my_bitmap_init() does not set it to null on failure - m_cols.bitmap= NULL; DBUG_VOID_RETURN; - } m_cols_ai.bitmap= m_cols.bitmap; /* See explanation in is_valid() */ @@ -3125,22 +3120,17 @@ Rows_log_event::Rows_log_event(const uchar *buf, uint event_len, /* if my_bitmap_init fails, caught in is_valid() */ if (likely(!my_bitmap_init(&m_cols_ai, - m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : NULL, - m_width))) + m_width <= sizeof(m_bitbuf_ai)*8 ? m_bitbuf_ai : + NULL, + m_width))) { DBUG_PRINT("debug", ("Reading from %p", ptr_after_width)); - memcpy(m_cols_ai.bitmap, ptr_after_width, (m_width + 7) / 8); - create_last_word_mask(&m_cols_ai); + bitmap_import(&m_cols_ai, ptr_after_width); + DBUG_DUMP("m_cols_ai", ptr_after_width, no_bytes_in_export_map(&m_cols_ai)); ptr_after_width+= (m_width + 7) / 8; - DBUG_DUMP("m_cols_ai", (uchar*) m_cols_ai.bitmap, - no_bytes_in_map(&m_cols_ai)); } else - { - // Needed because my_bitmap_init() does not set it to null on failure - m_cols_ai.bitmap= 0; DBUG_VOID_RETURN; - } } const uchar* const ptr_rows_data= (const uchar*) ptr_after_width; @@ -3203,8 +3193,6 @@ void Rows_log_event::uncompress_buf() Rows_log_event::~Rows_log_event() { - if (m_cols.bitmap == m_bitbuf) // no my_malloc happened - m_cols.bitmap= 0; // so no my_free in my_bitmap_free my_bitmap_free(&m_cols); // To pair with my_bitmap_init(). my_free(m_rows_buf); my_free(m_extra_row_data); @@ -3218,9 +3206,10 @@ int Rows_log_event::get_data_size() uchar *end= net_store_length(buf, m_width); DBUG_EXECUTE_IF("old_row_based_repl_4_byte_map_id_master", - return (int)(6 + no_bytes_in_map(&m_cols) + (end - buf) + - (general_type_code == UPDATE_ROWS_EVENT ? no_bytes_in_map(&m_cols_ai) : 0) + - m_rows_cur - m_rows_buf);); + return (int) (6 + no_bytes_in_export_map(&m_cols) + (end - buf) + + (general_type_code == UPDATE_ROWS_EVENT ? 
+ no_bytes_in_export_map(&m_cols_ai) : 0) + + m_rows_cur - m_rows_buf);); int data_size= 0; Log_event_type type= get_type_code(); bool is_v2_event= LOG_EVENT_IS_ROW_V2(type); @@ -3235,11 +3224,11 @@ int Rows_log_event::get_data_size() { data_size= ROWS_HEADER_LEN_V1; } - data_size+= no_bytes_in_map(&m_cols); + data_size+= no_bytes_in_export_map(&m_cols); data_size+= (uint) (end - buf); if (general_type_code == UPDATE_ROWS_EVENT) - data_size+= no_bytes_in_map(&m_cols_ai); + data_size+= no_bytes_in_export_map(&m_cols_ai); data_size+= (uint) (m_rows_cur - m_rows_buf); return data_size; @@ -3770,12 +3759,7 @@ Delete_rows_compressed_log_event::Delete_rows_compressed_log_event( Update_rows_log_event::~Update_rows_log_event() { - if (m_cols_ai.bitmap) - { - if (m_cols_ai.bitmap == m_bitbuf_ai) // no my_malloc happened - m_cols_ai.bitmap= 0; // so no my_free in my_bitmap_free - my_bitmap_free(&m_cols_ai); // To pair with my_bitmap_init(). - } + my_bitmap_free(&m_cols_ai); // To pair with my_bitmap_init(). } diff --git a/sql/log_event.h b/sql/log_event.h index d9c25eab93f..7362105df62 100644 --- a/sql/log_event.h +++ b/sql/log_event.h @@ -4715,8 +4715,8 @@ protected: ulong m_master_reclength; /* Length of record on master side */ /* Bit buffers in the same memory as the class */ - uint32 m_bitbuf[128/(sizeof(uint32)*8)]; - uint32 m_bitbuf_ai[128/(sizeof(uint32)*8)]; + my_bitmap_map m_bitbuf[128/(sizeof(my_bitmap_map)*8)]; + my_bitmap_map m_bitbuf_ai[128/(sizeof(my_bitmap_map)*8)]; uchar *m_rows_buf; /* The rows in packed format */ uchar *m_rows_cur; /* One-after the end of the data */ diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index a4a4d0ec5bb..89907f1552b 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -4624,20 +4624,12 @@ Rows_log_event::Rows_log_event(THD *thd_arg, TABLE *tbl_arg, set_flags(NO_CHECK_CONSTRAINT_CHECKS_F); /* if my_bitmap_init fails, caught in is_valid() */ if (likely(!my_bitmap_init(&m_cols, - m_width <= sizeof(m_bitbuf)*8 ? m_bitbuf : NULL, - m_width))) + m_width <= sizeof(m_bitbuf)*8 ? 
m_bitbuf : NULL, + m_width))) { /* Cols can be zero if this is a dummy binrows event */ if (likely(cols != NULL)) - { - memcpy(m_cols.bitmap, cols->bitmap, no_bytes_in_map(cols)); - create_last_word_mask(&m_cols); - } - } - else - { - // Needed because my_bitmap_init() does not set it to null on failure - m_cols.bitmap= 0; + bitmap_copy(&m_cols, cols); } } @@ -5102,9 +5094,12 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi) if (table->versioned()) { + bitmap_set_bit(table->read_set, table->s->vers.start_fieldno); bitmap_set_bit(table->write_set, table->s->vers.start_fieldno); + bitmap_set_bit(table->read_set, table->s->vers.end_fieldno); bitmap_set_bit(table->write_set, table->s->vers.end_fieldno); } + m_table->mark_columns_per_binlog_row_image(); this->slave_exec_mode= slave_exec_mode_options; // fix the mode @@ -5450,30 +5445,37 @@ bool Rows_log_event::write_data_body() my_ptrdiff_t const data_size= m_rows_cur - m_rows_buf; bool res= false; uchar *const sbuf_end= net_store_length(sbuf, (size_t) m_width); + uint bitmap_size= no_bytes_in_export_map(&m_cols); + uchar *bitmap; DBUG_ASSERT(static_cast(sbuf_end - sbuf) <= sizeof(sbuf)); DBUG_DUMP("m_width", sbuf, (size_t) (sbuf_end - sbuf)); res= res || write_data(sbuf, (size_t) (sbuf_end - sbuf)); - DBUG_DUMP("m_cols", (uchar*) m_cols.bitmap, no_bytes_in_map(&m_cols)); - res= res || write_data((uchar*)m_cols.bitmap, no_bytes_in_map(&m_cols)); + bitmap= (uchar*) my_alloca(bitmap_size); + bitmap_export(bitmap, &m_cols); + + DBUG_DUMP("m_cols", bitmap, bitmap_size); + res= res || write_data(bitmap, bitmap_size); /* TODO[refactor write]: Remove the "down cast" here (and elsewhere). */ if (get_general_type_code() == UPDATE_ROWS_EVENT) { - DBUG_DUMP("m_cols_ai", (uchar*) m_cols_ai.bitmap, - no_bytes_in_map(&m_cols_ai)); - res= res || write_data((uchar*)m_cols_ai.bitmap, - no_bytes_in_map(&m_cols_ai)); + DBUG_ASSERT(m_cols.n_bits == m_cols_ai.n_bits); + bitmap_export(bitmap, &m_cols_ai); + + DBUG_DUMP("m_cols_ai", bitmap, bitmap_size); + res= res || write_data(bitmap, bitmap_size); } DBUG_DUMP("rows", m_rows_buf, data_size); res= res || write_data(m_rows_buf, (size_t) data_size); + my_afree(bitmap); return res; - } + bool Rows_log_event::write_compressed() { uchar *m_rows_buf_tmp= m_rows_buf; @@ -6757,6 +6759,8 @@ Rows_log_event::write_row(rpl_group_info *rgi, TODO: Add safety measures against infinite looping. */ + DBUG_EXECUTE_IF("write_row_inject_sleep_before_ha_write_row", + my_sleep(20000);); if (table->s->sequence) error= update_sequence(); else while (unlikely(error= table->file->ha_write_row(table->record[0]))) @@ -7254,6 +7258,12 @@ static int row_not_found_error(rpl_group_info *rgi) ? HA_ERR_KEY_NOT_FOUND : HA_ERR_RECORD_CHANGED; } +static int end_of_file_error(rpl_group_info *rgi) +{ + return rgi->speculation != rpl_group_info::SPECULATE_OPTIMISTIC + ? HA_ERR_END_OF_FILE : HA_ERR_RECORD_CHANGED; +} + /** Locate the current row in event's table. 
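
The row-event hunks above replace raw memcpy of MY_BITMAP storage with an explicit export/import step: the column map travels inside the event as exactly (m_width + 7) / 8 packed bytes (no_bytes_in_export_map()), and bitmap_export()/bitmap_import() translate between that wire form and the in-memory representation, whose buffer type changed to my_bitmap_map words. The self-contained sketch below mirrors the round-trip with plain vectors; it is an illustration only, and the LSB-first bit order is an assumption, not a statement about the actual binlog layout.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// Wire size of a packed column map: one bit per column, rounded up to bytes.
static size_t export_bytes(size_t n_bits)
{
  return (n_bits + 7) / 8;
}

static std::vector<uint8_t> pack_bitmap(const std::vector<bool> &bits)
{
  std::vector<uint8_t> out(export_bytes(bits.size()), 0);
  for (size_t i= 0; i < bits.size(); i++)
    if (bits[i])
      out[i / 8]|= uint8_t(1u << (i % 8));
  return out;
}

static std::vector<bool> unpack_bitmap(const std::vector<uint8_t> &bytes,
                                       size_t n_bits)
{
  std::vector<bool> bits(n_bits, false);
  for (size_t i= 0; i < n_bits; i++)
    bits[i]= (bytes[i / 8] >> (i % 8)) & 1u;
  return bits;
}

int main()
{
  // Ten columns, of which only columns 0, 3 and 9 are present in the row image.
  std::vector<bool> m_cols(10, false);
  m_cols[0]= m_cols[3]= m_cols[9]= true;

  std::vector<uint8_t> wire= pack_bitmap(m_cols);   // 2 bytes on the wire
  assert(wire.size() == export_bytes(m_cols.size()));
  assert(unpack_bitmap(wire, m_cols.size()) == m_cols);
  (void) wire;
  return 0;
}

The benefit of the indirection is that the serialized size depends only on the bit count, so the in-memory word width can change without affecting the replication format.
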
@@ -7498,6 +7508,8 @@ int Rows_log_event::find_row(rpl_group_info *rgi) while ((error= table->file->ha_index_next(table->record[0]))) { DBUG_PRINT("info",("no record matching the given row found")); + if (error == HA_ERR_END_OF_FILE) + error= end_of_file_error(rgi); table->file->print_error(error, MYF(0)); table->file->ha_index_end(); goto end; @@ -7534,6 +7546,7 @@ int Rows_log_event::find_row(rpl_group_info *rgi) break; case HA_ERR_END_OF_FILE: + error= end_of_file_error(rgi); DBUG_PRINT("info", ("Record not found")); table->file->ha_rnd_end(); goto end; @@ -7749,10 +7762,7 @@ void Update_rows_log_event::init(MY_BITMAP const *cols) { /* Cols can be zero if this is a dummy binrows event */ if (likely(cols != NULL)) - { - memcpy(m_cols_ai.bitmap, cols->bitmap, no_bytes_in_map(cols)); - create_last_word_mask(&m_cols_ai); - } + bitmap_copy(&m_cols_ai, cols); } } @@ -7816,11 +7826,6 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) #endif /* WSREP_PROC_INFO */ thd_proc_info(thd, message); - // Temporary fix to find out why it fails [/Matz] - memcpy(m_table->read_set->bitmap, m_cols.bitmap, (m_table->read_set->n_bits + 7) / 8); - memcpy(m_table->write_set->bitmap, m_cols_ai.bitmap, (m_table->write_set->n_bits + 7) / 8); - - m_table->mark_columns_per_binlog_row_image(); int error= find_row(rgi); if (unlikely(error)) @@ -7926,7 +7931,6 @@ Update_rows_log_event::do_exec_row(rpl_group_info *rgi) error= vers_insert_history_row(m_table); restore_record(m_table, record[2]); } - m_table->default_column_bitmaps(); if (invoke_triggers && likely(!error) && unlikely(process_triggers(TRG_EVENT_UPDATE, TRG_ACTION_AFTER, TRUE))) diff --git a/sql/multi_range_read.h b/sql/multi_range_read.h index 930ee3f238b..ecb1a08051e 100644 --- a/sql/multi_range_read.h +++ b/sql/multi_range_read.h @@ -556,10 +556,6 @@ class DsMrr_impl public: typedef void (handler::*range_check_toggle_func_t)(bool on); - DsMrr_impl() - : secondary_file(NULL), - rowid_filter(NULL) {}; - void init(handler *h_arg, TABLE *table_arg) { primary_file= h_arg; @@ -581,7 +577,7 @@ public: int dsmrr_explain_info(uint mrr_mode, char *str, size_t size); private: /* Buffer to store (key, range_id) pairs */ - Lifo_buffer *key_buffer; + Lifo_buffer *key_buffer= nullptr; /* The "owner" handler object (the one that is expected to "own" this object @@ -594,13 +590,13 @@ private: Secondary handler object. (created when needed, we need it when we need to run both index scan and rnd_pos() scan at the same time) */ - handler *secondary_file; + handler *secondary_file= nullptr; /* The rowid filter that DS-MRR has "unpushed" from the storage engine. If it's present, DS-MRR will use it. 
*/ - Rowid_filter *rowid_filter; + Rowid_filter *rowid_filter= nullptr; uint keyno; /* index we're running the scan on */ /* TRUE <=> need range association, buffers hold {rowid, range_id} pairs */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index a5641668503..737c3bc4c2a 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -303,7 +303,8 @@ static TYPELIB tc_heuristic_recover_typelib= }; const char *first_keyword= "first"; -const char *my_localhost= "localhost", *delayed_user= "DELAYED"; +const char *my_localhost= "localhost", + *delayed_user= "delayed", *slave_user= ""; bool opt_large_files= sizeof(my_off_t) > 4; static my_bool opt_autocommit; ///< for --autocommit command-line option @@ -1567,15 +1568,12 @@ static void kill_thread(THD *thd) /** First shutdown everything but slave threads and binlog dump connections */ -static my_bool kill_thread_phase_1(THD *thd, int *n_threads_awaiting_ack) +static my_bool kill_thread_phase_1(THD *thd, void *) { DBUG_PRINT("quit", ("Informing thread %ld that it's time to die", (ulong) thd->thread_id)); - if (thd->slave_thread || thd->is_binlog_dump_thread() || - (shutdown_wait_for_slaves && - repl_semisync_master.is_thd_awaiting_semisync_ack(thd) && - ++(*n_threads_awaiting_ack))) + if (thd->slave_thread || thd->is_binlog_dump_thread()) return 0; if (DBUG_IF("only_kill_system_threads") && !thd->system_thread) @@ -1773,29 +1771,18 @@ static void close_connections(void) This will give the threads some time to gracefully abort their statements and inform their clients that the server is about to die. */ - int n_threads_awaiting_ack= 0; - server_threads.iterate(kill_thread_phase_1, &n_threads_awaiting_ack); + server_threads.iterate(kill_thread_phase_1); /* If we are waiting on any ACKs, delay killing the thread until either an ACK is received or the timeout is hit. 
- - Allow at max the number of sessions to await a timeout; however, if all - ACKs have been received in less iterations, then quit early */ if (shutdown_wait_for_slaves && repl_semisync_master.get_master_enabled()) { - int waiting_threads= repl_semisync_master.sync_get_master_wait_sessions(); - if (waiting_threads) - sql_print_information("Delaying shutdown to await semi-sync ACK"); - - while (waiting_threads-- > 0) - repl_semisync_master.await_slave_reply(); + repl_semisync_master.await_all_slave_replies( + "Delaying shutdown to await semi-sync ACK"); } - DBUG_EXECUTE_IF("delay_shutdown_phase_2_after_semisync_wait", - my_sleep(500000);); - Events::deinit(); slave_prepare_for_shutdown(); ack_receiver.stop(); @@ -1816,8 +1803,7 @@ static void close_connections(void) */ DBUG_PRINT("info", ("THD_count: %u", THD_count::value())); - for (int i= 0; THD_count::connection_thd_count() - n_threads_awaiting_ack - && i < 1000; i++) + for (int i= 0; THD_count::connection_thd_count() && i < 1000; i++) { if (DBUG_IF("only_kill_system_threads_no_loop")) break; @@ -1836,9 +1822,9 @@ static void close_connections(void) #endif /* All threads has now been aborted */ DBUG_PRINT("quit", ("Waiting for threads to die (count=%u)", - THD_count::connection_thd_count() - n_threads_awaiting_ack)); + THD_count::connection_thd_count())); - while (THD_count::connection_thd_count() - n_threads_awaiting_ack) + while (THD_count::connection_thd_count()) { if (DBUG_IF("only_kill_system_threads_no_loop")) break; @@ -3223,7 +3209,6 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) sigset_t set; int sig; my_thread_init(); // Init new thread - DBUG_ENTER("signal_hand"); signal_thread_in_use= 1; /* @@ -3277,7 +3262,6 @@ pthread_handler_t signal_hand(void *arg __attribute__((unused))) { DBUG_PRINT("quit",("signal_handler: calling my_thread_end()")); my_thread_end(); - DBUG_LEAVE; // Must match DBUG_ENTER() signal_thread_in_use= 0; pthread_exit(0); // Safety return 0; // Avoid compiler warnings @@ -3795,20 +3779,35 @@ static void my_malloc_size_cb_func(long long size, my_bool is_thread_specific) thd->status_var.local_memory_used > (int64)thd->variables.max_mem_used && likely(!thd->killed) && !thd->get_stmt_da()->is_set()) { - /* Ensure we don't get called here again */ - char buf[50], *buf2; - thd->set_killed(KILL_QUERY); - my_snprintf(buf, sizeof(buf), "--max-session-mem-used=%llu", - thd->variables.max_mem_used); - if ((buf2= (char*) thd->alloc(256))) - { - my_snprintf(buf2, 256, ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf); - thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, buf2); - } - else - { - thd->set_killed(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, - "--max-session-mem-used"); + /* + Ensure we don't get called here again. + + It is not safe to wait for LOCK_thd_kill here, as we could be called + from almost any context. For example while LOCK_plugin is being held; + but THD::awake() locks LOCK_thd_kill and LOCK_plugin in the opposite + order (MDEV-33443). + + So ignore the max_mem_used limit in the unlikely case we cannot obtain + LOCK_thd_kill here (the limit will be enforced on the next allocation). 
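The comment above carries the core of the fix: the allocation callback can run in almost any context, so it must not wait for LOCK_thd_kill and instead skips the check when the mutex is contended, deferring enforcement to the next allocation. A standalone sketch of that try-lock-or-postpone pattern using std::mutex; SessionLimits and its fields are invented for the illustration:

#include <atomic>
#include <mutex>

struct SessionLimits
{
  std::mutex kill_lock;                 // plays the role of LOCK_thd_kill
  std::atomic<bool> kill_flag{false};
  long long used= 0;
  long long max_used= 1024;
};

// Called on every allocation; must never wait for kill_lock because the
// caller may already hold a mutex that the killing side takes after it.
static void on_allocation(SessionLimits &s, long long size)
{
  s.used+= size;
  if (s.used <= s.max_used || s.kill_flag)
    return;
  if (s.kill_lock.try_lock())           // only act if the lock is free
  {
    s.kill_flag= true;                  // "kill" the query under the lock
    s.kill_lock.unlock();
  }
  // else: skip now; the limit is enforced again on the next allocation.
}

int main()
{
  SessionLimits s;
  on_allocation(s, 2048);               // exceeds max_used
  return s.kill_flag ? 0 : 1;
}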
+ */ + if (!mysql_mutex_trylock(&thd->LOCK_thd_kill)) { + char buf[50], *buf2; + thd->set_killed_no_mutex(KILL_QUERY); + my_snprintf(buf, sizeof(buf), "--max-session-mem-used=%llu", + thd->variables.max_mem_used); + if ((buf2= (char*) thd->alloc(256))) + { + my_snprintf(buf2, 256, + ER_THD(thd, ER_OPTION_PREVENTS_STATEMENT), buf); + thd->set_killed_no_mutex(KILL_QUERY, + ER_OPTION_PREVENTS_STATEMENT, buf2); + } + else + { + thd->set_killed_no_mutex(KILL_QUERY, ER_OPTION_PREVENTS_STATEMENT, + "--max-session-mem-used"); + } + mysql_mutex_unlock(&thd->LOCK_thd_kill); } } DBUG_ASSERT((longlong) thd->status_var.local_memory_used >= 0 || @@ -5005,9 +5004,9 @@ static int init_server_components() proc_info_hook= set_thd_stage_info; /* - Print source revision hash, as one of the first lines, if not the - first in error log, for troubleshooting and debugging purposes - */ + Print source revision hash, as one of the first lines, if not the + first in error log, for troubleshooting and debugging purposes + */ if (!opt_help) sql_print_information("Starting MariaDB %s source revision %s as process %lu", server_version, SOURCE_REVISION, (ulong) getpid()); @@ -5035,6 +5034,19 @@ static int init_server_components() xid_cache_init(); + /* + Do not open binlong when doing bootstrap. + This ensures that rpl_load_gtid_slave_state() will not fail with an error + as the mysql schema does not yet exists. + This also ensures that we don't get an empty binlog file if the user has + log-bin in his config files. + */ + if (opt_bootstrap) + { + opt_bin_log= opt_bin_log_used= binlog_format_used= 0; + opt_log_slave_updates= 0; + } + /* need to configure logging before initializing storage engines */ if (!opt_bin_log_used && !WSREP_ON) { diff --git a/sql/mysqld.h b/sql/mysqld.h index f40a2537790..22081f7249f 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -267,7 +267,7 @@ extern time_t server_start_time, flush_status_time; extern char *opt_mysql_tmpdir, mysql_charsets_dir[]; extern size_t mysql_unpacked_real_data_home_len; extern MYSQL_PLUGIN_IMPORT MY_TMPDIR mysql_tmpdir_list; -extern const char *first_keyword, *delayed_user; +extern const char *first_keyword, *delayed_user, *slave_user; extern MYSQL_PLUGIN_IMPORT const char *my_localhost; extern MYSQL_PLUGIN_IMPORT const char **errmesg; /* Error messages */ extern const char *myisam_recover_options_str; diff --git a/sql/net_serv.cc b/sql/net_serv.cc index 3dff8442c6a..af65d92c2b9 100644 --- a/sql/net_serv.cc +++ b/sql/net_serv.cc @@ -74,7 +74,13 @@ static int inline EXTRA_DEBUG_fflush(...) { return 0; } #ifdef MYSQL_SERVER #include #include -#define MYSQL_SERVER_my_error my_error + +static void inline MYSQL_SERVER_my_error(uint error, myf flags) +{ + my_error(error, + flags | MYF(global_system_variables.log_warnings > 3 ? ME_ERROR_LOG : 0)); +} + #else static void inline MYSQL_SERVER_my_error(...) {} #endif @@ -157,6 +163,7 @@ my_bool my_net_init(NET *net, Vio *vio, void *thd, uint my_flags) net->net_skip_rest_factor= 0; net->last_errno=0; net->pkt_nr_can_be_reset= 0; + net->using_proxy_protocol= 0; net->thread_specific_malloc= MY_TEST(my_flags & MY_THREAD_SPECIFIC); net->thd= 0; #ifdef MYSQL_SERVER @@ -211,6 +218,7 @@ void net_end(NET *net) DBUG_ENTER("net_end"); my_free(net->buff); net->buff=0; + net->using_proxy_protocol= 0; DBUG_VOID_RETURN; } @@ -766,7 +774,19 @@ net_real_write(NET *net,const uchar *packet, size_t len) #endif /* !defined(MYSQL_SERVER) */ net->error= 2; /* Close socket */ net->last_errno= (interrupted ? 
ER_NET_WRITE_INTERRUPTED : - ER_NET_ERROR_ON_WRITE); + ER_NET_ERROR_ON_WRITE); +#ifdef MYSQL_SERVER + if (global_system_variables.log_warnings > 3) + { + my_printf_error(net->last_errno, + "Could not write packet: fd: %lld state: %d " + "errno: %d vio_errno: %d length: %ld", + MYF(ME_ERROR_LOG), + (longlong) vio_fd(net->vio), (int) net->vio->state, + vio_errno(net->vio), net->last_errno, (ulong) (end-pos)); + break; + } +#endif MYSQL_SERVER_my_error(net->last_errno, MYF(0)); break; } @@ -937,6 +957,7 @@ static handle_proxy_header_result handle_proxy_header(NET *net) return RETRY; /* Change peer address in THD and ACL structures.*/ uint host_errors; + net->using_proxy_protocol= 1; return (handle_proxy_header_result)thd_set_peer_addr(thd, &(peer_info.peer_addr), NULL, peer_info.port, false, &host_errors); @@ -1064,20 +1085,34 @@ retry: (longlong) vio_fd(net->vio)); } #ifndef MYSQL_SERVER - if (length != 0 && vio_errno(net->vio) == SOCKET_EINTR) + if (length != 0 && vio_should_retry(net->vio)) { DBUG_PRINT("warning",("Interrupted read. Retrying...")); continue; } #endif - DBUG_PRINT("error",("Couldn't read packet: remain: %u errno: %d length: %ld", + DBUG_PRINT("error",("Could not read packet: remain: %u errno: %d length: %ld", remain, vio_errno(net->vio), (long) length)); len= packet_error; net->error= 2; /* Close socket */ net->last_errno= (vio_was_timeout(net->vio) ? - ER_NET_READ_INTERRUPTED : - ER_NET_READ_ERROR); - MYSQL_SERVER_my_error(net->last_errno, MYF(0)); + ER_NET_READ_INTERRUPTED : + ER_NET_READ_ERROR); +#ifdef MYSQL_SERVER + if (global_system_variables.log_warnings > 3) + { + my_printf_error(net->last_errno, + "Could not read packet: fd: %lld state: %d " + "remain: %u errno: %d vio_errno: %d " + "length: %lld", + MYF(ME_ERROR_LOG), + (longlong) vio_fd(net->vio), (int) net->vio->state, + remain, vio_errno(net->vio), net->last_errno, + (longlong) length); + } + else + my_error(net->last_errno, MYF(0)); +#endif /* MYSQL_SERVER */ goto end; } remain -= (uint32) length; @@ -1282,7 +1317,10 @@ ulong my_net_read_packet(NET *net, my_bool read_from_server) { ulong reallen = 0; - return my_net_read_packet_reallen(net, read_from_server, &reallen); + ulong length; + DBUG_ENTER("my_net_read_packet"); + length= my_net_read_packet_reallen(net, read_from_server, &reallen); + DBUG_RETURN(length); } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 66a6e565f0c..84a007f85c5 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3711,7 +3711,7 @@ end_of_range_loop: table->reginfo.impossible_range= 0; uint used_fields_buff_size= bitmap_buffer_size(table->s->fields); - uint32 *used_fields_buff= (uint32*)thd->alloc(used_fields_buff_size); + my_bitmap_map *used_fields_buff= (my_bitmap_map*)thd->alloc(used_fields_buff_size); MY_BITMAP cols_for_indexes; (void) my_bitmap_init(&cols_for_indexes, used_fields_buff, table->s->fields); bitmap_clear_all(&cols_for_indexes); @@ -7020,8 +7020,7 @@ ROR_INTERSECT_INFO* ror_intersect_init(const PARAM *param) void ror_intersect_cpy(ROR_INTERSECT_INFO *dst, const ROR_INTERSECT_INFO *src) { dst->param= src->param; - memcpy(dst->covered_fields.bitmap, src->covered_fields.bitmap, - no_bytes_in_map(&src->covered_fields)); + bitmap_copy(&dst->covered_fields, &src->covered_fields); dst->out_rows= src->out_rows; dst->is_covering= src->is_covering; dst->index_records= src->index_records; @@ -7706,7 +7705,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, (*scan)->used_fields_covered= bitmap_bits_set(&(*scan)->covered_fields); 
(*scan)->first_uncovered_field= - bitmap_get_first(&(*scan)->covered_fields); + bitmap_get_first_clear(&(*scan)->covered_fields); } my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark, sizeof(ROR_SCAN_INFO*), diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 333a3960360..88287d331ec 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -137,6 +137,7 @@ wait_for_pending_deadlock_kill(THD *thd, rpl_group_info *rgi) PSI_stage_info old_stage; mysql_mutex_lock(&thd->LOCK_wakeup_ready); + thd->set_time_for_next_stage(); thd->ENTER_COND(&thd->COND_wakeup_ready, &thd->LOCK_wakeup_ready, &stage_waiting_for_deadlock_kill, &old_stage); while (rgi->killed_for_retry == rpl_group_info::RETRY_KILL_PENDING) @@ -399,12 +400,12 @@ do_gco_wait(rpl_group_info *rgi, group_commit_orderer *gco, if (wait_count > entry->count_committing_event_groups) { DEBUG_SYNC(thd, "rpl_parallel_start_waiting_for_prior"); + thd->set_time_for_next_stage(); thd->ENTER_COND(&gco->COND_group_commit_orderer, &entry->LOCK_parallel_entry, &stage_waiting_for_prior_transaction_to_start_commit, old_stage); *did_enter_cond= true; - thd->set_time_for_next_stage(); do { if (!rgi->worker_error && unlikely(thd->check_killed(1))) @@ -492,10 +493,10 @@ do_ftwrl_wait(rpl_group_info *rgi, */ if (unlikely(sub_id > entry->pause_sub_id)) { + thd->set_time_for_next_stage(); thd->ENTER_COND(&entry->COND_parallel_entry, &entry->LOCK_parallel_entry, &stage_waiting_for_ftwrl, old_stage); *did_enter_cond= true; - thd->set_time_for_next_stage(); do { if (entry->force_abort || rgi->worker_error) @@ -558,9 +559,9 @@ pool_mark_busy(rpl_parallel_thread_pool *pool, THD *thd) mysql_mutex_lock(&pool->LOCK_rpl_thread_pool); if (thd) { + thd->set_time_for_next_stage(); thd->ENTER_COND(&pool->COND_rpl_thread_pool, &pool->LOCK_rpl_thread_pool, &stage_waiting_for_rpl_thread_pool, &old_stage); - thd->set_time_for_next_stage(); } while (pool->busy) { @@ -700,9 +701,9 @@ rpl_pause_for_ftwrl(THD *thd) mysql_mutex_lock(&e->LOCK_parallel_entry); }); } + thd->set_time_for_next_stage(); thd->ENTER_COND(&e->COND_parallel_entry, &e->LOCK_parallel_entry, &stage_waiting_for_ftwrl_threads_to_pause, &old_stage); - thd->set_time_for_next_stage(); while (e->pause_sub_id < (uint64)ULONGLONG_MAX && e->last_committed_sub_id < e->pause_sub_id && !err) @@ -2599,6 +2600,7 @@ rpl_parallel_thread * rpl_parallel_entry::choose_thread_internal(uint idx, STRING_WITH_LEN("now SIGNAL wait_queue_ready")); };); #endif + rli->sql_driver_thd->set_time_for_next_stage(); rli->sql_driver_thd->ENTER_COND(&thr->COND_rpl_thread_queue, &thr->LOCK_rpl_thread, &stage_waiting_for_room_in_worker_thread, @@ -2953,6 +2955,7 @@ rpl_parallel::wait_for_workers_idle(THD *thd) e= (struct rpl_parallel_entry *)my_hash_element(&domain_hash, i); mysql_mutex_lock(&e->LOCK_parallel_entry); ++e->need_sub_id_signal; + thd->set_time_for_next_stage(); thd->ENTER_COND(&e->COND_parallel_entry, &e->LOCK_parallel_entry, &stage_waiting_for_workers_idle, &old_stage); while (e->current_sub_id > e->last_committed_sub_id) diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index 8cc721e5737..aa1056c8c8f 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -68,6 +68,19 @@ static ulonglong timespec_to_usec(const struct timespec *ts) return (ulonglong) ts->tv_sec * TIME_MILLION + ts->tv_nsec / TIME_THOUSAND; } +int signal_waiting_transaction(THD *waiting_thd, const char *binlog_file, + my_off_t binlog_pos) +{ + /* + It is possible that the connection thd waiting for an ACK was killed. 
In + such circumstance, the connection thread will nullify the thd member of its + Active_tranx node. So before we try to signal, ensure the THD exists. + */ + if (waiting_thd) + mysql_cond_signal(&waiting_thd->COND_wakeup_ready); + return 0; +} + /******************************************************************************* * * class : manage all active transaction nodes @@ -75,12 +88,14 @@ static ulonglong timespec_to_usec(const struct timespec *ts) ******************************************************************************/ Active_tranx::Active_tranx(mysql_mutex_t *lock, + mysql_cond_t *cond, ulong trace_level) : Trace(trace_level), m_allocator(max_connections), m_num_entries(max_connections << 1), /* Transaction hash table size * is set to double the size * of max_connections */ - m_lock(lock) + m_lock(lock), + m_cond_empty(cond) { /* No transactions are in the list initially. */ m_trx_front = NULL; @@ -142,7 +157,8 @@ int Active_tranx::compare(const char *log_file_name1, my_off_t log_file_pos1, return 0; } -int Active_tranx::insert_tranx_node(const char *log_file_name, +int Active_tranx::insert_tranx_node(THD *thd_to_wait, + const char *log_file_name, my_off_t log_file_pos) { Tranx_node *ins_node; @@ -165,6 +181,7 @@ int Active_tranx::insert_tranx_node(const char *log_file_name, strncpy(ins_node->log_name, log_file_name, FN_REFLEN-1); ins_node->log_name[FN_REFLEN-1] = 0; /* make sure it ends properly */ ins_node->log_pos = log_file_pos; + ins_node->thd= thd_to_wait; if (!m_trx_front) { @@ -232,28 +249,22 @@ bool Active_tranx::is_tranx_end_pos(const char *log_file_name, DBUG_RETURN(entry != NULL); } -void Active_tranx::clear_active_tranx_nodes(const char *log_file_name, - my_off_t log_file_pos) +void Active_tranx::clear_active_tranx_nodes( + const char *log_file_name, my_off_t log_file_pos, + active_tranx_action pre_delete_hook) { Tranx_node *new_front; DBUG_ENTER("Active_tranx::::clear_active_tranx_nodes"); - if (log_file_name != NULL) + new_front= m_trx_front; + while (new_front) { - new_front = m_trx_front; - - while (new_front) - { - if (compare(new_front, log_file_name, log_file_pos) > 0) - break; - new_front = new_front->next; - } - } - else - { - /* If log_file_name is NULL, clear everything. */ - new_front = NULL; + if ((log_file_name != NULL) && + compare(new_front, log_file_name, log_file_pos) > 0) + break; + pre_delete_hook(new_front->thd, new_front->log_name, new_front->log_pos); + new_front = new_front->next; } if (new_front == NULL) @@ -315,9 +326,66 @@ void Active_tranx::clear_active_tranx_nodes(const char *log_file_name, m_trx_front->log_name, (ulong)m_trx_front->log_pos)); } + /* + m_cond_empty aliases Repl_semi_sync_master::COND_binlog, which holds the + condition variable to notify that we have cleared all nodes, e.g. used by + SHUTDOWN WAIT FOR ALL SLAVES. 
+ */ + if (is_empty()) + mysql_cond_signal(m_cond_empty); + DBUG_VOID_RETURN; } +void Active_tranx::unlink_thd_as_waiter(const char *log_file_name, + my_off_t log_file_pos) +{ + DBUG_ENTER("Active_tranx::unlink_thd_as_waiter"); + mysql_mutex_assert_owner(m_lock); + + unsigned int hash_val = get_hash_value(log_file_name, log_file_pos); + Tranx_node *entry = m_trx_htb[hash_val]; + + while (entry != NULL) + { + if (compare(entry, log_file_name, log_file_pos) == 0) + break; + + entry = entry->hash_next; + } + + if (entry) + entry->thd= NULL; + + DBUG_VOID_RETURN; +} + +#ifndef DBUG_OFF +void Active_tranx::assert_thd_is_waiter(THD *thd_to_check, + const char *log_file_name, + my_off_t log_file_pos) +{ + DBUG_ENTER("Active_tranx::assert_thd_is_waiter"); + mysql_mutex_assert_owner(m_lock); + + unsigned int hash_val = get_hash_value(log_file_name, log_file_pos); + Tranx_node *entry = m_trx_htb[hash_val]; + + while (entry != NULL) + { + if (compare(entry, log_file_name, log_file_pos) == 0) + break; + + entry = entry->hash_next; + } + + DBUG_ASSERT(entry); + DBUG_ASSERT(entry->thd); + DBUG_ASSERT(entry->thd->thread_id == thd_to_check->thread_id); + + DBUG_VOID_RETURN; +} +#endif /******************************************************************************* * @@ -397,7 +465,8 @@ int Repl_semi_sync_master::enable_master() if (!get_master_enabled()) { - m_active_tranxs = new Active_tranx(&LOCK_binlog, m_trace_level); + m_active_tranxs= + new Active_tranx(&LOCK_binlog, &COND_binlog_send, m_trace_level); if (m_active_tranxs != NULL) { m_commit_file_name_inited = false; @@ -459,15 +528,6 @@ void Repl_semi_sync_master::cleanup() delete m_active_tranxs; } -int Repl_semi_sync_master::sync_get_master_wait_sessions() -{ - int wait_sessions; - lock(); - wait_sessions= rpl_semi_sync_master_wait_sessions; - unlock(); - return wait_sessions; -} - void Repl_semi_sync_master::create_timeout(struct timespec *out, struct timespec *start_arg) { @@ -500,23 +560,6 @@ void Repl_semi_sync_master::unlock() mysql_mutex_unlock(&LOCK_binlog); } -void Repl_semi_sync_master::cond_broadcast() -{ - mysql_cond_broadcast(&COND_binlog_send); -} - -int Repl_semi_sync_master::cond_timewait(struct timespec *wait_time) -{ - int wait_res; - - DBUG_ENTER("Repl_semi_sync_master::cond_timewait()"); - - wait_res= mysql_cond_timedwait(&COND_binlog_send, - &LOCK_binlog, wait_time); - - DBUG_RETURN(wait_res); -} - void Repl_semi_sync_master::add_slave() { lock(); @@ -533,7 +576,8 @@ void Repl_semi_sync_master::remove_slave() Signal transactions waiting in commit_trx() that they do not have to wait anymore. */ - cond_broadcast(); + m_active_tranxs->clear_active_tranx_nodes(NULL, 0, + signal_waiting_transaction); } unlock(); } @@ -616,7 +660,6 @@ int Repl_semi_sync_master::report_reply_binlog(uint32 server_id, my_off_t log_file_pos) { int cmp; - bool can_release_threads = false; bool need_copy_send_pos = true; DBUG_ENTER("Repl_semi_sync_master::report_reply_binlog"); @@ -668,45 +711,26 @@ int Repl_semi_sync_master::report_reply_binlog(uint32 server_id, /* Remove all active transaction nodes before this point. 
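The clear_active_tranx_nodes() rewrite above threads a pre-delete hook through the pruning loop, and report_reply_binlog() below uses it to wake exactly the transactions covered by the ACK, while a separate condition announces that the list is empty. A condensed standalone sketch of that ACK-side flow; Waiter, Node and AckTracker are invented stand-ins, with std::condition_variable playing the role of the per-THD COND_wakeup_ready and of COND_binlog:

#include <condition_variable>
#include <list>
#include <mutex>

struct Waiter { std::condition_variable ready; };   // ~ THD::COND_wakeup_ready

struct Node
{
  unsigned long long pos;     // simplified: one number instead of (file, pos)
  Waiter *waiter;             // null if the waiting session went away
};

struct AckTracker
{
  std::mutex lock;                       // ~ LOCK_binlog
  std::list<Node> pending;               // kept sorted by pos
  std::condition_variable all_clear;     // ~ COND_binlog, "list is empty"

  // Called when an ACK up to acked_pos arrives: wake each covered waiter
  // individually, drop its node, and announce when nothing is left.
  void acknowledge_up_to(unsigned long long acked_pos)
  {
    std::lock_guard<std::mutex> guard(lock);
    while (!pending.empty() && pending.front().pos <= acked_pos)
    {
      if (pending.front().waiter)        // skip detached (killed) waiters
        pending.front().waiter->ready.notify_one();
      pending.pop_front();
    }
    if (pending.empty())
      all_clear.notify_all();
  }
};

int main()
{
  Waiter w;
  AckTracker t;
  t.pending.push_back(Node{10, &w});
  t.acknowledge_up_to(10);               // wakes w's condition and empties the list
  return t.pending.empty() ? 0 : 1;
}

Signalling each covered waiter on its own condition avoids the spurious wakeups of the old shared broadcast, and the dedicated "empty" condition gives the shutdown path something precise to wait on.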
*/ DBUG_ASSERT(m_active_tranxs != NULL); - m_active_tranxs->clear_active_tranx_nodes(log_file_name, log_file_pos); + m_active_tranxs->clear_active_tranx_nodes(log_file_name, log_file_pos, + signal_waiting_transaction); + if (m_active_tranxs->is_empty()) + m_wait_file_name_inited= false; DBUG_PRINT("semisync", ("%s: Got reply at (%s, %lu)", "Repl_semi_sync_master::report_reply_binlog", log_file_name, (ulong)log_file_pos)); } - if (rpl_semi_sync_master_wait_sessions > 0) - { - /* Let us check if some of the waiting threads doing a trx - * commit can now proceed. - */ - cmp = Active_tranx::compare(m_reply_file_name, m_reply_file_pos, - m_wait_file_name, m_wait_file_pos); - if (cmp >= 0) - { - /* Yes, at least one waiting thread can now proceed: - * let us release all waiting threads with a broadcast - */ - can_release_threads = true; - m_wait_file_name_inited = false; - } - } l_end: unlock(); - if (can_release_threads) - { - DBUG_PRINT("semisync", ("%s: signal all waiting threads.", - "Repl_semi_sync_master::report_reply_binlog")); - - cond_broadcast(); - } DBUG_RETURN(0); } -int Repl_semi_sync_master::wait_after_sync(const char *log_file, my_off_t log_pos) +int Repl_semi_sync_master::wait_after_sync(const char *log_file, + my_off_t log_pos) { if (!get_master_enabled()) return 0; @@ -762,24 +786,27 @@ int Repl_semi_sync_master::wait_after_rollback(THD *thd, bool all) /** The method runs after flush to binary log is done. */ -int Repl_semi_sync_master::report_binlog_update(THD* thd, const char *log_file, +int Repl_semi_sync_master::report_binlog_update(THD *trans_thd, + THD *waiter_thd, + const char *log_file, my_off_t log_pos) { if (get_master_enabled()) { Trans_binlog_info *log_info; - if (!(log_info= thd->semisync_info)) + if (!(log_info= trans_thd->semisync_info)) { if(!(log_info= (Trans_binlog_info*)my_malloc(PSI_INSTRUMENT_ME, sizeof(Trans_binlog_info), MYF(0)))) return 1; - thd->semisync_info= log_info; + trans_thd->semisync_info= log_info; } strcpy(log_info->log_file, log_file + dirname_length(log_file)); log_info->log_pos = log_pos; - return write_tranx_in_binlog(log_info->log_file, log_pos); + return write_tranx_in_binlog(waiter_thd, log_info->log_file, + log_pos); } return 0; @@ -825,7 +852,7 @@ void Repl_semi_sync_master::dump_end(THD* thd) ack_receiver.remove_slave(thd); } -int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, +int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, my_off_t trx_wait_binlog_pos) { bool success= 0; @@ -852,9 +879,8 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, lock(); /* This must be called after acquired the lock */ - THD_ENTER_COND(thd, &COND_binlog_send, &LOCK_binlog, - & stage_waiting_for_semi_sync_ack_from_slave, - & old_stage); + THD_ENTER_COND(thd, &thd->COND_wakeup_ready, &LOCK_binlog, + &stage_waiting_for_semi_sync_ack_from_slave, &old_stage); /* This is the real check inside the mutex. 
*/ if (!get_master_enabled() || !is_on()) @@ -865,7 +891,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, trx_wait_binlog_name, (ulong)trx_wait_binlog_pos, (int)is_on())); - while (is_on() && !thd_killed(thd)) + while (is_on() && !(aborted= thd_killed(thd))) { /* We have to check these again as things may have changed */ if (!rpl_semi_sync_master_clients && !rpl_semi_sync_master_wait_no_slave) @@ -902,7 +928,7 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, trx_wait_binlog_pos, m_wait_file_name, m_wait_file_pos); if (cmp <= 0) - { + { /* This thd has a lower position, let's update the minimum info. */ strmake_buf(m_wait_file_name, trx_wait_binlog_name); m_wait_file_pos = trx_wait_binlog_pos; @@ -934,20 +960,18 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, */ rpl_semi_sync_master_wait_sessions++; - /* We keep track of when this thread is awaiting an ack to ensure it is - * not killed while awaiting an ACK if a shutdown is issued. - */ - set_thd_awaiting_semisync_ack(thd, TRUE); - DBUG_PRINT("semisync", ("%s: wait %lu ms for binlog sent (%s, %lu)", "Repl_semi_sync_master::commit_trx", m_wait_timeout, m_wait_file_name, (ulong)m_wait_file_pos)); +#ifndef DBUG_OFF + m_active_tranxs->assert_thd_is_waiter(thd, trx_wait_binlog_name, + trx_wait_binlog_pos); +#endif create_timeout(&abstime, &start_ts); - wait_result = cond_timewait(&abstime); - - set_thd_awaiting_semisync_ack(thd, FALSE); + wait_result= mysql_cond_timedwait(&thd->COND_wakeup_ready, &LOCK_binlog, + &abstime); rpl_semi_sync_master_wait_sessions--; if (wait_result != 0) @@ -979,17 +1003,49 @@ int Repl_semi_sync_master::commit_trx(const char* trx_wait_binlog_name, { rpl_semi_sync_master_trx_wait_num++; rpl_semi_sync_master_trx_wait_time += wait_time; + + DBUG_EXECUTE_IF("testing_cond_var_per_thd", { + /* + DBUG log warning to ensure we have either recieved our ACK; or + have timed out and are awoken in an off state. Test + rpl.rpl_semi_sync_cond_var_per_thd scans the logs to ensure this + warning is not present. + */ + bool valid_wakeup= + (!get_master_enabled() || !is_on() || thd->is_killed() || + 0 <= Active_tranx::compare( + m_reply_file_name, m_reply_file_pos, + trx_wait_binlog_name, trx_wait_binlog_pos)); + if (!valid_wakeup) + { + sql_print_warning( + "Thread awaiting semi-sync ACK was awoken before its " + "ACK. THD (%llu), Wait coord: (%s, %llu), ACK coord: (%s, " + "%llu)", + thd->thread_id, trx_wait_binlog_name, trx_wait_binlog_pos, + m_reply_file_name, m_reply_file_pos); + } + }); } } } + /* + If our THD was killed (rather than awoken from an ACK) notify the + Active_tranx cache that we are no longer waiting for the ACK, so nobody + signals our COND var invalidly. + */ + if (aborted) + m_active_tranxs->unlink_thd_as_waiter(trx_wait_binlog_name, + trx_wait_binlog_pos); + /* At this point, the binlog file and position of this transaction must have been removed from Active_tranx. m_active_tranxs may be NULL if someone disabled semi sync during - cond_timewait() + mysql_cond_timedwait */ - DBUG_ASSERT(thd_killed(thd) || !m_active_tranxs || aborted || + DBUG_ASSERT(aborted || !m_active_tranxs || m_active_tranxs->is_empty() || !m_active_tranxs->is_tranx_end_pos(trx_wait_binlog_name, trx_wait_binlog_pos)); @@ -1030,20 +1086,21 @@ void Repl_semi_sync_master::switch_off() { DBUG_ENTER("Repl_semi_sync_master::switch_off"); + /* Clear the active transaction list. 
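commit_trx() above now does a timed wait on the session's own condition variable and, if the wait was aborted because the THD was killed, detaches itself from its Tranx_node so a later ACK never signals a condition variable that is about to disappear. The standalone sketch below follows the same idea with invented types, detaching on any wait that ends without an ACK for simplicity:

#include <chrono>
#include <condition_variable>
#include <list>
#include <mutex>

struct Waiter { std::condition_variable ready; bool killed= false; };

struct Node { unsigned long long pos; Waiter *waiter; };

struct AckTracker
{
  std::mutex lock;
  std::list<Node> pending;
  unsigned long long acked_pos= 0;

  // Waiter side: block until our position is acked, we are killed, or the
  // timeout expires; on an unsuccessful wait, detach from our node first.
  bool wait_for_ack(Waiter &self, unsigned long long my_pos,
                    std::chrono::milliseconds timeout)
  {
    std::unique_lock<std::mutex> guard(lock);
    bool acked= self.ready.wait_for(guard, timeout, [&] {
      return acked_pos >= my_pos || self.killed;
    }) && !self.killed;
    if (!acked)
      for (Node &n : pending)            // ~ Active_tranx::unlink_thd_as_waiter()
        if (n.waiter == &self)
          n.waiter= nullptr;             // node stays, but nobody signals us now
    return acked;
  }
};

int main()
{
  AckTracker t;
  Waiter w;
  t.pending.push_back(Node{5, &w});
  // No ACK arrives, so this times out and detaches w from its node.
  return t.wait_for_ack(w, 5, std::chrono::milliseconds(10)) ? 1 : 0;
}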
*/ + if (m_active_tranxs) + m_active_tranxs->clear_active_tranx_nodes(NULL, 0, + signal_waiting_transaction); + if (m_state) { m_state = false; - /* Clear the active transaction list. */ - DBUG_ASSERT(m_active_tranxs != NULL); - m_active_tranxs->clear_active_tranx_nodes(NULL, 0); rpl_semi_sync_master_off_times++; m_wait_file_name_inited = false; m_reply_file_name_inited = false; sql_print_information("Semi-sync replication switched OFF."); } - cond_broadcast(); /* wake up all waiting threads */ DBUG_VOID_RETURN; } @@ -1190,7 +1247,8 @@ int Repl_semi_sync_master::update_sync_header(THD* thd, unsigned char *packet, DBUG_RETURN(0); } -int Repl_semi_sync_master::write_tranx_in_binlog(const char* log_file_name, +int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd, + const char *log_file_name, my_off_t log_file_pos) { int result = 0; @@ -1233,7 +1291,7 @@ int Repl_semi_sync_master::write_tranx_in_binlog(const char* log_file_name, if (is_on()) { DBUG_ASSERT(m_active_tranxs != NULL); - if(m_active_tranxs->insert_tranx_node(log_file_name, log_file_pos)) + if(m_active_tranxs->insert_tranx_node(thd, log_file_name, log_file_pos)) { /* if insert tranx_node failed, print a warning message @@ -1362,21 +1420,34 @@ void Repl_semi_sync_master::set_export_stats() unlock(); } -void Repl_semi_sync_master::await_slave_reply() +void Repl_semi_sync_master::await_all_slave_replies(const char *msg) { - struct timespec abstime; + struct timespec timeout; + int wait_result= 0; + bool first= true; + DBUG_ENTER("Repl_semi_sync_master::::await_all_slave_replies"); - DBUG_ENTER("Repl_semi_sync_master::::await_slave_reply"); + /* + Wait for all transactions that need ACKS to have received them; or timeout. + If it is a timeout, the connection thread should attempt to turn off + semi-sync and broadcast to all other waiting threads to move on. + + COND_binlog_send is only signalled after the Active_tranx cache has been + emptied. + */ + create_timeout(&timeout, NULL); lock(); + while (get_master_enabled() && is_on() && !m_active_tranxs->is_empty() && !wait_result) + { + if (msg && first) + { + first= false; + sql_print_information(msg); + } - /* Just return if there is nothing to wait for */ - if (!rpl_semi_sync_master_wait_sessions) - goto end; - - create_timeout(&abstime, NULL); - cond_timewait(&abstime); - -end: + wait_result= + mysql_cond_timedwait(&COND_binlog_send, &LOCK_binlog, &timeout); + } unlock(); DBUG_VOID_RETURN; } diff --git a/sql/semisync_master.h b/sql/semisync_master.h index 99f46869354..3978d21a61d 100644 --- a/sql/semisync_master.h +++ b/sql/semisync_master.h @@ -31,6 +31,7 @@ extern PSI_cond_key key_COND_binlog_send; struct Tranx_node { char log_name[FN_REFLEN]; my_off_t log_pos; + THD *thd; /* The thread awaiting an ACK */ struct Tranx_node *next; /* the next node in the sorted list */ struct Tranx_node *hash_next; /* the next node during hash collision */ }; @@ -288,6 +289,18 @@ private: } }; +/** + Function pointer type to run on the contents of an Active_tranx node. + + Return 0 for success, 1 for error. + + Note Repl_semi_sync_master::LOCK_binlog is not guaranteed to be held for + its invocation. See the context in which it is called to know. +*/ + +typedef int (*active_tranx_action)(THD *trx_thd, const char *log_file_name, + my_off_t trx_log_file_pos); + /** This class manages memory for active transaction list. 
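await_all_slave_replies(), rewritten earlier in this hunk, drops the per-session wait counter: shutdown now simply waits, bounded by the usual semi-sync timeout, until the Active_tranx list is empty. A compact standalone sketch of that wait-until-empty step; PendingAcks and its members are invented stand-ins for the real tracker and COND_binlog:

#include <chrono>
#include <condition_variable>
#include <list>
#include <mutex>

struct PendingAcks
{
  std::mutex lock;                      // ~ LOCK_binlog
  std::condition_variable all_clear;    // signalled when the list empties
  std::list<unsigned long long> pending;

  // ~ Repl_semi_sync_master::await_all_slave_replies(): wait until every
  // transaction that needs an ACK has received one, or give up on timeout.
  bool await_all(std::chrono::milliseconds timeout)
  {
    std::unique_lock<std::mutex> guard(lock);
    return all_clear.wait_for(guard, timeout,
                              [&] { return pending.empty(); });
  }
};

int main()
{
  PendingAcks acks;                     // nothing pending, so this returns at once
  return acks.await_all(std::chrono::milliseconds(100)) ? 0 : 1;
}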
@@ -308,6 +321,7 @@ private: int m_num_entries; /* maximum hash table entries */ mysql_mutex_t *m_lock; /* mutex lock */ + mysql_cond_t *m_cond_empty; /* signalled when cleared all Tranx_node */ inline void assert_lock_owner(); @@ -330,7 +344,8 @@ private: } public: - Active_tranx(mysql_mutex_t *lock, unsigned long trace_level); + Active_tranx(mysql_mutex_t *lock, mysql_cond_t *cond, + unsigned long trace_level); ~Active_tranx(); /* Insert an active transaction node with the specified position. @@ -338,15 +353,38 @@ public: * Return: * 0: success; non-zero: error */ - int insert_tranx_node(const char *log_file_name, my_off_t log_file_pos); + int insert_tranx_node(THD *thd_to_wait, const char *log_file_name, + my_off_t log_file_pos); /* Clear the active transaction nodes until(inclusive) the specified * position. * If log_file_name is NULL, everything will be cleared: the sorted * list and the hash table will be reset to empty. + * + * The pre_delete_hook parameter is a function pointer that will be invoked + * for each Active_tranx node, in order, from m_trx_front to m_trx_rear, + * e.g. to signal their wakeup condition. Repl_semi_sync_binlog::LOCK_binlog + * is held while this is invoked. */ void clear_active_tranx_nodes(const char *log_file_name, - my_off_t log_file_pos); + my_off_t log_file_pos, + active_tranx_action pre_delete_hook); + + /* Unlinks a thread from a Tranx_node, so it will not be referenced/signalled + * if it is separately killed. Note that this keeps the Tranx_node itself in + * the cache so it can still be awaited by await_all_slave_replies(), e.g. + * as is done by SHUTDOWN WAIT FOR ALL SLAVES. + */ + void unlink_thd_as_waiter(const char *log_file_name, my_off_t log_file_pos); + +#ifndef DBUG_OFF + /* Uses DBUG_ASSERT statements to ensure that the argument thd_to_check + * matches the thread of the respective Tranx_node::thd of the passed in + * log_file_name and log_file_pos. + */ + void assert_thd_is_waiter(THD *thd_to_check, const char *log_file_name, + my_off_t log_file_pos); +#endif /* Given a position, check to see whether the position is an active * transaction's ending position by probing the hash table. @@ -359,6 +397,12 @@ public: static int compare(const char *log_file_name1, my_off_t log_file_pos1, const char *log_file_name2, my_off_t log_file_pos2); + + /* Check if there are no transactions actively awaiting ACKs. Returns true + * if the internal linked list has no entries, false otherwise. + */ + bool is_empty() { return m_trx_front == NULL; } + }; /** @@ -433,8 +477,6 @@ class Repl_semi_sync_master void lock(); void unlock(); - void cond_broadcast(); - int cond_timewait(struct timespec *wait_time); /* Is semi-sync replication on? */ bool is_on() { @@ -472,8 +514,6 @@ class Repl_semi_sync_master m_wait_timeout = wait_timeout; } - int sync_get_master_wait_sessions(); - /* Calculates a timeout that is m_wait_timeout after start_arg and saves it in out. If start_arg is NULL, the timeout is m_wait_timeout after the @@ -482,10 +522,15 @@ class Repl_semi_sync_master void create_timeout(struct timespec *out, struct timespec *start_arg); /* - Blocks the calling thread until the ack_receiver either receives an ACK - or times out (from rpl_semi_sync_master_timeout) + Blocks the calling thread until the ack_receiver either receives ACKs for + all transactions awaiting ACKs, or times out (from + rpl_semi_sync_master_timeout). 
+ + If info_msg is provided, it will be output via sql_print_information when + there are transactions awaiting ACKs; info_msg is not output if there are + no transasctions to await. */ - void await_slave_reply(); + void await_all_slave_replies(const char *msg); /*set the ACK point, after binlog sync or after transaction commit*/ void set_wait_point(unsigned long ack_point) @@ -561,9 +606,23 @@ class Repl_semi_sync_master /*Wait after the transaction is rollback*/ int wait_after_rollback(THD *thd, bool all); - /*Store the current binlog position in m_active_tranxs. This position should - * be acked by slave*/ - int report_binlog_update(THD *thd, const char *log_file,my_off_t log_pos); + /* Store the current binlog position in m_active_tranxs. This position should + * be acked by slave. + * + * Inputs: + * trans_thd Thread of the transaction which is executing the + * transaction. + * waiter_thd Thread that will wait for the ACK from the replica, + * which depends on the semi-sync wait point. If AFTER_SYNC, + * and also using binlog group commit, this will be the leader + * thread of the binlog commit. Otherwise, it is the thread that + * is executing the transaction, i.e. the same as trans_thd. + * log_file Name of the binlog file that the transaction is written into + * log_pos Offset within the binlog file that the transaction is written + * at + */ + int report_binlog_update(THD *trans_thd, THD *waiter_thd, + const char *log_file, my_off_t log_pos); int dump_start(THD* thd, const char *log_file, @@ -609,13 +668,19 @@ class Repl_semi_sync_master * semi-sync is on * * Input: (the transaction events' ending binlog position) + * THD - (IN) thread that will wait for an ACK. This can be the + * binlog leader thread when using wait_point + * AFTER_SYNC with binlog group commit. In all other + * cases, this is the user thread executing the + * transaction. * log_file_name - (IN) transaction ending position's file name * log_file_pos - (IN) transaction ending position's file offset * * Return: * 0: success; non-zero: error */ - int write_tranx_in_binlog(const char* log_file_name, my_off_t log_file_pos); + int write_tranx_in_binlog(THD *thd, const char *log_file_name, + my_off_t log_file_pos); /* Read the slave's reply so that we know how much progress the slave makes * on receive replication events. @@ -633,30 +698,6 @@ class Repl_semi_sync_master /*called before reset master*/ int before_reset_master(); - /* - Determines if the given thread is currently awaiting a semisync_ack. Note - that the thread's value is protected by this class's LOCK_binlog, so this - function (indirectly) provides safe access. - */ - my_bool is_thd_awaiting_semisync_ack(THD *thd) - { - lock(); - my_bool ret= thd->is_awaiting_semisync_ack; - unlock(); - return ret; - } - - /* - Update the thread's value for is_awaiting_semisync_ack. LOCK_binlog (from - this class) should be acquired before calling this function. 
- */ - void set_thd_awaiting_semisync_ack(THD *thd, - my_bool _is_awaiting_semisync_ack) - { - mysql_mutex_assert_owner(&LOCK_binlog); - thd->is_awaiting_semisync_ack= _is_awaiting_semisync_ack; - } - mysql_mutex_t LOCK_rpl_semi_sync_master_enabled; }; diff --git a/sql/semisync_master_ack_receiver.cc b/sql/semisync_master_ack_receiver.cc index a311599c54b..29fa5fd5328 100644 --- a/sql/semisync_master_ack_receiver.cc +++ b/sql/semisync_master_ack_receiver.cc @@ -149,7 +149,7 @@ bool Ack_receiver::add_slave(THD *thd) slave->thd= thd; slave->vio= *thd->net.vio; slave->vio.mysql_socket.m_psi= NULL; - slave->vio.read_timeout= 1; + slave->vio.read_timeout= 1; // 1 ms mysql_mutex_lock(&m_mutex); @@ -338,6 +338,17 @@ void Ack_receiver::run() */ net.compress= slave->thd->net.compress; + if (unlikely(listener.is_socket_hangup(slave))) + { + if (global_system_variables.log_warnings > 2) + sql_print_warning("Semisync ack receiver got hangup " + "from slave server-id %d", + slave->server_id()); + it.remove(); + m_slaves_changed= true; + continue; + } + len= my_net_read(&net); if (likely(len != packet_error)) { diff --git a/sql/semisync_master_ack_receiver.h b/sql/semisync_master_ack_receiver.h index 817df513069..6b3ff3695d0 100644 --- a/sql/semisync_master_ack_receiver.h +++ b/sql/semisync_master_ack_receiver.h @@ -228,6 +228,11 @@ public: return m_fds[slave->m_fds_index].revents & POLLIN; } + bool is_socket_hangup(const Slave *slave) + { + return m_fds[slave->m_fds_index].revents & POLLHUP; + } + void clear_socket_info(const Slave *slave) { m_fds[slave->m_fds_index].fd= -1; @@ -296,6 +301,11 @@ public: return FD_ISSET(slave->sock_fd(), &m_fds); } + bool is_socket_hangup(const Slave *slave) + { + return 0; + } + bool has_signal_data() override { return FD_ISSET(local_read_signal, &m_fds); diff --git a/sql/semisync_slave.cc b/sql/semisync_slave.cc index 3bd6d135087..d10754ad374 100644 --- a/sql/semisync_slave.cc +++ b/sql/semisync_slave.cc @@ -34,7 +34,6 @@ int Repl_semi_sync_slave::init_object() m_init_done = true; /* References to the parameter works after set_options(). */ - set_slave_enabled(global_rpl_semi_sync_slave_enabled); set_trace_level(rpl_semi_sync_slave_trace_level); set_delay_master(rpl_semi_sync_slave_delay_master); set_kill_conn_timeout(rpl_semi_sync_slave_kill_conn_timeout); diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc index 17240fd4fbd..06513387ac7 100644 --- a/sql/service_wsrep.cc +++ b/sql/service_wsrep.cc @@ -201,11 +201,11 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, /* Note: do not store/reset globals before wsrep_bf_abort() call to avoid losing BF thd context. 
*/ - mysql_mutex_lock(&victim_thd->LOCK_thd_data); if (!(bf_thd && bf_thd != victim_thd)) { DEBUG_SYNC(victim_thd, "wsrep_before_SR_rollback"); } + mysql_mutex_lock(&victim_thd->LOCK_thd_data); if (bf_thd) { wsrep_bf_abort(bf_thd, victim_thd); diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index 8b2ca9ed794..544a01747bb 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -4440,23 +4440,23 @@ ER_ERROR_DURING_CHECKPOINT swe "Fick fel %M vid CHECKPOINT" ukr "Отримано помилку %M під час CHECKPOINT" ER_NEW_ABORTING_CONNECTION 08S01 - chi "终止的连接 %lld 到数据库: '%-.192s' 用户: '%-.48s' 主机: '%-.64s' (%-.64s)" - cze "Spojení %lld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s' (%-.64s) bylo přerušeno" - dan "Afbrød forbindelsen %lld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s' (%-.64s)" - eng "Aborted connection %lld to db: '%-.192s' user: '%-.48s' host: '%-.64s' (%-.64s)" - est "Ühendus katkestatud %lld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s' (%-.64s)" - fre "Connection %lld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s' (%-.64s)" - ger "Abbruch der Verbindung %lld zur Datenbank '%-.192s'. Benutzer: '%-.48s', Host: '%-.64s' (%-.64s)" - geo "შეწყდა კავშირი %lld ბაზამდე: '%-.192s' მომხმარებელი: '%-.48s' ჰოსტი: '%-.64s' (%-.64s)" - ita "Interrotta la connessione %lld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s' (%-.64s)" - jpn "接続 %lld が中断されました。データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s' (%-.64s)" - nla "Afgebroken verbinding %lld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s' (%-.64s)" - por "Conexão %lld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s' ('%-.64s')" - rus "Прервано соединение %lld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s' (%-.64s)" - serbian "Prekinuta konekcija broj %lld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s' (%-.64s)" - spa "Abortada conexión %lld a la base de datos: '%-.192s' usuario: '%-.48s' equipo: '%-.64s' (%-.64s)" - swe "Avbröt länken för tråd %lld till db '%-.192s', användare '%-.48s', host '%-.64s' (%-.64s)" - ukr "Перервано з'єднання %lld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s' (%-.64s)" + chi "终止的连接 %lld 到数据库: '%-.192s' 用户: '%-.48s' 主机: '%-.64s'%-.64s (%-.64s)" + cze "Spojení %lld do databáze: '%-.192s' uživatel: '%-.48s' stroj: '%-.64s'%-.64s (%-.64s) bylo přerušeno" + dan "Afbrød forbindelsen %lld til databasen '%-.192s' bruger: '%-.48s' vært: '%-.64s'%-.64s (%-.64s)" + eng "Aborted connection %lld to db: '%-.192s' user: '%-.48s' host: '%-.64s'%-.64s (%-.64s)" + est "Ühendus katkestatud %lld andmebaas: '%-.192s' kasutaja: '%-.48s' masin: '%-.64s'%-.64s (%-.64s)" + fre "Connection %lld avortée vers la bd: '%-.192s' utilisateur: '%-.48s' hôte: '%-.64s'%-.64s (%-.64s)" + ger "Abbruch der Verbindung %lld zur Datenbank '%-.192s'. 
Benutzer: '%-.48s', Host: '%-.64s'%-.64s (%-.64s)" + geo "შეწყდა კავშირი %lld ბაზამდე: '%-.192s' მომხმარებელი: '%-.48s' ჰოსტი: '%-.64s'%-.64s (%-.64s)" + ita "Interrotta la connessione %lld al db: ''%-.192s' utente: '%-.48s' host: '%-.64s'%-.64s (%-.64s)" + jpn "接続 %lld が中断されました。データベース: '%-.192s' ユーザー: '%-.48s' ホスト: '%-.64s'%-.64s (%-.64s)" + nla "Afgebroken verbinding %lld naar db: '%-.192s' gebruiker: '%-.48s' host: '%-.64s'%-.64s (%-.64s)" + por "Conexão %lld abortada para banco de dados '%-.192s' - usuário '%-.48s' - 'host' '%-.64s'%-.64s ('%-.64s')" + rus "Прервано соединение %lld к базе данных '%-.192s' пользователя '%-.48s' с хоста '%-.64s'%-.64s (%-.64s)" + serbian "Prekinuta konekcija broj %lld ka bazi: '%-.192s' korisnik je bio: '%-.48s' a host: '%-.64s'%-.64s (%-.64s)" + spa "Abortada conexión %lld a la base de datos: '%-.192s' usuario: '%-.48s' equipo: '%-.64s'%-.64s (%-.64s)" + swe "Avbröt länken för tråd %lld till db '%-.192s', användare '%-.48s', host '%-.64s'%-.64s (%-.64s)" + ukr "Перервано з'єднання %lld до бази данних: '%-.192s' користувач: '%-.48s' хост: '%-.64s'%-.64s (%-.64s)" ER_UNUSED_10 eng "You should never see it" geo "ეს ვერასდროს უნდა დაინახოთ" diff --git a/sql/slave.cc b/sql/slave.cc index 7b9eced68d3..099d5939811 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -857,7 +857,7 @@ static void make_slave_transaction_retry_errors_printable(void) } -#define DEFAULT_SLAVE_RETRY_ERRORS 9 +static constexpr uint DEFAULT_SLAVE_RETRY_ERRORS= 10; bool init_slave_transaction_retry_errors(const char* arg) { @@ -899,9 +899,10 @@ bool init_slave_transaction_retry_errors(const char* arg) slave_transaction_retry_errors[3]= ER_NET_WRITE_INTERRUPTED; slave_transaction_retry_errors[4]= ER_LOCK_WAIT_TIMEOUT; slave_transaction_retry_errors[5]= ER_LOCK_DEADLOCK; - slave_transaction_retry_errors[6]= ER_CONNECT_TO_FOREIGN_DATA_SOURCE; - slave_transaction_retry_errors[7]= 2013; /* CR_SERVER_LOST */ - slave_transaction_retry_errors[8]= 12701; /* ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM */ + slave_transaction_retry_errors[6]= ER_CHECKREAD; + slave_transaction_retry_errors[7]= ER_CONNECT_TO_FOREIGN_DATA_SOURCE; + slave_transaction_retry_errors[8]= 2013; /* CR_SERVER_LOST */ + slave_transaction_retry_errors[9]= 12701; /* ER_SPIDER_REMOTE_SERVER_GONE_AWAY_NUM */ /* Add user codes after this */ for (p= arg, i= DEFAULT_SLAVE_RETRY_ERRORS; *p; ) @@ -1339,6 +1340,8 @@ static bool io_slave_killed(Master_info* mi) DBUG_ENTER("io_slave_killed"); DBUG_ASSERT(mi->slave_running); // tracking buffer overrun + if (mi->abort_slave || mi->io_thd->killed) + DBUG_PRINT("info", ("killed")); DBUG_RETURN(mi->abort_slave || mi->io_thd->killed); } @@ -2936,9 +2939,6 @@ void show_master_info_get_fields(THD *thd, List *field_list, field_list->push_back(new (mem_root) Item_empty_string(thd, "Slave_SQL_Running", 3), mem_root); - field_list->push_back(new (mem_root) - Item_empty_string(thd, "Replicate_Rewrite_DB", 23), - mem_root); field_list->push_back(new (mem_root) Item_empty_string(thd, "Replicate_Do_DB", 20), mem_root); @@ -3087,6 +3087,21 @@ void show_master_info_get_fields(THD *thd, List *field_list, Item_return_int(thd, "Slave_Transactional_Groups", 20, MYSQL_TYPE_LONGLONG), mem_root); + field_list->push_back(new (mem_root) + Item_empty_string(thd, "Replicate_Rewrite_DB", 23), + mem_root); + + /* + Note, we must never, _ever_, add extra rows to this output of SHOW SLAVE + STATUS, except here at the end before the extra rows of SHOW ALL SLAVES + STATUS. 
Otherwise, we break backwards compatibility with applications or + scripts that parse the output! + + This also means that we cannot add _any_ new rows in a GA version if a + different row was already added in a later MariaDB version, as this would + make it impossible to merge the change up while preserving the order of + rows. + */ if (full) { @@ -3202,7 +3217,6 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, &my_charset_bin); protocol->store(&slave_running[mi->slave_running], &my_charset_bin); protocol->store(mi->rli.slave_running ? &msg_yes : &msg_no, &my_charset_bin); - protocol->store(rpl_filter->get_rewrite_db()); protocol->store(rpl_filter->get_do_db()); protocol->store(rpl_filter->get_ignore_db()); @@ -3362,6 +3376,7 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, protocol->store(mi->total_ddl_groups); protocol->store(mi->total_non_trans_groups); protocol->store(mi->total_trans_groups); + protocol->store(rpl_filter->get_rewrite_db()); if (full) { @@ -3536,6 +3551,7 @@ static int init_slave_thread(THD* thd, Master_info *mi, } thd->security_ctx->skip_grants(); + thd->security_ctx->user=(char*) slave_user; thd->slave_thread= 1; thd->connection_name= mi->connection_name; thd->variables.sql_log_slow= !MY_TEST(thd->variables.log_slow_disabled_statements & LOG_SLOW_DISABLE_SLAVE); diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 0e985a95cfe..9108047c781 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2938,6 +2938,7 @@ bool acl_reload(THD *thd) } acl_cache->clear(0); + mysql_mutex_record_order(&acl_cache->lock, &LOCK_status); mysql_mutex_lock(&acl_cache->lock); old_acl_hosts= acl_hosts; @@ -7618,7 +7619,7 @@ static bool can_grant_role(THD *thd, ACL_ROLE *role) { Security_context *sctx= thd->security_ctx; - if (!sctx->user) // replication + if (!sctx->is_user_defined()) // galera return true; ACL_USER *grantee= find_user_exact(sctx->priv_host, sctx->priv_user); @@ -13339,8 +13340,27 @@ static bool send_server_handshake_packet(MPVIO_EXT *mpvio, *end++= 0; int2store(end, thd->client_capabilities); + + CHARSET_INFO *handshake_cs= default_charset_info; + if (handshake_cs->number > 0xFF) + { + /* + A workaround for a 2-byte collation ID: translate it into + the ID of the primary collation of this character set. + */ + CHARSET_INFO *cs= get_charset_by_csname(handshake_cs->cs_name.str, + MY_CS_PRIMARY, MYF(MY_WME)); + /* + cs should not normally be NULL, however it may be possible + with a dynamic character set incorrectly defined in Index.xml. + For safety let's fallback to latin1 in case cs is NULL. + */ + handshake_cs= cs ? 
cs : &my_charset_latin1; + } + /* write server characteristics: up to 16 bytes allowed */ - end[2]= (char) default_charset_info->number; + end[2]= (char) handshake_cs->number; + int2store(end+3, mpvio->auth_info.thd->server_status); int2store(end+5, thd->client_capabilities >> 16); end[7]= data_len; diff --git a/sql/sql_admin.cc b/sql/sql_admin.cc index 2186ee60db1..983070a9b3d 100644 --- a/sql/sql_admin.cc +++ b/sql/sql_admin.cc @@ -863,11 +863,9 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, !(check_opt->sql_flags & TT_USEFRM)) { handler *file= table->table->file; - int check_old_types= file->check_old_types(); int check_for_upgrade= file->ha_check_for_upgrade(check_opt); - if (check_old_types == HA_ADMIN_NEEDS_ALTER || - check_for_upgrade == HA_ADMIN_NEEDS_ALTER) + if (check_for_upgrade == HA_ADMIN_NEEDS_ALTER) { /* We use extra_open_options to be able to open crashed tables */ thd->open_options|= extra_open_options; @@ -876,7 +874,7 @@ static bool mysql_admin_table(THD* thd, TABLE_LIST* tables, thd->open_options&= ~extra_open_options; goto send_result; } - if (check_old_types || check_for_upgrade) + if (check_for_upgrade) { /* If repair is not implemented for the engine, run ALTER TABLE */ need_repair_or_alter= 1; diff --git a/sql/sql_base.cc b/sql/sql_base.cc index 779146d2855..a87a01f0b5d 100644 --- a/sql/sql_base.cc +++ b/sql/sql_base.cc @@ -814,8 +814,10 @@ int close_thread_tables(THD *thd) { TABLE *table; int error= 0; + PSI_stage_info org_stage; DBUG_ENTER("close_thread_tables"); + thd->backup_stage(&org_stage); THD_STAGE_INFO(thd, stage_closing_tables); #ifdef EXTRA_DEBUG @@ -931,7 +933,10 @@ int close_thread_tables(THD *thd) we will exit this function a few lines below. */ if (! thd->lex->requires_prelocking()) - DBUG_RETURN(0); + { + error= 0; + goto end; + } /* We are in the top-level statement of a prelocked statement, @@ -942,7 +947,10 @@ int close_thread_tables(THD *thd) thd->locked_tables_mode= LTM_LOCK_TABLES; if (thd->locked_tables_mode == LTM_LOCK_TABLES) - DBUG_RETURN(0); + { + error= 0; + goto end; + } thd->leave_locked_tables_mode(); @@ -971,6 +979,8 @@ int close_thread_tables(THD *thd) while (thd->open_tables) (void) close_thread_table(thd, &thd->open_tables); +end: + THD_STAGE_INFO(thd, org_stage); DBUG_RETURN(error); } @@ -5006,6 +5016,9 @@ prepare_fk_prelocking_list(THD *thd, Query_tables_list *prelocking_ctx, Query_arena *arena, backup; TABLE *table= table_list->table; + if (!table->file->referenced_by_foreign_key()) + DBUG_RETURN(FALSE); + arena= thd->activate_stmt_arena_if_needed(&backup); table->file->get_parent_foreign_key_list(thd, &fk_list); @@ -5091,16 +5104,12 @@ bool DML_prelocking_strategy::handle_table(THD *thd, return TRUE; } - if (table->file->referenced_by_foreign_key()) - { - if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list, - need_prelocking, - table_list->trg_event_map)) - return TRUE; - } + if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list, + need_prelocking, + table_list->trg_event_map)) + return TRUE; } - else if (table_list->slave_fk_event_map && - table->file->referenced_by_foreign_key()) + else if (table_list->slave_fk_event_map) { if (prepare_fk_prelocking_list(thd, prelocking_ctx, table_list, need_prelocking, diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 086623633ca..305ca499210 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -680,8 +680,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) #ifdef HAVE_REPLICATION , current_linfo(0), - slave_info(0), - 
is_awaiting_semisync_ack(0) + slave_info(0) #endif #ifdef WITH_WSREP , @@ -4478,7 +4477,7 @@ void Security_context::destroy() my_free((char*) host); host= NULL; } - if (user != delayed_user) + if (is_user_defined()) { my_free((char*) user); user= NULL; @@ -5300,14 +5299,6 @@ extern "C" enum enum_server_command thd_current_command(MYSQL_THD thd) return thd->get_command(); } -#ifdef HAVE_REPLICATION /* Working around MDEV-24622 */ -/** @return whether the current thread is for applying binlog in a replica */ -extern "C" int thd_is_slave(const MYSQL_THD thd) -{ - return thd && thd->slave_thread; -} -#endif /* HAVE_REPLICATION */ - /* Returns high resolution timestamp for the start of the current query. */ extern "C" unsigned long long thd_start_utime(const MYSQL_THD thd) diff --git a/sql/sql_class.h b/sql/sql_class.h index e2269439cbb..ab6994fe4fe 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -1573,6 +1573,8 @@ public: */ bool check_access(const privilege_t want_access, bool match_any = false); bool is_priv_user(const char *user, const char *host); + bool is_user_defined() const + { return user && user != delayed_user && user != slave_user; }; }; @@ -5187,11 +5189,29 @@ public: { if (global_system_variables.log_warnings > threshold) { + char real_ip_str[64]; + real_ip_str[0]= 0; + + /* For proxied connections, add the real IP to the warning message */ + if (net.using_proxy_protocol && net.vio) + { + if(net.vio->localhost) + snprintf(real_ip_str, sizeof(real_ip_str), " real ip: 'localhost'"); + else + { + char buf[INET6_ADDRSTRLEN]; + if (!vio_getnameinfo((sockaddr *)&(net.vio->remote), buf, + sizeof(buf),NULL, 0, NI_NUMERICHOST)) + { + snprintf(real_ip_str, sizeof(real_ip_str), " real ip: '%s'",buf); + } + } + } Security_context *sctx= &main_security_ctx; sql_print_warning(ER_THD(this, ER_NEW_ABORTING_CONNECTION), thread_id, (db.str ? db.str : "unconnected"), sctx->user ? sctx->user : "unauthenticated", - sctx->host_or_ip, reason); + sctx->host_or_ip, real_ip_str, reason); } } @@ -5289,8 +5309,18 @@ public: Flag, mutex and condition for a thread to wait for a signal from another thread. - Currently used to wait for group commit to complete, can also be used for - other purposes. + Currently used to wait for group commit to complete, and COND_wakeup_ready + is used for threads to wait on semi-sync ACKs (though is protected by + Repl_semi_sync_master::LOCK_binlog). Note the following relationships + between these two use-cases when using + rpl_semi_sync_master_wait_point=AFTER_SYNC during group commit: + 1) Non-leader threads use COND_wakeup_ready to wait for the leader thread + to complete binlog commit. + 2) The leader thread uses COND_wakeup_ready to await ACKs from the + replica before signalling the non-leader threads to wake up. + + With wait_point=AFTER_COMMIT, there is no overlap as binlogging has + finished, so COND_wakeup_ready is safe to re-use. */ bool wakeup_ready; mysql_mutex_t LOCK_wakeup_ready; @@ -5418,14 +5448,6 @@ public: bool is_binlog_dump_thread(); #endif - /* - Indicates if this thread is suspended due to awaiting an ACK from a - replica. True if suspended, false otherwise. 
- - Note that this variable is protected by Repl_semi_sync_master::LOCK_binlog - */ - bool is_awaiting_semisync_ack; - inline ulong wsrep_binlog_format(ulong binlog_format) const { #ifdef WITH_WSREP diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index 8878c722557..6149cc49ebb 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -1274,7 +1274,7 @@ void prepare_new_connection_state(THD* thd) thd->thread_id, thd->db.str ? thd->db.str : "unconnected", sctx->user ? sctx->user : "unauthenticated", - sctx->host_or_ip, "init_connect command failed"); + sctx->host_or_ip, "", "init_connect command failed"); thd->server_status&= ~SERVER_STATUS_CLEAR_SET; thd->protocol->end_statement(); thd->killed = KILL_CONNECTION; diff --git a/sql/sql_derived.cc b/sql/sql_derived.cc index f56ce08aee0..6842ba3a790 100644 --- a/sql/sql_derived.cc +++ b/sql/sql_derived.cc @@ -1225,8 +1225,12 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) (derived->alias.str ? derived->alias.str : ""), derived->get_unit())); - if (unit->executed && !unit->uncacheable && !unit->describe && - !derived_is_recursive) + /* + Only fill derived tables once, unless the derived table is dependent in + which case we will delete all of its rows and refill it below. + */ + if (unit->executed && !(unit->uncacheable & UNCACHEABLE_DEPENDENT) && + !unit->describe && !derived_is_recursive) DBUG_RETURN(FALSE); /*check that table creation passed without problems. */ DBUG_ASSERT(derived->table && derived->table->is_created()); @@ -1285,6 +1289,7 @@ bool mysql_derived_fill(THD *thd, LEX *lex, TABLE_LIST *derived) } else { + DBUG_ASSERT(!unit->executed || (unit->uncacheable & UNCACHEABLE_DEPENDENT)); SELECT_LEX *first_select= unit->first_select(); unit->set_limit(unit->global_parameters()); if (unit->lim.is_unlimited()) diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index 97e1c102db9..c579ad17550 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1452,7 +1452,7 @@ static bool check_view_insertability(THD *thd, TABLE_LIST *view, *trans_end= trans_start + num; Field_translator *trans; uint used_fields_buff_size= bitmap_buffer_size(table->s->fields); - uint32 *used_fields_buff= (uint32*)thd->alloc(used_fields_buff_size); + my_bitmap_map *used_fields_buff= (my_bitmap_map*)thd->alloc(used_fields_buff_size); MY_BITMAP used_fields; enum_column_usage saved_column_usage= thd->column_usage; List_iterator_fast it(fields); @@ -2892,6 +2892,8 @@ TABLE *Delayed_insert::get_local_table(THD* client_thd) copy->def_read_set.bitmap= (my_bitmap_map*) bitmap; copy->def_write_set.bitmap= ((my_bitmap_map*) (bitmap + share->column_bitmap_size)); + create_last_bit_mask(©->def_read_set); + create_last_bit_mask(©->def_write_set); bitmaps_used= 2; if (share->default_fields || share->default_expressions) { diff --git a/sql/sql_lex.h b/sql/sql_lex.h index 3151f237f9c..f785d28622d 100644 --- a/sql/sql_lex.h +++ b/sql/sql_lex.h @@ -3280,7 +3280,7 @@ public: Table_type table_type; /* Used for SHOW CREATE */ List ref_list; List users_list; - List *insert_list,field_list,value_list,update_list; + List *insert_list= nullptr,field_list,value_list,update_list; List many_values; List var_list; List stmt_var_list; //SET_STATEMENT values diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc index 3d3728b9e00..5cd66d8047a 100644 --- a/sql/sql_manager.cc +++ b/sql/sql_manager.cc @@ -76,7 +76,9 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused))) pthread_detach_this_thread(); manager_thread = pthread_self(); 
mysql_mutex_lock(&LOCK_manager); - while (!abort_manager) + manager_thread_in_use = 1; + mysql_cond_signal(&COND_manager); + while (!abort_manager || cb_list) { /* XXX: This will need to be made more general to handle different * polling needs. */ @@ -116,6 +118,7 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused))) } mysql_mutex_lock(&LOCK_manager); } + DBUG_ASSERT(cb_list == NULL); manager_thread_in_use = 0; mysql_mutex_unlock(&LOCK_manager); mysql_mutex_destroy(&LOCK_manager); @@ -135,12 +138,19 @@ void start_handle_manager() pthread_t hThread; int err; DBUG_EXECUTE_IF("delay_start_handle_manager", my_sleep(1000);); - manager_thread_in_use = 1; mysql_cond_init(key_COND_manager, &COND_manager,NULL); mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL); if ((err= mysql_thread_create(key_thread_handle_manager, &hThread, &connection_attrib, handle_manager, 0))) + { sql_print_warning("Can't create handle_manager thread (errno: %M)", err); + DBUG_VOID_RETURN; + } + + mysql_mutex_lock(&LOCK_manager); + while (!manager_thread_in_use) + mysql_cond_wait(&COND_manager, &LOCK_manager); + mysql_mutex_unlock(&LOCK_manager); } DBUG_VOID_RETURN; } diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7a2f3286f82..37735181404 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -6167,13 +6167,11 @@ finish: if (unlikely(thd->is_error()) || (thd->variables.option_bits & OPTION_MASTER_SQL_ERROR)) { - THD_STAGE_INFO(thd, stage_rollback); trans_rollback_stmt(thd); } else { /* If commit fails, we should be able to reset the OK status. */ - THD_STAGE_INFO(thd, stage_commit); thd->get_stmt_da()->set_overwrite_status(true); trans_commit_stmt(thd); thd->get_stmt_da()->set_overwrite_status(false); @@ -6200,7 +6198,6 @@ finish: one of storage engines (e.g. due to deadlock). Rollback transaction in all storage engines including binary log. */ - THD_STAGE_INFO(thd, stage_rollback_implicit); trans_rollback_implicit(thd); thd->release_transactional_locks(); } @@ -6210,7 +6207,6 @@ finish: DBUG_ASSERT(! thd->in_sub_stmt); if (!(thd->variables.option_bits & OPTION_GTID_BEGIN)) { - THD_STAGE_INFO(thd, stage_commit_implicit); /* If commit fails, we should be able to reset the OK status. */ thd->get_stmt_da()->set_overwrite_status(true); /* Commit the normal transaction if one is active. */ diff --git a/sql/sql_partition.cc b/sql/sql_partition.cc index 3c1a803c724..483ea1566c5 100644 --- a/sql/sql_partition.cc +++ b/sql/sql_partition.cc @@ -1080,7 +1080,7 @@ void check_range_capable_PF(TABLE *table) static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info) { - uint32 *bitmap_buf; + my_bitmap_map *bitmap_buf; uint bitmap_bits= part_info->num_subparts? 
(part_info->num_subparts* part_info->num_parts): part_info->num_parts; @@ -1091,14 +1091,15 @@ static bool set_up_partition_bitmaps(THD *thd, partition_info *part_info) /* Allocate for both read and lock_partitions */ if (unlikely(!(bitmap_buf= - (uint32*) alloc_root(&part_info->table->mem_root, - bitmap_bytes * 2)))) + (my_bitmap_map*) alloc_root(&part_info->table->mem_root, + bitmap_bytes * 2)))) DBUG_RETURN(TRUE); my_bitmap_init(&part_info->read_partitions, bitmap_buf, bitmap_bits); /* Use the second half of the allocated buffer for lock_partitions */ - my_bitmap_init(&part_info->lock_partitions, bitmap_buf + (bitmap_bytes / 4), - bitmap_bits); + my_bitmap_init(&part_info->lock_partitions, + (my_bitmap_map*) (((char*) bitmap_buf) + bitmap_bytes), + bitmap_bits); part_info->bitmaps_are_initialized= TRUE; part_info->set_partition_bitmaps(NULL); DBUG_RETURN(FALSE); diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 1abf3067c77..4fadc176cad 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -3876,13 +3876,7 @@ bool change_master(THD* thd, Master_info* mi, bool *master_info_added) if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_SLAVE_POS) mi->using_gtid= Master_info::USE_GTID_SLAVE_POS; else if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_CURRENT_POS) - { mi->using_gtid= Master_info::USE_GTID_CURRENT_POS; - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_WARN_DEPRECATED_SYNTAX_NO_REPLACEMENT, - ER_THD(thd, ER_WARN_DEPRECATED_SYNTAX), - "master_use_gtid=current_pos", "master_demote_to_slave=1"); - } else if (lex_mi->use_gtid_opt == LEX_MASTER_INFO::LEX_GTID_NO || lex_mi->log_file_name || lex_mi->pos || lex_mi->relay_log_name || lex_mi->relay_log_pos) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 9b964fd87bb..4fc62c573f3 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -18349,6 +18349,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, if (!eq_item || eq_item->set_cmp_func(thd)) return 0; + eq_item->eval_not_null_tables(0); eq_item->quick_fix_field(); } current_sjm= field_sjm; @@ -18406,6 +18407,7 @@ Item *eliminate_item_equal(THD *thd, COND *cond, COND_EQUAL *upper_levels, { res->quick_fix_field(); res->update_used_tables(); + res->eval_not_null_tables(0); } return res; @@ -20075,6 +20077,12 @@ Item_cond::remove_eq_conds(THD *thd, Item::cond_result *cond_value, bool and_level= functype() == Item_func::COND_AND_FUNC; List *cond_arg_list= argument_list(); + if (check_stack_overrun(thd, STACK_MIN_SIZE, NULL)) + { + *cond_value= Item::COND_FALSE; + return (COND*) 0; // Fatal error flag is set! + } + if (and_level) { /* diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 896cd347c62..0d265f3d512 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2806,9 +2806,10 @@ static my_bool list_callback(THD *tmp, list_callback_arg *arg) thd_info->thread_id=tmp->thread_id; thd_info->os_thread_id=tmp->os_thread_id; - thd_info->user= arg->thd->strdup(tmp_sctx->user ? tmp_sctx->user : - (tmp->system_thread ? - "system user" : "unauthenticated user")); + thd_info->user= arg->thd->strdup(tmp_sctx->user && tmp_sctx->user != slave_user ? + tmp_sctx->user : + (tmp->system_thread ? + "system user" : "unauthenticated user")); if (tmp->peer_port && (tmp_sctx->host || tmp_sctx->ip) && arg->thd->security_ctx->host_or_ip[0]) { @@ -3311,7 +3312,7 @@ static my_bool processlist_callback(THD *tmp, processlist_callback_arg *arg) /* ID */ arg->table->field[0]->store((longlong) tmp->thread_id, TRUE); /* USER */ - val= tmp_sctx->user ? 
tmp_sctx->user : + val= tmp_sctx->user && tmp_sctx->user != slave_user ? tmp_sctx->user : (tmp->system_thread ? "system user" : "unauthenticated user"); arg->table->field[1]->store(val, strlen(val), cs); /* HOST */ diff --git a/sql/sql_string.cc b/sql/sql_string.cc index c183873ddc4..79df1e0ff94 100644 --- a/sql/sql_string.cc +++ b/sql/sql_string.cc @@ -1144,28 +1144,45 @@ bool String::append_for_single_quote(const char *st, size_t len) int chlen; for (; st < end; st++) { - switch (*st) + char ch2= (char) (uchar) escaped_wc_for_single_quote((uchar) *st); + if (ch2) { - case '\\': APPEND(STRING_WITH_LEN("\\\\")); break; - case '\0': APPEND(STRING_WITH_LEN("\\0")); break; - case '\'': APPEND(STRING_WITH_LEN("\\'")); break; - case '\b': APPEND(STRING_WITH_LEN("\\b")); break; - case '\t': APPEND(STRING_WITH_LEN("\\t")); break; - case '\n': APPEND(STRING_WITH_LEN("\\n")); break; - case '\r': APPEND(STRING_WITH_LEN("\\r")); break; - case '\032': APPEND(STRING_WITH_LEN("\\Z")); break; - default: if ((chlen=charset()->charlen(st, end)) > 0) - { - APPEND(st, chlen); - st+= chlen-1; - } - else - APPEND(*st); + if (append('\\') || append(ch2)) + return true; + continue; } + if ((chlen= charset()->charlen(st, end)) > 0) + { + APPEND(st, chlen); + st+= chlen-1; + } + else + APPEND(*st); } return 0; } + +bool String::append_for_single_quote_using_mb_wc(const char *src, + size_t length, + CHARSET_INFO *cs) +{ + DBUG_ASSERT(&my_charset_bin != charset()); + DBUG_ASSERT(&my_charset_bin != cs); + const uchar *str= (const uchar *) src; + const uchar *end= (const uchar *) src + length; + int chlen; + my_wc_t wc; + for ( ; (chlen= cs->cset->mb_wc(cs, &wc, str, end)) > 0; str+= chlen) + { + my_wc_t wc2= escaped_wc_for_single_quote(wc); + if (wc2 ? (append_wc('\\') || append_wc(wc2)) : append_wc(wc)) + return true; + } + return false; +} + + void String::print(String *str) const { str->append_for_single_quote(Ptr, str_length); diff --git a/sql/sql_string.h b/sql/sql_string.h index 200735922df..346a767237c 100644 --- a/sql/sql_string.h +++ b/sql/sql_string.h @@ -1188,6 +1188,43 @@ public: print_with_conversion(to, cs); } + static my_wc_t escaped_wc_for_single_quote(my_wc_t ch) + { + switch (ch) { + case '\\': return '\\'; + case '\0': return '0'; + case '\'': return '\''; + case '\b': return 'b'; + case '\t': return 't'; + case '\n': return 'n'; + case '\r': return 'r'; + case '\032': return 'Z'; + } + return 0; + } + + // Append for single quote using mb_wc/wc_mb Unicode conversion + bool append_for_single_quote_using_mb_wc(const char *str, size_t length, + CHARSET_INFO *cs); + + // Append for single quote with optional mb_wc/wc_mb conversion + bool append_for_single_quote_opt_convert(const char *str, + size_t length, + CHARSET_INFO *cs) + { + return charset() == &my_charset_bin || cs == &my_charset_bin || + my_charset_same(charset(), cs) ? + append_for_single_quote(str, length) : + append_for_single_quote_using_mb_wc(str, length, cs); + } + + bool append_for_single_quote_opt_convert(const String &str) + { + return append_for_single_quote_opt_convert(str.ptr(), + str.length(), + str.charset()); + } + bool append_for_single_quote(const char *st, size_t len); bool append_for_single_quote(const String *s) { diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 82fa163e026..8057d91ca26 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -6602,8 +6602,6 @@ static KEY *find_key_ci(const char *key_name, KEY *key_start, KEY *key_end) @param thd Thread @param table The original table. 
- @param varchar Indicates that new definition has new - VARCHAR column. @param[in/out] ha_alter_info Data structure which already contains basic information about create options, field and keys for the new version of @@ -6638,7 +6636,7 @@ static KEY *find_key_ci(const char *key_name, KEY *key_start, KEY *key_end) @retval false success */ -static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, +static bool fill_alter_inplace_info(THD *thd, TABLE *table, Alter_inplace_info *ha_alter_info) { Field **f_ptr, *field; @@ -6688,13 +6686,6 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, if (alter_info->flags & ALTER_CHANGE_COLUMN) ha_alter_info->handler_flags|= ALTER_COLUMN_DEFAULT; - /* - If we altering table with old VARCHAR fields we will be automatically - upgrading VARCHAR column types. - */ - if (table->s->frm_version < FRM_VER_TRUE_VARCHAR && varchar) - ha_alter_info->handler_flags|= ALTER_STORED_COLUMN_TYPE; - DBUG_PRINT("info", ("handler_flags: %llu", ha_alter_info->handler_flags)); /* @@ -6736,6 +6727,30 @@ static bool fill_alter_inplace_info(THD *thd, TABLE *table, bool varchar, Check if type of column has changed. */ bool is_equal= field->is_equal(*new_field); + + if (is_equal) + { + const Type_handler *th= field->type_handler(); + if (th != th->type_handler_for_implicit_upgrade()) + { + /* + The field data type says it wants upgrade. + This should not be possible: + - if this is a new column definition, e.g. from statements like: + ALTER TABLE t1 ADD a INT; + ALTER TABLE t1 MODIFY a INT; + then it's coming from the parser, which returns + only up-to-date data types. + - if this is an old column definition, e.g. from: + ALTER TABLE t1 COMMENT 'new comment'; + it should have earlier called Column_definition_implicit_upgrade(), + which replaces old data types with up-to-date data types. + */ + DBUG_ASSERT(0); + is_equal= false; + } + } + if (!is_equal) { if (field->table->file->can_convert_nocopy(*field, *new_field)) @@ -9139,7 +9154,7 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, case Alter_drop::CHECK_CONSTRAINT: case Alter_drop::PERIOD: my_error(ER_CANT_DROP_FIELD_OR_KEY, MYF(0), drop->type_name(), - alter_info->drop_list.head()->name); + drop->name); goto err; case Alter_drop::FOREIGN_KEY: // Leave the DROP FOREIGN KEY names in the alter_info->drop_list. @@ -10106,12 +10121,7 @@ bool mysql_alter_table(THD *thd, const LEX_CSTRING *new_db, */ KEY *key_info; uint key_count; - /* - Remember if the new definition has new VARCHAR column; - create_info->varchar will be reset in create_table_impl()/ - mysql_prepare_create_table(). - */ - bool varchar= create_info->varchar, table_creation_was_logged= 0; + bool table_creation_was_logged= 0; bool binlog_as_create_select= 0, log_if_exists= 0; uint tables_opened; handlerton *new_db_type= create_info->db_type, *old_db_type; @@ -10929,7 +10939,7 @@ do_continue:; bool use_inplace= true; /* Fill the Alter_inplace_info structure.
*/ - if (fill_alter_inplace_info(thd, table, varchar, &ha_alter_info)) + if (fill_alter_inplace_info(thd, table, &ha_alter_info)) goto err_new_table_cleanup; alter_ctx.tmp_storage_engine_name_partitioned= diff --git a/sql/sql_type.cc b/sql/sql_type.cc index a086e338996..b1911c79e14 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -41,6 +41,7 @@ Named_type_handler type_handler_bool("boolean"); Named_type_handler type_handler_stiny("tinyint"); Named_type_handler type_handler_sshort("smallint"); Named_type_handler type_handler_slong("int"); +Named_type_handler type_handler_slong_ge0("int"); Named_type_handler type_handler_sint24("mediumint"); Named_type_handler type_handler_slonglong("bigint"); Named_type_handler type_handler_utiny("tiny unsigned"); @@ -4620,6 +4621,10 @@ bool Type_handler_general_purpose_int:: bool unsigned_flag= items[0]->unsigned_flag; for (uint i= 1; i < nitems; i++) { + /* + TODO: avoid creating DECIMAL for a mix of ulong and slong_ge0. + It's too late for 10.5. Let's do it in a higher version. + */ if (unsigned_flag != items[i]->unsigned_flag) { // Convert a mixture of signed and unsigned int to decimal @@ -4629,6 +4634,21 @@ bool Type_handler_general_purpose_int:: } } func->aggregate_attributes_int(items, nitems); + for (uint i= 0; i < nitems; i++) + { + if (items[i]->type_handler() == &type_handler_slong_ge0) + { + /* + A slong_ge0 argument found. + We need to add an extra character for the sign. + TODO: rewrite aggregate_attributes_int() to find + the maximum decimal_precision() instead of the maximum max_length. + This change is too late for 10.5, so let's do it in a higher version. + */ + uint digits_and_sign= items[i]->decimal_precision() + 1; + set_if_bigger(func->max_length, digits_and_sign); + } + } handler->set_handler(func->unsigned_flag ? 
handler->type_handler()->type_handler_unsigned() : handler->type_handler()->type_handler_signed()); @@ -4924,6 +4944,13 @@ bool Type_handler_real_result:: /*************************************************************************/ +bool Type_handler_long_ge0:: + Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *func) const +{ + return func->fix_length_and_dec_sint_ge0(); +} + + bool Type_handler_int_result:: Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *func) const { @@ -6366,6 +6393,14 @@ bool Type_handler_int_result:: } +bool Type_handler_long_ge0:: + Item_func_round_fix_length_and_dec(Item_func_round *item) const +{ + item->fix_arg_slong_ge0(); + return false; +} + + bool Type_handler_year:: Item_func_round_fix_length_and_dec(Item_func_round *item) const { @@ -6587,6 +6622,14 @@ bool Type_handler_int_result:: } +bool Type_handler_long_ge0:: + Item_func_abs_fix_length_and_dec(Item_func_abs *item) const +{ + item->fix_length_and_dec_sint_ge0(); + return false; +} + + bool Type_handler_real_result:: Item_func_abs_fix_length_and_dec(Item_func_abs *item) const { @@ -6697,6 +6740,22 @@ bool Type_handler:: } +bool Type_handler_long_ge0:: + Item_func_signed_fix_length_and_dec(Item_func_signed *item) const +{ + item->fix_length_and_dec_sint_ge0(); + return false; +} + + +bool Type_handler_long_ge0:: + Item_func_unsigned_fix_length_and_dec(Item_func_unsigned *item) const +{ + item->fix_length_and_dec_sint_ge0(); + return false; +} + + bool Type_handler_string_result:: Item_func_signed_fix_length_and_dec(Item_func_signed *item) const { @@ -7182,6 +7241,18 @@ decimal_digits_t Type_handler_int_result::Item_decimal_precision(const Item *ite return (decimal_digits_t) MY_MIN(prec, DECIMAL_MAX_PRECISION); } +decimal_digits_t Type_handler_long_ge0::Item_decimal_precision(const Item *item) const +{ + DBUG_ASSERT(item->max_length); + DBUG_ASSERT(!item->decimals); + /* + Unlike in Type_handler_long, Type_handler_long_ge0 does + not reserve one character for the sign. All max_length + characters are digits. + */ + return MY_MIN(item->max_length, DECIMAL_MAX_PRECISION); +} + decimal_digits_t Type_handler_time_common::Item_decimal_precision(const Item *item) const { return (decimal_digits_t) (7 + MY_MIN(item->decimals, TIME_SECOND_PART_DIGITS)); } @@ -8183,6 +8254,26 @@ Field *Type_handler_long:: } +Field *Type_handler_long_ge0:: + make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root, + const LEX_CSTRING *name, + const Record_addr &rec, const Bit_addr &bit, + const Column_definition_attributes *attr, + uint32 flags) const +{ + /* + We're converting signed long_ge0 to signed long. + So add one character for the sign.
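    A rough illustration, assuming the YEAR(NOW()) example from the
    Type_handler_long_ge0 class comment added to sql_type.h later in this
    patch; the numbers are illustrative, not taken from test output:

        item->max_length == 4                 // digits only, no sign reserved
        Item_decimal_precision(item) == 4     // DECIMAL(4,0) / VARCHAR(4)
        Field_long display length == 4 + 1    // the sign is added back only here

    so only the materialized INT column pays for the extra sign character.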
+ */ + return new (mem_root) + Field_long(rec.ptr(), (uint32) attr->length + 1/*sign*/, + rec.null_ptr(), rec.null_bit(), + attr->unireg_check, name, + f_is_zerofill(attr->pack_flag) != 0, + f_is_dec(attr->pack_flag) == 0); +} + + Field *Type_handler_longlong:: make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *mem_root, const LEX_CSTRING *name, @@ -8804,41 +8895,48 @@ bool Type_handler_string_result::union_element_finalize(Item_type_holder* item) /***************************************************************************/ -void Type_handler_var_string:: - Column_definition_implicit_upgrade(Column_definition *c) const + +const Type_handler * +Type_handler_var_string::type_handler_for_implicit_upgrade() const { - // Change old VARCHAR to new VARCHAR - c->set_handler(&type_handler_varchar); + return &type_handler_varchar; +} + + +void Type_handler:: + Column_definition_implicit_upgrade_to_this(Column_definition *old) const +{ + old->set_handler(this); } void Type_handler_time_common:: - Column_definition_implicit_upgrade(Column_definition *c) const + Column_definition_implicit_upgrade_to_this(Column_definition *old) const { if (opt_mysql56_temporal_format) - c->set_handler(&type_handler_time2); + old->set_handler(&type_handler_time2); else - c->set_handler(&type_handler_time); + old->set_handler(&type_handler_time); } void Type_handler_datetime_common:: - Column_definition_implicit_upgrade(Column_definition *c) const + Column_definition_implicit_upgrade_to_this(Column_definition *old) const { if (opt_mysql56_temporal_format) - c->set_handler(&type_handler_datetime2); + old->set_handler(&type_handler_datetime2); else - c->set_handler(&type_handler_datetime); + old->set_handler(&type_handler_datetime); } void Type_handler_timestamp_common:: - Column_definition_implicit_upgrade(Column_definition *c) const + Column_definition_implicit_upgrade_to_this(Column_definition *old) const { if (opt_mysql56_temporal_format) - c->set_handler(&type_handler_timestamp2); + old->set_handler(&type_handler_timestamp2); else - c->set_handler(&type_handler_timestamp); + old->set_handler(&type_handler_timestamp); } diff --git a/sql/sql_type.h b/sql/sql_type.h index 5ce17447610..b092433b9af 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -3836,6 +3836,16 @@ public: const Type_handler *res= type_handler_base(); return res ? res : this; } + /* + In 10.11.8 the semantics of this method has changed to the opposite. + It used to be called with the old data type handler as "this". + Now it's called with the new data type handler as "this". + To avoid problems during merges, the method name was renamed. + */ + virtual const Type_handler *type_handler_for_implicit_upgrade() const + { + return this; + } virtual const Type_handler *type_handler_for_comparison() const= 0; virtual const Type_handler *type_handler_for_native_format() const { @@ -3981,9 +3991,13 @@ public: virtual bool validate_implicit_default_value(THD *thd, const Column_definition &def) const; - // Automatic upgrade, e.g. for ALTER TABLE t1 FORCE - virtual void Column_definition_implicit_upgrade(Column_definition *c) const - { } + /* + Automatic upgrade, e.g.
for REPAIR or ALTER TABLE t1 FORCE + - from the data type specified in old->type_handler() + - to the data type specified in "this" + */ + virtual void Column_definition_implicit_upgrade_to_this( + Column_definition *old) const; // Validate CHECK constraint after the parser virtual bool Column_definition_validate_check_constraint(THD *thd, Column_definition *c) @@ -5773,6 +5787,38 @@ public: }; +/* + The expression of this type reports itself as signed, + however it's known not to return negative values. + Items of this data type count only digits in Item::max_length, + without adding +1 for the sign. This allows expressions + of this type convert nicely to VARCHAR and DECIMAL. + For example, YEAR(now()) is: + - VARCHAR(4) in a string context + - DECIMAL(4,0) in a decimal context + - but INT(5) in an integer context +*/ +class Type_handler_long_ge0: public Type_handler_long +{ +public: + decimal_digits_t Item_decimal_precision(const Item *item) const override; + bool Item_func_signed_fix_length_and_dec(Item_func_signed *item) + const override; + bool Item_func_unsigned_fix_length_and_dec(Item_func_unsigned *item) + const override; + bool Item_func_abs_fix_length_and_dec(Item_func_abs *) const override; + bool Item_func_round_fix_length_and_dec(Item_func_round *) const override; + bool Item_sum_hybrid_fix_length_and_dec(Item_sum_hybrid *func) const override; + Field *make_table_field_from_def(TABLE_SHARE *share, + MEM_ROOT *mem_root, + const LEX_CSTRING *name, + const Record_addr &addr, + const Bit_addr &bit, + const Column_definition_attributes *attr, + uint32 flags) const override; +}; + + class Type_handler_ulong: public Type_handler_long { public: @@ -6188,7 +6234,8 @@ public: const Type_handler *type_handler_for_comparison() const override; int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) const override; - void Column_definition_implicit_upgrade(Column_definition *c) const override; + void Column_definition_implicit_upgrade_to_this( + Column_definition *old) const override; bool Column_definition_fix_attributes(Column_definition *c) const override; bool Column_definition_attributes_frm_unpack(Column_definition_attributes *attr, @@ -6512,7 +6559,8 @@ public: const Type_cast_attributes &attr) const override; bool validate_implicit_default_value(THD *thd, const Column_definition &def) const override; - void Column_definition_implicit_upgrade(Column_definition *c) const override; + void Column_definition_implicit_upgrade_to_this( + Column_definition *old) const override; bool Column_definition_fix_attributes(Column_definition *c) const override; bool Column_definition_attributes_frm_unpack(Column_definition_attributes *attr, @@ -6650,7 +6698,8 @@ public: { return true; } - void Column_definition_implicit_upgrade(Column_definition *c) const override; + void Column_definition_implicit_upgrade_to_this( + Column_definition *old) const override; bool Column_definition_attributes_frm_unpack(Column_definition_attributes *attr, TABLE_SHARE *share, @@ -7000,6 +7049,7 @@ public: { return MYSQL_TYPE_VARCHAR; } + const Type_handler *type_handler_for_implicit_upgrade() const override; const Type_handler *type_handler_for_tmp_table(const Item *item) const override { return varstring_type_handler(item); @@ -7007,7 +7057,6 @@ public: uint32 max_display_length_for_field(const Conv_source &src) const override; void show_binlog_type(const Conv_source &src, const Field &dst, String *str) const override; - void Column_definition_implicit_upgrade(Column_definition *c) const override; bool 
Column_definition_fix_attributes(Column_definition *c) const override; bool Column_definition_prepare_stage2(Column_definition *c, handler *file, @@ -7607,6 +7656,7 @@ extern MYSQL_PLUGIN_IMPORT Named_type_handler type_han extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_sshort; extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_sint24; extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_slong; +extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_slong_ge0; extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_slonglong; extern Named_type_handler type_handler_utiny; diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h index 65418c34032..a05b0849d17 100644 --- a/sql/sql_type_fixedbin.h +++ b/sql/sql_type_fixedbin.h @@ -1125,6 +1125,11 @@ public: return FbtImpl::max_char_length(); } + const Type_handler *type_handler_for_implicit_upgrade() const override + { + return TypeCollectionImpl::singleton()-> + type_handler_for_implicit_upgrade(this); + } const Type_handler *type_handler_for_comparison() const override { return this; @@ -1943,6 +1948,12 @@ public: return NULL; } + const Type_handler *type_handler_for_implicit_upgrade( + const Type_handler *from) const + { + return from; + } + static Type_collection_fbt *singleton() { static Type_collection_fbt tc; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index b948c2e46fc..519215ad946 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -257,7 +257,7 @@ static void prepare_record_for_error_message(int error, TABLE *table) Field *field; uint keynr; MY_BITMAP unique_map; /* Fields in offended unique. */ - my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)]; + my_bitmap_map unique_map_buf[bitmap_buffer_size(MAX_FIELDS)/sizeof(my_bitmap_map)]; DBUG_ENTER("prepare_record_for_error_message"); /* diff --git a/sql/temporary_tables.cc b/sql/temporary_tables.cc index fa9abf7b3b9..a018ba5a6ff 100644 --- a/sql/temporary_tables.cc +++ b/sql/temporary_tables.cc @@ -1128,11 +1128,16 @@ TABLE *THD::open_temporary_table(TMP_TABLE_SHARE *share, DBUG_RETURN(NULL); /* Out of memory */ } + uint flags= ha_open_options | (open_options & HA_OPEN_FOR_CREATE); + /* + In replication, temporary tables are not confined to a single + thread/THD. + */ + if (slave_thread) + flags|= HA_OPEN_GLOBAL_TMP_TABLE; if (open_table_from_share(this, share, &alias, (uint) HA_OPEN_KEYFILE, - EXTRA_RECORD, - (ha_open_options | - (open_options & HA_OPEN_FOR_CREATE)), + EXTRA_RECORD, flags, table, false)) { my_free(table); diff --git a/sql/threadpool_win.cc b/sql/threadpool_win.cc index ed68e31c755..65e40598135 100644 --- a/sql/threadpool_win.cc +++ b/sql/threadpool_win.cc @@ -355,10 +355,13 @@ int TP_pool_win::init() if (IS_SYSVAR_AUTOSIZE(&threadpool_max_threads)) { /* - Nr 500 comes from Microsoft documentation, - there is no API for GetThreadpoolThreadMaxThreads() + Default 500 comes from Microsoft documentation, + there is no API for GetThreadpoolThreadMaxThreads(). + + To avoid deadlocks, allow at least max_connections + safety + margin threads in the pool. 
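    A rough worked example of the autosizing below, assuming max_connections
    has been raised to 1000 (an illustrative value, not one from this patch):

        std::max(500U, (uint) max_connections + 10)   // == std::max(500U, 1010U) == 1010

    while installations that keep a small max_connections still get the
    documented floor of 500 pool threads.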
*/ - SYSVAR_AUTOSIZE(threadpool_max_threads,500); + SYSVAR_AUTOSIZE(threadpool_max_threads,std::max(500U,(uint)max_connections + 10)); } else { diff --git a/sql/transaction.cc b/sql/transaction.cc index a6dbf57c24a..f34307ac4f2 100644 --- a/sql/transaction.cc +++ b/sql/transaction.cc @@ -255,11 +255,15 @@ bool trans_begin(THD *thd, uint flags) bool trans_commit(THD *thd) { int res; + PSI_stage_info org_stage; DBUG_ENTER("trans_commit"); if (trans_check(thd)) DBUG_RETURN(TRUE); + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_commit); + thd->server_status&= ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY); DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS")); @@ -288,6 +292,7 @@ bool trans_commit(THD *thd) DBUG_ASSERT(thd->m_transaction_psi == NULL); trans_track_end_trx(thd); + THD_STAGE_INFO(thd, org_stage); DBUG_RETURN(MY_TEST(res)); } @@ -320,6 +325,10 @@ bool trans_commit_implicit(THD *thd) if (thd->in_multi_stmt_transaction_mode() || (thd->variables.option_bits & OPTION_TABLE_LOCK)) { + PSI_stage_info org_stage; + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_commit_implicit); + /* Safety if one did "drop table" on locked tables */ if (!thd->locked_tables_mode) thd->variables.option_bits&= ~OPTION_TABLE_LOCK; @@ -327,6 +336,8 @@ bool trans_commit_implicit(THD *thd) ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY); DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS")); res= MY_TEST(ha_commit_trans(thd, TRUE)); + + THD_STAGE_INFO(thd, org_stage); } thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_BINLOG_THIS_TRX); @@ -361,11 +372,15 @@ bool trans_commit_implicit(THD *thd) bool trans_rollback(THD *thd) { int res; + PSI_stage_info org_stage; DBUG_ENTER("trans_rollback"); if (trans_check(thd)) DBUG_RETURN(TRUE); + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_rollback); + thd->server_status&= ~(SERVER_STATUS_IN_TRANS | SERVER_STATUS_IN_TRANS_READONLY); DBUG_PRINT("info", ("clearing SERVER_STATUS_IN_TRANS")); @@ -384,6 +399,7 @@ bool trans_rollback(THD *thd) trans_track_end_trx(thd); + THD_STAGE_INFO(thd, org_stage); DBUG_RETURN(MY_TEST(res)); } @@ -406,8 +422,12 @@ bool trans_rollback(THD *thd) bool trans_rollback_implicit(THD *thd) { int res; + PSI_stage_info org_stage; DBUG_ENTER("trans_rollback_implict"); + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_rollback_implicit); + /* Always commit/rollback statement transaction before manipulating with the normal one. @@ -434,6 +454,7 @@ bool trans_rollback_implicit(THD *thd) trans_track_end_trx(thd); + THD_STAGE_INFO(thd, org_stage); DBUG_RETURN(MY_TEST(res)); } @@ -469,11 +490,17 @@ bool trans_commit_stmt(THD *thd) if (thd->transaction->stmt.ha_list) { + PSI_stage_info org_stage; + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_commit); + res= ha_commit_trans(thd, FALSE); if (! thd->in_active_multi_stmt_transaction()) { trans_reset_one_shot_chistics(thd); } + + THD_STAGE_INFO(thd, org_stage); } mysql_mutex_assert_not_owner(&LOCK_prepare_ordered); @@ -532,9 +559,15 @@ bool trans_rollback_stmt(THD *thd) if (thd->transaction->stmt.ha_list) { + PSI_stage_info org_stage; + thd->backup_stage(&org_stage); + THD_STAGE_INFO(thd, stage_rollback); + ha_rollback_trans(thd, FALSE); if (! 
thd->in_active_multi_stmt_transaction()) trans_reset_one_shot_chistics(thd); + + THD_STAGE_INFO(thd, org_stage); } #ifdef HAVE_REPLICATION diff --git a/sql/unireg.h b/sql/unireg.h index 1eec3585acc..fa657a267c2 100644 --- a/sql/unireg.h +++ b/sql/unireg.h @@ -217,7 +217,7 @@ LEX_CUSTRING build_frm_image(THD *thd, const LEX_CSTRING &table, #define FRM_FORMINFO_SIZE 288 #define FRM_MAX_SIZE (1024*1024) -static inline bool is_binary_frm_header(uchar *head) +static inline bool is_binary_frm_header(const uchar *head) { return head[0] == 254 && head[1] == 1 diff --git a/sql/wsrep_client_service.h b/sql/wsrep_client_service.h index 253d2f43ac3..b74c52b038f 100644 --- a/sql/wsrep_client_service.h +++ b/sql/wsrep_client_service.h @@ -57,6 +57,10 @@ public: { return false; } + bool is_prepared_xa() + { + return false; + } bool is_xa_rollback() { return false; diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 334633537e0..5d49c47b932 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -3248,7 +3248,9 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, } } else if (granted_thd->lex->sql_command == SQLCOM_FLUSH || - granted_thd->mdl_context.has_explicit_locks()) + /* System transactions with explicit locks are BACKUP. */ + (granted_thd->system_thread != NON_SYSTEM_THREAD && + granted_thd->mdl_context.has_explicit_locks())) { WSREP_DEBUG("BF thread waiting for FLUSH for %s", wsrep_thd_query(request_thd)); @@ -3926,21 +3928,11 @@ void wsrep_ready_set(bool ready_value) step is performed to leave the wsrep transaction in the state as it never existed. - This should not be an inline functions as it requires a lot of stack space - because of WSREP_DBUG() usage. It's also not a function that is - frequently called. */ void wsrep_commit_empty(THD* thd, bool all) { DBUG_ENTER("wsrep_commit_empty"); - WSREP_DEBUG("wsrep_commit_empty for %llu client_state %s client_mode" - " %s trans_state %s sql %s", - thd_get_thread_id(thd), - wsrep::to_c_string(thd->wsrep_cs().state()), - wsrep::to_c_string(thd->wsrep_cs().mode()), - wsrep::to_c_string(thd->wsrep_cs().transaction().state()), - wsrep_thd_query(thd)); if (wsrep_is_real(thd, all) && wsrep_thd_is_local(thd) && @@ -3948,14 +3940,40 @@ void wsrep_commit_empty(THD* thd, bool all) !thd->internal_transaction() && thd->wsrep_trx().state() != wsrep::transaction::s_committed) { - /* Here transaction is either empty (i.e. no changes) or - it was CREATE TABLE with no row binlog format or - we have already aborted transaction e.g. because max writeset size - has been reached. */ - DBUG_ASSERT(!wsrep_has_changes(thd) || - (thd->lex->sql_command == SQLCOM_CREATE_TABLE && - !thd->is_current_stmt_binlog_format_row()) || - thd->wsrep_cs().transaction().state() == wsrep::transaction::s_aborted); +#ifndef DBUG_OFF + const bool empty= !wsrep_has_changes(thd); + const bool create= thd->lex->sql_command == SQLCOM_CREATE_TABLE && + !thd->is_current_stmt_binlog_format_row(); + const bool aborted= thd->wsrep_cs().transaction().state() == wsrep::transaction::s_aborted; + const bool ddl_replay= ((sql_command_flags[thd->lex->sql_command] & + (CF_SCHEMA_CHANGE | CF_ADMIN_COMMAND)) && + thd->wsrep_cs().transaction().state() == wsrep::transaction::s_must_replay); + /* Here transaction is either + (1) empty (i.e. no changes) or + (2) it was CREATE TABLE with no row binlog format or + (3) we have already aborted transaction e.g. because max writeset size + has been reached or + (4) it was DDL and got BF aborted and must replay. 
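    As a rough sketch of how case (4) maps onto the ddl_replay flag computed
    above (same names as in the surrounding patch code):

        (sql_command_flags[thd->lex->sql_command] &
         (CF_SCHEMA_CHANGE | CF_ADMIN_COMMAND)) &&
        thd->wsrep_cs().transaction().state() == wsrep::transaction::s_must_replay

    i.e. a schema-change or admin statement whose transaction was BF aborted
    and must now replay; only when none of (1)-(4) hold does the debug build
    log the state and assert.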
+ */ + if(!(empty || create || aborted || ddl_replay)) + { + WSREP_DEBUG("wsrep_commit_empty: thread: %llu client_state: %s client_mode:" + " %s trans_state: %s error: %s empty: %d create: %d aborted:" + " %d ddl_replay: %d sql: %s", + thd_get_thread_id(thd), + wsrep::to_c_string(thd->wsrep_cs().state()), + wsrep::to_c_string(thd->wsrep_cs().mode()), + wsrep::to_c_string(thd->wsrep_cs().transaction().state()), + wsrep::to_c_string(thd->wsrep_cs().current_error()), + empty, create, aborted, ddl_replay, + wsrep_thd_query(thd)); + + DBUG_ASSERT(empty || // 1 + create || // 2 + aborted || // 3 + ddl_replay); // 4 + } +#endif /* DBUG_OFF */ bool have_error= wsrep_current_error(thd); int ret= wsrep_before_rollback(thd, all) || wsrep_after_rollback(thd, all) || @@ -3969,10 +3987,10 @@ void wsrep_commit_empty(THD* thd, bool all) DBUG_ASSERT(wsrep_current_error(thd) == wsrep::e_deadlock_error); thd->wsrep_cs().reset_error(); } + if (ret) - { - WSREP_DEBUG("wsrep_commit_empty failed: %d", wsrep_current_error(thd)); - } + WSREP_DEBUG("wsrep_commit_empty failed: %s", + wsrep::to_c_string(thd->wsrep_cs().current_error())); } DBUG_VOID_RETURN; } diff --git a/storage/connect/mysql-test/connect/r/drop-open-error.result b/storage/connect/mysql-test/connect/r/drop-open-error.result index a5d1e89307b..f9b9b7e87d2 100644 --- a/storage/connect/mysql-test/connect/r/drop-open-error.result +++ b/storage/connect/mysql-test/connect/r/drop-open-error.result @@ -3,7 +3,7 @@ create table tcon engine=connect table_type=mysql CONNECTION='mysql://root@local ERROR HY000: Too long value for 'SRCDEF' drop table mdev9949; Warnings: -Warning 1017 Can't find file: 'MYSQLD_DATADIR/test/mdev9949.dos' (errno: 2 "No such file or directory") +Warning 1017 Can't find file: 'DATADIR/test/mdev9949.dos' (errno: 2 "No such file or directory") drop table t1; select @@secure_file_priv 'must be NULL'; must be NULL diff --git a/storage/connect/mysql-test/connect/t/drop-open-error.test b/storage/connect/mysql-test/connect/t/drop-open-error.test index 69e634e82bd..dd286c96466 100644 --- a/storage/connect/mysql-test/connect/t/drop-open-error.test +++ b/storage/connect/mysql-test/connect/t/drop-open-error.test @@ -13,8 +13,9 @@ error ER_VALUE_TOO_LONG; create table tcon engine=connect table_type=mysql CONNECTION='mysql://root@localhost/test/t1' SRCDEF='select c from t1 where c in ("foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", 
"bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", 
"foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", 
"bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", 
"foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", 
"bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", 
"foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", 
"bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", 
"foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", 
"bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", 
"foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar", "foo", "bar", "qux", "foobar")'; # copy the invalid frm (as created by the statement above before the MDEV-9949 fix) +let $MARIADB_DATADIR=`select @@datadir`; copy_file $MTR_SUITE_DIR/std_data/mdev9949.frm $datadir/test/mdev9949.frm; ---replace_result $datadir MYSQLD_DATADIR/ ./ MYSQLD_DATADIR/ +--replace_result $MARIADB_DATADIR DATADIR/ './' 'DATADIR/' drop table mdev9949; drop table t1; diff --git a/storage/connect/zip.c b/storage/connect/zip.c index 3d3d4caddef..40c1d6794c0 100644 --- a/storage/connect/zip.c +++ b/storage/connect/zip.c @@ -1027,7 +1027,6 @@ extern int ZEXPORT zipOpenNewFileInZip4_64(zipFile file, const char* filename, c int err = ZIP_OK; # ifdef NOCRYPT - (crcForCrypting); if (password != NULL) return ZIP_PARAMERROR; # endif diff --git a/storage/innobase/CMakeLists.txt b/storage/innobase/CMakeLists.txt index c2bdaaa7e3a..d5a0445a23b 100644 --- a/storage/innobase/CMakeLists.txt +++ b/storage/innobase/CMakeLists.txt @@ -71,7 +71,7 @@ ADD_FEATURE_INFO(INNODB_ROOT_GUESS WITH_INNODB_ROOT_GUESS OPTION(WITH_INNODB_EXTRA_DEBUG "Enable extra InnoDB debug checks" OFF) IF(WITH_INNODB_EXTRA_DEBUG) - ADD_DEFINITIONS(-DUNIV_ZIP_DEBUG) + ADD_DEFINITIONS(-DUNIV_ZIP_DEBUG -DLOG_LATCH_DEBUG) ENDIF() ADD_FEATURE_INFO(INNODB_EXTRA_DEBUG WITH_INNODB_EXTRA_DEBUG "Extra InnoDB debug checks") diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index bd29af78092..13c58c6a0cb 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -76,6 +76,8 @@ struct set_numa_interleave_t if (srv_numa_interleave) { struct bitmask *numa_mems_allowed = numa_get_mems_allowed(); + MEM_MAKE_DEFINED(numa_mems_allowed, + sizeof *numa_mems_allowed); ib::info() << "Setting NUMA memory policy to" " MPOL_INTERLEAVE"; if (set_mempolicy(MPOL_INTERLEAVE, @@ -1062,6 +1064,7 @@ inline bool buf_pool_t::chunk_t::create(size_t bytes) if (srv_numa_interleave) { struct bitmask *numa_mems_allowed= numa_get_mems_allowed(); + MEM_MAKE_DEFINED(numa_mems_allowed, sizeof *numa_mems_allowed); if (mbind(mem, mem_size(), MPOL_INTERLEAVE, numa_mems_allowed->maskp, numa_mems_allowed->size, MPOL_MF_MOVE)) @@ -1591,17 +1594,14 @@ inline bool buf_pool_t::withdraw_blocks() 
/* reserve free_list length */ if (UT_LIST_GET_LEN(withdraw) < withdraw_target) { - buf_flush_LRU( - std::max(withdraw_target - - UT_LIST_GET_LEN(withdraw), - srv_LRU_scan_depth), - true); - mysql_mutex_unlock(&buf_pool.mutex); - buf_dblwr.flush_buffered_writes(); - mysql_mutex_lock(&buf_pool.flush_list_mutex); - buf_flush_wait_LRU_batch_end(); - mysql_mutex_unlock(&buf_pool.flush_list_mutex); - mysql_mutex_lock(&buf_pool.mutex); + try_LRU_scan = false; + mysql_mutex_unlock(&mutex); + mysql_mutex_lock(&flush_list_mutex); + page_cleaner_wakeup(true); + my_cond_wait(&done_flush_list, + &flush_list_mutex.m_mutex); + mysql_mutex_unlock(&flush_list_mutex); + mysql_mutex_lock(&mutex); } /* relocate blocks/buddies in withdrawn area */ diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 06ac24a5a1e..4e54c7055ca 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -274,30 +274,22 @@ buf_flush_relocate_on_flush_list( ut_d(buf_flush_validate_low()); } -/** Note that a block is no longer dirty, while not removing -it from buf_pool.flush_list -@param temporary whether the page belongs to the temporary tablespace -@param error whether an error may have occurred while writing */ -inline void buf_page_t::write_complete(bool temporary, bool error) +void buf_page_t::write_complete(bool persistent, bool error, uint32_t state) { - ut_ad(temporary == fsp_is_system_temporary(id().space())); - if (UNIV_UNLIKELY(error)); - else if (temporary) - { - ut_ad(oldest_modification() == 2); - oldest_modification_= 0; - } - else + ut_ad(!persistent == fsp_is_system_temporary(id().space())); + ut_ad(state >= WRITE_FIX); + + if (UNIV_LIKELY(!error)) { + ut_d(lsn_t om= oldest_modification()); + ut_ad(om >= 2); + ut_ad(persistent == (om > 2)); /* We use release memory order to guarantee that callers of oldest_modification_acquire() will observe the block as being detached from buf_pool.flush_list, after reading the value 0. */ - ut_ad(oldest_modification() > 2); - oldest_modification_.store(1, std::memory_order_release); + oldest_modification_.store(persistent, std::memory_order_release); } - const auto s= state(); - ut_ad(s >= WRITE_FIX); - zip.fix.fetch_sub((s >= WRITE_FIX_REINIT) + zip.fix.fetch_sub((state >= WRITE_FIX_REINIT) ? (WRITE_FIX_REINIT - UNFIXED) : (WRITE_FIX - UNFIXED)); lock.u_unlock(true); @@ -311,18 +303,10 @@ inline void buf_pool_t::n_flush_inc() inline void buf_pool_t::n_flush_dec() { - mysql_mutex_lock(&flush_list_mutex); + mysql_mutex_assert_owner(&flush_list_mutex); ut_ad(page_cleaner_status >= LRU_FLUSH); if ((page_cleaner_status-= LRU_FLUSH) < LRU_FLUSH) pthread_cond_broadcast(&done_flush_LRU); - mysql_mutex_unlock(&flush_list_mutex); -} - -inline void buf_pool_t::n_flush_dec_holding_mutex() -{ - mysql_mutex_assert_owner(&flush_list_mutex); - ut_ad(page_cleaner_status >= LRU_FLUSH); - page_cleaner_status-= LRU_FLUSH; } /** Complete write of a file page from buf_pool. 
@@ -352,28 +336,26 @@ void buf_page_write_complete(const IORequest &request, bool error) mysql_mutex_assert_not_owner(&buf_pool.mutex); mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); - if (request.is_LRU()) + const bool persistent= bpage->oldest_modification() != 2; + + if (UNIV_UNLIKELY(!persistent) && UNIV_LIKELY(!error)) { - const bool temp= bpage->oldest_modification() == 2; - if (!temp && state < buf_page_t::WRITE_FIX_REINIT && - request.node->space->use_doublewrite()) - buf_dblwr.write_completed(); /* We must hold buf_pool.mutex while releasing the block, so that no other thread can access it before we have freed it. */ mysql_mutex_lock(&buf_pool.mutex); - bpage->write_complete(temp, error); - if (!error) - buf_LRU_free_page(bpage, true); + bpage->write_complete(persistent, error, state); + buf_LRU_free_page(bpage, true); mysql_mutex_unlock(&buf_pool.mutex); - - buf_pool.n_flush_dec(); } else { + bpage->write_complete(persistent, error, state); if (state < buf_page_t::WRITE_FIX_REINIT && request.node->space->use_doublewrite()) + { + ut_ad(persistent); buf_dblwr.write_completed(); - bpage->write_complete(false, error); + } } } @@ -740,17 +722,15 @@ ATTRIBUTE_COLD void buf_pool_t::release_freed_page(buf_page_t *bpage) noexcept } /** Write a flushable page to a file or free a freeable block. -@param evict whether to evict the page on write completion @param space tablespace @return whether a page write was initiated and buf_pool.mutex released */ -bool buf_page_t::flush(bool evict, fil_space_t *space) +bool buf_page_t::flush(fil_space_t *space) { mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); ut_ad(in_file()); ut_ad(in_LRU_list); ut_ad((space->purpose == FIL_TYPE_TEMPORARY) == (space == fil_system.temp_space)); - ut_ad(evict || space != fil_system.temp_space); ut_ad(space->referenced()); const auto s= state(); @@ -797,22 +777,11 @@ bool buf_page_t::flush(bool evict, fil_space_t *space) mysql_mutex_unlock(&buf_pool.mutex); IORequest::Type type= IORequest::WRITE_ASYNC; - if (UNIV_UNLIKELY(evict)) - { - type= IORequest::WRITE_LRU; - mysql_mutex_lock(&buf_pool.flush_list_mutex); - buf_pool.n_flush_inc(); - mysql_mutex_unlock(&buf_pool.flush_list_mutex); - } /* Apart from the U-lock, this block will also be protected by is_write_fixed() and oldest_modification()>1. Thus, it cannot be relocated or removed. */ - DBUG_PRINT("ib_buf", ("%s %u page %u:%u", - evict ? "LRU" : "flush_list", - id().space(), id().page_no())); - buf_block_t *block= reinterpret_cast(this); page_t *write_frame= zip.data; @@ -864,10 +833,7 @@ bool buf_page_t::flush(bool evict, fil_space_t *space) { switch (space->chain.start->punch_hole) { case 1: - static_assert(IORequest::PUNCH_LRU - IORequest::PUNCH == - IORequest::WRITE_LRU - IORequest::WRITE_ASYNC, ""); - type= - IORequest::Type(type + (IORequest::PUNCH - IORequest::WRITE_ASYNC)); + type= IORequest::PUNCH; break; case 2: size= orig_size; @@ -894,10 +860,8 @@ bool buf_page_t::flush(bool evict, fil_space_t *space) /** Check whether a page can be flushed from the buf_pool. 
@param id page identifier @param fold id.fold() -@param evict true=buf_pool.LRU; false=buf_pool.flush_list @return whether the page can be flushed */ -static bool buf_flush_check_neighbor(const page_id_t id, ulint fold, - bool evict) +static bool buf_flush_check_neighbor(const page_id_t id, ulint fold) { mysql_mutex_assert_owner(&buf_pool.mutex); ut_ad(fold == id.fold()); @@ -906,26 +870,16 @@ static bool buf_flush_check_neighbor(const page_id_t id, ulint fold, const buf_page_t *bpage= buf_pool.page_hash.get(id, buf_pool.page_hash.cell_get(fold)); - if (!bpage) - return false; - - /* We avoid flushing 'non-old' blocks in an eviction flush, because the - flushed blocks are soon freed */ - if (evict && !bpage->is_old()) - return false; - - return bpage->oldest_modification() > 1 && !bpage->is_io_fixed(); + return bpage && bpage->oldest_modification() > 1 && !bpage->is_io_fixed(); } /** Check which neighbors of a page can be flushed from the buf_pool. @param space tablespace @param id page identifier of a dirty page @param contiguous whether to consider contiguous areas of pages -@param evict true=buf_pool.LRU; false=buf_pool.flush_list @return last page number that can be flushed */ static page_id_t buf_flush_check_neighbors(const fil_space_t &space, - page_id_t &id, bool contiguous, - bool evict) + page_id_t &id, bool contiguous) { ut_ad(id.page_no() < space.size + (space.physical_size() == 2048 ? 1 @@ -958,7 +912,7 @@ static page_id_t buf_flush_check_neighbors(const fil_space_t &space, for (page_id_t i= id - 1;; --i) { fold--; - if (!buf_flush_check_neighbor(i, fold, evict)) + if (!buf_flush_check_neighbor(i, fold)) { low= i + 1; break; @@ -974,7 +928,7 @@ static page_id_t buf_flush_check_neighbors(const fil_space_t &space, while (++i < high) { ++fold; - if (!buf_flush_check_neighbor(i, fold, evict)) + if (!buf_flush_check_neighbor(i, fold)) break; } @@ -1051,14 +1005,13 @@ and also write zeroes or punch the hole for the freed ranges of pages. 
@param page_id page identifier @param bpage buffer page @param contiguous whether to consider contiguous areas of pages -@param evict true=buf_pool.LRU; false=buf_pool.flush_list @param n_flushed number of pages flushed so far in this batch @param n_to_flush maximum number of pages we are allowed to flush @return number of pages flushed */ static ulint buf_flush_try_neighbors(fil_space_t *space, const page_id_t page_id, buf_page_t *bpage, - bool contiguous, bool evict, + bool contiguous, ulint n_flushed, ulint n_to_flush) { ut_ad(space->id == page_id.space()); @@ -1072,7 +1025,7 @@ static ulint buf_flush_try_neighbors(fil_space_t *space, ut_ad(lsn >= bpage->oldest_modification()); if (UNIV_UNLIKELY(lsn < space->get_create_lsn())) { - ut_a(!bpage->flush(evict, space)); + ut_a(!bpage->flush(space)); mysql_mutex_unlock(&buf_pool.mutex); return 0; } @@ -1082,7 +1035,7 @@ static ulint buf_flush_try_neighbors(fil_space_t *space, ulint count= 0; page_id_t id= page_id; - page_id_t high= buf_flush_check_neighbors(*space, id, contiguous, evict); + page_id_t high= buf_flush_check_neighbors(*space, id, contiguous); ut_ad(page_id >= id); ut_ad(page_id < high); @@ -1118,7 +1071,7 @@ static ulint buf_flush_try_neighbors(fil_space_t *space, bpage= nullptr; ut_ad(b->oldest_modification() > 1); flush: - if (b->flush(evict, space)) + if (b->flush(space)) { ++count; continue; @@ -1126,9 +1079,9 @@ static ulint buf_flush_try_neighbors(fil_space_t *space, } /* We avoid flushing 'non-old' blocks in an eviction flush, because the flushed blocks are soon freed */ - else if ((!evict || b->is_old()) && - b->oldest_modification() > 1 && b->lock.u_lock_try(true)) + else if (b->oldest_modification() > 1 && b->lock.u_lock_try(true)) { + /* For the buf_pool.watch[] sentinels, oldest_modification() == 0 */ if (b->oldest_modification() < 2) b->lock.u_unlock(true); else @@ -1250,10 +1203,8 @@ static void buf_flush_discard_page(buf_page_t *bpage) /** Flush dirty blocks from the end buf_pool.LRU, and move clean blocks to buf_pool.free. @param max maximum number of blocks to flush -@param evict whether dirty pages are to be evicted after flushing them @param n counts of flushed and evicted pages */ -static void buf_flush_LRU_list_batch(ulint max, bool evict, - flush_counters_t *n) +static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n) { ulint scanned= 0; ulint free_limit= srv_LRU_scan_depth; @@ -1301,8 +1252,12 @@ static void buf_flush_LRU_list_batch(ulint max, bool evict, if (state < buf_page_t::READ_FIX && bpage->lock.u_lock_try(true)) { ut_ad(!bpage->is_io_fixed()); - bool do_evict= evict; switch (bpage->oldest_modification()) { + case 2: + /* LRU flushing will always evict pages of the temporary tablespace, + in buf_page_write_complete(). */ + ++n->evicted; + break; case 1: mysql_mutex_lock(&buf_pool.flush_list_mutex); if (ut_d(lsn_t lsn=) bpage->oldest_modification()) @@ -1315,12 +1270,8 @@ static void buf_flush_LRU_list_batch(ulint max, bool evict, case 0: bpage->lock.u_unlock(true); goto evict; - case 2: - /* LRU flushing will always evict pages of the temporary tablespace. */ - do_evict= true; } - /* Block is ready for flush. Dispatch an IO request. - If do_evict, the page may be evicted by buf_page_write_complete(). */ + /* Block is ready for flush. Dispatch an IO request. 
*/ const page_id_t page_id(bpage->id()); const uint32_t space_id= page_id.space(); if (!space || space->id != space_id) @@ -1355,6 +1306,7 @@ static void buf_flush_LRU_list_batch(ulint max, bool evict, no_space: mysql_mutex_lock(&buf_pool.flush_list_mutex); buf_flush_discard_page(bpage); + ++n->evicted; continue; } @@ -1367,8 +1319,8 @@ static void buf_flush_LRU_list_batch(ulint max, bool evict, if (neighbors && space->is_rotational()) n->flushed+= buf_flush_try_neighbors(space, page_id, bpage, neighbors == 1, - do_evict, n->flushed, max); - else if (bpage->flush(do_evict, space)) + n->flushed, max); + else if (bpage->flush(space)) ++n->flushed; else continue; @@ -1386,24 +1338,25 @@ static void buf_flush_LRU_list_batch(ulint max, bool evict, space->release(); if (scanned) + { MONITOR_INC_VALUE_CUMULATIVE(MONITOR_LRU_BATCH_SCANNED, MONITOR_LRU_BATCH_SCANNED_NUM_CALL, MONITOR_LRU_BATCH_SCANNED_PER_CALL, scanned); + } } /** Flush and move pages from LRU or unzip_LRU list to the free list. Whether LRU or unzip_LRU is used depends on the state of the system. @param max maximum number of blocks to flush -@param evict whether dirty pages are to be evicted after flushing them @param n counts of flushed and evicted pages */ -static void buf_do_LRU_batch(ulint max, bool evict, flush_counters_t *n) +static void buf_do_LRU_batch(ulint max, flush_counters_t *n) { if (buf_LRU_evict_from_unzip_LRU()) buf_free_from_unzip_LRU_list_batch(); n->evicted= 0; n->flushed= 0; - buf_flush_LRU_list_batch(max, evict, n); + buf_flush_LRU_list_batch(max, n); mysql_mutex_assert_owner(&buf_pool.mutex); buf_lru_freed_page_count+= n->evicted; @@ -1515,8 +1468,8 @@ static ulint buf_do_flush_list_batch(ulint max_n, lsn_t lsn) { if (neighbors && space->is_rotational()) count+= buf_flush_try_neighbors(space, page_id, bpage, - neighbors == 1, false, count, max_n); - else if (bpage->flush(false, space)) + neighbors == 1, count, max_n); + else if (bpage->flush(space)) ++count; else continue; @@ -1535,10 +1488,13 @@ static ulint buf_do_flush_list_batch(ulint max_n, lsn_t lsn) space->release(); if (scanned) + { MONITOR_INC_VALUE_CUMULATIVE(MONITOR_FLUSH_BATCH_SCANNED, MONITOR_FLUSH_BATCH_SCANNED_NUM_CALL, MONITOR_FLUSH_BATCH_SCANNED_PER_CALL, scanned); + } + return count; } @@ -1682,7 +1638,7 @@ bool buf_flush_list_space(fil_space_t *space, ulint *n_flushed) goto was_freed; } mysql_mutex_unlock(&buf_pool.flush_list_mutex); - if (bpage->flush(false, space)) + if (bpage->flush(space)) { ++n_flush; if (!--max_n_flush) @@ -1740,27 +1696,22 @@ and move clean blocks to buf_pool.free. The caller must invoke buf_dblwr.flush_buffered_writes() after releasing buf_pool.mutex. @param max_n wished maximum mumber of blocks flushed -@param evict whether to evict pages after flushing -@return evict ? 
number of processed pages : number of pages written */ -ulint buf_flush_LRU(ulint max_n, bool evict) +@return number of pages written */ +static ulint buf_flush_LRU(ulint max_n) { mysql_mutex_assert_owner(&buf_pool.mutex); flush_counters_t n; - buf_do_LRU_batch(max_n, evict, &n); + buf_do_LRU_batch(max_n, &n); ulint pages= n.flushed; if (n.evicted) { - if (evict) - pages+= n.evicted; buf_pool.try_LRU_scan= true; pthread_cond_broadcast(&buf_pool.done_free); } - else if (!pages && !buf_pool.try_LRU_scan && - !buf_pool.LRU_warned.test_and_set(std::memory_order_acquire)) - { + else if (!pages && !buf_pool.try_LRU_scan) /* For example, with the minimum innodb_buffer_pool_size=5M and the default innodb_page_size=16k there are only a little over 316 pages in the buffer pool. The buffer pool can easily be exhausted @@ -1774,12 +1725,7 @@ ulint buf_flush_LRU(ulint max_n, bool evict) (3) This thread is the only one that could make progress, but we fail to do so because all the pages that we scanned are buffer-fixed or latched by some thread. */ - sql_print_warning("InnoDB: Could not free any blocks in the buffer pool!" - " %zu blocks are in use and %zu free." - " Consider increasing innodb_buffer_pool_size.", - UT_LIST_GET_LEN(buf_pool.LRU), - UT_LIST_GET_LEN(buf_pool.free)); - } + buf_pool.LRU_warn(); return pages; } @@ -1914,7 +1860,7 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept { my_munmap(buf, file_size); buf= resize_buf; - buf_free= START_OFFSET + (get_lsn() - resizing); + set_buf_free(START_OFFSET + (get_lsn() - resizing)); } else #endif @@ -1956,9 +1902,7 @@ inline void log_t::write_checkpoint(lsn_t end_lsn) noexcept static bool log_checkpoint_low(lsn_t oldest_lsn, lsn_t end_lsn) { ut_ad(!srv_read_only_mode); -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); ut_ad(oldest_lsn <= end_lsn); ut_ad(end_lsn == log_sys.get_lsn()); @@ -2527,26 +2471,16 @@ static void buf_flush_page_cleaner() { buf_pool.page_cleaner_set_idle(false); buf_pool.n_flush_inc(); - /* Remove clean blocks from buf_pool.flush_list before the LRU scan. */ - for (buf_page_t *p= UT_LIST_GET_FIRST(buf_pool.flush_list); p; ) - { - const lsn_t lsn{p->oldest_modification()}; - ut_ad(lsn > 2 || lsn == 1); - buf_page_t *n= UT_LIST_GET_NEXT(list, p); - if (lsn <= 1) - buf_pool.delete_from_flush_list(p); - p= n; - } mysql_mutex_unlock(&buf_pool.flush_list_mutex); n= srv_max_io_capacity; mysql_mutex_lock(&buf_pool.mutex); LRU_flush: - n= buf_flush_LRU(n, false); + n= buf_flush_LRU(n); mysql_mutex_unlock(&buf_pool.mutex); last_pages+= n; check_oldest_and_set_idle: mysql_mutex_lock(&buf_pool.flush_list_mutex); - buf_pool.n_flush_dec_holding_mutex(); + buf_pool.n_flush_dec(); oldest_lsn= buf_pool.get_oldest_modification(0); if (!oldest_lsn) goto fully_unemployed; @@ -2679,6 +2613,16 @@ static void buf_flush_page_cleaner() #endif } +ATTRIBUTE_COLD void buf_pool_t::LRU_warn() +{ + mysql_mutex_assert_owner(&mutex); + if (!LRU_warned.test_and_set(std::memory_order_acquire)) + sql_print_warning("InnoDB: Could not free any blocks in the buffer pool!" + " %zu blocks are in use and %zu free." + " Consider increasing innodb_buffer_pool_size.", + UT_LIST_GET_LEN(LRU), UT_LIST_GET_LEN(free)); +} + /** Initialize page_cleaner. 
*/ ATTRIBUTE_COLD void buf_flush_page_cleaner_init() { diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 267a2b02bda..e1626b6cee3 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -386,148 +386,82 @@ we put it to free list to be used. @retval nullptr if get==have_no_mutex_soft and memory was not available */ buf_block_t* buf_LRU_get_free_block(buf_LRU_get get) { - ulint n_iterations = 0; - ulint flush_failures = 0; - MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH); - if (UNIV_UNLIKELY(get == have_mutex)) { - mysql_mutex_assert_owner(&buf_pool.mutex); - goto got_mutex; - } - DBUG_EXECUTE_IF("recv_ran_out_of_buffer", - if (recv_recovery_is_on() - && recv_sys.apply_log_recs) { - mysql_mutex_lock(&buf_pool.mutex); - goto flush_lru; - }); -get_mutex: - mysql_mutex_lock(&buf_pool.mutex); -got_mutex: - buf_LRU_check_size_of_non_data_objects(); - buf_block_t* block; + bool waited= false; + MONITOR_INC(MONITOR_LRU_GET_FREE_SEARCH); + if (UNIV_LIKELY(get != have_mutex)) + mysql_mutex_lock(&buf_pool.mutex); - IF_DBUG(static bool buf_lru_free_blocks_error_printed,); - DBUG_EXECUTE_IF("ib_lru_force_no_free_page", - if (!buf_lru_free_blocks_error_printed) { - n_iterations = 21; - block = nullptr; - goto not_found;}); + buf_LRU_check_size_of_non_data_objects(); + + buf_block_t *block; retry: - /* If there is a block in the free list, take it */ - if ((block = buf_LRU_get_free_only()) != nullptr) { + /* If there is a block in the free list, take it */ + block= buf_LRU_get_free_only(); + if (block) + { got_block: - const ulint LRU_size = UT_LIST_GET_LEN(buf_pool.LRU); - const ulint available = UT_LIST_GET_LEN(buf_pool.free); - const ulint scan_depth = srv_LRU_scan_depth / 2; - ut_ad(LRU_size <= BUF_LRU_MIN_LEN || available >= scan_depth - || buf_pool.need_LRU_eviction()); + const ulint LRU_size= UT_LIST_GET_LEN(buf_pool.LRU); + const ulint available= UT_LIST_GET_LEN(buf_pool.free); + const ulint scan_depth= srv_LRU_scan_depth / 2; + ut_ad(LRU_size <= BUF_LRU_MIN_LEN || + available >= scan_depth || buf_pool.need_LRU_eviction()); - if (UNIV_LIKELY(get != have_mutex)) { - mysql_mutex_unlock(&buf_pool.mutex); - } + if (UNIV_UNLIKELY(available < scan_depth) && LRU_size > BUF_LRU_MIN_LEN) + { + mysql_mutex_lock(&buf_pool.flush_list_mutex); + if (!buf_pool.page_cleaner_active()) + buf_pool.page_cleaner_wakeup(true); + mysql_mutex_unlock(&buf_pool.flush_list_mutex); + } - if (UNIV_UNLIKELY(available < scan_depth) - && LRU_size > BUF_LRU_MIN_LEN) { - mysql_mutex_lock(&buf_pool.flush_list_mutex); - if (!buf_pool.page_cleaner_active()) { - buf_pool.page_cleaner_wakeup(true); - } - mysql_mutex_unlock(&buf_pool.flush_list_mutex); - } + if (UNIV_LIKELY(get != have_mutex)) + mysql_mutex_unlock(&buf_pool.mutex); - block->page.zip.clear(); - return block; - } + block->page.zip.clear(); + return block; + } - MONITOR_INC( MONITOR_LRU_GET_FREE_LOOPS ); - if (n_iterations || buf_pool.try_LRU_scan) { - /* If no block was in the free list, search from the - end of the LRU list and try to free a block there. - If we are doing for the first time we'll scan only - tail of the LRU list otherwise we scan the whole LRU - list. */ - if (buf_LRU_scan_and_free_block(n_iterations - ? ULINT_UNDEFINED : 100)) { - goto retry; - } + MONITOR_INC(MONITOR_LRU_GET_FREE_LOOPS); + if (waited || buf_pool.try_LRU_scan) + { + /* If no block was in the free list, search from the end of the + LRU list and try to free a block there. 
If we are doing for the + first time we'll scan only tail of the LRU list otherwise we scan + the whole LRU list. */ + if (buf_LRU_scan_and_free_block(waited ? ULINT_UNDEFINED : 100)) + goto retry; - /* Tell other threads that there is no point - in scanning the LRU list. */ - buf_pool.try_LRU_scan = false; - } + /* Tell other threads that there is no point in scanning the LRU + list. */ + buf_pool.try_LRU_scan= false; + } - if (get == have_no_mutex_soft) { - mysql_mutex_unlock(&buf_pool.mutex); - return nullptr; - } + if (get == have_no_mutex_soft) + { + mysql_mutex_unlock(&buf_pool.mutex); + return nullptr; + } - for (;;) { - if ((block = buf_LRU_get_free_only()) != nullptr) { - goto got_block; - } - const bool wake = buf_pool.need_LRU_eviction(); - mysql_mutex_unlock(&buf_pool.mutex); - mysql_mutex_lock(&buf_pool.flush_list_mutex); - const auto n_flush = buf_pool.n_flush(); - if (wake && !buf_pool.page_cleaner_active()) { - buf_pool.page_cleaner_wakeup(true); - } - mysql_mutex_unlock(&buf_pool.flush_list_mutex); - mysql_mutex_lock(&buf_pool.mutex); - if (!n_flush) { - goto not_found; - } - if (!buf_pool.try_LRU_scan) { - my_cond_wait(&buf_pool.done_free, - &buf_pool.mutex.m_mutex); - } - } + waited= true; -not_found: - if (n_iterations > 1) { - MONITOR_INC( MONITOR_LRU_GET_FREE_WAITS ); - } + while (!(block= buf_LRU_get_free_only())) + { + buf_pool.stat.LRU_waits++; - if (n_iterations == 21 - && srv_buf_pool_old_size == srv_buf_pool_size - && buf_pool.LRU_warned.test_and_set(std::memory_order_acquire)) { - IF_DBUG(buf_lru_free_blocks_error_printed = true,); - mysql_mutex_unlock(&buf_pool.mutex); - ib::warn() << "Difficult to find free blocks in the buffer pool" - " (" << n_iterations << " search iterations)! " - << flush_failures << " failed attempts to" - " flush a page!" - " Consider increasing innodb_buffer_pool_size." - " Pending flushes (fsync): " - << fil_n_pending_tablespace_flushes - << ". " << os_n_file_reads << " OS file reads, " - << os_n_file_writes << " OS file writes, " - << os_n_fsyncs - << " OS fsyncs."; - mysql_mutex_lock(&buf_pool.mutex); - } + timespec abstime; + set_timespec(abstime, 1); - /* No free block was found: try to flush the LRU list. - The freed blocks will be up for grabs for all threads. + mysql_mutex_lock(&buf_pool.flush_list_mutex); + if (!buf_pool.page_cleaner_active()) + buf_pool.page_cleaner_wakeup(true); + mysql_mutex_unlock(&buf_pool.flush_list_mutex); + if (my_cond_timedwait(&buf_pool.done_free, &buf_pool.mutex.m_mutex, + &abstime)) + buf_pool.LRU_warn(); + } - TODO: A more elegant way would have been to return one freed - up block to the caller here but the code that deals with - removing the block from buf_pool.page_hash and buf_pool.LRU is fairly - involved (particularly in case of ROW_FORMAT=COMPRESSED pages). We - can do that in a separate patch sometime in future. 
*/ -#ifndef DBUG_OFF -flush_lru: -#endif - if (!buf_flush_LRU(innodb_lru_flush_size, true)) { - MONITOR_INC(MONITOR_LRU_SINGLE_FLUSH_FAILURE_COUNT); - ++flush_failures; - } - - n_iterations++; - buf_pool.stat.LRU_waits++; - mysql_mutex_unlock(&buf_pool.mutex); - buf_dblwr.flush_buffered_writes(); - goto get_mutex; + goto got_block; } /** Move the LRU_old pointer so that the length of the old blocks list diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 001783de73d..583b5c1f784 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -3842,6 +3842,10 @@ release_and_exit: goto release_and_exit; } +#ifdef ENABLED_DEBUG_SYNC + DEBUG_SYNC(thd, "dict_stats_mdl_acquired"); +#endif /* ENABLED_DEBUG_SYNC */ + trx = trx_create(); trx_start_internal_read_only(trx); diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index adffce7880d..ac491a958d6 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -929,9 +929,7 @@ bool fil_space_free(uint32_t id, bool x_latched) log_sys.latch.wr_unlock(); } else { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); if (space->max_lsn) { ut_d(space->max_lsn = 0); fil_system.named_spaces.remove(*space); @@ -1805,30 +1803,27 @@ pfs_os_file_t fil_delete_tablespace(uint32_t id) /*******************************************************************//** Allocates and builds a file name from a path, a table or tablespace name and a suffix. The string must be freed by caller with ut_free(). -@param[in] path NULL or the directory path or the full path and filename. +@param[in] path nullptr or the directory path or the full path and filename @param[in] name {} if path is full, or Table/Tablespace name -@param[in] ext the file extension to use -@param[in] trim_name true if the last name on the path should be trimmed. +@param[in] extension the file extension to use +@param[in] trim_name true if the last name on the path should be trimmed @return own: file name */ -char* fil_make_filepath(const char *path, const fil_space_t::name_type &name, - ib_extention ext, bool trim_name) +char* fil_make_filepath_low(const char *path, + const fil_space_t::name_type &name, + ib_extention extension, bool trim_name) { /* The path may contain the basename of the file, if so we do not need the name. If the path is NULL, we can use the default path, but there needs to be a name. */ ut_ad(path || name.data()); - /* If we are going to strip a name off the path, there better be a - path and a new name to put back on. */ - ut_ad(!trim_name || (path && name.data())); - if (path == NULL) { path = fil_path_to_mysql_datadir; } ulint len = 0; /* current length */ ulint path_len = strlen(path); - const char* suffix = dot_ext[ext]; + const char* suffix = dot_ext[extension]; ulint suffix_len = strlen(suffix); ulint full_len = path_len + 1 + name.size() + suffix_len + 1; @@ -1911,8 +1906,16 @@ char* fil_make_filepath(const char *path, const fil_space_t::name_type &name, char *fil_make_filepath(const char* path, const table_name_t name, ib_extention suffix, bool strip_name) { - return fil_make_filepath(path, {name.m_name, strlen(name.m_name)}, - suffix, strip_name); + return fil_make_filepath_low(path, {name.m_name, strlen(name.m_name)}, + suffix, strip_name); +} + +/** Wrapper function over fil_make_filepath_low() to build directory name. 
+@param path the directory path or the full path and filename +@return own: directory name */ +static inline char *fil_make_dirpath(const char *path) +{ + return fil_make_filepath_low(path, fil_space_t::name_type{}, NO_EXT, true); } dberr_t fil_space_t::rename(const char *path, bool log, bool replace) @@ -1953,14 +1956,32 @@ dberr_t fil_space_t::rename(const char *path, bool log, bool replace) return DB_TABLESPACE_NOT_FOUND; } - exists= false; - if (replace); - else if (!os_file_status(path, &exists, &ftype) || exists) + if (!replace) { - sql_print_error("InnoDB: Cannot rename '%s' to '%s'" - " because the target file exists.", - old_path, path); - return DB_TABLESPACE_EXISTS; + char *schema_path= fil_make_dirpath(path); + if (!schema_path) + return DB_ERROR; + + exists= false; + bool schema_fail= os_file_status(schema_path, &exists, &ftype) && !exists; + ut_free(schema_path); + + if (schema_fail) + { + sql_print_error("InnoDB: Cannot rename '%s' to '%s'" + " because the target schema directory doesn't exist.", + old_path, path); + return DB_ERROR; + } + + exists= false; + if (!os_file_status(path, &exists, &ftype) || exists) + { + sql_print_error("InnoDB: Cannot rename '%s' to '%s'" + " because the target file exists.", + old_path, path); + return DB_TABLESPACE_EXISTS; + } } mtr_t mtr; @@ -3139,9 +3160,7 @@ void fil_names_dirty( fil_space_t* space) { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); ut_ad(recv_recovery_is_on()); ut_ad(log_sys.get_lsn() != 0); ut_ad(space->max_lsn == 0); @@ -3155,9 +3174,7 @@ fil_names_dirty( tablespace was modified for the first time since fil_names_clear(). */ ATTRIBUTE_NOINLINE ATTRIBUTE_COLD void mtr_t::name_write() { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); ut_d(fil_space_validate_for_mtr_commit(m_user_space)); ut_ad(!m_user_space->max_lsn); m_user_space->max_lsn= log_sys.get_lsn(); @@ -3181,9 +3198,7 @@ ATTRIBUTE_COLD lsn_t fil_names_clear(lsn_t lsn) { mtr_t mtr; -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); ut_ad(lsn); ut_ad(log_sys.is_latest()); diff --git a/storage/innobase/fsp/fsp0file.cc b/storage/innobase/fsp/fsp0file.cc index 1c20efcdca2..62f90f53a54 100644 --- a/storage/innobase/fsp/fsp0file.cc +++ b/storage/innobase/fsp/fsp0file.cc @@ -502,9 +502,10 @@ err_exit: return DB_SUCCESS; } - sql_print_error("InnoDB: %s in datafile: %s, Space ID: " - UINT32PF ", " "Flags: " UINT32PF, - error_txt, m_filepath, m_space_id, m_flags); + sql_print_information( + "InnoDB: %s in datafile: %s, Space ID: " UINT32PF + ", " "Flags: " UINT32PF, + error_txt, m_filepath, m_space_id, m_flags); m_is_valid = false; return DB_CORRUPTION; } diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 54a00fc80c6..aff742fa741 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -268,7 +268,7 @@ inline void xdes_init(const buf_block_t &block, xdes_t *descr, mtr_t *mtr) static MY_ATTRIBUTE((nonnull, warn_unused_result)) dberr_t fseg_mark_page_used(fseg_inode_t *seg_inode, buf_block_t *iblock, - ulint page, xdes_t *descr, buf_block_t *xdes, mtr_t *mtr) + uint32_t page, xdes_t *descr, buf_block_t *xdes, mtr_t *mtr) { ut_ad(fil_page_get_type(iblock->page.frame) == FIL_PAGE_INODE); ut_ad(!((page_offset(seg_inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); @@ -994,7 +994,7 @@ MY_ATTRIBUTE((nonnull, warn_unused_result)) @return error 
code */ static dberr_t fsp_alloc_from_free_frag(buf_block_t *header, buf_block_t *xdes, xdes_t *descr, - ulint bit, mtr_t *mtr) + uint32_t bit, mtr_t *mtr) { if (UNIV_UNLIKELY(xdes_get_state(descr) != XDES_FREE_FRAG || !xdes_is_free(descr, bit))) @@ -1984,29 +1984,42 @@ fseg_alloc_free_page_low( } } - /* In the big if-else below we look for ret_page and ret_descr */ - /*-------------------------------------------------------------*/ - if ((xdes_get_state(descr) == XDES_FSEG) - && mach_read_from_8(descr + XDES_ID) == seg_id - && xdes_is_free(descr, hint % FSP_EXTENT_SIZE)) { + const uint32_t extent_size = FSP_EXTENT_SIZE; + ret_descr = descr; + /* Try to get the page from the extent that belongs to the segment */ + if (xdes_get_state(descr) == XDES_FSEG + && mach_read_from_8(descr + XDES_ID) == seg_id) { + /* Get the page from the segment extent */ + if (xdes_is_free(descr, hint % extent_size)) { take_hinted_page: - /* 1. We can take the hinted page - =================================*/ - ret_descr = descr; - ret_page = hint; - /* Skip the check for extending the tablespace. If the - page hint were not within the size of the tablespace, - we would have got (descr == NULL) above and reset the hint. */ - goto got_hinted_page; - /*-----------------------------------------------------------*/ - } else if (xdes_get_state(descr) == XDES_FREE - && reserved - used < reserved / FSEG_FILLFACTOR - && used >= FSEG_FRAG_LIMIT) { + ret_page = hint; + goto got_hinted_page; + } else if (!xdes_is_full(descr)) { + /* Take the page from the same extent as the + hinted page (and the extent already belongs to + the segment) */ + ret_page = xdes_find_free(descr, hint % extent_size); + if (ret_page == FIL_NULL) { + ut_ad(!has_done_reservation); + return nullptr; + } + ret_page += xdes_get_offset(ret_descr); + goto alloc_done; + } + } - /* 2. We allocate the free extent from space and can take - ========================================================= - the hinted page - ===============*/ + /** If the number of unused but reserved pages in the segment is + less than the minimum of 1/8 of the reserved pages and + 4 * FSP_EXTENT_SIZE, and at least half an extent's worth of + pages is used, then we allow a new empty extent to be added to + the segment in fseg_alloc_free_page_general(). Otherwise, we use + unused pages of the segment. */ + if (used < extent_size / 2 || + reserved - used >= reserved / 8 || + reserved - used >= extent_size * 4) { + } else if (xdes_get_state(descr) == XDES_FREE) { + /* Allocate a free extent from the space and + take the hinted page */ ret_descr = fsp_alloc_free_extent(space, hint, &xdes, mtr, err); @@ -2033,54 +2046,34 @@ take_hinted_page: /* Try to fill the segment free list */ *err = fseg_fill_free_list(seg_inode, iblock, space, - hint + FSP_EXTENT_SIZE, mtr); + hint + extent_size, mtr); if (UNIV_UNLIKELY(*err != DB_SUCCESS)) { return nullptr; } goto take_hinted_page; - /*-----------------------------------------------------------*/ - } else if ((direction != FSP_NO_DIR) - && ((reserved - used) < reserved / FSEG_FILLFACTOR) - && (used >= FSEG_FRAG_LIMIT) - && (ret_descr = fseg_alloc_free_extent(seg_inode, iblock, - &xdes, space, - mtr, err))) { - /* 3.
We take any free extent (which was already assigned above - =============================================================== - in the if-condition to ret_descr) and take the lowest or - ======================================================== - highest page in it, depending on the direction - ==============================================*/ + } else if (direction != FSP_NO_DIR) { + + ret_descr = fseg_alloc_free_extent(seg_inode, iblock, + &xdes, space, mtr, err); + + if (!ret_descr) { + ut_ad(*err != DB_SUCCESS); + return nullptr; + } + /* Take any free extent (which was already assigned + above in the if-condition to ret_descr) and take the + lowest or highest page in it, depending on the direction */ ret_page = xdes_get_offset(ret_descr); if (direction == FSP_DOWN) { - ret_page += FSP_EXTENT_SIZE - 1; + ret_page += extent_size - 1; } - ut_ad(!has_done_reservation || ret_page != FIL_NULL); - /*-----------------------------------------------------------*/ - } else if (UNIV_UNLIKELY(*err != DB_SUCCESS)) { - return nullptr; - } else if ((xdes_get_state(descr) == XDES_FSEG) - && mach_read_from_8(descr + XDES_ID) == seg_id - && (!xdes_is_full(descr))) { + goto alloc_done; + } - /* 4. We can take the page from the same extent as the - ====================================================== - hinted page (and the extent already belongs to the - ================================================== - segment) - ========*/ - ret_descr = descr; - ret_page = xdes_find_free(ret_descr, hint % FSP_EXTENT_SIZE); - if (ret_page == FIL_NULL) { - ut_ad(!has_done_reservation); - } else { - ret_page += xdes_get_offset(ret_descr); - } - /*-----------------------------------------------------------*/ - } else if (reserved - used > 0) { - /* 5. We take any unused page from the segment - ==============================================*/ + /* Try to take individual page from the segment or tablespace */ + if (reserved - used > 0) { + /* Take any unused page from the segment */ fil_addr_t first; if (flst_get_len(seg_inode + FSEG_NOT_FULL) > 0) { @@ -2089,7 +2082,7 @@ take_hinted_page: first = flst_get_first(seg_inode + FSEG_FREE); } else { ut_ad(!has_done_reservation); - return(NULL); + return nullptr; } ret_descr = xdes_lst_get_descriptor(*space, first, mtr, &xdes); @@ -2103,10 +2096,9 @@ take_hinted_page: } else { ret_page += xdes_get_offset(ret_descr); } - /*-----------------------------------------------------------*/ - } else if (used < FSEG_FRAG_LIMIT) { - /* 6. We allocate an individual page from the space - ===================================================*/ + + } else if (used < extent_size / 2) { + /* Allocate an individual page from the space */ buf_block_t* block = fsp_alloc_free_page( space, hint, mtr, init_mtr, err); @@ -2129,13 +2121,11 @@ take_hinted_page: /* fsp_alloc_free_page() invoked fsp_init_file_page() already. */ return(block); - /*-----------------------------------------------------------*/ } else { - /* 7. 
We allocate a new extent and take its first page - ======================================================*/ + /* In the worst case, try to allocate a new extent + and take its first page */ ret_descr = fseg_alloc_free_extent(seg_inode, iblock, &xdes, space, mtr, err); - if (!ret_descr) { ut_ad(!has_done_reservation || *err); return nullptr; @@ -2148,14 +2138,13 @@ take_hinted_page: /* Page could not be allocated */ ut_ad(!has_done_reservation); - return(NULL); + return nullptr; } - +alloc_done: if (space->size <= ret_page && !is_predefined_tablespace(space->id)) { /* It must be that we are extending a single-table tablespace whose size is still < 64 pages */ - - if (ret_page >= FSP_EXTENT_SIZE) { + if (ret_page >= extent_size) { sql_print_error("InnoDB: Trying to extend '%s'" " by single page(s) though the" " space size " UINT32PF "." space->chain.start->name, space->size, ret_page); ut_ad(!has_done_reservation); - return(NULL); + return nullptr; } if (!fsp_try_extend_data_file_with_pages( space, ret_page, header, mtr)) { /* No disk space left */ ut_ad(!has_done_reservation); - return(NULL); + return nullptr; } } -got_hinted_page: - /* ret_descr == NULL if the block was allocated from free_frag - (XDES_FREE_FRAG) */ + /* Skip the check for extending the tablespace. + If the page hint was not within the size of the tablespace, + descr was set to nullptr above, the hint was reset, and the + block was allocated from free_frag (XDES_FREE_FRAG) */ if (ret_descr != NULL) { +got_hinted_page: /* At this point we know the extent and the page offset. The extent is still in the appropriate list (FSEG_NOT_FULL or FSEG_FREE), and the page is not yet marked as used. */ - ut_d(buf_block_t* xxdes); ut_ad(xdes_get_descriptor(space, ret_page, mtr, err, &xxdes) == ret_descr); ut_ad(xdes == xxdes); - ut_ad(xdes_is_free(ret_descr, ret_page % FSP_EXTENT_SIZE)); + ut_ad(xdes_is_free(ret_descr, ret_page % extent_size)); *err = fseg_mark_page_used(seg_inode, iblock, ret_page, ret_descr, xdes, mtr); diff --git a/storage/innobase/handler/ha_innodb.cc index 141af832cf1..21c33ddd337 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -858,6 +858,10 @@ static MYSQL_THDVAR_BOOL(table_locks, PLUGIN_VAR_OPCMDARG, /* check_func */ NULL, /* update_func */ NULL, /* default */ TRUE); +static MYSQL_THDVAR_BOOL(snapshot_isolation, PLUGIN_VAR_OPCMDARG, + "Use snapshot isolation (write-write conflict detection).", + NULL, NULL, FALSE); + static MYSQL_THDVAR_BOOL(strict_mode, PLUGIN_VAR_OPCMDARG, "Use strict mode when evaluating create options.", NULL, NULL, TRUE); @@ -2132,6 +2136,9 @@ convert_error_code_to_mysql( return(HA_ERR_LOCK_DEADLOCK); + case DB_RECORD_CHANGED: + return HA_ERR_RECORD_CHANGED; + case DB_LOCK_WAIT_TIMEOUT: /* Starting from 5.0.13, we let MySQL just roll back the latest SQL statement in a lock wait timeout. Previously, we
Previously, we @@ -2775,6 +2782,8 @@ innobase_trx_init( trx->check_unique_secondary = !thd_test_options( thd, OPTION_RELAXED_UNIQUE_CHECKS); + trx->snapshot_isolation = THDVAR(thd, snapshot_isolation) & 1; + #ifdef WITH_WSREP trx->wsrep = wsrep_on(thd); #endif @@ -12179,7 +12188,7 @@ create_table_info_t::create_foreign_keys() dict_index_t* index = NULL; fkerr_t index_error = FK_SUCCESS; dict_index_t* err_index = NULL; - ulint err_col; + ulint err_col = 0; const bool tmp_table = m_flags2 & DICT_TF2_TEMPORARY; const CHARSET_INFO* cs = thd_charset(m_thd); const char* operation = "Create "; @@ -15412,7 +15421,6 @@ get_foreign_key_info( char tmp_buff[NAME_LEN+1]; char name_buff[NAME_LEN+1]; const char* ptr; - LEX_CSTRING* referenced_key_name; LEX_CSTRING* name = NULL; if (dict_table_t::is_temporary_name(foreign->foreign_table_name)) { @@ -15513,18 +15521,16 @@ get_foreign_key_info( if (foreign->referenced_index && foreign->referenced_index->name != NULL) { - referenced_key_name = thd_make_lex_string( + f_key_info.referenced_key_name = thd_make_lex_string( thd, - f_key_info.referenced_key_name, + nullptr, foreign->referenced_index->name, strlen(foreign->referenced_index->name), 1); } else { - referenced_key_name = NULL; + f_key_info.referenced_key_name = NULL; } - f_key_info.referenced_key_name = referenced_key_name; - pf_key_info = (FOREIGN_KEY_INFO*) thd_memdup(thd, &f_key_info, sizeof(FOREIGN_KEY_INFO)); @@ -16604,6 +16610,13 @@ ha_innobase::get_auto_increment( if (error != DB_SUCCESS) { *first_value = (~(ulonglong) 0); + /* This is an error case. We do the error handling by calling + the error code conversion function. Specifically, we need to + call thd_mark_transaction_to_rollback() to inform sql that we + have rolled back innodb transaction after a deadlock error. We + ignore the returned mysql error code here. */ + std::ignore = convert_error_code_to_mysql( + error, m_prebuilt->table->flags, m_user_thd); return; } @@ -18480,6 +18493,15 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*, mysql_mutex_lock(&LOCK_global_system_variables); } +static void innodb_log_spin_wait_delay_update(THD *, st_mysql_sys_var*, + void *, const void *save) +{ + log_sys.latch.wr_lock(SRW_LOCK_CALL); + mtr_t::spin_wait_delay= *static_cast(save); + mtr_t::finisher_update(); + log_sys.latch.wr_unlock(); +} + /** Update innodb_status_output or innodb_status_output_locks, which control InnoDB "status monitor" output to the error log. 
@param[out] var current value @@ -19328,6 +19350,12 @@ static MYSQL_SYSVAR_ULONGLONG(log_file_size, srv_log_file_size, nullptr, innodb_log_file_size_update, 96 << 20, 4 << 20, std::numeric_limits::max(), 4096); +static MYSQL_SYSVAR_UINT(log_spin_wait_delay, mtr_t::spin_wait_delay, + PLUGIN_VAR_OPCMDARG, + "Delay between log buffer spin lock polls (0 to use a blocking latch)", + nullptr, innodb_log_spin_wait_delay_update, + 0, 0, 6000, 0); + static MYSQL_SYSVAR_UINT(old_blocks_pct, innobase_old_blocks_pct, PLUGIN_VAR_RQCMDARG, "Percentage of the buffer pool to reserve for 'old' blocks.", @@ -19753,6 +19781,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(data_file_buffering), MYSQL_SYSVAR(data_file_write_through), MYSQL_SYSVAR(log_file_size), + MYSQL_SYSVAR(log_spin_wait_delay), MYSQL_SYSVAR(log_group_home_dir), MYSQL_SYSVAR(max_dirty_pages_pct), MYSQL_SYSVAR(max_dirty_pages_pct_lwm), @@ -19773,6 +19802,7 @@ static struct st_mysql_sys_var* innobase_system_variables[]= { MYSQL_SYSVAR(ft_server_stopword_table), MYSQL_SYSVAR(ft_user_stopword_table), MYSQL_SYSVAR(disable_sort_file_cache), + MYSQL_SYSVAR(snapshot_isolation), MYSQL_SYSVAR(stats_on_metadata), MYSQL_SYSVAR(stats_transient_sample_pages), MYSQL_SYSVAR(stats_persistent), diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index d73e8f259bf..8bd9a6310f1 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -864,6 +864,9 @@ my_error_innodb( case DB_DEADLOCK: my_error(ER_LOCK_DEADLOCK, MYF(0)); break; + case DB_RECORD_CHANGED: + my_error(ER_CHECKREAD, MYF(0), table); + break; case DB_LOCK_WAIT_TIMEOUT: my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0)); break; @@ -2815,6 +2818,14 @@ cannot_create_many_fulltext_index: } } + if (m_prebuilt->table->is_stats_table()) { + if (ha_alter_info->online) { + ha_alter_info->unsupported_reason = + table_share->table_name.str; + } + online= false; + } + // FIXME: implement Online DDL for system-versioned operations if (ha_alter_info->handler_flags & INNOBASE_ALTER_VERSIONED_REBUILD) { if (ha_alter_info->online) { diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 4aa7ba93348..c12708a0f15 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -708,17 +708,16 @@ public: @retval DB_FAIL if the page contains the wrong ID */ dberr_t read_complete(const fil_node_t &node); - /** Note that a block is no longer dirty, while not removing - it from buf_pool.flush_list - @param temporary whether the page belongs to the temporary tablespace - @param error whether an error may have occurred while writing */ - inline void write_complete(bool temporary, bool error); + /** Release a write fix after a page write was completed. + @param persistent whether the page belongs to a persistent tablespace + @param error whether an error may have occurred while writing + @param state recently read state() value with the correct io-fix */ + void write_complete(bool persistent, bool error, uint32_t state); /** Write a flushable page to a file or free a freeable block. - @param evict whether to evict the page on write completion @param space tablespace @return whether a page write was initiated and buf_pool.mutex released */ - bool flush(bool evict, fil_space_t *space); + bool flush(fil_space_t *space); /** Notify that a page in a temporary tablespace has been modified. 
*/ void set_temp_modified() @@ -1625,10 +1624,6 @@ public: /** Decrement the number of pending LRU flush */ inline void n_flush_dec(); - /** Decrement the number of pending LRU flush - while holding flush_list_mutex */ - inline void n_flush_dec_holding_mutex(); - /** @return whether flush_list flushing is active */ bool flush_list_active() const { @@ -1778,6 +1773,9 @@ public: /** Free a page whose underlying file page has been freed. */ ATTRIBUTE_COLD void release_freed_page(buf_page_t *bpage) noexcept; + /** Issue a warning that we could not free up buffer pool pages. */ + ATTRIBUTE_COLD void LRU_warn(); + private: /** Temporary memory for page_compressed and encrypted I/O */ struct io_buf_t diff --git a/storage/innobase/include/buf0flu.h b/storage/innobase/include/buf0flu.h index 0cce514b2d2..cc32a38a4ef 100644 --- a/storage/innobase/include/buf0flu.h +++ b/storage/innobase/include/buf0flu.h @@ -85,16 +85,6 @@ buf_flush_init_for_writing( bool buf_flush_list_space(fil_space_t *space, ulint *n_flushed= nullptr) MY_ATTRIBUTE((warn_unused_result)); -/** Write out dirty blocks from buf_pool.LRU, -and move clean blocks to buf_pool.free. -The caller must invoke buf_dblwr.flush_buffered_writes() -after releasing buf_pool.mutex. -@param max_n wished maximum mumber of blocks flushed -@param evict whether to evict pages after flushing -@return evict ? number of processed pages : number of pages written -@retval 0 if a buf_pool.LRU batch is already running */ -ulint buf_flush_LRU(ulint max_n, bool evict); - /** Wait until a LRU flush batch ends. */ void buf_flush_wait_LRU_batch_end(); /** Wait until all persistent pages are flushed up to a limit. diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h index 64182aabc38..960ec3905eb 100644 --- a/storage/innobase/include/db0err.h +++ b/storage/innobase/include/db0err.h @@ -32,23 +32,25 @@ Created 5/24/1996 Heikki Tuuri enum dberr_t { DB_SUCCESS, - DB_SUCCESS_LOCKED_REC = 9, /*!< like DB_SUCCESS, but a new + DB_SUCCESS_LOCKED_REC= 9, /*!< like DB_SUCCESS, but a new explicit record lock was created */ /* The following are error codes */ - DB_ERROR = 11, + DB_RECORD_CHANGED, + DB_ERROR, DB_INTERRUPTED, DB_OUT_OF_MEMORY, DB_OUT_OF_FILE_SPACE, DB_LOCK_WAIT, DB_DEADLOCK, - DB_ROLLBACK, DB_DUPLICATE_KEY, DB_MISSING_HISTORY, /*!< required history data has been deleted due to lack of space in rollback segment */ - DB_CLUSTER_NOT_FOUND = 30, - DB_TABLE_NOT_FOUND, +#ifdef WITH_WSREP + DB_ROLLBACK, +#endif + DB_TABLE_NOT_FOUND= 31, DB_TOO_BIG_RECORD, /*!< a record in an index would not fit on a compressed page, or it would become bigger than 1/2 free space in diff --git a/storage/innobase/include/dyn0buf.h b/storage/innobase/include/dyn0buf.h index 06af4dcca88..c2fc93c4cf3 100644 --- a/storage/innobase/include/dyn0buf.h +++ b/storage/innobase/include/dyn0buf.h @@ -57,11 +57,7 @@ public: /** Gets the number of used bytes in a block. @return number of bytes used */ - ulint used() const - MY_ATTRIBUTE((warn_unused_result)) - { - return(static_cast(m_used & ~DYN_BLOCK_FULL_FLAG)); - } + uint32_t used() const { return m_used; } /** Gets pointer to the start of data. 
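As context for the new DB_RECORD_CHANGED value added to dberr_t above: elsewhere in this patch it is mapped to HA_ERR_RECORD_CHANGED in convert_error_code_to_mysql(), reported as ER_CHECKREAD in my_error_innodb(), and treated as a rollback case in row_mysql_handle_errors(). The following is a minimal, self-contained sketch of that kind of error translation; the enum names and the helper map_dberr() are illustrative stand-ins, not the real dberr_t / HA_ERR_* identifiers.

    // Sketch only: translate an engine-internal error to a handler-level error,
    // in the spirit of convert_error_code_to_mysql() in this patch.
    #include <cstdio>

    enum class DbErr { Success, RecordChanged, Deadlock, LockWaitTimeout };
    enum class HaErr { Ok, RecordChanged, LockDeadlock, LockWaitTimeout };

    static HaErr map_dberr(DbErr e)
    {
      switch (e) {
      case DbErr::Success:         return HaErr::Ok;
      case DbErr::RecordChanged:   return HaErr::RecordChanged; // new case in this patch
      case DbErr::Deadlock:        return HaErr::LockDeadlock;
      case DbErr::LockWaitTimeout: return HaErr::LockWaitTimeout;
      }
      return HaErr::Ok; // unreachable for the enumerators above
    }

    int main()
    {
      // A write-write conflict detected under innodb_snapshot_isolation would
      // surface to the SQL layer as a "record changed" error and roll back.
      std::printf("mapped: %d\n", static_cast<int>(map_dberr(DbErr::RecordChanged)));
      return 0;
    }
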
diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 786999121c3..1f9b329efa3 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1610,17 +1610,34 @@ void fil_close_tablespace(uint32_t id); /*******************************************************************//** Allocates and builds a file name from a path, a table or tablespace name and a suffix. The string must be freed by caller with ut_free(). -@param[in] path NULL or the directory path or the full path and filename. +@param[in] path nullptr or the directory path or the full path and filename @param[in] name {} if path is full, or Table/Tablespace name -@param[in] ext the file extension to use -@param[in] trim_name true if the last name on the path should be trimmed. +@param[in] extension the file extension to use +@param[in] trim_name true if the last name on the path should be trimmed @return own: file name */ -char* fil_make_filepath(const char *path, const fil_space_t::name_type &name, - ib_extention ext, bool trim_name); +char* fil_make_filepath_low(const char *path, + const fil_space_t::name_type &name, + ib_extention extension, bool trim_name); char *fil_make_filepath(const char* path, const table_name_t name, ib_extention suffix, bool strip_name); +/** Wrapper function over fil_make_filepath_low to build file name. +@param path nullptr or the directory path or the full path and filename +@param name {} if path is full, or Table/Tablespace name +@param extension the file extension to use +@param trim_name true if the last name on the path should be trimmed +@return own: file name */ +static inline char* +fil_make_filepath(const char* path, const fil_space_t::name_type &name, + ib_extention extension, bool trim_name) +{ + /* If we are going to strip a name off the path, there better be a + path and a new name to put back on. */ + ut_ad(!trim_name || (path && name.data())); + return fil_make_filepath_low(path, name, extension, trim_name); +} + /** Create a tablespace file. @param[in] space_id Tablespace ID @param[in] name Tablespace name in dbname/tablename format. diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 26261554f9b..99459bcb4f6 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -209,24 +209,6 @@ typedef byte fseg_inode_t; static constexpr byte FSEG_MAGIC_N_BYTES[4]={0x05,0xd6,0x69,0xd2}; -#define FSEG_FILLFACTOR 8 /* If the number of unused but reserved - pages in a segment is less than - reserved pages / FSEG_FILLFACTOR, - and there are - at least FSEG_FRAG_LIMIT used pages, - then we allow a new empty extent to - be added to the segment in - fseg_alloc_free_page_general(). - Otherwise, we - use unused pages of the segment. */ - -#define FSEG_FRAG_LIMIT FSEG_FRAG_ARR_N_SLOTS - /* If the segment has >= this many - used pages, it may be expanded by - allocating extents to the segment; - until that only individual fragment - pages are allocated from the space */ - #define FSEG_FREE_LIST_LIMIT 40 /* If the reserved size of a segment is at least this many extents, we allow extents to be put to the free @@ -294,7 +276,7 @@ Determine if a page is marked free. 
@param[in] descr extent descriptor @param[in] offset page offset within extent @return whether the page is free */ -inline bool xdes_is_free(const xdes_t *descr, ulint offset) +inline bool xdes_is_free(const xdes_t *descr, uint32_t offset) { ut_ad(offset < FSP_EXTENT_SIZE); ulint index= XDES_FREE_BIT + XDES_BITS_PER_PAGE * offset; diff --git a/storage/innobase/include/log0crypt.h b/storage/innobase/include/log0crypt.h index ad32dc8faa5..1b8c4b41ca7 100644 --- a/storage/innobase/include/log0crypt.h +++ b/storage/innobase/include/log0crypt.h @@ -28,6 +28,9 @@ MDEV-11782: Rewritten for MariaDB 10.2 by Marko Mäkelä, MariaDB Corporation. #include "log0log.h" +/** innodb_encrypt_log: whether to encrypt the redo log */ +extern my_bool srv_encrypt_log; + /** Initialize the redo log encryption key and random parameters when creating a new redo log. The random parameters will be persisted in the log header. diff --git a/storage/innobase/include/log0log.h b/storage/innobase/include/log0log.h index ecc0b91e80f..26e6a57c54c 100644 --- a/storage/innobase/include/log0log.h +++ b/storage/innobase/include/log0log.h @@ -165,60 +165,92 @@ struct log_t static constexpr lsn_t FIRST_LSN= START_OFFSET; private: - /** The log sequence number of the last change of durable InnoDB files */ + /** the lock bit in buf_free */ + static constexpr size_t buf_free_LOCK= ~(~size_t{0} >> 1); alignas(CPU_LEVEL1_DCACHE_LINESIZE) + /** first free offset within buf used; + the most significant bit is set by lock_lsn() to protect this field + as well as write_to_buf, waits */ + std::atomic buf_free; +public: + /** number of write requests (to buf); protected by lock_lsn() or lsn_lock */ + size_t write_to_buf; + /** log record buffer, written to by mtr_t::commit() */ + byte *buf; +private: + /** The log sequence number of the last change of durable InnoDB files; + protected by lock_lsn() or lsn_lock or latch.wr_lock() */ std::atomic lsn; /** the first guaranteed-durable log sequence number */ std::atomic flushed_to_disk_lsn; - /** log sequence number when log resizing was initiated, or 0 */ - std::atomic resize_lsn; - /** set when there may be need to initiate a log checkpoint. - This must hold if lsn - last_checkpoint_lsn > max_checkpoint_age. 
*/ - std::atomic need_checkpoint; +public: + /** number of append_prepare_wait(); protected by lock_lsn() or lsn_lock */ + size_t waits; + /** innodb_log_buffer_size (size of buf,flush_buf if !is_pmem(), in bytes) */ + size_t buf_size; + /** log file size in bytes, including the header */ + lsn_t file_size; -#if defined(__aarch64__) - /* On ARM, we do more spinning */ +#ifdef LOG_LATCH_DEBUG + typedef srw_lock_debug log_rwlock; + typedef srw_mutex log_lsn_lock; + + bool latch_have_wr() const { return latch.have_wr(); } + bool latch_have_rd() const { return latch.have_rd(); } + bool latch_have_any() const { return latch.have_any(); } +#else +# ifndef UNIV_DEBUG +# elif defined SUX_LOCK_GENERIC + bool latch_have_wr() const { return true; } + bool latch_have_rd() const { return true; } + bool latch_have_any() const { return true; } +# else + bool latch_have_wr() const { return latch.is_write_locked(); } + bool latch_have_rd() const { return latch.is_locked(); } + bool latch_have_any() const { return latch.is_locked(); } +# endif +# ifdef __aarch64__ + /* On ARM, we spin more */ typedef srw_spin_lock log_rwlock; typedef pthread_mutex_wrapper log_lsn_lock; -#else +# else typedef srw_lock log_rwlock; typedef srw_mutex log_lsn_lock; +# endif #endif - -public: - /** rw-lock protecting writes to buf; normal mtr_t::commit() - outside any log checkpoint is covered by a shared latch */ + /** exclusive latch for checkpoint, shared for mtr_t::commit() to buf */ alignas(CPU_LEVEL1_DCACHE_LINESIZE) log_rwlock latch; -private: - /** mutex protecting buf_free et al, together with latch */ - log_lsn_lock lsn_lock; -public: - /** first free offset within buf use; protected by lsn_lock */ - Atomic_relaxed buf_free; - /** number of write requests (to buf); protected by lsn_lock */ - size_t write_to_buf; - /** number of append_prepare_wait(); protected by lsn_lock */ - size_t waits; -private: - /** Last written LSN */ - lsn_t write_lsn; -public: - /** log record buffer, written to by mtr_t::commit() */ - byte *buf; - /** buffer for writing data to ib_logfile0, or nullptr if is_pmem() - In write_buf(), buf and flush_buf are swapped */ - byte *flush_buf; + /** number of std::swap(buf, flush_buf) and writes from buf to log; protected by latch.wr_lock() */ ulint write_to_log; + /** Last written LSN */ + lsn_t write_lsn; + /** recommended maximum buf_free size, after which the buffer is flushed */ + size_t max_buf_free; + + /** buffer for writing data to ib_logfile0, or nullptr if is_pmem() + In write_buf(), buf and flush_buf are swapped */ + byte *flush_buf; + /** set when there may be need to initiate a log checkpoint. + This must hold if lsn - last_checkpoint_lsn > max_checkpoint_age. */ + std::atomic need_checkpoint; + /** whether a checkpoint is pending; protected by latch.wr_lock() */ + Atomic_relaxed checkpoint_pending; /** Log sequence number when a log file overwrite (broken crash recovery) was noticed. Protected by latch.wr_lock(). 
*/ lsn_t overwrite_warned; - /** innodb_log_buffer_size (size of buf,flush_buf if !is_pmem(), in bytes) */ - size_t buf_size; + /** latest completed checkpoint (protected by latch.wr_lock()) */ + Atomic_relaxed last_checkpoint_lsn; + /** next checkpoint LSN (protected by latch.wr_lock()) */ + lsn_t next_checkpoint_lsn; + /** next checkpoint number (protected by latch.wr_lock()) */ + ulint next_checkpoint_no; + /** Log file */ + log_file_t log; private: /** Log file being constructed during resizing; protected by latch */ log_file_t resize_log; @@ -229,18 +261,14 @@ private: /** Buffer for writing to resize_log; @see flush_buf */ byte *resize_flush_buf; - void init_lsn_lock() {lsn_lock.init(); } - void lock_lsn() { lsn_lock.wr_lock(); } - void unlock_lsn() {lsn_lock.wr_unlock(); } - void destroy_lsn_lock() { lsn_lock.destroy(); } + /** Special implementation of lock_lsn() for IA-32 and AMD64 */ + void lsn_lock_bts() noexcept; + /** Acquire a lock for updating buf_free and related fields. + @return the value of buf_free */ + size_t lock_lsn() noexcept; -public: - /** recommended maximum size of buf, after which the buffer is flushed */ - size_t max_buf_free; - - /** log file size in bytes, including the header */ - lsn_t file_size; -private: + /** log sequence number when log resizing was initiated, or 0 */ + std::atomic resize_lsn; /** the log sequence number at the start of the log file */ lsn_t first_lsn; #if defined __linux__ || defined _WIN32 @@ -250,8 +278,6 @@ private: public: /** format of the redo log: e.g., FORMAT_10_8 */ uint32_t format; - /** Log file */ - log_file_t log; #if defined __linux__ || defined _WIN32 /** whether file system caching is enabled for the log */ my_bool log_buffered; @@ -281,21 +307,28 @@ public: /*!< this is the maximum allowed value for lsn - last_checkpoint_lsn when a new query step is started */ - /** latest completed checkpoint (protected by latch.wr_lock()) */ - Atomic_relaxed last_checkpoint_lsn; - /** next checkpoint LSN (protected by log_sys.latch) */ - lsn_t next_checkpoint_lsn; - /** next checkpoint number (protected by latch.wr_lock()) */ - ulint next_checkpoint_no; - /** whether a checkpoint is pending */ - Atomic_relaxed checkpoint_pending; /** buffer for checkpoint header */ byte *checkpoint_buf; /* @} */ +private: + /** A lock when the spin-only lock_lsn() is not being used */ + log_lsn_lock lsn_lock; +public: + bool is_initialised() const noexcept { return max_buf_free != 0; } + /** whether there is capacity in the log buffer */ + bool buf_free_ok() const noexcept + { + return (buf_free.load(std::memory_order_relaxed) & ~buf_free_LOCK) < + max_buf_free; + } + + void set_buf_free(size_t f) noexcept + { ut_ad(f < buf_free_LOCK); buf_free.store(f, std::memory_order_relaxed); } + #ifdef HAVE_PMEM bool is_pmem() const noexcept { return !flush_buf; } #else @@ -304,7 +337,7 @@ public: bool is_opened() const noexcept { return log.is_opened(); } - /** @return target write LSN to react on buf_free >= max_buf_free */ + /** @return target write LSN to react on !buf_free_ok() */ inline lsn_t get_write_target() const; /** @return LSN at which log resizing was started and is still in progress @@ -406,9 +439,7 @@ public: void set_recovered_lsn(lsn_t lsn) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_write_locked()); -#endif /* SUX_LOCK_GENERIC */ + ut_ad(latch_have_wr()); write_lsn= lsn; this->lsn.store(lsn, std::memory_order_relaxed); flushed_to_disk_lsn.store(lsn, std::memory_order_relaxed); @@ -448,17 +479,23 @@ public: private: /** Wait in 
append_prepare() for buffer to become available - @param lsn log sequence number to write up to - @param ex whether log_sys.latch is exclusively locked */ - ATTRIBUTE_COLD void append_prepare_wait(lsn_t lsn, bool ex) noexcept; + @tparam spin whether to use the spin-only lock_lsn() + @param b the value of buf_free + @param ex whether log_sys.latch is exclusively locked + @param lsn log sequence number to write up to + @return the new value of buf_free */ + template + ATTRIBUTE_COLD size_t append_prepare_wait(size_t b, bool ex, lsn_t lsn) + noexcept; public: /** Reserve space in the log buffer for appending data. + @tparam spin whether to use the spin-only lock_lsn() @tparam pmem log_sys.is_pmem() @param size total length of the data to append(), in bytes @param ex whether log_sys.latch is exclusively locked @return the start LSN and the buffer position for append() */ - template - inline std::pair append_prepare(size_t size, bool ex) noexcept; + template + std::pair append_prepare(size_t size, bool ex) noexcept; /** Append a string of bytes to the redo log. @param d destination @@ -466,9 +503,7 @@ public: @param size length of str, in bytes */ void append(byte *&d, const void *s, size_t size) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_locked()); -#endif + ut_ad(latch_have_any()); ut_ad(d + size <= buf + (is_pmem() ? file_size : buf_size)); memcpy(d, s, size); d+= size; diff --git a/storage/innobase/include/mtr0mtr.h b/storage/innobase/include/mtr0mtr.h index fe0ad3a7128..7621e5a0b6f 100644 --- a/storage/innobase/include/mtr0mtr.h +++ b/storage/innobase/include/mtr0mtr.h @@ -691,9 +691,26 @@ private: std::pair do_write(); /** Append the redo log records to the redo log buffer. + @tparam spin whether to use the spin-only log_sys.lock_lsn() + @tparam pmem log_sys.is_pmem() + @param mtr mini-transaction @param len number of bytes to write @return {start_lsn,flush_ahead} */ - std::pair finish_write(size_t len); + template static + std::pair finish_writer(mtr_t *mtr, size_t len); + + /** The applicable variant of finish_writer() */ + static std::pair (*finisher)(mtr_t *, size_t); + + std::pair finish_write(size_t len) + { return finisher(this, len); } +public: + /** Poll interval in log_sys.lock_lsn(); 0 to use log_sys.lsn_lock. + Protected by LOCK_global_system_variables and log_sys.latch. */ + static unsigned spin_wait_delay; + /** Update finisher when spin_wait_delay is changing to or from 0. */ + static void finisher_update(); +private: /** Release all latches. 
*/ void release(); diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 9584990e72e..a5953dcfd51 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -187,14 +187,10 @@ public: WRITE_ASYNC= WRITE_SYNC | 1, /** A doublewrite batch */ DBLWR_BATCH= WRITE_ASYNC | 8, - /** Write data; evict the block on write completion */ - WRITE_LRU= WRITE_ASYNC | 32, /** Write data and punch hole for the rest */ - PUNCH= WRITE_ASYNC | 64, - /** Write data and punch hole; evict the block on write completion */ - PUNCH_LRU= PUNCH | WRITE_LRU, + PUNCH= WRITE_ASYNC | 16, /** Zero out a range of bytes in fil_space_t::io() */ - PUNCH_RANGE= WRITE_SYNC | 128, + PUNCH_RANGE= WRITE_SYNC | 32, }; constexpr IORequest(buf_page_t *bpage, buf_tmp_buffer_t *slot, @@ -207,7 +203,6 @@ public: bool is_read() const { return (type & READ_SYNC) != 0; } bool is_write() const { return (type & WRITE_SYNC) != 0; } - bool is_LRU() const { return (type & (WRITE_LRU ^ WRITE_ASYNC)) != 0; } bool is_async() const { return (type & (READ_SYNC ^ READ_ASYNC)) != 0; } void write_complete(int io_error) const; diff --git a/storage/innobase/include/row0row.h b/storage/innobase/include/row0row.h index a26924d08a0..85c18ddea74 100644 --- a/storage/innobase/include/row0row.h +++ b/storage/innobase/include/row0row.h @@ -356,6 +356,12 @@ row_search_index_entry( mtr_t* mtr) /*!< in: mtr */ MY_ATTRIBUTE((nonnull, warn_unused_result)); +/** Get the byte offset of the DB_TRX_ID column +@param[in] rec clustered index record +@param[in] index clustered index +@return the byte offset of DB_TRX_ID, from the start of rec */ +ulint row_trx_id_offset(const rec_t* rec, const dict_index_t* index); + #define ROW_COPY_DATA 1 #define ROW_COPY_POINTERS 2 diff --git a/storage/innobase/include/srv0mon.h b/storage/innobase/include/srv0mon.h index 4672ce00a36..6b9a6f09681 100644 --- a/storage/innobase/include/srv0mon.h +++ b/storage/innobase/include/srv0mon.h @@ -194,7 +194,6 @@ enum monitor_id_t { MONITOR_FLUSH_ADAPTIVE_AVG_PASS, MONITOR_LRU_GET_FREE_LOOPS, - MONITOR_LRU_GET_FREE_WAITS, MONITOR_FLUSH_AVG_PAGE_RATE, MONITOR_FLUSH_LSN_AVG_RATE, @@ -215,7 +214,6 @@ enum monitor_id_t { MONITOR_LRU_BATCH_SCANNED_PER_CALL, MONITOR_LRU_BATCH_FLUSH_TOTAL_PAGE, MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE, - MONITOR_LRU_SINGLE_FLUSH_FAILURE_COUNT, MONITOR_LRU_GET_FREE_SEARCH, MONITOR_LRU_SEARCH_SCANNED, MONITOR_LRU_SEARCH_SCANNED_NUM_CALL, diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 8e0037998d5..46fa1ca95e2 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -121,10 +121,6 @@ struct srv_stats_t ulint_ctr_n_t n_temp_blocks_decrypted; }; -/** We are prepared for a situation that we have this many threads waiting for -a transactional lock inside InnoDB. srv_start() sets the value. */ -extern ulint srv_max_n_threads; - extern const char* srv_main_thread_op_info; /** Prefix used by MySQL to indicate pre-5.1 table name encoding */ diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 7682932392e..bb152e4a9a5 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -787,6 +787,9 @@ public: string */ /** TRX_ISO_REPEATABLE_READ, ... 
*/ unsigned isolation_level:2; + /** when set, REPEATABLE READ will actually be Snapshot Isolation, due to + detecting write/write conflicts and disabling "semi-consistent read" */ + unsigned snapshot_isolation:1; /** normally set; "SET foreign_key_checks=0" can be issued to suppress foreign key checks, in table imports, for example */ unsigned check_foreigns:1; diff --git a/storage/innobase/include/ut0new.h b/storage/innobase/include/ut0new.h index f4183e4c61a..3ff5f8853e0 100644 --- a/storage/innobase/include/ut0new.h +++ b/storage/innobase/include/ut0new.h @@ -1071,9 +1071,8 @@ static inline void *ut_malloc_dontdump(size_t n_bytes, ...) { void *ptr = my_large_malloc(&n_bytes, MYF(0)); - ut_dontdump(ptr, n_bytes, true); - if (ptr) { + ut_dontdump(ptr, n_bytes, true); os_total_large_mem_allocated += n_bytes; } return ptr; diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 907eaf58997..e961ad6c56d 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -970,8 +970,31 @@ func_exit: for (lock_t *lock= UT_LIST_GET_FIRST(table->locks); lock; lock= UT_LIST_GET_NEXT(un_member.tab_lock.locks, lock)) { - /* if victim has also BF status, but has earlier seqno, we have to wait */ - if (lock->trx != trx && + /* Victim trx needs to be different from BF trx and it has to have a + THD so that we can kill it. Victim might not have THD in two cases: + + (1) An incomplete transaction that was recovered from undo logs + on server startup (and not yet rolled back). + + (2) Transaction that is in XA PREPARE state and whose client + connection was disconnected. + + Neither of these can complete before lock_wait_wsrep() releases + lock_sys.latch. + + (1) trx_t::commit_in_memory() is clearing both + trx_t::state and trx_t::is_recovered before it invokes + lock_release(trx_t*) (which would be blocked by the exclusive + lock_sys.latch that we are holding here). Hence, it is not + possible to write a debug assertion to document this scenario. + + (2) If is in XA PREPARE state, it would eventually be rolled + back and the lock conflict would be resolved when an XA COMMIT + or XA ROLLBACK statement is executed in some other connection. + + If victim has also BF status, but has earlier seqno, we have to wait. + */ + if (lock->trx != trx && lock->trx->mysql_thd && !(wsrep_thd_is_BF(lock->trx->mysql_thd, false) && wsrep_thd_order_before(lock->trx->mysql_thd, trx->mysql_thd))) { @@ -1003,8 +1026,11 @@ func_exit: lock= lock_rec_get_next(heap_no, lock); do { - /* if victim has also BF status, but has earlier seqno, we have to wait */ - if (lock->trx != trx && + /* This is similar case as above except here we have + record-locks instead of table locks. See details + from comment above. + */ + if (lock->trx != trx && lock->trx->mysql_thd && !(wsrep_thd_is_BF(lock->trx->mysql_thd, false) && wsrep_thd_order_before(lock->trx->mysql_thd, trx->mysql_thd))) { @@ -1030,8 +1056,12 @@ func_exit: std::vector> victim_id; for (trx_t *v : victims) + { + /* Victim must have THD */ + ut_ad(v->mysql_thd); victim_id.emplace_back(std::pair {thd_get_thread_id(v->mysql_thd), v->id}); + } DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", { @@ -4182,7 +4212,7 @@ restart: ulint count= 1000; /* We will not attempt hardware lock elision (memory transaction) here. 
Both lock_rec_dequeue_from_page() and lock_table_dequeue() - would likely lead to a memory transaction due to a system call, to + would likely lead to a memory transaction abort due to a system call, to wake up a waiting transaction. */ lock_sys.rd_lock(SRW_LOCK_CALL); trx->mutex_lock(); @@ -4352,28 +4382,82 @@ void lock_release_on_drop(trx_t *trx) } } -/** Reset lock bit for supremum and rebuild waiting queue. +/** Reset a lock bit and rebuild waiting queue. @param cell rec hash cell of in_lock @param lock the lock with supemum bit set */ -static void lock_rec_unlock_supremum(hash_cell_t &cell, lock_t *lock) +static void lock_rec_unlock(hash_cell_t &cell, lock_t *lock, ulint heap_no) { - ut_ad(lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM)); + ut_ad(lock_rec_get_nth_bit(lock, heap_no)); #ifdef SAFE_MUTEX ut_ad(!mysql_mutex_is_owner(&lock_sys.wait_mutex)); #endif /* SAFE_MUTEX */ ut_ad(!lock->is_table()); ut_ad(lock_sys.is_writer() || lock->trx->mutex_is_owner()); - lock_rec_reset_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM); + lock_rec_reset_nth_bit(lock, heap_no); - lock_t *first_lock= lock_sys_t::get_first( - cell, lock->un_member.rec_lock.page_id, PAGE_HEAP_NO_SUPREMUM); + lock_t *first_lock= + lock_sys_t::get_first(cell, lock->un_member.rec_lock.page_id, heap_no); lock_rec_rebuild_waiting_queue( #if defined(UNIV_DEBUG) || !defined(DBUG_OFF) lock->trx, #endif /* defined(UNIV_DEBUG) || !defined(DBUG_OFF) */ - cell, first_lock, PAGE_HEAP_NO_SUPREMUM); + cell, first_lock, heap_no); +} + +/** Release locks to unmodified records on a clustered index page. +@param cell lock_sys.rec_hash cell of lock +@param lock record lock +@param offsets storage for rec_get_offsets() +@param heap storage for rec_get_offsets() +@param mtr mini-transaction (will be started and committed) */ +static void lock_rec_unlock_unmodified(hash_cell_t &cell, lock_t *lock, + rec_offs *&offsets, mem_heap_t *&heap, + mtr_t &mtr) +{ + ut_ad(!lock->is_waiting()); + + dict_index_t *const index= lock->index; + + mtr.start(); + if (buf_block_t *block= + btr_block_get(*index, lock->un_member.rec_lock.page_id.page_no(), + RW_S_LATCH, &mtr)) + { + if (UNIV_UNLIKELY(!page_is_leaf(block->page.frame))) + { + ut_ad("corrupted lock system" == 0); + goto func_exit; + } + + for (ulint i= PAGE_HEAP_NO_USER_LOW; i < lock_rec_get_n_bits(lock); ++i) + { + if (!lock_rec_get_nth_bit(lock, i)); + else if (const rec_t *rec= + page_find_rec_with_heap_no(block->page.frame, i)) + { + if (index->is_clust()) + { + if (trx_read_trx_id(rec + row_trx_id_offset(rec, index)) == + lock->trx->id) + continue; + unlock_rec: + lock_rec_unlock(cell, lock, i); + } + else + { + offsets= rec_get_offsets(rec, index, offsets, index->n_core_fields, + ULINT_UNDEFINED, &heap); + if (lock->trx != + lock_sec_rec_some_has_impl(lock->trx, rec, index, offsets)) + goto unlock_rec; + } + } + } + } +func_exit: + mtr.commit(); } /** Release non-exclusive locks on XA PREPARE, @@ -4391,6 +4475,12 @@ static bool lock_release_on_prepare_try(trx_t *trx) DBUG_ASSERT(trx->state == TRX_STATE_PREPARED); bool all_released= true; + mtr_t mtr; + rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; + rec_offs *offsets= offsets_; + mem_heap_t *heap= nullptr; + rec_offs_init(offsets_); + lock_sys.rd_lock(SRW_LOCK_CALL); trx->mutex_lock(); @@ -4407,20 +4497,24 @@ static bool lock_release_on_prepare_try(trx_t *trx) if (!lock->is_table()) { ut_ad(!lock->index->table->is_temporary()); - bool supremum_bit = lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM); - bool rec_granted_exclusive_not_gap = + bool 
supremum_bit= lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM); + bool rec_granted_exclusive_not_gap= lock->is_rec_granted_exclusive_not_gap(); if (!supremum_bit && rec_granted_exclusive_not_gap) continue; - auto &lock_hash= lock_sys.hash_get(lock->type_mode); - auto cell= lock_hash.cell_get(lock->un_member.rec_lock.page_id.fold()); + if (UNIV_UNLIKELY(lock->type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE))) + continue; /* SPATIAL INDEX locking is broken. */ + auto cell= + lock_sys.rec_hash.cell_get(lock->un_member.rec_lock.page_id.fold()); auto latch= lock_sys_t::hash_table::latch(cell); if (latch->try_acquire()) { if (!rec_granted_exclusive_not_gap) lock_rec_dequeue_from_page(lock, false); else if (supremum_bit) - lock_rec_unlock_supremum(*cell, lock); + lock_rec_unlock(*cell, lock, PAGE_HEAP_NO_SUPREMUM); + else + lock_rec_unlock_unmodified(*cell, lock, offsets, heap, mtr); latch->release(); } else @@ -4453,6 +4547,8 @@ static bool lock_release_on_prepare_try(trx_t *trx) lock_sys.rd_unlock(); trx->mutex_unlock(); + if (UNIV_LIKELY_NULL(heap)) + mem_heap_free(heap); return all_released; } @@ -4466,52 +4562,71 @@ void lock_release_on_prepare(trx_t *trx) if (lock_release_on_prepare_try(trx)) return; - LockMutexGuard g{SRW_LOCK_CALL}; - trx->mutex_lock(); + mtr_t mtr; + rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; + rec_offs *offsets= offsets_; + mem_heap_t *heap= nullptr; + + rec_offs_init(offsets_); - for (lock_t *prev, *lock= UT_LIST_GET_LAST(trx->lock.trx_locks); lock; - lock= prev) { - ut_ad(lock->trx == trx); - prev= UT_LIST_GET_PREV(trx_locks, lock); - if (!lock->is_table()) + LockMutexGuard g{SRW_LOCK_CALL}; + trx->mutex_lock(); + + for (lock_t *prev, *lock= UT_LIST_GET_LAST(trx->lock.trx_locks); lock; + lock= prev) { - ut_ad(!lock->index->table->is_temporary()); - if (!lock->is_rec_granted_exclusive_not_gap()) - lock_rec_dequeue_from_page(lock, false); - else if (lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM)) + ut_ad(lock->trx == trx); + prev= UT_LIST_GET_PREV(trx_locks, lock); + if (!lock->is_table()) { - auto &lock_hash= lock_sys.hash_get(lock->type_mode); - auto cell= lock_hash.cell_get(lock->un_member.rec_lock.page_id.fold()); - lock_rec_unlock_supremum(*cell, lock); + ut_ad(!lock->index->table->is_temporary()); + if (!lock->is_rec_granted_exclusive_not_gap()) + lock_rec_dequeue_from_page(lock, false); + else if (UNIV_UNLIKELY(lock->type_mode & + (LOCK_PREDICATE | LOCK_PRDT_PAGE))) + /* SPATIAL INDEX locking is broken. */; + else + { + auto cell= lock_sys.rec_hash.cell_get(lock->un_member.rec_lock. 
+ page_id.fold()); + if (lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM)) + lock_rec_unlock(*cell, lock, PAGE_HEAP_NO_SUPREMUM); + else + { + ut_ad(lock->trx->isolation_level > TRX_ISO_READ_COMMITTED || + /* Insert-intention lock is valid for supremum for isolation + level > TRX_ISO_READ_COMMITTED */ + lock->mode() == LOCK_X || + !lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM)); + lock_rec_unlock_unmodified(*cell, lock, offsets, heap, mtr); + } + } } else - ut_ad(lock->trx->isolation_level > TRX_ISO_READ_COMMITTED || - /* Insert-intention lock is valid for supremum for isolation - level > TRX_ISO_READ_COMMITTED */ - lock->mode() == LOCK_X || - !lock_rec_get_nth_bit(lock, PAGE_HEAP_NO_SUPREMUM)); - } - else - { - ut_d(dict_table_t *table= lock->un_member.tab_lock.table); - ut_ad(!table->is_temporary()); - switch (lock->mode()) { - case LOCK_IS: - case LOCK_S: - lock_table_dequeue(lock, false); - break; - case LOCK_IX: - case LOCK_X: - ut_ad(table->id >= DICT_HDR_FIRST_ID || trx->dict_operation); - /* fall through */ - default: - break; + { + ut_d(dict_table_t *table= lock->un_member.tab_lock.table); + ut_ad(!table->is_temporary()); + switch (lock->mode()) { + case LOCK_IS: + case LOCK_S: + lock_table_dequeue(lock, false); + break; + case LOCK_IX: + case LOCK_X: + ut_ad(table->id >= DICT_HDR_FIRST_ID || trx->dict_operation); + /* fall through */ + default: + break; + } } } } trx->mutex_unlock(); + + if (UNIV_LIKELY_NULL(heap)) + mem_heap_free(heap); } /** Release locks on a table whose creation is being rolled back */ @@ -5949,6 +6064,14 @@ lock_clust_rec_read_check_and_lock( return DB_SUCCESS; } + if (heap_no > PAGE_HEAP_NO_SUPREMUM && gap_mode != LOCK_GAP + && trx->snapshot_isolation + && trx->read_view.is_open() + && !trx->read_view.changes_visible( + trx_read_trx_id(rec + row_trx_id_offset(rec, index)))) { + return DB_RECORD_CHANGED; + } + dberr_t err = lock_rec_lock(false, gap_mode | mode, block, heap_no, index, thr); @@ -6713,6 +6836,7 @@ and less modified rows. Bit 0 is used to prefer orig_trx in case of a tie. 
print(buf); } + DBUG_EXECUTE_IF("innodb_deadlock_victim_self", victim= trx;); ut_ad(victim->state == TRX_STATE_ACTIVE); /* victim->lock.was_chosen_as_deadlock_victim must always be set before diff --git a/storage/innobase/log/log0log.cc b/storage/innobase/log/log0log.cc index 33375ca6631..e9f4389f8e1 100644 --- a/storage/innobase/log/log0log.cc +++ b/storage/innobase/log/log0log.cc @@ -69,9 +69,7 @@ log_t log_sys; void log_t::set_capacity() { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); /* Margin for the free space in the smallest log, before a new query step which modifies the database, is started */ @@ -134,7 +132,6 @@ bool log_t::create() #endif latch.SRW_LOCK_INIT(log_latch_key); - init_lsn_lock(); last_checkpoint_lsn= FIRST_LSN; log_capacity= 0; @@ -143,7 +140,7 @@ bool log_t::create() next_checkpoint_lsn= 0; checkpoint_pending= false; - buf_free= 0; + set_buf_free(0); ut_ad(is_initialised()); #ifndef HAVE_PMEM @@ -244,6 +241,7 @@ void log_t::attach_low(log_file_t file, os_offset_t size) # endif log_maybe_unbuffered= true; log_buffered= false; + mtr_t::finisher_update(); return true; } } @@ -278,6 +276,7 @@ void log_t::attach_low(log_file_t file, os_offset_t size) block_size); #endif + mtr_t::finisher_update(); #ifdef HAVE_PMEM checkpoint_buf= static_cast(aligned_malloc(block_size, block_size)); memset_aligned<64>(checkpoint_buf, 0, block_size); @@ -313,9 +312,7 @@ void log_t::header_write(byte *buf, lsn_t lsn, bool encrypted) void log_t::create(lsn_t lsn) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_write_locked()); -#endif + ut_ad(latch_have_wr()); ut_ad(!recv_no_log_write); ut_ad(is_latest()); ut_ad(this == &log_sys); @@ -332,12 +329,12 @@ void log_t::create(lsn_t lsn) noexcept { mprotect(buf, size_t(file_size), PROT_READ | PROT_WRITE); memset_aligned<4096>(buf, 0, 4096); - buf_free= START_OFFSET; + set_buf_free(START_OFFSET); } else #endif { - buf_free= 0; + set_buf_free(0); memset_aligned<4096>(flush_buf, 0, buf_size); memset_aligned<4096>(buf, 0, buf_size); } @@ -838,9 +835,7 @@ ATTRIBUTE_COLD void log_t::resize_write_buf(size_t length) noexcept @return the current log sequence number */ template inline lsn_t log_t::write_buf() noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_write_locked()); -#endif + ut_ad(latch_have_wr()); ut_ad(!is_pmem()); ut_ad(!srv_read_only_mode); @@ -956,7 +951,7 @@ wait and check if an already running write is covering the request. void log_write_up_to(lsn_t lsn, bool durable, const completion_callback *callback) { - ut_ad(!srv_read_only_mode || (log_sys.buf_free < log_sys.max_buf_free)); + ut_ad(!srv_read_only_mode || log_sys.buf_free_ok()); ut_ad(lsn != LSN_MAX); ut_ad(lsn != 0); ut_ad(lsn <= log_sys.get_lsn()); @@ -1085,7 +1080,7 @@ NOTE that this function may only be called while not holding any synchronization objects except dict_sys.latch. 
*/ void log_free_check() { - ut_ad(!lock_sys.is_writer()); + ut_ad(!lock_sys.is_holder()); if (log_sys.check_for_checkpoint()) { ut_ad(!recv_no_log_write); @@ -1302,6 +1297,7 @@ log_print( void log_t::close() { ut_ad(this == &log_sys); + ut_ad(!(buf_free & buf_free_LOCK)); if (!is_initialised()) return; close_file(); @@ -1319,7 +1315,6 @@ void log_t::close() #endif latch.destroy(); - destroy_lsn_lock(); recv_sys.close(); diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 3656ce72f37..19c924854e8 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2426,11 +2426,9 @@ recv_sys_t::parse_mtr_result recv_sys_t::parse(source &l, bool if_exists) noexcept { restart: -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked() || + ut_ad(log_sys.latch_have_wr() || srv_operation == SRV_OPERATION_BACKUP || srv_operation == SRV_OPERATION_BACKUP_NO_DEFER); -#endif mysql_mutex_assert_owner(&mutex); ut_ad(log_sys.next_checkpoint_lsn); ut_ad(log_sys.is_latest()); @@ -3967,9 +3965,7 @@ static bool recv_scan_log(bool last_phase) lsn_t rewound_lsn= 0; for (ut_d(lsn_t source_offset= 0);;) { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); #ifdef UNIV_DEBUG const bool wrap{source_offset + recv_sys.len == log_sys.file_size}; #endif @@ -4364,9 +4360,7 @@ recv_init_crash_recovery_spaces(bool rescan, bool& missing_tablespace) static dberr_t recv_rename_files() { mysql_mutex_assert_owner(&recv_sys.mutex); -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); dberr_t err= DB_SUCCESS; @@ -4649,7 +4643,7 @@ err_exit: PROT_READ | PROT_WRITE); #endif } - log_sys.buf_free = recv_sys.offset; + log_sys.set_buf_free(recv_sys.offset); if (recv_needed_recovery && srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { /* Write a FILE_CHECKPOINT marker as the first thing, diff --git a/storage/innobase/mtr/mtr0mtr.cc b/storage/innobase/mtr/mtr0mtr.cc index dc0b073618e..3896fec593b 100644 --- a/storage/innobase/mtr/mtr0mtr.cc +++ b/storage/innobase/mtr/mtr0mtr.cc @@ -37,6 +37,24 @@ Created 11/26/1995 Heikki Tuuri #include "srv0start.h" #include "log.h" #include "mariadb_stats.h" +#include "my_cpu.h" + +std::pair (*mtr_t::finisher)(mtr_t *, size_t); +unsigned mtr_t::spin_wait_delay; + +void mtr_t::finisher_update() +{ + ut_ad(log_sys.latch_have_wr()); + finisher= +#ifdef HAVE_PMEM + log_sys.is_pmem() + ? (spin_wait_delay + ? mtr_t::finish_writer : mtr_t::finish_writer) + : +#endif + (spin_wait_delay + ? 
mtr_t::finish_writer : mtr_t::finish_writer); +} void mtr_memo_slot_t::release() const { @@ -82,9 +100,7 @@ void mtr_memo_slot_t::release() const inline buf_page_t *buf_pool_t::prepare_insert_into_flush_list(lsn_t lsn) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(recv_recovery_is_on() || log_sys.latch.is_locked()); -#endif + ut_ad(recv_recovery_is_on() || log_sys.latch_have_any()); ut_ad(lsn >= log_sys.last_checkpoint_lsn); mysql_mutex_assert_owner(&flush_list_mutex); static_assert(log_t::FIRST_LSN >= 2, "compatibility"); @@ -316,10 +332,8 @@ void mtr_t::release() inline lsn_t log_t::get_write_target() const { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_locked()); -#endif - if (UNIV_LIKELY(buf_free < max_buf_free)) + ut_ad(latch_have_any()); + if (UNIV_LIKELY(buf_free_ok())) return 0; ut_ad(!is_pmem()); /* The LSN corresponding to the end of buf is @@ -556,9 +570,7 @@ void mtr_t::commit_shrink(fil_space_t &space, uint32_t size) /* Durably write the reduced FSP_SIZE before truncating the data file. */ log_write_and_flush(); -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); os_file_truncate(space.chain.start->name, space.chain.start->handle, os_offset_t{size} << srv_page_size_shift, true); @@ -714,9 +726,7 @@ This is to be used at log_checkpoint(). @return current LSN */ ATTRIBUTE_COLD lsn_t mtr_t::commit_files(lsn_t checkpoint_lsn) { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif + ut_ad(log_sys.latch_have_wr()); ut_ad(is_active()); ut_ad(m_log_mode == MTR_LOG_ALL); ut_ad(!m_made_dirty); @@ -870,13 +880,111 @@ ATTRIBUTE_COLD static void log_overwrite_warning(lsn_t lsn) ? ". Shutdown is in progress" : ""); } -/** Wait in append_prepare() for buffer to become available -@param lsn log sequence number to write up to -@param ex whether log_sys.latch is exclusively locked */ -ATTRIBUTE_COLD void log_t::append_prepare_wait(lsn_t lsn, bool ex) noexcept +static ATTRIBUTE_NOINLINE void lsn_delay(size_t delay, size_t mult) noexcept +{ + delay*= mult * 2; // GCC 13.2.0 -O2 targeting AMD64 wants to unroll twice + HMT_low(); + do + MY_RELAX_CPU(); + while (--delay); + HMT_medium(); +} + +#if defined __clang_major__ && __clang_major__ < 10 +/* Only clang-10 introduced support for asm goto */ +#elif defined __APPLE__ +/* At least some versions of Apple Xcode do not support asm goto */ +#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) +# if SIZEOF_SIZE_T == 8 +# define LOCK_TSET \ + __asm__ goto("lock btsq $63, %0\n\t" "jnc %l1" \ + : : "m"(buf_free) : "cc", "memory" : got) +# else +# define LOCK_TSET \ + __asm__ goto("lock btsl $31, %0\n\t" "jnc %l1" \ + : : "m"(buf_free) : "cc", "memory" : got) +# endif +#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64) +# if SIZEOF_SIZE_T == 8 +# define LOCK_TSET \ + if (!_interlockedbittestandset64 \ + (reinterpret_cast(&buf_free), 63)) return +# else +# define LOCK_TSET \ + if (!_interlockedbittestandset \ + (reinterpret_cast(&buf_free), 31)) return +# endif +#endif + +#ifdef LOCK_TSET +ATTRIBUTE_NOINLINE +void log_t::lsn_lock_bts() noexcept +{ + LOCK_TSET; + { + const size_t m= mtr_t::spin_wait_delay; + constexpr size_t DELAY= 10, MAX_ITERATIONS= 10; + for (size_t delay_count= DELAY, delay_iterations= 1;; + lsn_delay(delay_iterations, m)) + { + if (!(buf_free.load(std::memory_order_relaxed) & buf_free_LOCK)) + LOCK_TSET; + if (!delay_count); + else if (delay_iterations < MAX_ITERATIONS) + delay_count= DELAY, delay_iterations++; + else + delay_count--; 
+ } + } + +# ifdef __GNUC__ + got: + return; +# endif +} + +inline +#else +ATTRIBUTE_NOINLINE +#endif +size_t log_t::lock_lsn() noexcept +{ +#ifdef LOCK_TSET + lsn_lock_bts(); + return ~buf_free_LOCK & buf_free.load(std::memory_order_relaxed); +# undef LOCK_TSET +#else + size_t b= buf_free.fetch_or(buf_free_LOCK, std::memory_order_acquire); + if (b & buf_free_LOCK) + { + const size_t m= mtr_t::spin_wait_delay; + constexpr size_t DELAY= 10, MAX_ITERATIONS= 10; + for (size_t delay_count= DELAY, delay_iterations= 1; + ((b= buf_free.load(std::memory_order_relaxed)) & buf_free_LOCK) || + (buf_free_LOCK & (b= buf_free.fetch_or(buf_free_LOCK, + std::memory_order_acquire))); + lsn_delay(delay_iterations, m)) + if (!delay_count); + else if (delay_iterations < MAX_ITERATIONS) + delay_count= DELAY, delay_iterations++; + else + delay_count--; + } + return b; +#endif +} + +template +ATTRIBUTE_COLD size_t log_t::append_prepare_wait(size_t b, bool ex, lsn_t lsn) + noexcept { waits++; - unlock_lsn(); + ut_ad(buf_free.load(std::memory_order_relaxed) == + (spin ? (b | buf_free_LOCK) : b)); + if (spin) + buf_free.store(b, std::memory_order_release); + else + lsn_lock.wr_unlock(); if (ex) latch.wr_unlock(); @@ -890,51 +998,57 @@ ATTRIBUTE_COLD void log_t::append_prepare_wait(lsn_t lsn, bool ex) noexcept else latch.rd_lock(SRW_LOCK_CALL); - lock_lsn(); + if (spin) + return lock_lsn(); + + lsn_lock.wr_lock(); + return buf_free.load(std::memory_order_relaxed); } /** Reserve space in the log buffer for appending data. +@tparam spin whether to use the spin-only lock_lsn() @tparam pmem log_sys.is_pmem() @param size total length of the data to append(), in bytes @param ex whether log_sys.latch is exclusively locked @return the start LSN and the buffer position for append() */ -template +template inline std::pair log_t::append_prepare(size_t size, bool ex) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_locked()); -# ifndef _WIN32 // there is no accurate is_write_locked() on SRWLOCK - ut_ad(ex == latch.is_write_locked()); -# endif -#endif + ut_ad(ex ? latch_have_wr() : latch_have_rd()); ut_ad(pmem == is_pmem()); - lock_lsn(); + if (!spin) + lsn_lock.wr_lock(); + size_t b{spin ? lock_lsn() : buf_free.load(std::memory_order_relaxed)}; write_to_buf++; const lsn_t l{lsn.load(std::memory_order_relaxed)}, end_lsn{l + size}; - size_t b{buf_free}; if (UNIV_UNLIKELY(pmem ? (end_lsn - get_flushed_lsn(std::memory_order_relaxed)) > capacity() : b + size >= buf_size)) - { - append_prepare_wait(l, ex); - b= buf_free; - } + b= append_prepare_wait(b, ex, l); - lsn.store(end_lsn, std::memory_order_relaxed); size_t new_buf_free= b + size; if (pmem && new_buf_free >= file_size) new_buf_free-= size_t(capacity()); - buf_free= new_buf_free; - unlock_lsn(); + + lsn.store(end_lsn, std::memory_order_relaxed); if (UNIV_UNLIKELY(end_lsn >= last_checkpoint_lsn + log_capacity)) - set_check_for_checkpoint(); + set_check_for_checkpoint(true); - return {l, &buf[b]}; + byte *our_buf= buf; + if (spin) + buf_free.store(new_buf_free, std::memory_order_release); + else + { + buf_free.store(new_buf_free, std::memory_order_relaxed); + lsn_lock.wr_unlock(); + } + + return {l, our_buf + b}; } /** Finish appending data to the log. 
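The lock_lsn() code above treats the most significant bit of log_sys.buf_free as a lock flag and spins with a configurable delay (innodb_log_spin_wait_delay) instead of taking lsn_lock. Below is a minimal standalone sketch of that MSB-as-lock-bit pattern, assuming a plain std::atomic<size_t> and a naive spin; it deliberately omits the BTS fast path, the adaptive back-off, and the append_prepare_wait() interaction of the real code, and all names in it are illustrative.

    // Sketch only: most-significant-bit-as-lock-flag on an atomic word.
    #include <atomic>
    #include <cstddef>

    static constexpr size_t LOCK_BIT = ~(~size_t{0} >> 1); // most significant bit

    static std::atomic<size_t> value{0}; // low bits: payload (e.g. a buffer offset)

    // Acquire: set the lock bit; return the payload that was stored.
    static size_t lock_value()
    {
      for (;;) {
        size_t v = value.fetch_or(LOCK_BIT, std::memory_order_acquire);
        if (!(v & LOCK_BIT))
          return v;                 // we set the bit; v is the unlocked payload
        while (value.load(std::memory_order_relaxed) & LOCK_BIT)
          ;                         // naive spin; the real code paces this loop
      }
    }

    // Release: publish a new payload (< LOCK_BIT), which also clears the lock bit.
    static void unlock_value(size_t new_payload)
    {
      value.store(new_payload, std::memory_order_release);
    }

    int main()
    {
      size_t old_off = lock_value();   // e.g. reserve space in a log buffer
      unlock_value(old_off + 100);     // advance the offset and release
      return 0;
    }
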
@@ -942,9 +1056,7 @@ std::pair log_t::append_prepare(size_t size, bool ex) noexcept @return whether buf_flush_ahead() will have to be invoked */ static mtr_t::page_flush_ahead log_close(lsn_t lsn) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_locked()); -#endif + ut_ad(log_sys.latch_have_any()); const lsn_t checkpoint_age= lsn - log_sys.last_checkpoint_lsn; @@ -1009,9 +1121,7 @@ std::pair mtr_t::do_write() ut_ad(!recv_no_log_write); ut_ad(is_logged()); ut_ad(m_log.size()); -#ifndef SUX_LOCK_GENERIC - ut_ad(!m_latch_ex || log_sys.latch.is_write_locked()); -#endif + ut_ad(!m_latch_ex || log_sys.latch_have_wr()); #ifndef DBUG_OFF do @@ -1069,9 +1179,7 @@ func_exit: inline void log_t::resize_write(lsn_t lsn, const byte *end, size_t len, size_t seq) noexcept { -#ifndef SUX_LOCK_GENERIC - ut_ad(latch.is_locked()); -#endif + ut_ad(latch_have_any()); if (UNIV_LIKELY_NULL(resize_buf)) { @@ -1176,50 +1284,47 @@ inline void log_t::resize_write(lsn_t lsn, const byte *end, size_t len, } } +template std::pair -mtr_t::finish_write(size_t len) +mtr_t::finish_writer(mtr_t *mtr, size_t len) { + ut_ad(log_sys.is_latest()); ut_ad(!recv_no_log_write); - ut_ad(is_logged()); -#ifndef SUX_LOCK_GENERIC -# ifndef _WIN32 // there is no accurate is_write_locked() on SRWLOCK - ut_ad(m_latch_ex == log_sys.latch.is_write_locked()); -# endif -#endif + ut_ad(mtr->is_logged()); + ut_ad(mtr->m_latch_ex ? log_sys.latch_have_wr() : log_sys.latch_have_rd()); - const size_t size{m_commit_lsn ? 5U + 8U : 5U}; - std::pair start; + const size_t size{mtr->m_commit_lsn ? 5U + 8U : 5U}; + std::pair start= + log_sys.append_prepare(len, mtr->m_latch_ex); - if (!log_sys.is_pmem()) + if (!pmem) { - start= log_sys.append_prepare(len, m_latch_ex); - m_log.for_each_block([&start](const mtr_buf_t::block_t *b) + mtr->m_log.for_each_block([&start](const mtr_buf_t::block_t *b) { log_sys.append(start.second, b->begin(), b->used()); return true; }); #ifdef HAVE_PMEM write_trailer: #endif *start.second++= log_sys.get_sequence_bit(start.first + len - size); - if (m_commit_lsn) + if (mtr->m_commit_lsn) { - mach_write_to_8(start.second, m_commit_lsn); - m_crc= my_crc32c(m_crc, start.second, 8); + mach_write_to_8(start.second, mtr->m_commit_lsn); + mtr->m_crc= my_crc32c(mtr->m_crc, start.second, 8); start.second+= 8; } - mach_write_to_4(start.second, m_crc); + mach_write_to_4(start.second, mtr->m_crc); start.second+= 4; } #ifdef HAVE_PMEM else { - start= log_sys.append_prepare(len, m_latch_ex); if (UNIV_LIKELY(start.second + len <= &log_sys.buf[log_sys.file_size])) { - m_log.for_each_block([&start](const mtr_buf_t::block_t *b) + mtr->m_log.for_each_block([&start](const mtr_buf_t::block_t *b) { log_sys.append(start.second, b->begin(), b->used()); return true; }); goto write_trailer; } - m_log.for_each_block([&start](const mtr_buf_t::block_t *b) + mtr->m_log.for_each_block([&start](const mtr_buf_t::block_t *b) { size_t size{b->used()}; const size_t size_left(&log_sys.buf[log_sys.file_size] - start.second); @@ -1242,14 +1347,14 @@ mtr_t::finish_write(size_t len) byte tail[5 + 8]; tail[0]= log_sys.get_sequence_bit(start.first + len - size); - if (m_commit_lsn) + if (mtr->m_commit_lsn) { - mach_write_to_8(tail + 1, m_commit_lsn); - m_crc= my_crc32c(m_crc, tail + 1, 8); - mach_write_to_4(tail + 9, m_crc); + mach_write_to_8(tail + 1, mtr->m_commit_lsn); + mtr->m_crc= my_crc32c(mtr->m_crc, tail + 1, 8); + mach_write_to_4(tail + 9, mtr->m_crc); } else - mach_write_to_4(tail + 1, m_crc); + mach_write_to_4(tail + 1, mtr->m_crc); 
::memcpy(start.second, tail, size_left); ::memcpy(log_sys.buf + log_sys.START_OFFSET, tail + size_left, @@ -1258,12 +1363,14 @@ mtr_t::finish_write(size_t len) ((size >= size_left) ? log_sys.START_OFFSET : log_sys.file_size) + (size - size_left); } +#else + static_assert(!pmem, ""); #endif log_sys.resize_write(start.first, start.second, len, size); - m_commit_lsn= start.first + len; - return {start.first, log_close(m_commit_lsn)}; + mtr->m_commit_lsn= start.first + len; + return {start.first, log_close(mtr->m_commit_lsn)}; } bool mtr_t::have_x_latch(const buf_block_t &block) const diff --git a/storage/innobase/rem/rem0rec.cc b/storage/innobase/rem/rem0rec.cc index ca81c910c69..46dcfb1ba83 100644 --- a/storage/innobase/rem/rem0rec.cc +++ b/storage/innobase/rem/rem0rec.cc @@ -427,7 +427,7 @@ start: } if (!field->fixed_len - || (format == REC_LEAF_TEMP + || (format <= REC_LEAF_TEMP_INSTANT && !dict_col_get_fixed_size(col, true))) { /* Variable-length field: read the length */ len = *lens--; diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 9e1867c3d3c..e065dccdd58 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -604,7 +604,7 @@ protected: if (m_xdes != 0) { const xdes_t* xdesc = xdes(page_no, m_xdes); - ulint pos = page_no % FSP_EXTENT_SIZE; + uint32_t pos = page_no % FSP_EXTENT_SIZE; return xdes_is_free(xdesc, pos); } @@ -4501,7 +4501,7 @@ static void row_import_autoinc(dict_table_t *table, row_prebuilt_t *prebuilt, @param new_pos position value @param trx transaction @return DB_SUCCESS or error code */ -dberr_t update_vcol_pos(ulint table_id, ulint new_pos, trx_t *trx) +dberr_t update_vcol_pos(table_id_t table_id, ulint new_pos, trx_t *trx) { pars_info_t *info= pars_info_create(); pars_info_add_ull_literal(info, "id", table_id); diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 770675c5e43..2c75a12d5fb 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -2568,12 +2568,6 @@ row_ins_index_entry_big_rec( return(error); } -#ifdef HAVE_REPLICATION /* Working around MDEV-24622 */ -extern "C" int thd_is_slave(const MYSQL_THD thd); -#else -# define thd_is_slave(thd) 0 -#endif - #if defined __aarch64__&&defined __GNUC__&&__GNUC__==4&&!defined __clang__ /* Avoid GCC 4.8.5 internal compiler error due to srw_mutex::wr_unlock(). 
We would only need this for row_ins_clust_index_entry_low(), @@ -2724,8 +2718,7 @@ err_exit: && !index->table->n_rec_locks && !index->table->is_active_ddl() && !index->table->has_spatial_index() - && !index->table->versioned() - && !thd_is_slave(trx->mysql_thd) /* FIXME: MDEV-24622 */) { + && !index->table->versioned()) { DEBUG_SYNC_C("empty_root_page_insert"); trx->bulk_insert = true; diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 2bffcddaee0..d54d452b691 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -694,6 +694,7 @@ handle_new_error: DBUG_RETURN(true); case DB_DEADLOCK: + case DB_RECORD_CHANGED: case DB_LOCK_TABLE_FULL: rollback: /* Roll back the whole transaction; this resolution was added @@ -1584,7 +1585,8 @@ init_fts_doc_id_for_ref( for (dict_foreign_t* foreign : table->referenced_set) { ut_ad(foreign->foreign_table); - if (foreign->foreign_table->fts) { + if (foreign->foreign_table->space + && foreign->foreign_table->fts) { fts_init_doc_id(foreign->foreign_table); } diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 916a5dde15c..6d4aed377b3 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -864,6 +864,11 @@ row_sel_build_committed_vers_for_mysql( column version if any */ mtr_t* mtr) /*!< in: mtr */ { + if (prebuilt->trx->snapshot_isolation) { + *old_vers = rec; + return; + } + if (prebuilt->old_vers_heap) { mem_heap_empty(prebuilt->old_vers_heap); } else { diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index bcc598e6523..38d19882de2 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -189,7 +189,7 @@ row_undo_mod_clust_low( @param[in] rec clustered index record @param[in] index clustered index @return the byte offset of DB_TRX_ID, from the start of rec */ -static ulint row_trx_id_offset(const rec_t* rec, const dict_index_t* index) +ulint row_trx_id_offset(const rec_t* rec, const dict_index_t* index) { ut_ad(index->n_uniq <= MAX_REF_PARTS); ulint trx_id_offset = index->trx_id_offset; diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index 2bc1c2e6922..2a22403e125 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -363,11 +363,6 @@ static monitor_info_t innodb_counter_info[] = MONITOR_NONE, MONITOR_DEFAULT_START, MONITOR_LRU_GET_FREE_LOOPS}, - {"buffer_LRU_get_free_waits", "buffer", - "Total sleep waits in LRU get free.", - MONITOR_NONE, - MONITOR_DEFAULT_START, MONITOR_LRU_GET_FREE_WAITS}, - {"buffer_flush_avg_page_rate", "buffer", "Average number of pages at which flushing is happening", MONITOR_NONE, @@ -471,11 +466,6 @@ static monitor_info_t innodb_counter_info[] = MONITOR_EXISTING | MONITOR_DEFAULT_ON), MONITOR_DEFAULT_START, MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE}, - {"buffer_LRU_single_flush_failure_count", "Buffer", - "Number of times attempt to flush a single page from LRU failed", - MONITOR_NONE, - MONITOR_DEFAULT_START, MONITOR_LRU_SINGLE_FLUSH_FAILURE_COUNT}, - {"buffer_LRU_get_free_search", "Buffer", "Number of searches performed for a clean page", MONITOR_NONE, diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index e123fcf9a8d..3c90186374c 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -91,10 +91,6 @@ Created 2/16/1996 Heikki Tuuri #include "zlib.h" #include "log.h" -/** We are prepared for a situation that we have 
this many threads waiting for -a transactional lock inside InnoDB. srv_start() sets the value. */ -ulint srv_max_n_threads; - /** Log sequence number at shutdown */ lsn_t srv_shutdown_lsn; @@ -1243,12 +1239,6 @@ dberr_t srv_start(bool create_new_db) mysql_stage_register("innodb", srv_stages, static_cast(UT_ARR_SIZE(srv_stages))); - srv_max_n_threads = - 1 /* dict_stats_thread */ - + 1 /* fts_optimize_thread */ - + 128 /* safety margin */ - + max_connections; - srv_boot(); ib::info() << my_crc32c_implementation(); @@ -1634,6 +1624,24 @@ dberr_t srv_start(bool create_new_db) fil_system.space_id_reuse_warned = false; + if (srv_operation > SRV_OPERATION_EXPORT_RESTORED) { + ut_ad(srv_operation == SRV_OPERATION_RESTORE_EXPORT + || srv_operation == SRV_OPERATION_RESTORE); + return(err); + } + + /* Upgrade or resize or rebuild the redo logs before + generating any dirty pages, so that the old redo log + file will not be written to. */ + + err = srv_log_rebuild_if_needed(); + + if (err != DB_SUCCESS) { + return srv_init_abort(err); + } + + recv_sys.debug_free(); + if (!srv_read_only_mode) { const uint32_t flags = FSP_FLAGS_PAGE_SSIZE(); for (uint32_t id = srv_undo_space_id_start; @@ -1718,23 +1726,6 @@ dberr_t srv_start(bool create_new_db) return(srv_init_abort(DB_ERROR)); } } - - if (srv_operation > SRV_OPERATION_EXPORT_RESTORED) { - ut_ad(srv_operation == SRV_OPERATION_RESTORE_EXPORT - || srv_operation == SRV_OPERATION_RESTORE); - return(err); - } - - /* Upgrade or resize or rebuild the redo logs before - generating any dirty pages, so that the old redo log - file will not be written to. */ - err = srv_log_rebuild_if_needed(); - - if (err != DB_SUCCESS) { - return(srv_init_abort(err)); - } - - recv_sys.debug_free(); } ut_ad(err == DB_SUCCESS); diff --git a/storage/innobase/trx/trx0rseg.cc b/storage/innobase/trx/trx0rseg.cc index d0231d54090..a9df0dc8e17 100644 --- a/storage/innobase/trx/trx0rseg.cc +++ b/storage/innobase/trx/trx0rseg.cc @@ -210,6 +210,11 @@ bool trx_rseg_read_wsrep_checkpoint(const buf_block_t *rseg_header, XID &xid) @return whether the WSREP XID is present */ static bool trx_rseg_init_wsrep_xid(const page_t* page, XID& xid) { + if (memcmp(TRX_SYS + TRX_SYS_WSREP_XID_INFO + page, + field_ref_zero, TRX_SYS_WSREP_XID_LEN) == 0) { + return false; + } + if (mach_read_from_4(TRX_SYS + TRX_SYS_WSREP_XID_INFO + TRX_SYS_WSREP_XID_MAGIC_N_FLD + page) @@ -565,10 +570,6 @@ static void trx_rseg_init_binlog_info(const page_t* page) + TRX_SYS + page); trx_sys.recovered_binlog_is_legacy_pos= true; } - -#ifdef WITH_WSREP - trx_rseg_init_wsrep_xid(page, trx_sys.recovered_wsrep_xid); -#endif } /** Initialize or recover the rollback segments at startup. */ @@ -600,7 +601,11 @@ dberr_t trx_rseg_array_init() + sys->page.frame); trx_rseg_init_binlog_info(sys->page.frame); #ifdef WITH_WSREP - wsrep_sys_xid.set(&trx_sys.recovered_wsrep_xid); + if (trx_rseg_init_wsrep_xid( + sys->page.frame, trx_sys.recovered_wsrep_xid)) { + wsrep_sys_xid.set( + &trx_sys.recovered_wsrep_xid); + } #endif } @@ -665,7 +670,7 @@ dberr_t trx_rseg_array_init() } #ifdef WITH_WSREP - if (!wsrep_sys_xid.is_null()) { + if (srv_operation == SRV_OPERATION_NORMAL && !wsrep_sys_xid.is_null()) { /* Upgrade from a version prior to 10.3.5, where WSREP XID was stored in TRX_SYS page. 
If no rollback segment has a WSREP XID set, diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 7d21e34dfac..c16d704c69e 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -413,7 +413,7 @@ void trx_t::free() read_view.mem_noaccess(); MEM_NOACCESS(&lock, sizeof lock); MEM_NOACCESS(&op_info, sizeof op_info + - sizeof(unsigned) /* isolation_level, + sizeof(unsigned) /* isolation_level, snapshot_isolation, check_foreigns, check_unique_secondary, bulk_insert */); MEM_NOACCESS(&is_registered, sizeof is_registered); diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index cdcec63ef2a..5b3bc185779 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -312,14 +312,16 @@ ut_strerr( return("Lock wait"); case DB_DEADLOCK: return("Deadlock"); + case DB_RECORD_CHANGED: + return("Record changed"); +#ifdef WITH_WSREP case DB_ROLLBACK: return("Rollback"); +#endif case DB_DUPLICATE_KEY: return("Duplicate key"); case DB_MISSING_HISTORY: return("Required history data has been deleted"); - case DB_CLUSTER_NOT_FOUND: - return("Cluster not found"); case DB_TABLE_NOT_FOUND: return("Table not found"); case DB_TOO_BIG_RECORD: diff --git a/storage/maria/aria_chk.c b/storage/maria/aria_chk.c index 62f794a6291..b7ce90bb40a 100644 --- a/storage/maria/aria_chk.c +++ b/storage/maria/aria_chk.c @@ -146,7 +146,8 @@ int main(int argc, char **argv) { if ((ma_control_file_open(FALSE, opt_require_control_file || !(check_param.testflag & T_SILENT), - TRUE))) + TRUE, + control_file_open_flags))) { if (opt_require_control_file || (opt_transaction_logging && (check_param.testflag & T_REP_ANY))) diff --git a/storage/maria/aria_pack.c b/storage/maria/aria_pack.c index eab4d512e8b..ee694931a6d 100644 --- a/storage/maria/aria_pack.c +++ b/storage/maria/aria_pack.c @@ -241,7 +241,8 @@ int main(int argc, char **argv) if (!opt_ignore_control_file && (no_control_file= ma_control_file_open(FALSE, (opt_require_control_file || - !silent), FALSE)) && + !silent), FALSE, + control_file_open_flags)) && opt_require_control_file) { error= 1; diff --git a/storage/maria/aria_read_log.c b/storage/maria/aria_read_log.c index 85a6f4a5e97..cde28e91c0a 100644 --- a/storage/maria/aria_read_log.c +++ b/storage/maria/aria_read_log.c @@ -104,7 +104,7 @@ int main(int argc, char **argv) goto end; } /* we don't want to create a control file, it MUST exist */ - if (ma_control_file_open(FALSE, TRUE, TRUE)) + if (ma_control_file_open(FALSE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't open control file (%d)\n", errno); goto err; diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 991bc73f3b8..333af2c2ffb 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -38,6 +38,7 @@ C_MODE_START #include "ma_recovery.h" C_MODE_END #include "ma_trnman.h" +#include "ma_loghandler.h" //#include "sql_priv.h" #include "protocol.h" @@ -3381,6 +3382,8 @@ int ha_maria::create(const char *name, TABLE *table_arg, if (ha_create_info->tmp_table()) { create_flags|= HA_CREATE_TMP_TABLE | HA_CREATE_DELAY_KEY_WRITE; + if (ha_create_info->options & HA_LEX_CREATE_GLOBAL_TMP_TABLE) + create_flags|= HA_CREATE_GLOBAL_TMP_TABLE; create_info.transactional= 0; } if (ha_create_info->options & HA_CREATE_KEEP_FILES) @@ -3919,7 +3922,8 @@ static int ha_maria_init(void *p) if (!aria_readonly) res= maria_upgrade(); res= res || maria_init(); - tmp= ma_control_file_open(!aria_readonly, !aria_readonly, !aria_readonly); + tmp= 
ma_control_file_open(!aria_readonly, !aria_readonly, !aria_readonly, + control_file_open_flags); res= res || aria_readonly ? tmp == CONTROL_FILE_LOCKED : tmp != 0; res= res || ((force_start_after_recovery_failures != 0 && !aria_readonly) && diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c index ec1b0955655..dfd1cf9a4cb 100644 --- a/storage/maria/ma_bitmap.c +++ b/storage/maria/ma_bitmap.c @@ -232,7 +232,7 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file, uint max_page_size; MARIA_FILE_BITMAP *bitmap= &share->bitmap; uint size= share->block_size; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); + myf flag= MY_WME | share->malloc_flag; pgcache_page_no_t first_bitmap_with_space; #ifndef DBUG_OFF /* We want to have a copy of the bitmap to be able to print differences */ diff --git a/storage/maria/ma_blockrec.c b/storage/maria/ma_blockrec.c index bc1d44d42a3..561cc324ed1 100644 --- a/storage/maria/ma_blockrec.c +++ b/storage/maria/ma_blockrec.c @@ -488,7 +488,7 @@ my_bool _ma_init_block_record(MARIA_HA *info) { MARIA_ROW *row= &info->cur_row, *new_row= &info->new_row; MARIA_SHARE *share= info->s; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); + myf flag= MY_WME | share->malloc_flag; uint default_extents; DBUG_ENTER("_ma_init_block_record"); @@ -2654,7 +2654,6 @@ static my_bool write_block_record(MARIA_HA *info, LSN lsn; my_off_t position; uint save_my_errno; - myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("write_block_record"); head_block= bitmap_blocks->block; @@ -2721,7 +2720,7 @@ static my_bool write_block_record(MARIA_HA *info, for every data segment we want to store. */ if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, - row->head_length, myflag)) + row->head_length, MY_WME | share->malloc_flag)) DBUG_RETURN(1); tmp_data_used= 0; /* Either 0 or last used uchar in 'data' */ @@ -4750,7 +4749,7 @@ int _ma_read_block_record2(MARIA_HA *info, uchar *record, MARIA_EXTENT_CURSOR extent; MARIA_COLUMNDEF *column, *end_column; MARIA_ROW *cur_row= &info->cur_row; - myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); + myf myflag= MY_WME | share->malloc_flag; DBUG_ENTER("_ma_read_block_record2"); start_of_data= data; @@ -5089,7 +5088,6 @@ static my_bool read_row_extent_info(MARIA_HA *info, uchar *buff, uint flag, row_extents, row_extents_size; uint field_lengths __attribute__ ((unused)); uchar *extents, *end; - myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("read_row_extent_info"); if (!(data= get_record_position(share, buff, @@ -5113,7 +5111,7 @@ static my_bool read_row_extent_info(MARIA_HA *info, uchar *buff, if (info->cur_row.extents_buffer_length < row_extents_size && _ma_alloc_buffer(&info->cur_row.extents, &info->cur_row.extents_buffer_length, - row_extents_size, myflag)) + row_extents_size, MY_WME | share->malloc_flag)) DBUG_RETURN(1); memcpy(info->cur_row.extents, data, ROW_EXTENT_SIZE); data+= ROW_EXTENT_SIZE; @@ -5283,8 +5281,7 @@ my_bool _ma_cmp_block_unique(MARIA_HA *info, MARIA_UNIQUEDEF *def, my_bool _ma_scan_init_block_record(MARIA_HA *info) { MARIA_SHARE *share= info->s; - myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); - my_bool res; + myf flag= MY_WME | share->malloc_flag; DBUG_ENTER("_ma_scan_init_block_record"); DBUG_ASSERT(info->dfile.file == share->bitmap.file.file); @@ -5311,8 +5308,7 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info) _ma_scan_block_record()), we may miss recently inserted rows (bitmap page in page cache would be too old). */ - res= _ma_bitmap_flush(info->s); - DBUG_RETURN(res); + DBUG_RETURN(_ma_bitmap_flush(info->s)); } diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index f30e2c78ffc..5e6644ac9f3 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -125,6 +125,7 @@ void maria_chk_init(HA_CHECK *param) param->max_stage= 1; param->stack_end_ptr= &my_thread_var->stack_ends_here; param->max_allowed_lsn= (LSN) ~0ULL; + /* Flag when initializing buffers possibly used by parallel repair threads */ param->malloc_flags= MY_THREAD_SPECIFIC; } @@ -1305,7 +1306,6 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend, ulong UNINIT_VAR(left_length); uint b_type; char llbuff[22],llbuff2[22],llbuff3[22]; - myf myflag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("check_dynamic_record"); pos= 0; @@ -1413,7 +1413,8 @@ static int check_dynamic_record(HA_CHECK *param, MARIA_HA *info, int extend, { if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, block_info.rec_len + - share->base.extra_rec_buff_size, myflag)) + share->base.extra_rec_buff_size, + MY_WME | share->malloc_flag)) { _ma_check_print_error(param, @@ -2130,7 +2131,7 @@ int maria_chk_data_link(HA_CHECK *param, MARIA_HA *info, my_bool extend) if (!(record= (uchar*) my_malloc(PSI_INSTRUMENT_ME, share->base.default_rec_buff_size, - MYF(param->malloc_flags)))) + MYF(MY_THREAD_SPECIFIC)))) { _ma_check_print_error(param,"Not enough memory for record"); DBUG_RETURN(-1); } @@ -2766,10 +2767,10 @@ int maria_repair(HA_CHECK *param, register MARIA_HA *info, if (!(sort_param.record= (uchar *) my_malloc(PSI_INSTRUMENT_ME, (uint) share->base.default_rec_buff_size, - MYF(param->malloc_flags))) || + MYF(MY_THREAD_SPECIFIC))) || _ma_alloc_buffer(&sort_param.rec_buff, &sort_param.rec_buff_size, share->base.default_rec_buff_size, - MYF(param->malloc_flags))) + MYF(MY_THREAD_SPECIFIC))) { _ma_check_print_error(param, "Not enough memory for extra record"); goto err; } @@ -3728,7 +3729,7 @@ int maria_filecopy(HA_CHECK *param, File to,File from,my_off_t start, buff_length=(ulong) MY_MIN(param->write_buffer_length,length); if (!(buff=my_malloc(PSI_INSTRUMENT_ME, buff_length, - MYF(param->malloc_flags)))) + MYF(MY_THREAD_SPECIFIC)))) { buff=tmp_buff; buff_length=IO_SIZE; } @@ -3874,10 +3875,10 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info, if (!(sort_param.record= (uchar*) my_malloc(PSI_INSTRUMENT_ME, (size_t) share->base.default_rec_buff_size, - MYF(param->malloc_flags))) || + MYF(MY_THREAD_SPECIFIC))) || _ma_alloc_buffer(&sort_param.rec_buff, &sort_param.rec_buff_size, share->base.default_rec_buff_size, - MYF(param->malloc_flags))) + MYF(MY_THREAD_SPECIFIC))) { _ma_check_print_error(param, "Not enough memory for extra record"); goto err; } @@ -3896,7 +3897,7 @@ int maria_repair_by_sort(HA_CHECK *param, register MARIA_HA *info, sort_param.wordlist=NULL; init_alloc_root(PSI_INSTRUMENT_ME, &sort_param.wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, - MYF(param->malloc_flags)); + MYF(MY_THREAD_SPECIFIC)); sort_param.key_cmp=sort_key_cmp; sort_param.lock_in_memory=maria_lock_memory; @@ -4464,7 +4465,7 @@ int maria_repair_parallel(HA_CHECK 
*param, register MARIA_HA *info, if (!(sort_param=(MARIA_SORT_PARAM *) my_malloc(PSI_INSTRUMENT_ME, (uint) share->base.keys * (sizeof(MARIA_SORT_PARAM) + share->base.pack_reclength), - MYF(MY_ZEROFILL | param->malloc_flags)))) + MYF(MY_ZEROFILL | MY_THREAD_SPECIFIC)))) { _ma_check_print_error(param,"Not enough memory for key!"); goto err; @@ -4522,9 +4523,10 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, sort_param[i].record= (((uchar *)(sort_param+share->base.keys))+ (share->base.pack_reclength * i)); + /* These buffers are per thread */ if (_ma_alloc_buffer(&sort_param[i].rec_buff, &sort_param[i].rec_buff_size, share->base.default_rec_buff_size, - MYF(param->malloc_flags))) + MYF(0))) { _ma_check_print_error(param,"Not enough memory!"); goto err; @@ -4553,7 +4555,7 @@ int maria_repair_parallel(HA_CHECK *param, register MARIA_HA *info, sort_param[i].key_length+=ft_max_word_len_for_sort-HA_FT_MAXBYTELEN; init_alloc_root(PSI_INSTRUMENT_ME, &sort_param[i].wordroot, FTPARSER_MEMROOT_ALLOC_SIZE, 0, - MYF(param->malloc_flags)); + MYF(MY_THREAD_SPECIFIC)); } } sort_info.total_keys=i; @@ -6112,7 +6114,7 @@ static MA_SORT_KEY_BLOCKS *alloc_key_blocks(HA_CHECK *param, uint blocks, if (!(block= (MA_SORT_KEY_BLOCKS*) my_malloc(PSI_INSTRUMENT_ME, (sizeof(MA_SORT_KEY_BLOCKS)+buffer_length+IO_SIZE)*blocks, - MYF(param->malloc_flags)))) + MYF(MY_THREAD_SPECIFIC)))) { _ma_check_print_error(param,"Not enough memory for sort-key-blocks"); return(0); diff --git a/storage/maria/ma_control_file.c b/storage/maria/ma_control_file.c index 237b75b99b7..a90a12612d8 100644 --- a/storage/maria/ma_control_file.c +++ b/storage/maria/ma_control_file.c @@ -272,7 +272,8 @@ static int lock_control_file(const char *name, my_bool do_retry) CONTROL_FILE_ERROR ma_control_file_open(my_bool create_if_missing, my_bool print_error, - my_bool wait_for_lock) + my_bool wait_for_lock, + int open_flags) { uchar buffer[CF_MAX_SIZE]; char name[FN_REFLEN], errmsg_buff[256]; @@ -280,7 +281,6 @@ CONTROL_FILE_ERROR ma_control_file_open(my_bool create_if_missing, " file is probably in use by another process"; uint new_cf_create_time_size, new_cf_changeable_size, new_block_size; my_off_t file_size; - int open_flags= O_BINARY | /*O_DIRECT |*/ O_RDWR | O_CLOEXEC; int error= CONTROL_FILE_UNKNOWN_ERROR; DBUG_ENTER("ma_control_file_open"); @@ -460,6 +460,15 @@ err: DBUG_RETURN(error); } +/* + The most common way to open the control file when writing tests +*/ + +CONTROL_FILE_ERROR ma_control_file_open_or_create() +{ + return ma_control_file_open(TRUE, TRUE, TRUE, + control_file_open_flags); +} /* Write information durably to the control file; stores this information into @@ -630,7 +639,7 @@ my_bool print_aria_log_control() int error= CONTROL_FILE_UNKNOWN_ERROR; uint recovery_fails; File file; - DBUG_ENTER("ma_control_file_open"); + DBUG_ENTER("print_aria_log_control"); if (fn_format(name, CONTROL_FILE_BASE_NAME, maria_data_root, "", MYF(MY_WME)) == NullS) diff --git a/storage/maria/ma_control_file.h b/storage/maria/ma_control_file.h index c74957b8322..35ad4a671ea 100644 --- a/storage/maria/ma_control_file.h +++ b/storage/maria/ma_control_file.h @@ -68,10 +68,13 @@ typedef enum enum_control_file_error { CONTROL_FILE_ERROR ma_control_file_open(my_bool create_if_missing, my_bool print_error, - my_bool wait_for_lock); + my_bool wait_for_lock, + int open_flags); int ma_control_file_write_and_force(LSN last_checkpoint_lsn_arg, uint32 last_logno_arg, TrID max_trid_arg, uint8 recovery_failures_arg); +/* For simple programs that 
create Aria files */ +CONTROL_FILE_ERROR ma_control_file_open_or_create(); int ma_control_file_end(void); my_bool ma_control_file_inited(void); my_bool print_aria_log_control(void); diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c index 7fd739d13a8..9ce48ae9e7f 100644 --- a/storage/maria/ma_create.c +++ b/storage/maria/ma_create.c @@ -101,7 +101,7 @@ int maria_create(const char *name, enum data_file_type datafile_type, DBUG_ASSERT(maria_inited); - if (flags & HA_CREATE_TMP_TABLE) + if ((flags & HA_CREATE_TMP_TABLE) && !(flags & HA_CREATE_GLOBAL_TMP_TABLE)) common_flag|= MY_THREAD_SPECIFIC; if (!ci) diff --git a/storage/maria/ma_dynrec.c b/storage/maria/ma_dynrec.c index 33f238d9754..fed1bf411f4 100644 --- a/storage/maria/ma_dynrec.c +++ b/storage/maria/ma_dynrec.c @@ -1488,7 +1488,6 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf, uchar *UNINIT_VAR(to); uint UNINIT_VAR(left_length); MARIA_SHARE *share= info->s; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("_ma_read_dynamic_record"); if (filepos == HA_OFFSET_ERROR) @@ -1525,7 +1524,8 @@ int _ma_read_dynamic_record(MARIA_HA *info, uchar *buf, { if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, block_info.rec_len + - share->base.extra_rec_buff_size, flag)) + share->base.extra_rec_buff_size, + MY_WME | share->malloc_flag)) goto err; } to= info->rec_buff; @@ -1784,7 +1784,6 @@ int _ma_read_rnd_dynamic_record(MARIA_HA *info, uchar *UNINIT_VAR(to); MARIA_BLOCK_INFO block_info; MARIA_SHARE *share= info->s; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); DBUG_ENTER("_ma_read_rnd_dynamic_record"); #ifdef MARIA_EXTERNAL_LOCKING @@ -1875,7 +1874,8 @@ int _ma_read_rnd_dynamic_record(MARIA_HA *info, { if (_ma_alloc_buffer(&info->rec_buff, &info->rec_buff_size, block_info.rec_len + - share->base.extra_rec_buff_size, flag)) + share->base.extra_rec_buff_size, + MY_WME | share->malloc_flag)) goto err; } to= info->rec_buff; diff --git a/storage/maria/ma_extra.c b/storage/maria/ma_extra.c index 087100e3d8c..08f62f31507 100644 --- a/storage/maria/ma_extra.c +++ b/storage/maria/ma_extra.c @@ -551,7 +551,7 @@ int maria_reset(MARIA_HA *info) { int error= 0; MARIA_SHARE *share= info->s; - myf flag= MY_WME | (share->temporary ? 
MY_THREAD_SPECIFIC : 0); + myf flag= MY_WME | share->malloc_flag; DBUG_ENTER("maria_reset"); /* Free buffers and reset the following flags: diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index d62ec3c8c89..99a7e2099b7 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -1099,10 +1099,6 @@ static TRANSLOG_FILE *get_current_logfile() uchar maria_trans_file_magic[]= { (uchar) 254, (uchar) 254, (uchar) 11, '\001', 'M', 'A', 'R', 'I', 'A', 'L', 'O', 'G' }; -#define LOG_HEADER_DATA_SIZE (sizeof(maria_trans_file_magic) + \ - 8 + 4 + 4 + 4 + 2 + 3 + \ - LSN_STORE_SIZE) - /* Write log file page header in the just opened new log file diff --git a/storage/maria/ma_loghandler.h b/storage/maria/ma_loghandler.h index abe85a12727..02d7b747d09 100644 --- a/storage/maria/ma_loghandler.h +++ b/storage/maria/ma_loghandler.h @@ -538,5 +538,13 @@ typedef enum } enum_maria_sync_log_dir; extern ulong sync_log_dir; +/* sizeof(maria_trans_file_magic) */ +#define LOG_MAGIC_SIZE 12 +#define LOG_HEADER_DATA_SIZE (LOG_MAGIC_SIZE + \ + 8 + 4 + 4 + 4 + 2 + 3 + \ + LSN_STORE_SIZE) +/* Flags when creating aria_log_control */ +#define control_file_open_flags (O_BINARY | /*O_DIRECT |*/ O_RDWR | O_CLOEXEC) + C_MODE_END #endif diff --git a/storage/maria/ma_open.c b/storage/maria/ma_open.c index ad98a534393..8e311ee715c 100644 --- a/storage/maria/ma_open.c +++ b/storage/maria/ma_open.c @@ -94,7 +94,7 @@ static MARIA_HA *maria_clone_internal(MARIA_SHARE *share, uint errpos; MARIA_HA info,*m_info; my_bitmap_map *changed_fields_bitmap; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); + myf flag= MY_WME | share->malloc_flag; DBUG_ENTER("maria_clone_internal"); errpos= 0; @@ -266,7 +266,9 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags, uint i,j,len,errpos,head_length,base_pos,keys, realpath_err, key_parts,base_key_parts,unique_key_parts,fulltext_keys,uniques; uint internal_table= MY_TEST(open_flags & HA_OPEN_INTERNAL_TABLE); - myf common_flag= open_flags & HA_OPEN_TMP_TABLE ? MY_THREAD_SPECIFIC : 0; + myf common_flag= (((open_flags & HA_OPEN_TMP_TABLE) && + !(open_flags & HA_OPEN_GLOBAL_TMP_TABLE)) ? + MY_THREAD_SPECIFIC : 0); uint file_version; size_t info_length; char name_buff[FN_REFLEN], org_name[FN_REFLEN], index_name[FN_REFLEN], @@ -986,9 +988,10 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags, if (open_flags & HA_OPEN_TMP_TABLE || share->options & HA_OPTION_TMP_TABLE) { - common_flag|= MY_THREAD_SPECIFIC; share->options|= HA_OPTION_TMP_TABLE; share->temporary= share->delay_key_write= 1; + share->malloc_flag= + (open_flags & HA_OPEN_GLOBAL_TMP_TABLE) ? 0 : MY_THREAD_SPECIFIC; share->write_flag=MYF(MY_NABP); share->w_locks++; /* We don't have to update status */ share->tot_locks++; @@ -2046,9 +2049,8 @@ void _ma_set_index_pagecache_callbacks(PAGECACHE_FILE *file, int _ma_open_datafile(MARIA_HA *info, MARIA_SHARE *share) { - myf flags= (share->mode & O_NOFOLLOW) ? MY_NOSYMLINKS | MY_WME : MY_WME; - if (share->temporary) - flags|= MY_THREAD_SPECIFIC; + myf flags= ((share->mode & O_NOFOLLOW) ? 
MY_NOSYMLINKS | MY_WME : MY_WME) | + share->malloc_flag; DEBUG_SYNC_C("mi_open_datafile"); info->dfile.file= share->bitmap.file.file= mysql_file_open(key_file_dfile, share->data_file_name.str, diff --git a/storage/maria/ma_packrec.c b/storage/maria/ma_packrec.c index 19783423ab5..57926ee49a7 100644 --- a/storage/maria/ma_packrec.c +++ b/storage/maria/ma_packrec.c @@ -1417,7 +1417,6 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, uchar *header= info->header; uint head_length,UNINIT_VAR(ref_length); MARIA_SHARE *share= maria->s; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); if (file >= 0) { @@ -1444,7 +1443,8 @@ uint _ma_pack_get_block_info(MARIA_HA *maria, MARIA_BIT_BUFF *bit_buff, */ if (_ma_alloc_buffer(rec_buff_p, rec_buff_size_p, info->rec_len + info->blob_len + - share->base.extra_rec_buff_size, flag)) + share->base.extra_rec_buff_size, + MY_WME | share->malloc_flag)) return BLOCK_FATAL_ERROR; /* not enough memory */ bit_buff->blob_pos= *rec_buff_p + info->rec_len; bit_buff->blob_end= bit_buff->blob_pos + info->blob_len; @@ -1586,7 +1586,6 @@ _ma_mempack_get_block_info(MARIA_HA *maria, uchar *header) { MARIA_SHARE *share= maria->s; - myf flag= MY_WME | (share->temporary ? MY_THREAD_SPECIFIC : 0); header+= read_pack_length((uint) share->pack.version, header, &info->rec_len); @@ -1596,7 +1595,8 @@ _ma_mempack_get_block_info(MARIA_HA *maria, &info->blob_len); /* _ma_alloc_rec_buff sets my_errno on error */ if (_ma_alloc_buffer(rec_buff_p, rec_buff_size_p, - info->blob_len + share->base.extra_rec_buff_size, flag)) + info->blob_len + share->base.extra_rec_buff_size, + MY_WME | share->malloc_flag)) return 0; /* not enough memory */ bit_buff->blob_pos= *rec_buff_p; bit_buff->blob_end= *rec_buff_p + info->blob_len; diff --git a/storage/maria/ma_rt_test.c b/storage/maria/ma_rt_test.c index 3af7d93879e..17145231409 100644 --- a/storage/maria/ma_rt_test.c +++ b/storage/maria/ma_rt_test.c @@ -101,7 +101,7 @@ int main(int argc, char *argv[]) if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 16, 0, 0, maria_block_size, 0, MY_WME) == 0) || - ma_control_file_open(TRUE, TRUE, TRUE) || + ma_control_file_open_or_create() || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || diff --git a/storage/maria/ma_test1.c b/storage/maria/ma_test1.c index 22f80ca2d9e..a14679d3a60 100644 --- a/storage/maria/ma_test1.c +++ b/storage/maria/ma_test1.c @@ -81,7 +81,7 @@ int main(int argc,char *argv[]) if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 16, 0, 0, maria_block_size, 0, MY_WME) == 0) || - ma_control_file_open(TRUE, TRUE, TRUE) || + ma_control_file_open_or_create() || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || diff --git a/storage/maria/ma_test2.c b/storage/maria/ma_test2.c index 6628465365f..400e6193695 100644 --- a/storage/maria/ma_test2.c +++ b/storage/maria/ma_test2.c @@ -90,7 +90,7 @@ int main(int argc, char *argv[]) if (maria_init() || (init_pagecache(maria_pagecache, pagecache_size, 0, 0, maria_block_size, 0, MY_WME) == 0) || - ma_control_file_open(TRUE, TRUE, TRUE) || + ma_control_file_open_or_create() || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index 705562eb795..f8398316130 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -761,6 +761,11 
@@ typedef struct st_maria_share ulong max_pack_length; ulong state_diff_length; uint rec_reflength; /* rec_reflength in use now */ + /* + Extra flag to use for my_malloc(); set to MY_THREAD_SPECIFIC for temporary + tables whose memory allocation should be accounted to the current THD. + */ + uint malloc_flag; uint keypage_header; uint32 ftkeys; /* Number of distinct full-text keys + 1 */ diff --git a/storage/maria/test_ma_backup.c b/storage/maria/test_ma_backup.c index c57ec6ece0d..5cb2b074887 100644 --- a/storage/maria/test_ma_backup.c +++ b/storage/maria/test_ma_backup.c @@ -47,7 +47,7 @@ int main(int argc __attribute__((unused)), char *argv[]) if (maria_init() || (init_pagecache(maria_pagecache, maria_block_size * 2000, 0, 0, maria_block_size, 0, MY_WME) == 0) || - ma_control_file_open(TRUE, TRUE, TRUE) || + ma_control_file_open_or_create() || (init_pagecache(maria_log_pagecache, TRANSLOG_PAGECACHE_SIZE, 0, 0, TRANSLOG_PAGE_SIZE, 0, MY_WME) == 0) || diff --git a/storage/maria/unittest/ma_control_file-t.c b/storage/maria/unittest/ma_control_file-t.c index 859d5514ffa..fdbe86de01b 100644 --- a/storage/maria/unittest/ma_control_file-t.c +++ b/storage/maria/unittest/ma_control_file-t.c @@ -114,7 +114,7 @@ static CONTROL_FILE_ERROR local_ma_control_file_open(void) { CONTROL_FILE_ERROR error; error_handler_hook= my_ignore_message; - error= ma_control_file_open(TRUE, TRUE, TRUE); + error= ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags); error_handler_hook= default_error_handler_hook; return error; } diff --git a/storage/maria/unittest/ma_test_loghandler-t.c b/storage/maria/unittest/ma_test_loghandler-t.c index ccda66af755..49184447356 100644 --- a/storage/maria/unittest/ma_test_loghandler-t.c +++ b/storage/maria/unittest/ma_test_loghandler-t.c @@ -197,7 +197,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c index 21f6b7d7b44..5c4045cc659 100644 --- a/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_first_lsn-t.c @@ -66,7 +66,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE,TRUE)) + if (ma_control_file_open(TRUE, TRUE,TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c index 391d785159a..eedeb9d70d4 100644 --- a/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c +++ b/storage/maria/unittest/ma_test_loghandler_max_lsn-t.c @@ -64,7 +64,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c index e8e114dd155..b0e1be8f0d9 100644 --- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c @@ -280,7 +280,7 @@ int main(int argc __attribute__((unused)), char *argv[]) bzero(long_tr_id, 
6); - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); @@ -443,7 +443,7 @@ int main(int argc __attribute__((unused)), char *argv[]) end_pagecache(&pagecache, 1); ma_control_file_end(); - if (ma_control_file_open(TRUE,TRUE,TRUE)) + if (ma_control_file_open(TRUE,TRUE,TRUE, control_file_open_flags)) { fprintf(stderr, "pass2: Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_multithread-t.c b/storage/maria/unittest/ma_test_loghandler_multithread-t.c index be6046abab4..9b7e239b4cd 100644 --- a/storage/maria/unittest/ma_test_loghandler_multithread-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multithread-t.c @@ -331,7 +331,7 @@ int main(int argc __attribute__((unused)), exit(1); } - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_noflush-t.c b/storage/maria/unittest/ma_test_loghandler_noflush-t.c index 46b3a8e71aa..28cac2ee1cf 100644 --- a/storage/maria/unittest/ma_test_loghandler_noflush-t.c +++ b/storage/maria/unittest/ma_test_loghandler_noflush-t.c @@ -65,7 +65,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_nologs-t.c b/storage/maria/unittest/ma_test_loghandler_nologs-t.c index b95d8bee24c..a7c526b2e17 100644 --- a/storage/maria/unittest/ma_test_loghandler_nologs-t.c +++ b/storage/maria/unittest/ma_test_loghandler_nologs-t.c @@ -66,7 +66,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); @@ -139,7 +139,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } } - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_pagecache-t.c b/storage/maria/unittest/ma_test_loghandler_pagecache-t.c index 892a773b475..4ecc8f1f8e7 100644 --- a/storage/maria/unittest/ma_test_loghandler_pagecache-t.c +++ b/storage/maria/unittest/ma_test_loghandler_pagecache-t.c @@ -69,7 +69,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git a/storage/maria/unittest/ma_test_loghandler_purge-t.c b/storage/maria/unittest/ma_test_loghandler_purge-t.c index 07b50f197de..b26a2f99ae7 100644 --- a/storage/maria/unittest/ma_test_loghandler_purge-t.c +++ b/storage/maria/unittest/ma_test_loghandler_purge-t.c @@ -67,7 +67,7 @@ int main(int argc __attribute__((unused)), char *argv[]) } #endif - if (ma_control_file_open(TRUE, TRUE, TRUE)) + if (ma_control_file_open(TRUE, TRUE, TRUE, control_file_open_flags)) { fprintf(stderr, "Can't init control file (%d)\n", errno); exit(1); diff --git 
a/storage/mroonga/CMakeLists.txt b/storage/mroonga/CMakeLists.txt index bea0eecc8b7..4553fd432b0 100644 --- a/storage/mroonga/CMakeLists.txt +++ b/storage/mroonga/CMakeLists.txt @@ -57,6 +57,11 @@ if(MRN_BUNDLED) "${PLUGIN_MROONGA}" STREQUAL "NO") return() endif() + if(WITHOUT_DYNAMIC_PLUGINS) + if(NOT (PLUGIN_MROONGA STREQUAL STATIC)) + return() + endif() + endif() endif() set(MRN_BUNDLED_GROONGA_RELATIVE_DIR "vendor/groonga") diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c index f69f1869383..3d95fffacaf 100644 --- a/storage/myisam/ft_boolean_search.c +++ b/storage/myisam/ft_boolean_search.c @@ -287,6 +287,8 @@ static int ftb_parse_query_internal(MYSQL_FTPARSER_PARAM *param, uchar *end= (uchar*) query + len; FT_WORD w; + w.pos= NULL; + w.len= 0; info.prev= ' '; info.quot= 0; while (ft_get_word(cs, start, end, &w, &info)) diff --git a/storage/rocksdb/ha_rocksdb.h b/storage/rocksdb/ha_rocksdb.h index d03c183873e..eeb2ddfe94f 100644 --- a/storage/rocksdb/ha_rocksdb.h +++ b/storage/rocksdb/ha_rocksdb.h @@ -397,7 +397,7 @@ class ha_rocksdb : public my_core::handler { current lookup to be covered. If the bitmap field is null, that means this index does not cover the current lookup for any record. */ - MY_BITMAP m_lookup_bitmap = {nullptr, nullptr, 0, 0}; + MY_BITMAP m_lookup_bitmap = {nullptr, nullptr, 0, 0, 0}; int alloc_key_buffers(const TABLE *const table_arg, const Rdb_tbl_def *const tbl_def_arg, diff --git a/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result b/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result index 5a1350fe0ff..dbf4d7d362e 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/group_min_max.result @@ -3517,7 +3517,7 @@ SHOW SESSION STATUS LIKE 'Handler_read%'; Variable_name Value Handler_read_first 0 Handler_read_key 3 -Handler_read_last 1 +Handler_read_last 0 Handler_read_next 0 Handler_read_prev 0 Handler_read_retry 0 diff --git a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result index 68240d98533..4b8e3802c56 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/innodb_i_s_tables_disabled.result @@ -68,7 +68,6 @@ buffer_flush_n_to_flush_by_age buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NUL buffer_flush_adaptive_avg_time buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Avg time (ms) spent for adaptive flushing recently. buffer_flush_adaptive_avg_pass buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of adaptive flushes passed during the recent Avg period. buffer_LRU_get_free_loops buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Total loops in LRU get free. -buffer_LRU_get_free_waits buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Total sleep waits in LRU get free. 
buffer_flush_avg_page_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Average number of pages at which flushing is happening buffer_flush_lsn_avg_rate buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Average redo generation rate buffer_flush_pct_for_dirty buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Percent of IO capacity used to avoid max dirty page limit @@ -88,7 +87,6 @@ buffer_LRU_batch_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NUL buffer_LRU_batch_scanned_per_call buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_member Pages scanned per LRU batch call buffer_LRU_batch_flush_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Total pages flushed as part of LRU batches buffer_LRU_batch_evict_total_pages buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 status_counter Total pages evicted as part of LRU batches -buffer_LRU_single_flush_failure_count Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of times attempt to flush a single page from LRU failed buffer_LRU_get_free_search Buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 counter Number of searches performed for a clean page buffer_LRU_search_scanned buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_owner Total pages scanned as part of LRU search buffer_LRU_search_num_scan buffer 0 NULL NULL NULL 0 NULL NULL NULL NULL NULL NULL NULL 0 set_member Number of times LRU search is performed diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index 046567724d9..c6e05888b4a 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -268,19 +268,19 @@ int ha_spider::open( spider_bulk_malloc(spider_current_trx, 16, MYF(MY_WME | MY_ZEROFILL), &wide_handler, sizeof(SPIDER_WIDE_HANDLER), &searched_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &ft_discard_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &position_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &idx_read_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &idx_write_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &rnd_read_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &rnd_write_bitmap, - (uint) sizeof(uchar) * no_bytes_in_map(table->read_set), + (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), &partition_handler, (uint) sizeof(SPIDER_PARTITION_HANDLER), NullS) @@ -304,9 +304,9 @@ int ha_spider::open( wide_handler->top_share = table->s; owner->wide_handler_owner = TRUE; memset(wide_handler->ft_discard_bitmap, 0xFF, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); memset(wide_handler->searched_bitmap, 0, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); wide_handler_alloc = TRUE; if (!share && !spider_get_share(name, table, thd, this, &error_num)) @@ -984,9 +984,9 @@ int ha_spider::reset() if (!is_clone) { memset(wide_handler->ft_discard_bitmap, 0xFF, - 
no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); memset(wide_handler->searched_bitmap, 0, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); } while (wide_handler->condition) { @@ -1211,7 +1211,7 @@ int ha_spider::index_init( bitmap_set_all(table->read_set); if (is_clone) memset(wide_handler->searched_bitmap, 0xFF, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); } } @@ -3107,7 +3107,7 @@ ha_rows ha_spider::multi_range_read_info_const( bitmap_set_all(table->read_set); if (is_clone) memset(wide_handler->searched_bitmap, 0xFF, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); } } @@ -3161,7 +3161,7 @@ ha_rows ha_spider::multi_range_read_info( bitmap_set_all(table->read_set); if (is_clone) memset(wide_handler->searched_bitmap, 0xFF, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); } } @@ -5420,7 +5420,7 @@ int ha_spider::rnd_init( bitmap_set_all(table->read_set); if (is_clone) memset(wide_handler->searched_bitmap, 0xFF, - no_bytes_in_map(table->read_set)); + my_bitmap_buffer_size(table->read_set)); } set_select_column_mode(); diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_33434.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_33434.result new file mode 100644 index 00000000000..2cbcff38752 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_33434.result @@ -0,0 +1,12 @@ +# +# MDEV-33434 MDEV-33434 UBSAN null pointer passed as argument 2, which is declared to never be null in spider_udf_direct_sql_create_conn +# +INSTALL SONAME 'ha_spider'; +SET character_set_connection=ucs2; +SELECT SPIDER_DIRECT_SQL('SELECT SLEEP(1)', '', 'srv "dummy", port "3307"'); +ERROR HY000: Unable to connect to foreign data source: localhost +Warnings: +Warning 1620 Plugin is busy and will be uninstalled on shutdown +# +# end of test mdev_33434 +# diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_33494.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_33494.result new file mode 100644 index 00000000000..3db28c0f08e --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_33494.result @@ -0,0 +1,4 @@ +set @old_sql_mode=@@global.sql_mode; +set global sql_mode=(SELECT CONCAT (@@sql_mode,',no_zero_date')); +install soname 'ha_spider'; +set global sql_mode=@old_sql_mode; diff --git a/storage/spider/mysql-test/spider/bugfix/r/slave_transaction_retry_errors_5digit.result b/storage/spider/mysql-test/spider/bugfix/r/slave_transaction_retry_errors_5digit.result index f2cab6b0a95..c8b1de982eb 100644 --- a/storage/spider/mysql-test/spider/bugfix/r/slave_transaction_retry_errors_5digit.result +++ b/storage/spider/mysql-test/spider/bugfix/r/slave_transaction_retry_errors_5digit.result @@ -9,7 +9,7 @@ for slave1_1 connection slave1_1; SHOW VARIABLES LIKE 'slave_transaction_retry_errors'; Variable_name Value -slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701,10000,20000,30000 +slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701,10000,20000,30000 connection slave1_1; for slave1_1 for master_1 diff --git a/storage/spider/mysql-test/spider/bugfix/r/udf_mysql_func_early_init_file.result b/storage/spider/mysql-test/spider/bugfix/r/udf_mysql_func_early_init_file.result deleted file mode 120000 index 045ddc4372c..00000000000 --- a/storage/spider/mysql-test/spider/bugfix/r/udf_mysql_func_early_init_file.result +++ /dev/null @@ -1 +0,0 @@ -udf_mysql_func_early.result \ No newline 
at end of file diff --git a/storage/spider/mysql-test/spider/bugfix/r/udf_mysql_func_early_init_file.result b/storage/spider/mysql-test/spider/bugfix/r/udf_mysql_func_early_init_file.result new file mode 100644 index 00000000000..b84f60a67fb --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/udf_mysql_func_early_init_file.result @@ -0,0 +1,43 @@ +# +# Test that udf created by inserting into mysql_func works as expected +# +CREATE SERVER s_1 FOREIGN DATA WRAPPER mysql OPTIONS ( +HOST 'localhost', +DATABASE 'auto_test_local', +USER 'root', +PASSWORD '', +SOCKET '$MASTER_1_MYSOCK' + ); +CREATE SERVER s_2_1 FOREIGN DATA WRAPPER mysql OPTIONS ( +HOST 'localhost', +DATABASE 'auto_test_remote', +USER 'root', +PASSWORD '', +SOCKET '$CHILD2_1_MYSOCK' + ); +connect master_1, localhost, root, , , $MASTER_1_MYPORT, $MASTER_1_MYSOCK; +connect child2_1, localhost, root, , , $CHILD2_1_MYPORT, $CHILD2_1_MYSOCK; +connection child2_1; +CREATE DATABASE auto_test_remote; +USE auto_test_remote; +CREATE TABLE tbl_a ( +a INT +) ENGINE=InnoDB DEFAULT CHARSET=utf8; +insert into tbl_a values (42); +connection master_1; +CREATE DATABASE auto_test_local; +USE auto_test_local; +CREATE TABLE tbl_a ( +a INT +) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"'; +create temporary table results (a int); +SELECT SPIDER_DIRECT_SQL('select * from tbl_a', 'results', 'srv "s_2_1", database "auto_test_remote"'); +SPIDER_DIRECT_SQL('select * from tbl_a', 'results', 'srv "s_2_1", database "auto_test_remote"') +1 +select * from results; +a +42 +connection master_1; +DROP DATABASE IF EXISTS auto_test_local; +connection child2_1; +DROP DATABASE IF EXISTS auto_test_remote; diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_33434.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_33434.test new file mode 100644 index 00000000000..dd9f882f42e --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_33434.test @@ -0,0 +1,15 @@ +--echo # +--echo # MDEV-33434 MDEV-33434 UBSAN null pointer passed as argument 2, which is declared to never be null in spider_udf_direct_sql_create_conn +--echo # + +INSTALL SONAME 'ha_spider'; +SET character_set_connection=ucs2; +--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE +SELECT SPIDER_DIRECT_SQL('SELECT SLEEP(1)', '', 'srv "dummy", port "3307"'); +--disable_query_log +--source ../../include/clean_up_spider.inc +--enable_query_log + +--echo # +--echo # end of test mdev_33434 +--echo # diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_33494.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_33494.test new file mode 100644 index 00000000000..30beca77f35 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_33494.test @@ -0,0 +1,11 @@ +# This test tests spider init with global no_zero_date sql mode +set @old_sql_mode=@@global.sql_mode; +set global sql_mode=(SELECT CONCAT (@@sql_mode,',no_zero_date')); +install soname 'ha_spider'; +set global sql_mode=@old_sql_mode; + +--disable_query_log +--disable_result_log +--source ../../include/clean_up_spider.inc +--enable_result_log +--enable_query_log diff --git a/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result b/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result index 0a147c0356a..1de1ba04528 100644 --- a/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result +++ b/storage/spider/mysql-test/spider/feature/r/slave_transaction_retry_errors.result @@ -9,7 +9,7 @@ for slave1_1 connection slave1_1; SHOW 
VARIABLES LIKE 'slave_transaction_retry_errors'; Variable_name Value -slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1429,2013,12701 +slave_transaction_retry_errors 1158,1159,1160,1161,1205,1213,1020,1429,2013,12701 connection slave1_1; for slave1_1 for master_1 diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index c786bd42746..bca71de52b1 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -343,7 +343,6 @@ int spider_db_conn_queue_action( ) || ( conn->loop_check_queue.records && - conn->db_conn->set_loop_check_in_bulk_sql() && (error_num = spider_dbton[conn->dbton_id].db_util-> append_loop_check(&sql_str, conn)) ) || @@ -442,13 +441,6 @@ int spider_db_conn_queue_action( ) { DBUG_RETURN(error_num); } - if ( - conn->loop_check_queue.records && - !conn->db_conn->set_loop_check_in_bulk_sql() && - (error_num = conn->db_conn->set_loop_check((int *) conn->need_mon)) - ) { - DBUG_RETURN(error_num); - } if ( conn->queued_trx_isolation && !conn->queued_semi_trx_isolation && diff --git a/storage/spider/spd_db_include.cc b/storage/spider/spd_db_include.cc index a4d7d4b49a0..514470a2c80 100644 --- a/storage/spider/spd_db_include.cc +++ b/storage/spider/spd_db_include.cc @@ -64,22 +64,6 @@ spider_db_conn::spider_db_conn( DBUG_VOID_RETURN; } -bool spider_db_conn::set_loop_check_in_bulk_sql() -{ - DBUG_ENTER("spider_db_conn::set_loop_check_in_bulk_sql"); - DBUG_PRINT("info",("spider this=%p", this)); - DBUG_RETURN(FALSE); -} - -int spider_db_conn::set_loop_check( - int *need_mon -) { - DBUG_ENTER("spider_db_conn::set_loop_check"); - DBUG_PRINT("info",("spider this=%p", this)); - /* nothing to do */ - DBUG_RETURN(0); -} - int spider_db_conn::fin_loop_check() { st_spider_conn_loop_check *lcptr; diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h index 8c1c06f994f..2bf492abbab 100644 --- a/storage/spider/spd_db_include.h +++ b/storage/spider/spd_db_include.h @@ -1031,10 +1031,6 @@ public: Time_zone *time_zone, int *need_mon ) = 0; - virtual bool set_loop_check_in_bulk_sql(); - virtual int set_loop_check( - int *need_mon - ); virtual int fin_loop_check(); virtual int show_master_status( SPIDER_TRX *trx, diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 0c7f69f7b5c..29c8d525ecf 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -3268,110 +3268,6 @@ int spider_db_mbase::set_time_zone( DBUG_RETURN(0); } -bool spider_db_mbase::set_loop_check_in_bulk_sql() -{ - DBUG_ENTER("spider_db_mbase::set_loop_check_in_bulk_sql"); - DBUG_PRINT("info",("spider this=%p", this)); - DBUG_RETURN(TRUE); -} - -int spider_db_mbase::set_loop_check( - int *need_mon -) { - SPIDER_CONN_LOOP_CHECK *lcptr; - char sql_buf[MAX_FIELD_WIDTH]; - spider_string sql_str(sql_buf, sizeof(sql_buf), &my_charset_bin); - DBUG_ENTER("spider_db_mbase::set_loop_check"); - DBUG_PRINT("info",("spider this=%p", this)); - sql_str.init_calc_mem(SPD_MID_DB_MBASE_SET_LOOP_CHECK_1); - while ((lcptr = (SPIDER_CONN_LOOP_CHECK *) my_hash_element( - &conn->loop_check_queue, 0))) - { - sql_str.length(0); - if (sql_str.reserve(SPIDER_SQL_SET_USER_VAL_LEN + - SPIDER_SQL_LOP_CHK_PRM_PRF_LEN + lcptr->to_name.length + - SPIDER_SQL_NAME_QUOTE_LEN + SPIDER_SQL_EQUAL_LEN + - SPIDER_SQL_VALUE_QUOTE_LEN + - lcptr->merged_value.length + SPIDER_SQL_VALUE_QUOTE_LEN)) - { - DBUG_RETURN(HA_ERR_OUT_OF_MEM); - } - sql_str.q_append(SPIDER_SQL_SET_USER_VAL_STR, SPIDER_SQL_SET_USER_VAL_LEN); - 
sql_str.q_append(SPIDER_SQL_LOP_CHK_PRM_PRF_STR, - SPIDER_SQL_LOP_CHK_PRM_PRF_LEN); - sql_str.q_append(lcptr->to_name.str, lcptr->to_name.length); - sql_str.q_append(SPIDER_SQL_NAME_QUOTE_STR, SPIDER_SQL_NAME_QUOTE_LEN); - sql_str.q_append(SPIDER_SQL_EQUAL_STR, SPIDER_SQL_EQUAL_LEN); - sql_str.q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN); - sql_str.q_append(lcptr->merged_value.str, lcptr->merged_value.length); - sql_str.q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN); - - pthread_mutex_assert_not_owner(&conn->mta_conn_mutex); - pthread_mutex_lock(&conn->mta_conn_mutex); - SPIDER_SET_FILE_POS(&conn->mta_conn_mutex_file_pos); - conn->need_mon = need_mon; - DBUG_ASSERT(!conn->mta_conn_mutex_lock_already); - DBUG_ASSERT(!conn->mta_conn_mutex_unlock_later); - conn->mta_conn_mutex_lock_already = TRUE; - conn->mta_conn_mutex_unlock_later = TRUE; - if (spider_db_query( - conn, - sql_str.ptr(), - sql_str.length(), - -1, - need_mon) - ) { - DBUG_ASSERT(conn->mta_conn_mutex_lock_already); - DBUG_ASSERT(conn->mta_conn_mutex_unlock_later); - conn->mta_conn_mutex_lock_already = FALSE; - conn->mta_conn_mutex_unlock_later = FALSE; - DBUG_RETURN(spider_db_errorno(conn)); - } - DBUG_ASSERT(conn->mta_conn_mutex_lock_already); - DBUG_ASSERT(conn->mta_conn_mutex_unlock_later); - conn->mta_conn_mutex_lock_already = FALSE; - conn->mta_conn_mutex_unlock_later = FALSE; - SPIDER_CLEAR_FILE_POS(&conn->mta_conn_mutex_file_pos); - pthread_mutex_unlock(&conn->mta_conn_mutex); - - my_hash_delete(&conn->loop_check_queue, (uchar*) lcptr); - } - DBUG_RETURN(0); -} - -int spider_db_mbase::fin_loop_check() -{ - st_spider_conn_loop_check *lcptr; - DBUG_ENTER("spider_db_mbase::fin_loop_check"); - DBUG_PRINT("info",("spider this=%p", this)); - if (conn->loop_check_queue.records) - { - uint l = 0; - while ((lcptr = (SPIDER_CONN_LOOP_CHECK *) my_hash_element( - &conn->loop_check_queue, l))) - { - lcptr->flag = 0; - ++l; - } - my_hash_reset(&conn->loop_check_queue); - } - lcptr = conn->loop_check_ignored_first; - while (lcptr) - { - lcptr->flag = 0; - lcptr = lcptr->next; - } - conn->loop_check_ignored_first = NULL; - lcptr = conn->loop_check_meraged_first; - while (lcptr) - { - lcptr->flag = 0; - lcptr = lcptr->next; - } - conn->loop_check_meraged_first = NULL; - DBUG_RETURN(0); -} - int spider_db_mbase::exec_simple_sql_with_result( SPIDER_TRX *trx, SPIDER_SHARE *share, @@ -8292,7 +8188,7 @@ int spider_mbase_handler::init() &link_for_hash, sizeof(SPIDER_LINK_FOR_HASH) * share->link_count, &minimum_select_bitmap, - table ? sizeof(uchar) * no_bytes_in_map(table->read_set) : 0, + table ? 
sizeof(uchar) * my_bitmap_buffer_size(table->read_set) : 0, NullS)) ) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -14439,7 +14335,7 @@ void spider_mbase_handler::minimum_select_bitmap_create() Field **field_p; DBUG_ENTER("spider_mbase_handler::minimum_select_bitmap_create"); DBUG_PRINT("info",("spider this=%p", this)); - memset(minimum_select_bitmap, 0, no_bytes_in_map(table->read_set)); + memset(minimum_select_bitmap, 0, my_bitmap_buffer_size(table->read_set)); if ( spider->use_index_merge || spider->is_clone @@ -14450,7 +14346,7 @@ void spider_mbase_handler::minimum_select_bitmap_create() table_share->primary_key == MAX_KEY ) { /* need all columns */ - memset(minimum_select_bitmap, 0xFF, no_bytes_in_map(table->read_set)); + memset(minimum_select_bitmap, 0xFF, my_bitmap_buffer_size(table->read_set)); DBUG_VOID_RETURN; } else { /* need primary key columns */ diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h index b14e24629c4..89c6dd813e3 100644 --- a/storage/spider/spd_db_mysql.h +++ b/storage/spider/spd_db_mysql.h @@ -523,11 +523,6 @@ public: Time_zone *time_zone, int *need_mon ); - bool set_loop_check_in_bulk_sql(); - int set_loop_check( - int *need_mon - ); - int fin_loop_check(); int exec_simple_sql_with_result( SPIDER_TRX *trx, SPIDER_SHARE *share, diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc index 3dd8e4293c1..2f593ddc2e2 100644 --- a/storage/spider/spd_direct_sql.cc +++ b/storage/spider/spd_direct_sql.cc @@ -364,6 +364,23 @@ int spider_udf_direct_sql_create_conn_key( DBUG_RETURN(0); } +static inline void spider_maybe_memcpy_string( + char **dest, + char *src, + char *tmp, + uint *dest_len, + uint src_len) +{ + *dest_len= src_len; + if (src_len) + { + *dest= tmp; + memcpy(*dest, src, src_len); + } else + *dest= NULL; +} + + SPIDER_CONN *spider_udf_direct_sql_create_conn( const SPIDER_DIRECT_SQL *direct_sql, int *error_num @@ -433,105 +450,49 @@ SPIDER_CONN *spider_udf_direct_sql_create_conn( conn->tgt_host = tmp_host; memcpy(conn->tgt_host, direct_sql->tgt_host, direct_sql->tgt_host_length); conn->tgt_port = direct_sql->tgt_port; - conn->tgt_socket_length = direct_sql->tgt_socket_length; - conn->tgt_socket = tmp_socket; - memcpy(conn->tgt_socket, direct_sql->tgt_socket, - direct_sql->tgt_socket_length); + spider_maybe_memcpy_string( + &conn->tgt_socket, direct_sql->tgt_socket, tmp_socket, + &conn->tgt_socket_length, direct_sql->tgt_socket_length); if (!tables_on_different_db_are_joinable) - { - conn->tgt_db_length = direct_sql->tgt_default_db_name_length; - conn->tgt_db = tmp_db; - memcpy(conn->tgt_db, direct_sql->tgt_default_db_name, - direct_sql->tgt_default_db_name_length); - } - conn->tgt_username_length = direct_sql->tgt_username_length; - conn->tgt_username = tmp_username; - memcpy(conn->tgt_username, direct_sql->tgt_username, - direct_sql->tgt_username_length); - conn->tgt_password_length = direct_sql->tgt_password_length; - conn->tgt_password = tmp_password; - memcpy(conn->tgt_password, direct_sql->tgt_password, - direct_sql->tgt_password_length); - conn->tgt_ssl_ca_length = direct_sql->tgt_ssl_ca_length; - if (conn->tgt_ssl_ca_length) - { - conn->tgt_ssl_ca = tmp_ssl_ca; - memcpy(conn->tgt_ssl_ca, direct_sql->tgt_ssl_ca, - direct_sql->tgt_ssl_ca_length); - } else - conn->tgt_ssl_ca = NULL; - conn->tgt_ssl_capath_length = direct_sql->tgt_ssl_capath_length; - if (conn->tgt_ssl_capath_length) - { - conn->tgt_ssl_capath = tmp_ssl_capath; - memcpy(conn->tgt_ssl_capath, direct_sql->tgt_ssl_capath, - 
direct_sql->tgt_ssl_capath_length); - } else - conn->tgt_ssl_capath = NULL; - conn->tgt_ssl_cert_length = direct_sql->tgt_ssl_cert_length; - if (conn->tgt_ssl_cert_length) - { - conn->tgt_ssl_cert = tmp_ssl_cert; - memcpy(conn->tgt_ssl_cert, direct_sql->tgt_ssl_cert, - direct_sql->tgt_ssl_cert_length); - } else - conn->tgt_ssl_cert = NULL; - conn->tgt_ssl_cipher_length = direct_sql->tgt_ssl_cipher_length; - if (conn->tgt_ssl_cipher_length) - { - conn->tgt_ssl_cipher = tmp_ssl_cipher; - memcpy(conn->tgt_ssl_cipher, direct_sql->tgt_ssl_cipher, - direct_sql->tgt_ssl_cipher_length); - } else - conn->tgt_ssl_cipher = NULL; - conn->tgt_ssl_key_length = direct_sql->tgt_ssl_key_length; - if (conn->tgt_ssl_key_length) - { - conn->tgt_ssl_key = tmp_ssl_key; - memcpy(conn->tgt_ssl_key, direct_sql->tgt_ssl_key, - direct_sql->tgt_ssl_key_length); - } else - conn->tgt_ssl_key = NULL; - conn->tgt_default_file_length = direct_sql->tgt_default_file_length; - if (conn->tgt_default_file_length) - { - conn->tgt_default_file = tmp_default_file; - memcpy(conn->tgt_default_file, direct_sql->tgt_default_file, - direct_sql->tgt_default_file_length); - } else - conn->tgt_default_file = NULL; - conn->tgt_default_group_length = direct_sql->tgt_default_group_length; - if (conn->tgt_default_group_length) - { - conn->tgt_default_group = tmp_default_group; - memcpy(conn->tgt_default_group, direct_sql->tgt_default_group, - direct_sql->tgt_default_group_length); - } else - conn->tgt_default_group = NULL; - conn->tgt_dsn_length = direct_sql->tgt_dsn_length; - if (conn->tgt_dsn_length) - { - conn->tgt_dsn = tmp_dsn; - memcpy(conn->tgt_dsn, direct_sql->tgt_dsn, - direct_sql->tgt_dsn_length); - } else - conn->tgt_dsn = NULL; - conn->tgt_filedsn_length = direct_sql->tgt_filedsn_length; - if (conn->tgt_filedsn_length) - { - conn->tgt_filedsn = tmp_filedsn; - memcpy(conn->tgt_filedsn, direct_sql->tgt_filedsn, - direct_sql->tgt_filedsn_length); - } else - conn->tgt_filedsn = NULL; - conn->tgt_driver_length = direct_sql->tgt_driver_length; - if (conn->tgt_driver_length) - { - conn->tgt_driver = tmp_driver; - memcpy(conn->tgt_driver, direct_sql->tgt_driver, - direct_sql->tgt_driver_length); - } else - conn->tgt_driver = NULL; + spider_maybe_memcpy_string( + &conn->tgt_db, direct_sql->tgt_default_db_name, tmp_db, + &conn->tgt_db_length, direct_sql->tgt_default_db_name_length); + spider_maybe_memcpy_string( + &conn->tgt_username, direct_sql->tgt_username, tmp_username, + &conn->tgt_username_length, direct_sql->tgt_username_length); + spider_maybe_memcpy_string( + &conn->tgt_password, direct_sql->tgt_password, tmp_password, + &conn->tgt_password_length, direct_sql->tgt_password_length); + spider_maybe_memcpy_string( + &conn->tgt_ssl_ca, direct_sql->tgt_ssl_ca, tmp_ssl_ca, + &conn->tgt_ssl_ca_length, direct_sql->tgt_ssl_ca_length); + spider_maybe_memcpy_string( + &conn->tgt_ssl_capath, direct_sql->tgt_ssl_capath, tmp_ssl_capath, + &conn->tgt_ssl_capath_length, direct_sql->tgt_ssl_capath_length); + spider_maybe_memcpy_string( + &conn->tgt_ssl_cert, direct_sql->tgt_ssl_cert, tmp_ssl_cert, + &conn->tgt_ssl_cert_length, direct_sql->tgt_ssl_cert_length); + spider_maybe_memcpy_string( + &conn->tgt_ssl_cipher, direct_sql->tgt_ssl_cipher, tmp_ssl_cipher, + &conn->tgt_ssl_cipher_length, direct_sql->tgt_ssl_cipher_length); + spider_maybe_memcpy_string( + &conn->tgt_ssl_key, direct_sql->tgt_ssl_key, tmp_ssl_key, + &conn->tgt_ssl_key_length, direct_sql->tgt_ssl_key_length); + spider_maybe_memcpy_string( + &conn->tgt_default_file, 
direct_sql->tgt_default_file, tmp_default_file, + &conn->tgt_default_file_length, direct_sql->tgt_default_file_length); + spider_maybe_memcpy_string( + &conn->tgt_default_group, direct_sql->tgt_default_group, tmp_default_group, + &conn->tgt_default_group_length, direct_sql->tgt_default_group_length); + spider_maybe_memcpy_string( + &conn->tgt_dsn, direct_sql->tgt_dsn, tmp_dsn, + &conn->tgt_dsn_length, direct_sql->tgt_dsn_length); + spider_maybe_memcpy_string( + &conn->tgt_filedsn, direct_sql->tgt_filedsn, tmp_filedsn, + &conn->tgt_filedsn_length, direct_sql->tgt_filedsn_length); + spider_maybe_memcpy_string( + &conn->tgt_driver, direct_sql->tgt_driver, tmp_driver, + &conn->tgt_driver_length, direct_sql->tgt_driver_length); conn->tgt_ssl_vsc = direct_sql->tgt_ssl_vsc; conn->dbton_id = direct_sql->dbton_id; conn->conn_need_mon = need_mon; diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h index b46a593f391..4adf9a911f0 100644 --- a/storage/spider/spd_include.h +++ b/storage/spider/spd_include.h @@ -253,7 +253,6 @@ enum spider_malloc_id { SPD_MID_DB_MBASE_RESULT_FETCH_ROW_FROM_TMP_TABLE_3, SPD_MID_DB_MBASE_ROW_APPEND_ESCAPED_TO_STR_1, SPD_MID_DB_MBASE_ROW_CLONE_1, - SPD_MID_DB_MBASE_SET_LOOP_CHECK_1, SPD_MID_DB_MBASE_SET_SQL_MODE_1, SPD_MID_DB_MBASE_SET_TIME_ZONE_1, SPD_MID_DB_MBASE_SET_WAIT_TIMEOUT_1, diff --git a/storage/spider/spd_init_query.h b/storage/spider/spd_init_query.h index 2400046c261..0e42e228481 100644 --- a/storage/spider/spd_init_query.h +++ b/storage/spider/spd_init_query.h @@ -21,7 +21,7 @@ static LEX_STRING spider_init_queries[] = { {C_STRING_WITH_LEN( - "SET @@SQL_MODE = REPLACE(@@SQL_MODE, 'ORACLE', '');" + "SET @@SQL_MODE = REGEXP_REPLACE(@@SQL_MODE, '(ORACLE|NO_ZERO_DATE)', '');" )}, {C_STRING_WITH_LEN( "SET @@OLD_MODE = CONCAT(@@OLD_MODE, ',UTF8_IS_UTF8MB3');" diff --git a/support-files/mariadb.service.in b/support-files/mariadb.service.in index 1af0106e929..79c8b9a8cca 100644 --- a/support-files/mariadb.service.in +++ b/support-files/mariadb.service.in @@ -51,7 +51,7 @@ Group=mysql # CAP_DAC_OVERRIDE To allow auth_pam_tool (which is SUID root) to read /etc/shadow when it's chmod 0 # does nothing for non-root, not needed if /etc/shadow is u+r # CAP_AUDIT_WRITE auth_pam_tool needs it on Debian for whatever reason -CapabilityBoundingSet=CAP_IPC_LOCK CAP_DAC_OVERRIDE CAP_AUDIT_WRITE +AmbientCapabilities=CAP_IPC_LOCK CAP_DAC_OVERRIDE CAP_AUDIT_WRITE # PrivateDevices=true implies NoNewPrivileges=true and # SUID auth_pam_tool suddenly doesn't do setuid anymore diff --git a/support-files/mariadb@.service.in b/support-files/mariadb@.service.in index 8cb3ab2a4d7..31f1586f1bf 100644 --- a/support-files/mariadb@.service.in +++ b/support-files/mariadb@.service.in @@ -181,7 +181,7 @@ PrivateNetwork=false # CAP_DAC_OVERRIDE To allow auth_pam_tool (which is SUID root) to read /etc/shadow when it's chmod 0 # does nothing for non-root, not needed if /etc/shadow is u+r # CAP_AUDIT_WRITE auth_pam_tool needs it on Debian for whatever reason -CapabilityBoundingSet=CAP_IPC_LOCK CAP_DAC_OVERRIDE CAP_AUDIT_WRITE +AmbientCapabilities=CAP_IPC_LOCK CAP_DAC_OVERRIDE CAP_AUDIT_WRITE # PrivateDevices=true implies NoNewPrivileges=true and # SUID auth_pam_tool suddenly doesn't do setuid anymore diff --git a/support-files/mini-benchmark.sh b/support-files/mini-benchmark.sh index 9b7cb6dc698..a7bf3591a39 100755 --- a/support-files/mini-benchmark.sh +++ b/support-files/mini-benchmark.sh @@ -1,6 +1,6 @@ #!/bin/bash # Abort on errors -set -e +set -ex display_help() { echo "Usage: 
$(basename "$0") [-h] [--perf] [--perf-flamegraph]" @@ -121,6 +121,12 @@ then exit 1 fi +if [ "$PERF" == true ] && [ "$PERF_RECORD" == true ] +then + echo "ERROR: Cannot select both --perf and --perf-flamegraph options simultaneously. Please choose one or the other." + exit 1 +fi + if [ "$PERF" == true ] || [ "$PERF_RECORD" == true ] then if [ ! -e /usr/bin/perf ] @@ -158,28 +164,26 @@ then # shellcheck disable=SC2046 debuginfo-install -y mariadb-server $(cat mariadbd-dependencies.txt) - perf record echo "testing perf" > /dev/null 2>&1 - if [ $? -ne 0 ] + if ! (perf record echo "testing perf") > /dev/null 2>&1 then echo "perf does not have permission to run on this system. Skipping." - PERF="" + PERF_COMMAND="" else echo "Using 'perf' to record performance counters in perf.data files" - PERF="perf record -g --freq=99 --output=perf.data --timestamp-filename --pid=$MARIADB_SERVER_PID --" + PERF_COMMAND="perf record -g --freq=99 --output=perf.data --timestamp-filename --pid=$MARIADB_SERVER_PID --" fi -elif [ -e /usr/bin/perf ] +elif [ "$PERF" == true ] then # If flamegraphs were not requested, log normal perf counters if possible - perf stat echo "testing perf" > /dev/null 2>&1 - if [ $? -ne 0 ] + if ! (perf stat echo "testing perf") > /dev/null 2>&1 then echo "perf does not have permission to run on this system. Skipping." - PERF="" + PERF_COMMAND="" else echo "Using 'perf' to log basic performance counters for benchmark" - PERF="perf stat -p $MARIADB_SERVER_PID --" + PERF_COMMAND="perf stat -p $MARIADB_SERVER_PID --" fi fi @@ -222,7 +226,7 @@ do # Prepend command with perf if defined # Output stderr to stdout as perf outputs everything in stderr # shellcheck disable=SC2086 - $PERF $TASKSET_SYSBENCH sysbench "$WORKLOAD" run --threads=$t --time=$DURATION --report-interval=10 2>&1 | tee sysbench-run-$t.log + $PERF_COMMAND $TASKSET_SYSBENCH sysbench "$WORKLOAD" run --threads=$t --time=$DURATION --report-interval=10 2>&1 | tee sysbench-run-$t.log done sysbench "$WORKLOAD" cleanup --tables=20 | tee sysbench-cleanup.log diff --git a/support-files/policy/apparmor/usr.sbin.mysqld b/support-files/policy/apparmor/usr.sbin.mysqld index c60ecd28531..732f4b3a97a 100644 --- a/support-files/policy/apparmor/usr.sbin.mysqld +++ b/support-files/policy/apparmor/usr.sbin.mysqld @@ -14,6 +14,7 @@ capability chown, capability dac_override, + capability ipc_lock, capability setgid, capability setuid, capability sys_rawio, diff --git a/support-files/policy/selinux/mariadb-server.te b/support-files/policy/selinux/mariadb-server.te index 89846063506..ba53c97d4a8 100644 --- a/support-files/policy/selinux/mariadb-server.te +++ b/support-files/policy/selinux/mariadb-server.te @@ -25,7 +25,7 @@ require { class lnk_file read; class process { getattr signull }; class unix_stream_socket connectto; - class capability { sys_resource sys_nice }; + class capability { ipc_lock sys_resource sys_nice }; class tcp_socket { name_bind name_connect }; class file { execute setattr read create getattr execute_no_trans write ioctl open append unlink }; class sock_file { create unlink getattr }; @@ -87,6 +87,8 @@ allow mysqld_t bin_t:file { getattr read execute open execute_no_trans ioctl }; # MariaDB additions allow mysqld_t self:process setpgid; +allow mysqld_t self:capability { ipc_lock }; + # This rule allows port tcp/4444 allow mysqld_t kerberos_port_t:tcp_socket { name_bind name_connect }; # This rule allows port tcp/4567 (tram_port_t may not be available on diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 
ffc4a19e077..04c439cf7a9 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -3842,7 +3842,7 @@ static void test_bind_result_ext1() short i_data; uchar b_data; int f_data; - long bData; + int bData; char d_data[20]; double szData; MYSQL_BIND my_bind[8]; @@ -3938,7 +3938,7 @@ static void test_bind_result_ext1() fprintf(stdout, "\n data (float) : %d(%lu)", f_data, length[4]); fprintf(stdout, "\n data (double) : %s(%lu)", d_data, length[5]); - fprintf(stdout, "\n data (bin) : %ld(%lu)", bData, length[6]); + fprintf(stdout, "\n data (bin) : %d(%lu)", bData, length[6]); fprintf(stdout, "\n data (str) : %g(%lu)", szData, length[7]); } @@ -20564,7 +20564,6 @@ typedef struct { #ifndef EMBEDDED_LIBRARY static void test_proxy_header_tcp(const char *ipaddr, int port) { - int rc; MYSQL_RES *result; int family = (strchr(ipaddr,':') == NULL)?AF_INET:AF_INET6; @@ -20639,6 +20638,11 @@ static void test_proxy_header_tcp(const char *ipaddr, int port) DIE_UNLESS(strncmp(row[0], normalized_addr, addrlen) == 0); DIE_UNLESS(atoi(row[0] + addrlen+1) == port); mysql_free_result(result); + if (i == 0 && !strcmp(ipaddr,"192.0.2.1")) + { + /* do "dirty" close, to get aborted message in error log.*/ + mariadb_cancel(m); + } mysql_close(m); } sprintf(query,"DROP USER 'u'@'%s'",normalized_addr); diff --git a/unittest/mysys/bitmap-t.c b/unittest/mysys/bitmap-t.c index 22466355191..9fc61c6e787 100644 --- a/unittest/mysys/bitmap-t.c +++ b/unittest/mysys/bitmap-t.c @@ -29,6 +29,8 @@ uint get_rand_bit(uint bitsize) { + if (bitsize == 0) + return 0; return (rand() % bitsize); } @@ -266,7 +268,7 @@ my_bool test_get_first_bit(MY_BITMAP *map, uint bitsize) bitmap_clear_all(map); for (i=0; i < bitsize; i++) bitmap_set_bit(map, i); - if (bitmap_get_first(map) != MY_BIT_NONE) + if (bitmap_get_first_clear(map) != MY_BIT_NONE) goto error2; bitmap_clear_all(map); @@ -278,7 +280,7 @@ my_bool test_get_first_bit(MY_BITMAP *map, uint bitsize) goto error1; bitmap_set_all(map); bitmap_clear_bit(map, test_bit); - if (bitmap_get_first(map) != test_bit) + if (bitmap_get_first_clear(map) != test_bit) goto error2; bitmap_clear_all(map); } @@ -297,14 +299,45 @@ my_bool test_get_next_bit(MY_BITMAP *map, uint bitsize) uint no_loops= bitsize > 128 ? 128 : bitsize; for (i=0; i < no_loops; i++) { + uint count= 0, bits_set= 0; + bitmap_clear_all(map); test_bit=get_rand_bit(bitsize); for (j=0; j < test_bit; j++) bitmap_set_next(map); if (!bitmap_is_prefix(map, test_bit)) goto error1; + j= bitmap_get_first_set(map); + if (j == MY_BIT_NONE) + { + if (test_bit != 0) + goto error1; + continue; + } + count= 1; + while ((j= bitmap_get_next_set(map,j)) != MY_BIT_NONE) + count++; + if (count != test_bit) + goto error1; + + if (test_bit < 3) + continue; bitmap_clear_all(map); + for (j=1; j < test_bit; j+=2) + { + bits_set++; + bitmap_set_bit(map, j); + } + if ((j= bitmap_get_first_set(map)) == MY_BIT_NONE) + goto error1; + count= 1; + while ((j= bitmap_get_next_set(map,j)) != MY_BIT_NONE) + count++; + if (count != bits_set) + goto error1; } + return FALSE; + error1: diag("get_next error bitsize= %u, prefix_size= %u", bitsize,test_bit); return TRUE; @@ -371,7 +404,7 @@ error5: my_bool test_compare(MY_BITMAP *map, uint bitsize) { MY_BITMAP map2; - uint32 map2buf[MAX_TESTED_BITMAP_SIZE]; + my_bitmap_map map2buf[MAX_TESTED_BITMAP_SIZE]; uint i, test_bit; uint no_loops= bitsize > 128 ? 
128 : bitsize; if (my_bitmap_init(&map2, map2buf, bitsize)) @@ -431,7 +464,7 @@ my_bool test_intersect(MY_BITMAP *map, uint bitsize) { uint bitsize2 = 1 + get_rand_bit(MAX_TESTED_BITMAP_SIZE - 1); MY_BITMAP map2; - uint32 map2buf[MAX_TESTED_BITMAP_SIZE]; + my_bitmap_map map2buf[MAX_TESTED_BITMAP_SIZE]; uint i, test_bit1, test_bit2, test_bit3; if (my_bitmap_init(&map2, map2buf, bitsize2)) { @@ -477,6 +510,107 @@ error: return TRUE; } +my_bool test_copy(MY_BITMAP *map, uint bitsize) +{ + my_bitmap_map buff[16], buff2[16], buff3[16]; + MY_BITMAP map2, map3; + uint rnd_bit; + + my_bitmap_init(&map2, buff, sizeof(buff)*8); + my_bitmap_init(&map3, buff2, sizeof(buff)*8); + bitmap_set_all(&map2); + bitmap_set_all(&map3); + + bitsize= MY_MIN(bitsize, map2.n_bits); + bitmap_copy(map, &map2); + if (bitmap_bits_set(map) != bitsize) + { + diag("bitmap_copy failed on bitsize %d", bitsize); + return 1; + } + bitmap_set_prefix(&map2, rnd_bit= get_rand_bit(bitsize)+1); + bitmap_export((uchar*) buff3, &map2); + bitmap_import(&map3, (uchar*) buff3); + if (!bitmap_cmp(&map2, &map3)) + { + diag("bitmap_export/bitmap_import failed on bitsize %d rnd_bit: %d", + bitsize, rnd_bit); + return 1; + } + return 0; +} + +static my_bool exec_bitmap_exists_intersection(MY_BITMAP **maps, uint bitsize, + uint start, uint end, uint bit) +{ + bitmap_clear_all(maps[0]); + bitmap_clear_all(maps[1]); + bitmap_set_bit(maps[0], bit); + bitmap_set_bit(maps[1], bit); + return bitmap_exists_intersection(maps, 2, start, end); +} + +my_bool test_bitmap_exists_intersection(MY_BITMAP *map, uint bitsize) +{ + MY_BITMAP map2; + uint start_bit, end_bit, rnd_bit; + MY_BITMAP *maps[2]; + maps[0]= map; + maps[1]= &map2; + + my_bitmap_init(&map2, 0, bitsize); + bitmap_clear_all(map); + bitmap_clear_all(&map2); + + start_bit= get_rand_bit(bitsize); + end_bit= get_rand_bit(bitsize); + if (start_bit > end_bit) + swap_variables(uint, start_bit, end_bit); + rnd_bit= start_bit+get_rand_bit(end_bit-start_bit); + + if (!exec_bitmap_exists_intersection(maps, bitsize, start_bit, end_bit, + rnd_bit)) + goto err; + + start_bit= end_bit= rnd_bit= 0; + if (!exec_bitmap_exists_intersection(maps, bitsize, start_bit, end_bit, + rnd_bit)) + goto err; + + start_bit= rnd_bit= 0 ; end_bit= bitsize-1; + if (!exec_bitmap_exists_intersection(maps, bitsize, start_bit, end_bit, + rnd_bit)) + goto err; + + start_bit= rnd_bit= end_bit= bitsize-1; + if (!exec_bitmap_exists_intersection(maps, bitsize, start_bit, end_bit, + rnd_bit)) + goto err; + + if (bitsize > 1) + { + start_bit= end_bit= 1 ; rnd_bit= 0; + if (exec_bitmap_exists_intersection(maps, bitsize, start_bit, end_bit, + rnd_bit)) + goto err; + + start_bit= end_bit= bitsize-1 ; rnd_bit= bitsize-2; + if (exec_bitmap_exists_intersection(maps, bitsize, start_bit, end_bit, + rnd_bit)) + goto err; + } + + my_bitmap_free(&map2); + return 0; +err: + diag("bitmap_exist_intersection failed on bitsize: %d start_bit: %d " + "end_bit: %d rnd_bit: %d", + bitsize, start_bit, end_bit, rnd_bit); + my_bitmap_free(&map2); + return 1; +} + + my_bool do_test(uint bitsize) { MY_BITMAP map; @@ -515,6 +649,12 @@ my_bool do_test(uint bitsize) bitmap_clear_all(&map); if (test_intersect(&map,bitsize)) goto error; + bitmap_clear_all(&map); + if (test_copy(&map,bitsize)) + goto error; + bitmap_clear_all(&map); + if (test_bitmap_exists_intersection(&map, bitsize)) + goto error; return FALSE; error: return TRUE; diff --git a/vio/vio.c b/vio/vio.c index 7a98eb2af7b..bf1e79ae36b 100644 --- a/vio/vio.c +++ b/vio/vio.c @@ -79,6 +79,7 @@ static 
my_bool has_no_data(Vio *vio __attribute__((unused))) int vio_pipe_shutdown(Vio *vio, int how) { vio->shutdown_flag= how; + vio->state= VIO_STATE_SHUTDOWN; return CancelIoEx(vio->hPipe, NULL); } #endif @@ -98,6 +99,7 @@ static void vio_init(Vio *vio, enum enum_vio_type type, #endif memset(vio, 0, sizeof(*vio)); vio->type= type; + vio->state= VIO_STATE_ACTIVE; vio->mysql_socket= MYSQL_INVALID_SOCKET; mysql_socket_setfd(&vio->mysql_socket, sd); vio->localhost= flags & VIO_LOCALHOST; diff --git a/vio/viosocket.c b/vio/viosocket.c index 002ff274b74..ffdc76a5deb 100644 --- a/vio/viosocket.c +++ b/vio/viosocket.c @@ -288,12 +288,18 @@ size_t vio_write(Vio *vio, const uchar* buf, size_t size) int vio_socket_shutdown(Vio *vio, int how) { - int ret= shutdown(mysql_socket_getfd(vio->mysql_socket), how); + int ret; + DBUG_ENTER("vio_socket_shutdown"); + DBUG_PRINT("enter", ("sd: %d", (int)mysql_socket_getfd(vio->mysql_socket))); + + vio->state= VIO_STATE_SHUTDOWN; + ret= shutdown(mysql_socket_getfd(vio->mysql_socket), how); + #ifdef _WIN32 /* Cancel possible IO in progress (shutdown does not do that on Windows). */ (void) CancelIoEx((HANDLE)mysql_socket_getfd(vio->mysql_socket), NULL); #endif - return ret; + DBUG_RETURN(ret); } @@ -552,7 +558,8 @@ my_bool vio_should_retry(Vio *vio) { DBUG_ENTER("vio_should_retry"); - DBUG_PRINT("info", ("vio_errno: %d", vio_errno(vio))); + DBUG_PRINT("info", ("vio_errno: %d state: %d", + vio_errno(vio), (int) vio->state)); DBUG_RETURN(vio_errno(vio) == SOCKET_EINTR); } @@ -576,28 +583,30 @@ vio_was_timeout(Vio *vio) int vio_close(Vio *vio) { - int r=0; DBUG_ENTER("vio_close"); DBUG_PRINT("enter", ("sd: %d", (int)mysql_socket_getfd(vio->mysql_socket))); if (vio->type != VIO_CLOSED) { + MYSQL_SOCKET mysql_socket= vio->mysql_socket; DBUG_ASSERT(vio->type == VIO_TYPE_TCPIP || - vio->type == VIO_TYPE_SOCKET || - vio->type == VIO_TYPE_SSL); + vio->type == VIO_TYPE_SOCKET || + vio->type == VIO_TYPE_SSL); - DBUG_ASSERT(mysql_socket_getfd(vio->mysql_socket) >= 0); - if (mysql_socket_close(vio->mysql_socket)) - r= -1; + + vio->type= VIO_CLOSED; + vio->state= VIO_STATE_CLOSED; + vio->mysql_socket= MYSQL_INVALID_SOCKET; + + DBUG_ASSERT(mysql_socket_getfd(mysql_socket) >= 0); + if (mysql_socket_close(mysql_socket)) + { + DBUG_PRINT("vio_error", ("close() failed, error: %d",socket_errno)); + /* FIXME: error handling (not critical for MySQL) */ + DBUG_RETURN(-1); + } } - if (r) - { - DBUG_PRINT("vio_error", ("close() failed, error: %d",socket_errno)); - /* FIXME: error handling (not critical for MySQL) */ - } - vio->type= VIO_CLOSED; - vio->mysql_socket= MYSQL_INVALID_SOCKET; - DBUG_RETURN(r); + DBUG_RETURN(0); } @@ -917,8 +926,11 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) my_socket sd= mysql_socket_getfd(vio->mysql_socket); MYSQL_SOCKET_WAIT_VARIABLES(locker, state) /* no ';' */ DBUG_ENTER("vio_io_wait"); - DBUG_PRINT("enter", ("timeout: %d", timeout)); + DBUG_PRINT("enter", ("sd: %d timeout: %d", + (int) mysql_socket_getfd(vio->mysql_socket), + timeout)); + DBUG_ASSERT(vio->state != VIO_STATE_CLOSED); memset(&pfd, 0, sizeof(pfd)); pfd.fd= sd; @@ -948,7 +960,7 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) switch ((ret= poll(&pfd, 1, timeout))) { case -1: - DBUG_PRINT("error", ("poll returned -1")); + DBUG_PRINT("error", ("poll returned -1 errno: %d", vio_errno(vio))); /* On error, -1 is returned. 
*/ break; case 0: @@ -979,6 +991,7 @@ int vio_io_wait(Vio *vio, enum enum_vio_io_event event, int timeout) fd_set readfds, writefds, exceptfds; MYSQL_SOCKET_WAIT_VARIABLES(locker, state) /* no ';' */ DBUG_ENTER("vio_io_wait"); + DBUG_ASSERT(vio->state != VIO_STATE_CLOSED); /* Convert the timeout, in milliseconds, to seconds and microseconds. */ if (timeout >= 0) @@ -1152,6 +1165,7 @@ my_bool vio_is_connected(Vio *vio) { uint bytes= 0; DBUG_ENTER("vio_is_connected"); + DBUG_ASSERT(vio->state != VIO_STATE_CLOSED); /* The first step of detecting an EOF condition is verifying @@ -1159,6 +1173,7 @@ my_bool vio_is_connected(Vio *vio) the EOF. An exceptional condition event and/or errors are interpreted as if there is data to read. */ + if (!vio_io_wait(vio, VIO_IO_EVENT_READ, 0)) DBUG_RETURN(TRUE); diff --git a/wsrep-lib b/wsrep-lib index a5d95f0175f..7d108eb8706 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit a5d95f0175f10b6127ea039c542725f6c4aa5cb9 +Subproject commit 7d108eb8706962abc74705bedfc60cfc3f296ea6
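
For reference, here is a minimal standalone sketch (not part of the patch itself) of the copy-or-NULL semantics that the new spider_maybe_memcpy_string() helper in storage/spider/spd_direct_sql.cc factors out of spider_udf_direct_sql_create_conn(): when the source length is zero the destination pointer is set to NULL, otherwise the bytes are copied into a caller-supplied scratch buffer and the destination points at that buffer. The helper name, buffer names and lengths below are hypothetical stand-ins for illustration, not Spider APIs.

#include <stdio.h>
#include <string.h>

/* Same shape as the patch's helper: copy src into the caller-provided tmp
   buffer and point *dest at it, or set *dest to NULL when src is empty. */
static inline void maybe_memcpy_string(char **dest, const char *src, char *tmp,
                                       unsigned *dest_len, unsigned src_len)
{
  *dest_len= src_len;
  if (src_len)
  {
    *dest= tmp;
    memcpy(*dest, src, src_len);
  }
  else
    *dest= NULL;
}

int main(void)
{
  char tmp_socket[64];              /* hypothetical scratch buffer */
  char *tgt_socket;
  unsigned tgt_socket_length;

  /* Non-empty source: dest points into tmp_socket and receives the bytes. */
  maybe_memcpy_string(&tgt_socket, "/tmp/mysql.sock", tmp_socket,
                      &tgt_socket_length, 15);
  printf("%.*s (%u)\n", (int) tgt_socket_length, tgt_socket, tgt_socket_length);

  /* Empty source: dest becomes NULL and the length is 0, mirroring the
     if/else blocks that the patch collapses into the helper. */
  maybe_memcpy_string(&tgt_socket, NULL, tmp_socket, &tgt_socket_length, 0);
  printf("%s (%u)\n", tgt_socket ? tgt_socket : "(null)", tgt_socket_length);
  return 0;
}

One design point visible in the hunks: the removed per-field blocks used the NULL-for-empty convention only for the optional fields (ssl_ca, ssl_capath, default_file, and so on), while tgt_socket, tgt_db, tgt_username and tgt_password copied unconditionally; the helper applies the convention uniformly to every field it now handles.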