diff --git a/client/mysql.cc b/client/mysql.cc
index 5341fd63fe0..11858ef52f1 100644
--- a/client/mysql.cc
+++ b/client/mysql.cc
@@ -3450,6 +3450,9 @@ static int com_charset(String *, char *line)
                                 MYF(MY_UTF8_IS_UTF8MB3 | MY_WME));
   if (new_cs)
   {
+    if (new_cs->mbminlen > 1)
+      return put_info("Character sets with mbminlen>1 are not supported",
+                      INFO_ERROR, 0);
     charset_info= new_cs;
     mysql_set_character_set(&mysql, charset_info->cs_name.str);
     default_charset= (char *)charset_info->cs_name.str;
diff --git a/client/mysqldump.cc b/client/mysqldump.cc
index 05aa4772be4..2e33b9a10da 100644
--- a/client/mysqldump.cc
+++ b/client/mysqldump.cc
@@ -844,11 +844,10 @@ static void write_footer(FILE *sql_file)
 } /* write_footer */
 
 
-uchar* get_table_key(const char *entry, size_t *length,
-                     my_bool not_used __attribute__((unused)))
+const uchar *get_table_key(const void *entry, size_t *length, my_bool)
 {
-  *length= strlen(entry);
-  return (uchar*) entry;
+  *length= strlen(static_cast<const char*>(entry));
+  return static_cast<const uchar*>(entry);
 }
 
 
@@ -1083,11 +1082,11 @@ static int get_options(int *argc, char ***argv)
   load_defaults_or_exit("my", load_default_groups, argc, argv);
   defaults_argv= *argv;
 
-  if (my_hash_init(PSI_NOT_INSTRUMENTED, &ignore_database, charset_info, 16, 0, 0,
-                   (my_hash_get_key) get_table_key, my_free, 0))
+  if (my_hash_init(PSI_NOT_INSTRUMENTED, &ignore_database, charset_info, 16, 0,
+                   0, get_table_key, my_free, 0))
     return(EX_EOM);
   if (my_hash_init(PSI_NOT_INSTRUMENTED, &ignore_table, charset_info, 16, 0, 0,
-                   (my_hash_get_key) get_table_key, my_free, 0))
+                   get_table_key, my_free, 0))
     return(EX_EOM);
   /* Don't copy internal log tables */
   if (my_hash_insert(&ignore_table, (uchar*) my_strdup(PSI_NOT_INSTRUMENTED,
@@ -1103,7 +1102,7 @@ static int get_options(int *argc, char ***argv)
     return(EX_EOM);
 
   if (my_hash_init(PSI_NOT_INSTRUMENTED, &ignore_data, charset_info, 16, 0, 0,
-                   (my_hash_get_key) get_table_key, my_free, 0))
+                   get_table_key, my_free, 0))
     return(EX_EOM);
 
   if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option)))
@@ -1794,7 +1793,7 @@ static int switch_character_set_results(MYSQL *mysql, const char *cs_name)
   query_length= my_snprintf(query_buffer,
                             sizeof (query_buffer),
                             "SET SESSION character_set_results = '%s'",
-                            (const char *) cs_name);
+                            cs_name);
 
   return mysql_real_query(mysql, query_buffer, (ulong)query_length);
 }
diff --git a/client/mysqlimport.c b/client/mysqlimport.c
index fd688f8cdb2..d3c48ebcbd7 100644
--- a/client/mysqlimport.c
+++ b/client/mysqlimport.c
@@ -640,7 +640,6 @@ error:
   pthread_cond_signal(&count_threshhold);
   pthread_mutex_unlock(&counter_mutex);
   mysql_thread_end();
-  pthread_exit(0);
   return 0;
 }
 
diff --git a/client/mysqltest.cc b/client/mysqltest.cc
index 240fe500735..5a855976426 100644
--- a/client/mysqltest.cc
+++ b/client/mysqltest.cc
@@ -998,7 +998,6 @@ end_thread:
   cn->mysql= 0;
   cn->query_done= 1;
   mysql_thread_end();
-  pthread_exit(0);
   DBUG_RETURN(0);
 }
 
@@ -2408,13 +2407,12 @@ static void strip_parentheses(struct st_command *command)
 
 C_MODE_START
 
-static uchar *get_var_key(const uchar* var, size_t *len,
-                          my_bool __attribute__((unused)) t)
+static const uchar *get_var_key(const void *var, size_t *len, my_bool)
 {
   char* key;
-  key = ((VAR*)var)->name;
-  *len = ((VAR*)var)->name_len;
-  return (uchar*)key;
+  key= (static_cast<const VAR*>(var))->name;
+  *len= (static_cast<const VAR*>(var))->name_len;
+  return reinterpret_cast<const uchar*>(key);
 }
 
 
@@ -12275,8 +12273,10 @@ void replace_dynstr_append_uint(DYNAMIC_STRING *ds, uint val)
   keep_header  If header should not be sorted
 */
 
-static int 
-comp_lines(const char **a, const char **b)
+static int comp_lines(const void *a_, const void *b_)
 {
+  auto a= static_cast<const char* const*>(a_);
+  auto b= static_cast<const char* const*>(b_);
   return (strcmp(*a,*b));
 }
 
diff --git a/cmake/maintainer.cmake b/cmake/maintainer.cmake
index 176bf05e361..dd5a6ef5bab 100644
--- a/cmake/maintainer.cmake
+++ b/cmake/maintainer.cmake
@@ -39,12 +39,23 @@ SET(MY_WARNING_FLAGS
   -Woverloaded-virtual
   -Wvla
   -Wwrite-strings
+  -Wcast-function-type-strict
+  )
+
+# Warning flags that are in testing before moving
+# to MY_WARNING_FLAGS if stable.
+SET(MY_WARNING_FLAGS_NON_FATAL
   )
 
 FOREACH(F ${MY_WARNING_FLAGS})
   MY_CHECK_AND_SET_COMPILER_FLAG(${F} DEBUG RELWITHDEBINFO)
 ENDFOREACH()
 
+FOREACH(F ${MY_WARNING_FLAGS_NON_FATAL})
+  MY_CHECK_AND_SET_COMPILER_FLAG(-W${F} DEBUG RELWITHDEBINFO)
+  MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-error=${F} DEBUG RELWITHDEBINFO)
+ENDFOREACH()
+
 SET(MY_ERROR_FLAGS -Werror -fno-operator-names -Wsuggest-override)
 
 IF(CMAKE_COMPILER_IS_GNUCC AND CMAKE_C_COMPILER_VERSION VERSION_LESS "6.0.0")
diff --git a/extra/innochecksum.cc b/extra/innochecksum.cc
index ec2b2e25db9..d4d15df2d72 100644
--- a/extra/innochecksum.cc
+++ b/extra/innochecksum.cc
@@ -76,6 +76,8 @@ static my_bool do_leaf;
 static my_bool per_page_details;
 static ulint n_merge;
 static ulint physical_page_size; /* Page size in bytes on disk. */
+static ulint extent_size;
+static ulint xdes_size;
 ulong srv_page_size;
 uint32_t srv_page_size_shift;
 /* Current page number (0 based). */
@@ -100,7 +102,7 @@ char* log_filename = NULL;
 FILE* log_file = NULL;
 /* Enabled for log write option. */
 static bool is_log_enabled = false;
-
+static bool skip_freed_pages;
 static byte field_ref_zero_buf[UNIV_PAGE_SIZE_MAX];
 const byte *field_ref_zero = field_ref_zero_buf;
 
@@ -268,6 +270,8 @@ static void init_page_size(const byte* buf)
 		srv_page_size_shift = UNIV_ZIP_SIZE_SHIFT_MIN - 1 + ssize;
 		srv_page_size = 512U << ssize;
 		physical_page_size = srv_page_size;
+		extent_size = FSP_EXTENT_SIZE;
+		xdes_size = XDES_SIZE;
 		return;
 	}
 
@@ -279,6 +283,8 @@ static void init_page_size(const byte* buf)
 
 	srv_page_size = fil_space_t::logical_size(flags);
 	physical_page_size = fil_space_t::physical_size(flags);
+	extent_size = FSP_EXTENT_SIZE;
+	xdes_size = XDES_SIZE;
 }
 
 #ifdef _WIN32
@@ -551,8 +557,8 @@ bool
 is_page_doublewritebuffer(
 	const byte*	page)
 {
-	if ((cur_page_num >= FSP_EXTENT_SIZE)
-	    && (cur_page_num < FSP_EXTENT_SIZE * 3)) {
+	if ((cur_page_num >= extent_size)
+	    && (cur_page_num < extent_size * 3)) {
 		/* page is doublewrite buffer. */
 		return (true);
 	}
@@ -753,8 +759,8 @@ static inline bool is_page_free(const byte *xdes, ulint physical_page_size,
 {
   const byte *des= xdes + XDES_ARR_OFFSET +
-    XDES_SIZE * ((page_no & (physical_page_size - 1)) / FSP_EXTENT_SIZE);
-  return xdes_is_free(des, page_no % FSP_EXTENT_SIZE);
+    xdes_size * ((page_no & (physical_page_size - 1)) / extent_size);
+  return xdes_is_free(des, page_no % extent_size);
 }
 
 /*
@@ -782,6 +788,16 @@ parse_page(
 
 	/* Check whether page is doublewrite buffer. */
 	str = skip_page ? "Double_write_buffer" : "-";
+	page_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
+	if (skip_freed_pages) {
+		const byte *des= xdes + XDES_ARR_OFFSET +
+			xdes_size * ((page_no & (physical_page_size - 1))
+				     / extent_size);
+		if (mach_read_from_4(des) != XDES_FSEG &&
+		    xdes_is_free(des, page_no % extent_size)) {
+			return;
+		}
+	}
 
 	switch (fil_page_get_type(page)) {
 
@@ -1207,6 +1223,9 @@ static struct my_option innochecksum_options[] = {
    &do_leaf, &do_leaf, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
   {"merge", 'm', "leaf page count if merge given number of consecutive pages",
    &n_merge, &n_merge, 0, GET_ULONG, REQUIRED_ARG, 0, 0, (longlong)10L, 0, 1, 0},
+  {"skip-freed-pages", 'r', "skip freed pages for the tablespace",
+   &skip_freed_pages, &skip_freed_pages, 0, GET_BOOL, NO_ARG,
+   0, 0, 0, 0, 0, 0},
   {0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
 };
 
@@ -1216,7 +1235,7 @@ static void usage(void)
   print_version();
   puts(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"));
   printf("InnoDB offline file checksum utility.\n");
-  printf("Usage: %s [-c] [-s ] [-e ] "
+  printf("Usage: %s [-c] [-r] [-s ] [-e ] "
         "[-p ] [-i] [-v] [-a ] [-n] "
         "[-S] [-D ] "
        "[-l ] [-l] [-m ] \n", my_progname);
@@ -1229,8 +1248,8 @@ static void usage(void)
 extern "C" my_bool
 innochecksum_get_one_option(
   const struct my_option	*opt,
-  const char			*argument MY_ATTRIBUTE((unused)),
-  const char *)
+  const char			*IF_DBUG(argument,),
+  const char *)
 {
   switch (opt->id) {
 #ifndef DBUG_OFF
@@ -1255,15 +1274,6 @@ innochecksum_get_one_option(
     my_end(0);
     exit(EXIT_SUCCESS);
     break;
-  case 'n':
-    no_check = true;
-    break;
-  case 'a':
-  case 'S':
-    break;
-  case 'w':
-    do_write = true;
-    break;
   case 'D':
     page_type_dump = true;
     break;
@@ -1310,8 +1320,8 @@ get_options(
 */
 static bool check_encryption(const char* filename, const byte* page)
 {
-  ulint offset = FSP_HEADER_OFFSET + XDES_ARR_OFFSET + XDES_SIZE *
-    physical_page_size / FSP_EXTENT_SIZE;
+  ulint offset = FSP_HEADER_OFFSET + XDES_ARR_OFFSET + xdes_size *
+    physical_page_size / extent_size;
   if (memcmp(page + offset, CRYPT_MAGIC, MAGIC_SZ) != 0) {
     return false;
   }
@@ -1843,7 +1853,7 @@ first_non_zero:
       printf("page " UINT32PF " ", cur_page_num);
     }
 
-    if (page_get_page_no(buf) % physical_page_size == 0) {
+    if (cur_page_num % physical_page_size == 0) {
       memcpy(xdes, buf, physical_page_size);
     }
 
diff --git a/extra/mariabackup/encryption_plugin.cc b/extra/mariabackup/encryption_plugin.cc
index ab0c51400dd..147563f243c 100644
--- a/extra/mariabackup/encryption_plugin.cc
+++ b/extra/mariabackup/encryption_plugin.cc
@@ -203,7 +203,7 @@ const char *encryption_plugin_get_config()
   return encryption_plugin_config.c_str();
 }
 
-extern int finalize_encryption_plugin(st_plugin_int *plugin);
+extern int finalize_encryption_plugin(void *plugin);
 
 
 void encryption_plugin_prepare_init(int argc, char **argv)
diff --git a/extra/mariabackup/xbstream.cc b/extra/mariabackup/xbstream.cc
index 5a54caceb72..23200c19465 100644
--- a/extra/mariabackup/xbstream.cc
+++ b/extra/mariabackup/xbstream.cc
@@ -353,22 +353,23 @@ err:
 }
 
 static
-uchar *
-get_file_entry_key(file_entry_t *entry, size_t *length,
-                   my_bool not_used __attribute__((unused)))
+const uchar *
+get_file_entry_key(const void *entry_, size_t *length, my_bool)
 {
-	*length = entry->pathlen;
-	return (uchar *) entry->path;
+	const file_entry_t *entry= static_cast<const file_entry_t*>(entry_);
+	*length= entry->pathlen;
+	return reinterpret_cast<const uchar*>(entry->path);
 }
 
 static
 void
-file_entry_free(file_entry_t *entry)
+file_entry_free(void *entry_)
 {
-	pthread_mutex_destroy(&entry->mutex);
-	ds_close(entry->file);
-	my_free(entry->path);
-	my_free(entry);
+	file_entry_t *entry= static_cast<file_entry_t*>(entry_);
+	pthread_mutex_destroy(&entry->mutex);
+	ds_close(entry->file);
+	my_free(entry->path);
+	my_free(entry);
 }
 
 static
@@ -540,14 +541,15 @@ mode_extract(int n_threads, int argc __attribute__((unused)),
 	pthread_mutex_t		mutex;
 	int			ret = 0;
 
-	if (my_hash_init(PSI_NOT_INSTRUMENTED, &filehash, &my_charset_bin,
-		START_FILE_HASH_SIZE, 0, 0, (my_hash_get_key) get_file_entry_key,
-		(my_hash_free_key) file_entry_free, MYF(0))) {
-		msg("%s: failed to initialize file hash.", my_progname);
-		return 1;
-	}
+	if (my_hash_init(PSI_NOT_INSTRUMENTED, &filehash, &my_charset_bin,
+			 START_FILE_HASH_SIZE, 0, 0, get_file_entry_key,
+			 file_entry_free, MYF(0)))
+	{
+		msg("%s: failed to initialize file hash.", my_progname);
+		return 1;
+	}
 
-	if (pthread_mutex_init(&mutex, NULL)) {
+	if (pthread_mutex_init(&mutex, NULL)) {
 		msg("%s: failed to initialize mutex.", my_progname);
 		my_hash_free(&filehash);
 		return 1;
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index a92d47b2813..4bb691074c1 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -1186,7 +1186,7 @@ static void backup_file_op_fail(uint32_t space_id, int type,
 				const byte* new_name, ulint new_len)
 {
 	bool fail = false;
-	const static std::string spacename{filename_to_spacename(name, len)};
+	const std::string spacename{filename_to_spacename(name, len)};
 	switch (type) {
 	case FILE_CREATE:
 		msg("DDL tracking : create %u \"%.*s\"", space_id, int(len), name);
@@ -2978,25 +2978,19 @@ my_bool regex_list_check_match(
 	return(FALSE);
 }
 
-static
-my_bool
-find_filter_in_hashtable(
-	const char* name,
-	hash_table_t* table,
-	xb_filter_entry_t** result
-)
+static bool find_filter_in_hashtable(const char *name, hash_table_t *table,
+                                     xb_filter_entry_t **result) noexcept
 {
-	xb_filter_entry_t* found = NULL;
-	const ulint fold = my_crc32c(0, name, strlen(name));
-	HASH_SEARCH(name_hash, table, fold,
-		    xb_filter_entry_t*,
-		    found, (void) 0,
-		    !strcmp(found->name, name));
-
-	if (found && result) {
-		*result = found;
-	}
-	return (found != NULL);
+  const ulint fold= my_crc32c(0, name, strlen(name));
+  if (auto found= table->cell_get(fold)->
+      find(&xb_filter_entry_t::name_hash,[name](xb_filter_entry_t *f)
+           { return !strcmp(f->name, name); }))
+  {
+    if (result)
+      *result= found;
+    return true;
+  }
+  return false;
 }
 
 /************************************************************************
@@ -4478,14 +4472,13 @@ xb_add_filter(
 	const char*	name,	/*!< in: name of table/database */
 	hash_table_t*	hash)	/*!< in/out: hash to insert into */
 {
-	xb_filter_entry_t* entry = xb_new_filter_entry(name);
+  xb_filter_entry_t *entry= xb_new_filter_entry(name);
 
-	if (UNIV_UNLIKELY(!hash->array)) {
-		hash->create(1000);
-	}
-	const ulint fold = my_crc32c(0, entry->name, strlen(entry->name));
-	HASH_INSERT(xb_filter_entry_t, name_hash, hash, fold, entry);
-	return entry;
+  if (UNIV_UNLIKELY(!hash->array))
+    hash->create(1000);
+  hash->cell_get(my_crc32c(0, entry->name, strlen(entry->name)))->
+    append(*entry, &xb_filter_entry_t::name_hash);
+  return entry;
 }
 
 /***********************************************************************
@@ -4523,12 +4516,8 @@ xb_register_filter_entry(
 	hash_table_t* tables_hash
 )
 {
-	const char* p;
-	size_t namelen;
-	xb_filter_entry_t* db_entry = NULL;
-
-	namelen = strlen(name);
-	if ((p = strchr(name, '.')) != NULL) {
+	size_t namelen = strlen(name);
+	if (const char* p = strchr(name, '.')) {
 		char
dbname[NAME_LEN + 1]; xb_validate_name(name, p - name); @@ -4537,18 +4526,20 @@ xb_register_filter_entry( strncpy(dbname, name, p - name); dbname[p - name] = 0; - if (databases_hash && databases_hash->array) { - const ulint fold = my_crc32c(0, dbname, p - name); - HASH_SEARCH(name_hash, databases_hash, - fold, - xb_filter_entry_t*, - db_entry, (void) 0, - !strcmp(db_entry->name, dbname)); + if (UNIV_UNLIKELY(!databases_hash->array)) { + databases_hash->create(1000); } - if (!db_entry) { - db_entry = xb_add_filter(dbname, databases_hash); + + xb_filter_entry_t **prev = + databases_hash->cell_get(my_crc32c(0, name, p - name)) + ->search(&xb_filter_entry_t::name_hash, + [dbname](xb_filter_entry_t* f) + { return f && !strcmp(f->name, dbname); }); + if (!*prev) { + (*prev = xb_new_filter_entry(dbname)) + ->has_tables = TRUE; } - db_entry->has_tables = TRUE; + ut_ad((*prev)->has_tables); xb_add_filter(name, tables_hash); } else { xb_validate_name(name, namelen); @@ -4731,33 +4722,17 @@ xb_filters_init() } } -static -void -xb_filter_hash_free(hash_table_t* hash) +static void xb_filter_hash_free(hash_table_t* hash) { - ulint i; - - /* free the hash elements */ - for (i = 0; i < hash->n_cells; i++) { - xb_filter_entry_t* table; - - table = static_cast - (HASH_GET_FIRST(hash, i)); - - while (table) { - xb_filter_entry_t* prev_table = table; - - table = static_cast - (HASH_GET_NEXT(name_hash, prev_table)); - const ulint fold = my_crc32c(0, prev_table->name, - strlen(prev_table->name)); - HASH_DELETE(xb_filter_entry_t, name_hash, hash, - fold, prev_table); - free(prev_table); - } - } - - hash->free(); + for (ulint i= 0; i < hash->n_cells; i++) + for (auto prev= static_cast(hash->array[i].node); + prev; ) + { + auto next= prev->name_hash; + free(prev); + prev= next; + } + hash->free(); } static void xb_regex_list_free(regex_list_t* list) @@ -6025,8 +6000,8 @@ exit: table->name = ((char*)table) + sizeof(xb_filter_entry_t); memcpy(table->name, dest_space_name, len + 1); const ulint fold = my_crc32c(0, dest_space_name, len); - HASH_INSERT(xb_filter_entry_t, name_hash, &inc_dir_tables_hash, - fold, table); + inc_dir_tables_hash.cell_get(fold)->append( + *table, &xb_filter_entry_t::name_hash); mysql_mutex_lock(&fil_system.mutex); fil_space = fil_space_get_by_name(dest_space_name); @@ -6446,8 +6421,8 @@ static ibool prepare_handle_new_files(const char *data_home_dir, strcpy(table->name, table_name.c_str()); const ulint fold = my_crc32c(0, table->name, table_name.size()); - HASH_INSERT(xb_filter_entry_t, name_hash, &inc_dir_tables_hash, - fold, table); + inc_dir_tables_hash.cell_get(fold)->append( + *table, &xb_filter_entry_t::name_hash); } return TRUE; @@ -6463,29 +6438,15 @@ rm_if_not_found( const char* data_home_dir, /*!name, name)); - - if (!table) { - snprintf(name, FN_REFLEN, "%s/%s/%s", data_home_dir, - db_name, file_name); - return os_file_delete(0, name); - } - - return(TRUE); + char name[FN_REFLEN]; + /* Truncate ".ibd" */ + name[snprintf(name, FN_REFLEN, "%s/%s", db_name, file_name) - 4]= '\0'; + if (find_filter_in_hashtable(name, &inc_dir_tables_hash, nullptr)) + return true; + snprintf(name, FN_REFLEN, "%s/%s/%s", data_home_dir, db_name, file_name); + return os_file_delete(0, name); } /** Function enumerates files in datadir (provided by path) which are matched diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c index c15752aa23e..08f4d8c8652 100644 --- a/extra/my_print_defaults.c +++ b/extra/my_print_defaults.c @@ -49,7 +49,7 @@ static struct my_option my_long_options[] = 
{"debug", '#', "Output debug log", (char**) &default_dbug_option, (char**) &default_dbug_option, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0}, #endif - {"mysqld", 0, "Read the same set of groups that the mysqld binary does.", + {"mysqld", 0, "Read the same set of groups that the mariadbd (previously known as mysqld) binary does.", &opt_mysqld, &opt_mysqld, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, {"mariadbd", 0, "Read the same set of groups that the mariadbd binary does.", &opt_mysqld, &opt_mysqld, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0}, @@ -116,7 +116,7 @@ static int get_options(int *argc,char ***argv) int ho_error; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) - exit(ho_error); + cleanup_and_exit(ho_error); return 0; } @@ -160,7 +160,8 @@ int main(int argc, char **argv) load_default_groups=(char**) my_malloc(PSI_NOT_INSTRUMENTED, nargs*sizeof(char*), MYF(MY_WME)); if (!load_default_groups) - exit(1); + cleanup_and_exit(1); + if (opt_mysqld) { for (; mysqld_groups[i]; i++) diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt index a82d1143649..5452b56db6f 100644 --- a/include/CMakeLists.txt +++ b/include/CMakeLists.txt @@ -42,6 +42,7 @@ SET(HEADERS my_dbug.h m_string.h my_sys.h + my_cmp.h my_xml.h mysql_embed.h my_decimal_limits.h diff --git a/include/hash.h b/include/hash.h index c0a846ac120..c69ab5c5793 100644 --- a/include/hash.h +++ b/include/hash.h @@ -42,7 +42,7 @@ extern "C" { #define HASH_THREAD_SPECIFIC 2 /* Mark allocated memory THREAD_SPECIFIC */ typedef uint32 my_hash_value_type; -typedef uchar *(*my_hash_get_key)(const uchar *,size_t*,my_bool); +typedef const uchar *(*my_hash_get_key)(const void *, size_t *, my_bool); typedef my_hash_value_type (*my_hash_function)(CHARSET_INFO *, const uchar *, size_t); typedef void (*my_hash_free_key)(void *); diff --git a/include/my_cmp.h b/include/my_cmp.h new file mode 100644 index 00000000000..acaa081cf21 --- /dev/null +++ b/include/my_cmp.h @@ -0,0 +1,25 @@ +/* Copyright (c) 2024, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ + +#pragma once + +#ifdef __cplusplus +extern "C" { +#endif +typedef int (*qsort_cmp)(const void *, const void *); +typedef int (*qsort_cmp2)(void *param, const void *a, const void *b); +#ifdef __cplusplus +} +#endif diff --git a/include/my_global.h b/include/my_global.h index 2325d50107d..f58f76b46b4 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -540,10 +540,9 @@ typedef int pbool; /* Mixed prototypes can't take char */ typedef int pshort; /* Mixed prototypes can't take short int */ typedef double pfloat; /* Mixed prototypes can't take float */ #endif -C_MODE_START -typedef int (*qsort_cmp)(const void *,const void *); -typedef int (*qsort_cmp2)(void*, const void *,const void *); -C_MODE_END + +#include + #define qsort_t RETQSORTTYPE /* Broken GCC can't handle typedef !!!! 
*/ #ifdef HAVE_SYS_SOCKET_H #include diff --git a/include/my_rdtsc.h b/include/my_rdtsc.h index 31af5d48ccb..45e91d228f1 100644 --- a/include/my_rdtsc.h +++ b/include/my_rdtsc.h @@ -230,8 +230,8 @@ static inline ulonglong my_timer_cycles(void) ulonglong result; __asm __volatile__("rdtime %0" : "=r"(result)); return result; - } # endif + } #elif defined(HAVE_SYS_TIMES_H) && defined(HAVE_GETHRTIME) #define MY_TIMER_ROUTINE_CYCLES MY_TIMER_ROUTINE_GETHRTIME /* gethrtime may appear as either cycle or nanosecond counter */ diff --git a/include/my_sys.h b/include/my_sys.h index 2255c43b76b..0f9fb4518fe 100644 --- a/include/my_sys.h +++ b/include/my_sys.h @@ -29,6 +29,7 @@ C_MODE_START #include #include #include +#include #include #include @@ -486,8 +487,6 @@ typedef struct st_io_cache /* Used when caching files */ size_t alloced_buffer; } IO_CACHE; -typedef int (*qsort2_cmp)(const void *, const void *, const void *); - typedef void (*my_error_reporter)(enum loglevel level, const char *format, ...) ATTRIBUTE_FORMAT_FPTR(printf, 2, 3); @@ -790,8 +789,8 @@ extern void radixsort_for_str_ptr(uchar* base[], uint number_of_elements, extern qsort_t my_qsort(void *base_ptr, size_t total_elems, size_t size, qsort_cmp cmp); extern qsort_t my_qsort2(void *base_ptr, size_t total_elems, size_t size, - qsort2_cmp cmp, void *cmp_argument); -extern qsort2_cmp get_ptr_compare(size_t); + qsort_cmp2 cmp, void *cmp_argument); +extern qsort_cmp2 get_ptr_compare(size_t); void my_store_ptr(uchar *buff, size_t pack_length, my_off_t pos); my_off_t my_get_ptr(uchar *ptr, size_t pack_length); extern int init_io_cache(IO_CACHE *info,File file,size_t cachesize, diff --git a/include/myisam.h b/include/myisam.h index dd4f9084b00..829a607d28b 100644 --- a/include/myisam.h +++ b/include/myisam.h @@ -373,7 +373,7 @@ typedef struct st_mi_sort_param my_bool fix_datafile, master; my_bool calc_checksum; /* calculate table checksum */ - int (*key_cmp)(struct st_mi_sort_param *, const void *, const void *); + int (*key_cmp)(void *, const void *, const void *); int (*key_read)(struct st_mi_sort_param *,void *); int (*key_write)(struct st_mi_sort_param *, const void *); void (*lock_in_memory)(HA_CHECK *); diff --git a/include/queues.h b/include/queues.h index 9cc7c15a980..44b3c7f4b28 100644 --- a/include/queues.h +++ b/include/queues.h @@ -31,6 +31,8 @@ #ifndef _queues_h #define _queues_h +#include + #ifdef __cplusplus extern "C" { #endif @@ -44,7 +46,7 @@ typedef struct st_queue { uint offset_to_queue_pos; /* If we want to store position in element */ uint auto_extent; int max_at_top; /* Normally 1, set to -1 if queue_top gives max */ - int (*compare)(void *, uchar *,uchar *); + qsort_cmp2 compare; } QUEUE; #define queue_first_element(queue) 1 @@ -58,14 +60,13 @@ typedef struct st_queue { #define queue_set_max_at_top(queue, set_arg) \ (queue)->max_at_top= set_arg ? 
-1 : 1 #define queue_remove_top(queue_arg) queue_remove((queue_arg), queue_first_element(queue_arg)) -typedef int (*queue_compare)(void *,uchar *, uchar *); int init_queue(QUEUE *queue,uint max_elements,uint offset_to_key, - my_bool max_at_top, queue_compare compare, + my_bool max_at_top, qsort_cmp2 compare, void *first_cmp_arg, uint offset_to_queue_pos, uint auto_extent); int reinit_queue(QUEUE *queue,uint max_elements,uint offset_to_key, - my_bool max_at_top, queue_compare compare, + my_bool max_at_top, qsort_cmp2 compare, void *first_cmp_arg, uint offset_to_queue_pos, uint auto_extent); int resize_queue(QUEUE *queue, uint max_elements); diff --git a/mysql-test/include/have_met_timezone.require b/mysql-test/include/have_cet_timezone.require similarity index 100% rename from mysql-test/include/have_met_timezone.require rename to mysql-test/include/have_cet_timezone.require diff --git a/mysql-test/include/search_pattern_in_file.inc b/mysql-test/include/search_pattern_in_file.inc index 05bd2f71e65..479d775e35a 100644 --- a/mysql-test/include/search_pattern_in_file.inc +++ b/mysql-test/include/search_pattern_in_file.inc @@ -15,8 +15,17 @@ # file. By default the search happens from the last CURRENT_TEST: # marker till the end of file (appropriate for searching error logs). # -# Optionally, SEARCH_ABORT can be set to "FOUND" or "NOT FOUND" and this -# will abort if the search result doesn't match the requested one. +# Optionally, SEARCH_ABORT can be specified to abort the search (in error) +# if a specific search result is found. Its value is a regular expression +# (with an implicit start-of-string anchor '^' prepended), and the search +# result that it will match against is either 1) "FOUND ", where is +# the specific number of matches found, or 2) "NOT FOUND". +# +# Optionally, SEARCH_WAIT can be specified to wait for a specific search +# result. Its usage mimics that of SEARCH_ABORT, in that its value is also +# a '^'-prepended regular expression, which will be matched against the same +# search result. The timeout can be set in SEARCH_TIMEOUT, default is 60 +# seconds. # # Optionally, SEARCH_WAIT can be set to "FOUND" or "NOT FOUND", and this # will wait for the condition to occur. The timeout can be set in @@ -91,8 +100,7 @@ perl; @matches= ($content =~ /$search_pattern/gs); $res=@matches ? "FOUND " . scalar(@matches) : "NOT FOUND"; - if (($ENV{SEARCH_WAIT} eq 'FOUND' && $res eq 'NOT FOUND') || - ($ENV{SEARCH_WAIT} eq 'NOT FOUND' && $res =~ m{^FOUND })) { + if ($ENV{SEARCH_WAIT} and not $res =~ /^$ENV{SEARCH_WAIT}/) { if (time() - $start_time < $timeout) { # Millisceond sleep emulated with select select(undef, undef, undef, 0.1); diff --git a/mysql-test/include/wait_for_pattern_in_file.inc b/mysql-test/include/wait_for_pattern_in_file.inc deleted file mode 100644 index a551761012f..00000000000 --- a/mysql-test/include/wait_for_pattern_in_file.inc +++ /dev/null @@ -1,55 +0,0 @@ -# ==== Purpose ==== -# -# Waits until pattern comes into log file or until a timeout is reached. 
-# This is a timeout wrapper for search_pattern_in_file.inc -# -# -# ==== Usage ==== -# -# [--let $timeout= NUMBER in seconds] -# For other parameters, check search_pattern_in_file.inc - ---let $wait_save_keep_include_silent=$keep_include_silent ---let $include_filename= wait_for_pattern_in_file.inc ---source include/begin_include_file.inc ---let $keep_include_silent= 1 - -let $_timeout= $timeout; -if (!$_timeout) -{ - let $_timeout= 10; - if ($VALGRIND_TEST) - { - let $_timeout= 30; - } -} - -let $_timeout_counter=`SELECT $_timeout * 10`; -let SEARCH_ABORT=NOT FOUND; -let $_continue= 1; -disable_abort_on_error; -while ($_continue) -{ - source include/search_pattern_in_file.inc; - if (!$errno) - { - # Found match - let $_continue= 0; - } - if ($errno) - { - dec $_timeout_counter; - if ($_timeout_counter == 1) - { - enable_abort_on_error; - } - if (!$_timeout_counter) - { - let $_continue= 0; - } - } -} -enable_abort_on_error; - ---source include/end_include_file.inc ---let $keep_include_silent=$wait_save_keep_include_silent diff --git a/mysql-test/main/column_compression.result b/mysql-test/main/column_compression.result index e68d9385f1e..15976939f70 100644 --- a/mysql-test/main/column_compression.result +++ b/mysql-test/main/column_compression.result @@ -2943,4 +2943,39 @@ t2 CREATE TABLE `t2` ( `c` text /*M!100301 COMPRESSED*/ DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1, t2; +# +# MDEV-16698 ASAN: heap-use-after-free in field_longstr::uncompress +# +CREATE TABLE t5 ( +i1 smallint(11) unsigned zerofill , +e1 enum('','a') , +b1 mediumblob /*!100301 COMPRESSED*/ , +d2 date NOT NULL DEFAULT '1900-01-01', +pk bigint(20) unsigned NOT NULL DEFAULT 0, +d1 timestamp NULL , +v1 varbinary(3362) , +t1 time NOT NULL DEFAULT '00:00:00' +); +INSERT INTO t5 VALUES +(00000000004,'','ufhjdtv','1992-07-25',1,'2035-06-05 09:02:48','f','13:25:21'), +(00000000001,'','jdt','1998-07-03',2,'1994-05-05 19:59:20','','09:09:19'), +(00000000000,'','d','2007-12-05',3,'0000-00-00 00:00:00','tvs','02:51:15'); +SELECT GROUP_CONCAT(t5.i1, IF(t5.e1, t5.b1, t5.e1), +IF(t5.d1, t5.t1, t5.d1), t5.v1, +IF(t5.i1, t5.i1, t5.d2), t5.v1, t5.b1 +ORDER BY 2,6 SEPARATOR ';') +FROM (t5 JOIN t5 AS tt ON (tt.pk != t5.pk)); +DROP TABLE t5; +create table t1 (pk int not null, b1 blob compressed, v1 varbinary(100))engine=innodb; +insert into t1 values (1,'ufhjdtv','f'),(2,'jdt',''),(3,'d','tvs'); +select group_concat(t1.v1, t1.b1 order by 1) from (t1 join t1 as tt on (tt.pk != t1.pk)); +group_concat(t1.v1, t1.b1 order by 1) +jdt,jdt,fufhjdtv,fufhjdtv,tvsd,tvsd +drop table t1; +CREATE TABLE t1 (a CHAR(1), b TEXT /*!100302 COMPRESSED */); +INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm'); +SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; +f +nc,mmmmmmmmmmd +DROP TABLE t1; # End of 10.5 tests diff --git a/mysql-test/main/column_compression.test b/mysql-test/main/column_compression.test index 01b408f362e..f9b7cd31355 100644 --- a/mysql-test/main/column_compression.test +++ b/mysql-test/main/column_compression.test @@ -482,4 +482,41 @@ create table t2 as select group_concat(c order by 1), concat(c), c from t1; show create table t2; drop table t1, t2; +--echo # +--echo # MDEV-16698 ASAN: heap-use-after-free in field_longstr::uncompress +--echo # + +CREATE TABLE t5 ( + i1 smallint(11) unsigned zerofill , + e1 enum('','a') , + b1 mediumblob /*!100301 COMPRESSED*/ , + d2 date NOT NULL DEFAULT '1900-01-01', + pk bigint(20) unsigned NOT NULL DEFAULT 0, + d1 timestamp NULL , + v1 varbinary(3362) , 
+ t1 time NOT NULL DEFAULT '00:00:00' +); +INSERT INTO t5 VALUES +(00000000004,'','ufhjdtv','1992-07-25',1,'2035-06-05 09:02:48','f','13:25:21'), +(00000000001,'','jdt','1998-07-03',2,'1994-05-05 19:59:20','','09:09:19'), +(00000000000,'','d','2007-12-05',3,'0000-00-00 00:00:00','tvs','02:51:15'); +--disable_result_log +SELECT GROUP_CONCAT(t5.i1, IF(t5.e1, t5.b1, t5.e1), + IF(t5.d1, t5.t1, t5.d1), t5.v1, + IF(t5.i1, t5.i1, t5.d2), t5.v1, t5.b1 + ORDER BY 2,6 SEPARATOR ';') +FROM (t5 JOIN t5 AS tt ON (tt.pk != t5.pk)); +DROP TABLE t5; +--enable_result_log + +create table t1 (pk int not null, b1 blob compressed, v1 varbinary(100))engine=innodb; +insert into t1 values (1,'ufhjdtv','f'),(2,'jdt',''),(3,'d','tvs'); +select group_concat(t1.v1, t1.b1 order by 1) from (t1 join t1 as tt on (tt.pk != t1.pk)); +drop table t1; + +CREATE TABLE t1 (a CHAR(1), b TEXT /*!100302 COMPRESSED */); +INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm'); +SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; +DROP TABLE t1; + --echo # End of 10.5 tests diff --git a/mysql-test/main/ctype_binary.result b/mysql-test/main/ctype_binary.result index 3e99cecfa93..10b7937a188 100644 --- a/mysql-test/main/ctype_binary.result +++ b/mysql-test/main/ctype_binary.result @@ -3455,3 +3455,51 @@ DROP TABLE t1; # # End of 10.5 tests # +# +# Start of 10.11 tests +# +# +# MDEV-28767 Collation "binary" is not accepted for databases, tables, columns +# +CREATE TABLE t1 (a CHAR(1) COLLATE binary); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` binary(1) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE TABLE t1 (a enum('a') CHARACTER SET binary COLLATE binary DEFAULT NULL); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('a') CHARACTER SET binary DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE TABLE t1 (a enum('a') COLLATE binary DEFAULT NULL); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` enum('a') CHARACTER SET binary DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE TABLE t1 (a CHAR(1) CHARACTER SET latin1 COLLATE binary); +ERROR 42000: COLLATION 'binary' is not valid for CHARACTER SET 'latin1' +CREATE TABLE t1 (a CHAR(10)) COLLATE binary; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` binary(10) DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=binary +DROP TABLE t1; +CREATE TABLE t1 (a CHAR(10)) CHARACTER SET latin1 COLLATE binary; +ERROR 42000: COLLATION 'binary' is not valid for CHARACTER SET 'latin1' +CREATE DATABASE db1 COLLATE binary; +SHOW CREATE DATABASE db1; +Database Create Database +db1 CREATE DATABASE `db1` /*!40100 DEFAULT CHARACTER SET binary */ +DROP DATABASE db1; +CREATE DATABASE db1 CHARACTER SET latin1 COLLATE binary; +ERROR 42000: COLLATION 'binary' is not valid for CHARACTER SET 'latin1' +# +# End of 10.11 tests +# diff --git a/mysql-test/main/ctype_binary.test b/mysql-test/main/ctype_binary.test index 756c96fcf60..9537f09e9b1 100644 --- a/mysql-test/main/ctype_binary.test +++ b/mysql-test/main/ctype_binary.test @@ -294,3 +294,50 @@ DROP TABLE t1; --echo # --echo # End of 10.5 tests --echo # + +--echo # +--echo # Start of 10.11 tests +--echo # + +--echo # +--echo # MDEV-28767 Collation "binary" is not accepted for databases, tables, columns +--echo # + +# Column level + +CREATE TABLE t1 (a CHAR(1) COLLATE binary); +SHOW CREATE TABLE t1; +DROP TABLE t1; 
+ +CREATE TABLE t1 (a enum('a') CHARACTER SET binary COLLATE binary DEFAULT NULL); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a enum('a') COLLATE binary DEFAULT NULL); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--error ER_COLLATION_CHARSET_MISMATCH +CREATE TABLE t1 (a CHAR(1) CHARACTER SET latin1 COLLATE binary); + + +# Table level +CREATE TABLE t1 (a CHAR(10)) COLLATE binary; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--error ER_COLLATION_CHARSET_MISMATCH +CREATE TABLE t1 (a CHAR(10)) CHARACTER SET latin1 COLLATE binary; + + +# Database level +CREATE DATABASE db1 COLLATE binary; +SHOW CREATE DATABASE db1; +DROP DATABASE db1; + +--error ER_COLLATION_CHARSET_MISMATCH +CREATE DATABASE db1 CHARACTER SET latin1 COLLATE binary; + +--echo # +--echo # End of 10.11 tests +--echo # diff --git a/mysql-test/main/ctype_filename.result b/mysql-test/main/ctype_filename.result index a8e5093a224..07e8e2d9b77 100644 --- a/mysql-test/main/ctype_filename.result +++ b/mysql-test/main/ctype_filename.result @@ -161,6 +161,21 @@ ERROR 42000: Column length too big for column 'a' (max = 204); use BLOB or TEXT DROP TABLE t1; # End of 10.5 tests # +# Start of 10.6 tests +# +# +# MDEV-35393 ASAN unknown-crash in Field_varstring::reset when inserting NULL value to a table with filename charset +# +SET sql_mode=''; +CREATE TABLE t (a CHAR(205)) ENGINE=MYISAM CHARACTER SET filename; +Warnings: +Note 1246 Converting column 'a' from CHAR to VARCHAR +INSERT INTO t VALUES (NULL); +DROP TABLE t; +# +# End of 10.6 tests +# +# # Start of 10.9 tests # # diff --git a/mysql-test/main/ctype_filename.test b/mysql-test/main/ctype_filename.test index 76c7cbf136c..867eecd5e3e 100644 --- a/mysql-test/main/ctype_filename.test +++ b/mysql-test/main/ctype_filename.test @@ -168,6 +168,24 @@ DROP TABLE t1; --echo # End of 10.5 tests + +--echo # +--echo # Start of 10.6 tests +--echo # + +--echo # +--echo # MDEV-35393 ASAN unknown-crash in Field_varstring::reset when inserting NULL value to a table with filename charset +--echo # + +SET sql_mode=''; +CREATE TABLE t (a CHAR(205)) ENGINE=MYISAM CHARACTER SET filename; +INSERT INTO t VALUES (NULL); +DROP TABLE t; + +--echo # +--echo # End of 10.6 tests +--echo # + --echo # --echo # Start of 10.9 tests --echo # diff --git a/mysql-test/main/ctype_filename_innodb.result b/mysql-test/main/ctype_filename_innodb.result new file mode 100644 index 00000000000..d3535e70a3f --- /dev/null +++ b/mysql-test/main/ctype_filename_innodb.result @@ -0,0 +1,18 @@ +# +# Start of 10.6 tests +# +# +# MDEV-35392 Assertion `!__asan_region_is_poisoned((vo id*) dest,templ->mysql_col_len)' failed in void row_sel_field_store_in_mysql_format_func(byte *, const mysql_row_templ_t *, const byte *, ulint) +# +SET sql_mode=''; +CREATE TABLE t (a CHAR(205)) ENGINE=INNODB CHARACTER SET filename; +Warnings: +Note 1246 Converting column 'a' from CHAR to VARCHAR +INSERT INTO t VALUES (1); +SELECT * FROM t; +a +1 +DROP TABLE t; +# +# End of 10.6 tests +# diff --git a/mysql-test/main/ctype_filename_innodb.test b/mysql-test/main/ctype_filename_innodb.test new file mode 100644 index 00000000000..a0340919c04 --- /dev/null +++ b/mysql-test/main/ctype_filename_innodb.test @@ -0,0 +1,19 @@ +--source include/have_innodb.inc + +--echo # +--echo # Start of 10.6 tests +--echo # + +--echo # +--echo # MDEV-35392 Assertion `!__asan_region_is_poisoned((vo id*) dest,templ->mysql_col_len)' failed in void row_sel_field_store_in_mysql_format_func(byte *, const mysql_row_templ_t *, const byte *, ulint) +--echo # + +SET sql_mode=''; +CREATE 
TABLE t (a CHAR(205)) ENGINE=INNODB CHARACTER SET filename; +INSERT INTO t VALUES (1); +SELECT * FROM t; +DROP TABLE t; + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/main/ctype_ucs.result b/mysql-test/main/ctype_ucs.result index 8b070216a84..de5efd86437 100644 --- a/mysql-test/main/ctype_ucs.result +++ b/mysql-test/main/ctype_ucs.result @@ -6554,3 +6554,23 @@ c1 # # End of 10.5 tests # +# +# Start of 10.6 tests +# +# +# MDEV-23895 Server crash, ASAN heap-buffer-overflow or Valgrind Invalid write in Item_func_rpad::val_str +# +CREATE TABLE t1 (a CHAR(8)); +INSERT INTO t1 VALUES ('foo'),('bar'); +SET collation_connection= ucs2_danish_ci; +SET last_insert_id=0; +SELECT * FROM t1 WHERE RPAD(a, 50, LAST_INSERT_ID()); +a +Warnings: +Warning 1292 Truncated incorrect DOUBLE value: 'foo00000000000000000000000000000000000000000000000' +Warning 1292 Truncated incorrect DOUBLE value: 'bar00000000000000000000000000000000000000000000000' +DROP TABLE t1; +SET names latin1; +# +# End of 10.6 tests +# diff --git a/mysql-test/main/ctype_ucs.test b/mysql-test/main/ctype_ucs.test index 4c5cd69b08d..7b2db58fc64 100644 --- a/mysql-test/main/ctype_ucs.test +++ b/mysql-test/main/ctype_ucs.test @@ -1239,3 +1239,24 @@ SELECT CAST(CONVERT('-9223372036854775808' USING ucs2) AS SIGNED) AS c1; --echo # --echo # End of 10.5 tests --echo # + +--echo # +--echo # Start of 10.6 tests +--echo # + +--echo # +--echo # MDEV-23895 Server crash, ASAN heap-buffer-overflow or Valgrind Invalid write in Item_func_rpad::val_str +--echo # + +CREATE TABLE t1 (a CHAR(8)); +INSERT INTO t1 VALUES ('foo'),('bar'); +SET collation_connection= ucs2_danish_ci; +SET last_insert_id=0; +SELECT * FROM t1 WHERE RPAD(a, 50, LAST_INSERT_ID()); +DROP TABLE t1; +SET names latin1; + + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/main/ctype_utf16.result b/mysql-test/main/ctype_utf16.result index 71c7ee77a1d..f1c79fdf8d8 100644 --- a/mysql-test/main/ctype_utf16.result +++ b/mysql-test/main/ctype_utf16.result @@ -1,5 +1,4 @@ SET TIME_ZONE='+03:00'; -DROP TABLE IF EXISTS t1; # # Start of 5.5 tests # @@ -2910,5 +2909,15 @@ CAST(_utf16 0x0061D83DDE0E0062 AS INT) Warnings: Warning 1292 Truncated incorrect INTEGER value: 'a?b' # +# MDEV-23138 Odd behavior of character_set variables set to utf16 (when allowed) +# +SET character_set_connection=utf16; +SET transaction_isolation= 'READ-COMMITTED'; +SELECT @@transaction_isolation; +@@transaction_isolation +READ-COMMITTED +SET transaction_isolation=DEFAULT; +SET NAMES utf8mb3; +# # End of 10.5 tests # diff --git a/mysql-test/main/ctype_utf16.test b/mysql-test/main/ctype_utf16.test index 9f23f02b0fa..961505cf4a5 100644 --- a/mysql-test/main/ctype_utf16.test +++ b/mysql-test/main/ctype_utf16.test @@ -3,10 +3,6 @@ SET TIME_ZONE='+03:00'; ---disable_warnings -DROP TABLE IF EXISTS t1; ---enable_warnings - --echo # --echo # Start of 5.5 tests --echo # @@ -1029,6 +1025,15 @@ SET NAMES utf8; # surrogate pairs is replaced to a single question mark. 
SELECT CAST(_utf16 0x0061D83DDE0E0062 AS INT); +--echo # +--echo # MDEV-23138 Odd behavior of character_set variables set to utf16 (when allowed) +--echo # + +SET character_set_connection=utf16; +SET transaction_isolation= 'READ-COMMITTED'; +SELECT @@transaction_isolation; +SET transaction_isolation=DEFAULT; +SET NAMES utf8mb3; --echo # --echo # End of 10.5 tests diff --git a/mysql-test/main/ctype_utf8.result b/mysql-test/main/ctype_utf8.result index 926e4006fae..c61bd1c612c 100644 --- a/mysql-test/main/ctype_utf8.result +++ b/mysql-test/main/ctype_utf8.result @@ -11660,8 +11660,8 @@ Warning 1292 Truncated incorrect INTEGER value: 'яяя' # MDEV-28118 Wrong collation of `CAST(.. AS CHAR COLLATE DEFAULT)` # SET NAMES utf8mb3 COLLATE utf8mb3_bin; -SELECT COLLATION(CAST('a' AS CHAR COLLATE DEFAULT)); -COLLATION(CAST('a' AS CHAR COLLATE DEFAULT)) +SELECT COLLATION(CAST('a' AS CHAR COLLATE DEFAULT)) AS c1; +c1 utf8mb3_general_ci CREATE TABLE t1 AS SELECT CAST('a' AS CHAR COLLATE DEFAULT) AS c1; SHOW CREATE TABLE t1; diff --git a/mysql-test/main/ctype_utf8.test b/mysql-test/main/ctype_utf8.test index fb1f271b0ee..bc05671d222 100644 --- a/mysql-test/main/ctype_utf8.test +++ b/mysql-test/main/ctype_utf8.test @@ -2544,7 +2544,7 @@ SELECT CAST(_utf8 'яяя' AS INT); --echo # SET NAMES utf8mb3 COLLATE utf8mb3_bin; -SELECT COLLATION(CAST('a' AS CHAR COLLATE DEFAULT)); +SELECT COLLATION(CAST('a' AS CHAR COLLATE DEFAULT)) AS c1; CREATE TABLE t1 AS SELECT CAST('a' AS CHAR COLLATE DEFAULT) AS c1; SHOW CREATE TABLE t1; DROP TABLE t1; diff --git a/mysql-test/main/func_analyse.result b/mysql-test/main/func_analyse.result index 1dfdc828793..dc56cf5d7d7 100644 --- a/mysql-test/main/func_analyse.result +++ b/mysql-test/main/func_analyse.result @@ -245,5 +245,15 @@ Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_ test.t1.c 1.1 1.3 3 3 0 0 3.0000 NULL ENUM('1.1','1.3') NOT NULL DROP TABLE t1; # +# MDEV-31881 ASAN: unknown-crash in check_ulonglong (sql/sql_analyse.cc) on SELECT ... FROM ... PROCEDURE ANALYSE() +# +CREATE TABLE t (a INT, b CHAR(10)); +INSERT INTO t VALUES (0,'0000000000'); +SELECT * FROM t PROCEDURE ANALYSE(); +Field_name Min_value Max_value Min_length Max_length Empties_or_zeros Nulls Avg_value_or_avg_length Std Optimal_fieldtype +test.t.a 0 0 1 1 1 0 0.0000 0.0000 ENUM('0') NOT NULL +test.t.b 0000000000 0000000000 10 10 0 0 10.0000 NULL ENUM('0000000000') NOT NULL +DROP TABLE t; +# # End of 10.5 tests # diff --git a/mysql-test/main/func_analyse.test b/mysql-test/main/func_analyse.test index 8afc2ab34a6..f5c027a5283 100644 --- a/mysql-test/main/func_analyse.test +++ b/mysql-test/main/func_analyse.test @@ -255,6 +255,15 @@ INSERT INTO t1 VALUES (1.3),(1.1); SELECT * FROM t1 PROCEDURE ANALYSE(); DROP TABLE t1; +--echo # +--echo # MDEV-31881 ASAN: unknown-crash in check_ulonglong (sql/sql_analyse.cc) on SELECT ... FROM ... 
PROCEDURE ANALYSE() +--echo # + +CREATE TABLE t (a INT, b CHAR(10)); +INSERT INTO t VALUES (0,'0000000000'); +SELECT * FROM t PROCEDURE ANALYSE(); +DROP TABLE t; + --echo # --echo # End of 10.5 tests --echo # diff --git a/mysql-test/main/func_equal.result b/mysql-test/main/func_equal.result index f20b259191c..4f876b4f27b 100644 --- a/mysql-test/main/func_equal.result +++ b/mysql-test/main/func_equal.result @@ -43,3 +43,14 @@ a 4828532208463511553 drop table t1; #End of 4.1 tests +# Start of 10.5 tests +# +# MDEV-21029 Incorrect result for expression with the <=> operator and IS NULL +# +CREATE TABLE t0(c0 INT); +INSERT INTO t0 VALUES (1); +SELECT (c0 > (NULL <=> 0)) IS NULL AS c1 FROM t0; +c1 +0 +DROP TABLE t0; +# End of 10.5 tests diff --git a/mysql-test/main/func_equal.test b/mysql-test/main/func_equal.test index f17ebb5bd84..88cab9e95a0 100644 --- a/mysql-test/main/func_equal.test +++ b/mysql-test/main/func_equal.test @@ -44,3 +44,18 @@ select * from t1 where a in ('4828532208463511553'); drop table t1; --echo #End of 4.1 tests + + +--echo # Start of 10.5 tests + +--echo # +--echo # MDEV-21029 Incorrect result for expression with the <=> operator and IS NULL +--echo # + +CREATE TABLE t0(c0 INT); +INSERT INTO t0 VALUES (1); +SELECT (c0 > (NULL <=> 0)) IS NULL AS c1 FROM t0; +DROP TABLE t0; + + +--echo # End of 10.5 tests diff --git a/mysql-test/main/func_extract.result b/mysql-test/main/func_extract.result index 7c1fd5009ca..c68d5defea2 100644 --- a/mysql-test/main/func_extract.result +++ b/mysql-test/main/func_extract.result @@ -1478,5 +1478,55 @@ NULL Warnings: Warning 1292 Incorrect interval value: '42949672955000x1' # +# MDEV-23687 Assertion `is_valid_value_slow()' failed in Datetime::Datetime upon EXTRACT under mode ZERO_DATE_TIME_CAST +# +SET SESSION old_mode='ZERO_DATE_TIME_CAST'; +Warnings: +Warning 1287 'ZERO_DATE_TIME_CAST' is deprecated and will be removed in a future release +SELECT CAST('100000:00:00' AS DATETIME); +CAST('100000:00:00' AS DATETIME) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '100000:00:00' +SELECT EXTRACT(DAY FROM CAST('100000:00:00' AS DATETIME)); +EXTRACT(DAY FROM CAST('100000:00:00' AS DATETIME)) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '100000:00:00' +SELECT CAST('100000:00:00' AS DATE); +CAST('100000:00:00' AS DATE) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '100000:00:00' +SELECT EXTRACT(DAY FROM CAST('100000:00:00' AS DATE)); +EXTRACT(DAY FROM CAST('100000:00:00' AS DATE)) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '100000:00:00' +SET SESSION old_mode=DEFAULT; +# +# MDEV-35489 Assertion `!ldate->neg' or unexpected result upon extracting unit from invalid value +# +SELECT EXTRACT(DAY FROM TIMESTAMP(-177498480000)); +EXTRACT(DAY FROM TIMESTAMP(-177498480000)) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '-177498480000' +SELECT EXTRACT(DAY FROM TIMESTAMP(-177498480001)); +EXTRACT(DAY FROM TIMESTAMP(-177498480001)) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '-177498480001' +SELECT EXTRACT(DAY FROM TIMESTAMP(-200000000000)); +EXTRACT(DAY FROM TIMESTAMP(-200000000000)) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '-200000000000' +SELECT EXTRACT(DAY FROM TIMESTAMP(-221938034527)); +EXTRACT(DAY FROM TIMESTAMP(-221938034527)) +NULL +Warnings: +Warning 1292 Incorrect datetime value: '-221938034527' +# # End of 10.5 tests # diff --git a/mysql-test/main/func_extract.test b/mysql-test/main/func_extract.test index 6167780b9bf..234a65547f1 100644 --- 
a/mysql-test/main/func_extract.test +++ b/mysql-test/main/func_extract.test @@ -517,6 +517,26 @@ DROP FUNCTION select02; SELECT EXTRACT(HOUR_MICROSECOND FROM '42949672955000x1'); +--echo # +--echo # MDEV-23687 Assertion `is_valid_value_slow()' failed in Datetime::Datetime upon EXTRACT under mode ZERO_DATE_TIME_CAST +--echo # + +SET SESSION old_mode='ZERO_DATE_TIME_CAST'; +SELECT CAST('100000:00:00' AS DATETIME); +SELECT EXTRACT(DAY FROM CAST('100000:00:00' AS DATETIME)); +SELECT CAST('100000:00:00' AS DATE); +SELECT EXTRACT(DAY FROM CAST('100000:00:00' AS DATE)); +SET SESSION old_mode=DEFAULT; + +--echo # +--echo # MDEV-35489 Assertion `!ldate->neg' or unexpected result upon extracting unit from invalid value +--echo # + +SELECT EXTRACT(DAY FROM TIMESTAMP(-177498480000)); +SELECT EXTRACT(DAY FROM TIMESTAMP(-177498480001)); +SELECT EXTRACT(DAY FROM TIMESTAMP(-200000000000)); +SELECT EXTRACT(DAY FROM TIMESTAMP(-221938034527)); + --echo # --echo # End of 10.5 tests diff --git a/mysql-test/main/func_hybrid_type.result b/mysql-test/main/func_hybrid_type.result index 1773f05ab24..216473b063f 100644 --- a/mysql-test/main/func_hybrid_type.result +++ b/mysql-test/main/func_hybrid_type.result @@ -4342,5 +4342,27 @@ SELECT * FROM t0 WHERE LEAST(c0, NULL); c0 DROP TABLE t0; # +# MDEV-28001 greatest/least with bigint unsigned maxium has unexpected results compared to 0 +# +CREATE TABLE t1 (a BIGINT UNSIGNED, b BIGINT UNSIGNED); +INSERT INTO t1 VALUES (18446744073709551615,0); +SELECT a,b, LEAST(a,b), GREATEST(a,b) FROM t1; +a b LEAST(a,b) GREATEST(a,b) +18446744073709551615 0 0 18446744073709551615 +DROP TABLE t1; +# # End of 10.5 tests # +# +# Start of 10.6 tests +# +# +# MDEV-20944 Wrong result of LEAST() and ASAN heap-use-after-free in my_strnncollsp_simple / Item::temporal_precision on TIME() +# +SET NAMES latin1; +SELECT LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) ); +LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) ) +0 +# +# End of 10.6 tests +# diff --git a/mysql-test/main/func_hybrid_type.test b/mysql-test/main/func_hybrid_type.test index 2ebfb3cfdf1..e1e347b115f 100644 --- a/mysql-test/main/func_hybrid_type.test +++ b/mysql-test/main/func_hybrid_type.test @@ -1137,7 +1137,30 @@ SELECT * FROM t0 WHERE GREATEST(c0, NULL); SELECT * FROM t0 WHERE LEAST(c0, NULL); DROP TABLE t0; +--echo # +--echo # MDEV-28001 greatest/least with bigint unsigned maxium has unexpected results compared to 0 +--echo # + +CREATE TABLE t1 (a BIGINT UNSIGNED, b BIGINT UNSIGNED); +INSERT INTO t1 VALUES (18446744073709551615,0); +SELECT a,b, LEAST(a,b), GREATEST(a,b) FROM t1; +DROP TABLE t1; --echo # --echo # End of 10.5 tests --echo # + +--echo # +--echo # Start of 10.6 tests +--echo # + +--echo # +--echo # MDEV-20944 Wrong result of LEAST() and ASAN heap-use-after-free in my_strnncollsp_simple / Item::temporal_precision on TIME() +--echo # + +SET NAMES latin1; +SELECT LEAST( CAST( 0 AS CHAR ), OLD_PASSWORD( 1 ) ); + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/main/func_misc.result b/mysql-test/main/func_misc.result index 33038777804..46d6c2e2546 100644 --- a/mysql-test/main/func_misc.result +++ b/mysql-test/main/func_misc.result @@ -1757,5 +1757,18 @@ count(*) 0 drop table t1; # +# MDEV-29462 ASAN: heap-use-after-free in Binary_string::copy on DO CONVERT +# +DO CONVERT (INET_ATON (CAST(LEFT (-1,1) as BINARY (30))) USING utf8); +DO FROM_BASE64(CAST((MID(UUID(),20,64)) AS BINARY (55))); +Warnings: +Warning 1958 Bad base64 data as position 4 +DO FROM_BASE64(CAST((MID(17653,ROW('-688:20:162697', 
(NULL))>=ROW(('*.)$'),(0xc254b6)),1)) AS BINARY (34))); +Warnings: +Warning 1958 Bad base64 data as position 1 +DO FROM_BASE64(CAST(LEFT (-1,1) as BINARY (30))); +Warnings: +Warning 1958 Bad base64 data as position 0 +# # End of 10.5 tests # diff --git a/mysql-test/main/func_misc.test b/mysql-test/main/func_misc.test index 709de4461bd..c51d6386f39 100644 --- a/mysql-test/main/func_misc.test +++ b/mysql-test/main/func_misc.test @@ -1381,6 +1381,16 @@ SELECT r as r1, r FROM cte; select count(*) from t1 where r1!=r; drop table t1; +--echo # +--echo # MDEV-29462 ASAN: heap-use-after-free in Binary_string::copy on DO CONVERT +--echo # + +DO CONVERT (INET_ATON (CAST(LEFT (-1,1) as BINARY (30))) USING utf8); +DO FROM_BASE64(CAST((MID(UUID(),20,64)) AS BINARY (55))); +DO FROM_BASE64(CAST((MID(17653,ROW('-688:20:162697', (NULL))>=ROW(('*.)$'),(0xc254b6)),1)) AS BINARY (34))); +DO FROM_BASE64(CAST(LEFT (-1,1) as BINARY (30))); + --echo # --echo # End of 10.5 tests --echo # + diff --git a/mysql-test/main/func_set.result b/mysql-test/main/func_set.result index 9dc63220d9e..8fccd0d6374 100644 --- a/mysql-test/main/func_set.result +++ b/mysql-test/main/func_set.result @@ -224,3 +224,8 @@ SELECT INTERVAL(1,ROW(1,2)); ERROR 21000: Operand should contain 1 column(s) SELECT INTERVAL(ROW(1,2),1); ERROR 21000: Operand should contain 1 column(s) +# +# MDEV-29184 Assertion `0' in Item_row::illegal_method_call, Type_handler_row::Item_update_null_value, Item::update_null_value +# +SELECT INTERVAL(0, ROW(1,1), 1, 1, 1, 1, 1, 1, 1) AS f; +ERROR 21000: Operand should contain 1 column(s) diff --git a/mysql-test/main/func_set.test b/mysql-test/main/func_set.test index 6df1d8ab97f..13250316a2f 100644 --- a/mysql-test/main/func_set.test +++ b/mysql-test/main/func_set.test @@ -147,3 +147,11 @@ SELECT INTERVAL(ROW(1,1),ROW(1,2)); SELECT INTERVAL(1,ROW(1,2)); --error ER_OPERAND_COLUMNS SELECT INTERVAL(ROW(1,2),1); + + +--echo # +--echo # MDEV-29184 Assertion `0' in Item_row::illegal_method_call, Type_handler_row::Item_update_null_value, Item::update_null_value +--echo # + +--error ER_OPERAND_COLUMNS +SELECT INTERVAL(0, ROW(1,1), 1, 1, 1, 1, 1, 1, 1) AS f; diff --git a/mysql-test/main/func_str.result b/mysql-test/main/func_str.result index 1dd7a7dcdf4..0d777f101d6 100644 --- a/mysql-test/main/func_str.result +++ b/mysql-test/main/func_str.result @@ -744,9 +744,9 @@ encode('abcd','ab') show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `bin(130)` varchar(64) DEFAULT NULL, - `oct(130)` varchar(64) DEFAULT NULL, - `conv(130,16,10)` varchar(64) DEFAULT NULL, + `bin(130)` varchar(65) DEFAULT NULL, + `oct(130)` varchar(65) DEFAULT NULL, + `conv(130,16,10)` varchar(65) DEFAULT NULL, `hex(130)` varchar(16) DEFAULT NULL, `char(130)` varbinary(4) DEFAULT NULL, `format(130,10)` varchar(25) DEFAULT NULL, @@ -5259,7 +5259,7 @@ conv(i,16,2) SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( - `conv(i,16,2)` varchar(64) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL + `conv(i,16,2)` varchar(65) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci DROP TABLE t1,t2; # @@ -5329,9 +5329,94 @@ BIN(c) DROP TABLE t1; DO OCT(-9223372036854775808); # +# MDEV-28652 SUBSTRING(str,pos,len) returns incorrect result in view (returns an empty string) +# +create view v1 as select substring('hello', 1, 4294967295); +select * from v1; +substring('hello', 1, 4294967295) +hello +drop view v1; +# # End of 10.5 tests # # +# Start of 10.6 tests 
+# +# +# MDEV-29552 LEFT and RIGHT with big value for parameter 'len' >0 return empty value in view +# +create view v1 as select left('hello', 4294967295); +select * from v1; +left('hello', 4294967295) +hello +drop view v1; +# +# MDEV-33942 View cuts off the end of string with the utf8 character set in INSERT function +# +SELECT HEX(INSERT(_utf8 0xD18FD18E, 2, 1, 0x20)); +HEX(INSERT(_utf8 0xD18FD18E, 2, 1, 0x20)) +D120D18E +CREATE VIEW v1 AS SELECT HEX(INSERT(_utf8 0xD18FD18E, 2, 1, 0x20)); +SELECT * FROM v1; +HEX(INSERT(_utf8 0xD18FD18E, 2, 1, 0x20)) +D120D18E +DROP VIEW v1; +# +# MDEV-28686 Assertion `0' in Type_handler_string_result::make_sort_key or unexpected result +# +CREATE TABLE t (s DATE, e DATE, PERIOD FOR p(s,e)); +INSERT INTO t (s,e) VALUES ('1970-01-01','1970-01-02'),('1980-01-01','1980-01-02'); +SET sql_mode=''; +SELECT e, GROUP_CONCAT(s) FROM t GROUP BY CONVERT((LPAD(e, -1) AND e) USING utf8); +e GROUP_CONCAT(s) +1970-01-02 1970-01-01,1980-01-01 +DROP TABLE t; +CREATE TABLE t (s DATE, e DATE, PERIOD FOR p(s,e)); +INSERT INTO t (s,e) VALUES ('1970-01-01','1970-01-02'),('1980-01-01','1980-01-02'); +SET sql_mode=''; +SELECT DISTINCT CONVERT((LPAD(e, -1) AND e) USING utf8) FROM t; +CONVERT((LPAD(e, -1) AND e) USING utf8) +NULL +SET sql_mode=STRICT_TRANS_TABLES; +SELECT DISTINCT CONVERT((LPAD(e, -1) AND e) USING utf8) FROM t; +CONVERT((LPAD(e, -1) AND e) USING utf8) +NULL +DROP TABLE t; +SET sql_mode=DEFAULT; +# +# MDEV-32755 Stack-Buffer-Overflow at /mariadb-11.3.0/strings/int2str.c:122 +# +CREATE TABLE t0 ( c55 INT , c38 INT ) ; +INSERT INTO t0 VALUES ( -54 , -27 ) , ( -107 , -62 ) ; +CREATE INDEX i0 ON t0 ( c38 ) ; +INSERT INTO t0 ( c55 ) VALUES ( 43 ) , ( 77 ) ; +SELECT t0 . c55 AS c47 FROM +( SELECT c15 AS c40 FROM +( SELECT c55 AS c15 FROM t0 ) AS t1 +JOIN t0 ON t1.c15 = t1.c15 SOUNDS LIKE + CONV ( -2919286674558440404 , -17 , -2 ) ) AS t2 +JOIN t0 ON t0.c38 = t0.c38; +c47 +DROP TABLE t0; +SELECT CONV(-29223372036854775809, -10, 18446744073709551614); +CONV(-29223372036854775809, -10, 18446744073709551614) +-1000000000000000000000000000000000000000000000000000000000000000 +SELECT CONV(1<<63, 10, -2); +CONV(1<<63, 10, -2) +-1000000000000000000000000000000000000000000000000000000000000000 +# +# MDEV-35416 CONV(1<<63, 10, -2) fails with --view-protocol +# +CREATE TABLE t1 AS SELECT CONV(1<<63, 10, -2) AS c1; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c1` varchar(65) CHARACTER SET utf8mb3 COLLATE utf8mb3_general_ci DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +# +# End of 10.6 tests +# +# # MDEV-25704 Function random_bytes # create table t1 as select random_bytes(100); diff --git a/mysql-test/main/func_str.test b/mysql-test/main/func_str.test index 295bfc37af2..993909de3b1 100644 --- a/mysql-test/main/func_str.test +++ b/mysql-test/main/func_str.test @@ -2373,11 +2373,88 @@ DROP TABLE t1; DO OCT(-9223372036854775808); +--echo # +--echo # MDEV-28652 SUBSTRING(str,pos,len) returns incorrect result in view (returns an empty string) +--echo # + +create view v1 as select substring('hello', 1, 4294967295); +select * from v1; +drop view v1; --echo # --echo # End of 10.5 tests --echo # +--echo # +--echo # Start of 10.6 tests +--echo # + +--echo # +--echo # MDEV-29552 LEFT and RIGHT with big value for parameter 'len' >0 return empty value in view +--echo # + +create view v1 as select left('hello', 4294967295); +select * from v1; +drop view v1; + +--echo # +--echo # MDEV-33942 View cuts off the end of string 
with the utf8 character set in INSERT function +--echo # + +SELECT HEX(INSERT(_utf8 0xD18FD18E, 2, 1, 0x20)); +CREATE VIEW v1 AS SELECT HEX(INSERT(_utf8 0xD18FD18E, 2, 1, 0x20)); +SELECT * FROM v1; +DROP VIEW v1; + +--echo # +--echo # MDEV-28686 Assertion `0' in Type_handler_string_result::make_sort_key or unexpected result +--echo # + +CREATE TABLE t (s DATE, e DATE, PERIOD FOR p(s,e)); +INSERT INTO t (s,e) VALUES ('1970-01-01','1970-01-02'),('1980-01-01','1980-01-02'); +SET sql_mode=''; +SELECT e, GROUP_CONCAT(s) FROM t GROUP BY CONVERT((LPAD(e, -1) AND e) USING utf8); +DROP TABLE t; + +CREATE TABLE t (s DATE, e DATE, PERIOD FOR p(s,e)); +INSERT INTO t (s,e) VALUES ('1970-01-01','1970-01-02'),('1980-01-01','1980-01-02'); +SET sql_mode=''; +SELECT DISTINCT CONVERT((LPAD(e, -1) AND e) USING utf8) FROM t; +SET sql_mode=STRICT_TRANS_TABLES; +SELECT DISTINCT CONVERT((LPAD(e, -1) AND e) USING utf8) FROM t; +DROP TABLE t; +SET sql_mode=DEFAULT; + +--echo # +--echo # MDEV-32755 Stack-Buffer-Overflow at /mariadb-11.3.0/strings/int2str.c:122 +--echo # + +CREATE TABLE t0 ( c55 INT , c38 INT ) ; +INSERT INTO t0 VALUES ( -54 , -27 ) , ( -107 , -62 ) ; +CREATE INDEX i0 ON t0 ( c38 ) ; +INSERT INTO t0 ( c55 ) VALUES ( 43 ) , ( 77 ) ; +SELECT t0 . c55 AS c47 FROM +( SELECT c15 AS c40 FROM + ( SELECT c55 AS c15 FROM t0 ) AS t1 + JOIN t0 ON t1.c15 = t1.c15 SOUNDS LIKE + CONV ( -2919286674558440404 , -17 , -2 ) ) AS t2 + JOIN t0 ON t0.c38 = t0.c38; +DROP TABLE t0; + +SELECT CONV(-29223372036854775809, -10, 18446744073709551614); +SELECT CONV(1<<63, 10, -2); + +--echo # +--echo # MDEV-35416 CONV(1<<63, 10, -2) fails with --view-protocol +--echo # + +CREATE TABLE t1 AS SELECT CONV(1<<63, 10, -2) AS c1; +SHOW CREATE TABLE t1; +DROP TABLE t1; + +--echo # +--echo # End of 10.6 tests +--echo # + --echo # --echo # MDEV-25704 Function random_bytes --echo # diff --git a/mysql-test/main/gis.result b/mysql-test/main/gis.result index be1a6dd18a2..51a8dc94e82 100644 --- a/mysql-test/main/gis.result +++ b/mysql-test/main/gis.result @@ -5453,4 +5453,17 @@ t2 CREATE TABLE `t2` ( `c` polygon DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1, t2; +# +# MDEV-33987 Server crashes at Item_func_as_wkt::val_str_ascii +# +SELECT ST_ASTEXT(BOUNDARY(INET6_ATON('255.255.255.255'))) AS c1; +c1 +NULL +# +# Server crash in DTCollation::set_repertoire_from_charset +# +CREATE TABLE t (f POINT, KEY(f)); +DELETE FROM t WHERE f NOT IN (NULL,'x'); +ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field +DROP TABLE t; # End of 10.5 tests diff --git a/mysql-test/main/gis.test b/mysql-test/main/gis.test index a908c3c6dda..88b9a59409d 100644 --- a/mysql-test/main/gis.test +++ b/mysql-test/main/gis.test @@ -3456,4 +3456,20 @@ create table t2 as select group_concat(c, c order by 1,2), concat(c), c from t1; show create table t2; drop table t1, t2; + +--echo # +--echo # MDEV-33987 Server crashes at Item_func_as_wkt::val_str_ascii +--echo # + +SELECT ST_ASTEXT(BOUNDARY(INET6_ATON('255.255.255.255'))) AS c1; + +--echo # +--echo # Server crash in DTCollation::set_repertoire_from_charset +--echo # + +CREATE TABLE t (f POINT, KEY(f)); +--error ER_CANT_CREATE_GEOMETRY_OBJECT +DELETE FROM t WHERE f NOT IN (NULL,'x'); +DROP TABLE t; + --echo # End of 10.5 tests diff --git a/mysql-test/main/grant_plugin.result b/mysql-test/main/grant_plugin.result index 879ea9075d8..63bdd42853e 100644 --- a/mysql-test/main/grant_plugin.result +++ b/mysql-test/main/grant_plugin.result @@ -4,12 +4,12 @@ 
install soname 'auth_0x0100'; CREATE USER foo@localhost IDENTIFIED VIA auth_0x0100; uninstall plugin auth_0x0100; -select Priv from mysql.global_priv where User = "foo" and host="localhost" -into @priv; +create table t as select Priv from mysql.global_priv where User = "foo" and host="localhost"; SET PASSWORD FOR foo@localhost = "1111"; ERROR HY000: Plugin 'auth_0x0100' is not loaded -select Priv = @priv as "Nothing changed" from mysql.global_priv where User = "foo" and host="localhost"; +select global_priv.Priv = t.Priv as "Nothing changed" from mysql.global_priv join t where User = "foo" and host="localhost"; Nothing changed 1 +drop table t; DROP USER foo@localhost; # End of 10.5 tests diff --git a/mysql-test/main/grant_plugin.test b/mysql-test/main/grant_plugin.test index 92d76040c0b..7043e159e37 100644 --- a/mysql-test/main/grant_plugin.test +++ b/mysql-test/main/grant_plugin.test @@ -13,13 +13,12 @@ install soname 'auth_0x0100'; CREATE USER foo@localhost IDENTIFIED VIA auth_0x0100; uninstall plugin auth_0x0100; ---disable_ps_protocol -select Priv from mysql.global_priv where User = "foo" and host="localhost" -into @priv; ---enable_ps_protocol +create table t as select Priv from mysql.global_priv where User = "foo" and host="localhost"; --error ER_PLUGIN_IS_NOT_LOADED SET PASSWORD FOR foo@localhost = "1111"; -select Priv = @priv as "Nothing changed" from mysql.global_priv where User = "foo" and host="localhost"; +select global_priv.Priv = t.Priv as "Nothing changed" from mysql.global_priv join t where User = "foo" and host="localhost"; + +drop table t; DROP USER foo@localhost; diff --git a/mysql-test/main/loaddata.test b/mysql-test/main/loaddata.test index a625415ad8e..a7a6e2bcfcb 100644 --- a/mysql-test/main/loaddata.test +++ b/mysql-test/main/loaddata.test @@ -843,10 +843,12 @@ CREATE OR REPLACE TABLE t1 ( ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; INSERT INTO t1 VALUES (GeomFromText('POINT(37.646944 -75.761111)')); +--disable_cursor_protocol --disable_ps2_protocol --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR eval SELECT * INTO OUTFILE '$MYSQLTEST_VARDIR/tmp/t1.tsv' FROM t1; --enable_ps2_protocol +--enable_cursor_protocol CREATE OR REPLACE TABLE t2 LIKE t1; @@ -863,10 +865,12 @@ CREATE OR REPLACE TABLE t1 ( ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; INSERT INTO t1 VALUES (GeomFromText('POINT(37.646944 -75.761111)'),"їєі"); +--disable_cursor_protocol --disable_ps2_protocol --replace_result $MYSQLTEST_VARDIR MYSQLTEST_VARDIR eval SELECT * INTO OUTFILE '$MYSQLTEST_VARDIR/tmp/t1.tsv' FROM t1; --enable_ps2_protocol +--enable_cursor_protocol CREATE OR REPLACE TABLE t2 LIKE t1; diff --git a/mysql-test/main/mysql_not_windows.result b/mysql-test/main/mysql_not_windows.result index 644b3a0f4f0..f683d87c678 100644 --- a/mysql-test/main/mysql_not_windows.result +++ b/mysql-test/main/mysql_not_windows.result @@ -11,3 +11,11 @@ X 3 ERROR 1300 (HY000): Invalid utf8mb3 character string: 'test\xF0\x9F\x98\x81 ' ERROR 1300 (HY000): Invalid binary character string: 'test\xF0\x9F\x98\x81 ' +# Start of 10.5 tests +# +# MDEV-34090 Client allows to set character set to utf32 and crashes on the next command +# +SELECT "Success" AS c1; +c1 +Success +# End of 10.5 tests diff --git a/mysql-test/main/mysql_not_windows.test b/mysql-test/main/mysql_not_windows.test index 816160c4f3e..00d8ed8c4f9 100644 --- a/mysql-test/main/mysql_not_windows.test +++ b/mysql-test/main/mysql_not_windows.test @@ -29,3 +29,16 @@ exec $MYSQL test -e "$query"; --exec $MYSQL 
--default-character-set=utf8 -e "select 1" "test😁 " 2>&1 --error 1 --exec $MYSQL --default-character-set=binary -e "select 1" "test😁 " 2>&1 + + +--echo # Start of 10.5 tests + +--echo # +--echo # MDEV-34090 Client allows to set character set to utf32 and crashes on the next command +--echo # + +--error 1 +--exec $MYSQL test -e '\C utf32 ; SELECT 1' +SELECT "Success" AS c1; + +--echo # End of 10.5 tests diff --git a/mysql-test/main/mysql_upgrade.result b/mysql-test/main/mysql_upgrade.result index dd89b55d651..c4e3b61ad41 100644 --- a/mysql-test/main/mysql_upgrade.result +++ b/mysql-test/main/mysql_upgrade.result @@ -2523,7 +2523,7 @@ set global sql_safe_updates=@orig_sql_safe_updates; # MDEV-32043 Remove plugins previously external that are now built in (unix_socket) # INSERT INTO mysql.plugin SELECT 'unix_socket', 'auth_socket.so' - FROM dual WHERE convert(@@version_compile_os using latin1) not in ('Win32', 'Win64', 'Windows'); + FROM information_schema.plugins WHERE plugin_name='unix_socket' AND plugin_library IS NULL; # mariadb-upgrade --force --silent 2>&1 SELECT * FROM mysql.plugin WHERE name='unix_socket'; name dl diff --git a/mysql-test/main/mysql_upgrade.test b/mysql-test/main/mysql_upgrade.test index a17f1869141..e6c46a95978 100644 --- a/mysql-test/main/mysql_upgrade.test +++ b/mysql-test/main/mysql_upgrade.test @@ -522,7 +522,7 @@ set global sql_safe_updates=@orig_sql_safe_updates; --echo # INSERT INTO mysql.plugin SELECT 'unix_socket', 'auth_socket.so' - FROM dual WHERE convert(@@version_compile_os using latin1) not in ('Win32', 'Win64', 'Windows'); + FROM information_schema.plugins WHERE plugin_name='unix_socket' AND plugin_library IS NULL; --echo # mariadb-upgrade --force --silent 2>&1 --exec $MYSQL_UPGRADE --force --silent 2>&1 SELECT * FROM mysql.plugin WHERE name='unix_socket'; diff --git a/mysql-test/main/stack.result b/mysql-test/main/stack.result new file mode 100644 index 00000000000..5444381327d --- /dev/null +++ b/mysql-test/main/stack.result @@ -0,0 +1,46 @@ +# Checking stack usage +# +# basic tests +# +select variable_value > 0 from information_schema.session_status where variable_name="stack_usage"; +variable_value > 0 +1 +# +# Ensure stack usage is same for each iteration when using WITH recursive +# +create table t1 +WITH recursive Fibonacci(PrevN, N, Stack) AS +( +SELECT 0, 1, 0 +UNION ALL +SELECT N, PrevN + N, (select variable_value from information_schema.session_status where variable_name="stack_usage") +FROM Fibonacci +WHERE N < 100000 +) +SELECT PrevN as N, Stack FROM Fibonacci; +select (select stack from t1 where n=2) = (select stack from t1 where N=75025) as c; +c +1 +DROP table t1; +# +# Check stack with recursion +# +set @@max_sp_recursion_depth=20; +create or replace procedure recursion(x int, max int, OUT res int) +begin +select variable_value into res from information_schema.session_status where variable_name="stack_usage"; +if (x < max) then +call recursion(x+1, max, res); +end if; +end; +$$ +call recursion(0,2,@s1); +call recursion(0,3,@s2); +call recursion(0,4,@s3); +select @s1 > 0 && @s2 > 0 && @s3 > 0; +@s1 > 0 && @s2 > 0 && @s3 > 0 +1 +drop procedure recursion; +# +# End of 10.5 tests +# diff --git a/mysql-test/main/stack.test b/mysql-test/main/stack.test new file mode 100644 index 00000000000..2277b0f48ff --- /dev/null +++ b/mysql-test/main/stack.test @@ -0,0 +1,60 @@ +--echo # Checking stack usage + +--echo # +--echo # basic tests +--echo # + +select variable_value > 0 from information_schema.session_status where variable_name="stack_usage"; + 
+ +--echo # +--echo # Ensure stack usage is same for each iteration when using WITH recursive +--echo # + +create table t1 +WITH recursive Fibonacci(PrevN, N, Stack) AS +( + SELECT 0, 1, 0 + UNION ALL + SELECT N, PrevN + N, (select variable_value from information_schema.session_status where variable_name="stack_usage") + FROM Fibonacci + WHERE N < 100000 +) +SELECT PrevN as N, Stack FROM Fibonacci; + +select (select stack from t1 where n=2) = (select stack from t1 where N=75025) as c; +DROP table t1; + +--echo # +--echo # Check stack with recursion +--echo # + +set @@max_sp_recursion_depth=20; +delimiter $$; +create or replace procedure recursion(x int, max int, OUT res int) +begin + select variable_value into res from information_schema.session_status where variable_name="stack_usage"; + if (x < max) then + call recursion(x+1, max, res); + end if; +end; +$$ + +delimiter ;$$ + +call recursion(0,2,@s1); +call recursion(0,3,@s2); +call recursion(0,4,@s3); + +select @s1 > 0 && @s2 > 0 && @s3 > 0; +if (`select @s2-@s1 <> @s3 - @s2`) +{ + echo "Wrong result"; + select @s1 ,@s2, @s3, @s2-@s1, @s3-@s2; +} + +drop procedure recursion; + +--echo # +--echo # End of 10.5 tests +--echo # diff --git a/mysql-test/main/subselect3.inc b/mysql-test/main/subselect3.inc index 154b78c366f..f0bf2cf3609 100644 --- a/mysql-test/main/subselect3.inc +++ b/mysql-test/main/subselect3.inc @@ -1002,7 +1002,7 @@ select count(*) from t0 A, t0 B, t0 C, t0 D where D.a in (select a from t1 E whe --enable_ps2_protocol show status like 'Created_tmp_disk_tables'; --enable_cursor_protocol -set @save_max_heap_table_size=@@max_heap_table_size; +set @@max_heap_table_size=@save_max_heap_table_size; set @@optimizer_switch=@save_optimizer_switch; drop table t0, t1; diff --git a/mysql-test/main/subselect3.result b/mysql-test/main/subselect3.result index 550989edf1a..82fc202ce8d 100644 --- a/mysql-test/main/subselect3.result +++ b/mysql-test/main/subselect3.result @@ -1179,7 +1179,7 @@ count(*) show status like 'Created_tmp_disk_tables'; Variable_name Value Created_tmp_disk_tables 1 -set @save_max_heap_table_size=@@max_heap_table_size; +set @@max_heap_table_size=@save_max_heap_table_size; set @@optimizer_switch=@save_optimizer_switch; drop table t0, t1; create table t0 (a int); diff --git a/mysql-test/main/subselect3_jcl6.result b/mysql-test/main/subselect3_jcl6.result index 542fd71a6d8..3e1f471ace4 100644 --- a/mysql-test/main/subselect3_jcl6.result +++ b/mysql-test/main/subselect3_jcl6.result @@ -1182,7 +1182,7 @@ count(*) show status like 'Created_tmp_disk_tables'; Variable_name Value Created_tmp_disk_tables 1 -set @save_max_heap_table_size=@@max_heap_table_size; +set @@max_heap_table_size=@save_max_heap_table_size; set @@optimizer_switch=@save_optimizer_switch; drop table t0, t1; create table t0 (a int); diff --git a/mysql-test/main/subselect_mat.result b/mysql-test/main/subselect_mat.result index 653989b0213..4ba2b0d7a42 100644 --- a/mysql-test/main/subselect_mat.result +++ b/mysql-test/main/subselect_mat.result @@ -3067,3 +3067,391 @@ UNION ALL 1 1 drop table t1, t2; +# +# MDEV-34665: Simplify IN predicate processing for NULL-aware +# materialization involving only one column +set @save_optimizer_switch=@@optimizer_switch; +set @@optimizer_switch = "materialization=on,in_to_exists=off,semijoin=off"; +create table t1 (a int); +create table t2 (b int); +insert into t1 values (null), (1), (2), (3); +# Query against empty t2 +select a, a in (select b from t2) from t1; +a a in (select b from t2) +1 0 +2 0 +3 0 +NULL 0 +# Insert some 
not-NULL values +insert into t2 values (3), (4); +select a, a in (select b from t2) from t1; +a a in (select b from t2) +1 0 +2 0 +3 1 +NULL NULL +# Ensure the correct strategy is tested +analyze format=json select a, a in (select b from t2) from t1; +ANALYZE +{ + "query_optimization": { + "r_total_time_ms": "REPLACED" + }, + "query_block": { + "select_id": 1, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 4, + "r_rows": 4, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ], + "subqueries": [ + { + "materialization": { + "r_strategy": "null-aware index_lookup", + "r_loops": 4, + "r_index_lookups": 3, + "r_partial_matches": 1, + "query_block": { + "select_id": 2, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 2, + "r_rows": 2, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ] + } + } + } + ] + } +} +# Insert NULL value (so there are both NULLs and and not-NULL values) +insert into t2 values (null); +select a, a in (select b from t2) from t1; +a a in (select b from t2) +1 NULL +2 NULL +3 1 +NULL NULL +analyze format=json select a, a in (select b from t2) from t1; +ANALYZE +{ + "query_optimization": { + "r_total_time_ms": "REPLACED" + }, + "query_block": { + "select_id": 1, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 4, + "r_rows": 4, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ], + "subqueries": [ + { + "materialization": { + "r_strategy": "null-aware index_lookup", + "r_loops": 4, + "r_index_lookups": 3, + "query_block": { + "select_id": 2, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 3, + "r_rows": 3, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ] + } + } + } + ] + } +} +delete from t2; +# Insert NULL values only +insert into t2 values (null), (null); +select a, a in (select b from t2) from t1; +a a in (select b from t2) +1 NULL +2 NULL +3 NULL +NULL NULL +analyze format=json select a, a in (select b from t2) from t1; +ANALYZE +{ + "query_optimization": { + "r_total_time_ms": "REPLACED" + }, + "query_block": { + "select_id": 1, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 4, + "r_rows": 4, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ], + "subqueries": [ + { + "materialization": { + "r_strategy": "return NULL", + "query_block": { + "select_id": 2, + "cost": "REPLACED", + 
"r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 2, + "r_rows": 2, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ] + } + } + } + ] + } +} +# Test UPDATE +insert into t2 values (3), (4); +update t1 set a=a+1 where a not in (select b from t2); +# Nothing updated due to NULLs on both sides of IN +select * from t1; +a +NULL +1 +2 +3 +# Remove NULLs from the right side +delete from t2 where b is null; +update t1 set a=a+1 where a not in (select b from t2); +# Now some rows are updated: +select * from t1; +a +NULL +2 +3 +3 +analyze format=json update t1 set a=a+1 where a not in (select b from t2); +ANALYZE +{ + "query_optimization": { + "r_total_time_ms": "REPLACED" + }, + "query_block": { + "select_id": 1, + "r_total_time_ms": "REPLACED", + "table": { + "update": 1, + "table_name": "t1", + "access_type": "ALL", + "rows": 4, + "r_rows": 4, + "r_filtered": 25, + "r_total_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "attached_condition": "!((t1.a,t1.a in (subquery#2)))" + }, + "subqueries": [ + { + "materialization": { + "r_strategy": "null-aware index_lookup", + "r_loops": 4, + "r_index_lookups": 3, + "r_partial_matches": 1, + "query_block": { + "select_id": 2, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 2, + "r_rows": 2, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ] + } + } + } + ] + } +} +# Test DELETE +# Restore initial data-set: +delete from t1; +insert into t1 values (null), (1), (2), (3); +# Add some NULL values to the right side of IN +insert into t2 values (null), (null); +delete from t1 where a not in (select b from t2); +# Nothing deleted due to NULLs on both sides of IN +select * from t1; +a +NULL +1 +2 +3 +# Remove NULLs from the right side +delete from t2 where b is null; +delete from t1 where a not in (select b from t2); +# Now some rows are deleted: +select * from t1; +a +NULL +3 +analyze format=json delete from t1 where a not in (select b from t2); +ANALYZE +{ + "query_optimization": { + "r_total_time_ms": "REPLACED" + }, + "query_block": { + "select_id": 1, + "r_total_time_ms": "REPLACED", + "table": { + "delete": 1, + "table_name": "t1", + "access_type": "ALL", + "rows": 2, + "r_rows": 2, + "r_filtered": 0, + "r_total_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "attached_condition": "!((t1.a,t1.a in (subquery#2)))" + }, + "subqueries": [ + { + "materialization": { + "r_strategy": "null-aware index_lookup", + "r_loops": 2, + "r_index_lookups": 1, + "r_partial_matches": 1, + "query_block": { + "select_id": 2, + "cost": "REPLACED", + "r_loops": 1, + "r_total_time_ms": "REPLACED", + "nested_loop": [ + { + "table": { + "table_name": "t2", + "access_type": "ALL", + "loops": 1, + "r_loops": 1, + "rows": 2, + "r_rows": 2, + "cost": "REPLACED", + "r_table_time_ms": "REPLACED", + "r_other_time_ms": "REPLACED", + "r_engine_stats": REPLACED, + "filtered": 100, + "r_filtered": 100 + } + } + ] + } + } + } + ] + } +} +drop table t1, t2; +set @@optimizer_switch=@save_optimizer_switch; diff --git a/mysql-test/main/subselect_mat.test b/mysql-test/main/subselect_mat.test 
index 7528e930a59..105421d34e3 100644 --- a/mysql-test/main/subselect_mat.test +++ b/mysql-test/main/subselect_mat.test @@ -297,3 +297,76 @@ UNION ALL ; drop table t1, t2; + +--echo # +--echo # MDEV-34665: Simplify IN predicate processing for NULL-aware +--echo # materialization involving only one column + +set @save_optimizer_switch=@@optimizer_switch; +set @@optimizer_switch = "materialization=on,in_to_exists=off,semijoin=off"; + +create table t1 (a int); +create table t2 (b int); +insert into t1 values (null), (1), (2), (3); + +--echo # Query against empty t2 +--sorted_result +select a, a in (select b from t2) from t1; + +--echo # Insert some not-NULL values +insert into t2 values (3), (4); +--sorted_result +select a, a in (select b from t2) from t1; +--echo # Ensure the correct strategy is tested +--source include/analyze-format.inc +analyze format=json select a, a in (select b from t2) from t1; + +--echo # Insert NULL value (so there are both NULLs and and not-NULL values) +insert into t2 values (null); +--sorted_result +select a, a in (select b from t2) from t1; +--source include/analyze-format.inc +analyze format=json select a, a in (select b from t2) from t1; + +delete from t2; +--echo # Insert NULL values only +insert into t2 values (null), (null); +--sorted_result +select a, a in (select b from t2) from t1; +--source include/analyze-format.inc +analyze format=json select a, a in (select b from t2) from t1; + +--echo # Test UPDATE +insert into t2 values (3), (4); +update t1 set a=a+1 where a not in (select b from t2); +--echo # Nothing updated due to NULLs on both sides of IN +select * from t1; +--echo # Remove NULLs from the right side +delete from t2 where b is null; +update t1 set a=a+1 where a not in (select b from t2); +--echo # Now some rows are updated: +select * from t1; +--source include/analyze-format.inc +analyze format=json update t1 set a=a+1 where a not in (select b from t2); + +--echo # Test DELETE +--echo # Restore initial data-set: +delete from t1; +insert into t1 values (null), (1), (2), (3); +--echo # Add some NULL values to the right side of IN +insert into t2 values (null), (null); +delete from t1 where a not in (select b from t2); +--echo # Nothing deleted due to NULLs on both sides of IN +select * from t1; +--echo # Remove NULLs from the right side +delete from t2 where b is null; +delete from t1 where a not in (select b from t2); +--echo # Now some rows are deleted: +select * from t1; +--source include/analyze-format.inc +analyze format=json delete from t1 where a not in (select b from t2); + +drop table t1, t2; + +set @@optimizer_switch=@save_optimizer_switch; + diff --git a/mysql-test/main/subselect_mat_analyze_json.result b/mysql-test/main/subselect_mat_analyze_json.result index 7530c680ce0..b6255eeccb7 100644 --- a/mysql-test/main/subselect_mat_analyze_json.result +++ b/mysql-test/main/subselect_mat_analyze_json.result @@ -113,8 +113,6 @@ ANALYZE } } # "Partial match" is used due to NOT IN -# Force rowid-merge partial partial matching -set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=off"; analyze format=json select * from t1 where a not in (select b from t2); ANALYZE { @@ -148,77 +146,7 @@ ANALYZE "subqueries": [ { "materialization": { - "r_strategy": "index_lookup;array merge for partial match", - "r_loops": 4, - "r_index_lookups": 3, - "r_partial_matches": 1, - "r_partial_match_buffer_size": "REPLACED", - "r_partial_match_array_sizes": ["2"], - "query_block": { - "select_id": 2, - "cost": "REPLACED", - "r_loops": 1, - 
"r_total_time_ms": "REPLACED", - "nested_loop": [ - { - "table": { - "table_name": "t2", - "access_type": "ALL", - "loops": 1, - "r_loops": 1, - "rows": 2, - "r_rows": 2, - "cost": "REPLACED", - "r_table_time_ms": "REPLACED", - "r_other_time_ms": "REPLACED", - "r_engine_stats": REPLACED, - "filtered": 100, - "r_filtered": 100 - } - } - ] - } - } - } - ] - } -} -# Force table scan partial matching -set @@optimizer_switch="partial_match_rowid_merge=off,partial_match_table_scan=on"; -analyze format=json select * from t1 where a not in (select b from t2); -ANALYZE -{ - "query_optimization": { - "r_total_time_ms": "REPLACED" - }, - "query_block": { - "select_id": 1, - "cost": "REPLACED", - "r_loops": 1, - "r_total_time_ms": "REPLACED", - "nested_loop": [ - { - "table": { - "table_name": "t1", - "access_type": "ALL", - "loops": 1, - "r_loops": 1, - "rows": 4, - "r_rows": 4, - "cost": "REPLACED", - "r_table_time_ms": "REPLACED", - "r_other_time_ms": "REPLACED", - "r_engine_stats": REPLACED, - "filtered": 100, - "r_filtered": 50, - "attached_condition": "!(t1.a,t1.a in (subquery#2))" - } - } - ], - "subqueries": [ - { - "materialization": { - "r_strategy": "index_lookup;full scan for partial match", + "r_strategy": "null-aware index_lookup", "r_loops": 4, "r_index_lookups": 3, "r_partial_matches": 1, @@ -293,7 +221,7 @@ ANALYZE "subqueries": [ { "materialization": { - "r_strategy": "index_lookup;full scan for partial match", + "r_strategy": "null-aware index_lookup", "r_loops": 4, "r_index_lookups": 3, "r_partial_matches": 1, @@ -328,7 +256,6 @@ ANALYZE } } } -set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=off"; analyze format=json select a from t1 group by a not in (select b from t2); ANALYZE { @@ -370,12 +297,10 @@ ANALYZE "subqueries": [ { "materialization": { - "r_strategy": "index_lookup;array merge for partial match", + "r_strategy": "null-aware index_lookup", "r_loops": 4, "r_index_lookups": 3, "r_partial_matches": 1, - "r_partial_match_buffer_size": "REPLACED", - "r_partial_match_array_sizes": ["2"], "query_block": { "select_id": 2, "cost": "REPLACED", @@ -407,7 +332,6 @@ ANALYZE } } } -set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=on"; # Subselect in ORDER BY analyze format=json select a from t1 order by a in (select b from t2); ANALYZE @@ -453,7 +377,7 @@ ANALYZE "subqueries": [ { "materialization": { - "r_strategy": "index_lookup;full scan for partial match", + "r_strategy": "null-aware index_lookup", "r_loops": 4, "r_index_lookups": 3, "r_partial_matches": 1, @@ -520,7 +444,7 @@ ANALYZE "subqueries": [ { "materialization": { - "r_strategy": "index_lookup;full scan for partial match", + "r_strategy": "null-aware index_lookup", "r_loops": 4, "r_index_lookups": 3, "r_partial_matches": 1, @@ -714,7 +638,7 @@ ANALYZE "subqueries": [ { "materialization": { - "r_strategy": "index_lookup;full scan for partial match", + "r_strategy": "null-aware index_lookup", "r_loops": 4, "r_index_lookups": 3, "query_block": { @@ -834,6 +758,7 @@ create table t1 (a1 char(1), a2 char(1)); insert into t1 values (null, 'b'); create table t2 (b1 char(1), b2 char(2)); insert into t2 values ('a','b'), ('c', 'd'), (null, 'e'), ('f', 'g'); +# Force rowid-merge partial matching set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=off"; explain format=json select * from t1 where (a1, a2) not in (select b1, b2 from t2); EXPLAIN @@ -937,6 +862,7 @@ ANALYZE ] } } +# Force table scan partial matching set 
@@optimizer_switch="partial_match_rowid_merge=off,partial_match_table_scan=on"; analyze format=json select * from t1 where (a1, a2) not in (select b1, b2 from t2); ANALYZE diff --git a/mysql-test/main/subselect_mat_analyze_json.test b/mysql-test/main/subselect_mat_analyze_json.test index c727cc91a33..7e517e28071 100644 --- a/mysql-test/main/subselect_mat_analyze_json.test +++ b/mysql-test/main/subselect_mat_analyze_json.test @@ -14,13 +14,6 @@ explain format=json select * from t1 where a in (select b from t2); analyze format=json select * from t1 where a in (select b from t2); --echo # "Partial match" is used due to NOT IN ---echo # Force rowid-merge partial partial matching -set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=off"; ---source include/analyze-format.inc -analyze format=json select * from t1 where a not in (select b from t2); - ---echo # Force table scan partial matching -set @@optimizer_switch="partial_match_rowid_merge=off,partial_match_table_scan=on"; --source include/analyze-format.inc analyze format=json select * from t1 where a not in (select b from t2); @@ -28,11 +21,9 @@ analyze format=json select * from t1 where a not in (select b from t2); --source include/analyze-format.inc analyze format=json select a from t1 group by a in (select b from t2); -set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=off"; --source include/analyze-format.inc analyze format=json select a from t1 group by a not in (select b from t2); -set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=on"; --echo # Subselect in ORDER BY --source include/analyze-format.inc analyze format=json select a from t1 order by a in (select b from t2); @@ -69,12 +60,14 @@ insert into t1 values (null, 'b'); create table t2 (b1 char(1), b2 char(2)); insert into t2 values ('a','b'), ('c', 'd'), (null, 'e'), ('f', 'g'); +--echo # Force rowid-merge partial matching set @@optimizer_switch="partial_match_rowid_merge=on,partial_match_table_scan=off"; --source include/explain-no-costs.inc explain format=json select * from t1 where (a1, a2) not in (select b1, b2 from t2); --source include/analyze-format.inc analyze format=json select * from t1 where (a1, a2) not in (select b1, b2 from t2); +--echo # Force table scan partial matching set @@optimizer_switch="partial_match_rowid_merge=off,partial_match_table_scan=on"; --source include/analyze-format.inc analyze format=json select * from t1 where (a1, a2) not in (select b1, b2 from t2); diff --git a/mysql-test/main/timezone.opt b/mysql-test/main/timezone.opt new file mode 100644 index 00000000000..7b6415fb2df --- /dev/null +++ b/mysql-test/main/timezone.opt @@ -0,0 +1 @@ +--timezone=Europe/Budapest diff --git a/mysql-test/main/timezone.result b/mysql-test/main/timezone.result index 2a099e90bad..a75bf9ebc00 100644 --- a/mysql-test/main/timezone.result +++ b/mysql-test/main/timezone.result @@ -1,6 +1,6 @@ show variables like "system_time_zone"; Variable_name Value -system_time_zone MET +system_time_zone CET # # Test unix timestamp # @@ -82,7 +82,7 @@ alter table mysql.time_zone_transition_type add primary key (time_zone_id,transi SET @@time_zone= default; SELECT DATE_FORMAT('2009-11-01 22:23:00', '%z %Z') AS current_timezone; current_timezone -+0100 MET ++0100 CET SELECT DATE_FORMAT('2008-06-04 02:23:00', '%z %Z') AS current_timezone; current_timezone -+0200 MEST ++0200 CEST diff --git a/mysql-test/main/timezone.test b/mysql-test/main/timezone.test index 50e062b45f1..82bacc1c142 100644 --- 
a/mysql-test/main/timezone.test +++ b/mysql-test/main/timezone.test @@ -1,14 +1,14 @@ # # Test of SYSTEM time zone handling ( for my_system_gmt_sec()). -# This script must be run with TZ=MET +# This script must have zonedata for CET +-- require include/have_cet_timezone.require --- require include/have_met_timezone.require disable_query_log; select FROM_UNIXTIME(24*3600); enable_query_log; # The following is because of daylight saving time ---replace_result MEST MET +--replace_result MEST CET MET CET show variables like "system_time_zone"; --echo # diff --git a/mysql-test/main/type_float.result b/mysql-test/main/type_float.result index 4f49025e437..cceb14fc98b 100644 --- a/mysql-test/main/type_float.result +++ b/mysql-test/main/type_float.result @@ -1287,6 +1287,19 @@ h varchar(16) YES NULL DROP TABLE t1, t2; SET sql_mode=DEFAULT; # +# MDEV-25174 DOUBLE columns do not accept large hex hybrids +# +CREATE TABLE t1 (a DOUBLE); +INSERT INTO t1 VALUES (0x7FFFFFFFFFFFFFFF); +INSERT INTO t1 VALUES (0x8000000000000000); +INSERT INTO t1 VALUES (0xFFFFFFFFFFFFFFFF); +SELECT * FROM t1 ORDER BY a; +a +9.223372036854776e18 +9.223372036854776e18 +1.8446744073709552e19 +DROP TABLE t1; +# # End of 10.5 tests # # diff --git a/mysql-test/main/type_float.test b/mysql-test/main/type_float.test index 5aef453dd08..e728d4e1ed7 100644 --- a/mysql-test/main/type_float.test +++ b/mysql-test/main/type_float.test @@ -798,6 +798,17 @@ DROP TABLE t1, t2; SET sql_mode=DEFAULT; +--echo # +--echo # MDEV-25174 DOUBLE columns do not accept large hex hybrids +--echo # + +CREATE TABLE t1 (a DOUBLE); +INSERT INTO t1 VALUES (0x7FFFFFFFFFFFFFFF); +INSERT INTO t1 VALUES (0x8000000000000000); +INSERT INTO t1 VALUES (0xFFFFFFFFFFFFFFFF); +SELECT * FROM t1 ORDER BY a; +DROP TABLE t1; + --echo # --echo # End of 10.5 tests --echo # diff --git a/mysql-test/main/type_newdecimal.result b/mysql-test/main/type_newdecimal.result index bb06c4a8424..0e5280636a1 100644 --- a/mysql-test/main/type_newdecimal.result +++ b/mysql-test/main/type_newdecimal.result @@ -2877,6 +2877,19 @@ h varchar(16) YES NULL DROP TABLE t1, t2; SET sql_mode=DEFAULT; # +# MDEV-25174 DOUBLE columns do not accept large hex hybrids +# +CREATE TABLE t1 (a DECIMAL(30,0)); +INSERT INTO t1 VALUES (0x7FFFFFFFFFFFFFFF); +INSERT INTO t1 VALUES (0x8000000000000000); +INSERT INTO t1 VALUES (0xFFFFFFFFFFFFFFFF); +SELECT * FROM t1 ORDER BY a; +a +9223372036854775807 +9223372036854775808 +18446744073709551615 +DROP TABLE t1; +# # End of 10.5 tests # # diff --git a/mysql-test/main/type_newdecimal.test b/mysql-test/main/type_newdecimal.test index 8de814c64b9..34273a19a67 100644 --- a/mysql-test/main/type_newdecimal.test +++ b/mysql-test/main/type_newdecimal.test @@ -2052,6 +2052,17 @@ DROP TABLE t1, t2; SET sql_mode=DEFAULT; +--echo # +--echo # MDEV-25174 DOUBLE columns do not accept large hex hybrids +--echo # + +CREATE TABLE t1 (a DECIMAL(30,0)); +INSERT INTO t1 VALUES (0x7FFFFFFFFFFFFFFF); +INSERT INTO t1 VALUES (0x8000000000000000); +INSERT INTO t1 VALUES (0xFFFFFFFFFFFFFFFF); +SELECT * FROM t1 ORDER BY a; +DROP TABLE t1; + --echo # --echo # End of 10.5 tests --echo # diff --git a/mysql-test/suite/compat/oracle/r/func_concat.result b/mysql-test/suite/compat/oracle/r/func_concat.result index 17ca4be078a..b96b9d7b97b 100644 --- a/mysql-test/suite/compat/oracle/r/func_concat.result +++ b/mysql-test/suite/compat/oracle/r/func_concat.result @@ -391,3 +391,16 @@ Warnings: Note 1003 select "test"."t1"."c1" AS "c1","test"."t1"."c2" AS "c2","test"."t1"."c1" like 
concat("test"."t1"."c2",'_') AS "c1 LIKE c2||'_'" from "test"."t1" order by "test"."t1"."ord" DROP VIEW v1; DROP TABLE t1; +# +# Start of 10.6 tests +# +# +# MDEV-31910 ASAN memcpy-param-overlap upon CONCAT in ORACLE mode +# +SET SQL_MODE= ORACLE; +SELECT CONCAT(SUBSTR(123 FROM 2)); +CONCAT(SUBSTR(123 FROM 2)) +23 +# +# End of 10.6 tests +# diff --git a/mysql-test/suite/compat/oracle/t/func_concat.test b/mysql-test/suite/compat/oracle/t/func_concat.test index 5a613242e87..075ab752968 100644 --- a/mysql-test/suite/compat/oracle/t/func_concat.test +++ b/mysql-test/suite/compat/oracle/t/func_concat.test @@ -182,3 +182,19 @@ EXPLAIN EXTENDED SELECT * FROM v1; DROP VIEW v1; DROP TABLE t1; + + +--echo # +--echo # Start of 10.6 tests +--echo # + +--echo # +--echo # MDEV-31910 ASAN memcpy-param-overlap upon CONCAT in ORACLE mode +--echo # + +SET SQL_MODE= ORACLE; +SELECT CONCAT(SUBSTR(123 FROM 2)); + +--echo # +--echo # End of 10.6 tests +--echo # diff --git a/mysql-test/suite/events/events_bugs.result b/mysql-test/suite/events/events_bugs.result index ee4411e6dbf..a34a6111eb6 100644 --- a/mysql-test/suite/events/events_bugs.result +++ b/mysql-test/suite/events/events_bugs.result @@ -870,3 +870,14 @@ USE test; DROP DATABASE events_test; SET GLOBAL event_scheduler= 'ON'; SET @@global.concurrent_insert= @concurrent_insert; +# +# MDEV-33472 Assertion `0' failed in Item_row::illegal_method_call on CREATE EVENT +# +CREATE EVENT e ON SCHEDULE EVERY 1 HOUR STARTS ROW(1,2) DO SELECT 1; +ERROR 21000: Operand should contain 1 column(s) +CREATE EVENT e ON SCHEDULE EVERY 1 HOUR ENDS ROW(1,2) DO SELECT 1; +ERROR 21000: Operand should contain 1 column(s) +CREATE EVENT e ON SCHEDULE AT ROW(1,2) DO SELECT *; +ERROR 21000: Operand should contain 1 column(s) +CREATE EVENT e ON SCHEDULE EVERY ROW(1,2) HOUR DO SELECT 1; +ERROR 21000: Operand should contain 1 column(s) diff --git a/mysql-test/suite/events/events_bugs.test b/mysql-test/suite/events/events_bugs.test index 421f3eaf33f..ea79563748e 100644 --- a/mysql-test/suite/events/events_bugs.test +++ b/mysql-test/suite/events/events_bugs.test @@ -1308,3 +1308,20 @@ SET GLOBAL event_scheduler= 'ON'; --source include/running_event_scheduler.inc SET @@global.concurrent_insert= @concurrent_insert; # THIS MUST BE THE LAST LINE in this file. 
+ + +--echo # +--echo # MDEV-33472 Assertion `0' failed in Item_row::illegal_method_call on CREATE EVENT +--echo # + +--error ER_OPERAND_COLUMNS +CREATE EVENT e ON SCHEDULE EVERY 1 HOUR STARTS ROW(1,2) DO SELECT 1; + +--error ER_OPERAND_COLUMNS +CREATE EVENT e ON SCHEDULE EVERY 1 HOUR ENDS ROW(1,2) DO SELECT 1; + +--error ER_OPERAND_COLUMNS +CREATE EVENT e ON SCHEDULE AT ROW(1,2) DO SELECT *; + +--error ER_OPERAND_COLUMNS +CREATE EVENT e ON SCHEDULE EVERY ROW(1,2) HOUR DO SELECT 1; diff --git a/mysql-test/suite/galera/r/MDEV-10715.result b/mysql-test/suite/galera/r/MDEV-10715.result index 12b9c2367c4..471c8b34c4d 100644 --- a/mysql-test/suite/galera/r/MDEV-10715.result +++ b/mysql-test/suite/galera/r/MDEV-10715.result @@ -27,4 +27,4 @@ wsrep_last_written_gtid() connection node_1; drop table t1; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/MDEV-27806.result b/mysql-test/suite/galera/r/MDEV-27806.result index 6fe288f4e8e..5b30d05fdf9 100644 --- a/mysql-test/suite/galera/r/MDEV-27806.result +++ b/mysql-test/suite/galera/r/MDEV-27806.result @@ -49,4 +49,4 @@ mysqld-bin.000003 # Query # # COMMIT BINLOG_POSITIONS_MATCH 1 DROP TABLE t1,ts1; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/galera_gtid_server_id.result b/mysql-test/suite/galera/r/galera_gtid_server_id.result index 3be6fd2fda7..74de5bfbe0a 100644 --- a/mysql-test/suite/galera/r/galera_gtid_server_id.result +++ b/mysql-test/suite/galera/r/galera_gtid_server_id.result @@ -5,7 +5,7 @@ select @@gtid_domain_id, @@server_id, @@wsrep_gtid_domain_id,@@wsrep_gtid_mode; @@gtid_domain_id @@server_id @@wsrep_gtid_domain_id @@wsrep_gtid_mode 0 11 1 1 connection node_2; -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); select @@gtid_domain_id, @@server_id, @@wsrep_gtid_domain_id,@@wsrep_gtid_mode; @@gtid_domain_id @@server_id @@wsrep_gtid_domain_id @@wsrep_gtid_mode 0 12 1 1 diff --git a/mysql-test/suite/galera/r/galera_gtid_slave.result b/mysql-test/suite/galera/r/galera_gtid_slave.result index d460d169811..180be320102 100644 --- a/mysql-test/suite/galera/r/galera_gtid_slave.result +++ b/mysql-test/suite/galera/r/galera_gtid_slave.result @@ -42,4 +42,4 @@ SET GLOBAL wsrep_on=ON; connection node_3; reset master; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result b/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result index f41e3abc497..e9f11d6f44b 100644 --- a/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result +++ b/mysql-test/suite/galera/r/galera_gtid_slave_sst_rsync.result @@ -167,4 +167,4 @@ set global wsrep_on=ON; connection node_3; reset master; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/galera_gtid_trx_conflict.result b/mysql-test/suite/galera/r/galera_gtid_trx_conflict.result index 602816f18a9..a30351b2c39 100644 --- 
a/mysql-test/suite/galera/r/galera_gtid_trx_conflict.result +++ b/mysql-test/suite/galera/r/galera_gtid_trx_conflict.result @@ -42,4 +42,4 @@ SELECT @@gtid_binlog_state; @@gtid_binlog_state 1-1-101 DROP TABLE t1; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/galera_last_committed_id.result b/mysql-test/suite/galera/r/galera_last_committed_id.result index 1a1339ede32..58206baf7fe 100644 --- a/mysql-test/suite/galera/r/galera_last_committed_id.result +++ b/mysql-test/suite/galera/r/galera_last_committed_id.result @@ -34,4 +34,4 @@ wsrep_last_written_id_advanced SET AUTOCOMMIT=ON; DROP TABLE t1; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/galera_query_cache_invalidate.result b/mysql-test/suite/galera/r/galera_query_cache_invalidate.result index a1ce50b4f56..4ba7f700c8b 100644 --- a/mysql-test/suite/galera/r/galera_query_cache_invalidate.result +++ b/mysql-test/suite/galera/r/galera_query_cache_invalidate.result @@ -3,9 +3,9 @@ connection node_1; connect node_3, 127.0.0.1, root, , test, $NODE_MYPORT_3; connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4; connection node_2; -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); connection node_4; -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); connection node_3; CHANGE MASTER TO master_host='127.0.0.1', master_user='root', master_port=NODE_MYPORT_1, master_ssl_verify_server_cert=0, master_use_gtid=current_pos; START SLAVE; diff --git a/mysql-test/suite/galera/r/galera_sync_wait_upto.result b/mysql-test/suite/galera/r/galera_sync_wait_upto.result index c9f468774fb..c6e5be5c232 100644 --- a/mysql-test/suite/galera/r/galera_sync_wait_upto.result +++ b/mysql-test/suite/galera/r/galera_sync_wait_upto.result @@ -24,4 +24,4 @@ WSREP_SYNC_WAIT_UPTO connection node_1; DROP TABLE t1; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result b/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result index 0edfdf47d34..7de74462ebc 100644 --- a/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result +++ b/mysql-test/suite/galera/r/galera_var_gtid_domain_id.result @@ -90,7 +90,7 @@ set global wsrep_on=OFF; reset master; set global wsrep_on=ON; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); disconnect node_2; disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result b/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result index f6b68ea20b9..c102e003d34 100644 --- a/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result +++ b/mysql-test/suite/galera/r/galera_var_ignore_apply_errors.result @@ -207,6 +207,7 @@ connection node_1; CREATE TABLE t1 (f1 INTEGER, f2 INTEGER); DROP TABLE t1; connection node_2; +set session wsrep_sync_wait=0; SELECT * FROM t1; ERROR 42S02: Table 
'test.t1' doesn't exist SET GLOBAL wsrep_ignore_apply_errors = 10; diff --git a/mysql-test/suite/galera/r/galera_wsrep_schema_detached.result b/mysql-test/suite/galera/r/galera_wsrep_schema_detached.result index 41275ede6d2..2bfa50ebd0a 100644 --- a/mysql-test/suite/galera/r/galera_wsrep_schema_detached.result +++ b/mysql-test/suite/galera/r/galera_wsrep_schema_detached.result @@ -3,8 +3,9 @@ connection node_1; connection node_1; connection node_2; connection node_1; -call mtr.add_suppression("WSREP: async IST sender failed to serve.*"); +call mtr.add_suppression("WSREP:.*"); SET @wsrep_provider_options_orig = @@GLOBAL.wsrep_provider_options; +SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true;pc.weight=2'; connection node_2; SET @wsrep_cluster_address_orig = @@GLOBAL.wsrep_cluster_address; SET GLOBAL WSREP_ON=0; @@ -26,11 +27,37 @@ SELECT 1; 1 1 DELETE FROM mysql.wsrep_allowlist; -connection node_1; -SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true'; connection node_2; Killing server ... connection node_1; +SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log; +EXPECT_0 +0 +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster; +EXPECT_1 +1 +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster_members; +EXPECT_1 +1 connection node_2; connection node_1; -SET GLOBAL wsrep_provider_options ='pc.ignore_sb=false'; +SET GLOBAL wsrep_provider_options ='pc.ignore_sb=false;pc.weight=1'; +SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log; +EXPECT_0 +0 +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster; +EXPECT_1 +1 +SELECT COUNT(*) AS EXPECT_2 FROM mysql.wsrep_cluster_members; +EXPECT_2 +2 +connection node_2; +SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log; +EXPECT_0 +0 +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster; +EXPECT_1 +1 +SELECT COUNT(*) AS EXPECT_2 FROM mysql.wsrep_cluster_members; +EXPECT_2 +2 diff --git a/mysql-test/suite/galera/r/mdev_10518.result b/mysql-test/suite/galera/r/mdev_10518.result index 8426cd31349..09d4a6cb729 100644 --- a/mysql-test/suite/galera/r/mdev_10518.result +++ b/mysql-test/suite/galera/r/mdev_10518.result @@ -90,7 +90,7 @@ set global wsrep_on=OFF; reset master; set global wsrep_on=ON; connection node_2; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); disconnect node_2; disconnect node_1; # End of test diff --git a/mysql-test/suite/galera/t/MDEV-10715.test b/mysql-test/suite/galera/t/MDEV-10715.test index 20c5293f01b..29831994fdd 100644 --- a/mysql-test/suite/galera/t/MDEV-10715.test +++ b/mysql-test/suite/galera/t/MDEV-10715.test @@ -19,4 +19,4 @@ select wsrep_last_written_gtid(); drop table t1; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); \ No newline at end of file +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/MDEV-27806.test b/mysql-test/suite/galera/t/MDEV-27806.test index 62a0ca483e0..f8d896adb2d 100644 --- a/mysql-test/suite/galera/t/MDEV-27806.test +++ b/mysql-test/suite/galera/t/MDEV-27806.test @@ -48,4 +48,4 @@ CREATE TABLE ts1 AS SELECT * FROM t1; DROP TABLE t1,ts1; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/MW-402.test b/mysql-test/suite/galera/t/MW-402.test index 4b83e25dc50..f84752e1b25 100644 --- a/mysql-test/suite/galera/t/MW-402.test +++ 
b/mysql-test/suite/galera/t/MW-402.test @@ -1,5 +1,7 @@ --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc --source include/galera_have_debug_sync.inc # diff --git a/mysql-test/suite/galera/t/galera_gtid_server_id.test b/mysql-test/suite/galera/t/galera_gtid_server_id.test index f61bef4909b..df72e9a0c6e 100644 --- a/mysql-test/suite/galera/t/galera_gtid_server_id.test +++ b/mysql-test/suite/galera/t/galera_gtid_server_id.test @@ -3,7 +3,7 @@ --connection node_1 select @@gtid_domain_id, @@server_id, @@wsrep_gtid_domain_id,@@wsrep_gtid_mode; --connection node_2 -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); select @@gtid_domain_id, @@server_id, @@wsrep_gtid_domain_id,@@wsrep_gtid_mode; --connection node_1 diff --git a/mysql-test/suite/galera/t/galera_gtid_slave.test b/mysql-test/suite/galera/t/galera_gtid_slave.test index 7bcaac77f81..b326ffdffcd 100644 --- a/mysql-test/suite/galera/t/galera_gtid_slave.test +++ b/mysql-test/suite/galera/t/galera_gtid_slave.test @@ -92,4 +92,4 @@ SET GLOBAL wsrep_on=ON; reset master; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test b/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test index 6c83c7a34cd..25e79c7100d 100644 --- a/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test +++ b/mysql-test/suite/galera/t/galera_gtid_slave_sst_rsync.test @@ -199,4 +199,4 @@ set global wsrep_on=ON; reset master; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/galera_gtid_trx_conflict.test b/mysql-test/suite/galera/t/galera_gtid_trx_conflict.test index 6f05196b8da..940b66e54ab 100644 --- a/mysql-test/suite/galera/t/galera_gtid_trx_conflict.test +++ b/mysql-test/suite/galera/t/galera_gtid_trx_conflict.test @@ -53,4 +53,4 @@ SELECT @@gtid_binlog_state; SELECT @@gtid_binlog_state; DROP TABLE t1; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/galera_last_committed_id.test b/mysql-test/suite/galera/t/galera_last_committed_id.test index 6e1f0de7d0b..e3746b5a96d 100644 --- a/mysql-test/suite/galera/t/galera_last_committed_id.test +++ b/mysql-test/suite/galera/t/galera_last_committed_id.test @@ -66,4 +66,4 @@ SET AUTOCOMMIT=ON; DROP TABLE t1; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/galera_query_cache_invalidate.test b/mysql-test/suite/galera/t/galera_query_cache_invalidate.test index 5e4b4331ea3..03729aaa6f2 100644 --- a/mysql-test/suite/galera/t/galera_query_cache_invalidate.test +++ b/mysql-test/suite/galera/t/galera_query_cache_invalidate.test @@ -22,9 +22,9 @@ --connect node_4, 127.0.0.1, root, , test, $NODE_MYPORT_4 --connection node_2 -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); --connection node_4 
-call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); --connection node_3 diff --git a/mysql-test/suite/galera/t/galera_sync_wait_upto.test b/mysql-test/suite/galera/t/galera_sync_wait_upto.test index 8465ddebfeb..aaecd8760f5 100644 --- a/mysql-test/suite/galera/t/galera_sync_wait_upto.test +++ b/mysql-test/suite/galera/t/galera_sync_wait_upto.test @@ -3,6 +3,7 @@ # --source include/galera_cluster.inc +--source include/have_debug.inc --source include/have_debug_sync.inc CREATE TABLE t1 (f1 INTEGER) ENGINE=InnoDB; @@ -72,4 +73,4 @@ INSERT INTO t1 VALUES (2); DROP TABLE t1; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera/t/galera_var_gtid_domain_id.test b/mysql-test/suite/galera/t/galera_var_gtid_domain_id.test index 4de1121ae77..c9b8a890209 100644 --- a/mysql-test/suite/galera/t/galera_var_gtid_domain_id.test +++ b/mysql-test/suite/galera/t/galera_var_gtid_domain_id.test @@ -60,7 +60,7 @@ reset master; set global wsrep_on=ON; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); --source include/galera_end.inc --echo # End of test diff --git a/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test b/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test index 02dd9fa8416..feec79196c3 100644 --- a/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test +++ b/mysql-test/suite/galera/t/galera_var_ignore_apply_errors.test @@ -259,6 +259,9 @@ CREATE TABLE t1 (f1 INTEGER, f2 INTEGER); DROP TABLE t1; --connection node_2 +set session wsrep_sync_wait=0; +--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1' +--source include/wait_condition.inc --error ER_NO_SUCH_TABLE SELECT * FROM t1; SET GLOBAL wsrep_ignore_apply_errors = 10; diff --git a/mysql-test/suite/galera/t/galera_wsrep_schema_detached.test b/mysql-test/suite/galera/t/galera_wsrep_schema_detached.test index 9942d63f142..5fa170f3a02 100644 --- a/mysql-test/suite/galera/t/galera_wsrep_schema_detached.test +++ b/mysql-test/suite/galera/t/galera_wsrep_schema_detached.test @@ -6,8 +6,9 @@ --source include/auto_increment_offset_save.inc --connection node_1 -call mtr.add_suppression("WSREP: async IST sender failed to serve.*"); +call mtr.add_suppression("WSREP:.*"); SET @wsrep_provider_options_orig = @@GLOBAL.wsrep_provider_options; +SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true;pc.weight=2'; --connection node_2 SET @wsrep_cluster_address_orig = @@GLOBAL.wsrep_cluster_address; @@ -21,16 +22,15 @@ INSERT INTO mysql.wsrep_allowlist (ip) VALUES (0); SET GLOBAL wsrep_cluster_address = @wsrep_cluster_address_orig; SELECT 1; DELETE FROM mysql.wsrep_allowlist; - ---connection node_1 -SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true'; - --connection node_2 --source include/kill_galera.inc --connection node_1 --let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' --source include/wait_condition.inc +SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log; +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster; +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster_members; --connection node_2 --source include/start_mysqld.inc @@ 
-39,7 +39,15 @@ SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true'; --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' --source include/wait_condition.inc -SET GLOBAL wsrep_provider_options ='pc.ignore_sb=false'; +SET GLOBAL wsrep_provider_options ='pc.ignore_sb=false;pc.weight=1'; +SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log; +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster; +SELECT COUNT(*) AS EXPECT_2 FROM mysql.wsrep_cluster_members; + +--connection node_2 +SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log; +SELECT COUNT(*) AS EXPECT_1 FROM mysql.wsrep_cluster; +SELECT COUNT(*) AS EXPECT_2 FROM mysql.wsrep_cluster_members; # Cleanup --source include/auto_increment_offset_restore.inc diff --git a/mysql-test/suite/galera/t/mdev_10518.test b/mysql-test/suite/galera/t/mdev_10518.test index 4de1121ae77..c9b8a890209 100644 --- a/mysql-test/suite/galera/t/mdev_10518.test +++ b/mysql-test/suite/galera/t/mdev_10518.test @@ -60,7 +60,7 @@ reset master; set global wsrep_on=ON; --connection node_2 -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); --source include/galera_end.inc --echo # End of test diff --git a/mysql-test/suite/galera_3nodes/r/MDEV-29171.result b/mysql-test/suite/galera_3nodes/r/MDEV-29171.result index 3a91bfea211..88883eb17a0 100644 --- a/mysql-test/suite/galera_3nodes/r/MDEV-29171.result +++ b/mysql-test/suite/galera_3nodes/r/MDEV-29171.result @@ -56,7 +56,7 @@ connection node_1; set global wsrep_gtid_domain_id=100; connection node_2; set global wsrep_gtid_domain_id=100; -CALL mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +CALL mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); connection node_3; set global wsrep_gtid_domain_id=100; -CALL mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +CALL mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result b/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result index 5214eafa1c8..bdd18ee2534 100644 --- a/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result +++ b/mysql-test/suite/galera_3nodes/r/galera_2_cluster.result @@ -75,19 +75,19 @@ connection node_2; SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); connection node_3; SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); connection node_5; SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); connection node_6; SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result b/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result index b280a803b37..2ecd5edfa99 100644 --- 
a/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result +++ b/mysql-test/suite/galera_3nodes/r/galera_evs_suspect_timeout.result @@ -12,16 +12,10 @@ connection node_3; Suspending node ... connection node_1; SET SESSION wsrep_sync_wait=0; -SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -VARIABLE_VALUE = 2 -1 CREATE TABLE t1 (f1 INTEGER) engine=InnoDB; INSERT INTO t1 VALUES (1); connection node_2; SET SESSION wsrep_sync_wait=0; -SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; -VARIABLE_VALUE = 2 -1 SET SESSION wsrep_sync_wait = 15; SELECT COUNT(*) FROM t1; COUNT(*) diff --git a/mysql-test/suite/galera_3nodes/r/galera_gtid_consistency.result b/mysql-test/suite/galera_3nodes/r/galera_gtid_consistency.result index 91ff0342b8d..22eef0eec80 100644 --- a/mysql-test/suite/galera_3nodes/r/galera_gtid_consistency.result +++ b/mysql-test/suite/galera_3nodes/r/galera_gtid_consistency.result @@ -199,12 +199,12 @@ SELECT COUNT(*) FROM t1; COUNT(*) 1950 connection node_2; -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node"); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); call mtr.add_suppression("Sending JOIN failed: "); call mtr.add_suppression("WSREP: Failed to JOIN the cluster after SST"); call mtr.add_suppression("WSREP: FLOW message from member .* in non-primary configuration"); connection node_3; -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node"); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); call mtr.add_suppression("Sending JOIN failed: "); call mtr.add_suppression("WSREP: Failed to JOIN the cluster after SST"); call mtr.add_suppression("WSREP: FLOW message from member .* in non-primary configuration"); diff --git a/mysql-test/suite/galera_3nodes/t/MDEV-29171.test b/mysql-test/suite/galera_3nodes/t/MDEV-29171.test index df1282609f0..4d67bf6ad0b 100644 --- a/mysql-test/suite/galera_3nodes/t/MDEV-29171.test +++ b/mysql-test/suite/galera_3nodes/t/MDEV-29171.test @@ -119,8 +119,8 @@ set global wsrep_gtid_domain_id=100; --connection node_2 set global wsrep_gtid_domain_id=100; -CALL mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +CALL mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); --connection node_3 set global wsrep_gtid_domain_id=100; -CALL mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +CALL mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test b/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test index d7b0f36551f..8fc9e492342 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test +++ b/mysql-test/suite/galera_3nodes/t/galera_2_cluster.test @@ -129,7 +129,7 @@ SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); --connection node_3 @@ -137,7 +137,7 @@ SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); --connection node_5 @@ -145,7 +145,7 @@ SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL 
mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); --connection node_6 @@ -153,4 +153,4 @@ SET GLOBAL wsrep_on = OFF; RESET MASTER; SET GLOBAL wsrep_on = ON; -CALL mtr.add_suppression("Ignoring server id for non bootstrap node"); +CALL mtr.add_suppression("Ignoring server id .* for non bootstrap node"); diff --git a/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test b/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test index c0b23e4cc8e..c52bebc7019 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test +++ b/mysql-test/suite/galera_3nodes/t/galera_evs_suspect_timeout.test @@ -18,6 +18,9 @@ --source ../galera/include/auto_increment_offset_save.inc --connection node_1 +--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc + --let $wsrep_provider_options_node1 = `SELECT @@wsrep_provider_options` SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S'; @@ -26,20 +29,20 @@ SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_ti SET GLOBAL wsrep_provider_options = 'evs.inactive_timeout=PT100M; evs.suspect_timeout=PT1S'; --connection node_3 ---source include/wait_until_connected_again.inc --let $wsrep_cluster_address_node3 = `SELECT @@wsrep_cluster_address` +--let $wsrep_provider_options_node3 = `SELECT @@wsrep_provider_options` # Suspend node #3 --connection node_3 --source include/galera_suspend.inc ---sleep 5 # Confirm that the other nodes have booted it out --connection node_1 ---source include/wait_until_connected_again.inc SET SESSION wsrep_sync_wait=0; -SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc + --disable_query_log --eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node1'; --enable_query_log @@ -49,9 +52,10 @@ CREATE TABLE t1 (f1 INTEGER) engine=InnoDB; INSERT INTO t1 VALUES (1); --connection node_2 ---source include/wait_until_connected_again.inc SET SESSION wsrep_sync_wait=0; -SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc + --disable_query_log --eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node2'; --enable_query_log @@ -67,6 +71,7 @@ SELECT COUNT(*) FROM t1; --source include/wait_until_connected_again.inc --disable_query_log +--eval SET GLOBAL wsrep_provider_options = '$wsrep_provider_options_node3'; --eval SET GLOBAL wsrep_cluster_address = '$wsrep_cluster_address_node3'; --enable_query_log --source include/galera_wait_ready.inc @@ -78,6 +83,9 @@ SET SESSION wsrep_sync_wait = 15; SELECT COUNT(*) FROM t1; --connection node_1 +--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size' +--source include/wait_condition.inc + DROP TABLE t1; # Restore original auto_increment_offset values. 
--source ../galera/include/auto_increment_offset_restore.inc diff --git a/mysql-test/suite/galera_3nodes/t/galera_gtid_consistency.test b/mysql-test/suite/galera_3nodes/t/galera_gtid_consistency.test index 871014b39d0..4d99f865cb2 100644 --- a/mysql-test/suite/galera_3nodes/t/galera_gtid_consistency.test +++ b/mysql-test/suite/galera_3nodes/t/galera_gtid_consistency.test @@ -343,12 +343,12 @@ SELECT COUNT(*) FROM t1; # cleanups # --connection node_2 -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node"); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); call mtr.add_suppression("Sending JOIN failed: "); call mtr.add_suppression("WSREP: Failed to JOIN the cluster after SST"); call mtr.add_suppression("WSREP: FLOW message from member .* in non-primary configuration"); --connection node_3 -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node"); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); call mtr.add_suppression("Sending JOIN failed: "); call mtr.add_suppression("WSREP: Failed to JOIN the cluster after SST"); call mtr.add_suppression("WSREP: FLOW message from member .* in non-primary configuration"); diff --git a/mysql-test/suite/galera_sr/r/GCF-572.result b/mysql-test/suite/galera_sr/r/GCF-572.result index 41ae2378a3f..b28ce1ae346 100644 --- a/mysql-test/suite/galera_sr/r/GCF-572.result +++ b/mysql-test/suite/galera_sr/r/GCF-572.result @@ -37,8 +37,6 @@ f1 f2 SET SESSION wsrep_trx_fragment_size = 10000; START TRANSACTION; INSERT INTO t1 VALUE (10, 'node1'); -SELECT * FROM mysql.wsrep_streaming_log; -node_uuid trx_id seqno flags frag connection node_1a; INSERT INTO t1 VALUES(15, 'node2'); connection node_1; @@ -47,6 +45,7 @@ f1 f2 1 node1 5 node2 10 node1 +15 node2 INSERT INTO t1 VALUES(15, 'node1'); ERROR 23000: Duplicate entry '15' for key 'PRIMARY' COMMIT; diff --git a/mysql-test/suite/galera_sr/r/MDEV-35281.result b/mysql-test/suite/galera_sr/r/MDEV-35281.result new file mode 100644 index 00000000000..eeac965db19 --- /dev/null +++ b/mysql-test/suite/galera_sr/r/MDEV-35281.result @@ -0,0 +1,9 @@ +connection node_2; +connection node_1; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY); +SET SESSION wsrep_trx_fragment_size=1; +SET SESSION innodb_snapshot_isolation=ON; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +INSERT INTO t1 VALUES (1); +COMMIT; +DROP TABLE t1; diff --git a/mysql-test/suite/galera_sr/r/mdev_18631.result b/mysql-test/suite/galera_sr/r/mdev_18631.result index aa3b2c252e8..8e02d09ffd8 100644 --- a/mysql-test/suite/galera_sr/r/mdev_18631.result +++ b/mysql-test/suite/galera_sr/r/mdev_18631.result @@ -5,7 +5,7 @@ connection node_1; CREATE TABLE t1(f1 INT PRIMARY KEY) ENGINE=INNODB; INSERT INTO t1 VALUES (1), (2), (3); connection node_2; -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); SELECT * FROM t1; f1 1 diff --git a/mysql-test/suite/galera_sr/t/GCF-572.test b/mysql-test/suite/galera_sr/t/GCF-572.test index be77451a332..b9bd90d1a96 100644 --- a/mysql-test/suite/galera_sr/t/GCF-572.test +++ b/mysql-test/suite/galera_sr/t/GCF-572.test @@ -61,7 +61,6 @@ SET SESSION wsrep_trx_fragment_size = 10000; START TRANSACTION; INSERT INTO t1 VALUE (10, 'node1'); -SELECT * FROM mysql.wsrep_streaming_log; --connection node_1a INSERT INTO t1 VALUES(15, 'node2'); diff --git a/mysql-test/suite/galera_sr/t/MDEV-35281.test b/mysql-test/suite/galera_sr/t/MDEV-35281.test new 
file mode 100644 index 00000000000..1ed2feab7f8 --- /dev/null +++ b/mysql-test/suite/galera_sr/t/MDEV-35281.test @@ -0,0 +1,37 @@ +# +# MDEV-35281 - SR transaction crashes with innodb_snapshot_isolation +# +# Test outline: a simple SR transaction fails to remove +# its fragments from streaming_log table, with error +# HA_ERR_RECORD_CHANGED. +# This happens with the following sequence of events: +# 1. Start a streaming replication transaction +# 2. The transaction creates a read view in InnoDB +# (this must happen before a fragment is replicated) +# 3. The transaction replicates a fragment. +# Internally, a new transaction is created to INSERT +# a row representing the fragment into the streaming_log +# table and is committed immediately. +# 4. The streaming replication transaction COMMITs. +# Before committing, the transaction replicates +# a commit fragment and DELETEs its fragments that +# were created in the streaming_log table. +# If bug is present, fragment removal from the +# streaming_log table violates snapshot isolation, +# thus the operation fails with HA_ERR_RECORD_CHANGED. +# (One or more records from the streaming_log table +# are removed, while these were not visible to +# the transaction). + +--source include/galera_cluster.inc + +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY); + +SET SESSION wsrep_trx_fragment_size=1; +SET SESSION innodb_snapshot_isolation=ON; + +START TRANSACTION WITH CONSISTENT SNAPSHOT; +INSERT INTO t1 VALUES (1); +COMMIT; + +DROP TABLE t1; diff --git a/mysql-test/suite/galera_sr/t/mdev_18631.test b/mysql-test/suite/galera_sr/t/mdev_18631.test index 3e99e9fa9dc..2721e5028eb 100644 --- a/mysql-test/suite/galera_sr/t/mdev_18631.test +++ b/mysql-test/suite/galera_sr/t/mdev_18631.test @@ -13,7 +13,7 @@ CREATE TABLE t1(f1 INT PRIMARY KEY) ENGINE=INNODB; INSERT INTO t1 VALUES (1), (2), (3); --connection node_2 -call mtr.add_suppression("WSREP: Ignoring server id for non bootstrap node\\."); +call mtr.add_suppression("WSREP: Ignoring server id .* for non bootstrap node"); SELECT * FROM t1; --connection node_1 diff --git a/mysql-test/suite/innodb/r/innochecksum_undo_page.result b/mysql-test/suite/innodb/r/innochecksum_undo_page.result new file mode 100644 index 00000000000..b9fd1da89cf --- /dev/null +++ b/mysql-test/suite/innodb/r/innochecksum_undo_page.result @@ -0,0 +1,12 @@ +SET GLOBAL INNODB_FILE_PER_TABLE= 0; +Warnings: +Warning 1287 '@@innodb_file_per_table' is deprecated and will be removed in a future release +CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1); +DROP TABLE t1; +SET GLOBAL innodb_fast_shutdown=0; +# Run the innochecksum to display undo log pages +FOUND 1 /Undo page state: 0 active, [0-9]+ cached, [0-9]+ to_purge, [0-9]+ prepared, [0-9]+ other/ in result.log +# Run the innochecksum with --skip-freed-pages +FOUND 1 /Undo page state: 0 active, 0 cached, 0 to_purge, 0 prepared, 0 other/ in result.log +# restart diff --git a/mysql-test/suite/innodb/r/innodb-replace,INPLACE.rdiff b/mysql-test/suite/innodb/r/innodb-replace,INPLACE.rdiff new file mode 100644 index 00000000000..a05e4bec7e9 --- /dev/null +++ b/mysql-test/suite/innodb/r/innodb-replace,INPLACE.rdiff @@ -0,0 +1,16 @@ +--- innodb-replace.result ++++ innodb-replace,INPLACE.result +@@ -31,10 +31,10 @@ + REPLACE INTO t1 (c1,c2,c3) VALUES (0,1,b'11'); + SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN ('HANDLER_DELETE','HANDLER_WRITE','HANDLER_READ_KEY','HANDLER_UPDATE'); + VARIABLE_NAME VARIABLE_VALUE +-HANDLER_DELETE 1 ++HANDLER_DELETE 2 + 
HANDLER_READ_KEY 2 +-HANDLER_UPDATE 1 +-HANDLER_WRITE 2 ++HANDLER_UPDATE 0 ++HANDLER_WRITE 3 + SELECT * FROM t1; + c1 c2 c3 + 0 1  diff --git a/mysql-test/suite/innodb/r/innodb-replace.result b/mysql-test/suite/innodb/r/innodb-replace.result index c926bb89a2e..16b1df73a85 100644 --- a/mysql-test/suite/innodb/r/innodb-replace.result +++ b/mysql-test/suite/innodb/r/innodb-replace.result @@ -11,3 +11,53 @@ ERROR HY000: DELAYED option not supported for table 't1' select * from t1; c1 c2 stamp drop table t1; +# +# MDEV-35115 Inconsistent Replace behaviour when multiple +# unique index exist +# +CREATE TABLE t1 (c1 NUMERIC UNSIGNED NOT NULL, +c2 INT3 UNIQUE, +c3 BIT(2) PRIMARY KEY)ENGINE=InnoDB; +ALTER TABLE t1 ADD UNIQUE INDEX(c1); +INSERT INTO t1 (c1,c2,c3) VALUES (0,0,b'01'); +INSERT INTO t1 (c1,c2,c3) VALUES (1,1,b'10'); +FLUSH STATUS; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN ('HANDLER_DELETE','HANDLER_WRITE','HANDLER_READ_KEY','HANDLER_UPDATE'); +VARIABLE_NAME VARIABLE_VALUE +HANDLER_DELETE 0 +HANDLER_READ_KEY 0 +HANDLER_UPDATE 0 +HANDLER_WRITE 0 +REPLACE INTO t1 (c1,c2,c3) VALUES (0,1,b'11'); +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN ('HANDLER_DELETE','HANDLER_WRITE','HANDLER_READ_KEY','HANDLER_UPDATE'); +VARIABLE_NAME VARIABLE_VALUE +HANDLER_DELETE 1 +HANDLER_READ_KEY 2 +HANDLER_UPDATE 1 +HANDLER_WRITE 2 +SELECT * FROM t1; +c1 c2 c3 +0 1  +DROP TABLE t1; +CREATE TABLE t1 (f1 INT NOT NULL PRIMARY KEY, +f2 INT, f3 INT, f4 INT, +UNIQUE INDEX i1(f2))ENGINE=InnoDB; +ALTER TABLE t1 ADD INDEX i3(f4); +ALTER TABLE t1 ADD UNIQUE INDEX i2(f3); +INSERT INTO t1 VALUES (0,0,0,0); +INSERT INTO t1 VALUES (1,1,1,1); +FLUSH STATUS; +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN ('HANDLER_DELETE','HANDLER_WRITE','HANDLER_READ_KEY','HANDLER_UPDATE'); +VARIABLE_NAME VARIABLE_VALUE +HANDLER_DELETE 0 +HANDLER_READ_KEY 0 +HANDLER_UPDATE 0 +HANDLER_WRITE 0 +REPLACE INTO t1 VALUES (0,0,1,1); +SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN ('HANDLER_DELETE','HANDLER_WRITE','HANDLER_READ_KEY','HANDLER_UPDATE'); +VARIABLE_NAME VARIABLE_VALUE +HANDLER_DELETE 1 +HANDLER_READ_KEY 2 +HANDLER_UPDATE 1 +HANDLER_WRITE 2 +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/r/log_file_name.result b/mysql-test/suite/innodb/r/log_file_name.result index 4b9e63a1144..1839c9bc1e0 100644 --- a/mysql-test/suite/innodb/r/log_file_name.result +++ b/mysql-test/suite/innodb/r/log_file_name.result @@ -49,7 +49,6 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' AND support IN ('YES', 'DEFAULT', 'ENABLED'); ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS -FOUND 1 /InnoDB: Could not measure the size of single-table tablespace file '.*test/t2\.ibd'/ in mysqld.1.err # restart SELECT * FROM INFORMATION_SCHEMA.ENGINES WHERE engine = 'innodb' diff --git a/mysql-test/suite/innodb/r/log_file_size_online.result b/mysql-test/suite/innodb/r/log_file_size_online.result index e34d4f3cdc2..197e1145b77 100644 --- a/mysql-test/suite/innodb/r/log_file_size_online.result +++ b/mysql-test/suite/innodb/r/log_file_size_online.result @@ -58,5 +58,13 @@ COUNT(*) LENGTH(b) SHOW VARIABLES LIKE 'innodb_log_file_size'; Variable_name Value innodb_log_file_size 5242880 -FOUND 1 /InnoDB: Resized log to 5\.000MiB/ in mysqld.1.err +SET GLOBAL innodb_log_file_size=6291456; +SHOW VARIABLES LIKE 'innodb_log_file_size'; +Variable_name Value +innodb_log_file_size 6291456 +SET GLOBAL innodb_log_file_size=5242880; +SHOW VARIABLES LIKE 
'innodb_log_file_size'; +Variable_name Value +innodb_log_file_size 5242880 +FOUND 1 /InnoDB: Resized log to 6\.000MiB/ in mysqld.1.err DROP TABLE t; diff --git a/mysql-test/suite/innodb/r/purge_pessimistic.result b/mysql-test/suite/innodb/r/purge_pessimistic.result new file mode 100644 index 00000000000..a7ce4e13d60 --- /dev/null +++ b/mysql-test/suite/innodb/r/purge_pessimistic.result @@ -0,0 +1,36 @@ +# +# MDEV-35508: Race condition between purge and secondary index INSERT or UPDATE +# +SET @old_debug_dbug = @@global.debug_dbug; +CREATE TABLE t1(col1 INT PRIMARY KEY, col2 int, KEY k1(col2)) ENGINE=Innodb; +INSERT INTO t1 VALUES(1, 100); +CREATE TABLE t2(col1 INT PRIMARY KEY) Engine=Innodb; +InnoDB 0 transactions not purged +START TRANSACTION; +INSERT INTO t2 VALUES(10); +SET DEBUG_SYNC='RESET'; +SET GLOBAL debug_dbug= "+d,btr_force_pessimistic_delete"; +SET GLOBAL debug_dbug= "+d,enable_row_purge_sec_tree_sync"; +connect con1,localhost,root; +UPDATE t1 SET col2 = 200 WHERE col1 = 1; +connection default; +SET DEBUG_SYNC= 'now WAIT_FOR purge_sec_tree_begin'; +SET GLOBAL debug_dbug= "-d,enable_row_purge_sec_tree_sync"; +UPDATE t1 SET col2 = 100 WHERE col1 = 1; +SET DEBUG_SYNC= 'now SIGNAL purge_sec_tree_execute'; +COMMIT; +InnoDB 0 transactions not purged +disconnect con1; +SELECT * FROM t1; +col1 col2 +1 100 +CHECK TABLE t1; +Table Op Msg_type Msg_text +test.t1 check status OK +DROP TABLE t1; +SELECT * FROM t2; +col1 +10 +DROP TABLE t2; +SET @@GLOBAL.debug_dbug = @old_debug_dbug; +SET DEBUG_SYNC='RESET'; diff --git a/mysql-test/suite/innodb/t/innochecksum_undo_page.opt b/mysql-test/suite/innodb/t/innochecksum_undo_page.opt new file mode 100644 index 00000000000..5e2202bd1d2 --- /dev/null +++ b/mysql-test/suite/innodb/t/innochecksum_undo_page.opt @@ -0,0 +1 @@ +--innodb_undo_tablespaces=0 diff --git a/mysql-test/suite/innodb/t/innochecksum_undo_page.test b/mysql-test/suite/innodb/t/innochecksum_undo_page.test new file mode 100644 index 00000000000..5e6ab15e68b --- /dev/null +++ b/mysql-test/suite/innodb/t/innochecksum_undo_page.test @@ -0,0 +1,27 @@ +--source include/have_innodb.inc +--source include/not_embedded.inc +let MYSQLD_DATADIR= `SELECT @@datadir`; + +SET GLOBAL INNODB_FILE_PER_TABLE= 0; +CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB; +INSERT INTO t1 VALUES(1); +DROP TABLE t1; +SET GLOBAL innodb_fast_shutdown=0; +--source include/shutdown_mysqld.inc + +--echo # Run the innochecksum to display undo log pages +let $resultlog=$MYSQLTEST_VARDIR/tmp/result.log; +let SEARCH_FILE = $MYSQLTEST_VARDIR/tmp/result.log; +let SEARCH_ABORT = NOT FOUND; +exec $INNOCHECKSUM -S $MYSQLD_DATADIR/ibdata1 > $resultlog; +# Expected > 0 cached undo log pages, but can't guarantee it because +# the writes of freed pages may be optimized while flushing +let SEARCH_PATTERN= Undo page state: 0 active, [0-9]+ cached, [0-9]+ to_purge, [0-9]+ prepared, [0-9]+ other; +--source include/search_pattern_in_file.inc + +--echo # Run the innochecksum with --skip-freed-pages +exec $INNOCHECKSUM -S -r $MYSQLD_DATADIR/ibdata1 > $resultlog; +let SEARCH_PATTERN= Undo page state: 0 active, 0 cached, 0 to_purge, 0 prepared, 0 other; +--source include/search_pattern_in_file.inc +--remove_file $resultlog +--source include/start_mysqld.inc diff --git a/mysql-test/suite/innodb/t/innodb-replace.combinations b/mysql-test/suite/innodb/t/innodb-replace.combinations new file mode 100644 index 00000000000..e84e17b06ac --- /dev/null +++ b/mysql-test/suite/innodb/t/innodb-replace.combinations @@ -0,0 +1,2 @@ +[COPY] +[INPLACE] diff --git 
a/mysql-test/suite/innodb/t/innodb-replace.test b/mysql-test/suite/innodb/t/innodb-replace.test index 8c3aacde5e8..ee3a508786d 100644 --- a/mysql-test/suite/innodb/t/innodb-replace.test +++ b/mysql-test/suite/innodb/t/innodb-replace.test @@ -20,3 +20,67 @@ select * from t1; drop table t1; # End of 4.1 tests + +--echo # +--echo # MDEV-35115 Inconsistent Replace behaviour when multiple +--echo # unique index exist +--echo # +let $get_handler_status_counts= SELECT * FROM INFORMATION_SCHEMA.SESSION_STATUS WHERE VARIABLE_NAME IN ('HANDLER_DELETE','HANDLER_WRITE','HANDLER_READ_KEY','HANDLER_UPDATE'); + +let $MYSQLD_DATADIR= `select @@datadir`; +let $algorithm=`select regexp_replace('$MTR_COMBINATIONS', 'innodb,\|,innodb', '')`; + +CREATE TABLE t1 (c1 NUMERIC UNSIGNED NOT NULL, + c2 INT3 UNIQUE, + c3 BIT(2) PRIMARY KEY)ENGINE=InnoDB; + +replace_result ,ALGORITHM=COPY '' ,ALGORITHM=INPLACE ''; +eval ALTER TABLE t1 ADD UNIQUE INDEX(c1),ALGORITHM=$algorithm; +INSERT INTO t1 (c1,c2,c3) VALUES (0,0,b'01'); +INSERT INTO t1 (c1,c2,c3) VALUES (1,1,b'10'); + +FLUSH STATUS; + +--disable_ps2_protocol +eval $get_handler_status_counts; +--enable_ps2_protocol + +# The INPLACE algorithm appends the new index, so the unique index +# order differs between InnoDB and the .frm file. This leads to +# deletion of 2 existing rows for the REPLACE statement. + +# The COPY algorithm rebuilds the table every time, so no reordering +# happens in this case. This leads to 1 record deletion and +# 1 update of the existing record. +REPLACE INTO t1 (c1,c2,c3) VALUES (0,1,b'11'); + +--disable_ps2_protocol +eval $get_handler_status_counts; +--enable_ps2_protocol +SELECT * FROM t1; +DROP TABLE t1; + +CREATE TABLE t1 (f1 INT NOT NULL PRIMARY KEY, + f2 INT, f3 INT, f4 INT, + UNIQUE INDEX i1(f2))ENGINE=InnoDB; +replace_result ,ALGORITHM=COPY '' ,ALGORITHM=INPLACE ''; +eval ALTER TABLE t1 ADD INDEX i3(f4),ALGORITHM=$algorithm; + +replace_result ,ALGORITHM=COPY '' ,ALGORITHM=INPLACE ''; +eval ALTER TABLE t1 ADD UNIQUE INDEX i2(f3),ALGORITHM=$algorithm; + +INSERT INTO t1 VALUES (0,0,0,0); +INSERT INTO t1 VALUES (1,1,1,1); + +FLUSH STATUS; +--disable_ps2_protocol +eval $get_handler_status_counts; +--enable_ps2_protocol + +REPLACE INTO t1 VALUES (0,0,1,1); + +--disable_ps2_protocol +eval $get_handler_status_counts; +--enable_ps2_protocol + +DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/log_file_name.test b/mysql-test/suite/innodb/t/log_file_name.test index e541837c287..fc7899384f3 100644 --- a/mysql-test/suite/innodb/t/log_file_name.test +++ b/mysql-test/suite/innodb/t/log_file_name.test @@ -92,7 +92,6 @@ let SEARCH_PATTERN= InnoDB: Set innodb_force_recovery=1 to ignore this and to pe --source include/start_mysqld.inc eval $check_no_innodb; ---let $on_linux= `select @@version_compile_os LIKE 'Linux%'` --source include/shutdown_mysqld.inc let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t[12].ibd. @@ -107,17 +106,14 @@ let SEARCH_PATTERN= InnoDB: Tablespace \d+ was not found at .*t[12].ibd. eval $check_no_innodb; --source include/shutdown_mysqld.inc ---let SEARCH_PATTERN= InnoDB: Could not measure the size of single-table tablespace file '.*test/t2\\.ibd' -if (!$on_linux) -{ -# os_file_get_size() would succeed on a directory. ---echo FOUND 1 /$SEARCH_PATTERN/ in mysqld.1.err -} -if ($on_linux) -{ -# lseek() reports EINVAL when invoked on a directory. ---source include/search_pattern_in_file.inc -} +# On Linux, lseek() would typically report EINVAL when invoked on a directory.
+# On other platforms, as well as some GNU/Linux based environments, such as +# an Ubuntu 22.04 based image on Amazon Web Services, +# os_file_get_size() would succeed on a directory, and we would get another +# error about inability to apply log to a corrupted page. + +#--let SEARCH_PATTERN= InnoDB: Could not measure the size of single-table tablespace file '.*test/t2\\.ibd' +#--source include/search_pattern_in_file.inc --rmdir $MYSQLD_DATADIR/test/t2.ibd diff --git a/mysql-test/suite/innodb/t/log_file_size_online.test b/mysql-test/suite/innodb/t/log_file_size_online.test index 14224ab9c47..0435e288cbc 100644 --- a/mysql-test/suite/innodb/t/log_file_size_online.test +++ b/mysql-test/suite/innodb/t/log_file_size_online.test @@ -60,7 +60,11 @@ SELECT * FROM t WHERE a<10; SELECT COUNT(*),LENGTH(b) FROM t GROUP BY b; SHOW VARIABLES LIKE 'innodb_log_file_size'; -let SEARCH_PATTERN = InnoDB: Resized log to 5\\.000MiB; +SET GLOBAL innodb_log_file_size=6291456; +SHOW VARIABLES LIKE 'innodb_log_file_size'; +SET GLOBAL innodb_log_file_size=5242880; +SHOW VARIABLES LIKE 'innodb_log_file_size'; +let SEARCH_PATTERN = InnoDB: Resized log to 6\\.000MiB; --source include/search_pattern_in_file.inc DROP TABLE t; diff --git a/mysql-test/suite/innodb/t/purge_pessimistic.opt b/mysql-test/suite/innodb/t/purge_pessimistic.opt new file mode 100644 index 00000000000..a39e5228c9d --- /dev/null +++ b/mysql-test/suite/innodb/t/purge_pessimistic.opt @@ -0,0 +1 @@ +--innodb_purge_threads=1 diff --git a/mysql-test/suite/innodb/t/purge_pessimistic.test b/mysql-test/suite/innodb/t/purge_pessimistic.test new file mode 100644 index 00000000000..88be65a6be5 --- /dev/null +++ b/mysql-test/suite/innodb/t/purge_pessimistic.test @@ -0,0 +1,51 @@ +--source include/have_innodb.inc +--source include/count_sessions.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/not_embedded.inc + +--echo # +--echo # MDEV-35508: Race condition between purge and secondary index INSERT or UPDATE +--echo # + +SET @old_debug_dbug = @@global.debug_dbug; + +CREATE TABLE t1(col1 INT PRIMARY KEY, col2 int, KEY k1(col2)) ENGINE=Innodb; +INSERT INTO t1 VALUES(1, 100); + +CREATE TABLE t2(col1 INT PRIMARY KEY) Engine=Innodb; +--source include/wait_all_purged.inc + +START TRANSACTION; +INSERT INTO t2 VALUES(10); + +SET DEBUG_SYNC='RESET'; + +SET GLOBAL debug_dbug= "+d,btr_force_pessimistic_delete"; +SET GLOBAL debug_dbug= "+d,enable_row_purge_sec_tree_sync"; + +--connect (con1,localhost,root) +UPDATE t1 SET col2 = 200 WHERE col1 = 1; + +--connection default +SET DEBUG_SYNC= 'now WAIT_FOR purge_sec_tree_begin'; +SET GLOBAL debug_dbug= "-d,enable_row_purge_sec_tree_sync"; + +UPDATE t1 SET col2 = 100 WHERE col1 = 1; +SET DEBUG_SYNC= 'now SIGNAL purge_sec_tree_execute'; + +COMMIT; +--source include/wait_all_purged.inc + +--disconnect con1 +--source include/wait_until_count_sessions.inc + +SELECT * FROM t1; +CHECK TABLE t1; +DROP TABLE t1; + +SELECT * FROM t2; +DROP TABLE t2; + +SET @@GLOBAL.debug_dbug = @old_debug_dbug; +SET DEBUG_SYNC='RESET'; diff --git a/mysql-test/suite/innodb_fts/r/fulltext.result b/mysql-test/suite/innodb_fts/r/fulltext.result index 6b1524c527e..3f63d6e764c 100644 --- a/mysql-test/suite/innodb_fts/r/fulltext.result +++ b/mysql-test/suite/innodb_fts/r/fulltext.result @@ -801,10 +801,10 @@ title VARCHAR(200), book VARCHAR(200), FULLTEXT fidx(title)) ENGINE = InnoDB; INSERT INTO t1(title) VALUES('database'); ALTER TABLE t1 DROP INDEX fidx; -select space into @common_space from
information_schema.innodb_sys_tables where name like "test/FTS_%_CONFIG"; +create table t2 as select space from information_schema.innodb_sys_tables where name like "test/FTS_%_CONFIG"; ALTER TABLE t1 ADD FULLTEXT fidx_1(book); -select space=@common_space from information_schema.innodb_sys_tables where name like "test/FTS_%_CONFIG"; -space=@common_space +select i_s.space=t2.space from information_schema.innodb_sys_tables i_s join t2 where name like "test/FTS_%_CONFIG"; +i_s.space=t2.space 1 SHOW CREATE TABLE t1; Table Create Table @@ -815,4 +815,5 @@ t1 CREATE TABLE `t1` ( PRIMARY KEY (`ID`), FULLTEXT KEY `fidx_1` (`book`) ) ENGINE=InnoDB AUTO_INCREMENT=2 DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci -DROP TABLE t1; +DROP TABLE t1, t2; +# End of 10.5 tests diff --git a/mysql-test/suite/innodb_fts/t/fulltext.test b/mysql-test/suite/innodb_fts/t/fulltext.test index bfa1dc65146..ffcfb4fbe95 100644 --- a/mysql-test/suite/innodb_fts/t/fulltext.test +++ b/mysql-test/suite/innodb_fts/t/fulltext.test @@ -816,8 +816,10 @@ CREATE TABLE t1 ( FULLTEXT fidx(title)) ENGINE = InnoDB; INSERT INTO t1(title) VALUES('database'); ALTER TABLE t1 DROP INDEX fidx; -select space into @common_space from information_schema.innodb_sys_tables where name like "test/FTS_%_CONFIG"; +create table t2 as select space from information_schema.innodb_sys_tables where name like "test/FTS_%_CONFIG"; ALTER TABLE t1 ADD FULLTEXT fidx_1(book); -select space=@common_space from information_schema.innodb_sys_tables where name like "test/FTS_%_CONFIG"; +select i_s.space=t2.space from information_schema.innodb_sys_tables i_s join t2 where name like "test/FTS_%_CONFIG"; SHOW CREATE TABLE t1; -DROP TABLE t1; +DROP TABLE t1, t2; + +--echo # End of 10.5 tests diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_2.result b/mysql-test/suite/innodb_zip/r/innochecksum_2.result index 33d80b02ca8..e80aad11a5b 100644 --- a/mysql-test/suite/innodb_zip/r/innochecksum_2.result +++ b/mysql-test/suite/innodb_zip/r/innochecksum_2.result @@ -34,6 +34,7 @@ per-page-details FALSE log (No default value) leaf FALSE merge 0 +skip-freed-pages FALSE [1]:# check the both short and long options for "help" [2]:# Run the innochecksum when file isn't provided. # It will print the innochecksum usage similar to --help option. @@ -41,7 +42,7 @@ innochecksum Ver #.#.# Copyright (c) YEAR, YEAR , Oracle, MariaDB Corporation Ab and others. InnoDB offline file checksum utility. -Usage: innochecksum [-c] [-s ] [-e ] [-p ] [-i] [-v] [-a ] [-n] [-S] [-D ] [-l ] [-l] [-m ] +Usage: innochecksum [-c] [-r] [-s ] [-e ] [-p ] [-i] [-v] [-a ] [-n] [-S] [-D ] [-l ] [-l] [-m ] See https://mariadb.com/kb/en/library/innochecksum/ for usage hints. -?, --help Displays this help and exits. -I, --info Synonym for --help. @@ -66,6 +67,8 @@ See https://mariadb.com/kb/en/library/innochecksum/ for usage hints. 
-f, --leaf Examine leaf index pages -m, --merge=# leaf page count if merge given number of consecutive pages + -r, --skip-freed-pages + skip freed pages for the tablespace Variables (--variable-name=value) and boolean options {FALSE|TRUE} Value (after reading options) @@ -84,6 +87,7 @@ per-page-details FALSE log (No default value) leaf FALSE merge 0 +skip-freed-pages FALSE [3]:# check the both short and long options for "count" and exit Number of pages:# Number of pages:# diff --git a/mysql-test/suite/innodb_zip/r/innochecksum_3.result b/mysql-test/suite/innodb_zip/r/innochecksum_3.result index 04d2fcaa748..0e5e4d2d7f7 100644 --- a/mysql-test/suite/innodb_zip/r/innochecksum_3.result +++ b/mysql-test/suite/innodb_zip/r/innochecksum_3.result @@ -139,6 +139,7 @@ per-page-details FALSE log (No default value) leaf FALSE merge 0 +skip-freed-pages FALSE [5]: Page type dump for with shortform for tab1.ibd diff --git a/mysql-test/suite/plugins/r/server_audit_pwd_mask.result b/mysql-test/suite/plugins/r/server_audit_pwd_mask.result new file mode 100644 index 00000000000..7cf3b05bcf1 --- /dev/null +++ b/mysql-test/suite/plugins/r/server_audit_pwd_mask.result @@ -0,0 +1,22 @@ +install plugin ed25519 soname 'auth_ed25519'; +install plugin server_audit soname 'server_audit'; +set global server_audit_file_path='server_audit.log'; +set global server_audit_output_type=file; +set global server_audit_logging=on; +# unsafe to log passwords (pwd-123) +CREATE USER u1 IDENTIFIED BY 'pwd_123'; +create user u2 IDENTIFIED VIA ed25519 USING PASSWORD('pwd_123'); +SET PASSWORD FOR u1 = PASSWORD('pwd_123'); +ALTER USER u1 IDENTIFIED BY 'pwd_123'; +alter user u2 identified VIA ed25519 USING password('pwd_123'); +GRANT ALL ON test TO u1 IDENTIFIED BY "pwd_123"; +GRANT ALL ON test TO u1 identified VIA ed25519 as password('pwd_123') or ed25519 using password('pwd_123'); +# pattern should not be found +NOT FOUND /pwd_123/ in server_audit.log +# pattern should not be found +# cleaunup +DROP USER u1; +DROP USER u2; +set global server_audit_logging=off; +UNINSTALL PLUGIN ed25519; +UNINSTALL PLUGIN server_audit; diff --git a/mysql-test/suite/plugins/t/server_audit_pwd_mask.test b/mysql-test/suite/plugins/t/server_audit_pwd_mask.test new file mode 100644 index 00000000000..af6425b2035 --- /dev/null +++ b/mysql-test/suite/plugins/t/server_audit_pwd_mask.test @@ -0,0 +1,46 @@ + +--source include/have_plugin_auth.inc +--source include/not_embedded.inc + +if (!$SERVER_AUDIT_SO) { + skip No SERVER_AUDIT plugin; +} +if (!$AUTH_ED25519_SO) { + skip No auth_ed25519 plugin; +} +--disable_ps2_protocol + +let $MYSQLD_DATADIR= `SELECT @@datadir`; +let SEARCH_FILE= $MYSQLD_DATADIR/server_audit.log; + +install plugin ed25519 soname 'auth_ed25519'; +install plugin server_audit soname 'server_audit'; + + +set global server_audit_file_path='server_audit.log'; +set global server_audit_output_type=file; +set global server_audit_logging=on; + +--echo # unsafe to log passwords (pwd-123) + +CREATE USER u1 IDENTIFIED BY 'pwd_123'; +create user u2 IDENTIFIED VIA ed25519 USING PASSWORD('pwd_123'); +SET PASSWORD FOR u1 = PASSWORD('pwd_123'); +ALTER USER u1 IDENTIFIED BY 'pwd_123'; +alter user u2 identified VIA ed25519 USING password('pwd_123'); +GRANT ALL ON test TO u1 IDENTIFIED BY "pwd_123"; +GRANT ALL ON test TO u1 identified VIA ed25519 as password('pwd_123') or ed25519 using password('pwd_123'); +--let SEARCH_PATTERN=pwd_123 +--echo # pattern should not be found +--source include/search_pattern_in_file.inc +--echo # pattern should not be found + 
+--echo # cleaunup +DROP USER u1; +DROP USER u2; +set global server_audit_logging=off; +--remove_file $SEARCH_FILE +--disable_warnings +UNINSTALL PLUGIN ed25519; +UNINSTALL PLUGIN server_audit; +--enable_warnings diff --git a/mysql-test/suite/rpl/r/parallel_backup_xa_debug.result b/mysql-test/suite/rpl/r/parallel_backup_xa_debug.result new file mode 100644 index 00000000000..aa5ff772552 --- /dev/null +++ b/mysql-test/suite/rpl/r/parallel_backup_xa_debug.result @@ -0,0 +1,42 @@ +include/master-slave.inc +[connection master] +connection master; +CREATE TABLE t (a INT) ENGINE = innodb; +connection slave; +include/stop_slave.inc +SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode; +SET @@global.slave_parallel_threads= 2; +SET @@global.slave_parallel_mode = 'optimistic'; +connection master; +# MDEV-35110 +SET @@gtid_seq_no=100; +insert into t set a=1; +xa start 'x'; +insert into t set a=2; +xa end 'x'; +xa prepare 'x'; +connection slave; +SET @@global.debug_dbug="+d,hold_worker_on_schedule"; +start slave; +connection slave1; +backup stage start; +backup stage block_commit; +connection slave; +SET debug_sync = 'now SIGNAL continue_worker'; +SET debug_sync = RESET; +connection slave1; +backup stage end; +connection master; +xa rollback 'x'; +connection slave; +# Clean up. +connection slave; +include/stop_slave.inc +SET @@global.debug_dbug=""; +SET @@global.slave_parallel_threads= @old_parallel_threads; +SET @@global.slave_parallel_mode = @old_parallel_mode; +include/start_slave.inc +connection server_1; +DROP TABLE t; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result b/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result index 20dd9076a5d..c6db1afa31b 100644 --- a/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result +++ b/mysql-test/suite/rpl/r/rpl_binlog_dump_slave_gtid_state_info.result @@ -9,7 +9,6 @@ CHANGE MASTER TO MASTER_USE_GTID=slave_pos; include/start_slave.inc connection master; "Test Case 1: Start binlog_dump to slave_server(#), pos(master-bin.000001, ###), using_gtid(1), gtid('')" -include/wait_for_pattern_in_file.inc FOUND 1 /using_gtid\(1\), gtid\(\'\'\).*/ in mysqld.1.err connection slave; include/stop_slave.inc @@ -17,7 +16,6 @@ CHANGE MASTER TO MASTER_USE_GTID=no; include/start_slave.inc connection master; "Test Case 2: Start binlog_dump to slave_server(#), pos(master-bin.000001, ###), using_gtid(0), gtid('')" -include/wait_for_pattern_in_file.inc FOUND 1 /using_gtid\(0\), gtid\(\'\'\).*/ in mysqld.1.err CREATE TABLE t (f INT) ENGINE=INNODB; INSERT INTO t VALUES(10); @@ -28,7 +26,6 @@ CHANGE MASTER TO MASTER_USE_GTID=slave_pos; include/start_slave.inc connection master; "Test Case 3: Start binlog_dump to slave_server(#), pos(master-bin.000001, ###), using_gtid(1), gtid('0-1-2')" -include/wait_for_pattern_in_file.inc FOUND 1 /using_gtid\(1\), gtid\(\'0-1-2\'\).*/ in mysqld.1.err SET @@SESSION.gtid_domain_id=10; INSERT INTO t VALUES(20); @@ -39,7 +36,6 @@ CHANGE MASTER TO MASTER_USE_GTID=slave_pos; include/start_slave.inc connection master; "Test Case 4: Start binlog_dump to slave_server(#), pos(master-bin.000001, ###), using_gtid(1), gtid('0-1-2,10-1-1')" -include/wait_for_pattern_in_file.inc FOUND 1 /using_gtid\(1\), gtid\(\'0-1-2,10-1-1\'\).*/ in mysqld.1.err "===== Clean up =====" SET GLOBAL LOG_WARNINGS=@org_log_warnings; diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_coord_consistency.result 
b/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_coord_consistency.result new file mode 100644 index 00000000000..fdddd438d82 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_after_sync_coord_consistency.result @@ -0,0 +1,88 @@ +include/master-slave.inc +[connection master] +connection slave; +include/stop_slave.inc +set @old_enabled= @@global.rpl_semi_sync_slave_enabled; +set @old_dbug= @@global.debug_dbug; +set global rpl_semi_sync_slave_enabled= 1; +connection master; +set @old_enabled= @@global.rpl_semi_sync_master_enabled; +set @old_timeout= @@global.rpl_semi_sync_master_timeout; +set @old_wait_point= @@global.rpl_semi_sync_master_wait_point; +set global rpl_semi_sync_master_enabled= 1; +set global rpl_semi_sync_master_timeout= 2000; +set global rpl_semi_sync_master_wait_point= AFTER_SYNC; +connection slave; +include/start_slave.inc +# Ensure slave connection is semi-simulate_delay_semisync_slave_reply +connection master; +connection slave; +# +# Initialize test data +connection master; +create table t1 (a int) engine=innodb; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/stop_slave.inc +set @@global.debug_dbug="+d,simulate_delay_semisync_slave_reply"; +include/start_slave.inc +# Ensure connection is semi-sync +connection master; +connection slave; +connection master; +set @old_bgc_count= @@global.binlog_commit_wait_count; +set @old_bgc_usec= @@global.binlog_commit_wait_usec; +set @@global.binlog_commit_wait_count=1; +set @@global.binlog_commit_wait_usec=100000; +connection server_1; +set debug_sync= "commit_after_release_LOCK_log SIGNAL ddl_binlogged WAIT_FOR ddl_cont"; +# T1 (DDL) +create table t2 (a int); +connect server_1_sync, localhost, root,,; +connection server_1_sync; +set debug_sync= "now WAIT_FOR ddl_binlogged"; +include/save_master_gtid.inc +connection server_2; +set debug_sync= "now WAIT_FOR io_thd_at_slave_reply"; +connection master; +set debug_sync= "commit_before_get_LOCK_after_binlog_sync SIGNAL mdl_binlogged WAIT_FOR mdl_cont"; +# T2 (DML) +insert into t1 values (100);; +connection server_1_sync; +set debug_sync= "now WAIT_FOR mdl_binlogged"; +# Both transactions binlogged and released LOCK_log, and are just before +# wait_after_sync() +set debug_sync= "now SIGNAL ddl_cont"; +set debug_sync= "now SIGNAL mdl_cont"; +connection server_2; +# slave_reply for DDL +set debug_sync= "now SIGNAL io_thd_do_reply"; +# slave_reply for MDL +set debug_sync= "now WAIT_FOR io_thd_at_slave_reply"; +set debug_sync= "now SIGNAL io_thd_do_reply"; +# Reaping MDL.. +connection master; +# ..done +# Reaping DDL.. 
+connection server_1; +# ..done +# +# Cleanup +connection slave; +include/stop_slave.inc +set @@global.rpl_semi_sync_slave_enabled= @old_enabled; +set @@global.debug_dbug= @old_dbug; +include/start_slave.inc +connection master; +set @@global.binlog_commit_wait_count= @old_bgc_count; +set @@global.binlog_commit_wait_usec= @old_bgc_usec; +set @@global.rpl_semi_sync_master_enabled= @old_enabled; +set @@global.rpl_semi_sync_master_timeout= @old_timeout; +set @@global.rpl_semi_sync_master_wait_point= @old_wait_point; +drop table t1, t2; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +include/rpl_end.inc +# End of rpl_semi_sync_after_sync_coord_consistency.test diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_no_missed_ack_after_add_slave.result b/mysql-test/suite/rpl/r/rpl_semi_sync_no_missed_ack_after_add_slave.result index 19fed30ffb7..8034314c90f 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_no_missed_ack_after_add_slave.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_no_missed_ack_after_add_slave.result @@ -3,7 +3,7 @@ connection server_1; set @old_enabled= @@global.rpl_semi_sync_master_enabled; set @old_timeout= @@global.rpl_semi_sync_master_timeout; set global rpl_semi_sync_master_enabled= 1; -set global rpl_semi_sync_master_timeout= 500; +set global rpl_semi_sync_master_timeout= 2000; connection server_2; include/stop_slave.inc set @old_enabled= @@global.rpl_semi_sync_slave_enabled; @@ -14,12 +14,24 @@ include/start_slave.inc connection server_3; include/stop_slave.inc set @old_enabled= @@global.rpl_semi_sync_slave_enabled; +set @old_dbug= @@global.debug_dbug; set global rpl_semi_sync_slave_enabled= 1; +set global debug_dbug="+d,simulate_delay_semisync_slave_reply"; include/start_slave.inc # Ensure primary recognizes both replicas are semi-sync connection server_1; -connection server_1; -create table t1 (a int); +connection default; +create table t1 (a int);; +# Ensure both slaves get the event with the need_ack flag set (i.e. one +# slave shouldn't be able to receive the event and send an ACK before +# the dump thread for the other server prepares the event to send). 
+connection server_3; +set debug_sync= "now wait_for io_thd_at_slave_reply"; +connection server_2; +set debug_sync= "now wait_for io_thd_at_slave_reply"; +connection server_3; +set debug_sync= "now signal io_thd_do_reply"; +connection default; connection server_2; # Verifying server_2 did not send ACK connection server_3; @@ -27,15 +39,18 @@ connection server_3; connection server_1; # Verifying master's semi-sync status is still ON (This failed pre-MDEV-32960 fixes) # Verifying rpl_semi_sync_master_yes_tx incremented +connection server_2; +set debug_sync= "now signal io_thd_do_reply"; # # Cleanup connection server_2; +include/stop_slave.inc set global rpl_semi_sync_slave_enabled= @old_enabled; set global debug_dbug= @old_dbug; -include/stop_slave.inc connection server_3; -set global rpl_semi_sync_slave_enabled= @old_enabled; include/stop_slave.inc +set global rpl_semi_sync_slave_enabled= @old_enabled; +set global debug_dbug= @old_dbug; connection server_1; set global rpl_semi_sync_master_enabled= @old_enabled; set global rpl_semi_sync_master_timeout= @old_timeout; diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result b/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result index 3048b6b5635..a3365fd11f2 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_shutdown_await_ack.result @@ -83,12 +83,26 @@ connection server_1; INSERT INTO t1 VALUES (1); connection server_1_con2; #-- Wait until master recognizes a connection is awaiting semi-sync ACK +connection server_2; +set debug_sync= "now wait_for io_thd_at_slave_reply"; +connection server_3; +set debug_sync= "now wait_for io_thd_at_slave_reply"; +connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; +connection server_2; +# Waitng for shutdown to be delayed.. +FOUND 1 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_2; +set debug_sync= "now signal io_thd_do_reply"; +connection server_3; +set debug_sync= "now signal io_thd_do_reply"; +# Reaping transaction.. connection server_1; ERROR HY000: Lost connection to server during query -# Check logs to ensure shutdown was delayed -FOUND 1 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_2; +# Reaping shutdown.. +connection server_1_con2; # Validate slave data is in correct state connection server_2; select count(*)=1 from t1; @@ -184,12 +198,18 @@ connection server_1; INSERT INTO t1 VALUES (1); connection server_1_con2; #-- Wait until master recognizes a connection is awaiting semi-sync ACK +connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; +connection server_2; +# Waitng for shutdown to be delayed.. +FOUND 2 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +# Reaping transaction.. connection server_1; ERROR HY000: Lost connection to server during query -# Check logs to ensure shutdown was delayed -FOUND 2 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_2; +# Reaping shutdown.. 
+connection server_1_con2; # Validate slave data is in correct state connection server_2; select count(*)=0 from t1; @@ -298,12 +318,22 @@ connection server_1; INSERT INTO t1 VALUES (1); connection server_1_con2; #-- Wait until master recognizes a connection is awaiting semi-sync ACK +connection server_3; +set debug_sync= "now wait_for io_thd_at_slave_reply"; +connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; +connection server_2; +# Waitng for shutdown to be delayed.. +FOUND 3 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_3; +set debug_sync= "now signal io_thd_do_reply"; +# Reaping transaction.. connection server_1; ERROR HY000: Lost connection to server during query -# Check logs to ensure shutdown was delayed -FOUND 3 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_2; +# Reaping shutdown.. +connection server_1_con2; # Validate slave data is in correct state connection server_2; select count(*)=0 from t1; @@ -412,12 +442,22 @@ connection server_1; INSERT INTO t1 VALUES (1); connection server_1_con2; #-- Wait until master recognizes a connection is awaiting semi-sync ACK +connection server_3; +set debug_sync= "now wait_for io_thd_at_slave_reply"; +connection server_1_con2; #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; +connection server_2; +# Waitng for shutdown to be delayed.. +FOUND 4 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_3; +set debug_sync= "now signal io_thd_do_reply"; +# Reaping transaction.. connection server_1; ERROR HY000: Lost connection to server during query -# Check logs to ensure shutdown was delayed -FOUND 4 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err +connection server_2; +# Reaping shutdown.. +connection server_1_con2; # Validate slave data is in correct state connection server_2; select count(*)=0 from t1; @@ -501,15 +541,20 @@ insert into t1 values (2); connection server_1; # Wait for thd to begin semi-sync wait.. # ..done +connection server_2; +set debug_sync= "now wait_for io_thd_at_slave_reply"; disconnect con1; connection default; connection con2; SHUTDOWN WAIT FOR ALL SLAVES; -# Ensure the primary waited for the ACK of the killed thread +# Waitng for shutdown to be delayed.. 
+connection server_2; FOUND 5 /Delaying shutdown to await semi-sync ACK/ in mysqld.1.err connection default; connection server_1; +connection con2; connection server_2; +set debug_sync= "now signal io_thd_do_reply"; include/stop_slave.inc connection server_3; include/stop_slave.inc diff --git a/mysql-test/suite/rpl/t/parallel_backup_xa_debug.test b/mysql-test/suite/rpl/t/parallel_backup_xa_debug.test new file mode 100644 index 00000000000..03fc66d89e7 --- /dev/null +++ b/mysql-test/suite/rpl/t/parallel_backup_xa_debug.test @@ -0,0 +1,64 @@ +# Verify deadlock between XA-PREPARE and BACKUP on the optimistic slave +--source include/have_debug.inc +--source include/have_debug_sync.inc +--source include/have_innodb.inc +# The test is not format specific, MIXED is required to optimize testing time +--source include/have_binlog_format_mixed.inc +--source include/master-slave.inc + +--connection master +CREATE TABLE t (a INT) ENGINE = innodb; + +--sync_slave_with_master +--source include/stop_slave.inc +SET @old_parallel_threads= @@GLOBAL.slave_parallel_threads; +SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode; +SET @@global.slave_parallel_threads= 2; +SET @@global.slave_parallel_mode = 'optimistic'; + +--connection master +--echo # MDEV-35110 +SET @@gtid_seq_no=100; +insert into t set a=1; +xa start 'x'; + insert into t set a=2; +xa end 'x'; +xa prepare 'x'; + +--connection slave +SET @@global.debug_dbug="+d,hold_worker_on_schedule"; +start slave; +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for prior transaction to commit" +--source include/wait_condition.inc + +--connection slave1 +backup stage start; +--send backup stage block_commit + +--connection slave +--let $wait_condition= SELECT count(*) = 1 FROM information_schema.processlist WHERE state LIKE "Waiting for backup lock" +SET debug_sync = 'now SIGNAL continue_worker'; +SET debug_sync = RESET; + +--connection slave1 +reap; +backup stage end; + +--connection master +xa rollback 'x'; + +--sync_slave_with_master + +--echo # Clean up. 
+--connection slave +--source include/stop_slave.inc +SET @@global.debug_dbug=""; +SET @@global.slave_parallel_threads= @old_parallel_threads; +SET @@global.slave_parallel_mode = @old_parallel_mode; + +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t; + +--source include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test b/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test index 4f0eafc4020..91360e4cca5 100644 --- a/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test +++ b/mysql-test/suite/rpl/t/rpl_binlog_dump_slave_gtid_state_info.test @@ -60,7 +60,8 @@ if(!$log_error_) --let SEARCH_FILE=$log_error_ --let SEARCH_RANGE=-50000 --let SEARCH_PATTERN=using_gtid\(1\), gtid\(\'\'\).* ---source include/wait_for_pattern_in_file.inc +--let SEARCH_WAIT=FOUND +--source include/search_pattern_in_file.inc --connection slave --source include/stop_slave.inc @@ -72,7 +73,8 @@ CHANGE MASTER TO MASTER_USE_GTID=no; --let SEARCH_FILE=$log_error_ --let SEARCH_RANGE=-50000 --let SEARCH_PATTERN=using_gtid\(0\), gtid\(\'\'\).* ---source include/wait_for_pattern_in_file.inc +--let SEARCH_WAIT=FOUND +--source include/search_pattern_in_file.inc CREATE TABLE t (f INT) ENGINE=INNODB; INSERT INTO t VALUES(10); save_master_pos; @@ -90,7 +92,8 @@ CHANGE MASTER TO MASTER_USE_GTID=slave_pos; --let SEARCH_FILE=$log_error_ --let SEARCH_RANGE=-50000 --let SEARCH_PATTERN=using_gtid\(1\), gtid\(\'0-1-2\'\).* ---source include/wait_for_pattern_in_file.inc +--let SEARCH_WAIT=FOUND +--source include/search_pattern_in_file.inc SET @@SESSION.gtid_domain_id=10; INSERT INTO t VALUES(20); save_master_pos; @@ -108,7 +111,8 @@ CHANGE MASTER TO MASTER_USE_GTID=slave_pos; --let SEARCH_FILE=$log_error_ --let SEARCH_RANGE=-50000 --let SEARCH_PATTERN=using_gtid\(1\), gtid\(\'0-1-2,10-1-1\'\).* ---source include/wait_for_pattern_in_file.inc +--let SEARCH_WAIT=FOUND +--source include/search_pattern_in_file.inc --echo "===== Clean up =====" SET GLOBAL LOG_WARNINGS=@org_log_warnings; diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_after_sync_coord_consistency.test b/mysql-test/suite/rpl/t/rpl_semi_sync_after_sync_coord_consistency.test new file mode 100644 index 00000000000..ac20b555332 --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_after_sync_coord_consistency.test @@ -0,0 +1,184 @@ +# +# This test ensures that a race condition (reported in MDEV-35109) which +# could cause semi-sync transactions binlogging through MYSQL_BIN_LOG::write() +# to hang until the semi-sync timeout has been hit, despite the ACK actually +# having been received. The problem was that, for a primary configured with +# wait_point=AFTER_SYNC, if two threads T1 (binlogging through +# MYSQL_BIN_LOG::write()) and T2 were binlogging at the same time, T1 could +# accidentally wait for the binlog coordinates of T2. Prior to MDEV-33551, +# this only resulted in delayed transactions, because all transactions shared +# the same condition variable for ACK signaling. However, with the MDEV-33551 +# changes, each thread has its own condition variable to signal. So T1 could +# wait indefinitely when either: +# 1) T1's ACK is received but not T2's when T1 goes into wait_after_sync(), +# because the ACK receiver thread has already notified about the T1 ACK, +# but T1 was _actually_ waiting on T2's ACK, and therefore tries to wait +# (in vain). +# +# 2) T1 goes to wait_after_sync() before any ACKs have arrived. 
When T1's ACK +# comes in, T1 is woken up; however, sees it needs to wait more (because +# it was actually waiting on T2's ACK), and goes to wait again (this time, +# in vain). +# +# Note that the actual cause of T1 waiting on T2's binlog coordinates is when +# MYSQL_BIN_LOG::write() would call Repl_semisync_master::wait_after_sync(), +# the binlog offset parameter was read as the end of MYSQL_BIN_LOG::log_file, +# which is shared among transactions. So if T2 had updated the binary log +# _after_ T1 had released LOCK_log, but not yet invoked wait_after_sync(), it +# would use the end of the binary log file as the binlog offset, which was that +# of T2 (or any future transaction). The patch itself fixes this issue, and +# ensures consistency between the binary log coordinates a transaction uses +# between report_binlog_update() and wait_after_sync(). +# +# This test reproduces the above race condition, to ensure that T1 can no +# longer use T2's binlog coordinates when wait_point=AFTER_SYNC (AFTER_COMMIT +# was never affected). That is, because T1 in the above scenario must binlog +# through MYSQL_BIN_LOG::write(), it is a DDL. T2 is an MDL that commits +# through binlog group commit. We use debug_sync to stop both T1 and T2 +# after binlogging, reporting their coordinates to the semi-sync cache (i.e. +# after report_binlog_update()), and after releasing LOCK_log. Debug_sync is +# used on the replica to ensure no ACKs are sent until after both T1 and T2 +# are paused. Then, T1 and T2 are signaled for wakeup, and both ACKs are sent. +# Prior to MDEV-35109 fixes, this results in T1 timing out after the configured +# 2 second timeout. +# +# References: +# MDEV-33551: Semi-sync Wait Point AFTER_COMMIT Slow on Workloads with Heavy +# Concurrency +# MDEV-35109: Semi-sync Replication stalling Primary using +# wait_point=AFTER_SYNC +# + +--source include/have_innodb.inc +--source include/have_debug_sync.inc +# Test is format independent +--source include/have_binlog_format_row.inc +--source include/master-slave.inc + +--connection slave +--source include/stop_slave.inc +set @old_enabled= @@global.rpl_semi_sync_slave_enabled; +set @old_dbug= @@global.debug_dbug; +set global rpl_semi_sync_slave_enabled= 1; + +--connection master +set @old_enabled= @@global.rpl_semi_sync_master_enabled; +set @old_timeout= @@global.rpl_semi_sync_master_timeout; +set @old_wait_point= @@global.rpl_semi_sync_master_wait_point; +set global rpl_semi_sync_master_enabled= 1; +set global rpl_semi_sync_master_timeout= 2000; # 2s +set global rpl_semi_sync_master_wait_point= AFTER_SYNC; + +--connection slave +--source include/start_slave.inc + +--echo # Ensure slave connection is semi-simulate_delay_semisync_slave_reply +--connection master +--let $status_var_value= ON +--let $status_var= Rpl_semi_sync_master_status +--source include/wait_for_status_var.inc +--connection slave +--let $status_var_value= ON +--let $status_var= Rpl_semi_sync_slave_status +--source include/wait_for_status_var.inc + +--echo # +--echo # Initialize test data +--connection master + +# It is simpler to create t1 before using debug_sync on the slave_reply +create table t1 (a int) engine=innodb; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc + +--source include/stop_slave.inc +set @@global.debug_dbug="+d,simulate_delay_semisync_slave_reply"; +--source include/start_slave.inc + +--echo # Ensure connection is semi-sync +--connection master +--let $status_var_value= ON +--let $status_var= 
Rpl_semi_sync_master_status
+--source include/wait_for_status_var.inc
+--connection slave
+--let $status_var_value= ON
+--let $status_var= Rpl_semi_sync_slave_status
+--source include/wait_for_status_var.inc
+
+--connection master
+set @old_bgc_count= @@global.binlog_commit_wait_count;
+set @old_bgc_usec= @@global.binlog_commit_wait_usec;
+set @@global.binlog_commit_wait_count=1;
+set @@global.binlog_commit_wait_usec=100000;
+
+--connection server_1
+set debug_sync= "commit_after_release_LOCK_log SIGNAL ddl_binlogged WAIT_FOR ddl_cont";
+--echo # T1 (DDL)
+--send create table t2 (a int)
+
+--connect(server_1_sync, localhost, root,,)
+--connection server_1_sync
+set debug_sync= "now WAIT_FOR ddl_binlogged";
+--source include/save_master_gtid.inc
+
+--connection server_2
+set debug_sync= "now WAIT_FOR io_thd_at_slave_reply";
+
+--connection master
+set debug_sync= "commit_before_get_LOCK_after_binlog_sync SIGNAL dml_binlogged WAIT_FOR dml_cont";
+--echo # T2 (DML)
+--send insert into t1 values (100);
+
+--connection server_1_sync
+set debug_sync= "now WAIT_FOR dml_binlogged";
+
+--echo # Both transactions binlogged and released LOCK_log, and are just before
+--echo # wait_after_sync()
+set debug_sync= "now SIGNAL ddl_cont";
+# Ensure ddl_cont is seen before next SIGNAL
+--let $wait_condition= SELECT count(*)=0 FROM information_schema.processlist WHERE state LIKE "%commit_after_release_LOCK_log%"
+--source include/wait_condition.inc
+
+set debug_sync= "now SIGNAL dml_cont";
+
+--connection server_2
+--echo # slave_reply for DDL
+set debug_sync= "now SIGNAL io_thd_do_reply";
+--echo # slave_reply for DML
+set debug_sync= "now WAIT_FOR io_thd_at_slave_reply";
+set debug_sync= "now SIGNAL io_thd_do_reply";
+
+--echo # Reaping DML..
+--connection master
+--reap
+--echo # ..done
+
+--echo # Reaping DDL..
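Before the two transactions are reaped below, the coordinate problem from the test's header comment can be restated in code form. The following is a small, self-contained C++ model; the type and function names are invented for illustration and are not MariaDB's actual structures or APIs. It only contrasts the pre-fix pattern (re-reading a shared end-of-log value that T2 may already have advanced) with the fixed pattern (reusing the coordinates captured when the transaction reported itself via report_binlog_update()).

#include <cstdint>
#include <cstdio>
#include <string>

struct BinlogCoord            /* hypothetical stand-in for (log file, offset) */
{
  std::string file;
  std::uint64_t pos;
};

struct BinlogState            /* shared state that every committer advances */
{
  BinlogCoord end_of_log{"master-bin.000001", 0};
};

/* Pre-fix pattern: the wait target is re-read from shared state after other
   threads may have appended, so T1 can end up waiting on T2's offset. */
static BinlogCoord wait_target_before_fix(const BinlogState &shared)
{
  return shared.end_of_log;
}

/* Fixed pattern: the transaction waits on exactly the coordinates it
   reported to the semi-sync cache. */
static BinlogCoord wait_target_fixed(const BinlogCoord &reported)
{
  return reported;
}

int main()
{
  BinlogState shared;
  BinlogCoord t1_reported{"master-bin.000001", 500};  /* T1 binlogged at 500 */
  shared.end_of_log= t1_reported;

  /* T2 appends after T1 released LOCK_log but before T1 starts waiting. */
  shared.end_of_log= BinlogCoord{"master-bin.000001", 900};

  std::printf("buggy wait target: %llu\n",
              (unsigned long long) wait_target_before_fix(shared).pos);
  std::printf("fixed wait target: %llu\n",
              (unsigned long long) wait_target_fixed(t1_reported).pos);
  return 0;
}

Running it prints 500 for the fixed target and 900 for the buggy one, which is exactly the "waiting on T2's offset" situation this test provokes.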
+--connection server_1 +--reap +--echo # ..done + + +--echo # +--echo # Cleanup +--connection slave +--source include/stop_slave.inc +set @@global.rpl_semi_sync_slave_enabled= @old_enabled; +set @@global.debug_dbug= @old_dbug; +--source include/start_slave.inc + +--connection master +set @@global.binlog_commit_wait_count= @old_bgc_count; +set @@global.binlog_commit_wait_usec= @old_bgc_usec; +set @@global.rpl_semi_sync_master_enabled= @old_enabled; +set @@global.rpl_semi_sync_master_timeout= @old_timeout; +set @@global.rpl_semi_sync_master_wait_point= @old_wait_point; +drop table t1, t2; +--source include/save_master_gtid.inc + +--connection slave +--source include/sync_with_master_gtid.inc + +--source include/rpl_end.inc +--echo # End of rpl_semi_sync_after_sync_coord_consistency.test diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_no_missed_ack_after_add_slave.test b/mysql-test/suite/rpl/t/rpl_semi_sync_no_missed_ack_after_add_slave.test index d3523a149ef..9d8f87b4345 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_no_missed_ack_after_add_slave.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_no_missed_ack_after_add_slave.test @@ -26,7 +26,7 @@ set @old_enabled= @@global.rpl_semi_sync_master_enabled; set @old_timeout= @@global.rpl_semi_sync_master_timeout; set global rpl_semi_sync_master_enabled= 1; -set global rpl_semi_sync_master_timeout= 500; +set global rpl_semi_sync_master_timeout= 2000; # 2s --connection server_2 --source include/stop_slave.inc @@ -39,7 +39,9 @@ set global debug_dbug="+d,simulate_delay_semisync_slave_reply"; --connection server_3 --source include/stop_slave.inc set @old_enabled= @@global.rpl_semi_sync_slave_enabled; +set @old_dbug= @@global.debug_dbug; set global rpl_semi_sync_slave_enabled= 1; +set global debug_dbug="+d,simulate_delay_semisync_slave_reply"; --source include/start_slave.inc --echo # Ensure primary recognizes both replicas are semi-sync @@ -55,9 +57,23 @@ if (`SELECT strcmp("$master_ss_status", "ON") != 0`) --die rpl_semi_sync_master_status should be ON to start } ---connection server_1 +--connection default --let $init_master_yes_tx= query_get_value(SHOW STATUS LIKE 'rpl_semi_sync_master_yes_tx', Value, 1) -create table t1 (a int); +--send create table t1 (a int); + +--echo # Ensure both slaves get the event with the need_ack flag set (i.e. one +--echo # slave shouldn't be able to receive the event and send an ACK before +--echo # the dump thread for the other server prepares the event to send). 
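The ordering enforced in the steps that follow relies on the io_thd_at_slave_reply / io_thd_do_reply pair: the replica's reply path announces that it is about to send the ACK and then blocks until the test releases it. As a reading aid, here is a self-contained C++ toy with the same SIGNAL/WAIT_FOR rendezvous shape; it is a simplified stand-in, not the server's debug_sync implementation, and only the point names are copied from the test.

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <set>
#include <string>
#include <thread>

/* Toy SIGNAL / WAIT_FOR registry; build with -pthread. */
class SyncPoints
{
public:
  void signal(const std::string &name)
  {
    std::lock_guard<std::mutex> lk(m_);
    raised_.insert(name);
    cv_.notify_all();
  }
  void wait_for(const std::string &name)
  {
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait(lk, [&] { return raised_.count(name) != 0; });
  }
private:
  std::mutex m_;
  std::condition_variable cv_;
  std::set<std::string> raised_;
};

int main()
{
  SyncPoints sync;

  /* Pretend slave IO thread: pause right before sending the semi-sync ACK. */
  std::thread io_thread([&] {
    sync.signal("io_thd_at_slave_reply");   /* tell the test we are here   */
    sync.wait_for("io_thd_do_reply");       /* hold the ACK until released */
    std::printf("ACK sent\n");
  });

  /* Test driver: order other events while the ACK is known to be held back. */
  sync.wait_for("io_thd_at_slave_reply");
  std::printf("ACK is held; safe to line up the other connection\n");
  sync.signal("io_thd_do_reply");

  io_thread.join();
  return 0;
}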
+--connection server_3 +set debug_sync= "now wait_for io_thd_at_slave_reply"; +--connection server_2 +set debug_sync= "now wait_for io_thd_at_slave_reply"; +--connection server_3 +set debug_sync= "now signal io_thd_do_reply"; + +--connection default +--reap + --connection server_2 --echo # Verifying server_2 did not send ACK @@ -70,10 +86,6 @@ if (`SELECT $slave1_sent_ack`) --connection server_3 --echo # Verifying server_3 did send ACK ---let $status_var= Rpl_semi_sync_slave_send_ack ---let $status_var_comparsion= > ---let $status_var_value= 0 ---source include/wait_for_status_var.inc --let $slave2_sent_ack= query_get_value(SHOW STATUS LIKE 'rpl_semi_sync_slave_send_ack', Value, 1) if (`SELECT NOT $slave2_sent_ack`) { @@ -99,18 +111,22 @@ if (`SELECT $cur_master_yes_tx != ($init_master_yes_tx + 1)`) --die rpl_semi_sync_master_yes_tx should have been incremented by primary } +--connection server_2 +set debug_sync= "now signal io_thd_do_reply"; + --echo # --echo # Cleanup --connection server_2 +--source include/stop_slave.inc set global rpl_semi_sync_slave_enabled= @old_enabled; set global debug_dbug= @old_dbug; ---source include/stop_slave.inc --connection server_3 -set global rpl_semi_sync_slave_enabled= @old_enabled; --source include/stop_slave.inc +set global rpl_semi_sync_slave_enabled= @old_enabled; +set global debug_dbug= @old_dbug; --connection server_1 set global rpl_semi_sync_master_enabled= @old_enabled; diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc index d20ef628327..c96b4db09f2 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.inc @@ -12,10 +12,20 @@ # replica after the shutdown # server_3_expect_row_count (int) The number of rows expected on the second # replica after the shutdown +# server_2_sync_slave_reply (bool) True if server_2_dbug is configured to use +# debug_sync to synchronize the slave reply +# (i.e. to pause before the reply) +# server_3_sync_slave_reply (bool) True if server_3_dbug is configured to use +# debug_sync to synchronize the slave reply +# (i.e. to pause before the reply) # --connection server_1 let $log_error_file= `SELECT @@GLOBAL.log_error`; +if (!$n_logged_delayed_shutdown_notes) +{ + let $n_logged_delayed_shutdown_notes= 0; +} --echo #-- --echo #-- Semi-sync Setup @@ -78,19 +88,66 @@ let $status_var= Rpl_semi_sync_master_wait_sessions; let $status_var_value= 1; source include/wait_for_status_var.inc; +if ($server_2_sync_slave_reply) +{ + --connection server_2 + set debug_sync= "now wait_for io_thd_at_slave_reply"; +} +if ($server_3_sync_slave_reply) +{ + --connection server_3 + set debug_sync= "now wait_for io_thd_at_slave_reply"; +} + +--connection server_1_con2 --echo #-- Begin master shutdown SHUTDOWN WAIT FOR ALL SLAVES; ---source include/wait_until_disconnected.inc +# Use server_2 to search error log because 1 is down +--connection server_2 + +--echo # Waitng for shutdown to be delayed.. 
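Each invocation of this include now waits for its own copy of the "Delaying shutdown to await semi-sync ACK" note, which is why the counter is bumped just below before sourcing search_pattern_in_file.inc with SEARCH_WAIT=FOUND <count>. Assuming that FOUND <n> simply polls the log until the pattern has appeared at least n times, as the surrounding comments imply, the equivalent logic looks roughly like this standalone C++ sketch (the file name and timeout are illustrative only):

#include <chrono>
#include <cstdio>
#include <fstream>
#include <string>
#include <thread>

/* Count how many lines of `path` contain `pattern`. */
static int count_occurrences(const std::string &path, const std::string &pattern)
{
  std::ifstream in(path);
  std::string line;
  int hits= 0;
  while (std::getline(in, line))
    if (line.find(pattern) != std::string::npos)
      ++hits;
  return hits;
}

/* Poll until the pattern has been logged at least `wanted` times, or give up. */
static bool wait_for_nth(const std::string &path, const std::string &pattern,
                         int wanted, std::chrono::seconds timeout)
{
  auto deadline= std::chrono::steady_clock::now() + timeout;
  while (std::chrono::steady_clock::now() < deadline)
  {
    if (count_occurrences(path, pattern) >= wanted)
      return true;
    std::this_thread::sleep_for(std::chrono::milliseconds(100));
  }
  return false;
}

int main()
{
  /* Illustrative values; the test derives the real path from @@log_error. */
  bool found= wait_for_nth("mysqld.1.err",
                           "Delaying shutdown to await semi-sync ACK",
                           1, std::chrono::seconds(30));
  std::printf(found ? "pattern found\n" : "timed out\n");
  return 0;
}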
+# Increment the number of notes to find each test case to ensure the pattern is +# ours +--inc $n_logged_delayed_shutdown_notes +let SEARCH_FILE= $log_error_file; +let SEARCH_PATTERN=Delaying shutdown to await semi-sync ACK; +let SEARCH_WAIT=FOUND $n_logged_delayed_shutdown_notes; +source include/search_pattern_in_file.inc; + +if (`SELECT $server_2_sync_slave_reply AND $server_2_expect_row_count`) +{ + --connection server_2 + set debug_sync= "now signal io_thd_do_reply"; +} +if (`SELECT $server_3_sync_slave_reply AND $server_3_expect_row_count`) +{ + --connection server_3 + set debug_sync= "now signal io_thd_do_reply"; +} + +--echo # Reaping transaction.. --connection server_1 --error 2013 --reap --source include/wait_until_disconnected.inc ---echo # Check logs to ensure shutdown was delayed ---let SEARCH_FILE=$log_error_file ---let SEARCH_PATTERN=Delaying shutdown to await semi-sync ACK ---source include/search_pattern_in_file.inc +# Timeout should be hit from prior reap +--connection server_2 +if (`SELECT $server_2_sync_slave_reply AND NOT $server_2_expect_row_count`) +{ + --connection server_2 + set debug_sync= "now signal io_thd_do_reply"; +} +if (`SELECT $server_3_sync_slave_reply AND NOT $server_3_expect_row_count`) +{ + --connection server_3 + set debug_sync= "now signal io_thd_do_reply"; +} + +--echo # Reaping shutdown.. +--connection server_1_con2 +--source include/wait_until_disconnected.inc --echo # Validate slave data is in correct state --connection server_2 diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test index 4ed9ca0aa7c..906cddc9971 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_shutdown_await_ack.test @@ -113,7 +113,9 @@ while (`SELECT $i <= $slave_last`) --echo # allowed timeout, the primary should delay killing the Ack_thread --echo # until an ACK is received. --echo # +--let server_2_sync_slave_reply=1 --let server_2_dbug= "+d,simulate_delay_semisync_slave_reply" +--let server_3_sync_slave_reply=1 --let server_3_dbug= "+d,simulate_delay_semisync_slave_reply" --let semisync_timeout= 1600 --let server_2_expect_row_count= 1 @@ -125,7 +127,9 @@ while (`SELECT $i <= $slave_last`) --echo # the primary should delay killing the Ack_thread until the --echo # timeout is reached. --echo # +--let server_2_sync_slave_reply=0 --let server_2_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" +--let server_3_sync_slave_reply=0 --let server_3_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" --let semisync_timeout= 500 --let server_2_expect_row_count= 0 @@ -138,7 +142,9 @@ while (`SELECT $i <= $slave_last`) --echo # primary should delay killing the Ack_thread until it receives an --echo # ACK from the delayed slave. --echo # +--let server_2_sync_slave_reply=0 --let server_2_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" +--let server_3_sync_slave_reply=1 --let server_3_dbug= "+d,simulate_delay_semisync_slave_reply" --let semisync_timeout= 1600 --let server_2_expect_row_count= 0 @@ -156,7 +162,9 @@ while (`SELECT $i <= $slave_last`) --echo # sent to kill an active connection. This test case validates that the --echo # slave does not send a `QUIT` in this case. 
--echo # +--let server_2_sync_slave_reply=0 --let server_2_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141" +--let server_3_sync_slave_reply=1 --let server_3_dbug= "+d,simulate_delay_semisync_slave_reply" --let semisync_timeout= 1600 --let server_2_expect_row_count= 0 @@ -208,6 +216,9 @@ SET GLOBAL debug_dbug="+d,simulate_delay_semisync_slave_reply"; --source include/wait_condition.inc --echo # ..done +--connection server_2 +set debug_sync= "now wait_for io_thd_at_slave_reply"; + --disconnect con1 --connection default @@ -217,12 +228,14 @@ EOF --connection con2 SHUTDOWN WAIT FOR ALL SLAVES; ---source include/wait_until_disconnected.inc ---echo # Ensure the primary waited for the ACK of the killed thread ---let $SEARCH_PATTERN= Delaying shutdown to await semi-sync ACK ---let $SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err ---source include/search_pattern_in_file.inc +--echo # Waitng for shutdown to be delayed.. +--connection server_2 +--inc $n_logged_delayed_shutdown_notes +let SEARCH_FILE= $log_error_file; +let SEARCH_PATTERN=Delaying shutdown to await semi-sync ACK; +let SEARCH_WAIT=FOUND $n_logged_delayed_shutdown_notes; +source include/search_pattern_in_file.inc; --connection default --source include/wait_until_disconnected.inc @@ -230,7 +243,11 @@ SHUTDOWN WAIT FOR ALL SLAVES; --connection server_1 --source include/wait_until_disconnected.inc +--connection con2 +--source include/wait_until_disconnected.inc + --connection server_2 +set debug_sync= "now signal io_thd_do_reply"; --let $rpl_allow_error= 1 source include/stop_slave.inc; --connection server_3 diff --git a/mysys/array.c b/mysys/array.c index 02a54d44656..b44d34285fc 100644 --- a/mysys/array.c +++ b/mysys/array.c @@ -318,7 +318,7 @@ void get_dynamic(DYNAMIC_ARRAY *array, void *element, size_t idx) { if (unlikely(idx >= array->elements)) { - DBUG_PRINT("warning",("To big array idx: %d, array size is %d", + DBUG_PRINT("warning",("To big array idx: %zu, array size is %zu", idx,array->elements)); bzero(element,array->size_of_element); return; diff --git a/mysys/charset.c b/mysys/charset.c index 67abfe628a2..66dd1141672 100644 --- a/mysys/charset.c +++ b/mysys/charset.c @@ -690,13 +690,12 @@ const char *my_collation_get_tailoring(uint id) HASH charset_name_hash; -static uchar *get_charset_key(const uchar *object, - size_t *size, - my_bool not_used __attribute__((unused))) +static const uchar *get_charset_key(const void *object, size_t *size, + my_bool not_used __attribute__((unused))) { - CHARSET_INFO *cs= (CHARSET_INFO*) object; + CHARSET_INFO *cs= object; *size= cs->cs_name.length; - return (uchar*) cs->cs_name.str; + return (const uchar*) cs->cs_name.str; } static void init_available_charsets(void) diff --git a/mysys/hash.c b/mysys/hash.c index fccd4a24373..e3c7411142c 100644 --- a/mysys/hash.c +++ b/mysys/hash.c @@ -886,8 +886,8 @@ my_bool my_hash_check(HASH *hash) #define RECORDS 1000 -uchar *test_get_key(uchar *data, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *test_get_key(const void *data, size_t *length, + my_bool not_used __attribute__((unused))) { *length= 2; return data; @@ -903,8 +903,8 @@ int main(int argc __attribute__((unused)),char **argv __attribute__((unused))) DBUG_PUSH("d:t:O,/tmp/test_hash.trace"); printf("my_hash_init\n"); - if (my_hash_init2(PSI_INSTRUMENT_ME, &hash_test, 100, &my_charset_bin, 20, - 0, 0, (my_hash_get_key) test_get_key, 0, 0, HASH_UNIQUE)) + if (my_hash_init2(PSI_INSTRUMENT_ME, &hash_test, 100, &my_charset_bin, 20, 0, + 0, 
test_get_key, 0, 0, HASH_UNIQUE)) { fprintf(stderr, "hash init failed\n"); exit(1); diff --git a/mysys/mf_keycache.c b/mysys/mf_keycache.c index 4b0fef183d2..a563d52c0e9 100644 --- a/mysys/mf_keycache.c +++ b/mysys/mf_keycache.c @@ -321,7 +321,7 @@ KEY_CACHE *dflt_key_cache= &dflt_key_cache_var; #define FLUSH_CACHE 2000 /* sort this many blocks at once */ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache); -static void end_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, my_bool cleanup); +static void end_simple_key_cache(void *keycache_, my_bool cleanup); static void wait_on_queue(KEYCACHE_WQUEUE *wqueue, mysql_mutex_t *mutex); static void release_whole_queue(KEYCACHE_WQUEUE *wqueue); @@ -473,11 +473,12 @@ static inline uint next_power(uint value) */ static -int init_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, +int init_simple_key_cache(void *keycache_, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, uint changed_blocks_hash_size) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; size_t blocks, hash_links; size_t length; int error; @@ -834,11 +835,12 @@ void finish_resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache) */ static -int resize_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, +int resize_simple_key_cache(void *keycache_, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, uint changed_blocks_hash_size) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; int blocks= 0; DBUG_ENTER("resize_simple_key_cache"); @@ -914,9 +916,10 @@ static inline void dec_counter_for_resize_op(SIMPLE_KEY_CACHE_CB *keycache) */ static -void change_simple_key_cache_param(SIMPLE_KEY_CACHE_CB *keycache, uint division_limit, +void change_simple_key_cache_param(void *keycache_, uint division_limit, uint age_threshold) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; DBUG_ENTER("change_simple_key_cache_param"); keycache_pthread_mutex_lock(&keycache->cache_lock); if (division_limit) @@ -953,8 +956,9 @@ void change_simple_key_cache_param(SIMPLE_KEY_CACHE_CB *keycache, uint division_ */ static -void end_simple_key_cache(SIMPLE_KEY_CACHE_CB *keycache, my_bool cleanup) +void end_simple_key_cache(void *keycache_, my_bool cleanup) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; DBUG_ENTER("end_simple_key_cache"); DBUG_PRINT("enter", ("key_cache: %p", keycache)); @@ -2763,12 +2767,13 @@ static void read_block_secondary(SIMPLE_KEY_CACHE_CB *keycache, have to be a multiple of key_cache_block_size; */ -uchar *simple_key_cache_read(SIMPLE_KEY_CACHE_CB *keycache, +uchar *simple_key_cache_read(void *keycache_, File file, my_off_t filepos, int level, uchar *buff, uint length, uint block_length __attribute__((unused)), int return_buffer __attribute__((unused))) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; my_bool locked_and_incremented= FALSE; int error=0; uchar *start= buff; @@ -3015,10 +3020,11 @@ end: */ static -int simple_key_cache_insert(SIMPLE_KEY_CACHE_CB *keycache, +int simple_key_cache_insert(void *keycache_, File file, my_off_t filepos, int level, uchar *buff, uint length) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; int error= 0; DBUG_ENTER("key_cache_insert"); DBUG_PRINT("enter", ("fd: %u pos: %lu length: %u", @@ -3280,13 +3286,14 @@ int simple_key_cache_insert(SIMPLE_KEY_CACHE_CB *keycache, */ static -int simple_key_cache_write(SIMPLE_KEY_CACHE_CB *keycache, +int simple_key_cache_write(void *keycache_, File file, void *file_extra __attribute__((unused)), my_off_t filepos, int level, uchar *buff, uint length, uint block_length 
__attribute__((unused)), int dont_write) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; my_bool locked_and_incremented= FALSE; int error=0; DBUG_ENTER("simple_key_cache_write"); @@ -3753,8 +3760,10 @@ static void free_block(SIMPLE_KEY_CACHE_CB *keycache, BLOCK_LINK *block) } -static int cmp_sec_link(BLOCK_LINK **a, BLOCK_LINK **b) +static int cmp_sec_link(const void *_a, const void *_b) { + BLOCK_LINK *const *a= _a; + BLOCK_LINK *const *b= _b; return (((*a)->hash_link->diskpos < (*b)->hash_link->diskpos) ? -1 : ((*a)->hash_link->diskpos > (*b)->hash_link->diskpos) ? 1 : 0); } @@ -4363,11 +4372,12 @@ err: */ static -int flush_simple_key_cache_blocks(SIMPLE_KEY_CACHE_CB *keycache, +int flush_simple_key_cache_blocks(void *keycache_, File file, void *file_extra __attribute__((unused)), enum flush_type type) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; int res= 0; DBUG_ENTER("flush_key_blocks"); DBUG_PRINT("enter", ("keycache: %p", keycache)); @@ -4544,8 +4554,9 @@ static int flush_all_key_blocks(SIMPLE_KEY_CACHE_CB *keycache) static int reset_simple_key_cache_counters(const char *name __attribute__((unused)), - SIMPLE_KEY_CACHE_CB *keycache) + void *keycache_) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; DBUG_ENTER("reset_simple_key_cache_counters"); if (!keycache->key_cache_inited) { @@ -4887,10 +4898,11 @@ static int cache_empty(SIMPLE_KEY_CACHE_CB *keycache) */ static -void get_simple_key_cache_statistics(SIMPLE_KEY_CACHE_CB *keycache, +void get_simple_key_cache_statistics(void *keycache_, uint partition_no __attribute__((unused)), KEY_CACHE_STATISTICS *keycache_stats) { + SIMPLE_KEY_CACHE_CB *keycache= keycache_; DBUG_ENTER("simple_get_key_cache_statistics"); keycache_stats->mem_size= (longlong) keycache->key_cache_mem_size; @@ -4978,12 +4990,12 @@ typedef struct st_partitioned_key_cache_cb } PARTITIONED_KEY_CACHE_CB; static -void end_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, +void end_partitioned_key_cache(void *keycache_, my_bool cleanup); static int reset_partitioned_key_cache_counters(const char *name, - PARTITIONED_KEY_CACHE_CB *keycache); + void *keycache_); /* Determine the partition to which the index block to read is ascribed @@ -5091,11 +5103,12 @@ static SIMPLE_KEY_CACHE_CB */ static -int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, +int init_partitioned_key_cache(void *keycache_, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, uint changed_blocks_hash_size) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; int i; size_t mem_per_cache; size_t mem_decr; @@ -5257,12 +5270,13 @@ int init_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, */ static -int resize_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, +int resize_partitioned_key_cache(void *keycache_, uint key_cache_block_size, size_t use_mem, uint division_limit, uint age_threshold, uint changed_blocks_hash_size) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint i; uint partitions= keycache->partitions; my_bool cleanup= use_mem == 0; @@ -5321,10 +5335,11 @@ int resize_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, */ static -void change_partitioned_key_cache_param(PARTITIONED_KEY_CACHE_CB *keycache, +void change_partitioned_key_cache_param(void *keycache_, uint division_limit, uint age_threshold) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint i; uint partitions= keycache->partitions; DBUG_ENTER("partitioned_change_key_cache_param"); @@ -5363,9 +5378,10 @@ void 
change_partitioned_key_cache_param(PARTITIONED_KEY_CACHE_CB *keycache, */ static -void end_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, +void end_partitioned_key_cache(void *keycache_, my_bool cleanup) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint i; uint partitions= keycache->partitions; DBUG_ENTER("partitioned_end_key_cache"); @@ -5430,12 +5446,13 @@ void end_partitioned_key_cache(PARTITIONED_KEY_CACHE_CB *keycache, */ static -uchar *partitioned_key_cache_read(PARTITIONED_KEY_CACHE_CB *keycache, +uchar *partitioned_key_cache_read(void *keycache_, File file, my_off_t filepos, int level, uchar *buff, uint length, uint block_length __attribute__((unused)), int return_buffer __attribute__((unused))) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint r_length; uint offset= (uint) (filepos % keycache->key_cache_block_size); uchar *start= buff; @@ -5508,10 +5525,11 @@ uchar *partitioned_key_cache_read(PARTITIONED_KEY_CACHE_CB *keycache, */ static -int partitioned_key_cache_insert(PARTITIONED_KEY_CACHE_CB *keycache, +int partitioned_key_cache_insert(void *keycache_, File file, my_off_t filepos, int level, uchar *buff, uint length) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint w_length; uint offset= (uint) (filepos % keycache->key_cache_block_size); DBUG_ENTER("partitioned_key_cache_insert"); @@ -5590,13 +5608,14 @@ int partitioned_key_cache_insert(PARTITIONED_KEY_CACHE_CB *keycache, */ static -int partitioned_key_cache_write(PARTITIONED_KEY_CACHE_CB *keycache, +int partitioned_key_cache_write(void *keycache_, File file, void *file_extra, my_off_t filepos, int level, uchar *buff, uint length, uint block_length __attribute__((unused)), int dont_write) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint w_length; ulonglong *part_map= (ulonglong *) file_extra; uint offset= (uint) (filepos % keycache->key_cache_block_size); @@ -5674,10 +5693,11 @@ int partitioned_key_cache_write(PARTITIONED_KEY_CACHE_CB *keycache, */ static -int flush_partitioned_key_cache_blocks(PARTITIONED_KEY_CACHE_CB *keycache, +int flush_partitioned_key_cache_blocks(void *keycache_, File file, void *file_extra, enum flush_type type) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint i; uint partitions= keycache->partitions; int err= 0; @@ -5724,8 +5744,9 @@ int flush_partitioned_key_cache_blocks(PARTITIONED_KEY_CACHE_CB *keycache, static int reset_partitioned_key_cache_counters(const char *name __attribute__((unused)), - PARTITIONED_KEY_CACHE_CB *keycache) + void *keycache_) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint i; uint partitions= keycache->partitions; DBUG_ENTER("partitioned_reset_key_cache_counters"); @@ -5766,10 +5787,11 @@ reset_partitioned_key_cache_counters(const char *name __attribute__((unused)), static void -get_partitioned_key_cache_statistics(PARTITIONED_KEY_CACHE_CB *keycache, +get_partitioned_key_cache_statistics(void *keycache_, uint partition_no, KEY_CACHE_STATISTICS *keycache_stats) { + PARTITIONED_KEY_CACHE_CB *keycache= keycache_; uint i; SIMPLE_KEY_CACHE_CB *partition; uint partitions= keycache->partitions; diff --git a/mysys/mf_qsort.c b/mysys/mf_qsort.c index b516639a341..4dee20750c0 100644 --- a/mysys/mf_qsort.c +++ b/mysys/mf_qsort.c @@ -84,7 +84,7 @@ typedef struct st_stack /**************************************************************************** ** 'standard' quicksort with the following extensions: ** -** Can be compiled with the qsort2_cmp compare function +** Can be compiled with the qsort_cmp2 compare function ** Store ranges on 
stack to avoid recursion ** Use insert sort on small ranges ** Optimize for sorting of pointers (used often by MySQL) @@ -92,7 +92,7 @@ typedef struct st_stack *****************************************************************************/ #ifdef QSORT_EXTRA_CMP_ARGUMENT -qsort_t my_qsort2(void *base_ptr, size_t count, size_t size, qsort2_cmp cmp, +qsort_t my_qsort2(void *base_ptr, size_t count, size_t size, qsort_cmp2 cmp, void *cmp_argument) #else qsort_t my_qsort(void *base_ptr, size_t count, size_t size, qsort_cmp cmp) diff --git a/mysys/my_lib.c b/mysys/my_lib.c index fb03f0aa5c2..f905e757869 100644 --- a/mysys/my_lib.c +++ b/mysys/my_lib.c @@ -57,7 +57,7 @@ #define NAMES_START_SIZE 32768 -static int comp_names(struct fileinfo *a,struct fileinfo *b); +static int comp_names(const void *a, const void *b); typedef struct { MY_DIR dir; @@ -83,8 +83,10 @@ void my_dirend(MY_DIR *dir) /* Compare in sort of filenames */ -static int comp_names(struct fileinfo *a, struct fileinfo *b) +static int comp_names(const void *a_, const void *b_) { + const struct fileinfo *a= a_; + const struct fileinfo *b= b_; return (strcmp(a->name,b->name)); } /* comp_names */ diff --git a/mysys/my_likely.c b/mysys/my_likely.c index d52074f01e4..0d5463f64b0 100644 --- a/mysys/my_likely.c +++ b/mysys/my_likely.c @@ -35,11 +35,12 @@ typedef struct st_likely_entry ulonglong ok,fail; } LIKELY_ENTRY; -static uchar *get_likely_key(LIKELY_ENTRY *part, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *get_likely_key(const void *part_, size_t *length, + my_bool not_used __attribute__((unused))) { + const LIKELY_ENTRY *part= (const LIKELY_ENTRY *) part_; *length= part->key_length; - return (uchar*) part->key; + return (const uchar *) part->key; } pthread_mutex_t likely_mutex; @@ -49,14 +50,15 @@ void init_my_likely() { /* Allocate big enough to avoid malloc calls */ my_hash_init2(PSI_NOT_INSTRUMENTED, &likely_hash, 10000, &my_charset_bin, - 1024, 0, 0, (my_hash_get_key) get_likely_key, 0, free, - HASH_UNIQUE); + 1024, 0, 0, get_likely_key, 0, free, HASH_UNIQUE); likely_inited= 1; pthread_mutex_init(&likely_mutex, MY_MUTEX_INIT_FAST); } -static int likely_cmp(LIKELY_ENTRY **a, LIKELY_ENTRY **b) +static int likely_cmp(const void *a_, const void *b_) { + const LIKELY_ENTRY *const *a= a_; + const LIKELY_ENTRY *const *b= b_; int cmp; if ((cmp= strcmp((*a)->key, (*b)->key))) return cmp; diff --git a/mysys/my_safehash.c b/mysys/my_safehash.c index 7d37b707c3a..70ed450e037 100644 --- a/mysys/my_safehash.c +++ b/mysys/my_safehash.c @@ -50,8 +50,9 @@ This function is called by the hash object on delete */ -static void safe_hash_entry_free(SAFE_HASH_ENTRY *entry) +static void safe_hash_entry_free(void *entry_) { + SAFE_HASH_ENTRY *entry= entry_; DBUG_ENTER("safe_hash_entry_free"); my_free(entry); DBUG_VOID_RETURN; @@ -70,11 +71,13 @@ static void safe_hash_entry_free(SAFE_HASH_ENTRY *entry) # reference on the key */ -static uchar *safe_hash_entry_get(SAFE_HASH_ENTRY *entry, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *safe_hash_entry_get(const void *entry_, size_t *length, + my_bool not_used + __attribute__((unused))) { + const SAFE_HASH_ENTRY *entry= entry_; *length= entry->length; - return (uchar*) entry->key; + return entry->key; } @@ -101,8 +104,8 @@ my_bool safe_hash_init(SAFE_HASH *hash, uint elements, { DBUG_ENTER("safe_hash_init"); if (my_hash_init(key_memory_SAFE_HASH_ENTRY, &hash->hash, &my_charset_bin, - elements, 0, 0, (my_hash_get_key) safe_hash_entry_get, - 
(void (*)(void*)) safe_hash_entry_free, 0)) + elements, 0, 0, safe_hash_entry_get, safe_hash_entry_free, + 0)) { hash->default_value= 0; DBUG_RETURN(1); diff --git a/mysys/my_stack.c b/mysys/my_stack.c index 3eec01092ac..b32925c8f6c 100644 --- a/mysys/my_stack.c +++ b/mysys/my_stack.c @@ -40,56 +40,36 @@ extern void my_get_stack_bounds(void **stack_start, void **stack_end, void *fallback_stack_start, size_t fallback_stack_size) { -#if defined(__GNUC__) || defined(__clang__) /* GCC or Clang compilers */ size_t stack_size; -#if defined(HAVE_PTHREAD_GETATTR_NP) +#if defined(HAVE_PTHREAD_GETATTR_NP) && !defined(_AIX) /* POSIX-compliant system (Linux, macOS, etc.) */ pthread_attr_t attr; pthread_t thread= pthread_self(); - void *stack_base; /* Get the thread attributes */ if (pthread_getattr_np(thread, &attr) == 0) { /* Get stack base and size */ - pthread_attr_getstack(&attr, &stack_base, &stack_size); - /* - stack_base points to start of the stack region to which the - stack grows to - */ - *stack_start= stack_base - stack_size * STACK_DIRECTION; - pthread_attr_destroy(&attr); /* Clean up */ - } - else - { - /* - Fallback: - Use the current stack pointer as an approximation of the start - */ - *stack_start= my_get_stack_pointer(fallback_stack_start); - stack_size= (fallback_stack_size - - MY_MIN(fallback_stack_size, MY_STACK_SAFE_MARGIN)); - } + void *low_addr, *high_addr= NULL; + if (pthread_attr_getstack(&attr, &low_addr, &stack_size) == 0) + { + high_addr= (char *) low_addr + stack_size; +#if STACK_DIRECTION < 0 + *stack_start= high_addr; + *stack_end= low_addr; #else - /* Platform does not have pthread_getattr_np */ + *stack_start= low_addr; + *stack_end= high_addr; +#endif + } + pthread_attr_destroy(&attr); /* Clean up */ + if (high_addr) + return; + } +#endif + /* Platform does not have pthread_getattr_np, or fallback */ *stack_start= my_get_stack_pointer(fallback_stack_start); stack_size= (fallback_stack_size - MY_MIN(fallback_stack_size, MY_STACK_SAFE_MARGIN)); -#endif /* defined(HAVE_PTHREAD_GETATTR_NP) */ - *stack_end= *stack_start + stack_size * STACK_DIRECTION; - -#elif defined(_MSC_VER) && defined(_WIN32) - /* Windows platform (MSVC) */ - NT_TIB* teb= (NT_TIB*)NtCurrentTeb(); - - *stack_start= teb->StackBase; /* Start of the stack */ - *stack_end= teb->StackLimit; /* End of the stack (stack limit) */ -#else - /* Unsupported platform / compiler */ - *stack_start= my_get_stack_pointer(fallback_stack_start); - *stack_end= (*stack_start + - (fallback_stack_size - - MY_MIN(fallback_stack_size, MY_STACK_SAFE_MARGIN)) * - STACK_DIRECTON); -#endif /* defined(__GNUC__) || defined(__clang__) */ + *stack_end= (char *)(*stack_start) + stack_size * STACK_DIRECTION; } diff --git a/mysys/ptr_cmp.c b/mysys/ptr_cmp.c index 7ea15baf86d..40af3f63afd 100644 --- a/mysys/ptr_cmp.c +++ b/mysys/ptr_cmp.c @@ -44,40 +44,43 @@ #include -static int native_compare(size_t *length, unsigned char **a, unsigned char **b) +static int native_compare(void *length_, const void *a_, const void *b_) { + size_t *length= length_; + const unsigned char *const *a= a_; + const unsigned char *const *b= b_; return memcmp(*a, *b, *length); } -qsort2_cmp get_ptr_compare (size_t size __attribute__((unused))) +qsort_cmp2 get_ptr_compare (size_t size __attribute__((unused))) { - return (qsort2_cmp) native_compare; + return native_compare; } #else /* USE_NATIVE_MEMCMP */ -static int ptr_compare(size_t *compare_length, uchar **a, uchar **b); -static int ptr_compare_0(size_t *compare_length, uchar **a, uchar **b); -static int 
ptr_compare_1(size_t *compare_length, uchar **a, uchar **b); -static int ptr_compare_2(size_t *compare_length, uchar **a, uchar **b); -static int ptr_compare_3(size_t *compare_length, uchar **a, uchar **b); -static int degenerate_compare_func(size_t *compare_length, uchar **a, uchar **b) +static int ptr_compare(void *compare_length, const void *a, const void *b); +static int ptr_compare_0(void *compare_length, const void *a, const void *b); +static int ptr_compare_1(void *compare_length, const void *a, const void *b); +static int ptr_compare_2(void *compare_length, const void *a, const void *b); +static int ptr_compare_3(void *compare_length, const void *a, const void *b); +static int degenerate_compare_func(void *compare_length, const void *a, const void *b) { - DBUG_ASSERT(*compare_length == 0); + DBUG_ASSERT(*((size_t *) compare_length) == 0); return 0; } -qsort2_cmp get_ptr_compare (size_t size) +qsort_cmp2 get_ptr_compare (size_t size) { if (size == 0) - return (qsort2_cmp) degenerate_compare_func; + return degenerate_compare_func; if (size < 4) - return (qsort2_cmp) ptr_compare; + return ptr_compare; switch (size & 3) { - case 0: return (qsort2_cmp) ptr_compare_0; - case 1: return (qsort2_cmp) ptr_compare_1; - case 2: return (qsort2_cmp) ptr_compare_2; - case 3: return (qsort2_cmp) ptr_compare_3; + case 0: return ptr_compare_0; + case 1: return ptr_compare_1; + case 2: return ptr_compare_2; + case 3: return ptr_compare_3; } return 0; /* Impossible */ } @@ -88,13 +91,13 @@ qsort2_cmp get_ptr_compare (size_t size) #define cmp(N) if (first[N] != last[N]) return (int) first[N] - (int) last[N] -static int ptr_compare(size_t *compare_length, uchar **a, uchar **b) +static int ptr_compare(void *compare_length, const void *a, const void *b) { - size_t length= *compare_length; - uchar *first,*last; + size_t length= *((size_t *) compare_length); + const uchar *first= *((const uchar *const *) a); + const uchar *last= *((const uchar *const *) b); DBUG_ASSERT(length > 0); - first= *a; last= *b; while (--length) { if (*first++ != *last++) @@ -104,12 +107,11 @@ static int ptr_compare(size_t *compare_length, uchar **a, uchar **b) } -static int ptr_compare_0(size_t *compare_length,uchar **a, uchar **b) +static int ptr_compare_0(void *compare_length, const void *a, const void *b) { - size_t length= *compare_length; - uchar *first,*last; - - first= *a; last= *b; + size_t length= *((size_t *) compare_length); + const uchar *first= *((const uchar *const *) a); + const uchar *last= *((const uchar *const *) b); loop: cmp(0); cmp(1); @@ -125,12 +127,13 @@ static int ptr_compare_0(size_t *compare_length,uchar **a, uchar **b) } -static int ptr_compare_1(size_t *compare_length,uchar **a, uchar **b) +static int ptr_compare_1(void *compare_length, const void *a, const void *b) { - size_t length= *compare_length-1; - uchar *first,*last; - first= *a+1; last= *b+1; + size_t length= *((size_t *) compare_length) - 1; + const uchar *first= *((const uchar *const *) a) + 1; + const uchar *last= *((const uchar *const *) b) + 1; + cmp(-1); loop: cmp(0); @@ -146,12 +149,12 @@ static int ptr_compare_1(size_t *compare_length,uchar **a, uchar **b) return (0); } -static int ptr_compare_2(size_t *compare_length,uchar **a, uchar **b) +static int ptr_compare_2(void *compare_length, const void *a, const void *b) { - size_t length= *compare_length-2; - uchar *first,*last; + size_t length= *((size_t *) compare_length) - 2; + const uchar *first= *((const uchar *const *) a) + 2; + const uchar *last= *((const uchar *const *) b) + 2; - 
first= *a +2 ; last= *b +2; cmp(-2); cmp(-1); loop: @@ -168,12 +171,12 @@ static int ptr_compare_2(size_t *compare_length,uchar **a, uchar **b) return (0); } -static int ptr_compare_3(size_t *compare_length,uchar **a, uchar **b) +static int ptr_compare_3(void *compare_length, const void *a, const void *b) { - size_t length= *compare_length-3; - uchar *first,*last; + size_t length= *((size_t *) compare_length) - 3; + const uchar *first= *((const uchar *const *) a) + 3; + const uchar *last= *((const uchar *const *) b) + 3; - first= *a +3 ; last= *b +3; cmp(-3); cmp(-2); cmp(-1); diff --git a/mysys/queues.c b/mysys/queues.c index 0a1149bf9fc..07b3b4f7ed1 100644 --- a/mysys/queues.c +++ b/mysys/queues.c @@ -70,7 +70,7 @@ */ int init_queue(QUEUE *queue, uint max_elements, uint offset_to_key, - my_bool max_at_top, int (*compare) (void *, uchar *, uchar *), + my_bool max_at_top, qsort_cmp2 compare, void *first_cmp_arg, uint offset_to_queue_pos, uint auto_extent) { @@ -109,7 +109,7 @@ int init_queue(QUEUE *queue, uint max_elements, uint offset_to_key, */ int reinit_queue(QUEUE *queue, uint max_elements, uint offset_to_key, - my_bool max_at_top, int (*compare) (void *, uchar *, uchar *), + my_bool max_at_top, qsort_cmp2 compare, void *first_cmp_arg, uint offset_to_queue_pos, uint auto_extent) { diff --git a/mysys/thr_timer.c b/mysys/thr_timer.c index d3627fea983..00fc74cdf77 100644 --- a/mysys/thr_timer.c +++ b/mysys/thr_timer.c @@ -46,10 +46,11 @@ static void *timer_handler(void *arg __attribute__((unused))); */ static int compare_timespec(void *not_used __attribute__((unused)), - uchar *a_ptr, uchar *b_ptr) + const void *a_ptr, const void *b_ptr) { - return cmp_timespec((*(struct timespec*) a_ptr), - (*(struct timespec*) b_ptr)); + const struct timespec *ap= a_ptr; + const struct timespec *bp= b_ptr; + return cmp_timespec((*ap), (*bp)); } diff --git a/mysys/waiting_threads.c b/mysys/waiting_threads.c index 7c20c2eec5d..8f747092376 100644 --- a/mysys/waiting_threads.c +++ b/mysys/waiting_threads.c @@ -449,7 +449,7 @@ void wt_init() sizeof_WT_RESOURCE_ID, 0, 0); reshash.alloc.constructor= wt_resource_create; reshash.alloc.destructor= wt_resource_destroy; - reshash.initializer= (lf_hash_initializer) wt_resource_init; + reshash.initializer= wt_resource_init; bzero(wt_wait_stats, sizeof(wt_wait_stats)); bzero(wt_cycle_stats, sizeof(wt_cycle_stats)); @@ -829,7 +829,7 @@ static int unlock_lock_and_free_resource(WT_THD *thd, WT_RESOURCE *rc) if (rc->owners.elements || rc->waiter_count) { - DBUG_PRINT("wt", ("nothing to do, %u owners, %u waiters", + DBUG_PRINT("wt", ("nothing to do, %zu owners, %u waiters", rc->owners.elements, rc->waiter_count)); rc_unlock(rc); DBUG_RETURN(0); @@ -1142,4 +1142,3 @@ void wt_thd_release(WT_THD *thd, const WT_RESOURCE_ID *resid) reset_dynamic(&thd->my_resources); DBUG_VOID_RETURN; } - diff --git a/plugin/auth_socket/CMakeLists.txt b/plugin/auth_socket/CMakeLists.txt index 83e0b86d8cb..9de916d9c39 100644 --- a/plugin/auth_socket/CMakeLists.txt +++ b/plugin/auth_socket/CMakeLists.txt @@ -19,7 +19,9 @@ IF(WIN32) ENDIF() CHECK_CXX_SOURCE_COMPILES( -"#define _GNU_SOURCE +"#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif #include int main() { struct ucred cred; diff --git a/plugin/daemon_example/daemon_example.cc b/plugin/daemon_example/daemon_example.cc index 50026e92be1..fc6b9f19bb2 100644 --- a/plugin/daemon_example/daemon_example.cc +++ b/plugin/daemon_example/daemon_example.cc @@ -26,14 +26,6 @@ #include "m_string.h" // strlen #include "sql_plugin.h" // st_plugin_int -/* - 
Disable __attribute__() on non-gcc compilers. -*/ -#if !defined(__attribute__) && !defined(__GNUC__) -#define __attribute__(A) -#endif - - #define HEART_STRING_BUFFER 100 struct mysql_heartbeat_context @@ -77,14 +69,14 @@ pthread_handler_t mysql_heartbeat(void *p) daemon_example_plugin_init() DESCRIPTION - Starts up heartbeatbeat thread + Starts up heartbeat thread (mysql_heartbeat) RETURN VALUE 0 success 1 failure (cannot happen) */ -static int daemon_example_plugin_init(void *p __attribute__ ((unused))) +static int daemon_example_plugin_init(void *p) { DBUG_ENTER("daemon_example_plugin_init"); @@ -150,7 +142,7 @@ static int daemon_example_plugin_init(void *p __attribute__ ((unused))) */ -static int daemon_example_plugin_deinit(void *p __attribute__ ((unused))) +static int daemon_example_plugin_deinit(void *p) { DBUG_ENTER("daemon_example_plugin_deinit"); char buffer[HEART_STRING_BUFFER]; @@ -162,6 +154,10 @@ static int daemon_example_plugin_deinit(void *p __attribute__ ((unused))) pthread_cancel(con->heartbeat_thread); pthread_join(con->heartbeat_thread, NULL); + /* + As thread is joined, we can close the file it writes to and + free the memory it uses. + */ localtime_r(&result, &tm_tmp); my_snprintf(buffer, sizeof(buffer), @@ -174,12 +170,6 @@ static int daemon_example_plugin_deinit(void *p __attribute__ ((unused))) tm_tmp.tm_sec); my_write(con->heartbeat_file, (uchar*) buffer, strlen(buffer), MYF(0)); - /* - Need to wait for the hearbeat thread to terminate before closing - the file it writes to and freeing the memory it uses. - */ - pthread_join(con->heartbeat_thread, NULL); - my_close(con->heartbeat_file, MYF(0)); my_free(con); diff --git a/plugin/hashicorp_key_management/hashicorp_key_management_plugin.cc b/plugin/hashicorp_key_management/hashicorp_key_management_plugin.cc index dfeb1aca737..3c59e993879 100644 --- a/plugin/hashicorp_key_management/hashicorp_key_management_plugin.cc +++ b/plugin/hashicorp_key_management/hashicorp_key_management_plugin.cc @@ -562,7 +562,7 @@ int HCData::curl_run (const char *url, std::string *response, else if (is_error) { my_printf_error(ER_UNKNOWN_ERROR, PLUGIN_ERROR_HEADER - "Hashicorp server error: %d, response: %s", + "Hashicorp server error: %ld, response: %s", ME_ERROR_LOG_ONLY | ME_WARNING, http_code, res); } } diff --git a/plugin/server_audit/server_audit.c b/plugin/server_audit/server_audit.c index 3c94df243d0..20eda30657b 100644 --- a/plugin/server_audit/server_audit.c +++ b/plugin/server_audit/server_audit.c @@ -1415,7 +1415,7 @@ static size_t log_header(char *message, size_t message_len, if (output_type == OUTPUT_SYSLOG) return my_snprintf(message, message_len, "%.*s,%.*s,%.*s,%d,%lld,%s", - (unsigned int) serverhost_len, serverhost, + (int) serverhost_len, serverhost, username_len, username, host_len, host, connection_id, query_id, operation); @@ -1425,7 +1425,7 @@ static size_t log_header(char *message, size_t message_len, "%04d%02d%02d %02d:%02d:%02d,%.*s,%.*s,%.*s,%d,%lld,%s", tm_time.tm_year+1900, tm_time.tm_mon+1, tm_time.tm_mday, tm_time.tm_hour, tm_time.tm_min, tm_time.tm_sec, - serverhost_len, serverhost, + (int) serverhost_len, serverhost, username_len, username, host_len, host, connection_id, query_id, operation); @@ -1494,7 +1494,7 @@ static int log_connection_event(const struct mysql_event_connection *event, event->ip, event->ip_length, event->thread_id, 0, type); csize+= my_snprintf(message+csize, sizeof(message) - 1 - csize, - ",%.*s,,%d", event->database.length, event->database.str, event->status); + ",%.*s,,%d", (int) 
event->database.length, event->database.str, event->status); message[csize]= '\n'; return write_log(message, csize + 1, 1); } @@ -1529,12 +1529,33 @@ static size_t escape_string(const char *str, unsigned int len, return result - res_start; } +/* + Replace "password" with "*****" in + + "password" + + if is 0 + + "password" + + or + + "password" + + if is 0 + + "password" + + NOTE: there can be " or ' around the password, the words are case + insensitive. +*/ static size_t escape_string_hide_passwords(const char *str, unsigned int len, char *result, size_t result_len, const char *word1, size_t word1_len, const char *word2, size_t word2_len, - int next_text_string) + const char *word0, size_t word0_len, + char chr0) { const char *res_start= result; const char *res_end= result + result_len - 2; @@ -1542,18 +1563,32 @@ static size_t escape_string_hide_passwords(const char *str, unsigned int len, while (len) { - if (len > word1_len + 1 && strncasecmp(str, word1, word1_len) == 0) + int word1_found= (word1 && len > word1_len + 1 && + strncasecmp(str, word1, word1_len) == 0); + int word0_found= (word0 && len > word0_len + 1 && + strncasecmp(str, word0, word0_len) == 0); + if (word1_found || word0_found) { - const char *next_s= str + word1_len; + const char *next_s; size_t c; - if (next_text_string) + if (word0_found) { + next_s= str + word0_len; + if (chr0) + { + SKIP_SPACES(next_s); + if (len < (size_t)(next_s - str) + 1 + 1 || + next_s[0] != chr0) + goto no_password; + next_s++; + } while (*next_s && *next_s != '\'' && *next_s != '"') ++next_s; } else { + next_s= str + word1_len; if (word2) { SKIP_SPACES(next_s); @@ -1868,23 +1903,27 @@ do_log_query: case SQLCOM_ALTER_USER: csize+= escape_string_hide_passwords(query, query_len, uh_buffer, uh_buffer_size, - "IDENTIFIED", 10, "BY", 2, 0); + "IDENTIFIED", 10, "BY", 2, + "PASSWORD", 8, '('); break; case SQLCOM_CHANGE_MASTER: csize+= escape_string_hide_passwords(query, query_len, uh_buffer, uh_buffer_size, - "MASTER_PASSWORD", 15, "=", 1, 0); + "MASTER_PASSWORD", 15, "=", 1, + 0, 0, 0); break; case SQLCOM_CREATE_SERVER: case SQLCOM_ALTER_SERVER: csize+= escape_string_hide_passwords(query, query_len, uh_buffer, uh_buffer_size, - "PASSWORD", 8, NULL, 0, 0); + "PASSWORD", 8, NULL, 0, + 0, 0, 0); break; case SQLCOM_SET_OPTION: csize+= escape_string_hide_passwords(query, query_len, uh_buffer, uh_buffer_size, - "=", 1, NULL, 0, 1); + NULL, 0, NULL, 0, + "=", 1, 0); break; default: csize+= escape_string(query, query_len, @@ -1926,9 +1965,9 @@ static int log_table(const struct connection_info *cn, event->host, SAFE_STRLEN_UI(event->host), event->ip, SAFE_STRLEN_UI(event->ip), event->thread_id, cn->query_id, type); - csize+= my_snprintf(message+csize, sizeof(message) - 1 - csize, - ",%.*s,%.*s,",event->database.length, event->database.str, - event->table.length, event->table.str); + csize+= my_snprintf(message+csize, sizeof(message) - 1 - csize, ",%.*s,%.*s,", + (int) event->database.length, event->database.str, + (int) event->table.length, event->table.str); message[csize]= '\n'; return write_log(message, csize + 1, 1); } @@ -1949,10 +1988,11 @@ static int log_rename(const struct connection_info *cn, event->ip, SAFE_STRLEN_UI(event->ip), event->thread_id, cn->query_id, "RENAME"); csize+= my_snprintf(message+csize, sizeof(message) - 1 - csize, - ",%.*s,%.*s|%.*s.%.*s,",event->database.length, event->database.str, - event->table.length, event->table.str, - event->new_database.length, event->new_database.str, - event->new_table.length, event->new_table.str); + 
",%.*s,%.*s|%.*s.%.*s,", + (int) event->database.length, event->database.str, + (int) event->table.length, event->table.str, + (int) event->new_database.length, event->new_database.str, + (int) event->new_table.length, event->new_table.str); message[csize]= '\n'; return write_log(message, csize + 1, 1); } @@ -3127,4 +3167,3 @@ exit: return; #endif } - diff --git a/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.result b/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.result index 9ee1a0202b0..d155e073274 100644 --- a/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.result +++ b/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.result @@ -110,3 +110,29 @@ PLUGIN_AUTH_VERSION 1.0 # # End of 10.5 tests # +# Start of 10.11 tests +# +# MDEV-34981 Functions missing from INFORMATION_SCHEMA.SQL_FUNCTIONS +# +SELECT FUNCTION FROM INFORMATION_SCHEMA.SQL_FUNCTIONS +WHERE FUNCTION IN ( +'INET6_ATON', +'INET6_NTOA', +'INET_ATON', +'INET_NTOA', +'IS_IPV4', +'IS_IPV4_COMPAT', +'IS_IPV4_MAPPED', +'IS_IPV6' +) +ORDER BY FUNCTION; +FUNCTION +INET6_ATON +INET6_NTOA +INET_ATON +INET_NTOA +IS_IPV4 +IS_IPV4_COMPAT +IS_IPV4_MAPPED +IS_IPV6 +# End of 10.11 tests diff --git a/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.test b/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.test index 45b462e8e82..f53d2a5d0bc 100644 --- a/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.test +++ b/plugin/type_inet/mysql-test/type_inet/func_inet_plugin.test @@ -35,3 +35,24 @@ ORDER BY PLUGIN_NAME; --echo # --echo # End of 10.5 tests --echo # + +--echo # Start of 10.11 tests + +--echo # +--echo # MDEV-34981 Functions missing from INFORMATION_SCHEMA.SQL_FUNCTIONS +--echo # + +SELECT FUNCTION FROM INFORMATION_SCHEMA.SQL_FUNCTIONS +WHERE FUNCTION IN ( +'INET6_ATON', +'INET6_NTOA', +'INET_ATON', +'INET_NTOA', +'IS_IPV4', +'IS_IPV4_COMPAT', +'IS_IPV4_MAPPED', +'IS_IPV6' +) +ORDER BY FUNCTION; + +--echo # End of 10.11 tests diff --git a/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.result b/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.result index 716f33134ca..f37339db343 100644 --- a/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.result +++ b/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.result @@ -33,3 +33,17 @@ PLUGIN_AUTH_VERSION 1.0 # # End of 10.5 tests # +# Start of 10.11 tests +# +# MDEV-34981 Functions missing from INFORMATION_SCHEMA.SQL_FUNCTIONS +# +SELECT FUNCTION FROM INFORMATION_SCHEMA.SQL_FUNCTIONS +WHERE FUNCTION IN ( +'SYS_GUID', +'UUID' +) +ORDER BY FUNCTION; +FUNCTION +SYS_GUID +UUID +# End of 10.11 tests diff --git a/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.test b/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.test index ce8aba68979..2f179e8843f 100644 --- a/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.test +++ b/plugin/type_uuid/mysql-test/type_uuid/func_uuid_plugin.test @@ -28,3 +28,18 @@ ORDER BY PLUGIN_NAME; --echo # --echo # End of 10.5 tests --echo # + +--echo # Start of 10.11 tests + +--echo # +--echo # MDEV-34981 Functions missing from INFORMATION_SCHEMA.SQL_FUNCTIONS +--echo # + +SELECT FUNCTION FROM INFORMATION_SCHEMA.SQL_FUNCTIONS +WHERE FUNCTION IN ( +'SYS_GUID', +'UUID' +) +ORDER BY FUNCTION; + +--echo # End of 10.11 tests diff --git a/scripts/mysql_install_db.sh b/scripts/mysql_install_db.sh index b7366f032f0..09aa87f6bc3 100644 --- a/scripts/mysql_install_db.sh +++ b/scripts/mysql_install_db.sh @@ -639,7 +639,7 @@ else echo "--general-log gives you a log in $ldata that may be helpful." 
link_to_help echo "You can find the latest source at https://downloads.mariadb.org and" - echo "the maria-discuss email list at https://launchpad.net/~maria-discuss" + echo "the MariaDB discuss email list at https://lists.mariadb.org/postorius/lists/discuss.lists.mariadb.org/" echo echo "Please check all of the above before submitting a bug report" echo "at https://mariadb.org/jira" diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 73bcee75362..d4d3e370c4e 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -854,7 +854,7 @@ recv_joiner() wsrep_log_error "receiving process ended without creating" \ "magic file ($MAGIC_FILE)" wsrep_log_info "Contents of datadir:" - wsrep_log_info $(ls -l "$dir/"*) + wsrep_log_info "$(ls -l "$dir"/*)" exit 32 fi @@ -957,17 +957,17 @@ if [ $ssyslog -eq 1 ]; then wsrep_log_error() { - logger -p daemon.err -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE "$@" + logger -p daemon.err -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE -- "$@" } wsrep_log_warning() { - logger -p daemon.warning -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE "$@" + logger -p daemon.warning -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE -- "$@" } wsrep_log_info() { - logger -p daemon.info -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE "$@" + logger -p daemon.info -t ${ssystag}wsrep-sst-$WSREP_SST_OPT_ROLE -- "$@" } else wsrep_log_error "logger not in path: $PATH. Ignoring" @@ -1411,7 +1411,7 @@ else # joiner # Compact backups are not supported by mariadb-backup if grep -qw -F 'compact = 1' "$DATA/mariadb_backup_checkpoints"; then wsrep_log_info "Index compaction detected" - wsrel_log_error "Compact backups are not supported by mariadb-backup" + wsrep_log_error "Compact backups are not supported by mariadb-backup" exit 2 fi diff --git a/scripts/wsrep_sst_rsync.sh b/scripts/wsrep_sst_rsync.sh index b8f4c1bc62e..25057c498fd 100644 --- a/scripts/wsrep_sst_rsync.sh +++ b/scripts/wsrep_sst_rsync.sh @@ -108,7 +108,7 @@ check_pid_and_port() if [ $ss_available -ne 0 -o $sockstat_available -ne 0 ]; then if [ $ss_available -ne 0 ]; then port_info=$($socket_utility $ss_opts -t "( sport = :$port )" 2>/dev/null | \ - grep -E '[[:space:]]users:[[:space:]]?(' | \ + grep -E '[[:space:]]users:[[:space:]]?\(' | \ grep -o -E "([^[:space:]]+[[:space:]]+){4}[^[:space:]]+" || :) else if [ $sockstat_available -gt 1 ]; then diff --git a/sql-common/client.c b/sql-common/client.c index 6d030ce0a17..c5656be6a1e 100644 --- a/sql-common/client.c +++ b/sql-common/client.c @@ -3938,12 +3938,13 @@ mysql_options(MYSQL *mysql,enum mysql_option option, const void *arg) /** A function to return the key from a connection attribute */ -uchar * -get_attr_key(LEX_STRING *part, size_t *length, +const uchar * +get_attr_key(const void *part_, size_t *length, my_bool not_used __attribute__((unused))) { + const LEX_STRING *part= part_; *length= part[0].length; - return (uchar *) part[0].str; + return (const uchar *) part[0].str; } int STDCALL @@ -3992,7 +3993,7 @@ mysql_options4(MYSQL *mysql,enum mysql_option option, { if (my_hash_init(key_memory_mysql_options, &mysql->options.extension->connection_attributes, - &my_charset_bin, 0, 0, 0, (my_hash_get_key) + &my_charset_bin, 0, 0, 0, get_attr_key, my_free, HASH_UNIQUE)) { set_mysql_error(mysql, CR_OUT_OF_MEMORY, unknown_sqlstate); diff --git a/sql/bounded_queue.h b/sql/bounded_queue.h index 07ab6dbaab9..0675135373c 100644 --- a/sql/bounded_queue.h +++ b/sql/bounded_queue.h @@ -62,16 +62,6 @@ public: Element_type *from, bool 
packing_keys); - /** - Function for comparing two keys. - @param n Pointer to number of bytes to compare. - @param a First key. - @param b Second key. - @retval -1, 0, or 1 depending on whether the left argument is - less than, equal to, or greater than the right argument. - */ - typedef int (*compare_function)(size_t *n, Key_type **a, Key_type **b); - /** Initialize the queue. @@ -81,8 +71,6 @@ public: pop() will return the smallest key in the result set. true: We keep the n smallest elements. pop() will return the largest key in the result set. - @param compare Compare function for elements, takes 3 arguments. - If NULL, we use get_ptr_compare(compare_length). @param compare_length Length of the data (i.e. the keys) used for sorting. @param keymaker Function which generates keys for elements. @param sort_param Sort parameters. @@ -93,7 +81,7 @@ public: We do *not* take ownership of any of the input pointer arguments. */ int init(ha_rows max_elements, bool max_at_top, - compare_function compare, size_t compare_length, + size_t compare_length, keymaker_function keymaker, Sort_param *sort_param, Key_type **sort_keys); @@ -148,7 +136,6 @@ private: template int Bounded_queue::init(ha_rows max_elements, bool max_at_top, - compare_function compare, size_t compare_length, keymaker_function keymaker, Sort_param *sort_param, @@ -163,13 +150,10 @@ int Bounded_queue::init(ha_rows max_elements, // init_queue() takes an uint, and also does (max_elements + 1) if (max_elements >= (UINT_MAX - 1)) return 1; - if (compare == NULL) - compare= - reinterpret_cast(get_ptr_compare(compare_length)); // We allocate space for one extra element, for replace when queue is full. return init_queue(&m_queue, (uint) max_elements + 1, 0, max_at_top, - reinterpret_cast(compare), + get_ptr_compare(compare_length), &m_compare_length, 0, 0); } diff --git a/sql/debug_sync.cc b/sql/debug_sync.cc index 9d95b3c70fc..def55a67caf 100644 --- a/sql/debug_sync.cc +++ b/sql/debug_sync.cc @@ -105,10 +105,11 @@ struct st_debug_sync_globals } /* Hash key function for ds_signal_set. 
*/ - static uchar *signal_key(const LEX_CSTRING *str, size_t *klen, my_bool) + static const uchar *signal_key(const void *str_, size_t *klen, my_bool) { + const LEX_CSTRING *str= static_cast(str_); *klen= str->length; - return (uchar*) str->str; + return reinterpret_cast(str->str); } /** diff --git a/sql/encryption.cc b/sql/encryption.cc index ca161749c41..da61d8d71f5 100644 --- a/sql/encryption.cc +++ b/sql/encryption.cc @@ -59,8 +59,9 @@ uint ctx_size(unsigned int, unsigned int) } /* extern "C" */ -int initialize_encryption_plugin(st_plugin_int *plugin) +int initialize_encryption_plugin(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); if (encryption_manager) return 1; @@ -107,8 +108,9 @@ int initialize_encryption_plugin(st_plugin_int *plugin) return 0; } -int finalize_encryption_plugin(st_plugin_int *plugin) +int finalize_encryption_plugin(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); int deinit_status= 0; bool used= plugin_ref_to_int(encryption_manager) == plugin; diff --git a/sql/event_parse_data.cc b/sql/event_parse_data.cc index 80d2f9c9fe4..adb3fbbb8a6 100644 --- a/sql/event_parse_data.cc +++ b/sql/event_parse_data.cc @@ -211,6 +211,12 @@ Event_parse_data::init_execute_at(THD *thd) if (item_execute_at->fix_fields(thd, &item_execute_at)) goto wrong_value; + if (item_execute_at->check_cols(1)) + { + // Don't go to wrong_value, it will call val_str() and hit DBUG_ASSERT(0) + DBUG_RETURN(ER_WRONG_VALUE); + } + /* no starts and/or ends in case of execute_at */ DBUG_PRINT("info", ("starts_null && ends_null should be 1 is %d", (starts_null && ends_null))); @@ -281,6 +287,12 @@ Event_parse_data::init_interval(THD *thd) if (item_expression->fix_fields(thd, &item_expression)) goto wrong_value; + if (item_expression->check_cols(1)) + { + // Don't go to wrong_value, it will call val_str() and hit DBUG_ASSERT(0) + DBUG_RETURN(ER_WRONG_VALUE); + } + if (get_interval_value(thd, item_expression, interval, &interval_tmp)) goto wrong_value; @@ -384,6 +396,12 @@ Event_parse_data::init_starts(THD *thd) if (item_starts->fix_fields(thd, &item_starts)) goto wrong_value; + if (item_starts->check_cols(1)) + { + // Don't go to wrong_value, it will call val_str() and hit DBUG_ASSERT(0) + DBUG_RETURN(ER_WRONG_VALUE); + } + if (item_starts->get_date(thd, <ime, TIME_NO_ZERO_DATE | thd->temporal_round_mode())) goto wrong_value; @@ -439,6 +457,15 @@ Event_parse_data::init_ends(THD *thd) if (item_ends->fix_fields(thd, &item_ends)) goto error_bad_params; + if (item_ends->check_cols(1)) + { + /* + Don't go to error_bad_params it will call val_str() and + hit DBUG_ASSERT(0) + */ + DBUG_RETURN(EVEX_BAD_PARAMS); + } + DBUG_PRINT("info", ("convert to TIME")); if (item_ends->get_date(thd, <ime, TIME_NO_ZERO_DATE | thd->temporal_round_mode())) diff --git a/sql/event_queue.cc b/sql/event_queue.cc index 7ed4d8c61a1..37c2fb52a42 100644 --- a/sql/event_queue.cc +++ b/sql/event_queue.cc @@ -67,10 +67,10 @@ extern "C" int event_queue_element_compare_q(void *, uchar *, uchar *); -int event_queue_element_compare_q(void *vptr, uchar* a, uchar *b) +int event_queue_element_compare_q(void *, const void *a, const void *b) { - Event_queue_element *left = (Event_queue_element *)a; - Event_queue_element *right = (Event_queue_element *)b; + auto left= static_cast(a); + auto right= static_cast(b); my_time_t lhs = left->execute_at; my_time_t rhs = right->execute_at; diff --git a/sql/field.cc b/sql/field.cc index 071d17efa69..96c2af46531 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -1411,7 +1411,8 @@ int 
Field::store_hex_hybrid(const char *str, size_t length) goto warn; } nr= (ulonglong) longlong_from_hex_hybrid(str, length); - if ((length == 8) && !(flags & UNSIGNED_FLAG) && (nr > LONGLONG_MAX)) + if ((length == 8) && cmp_type()== INT_RESULT && + !(flags & UNSIGNED_FLAG) && (nr > LONGLONG_MAX)) { nr= LONGLONG_MAX; goto warn; diff --git a/sql/filesort.cc b/sql/filesort.cc index abcd4127a0b..e4449f9ef79 100644 --- a/sql/filesort.cc +++ b/sql/filesort.cc @@ -339,7 +339,6 @@ SORT_INFO *filesort(THD *thd, TABLE *table, Filesort *filesort, sort->init_record_pointers(); if (pq.init(param.limit_rows, true, // max_at_top - NULL, // compare_function compare_length, &make_sortkey, ¶m, sort->get_sort_keys())) { @@ -1747,7 +1746,7 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, uchar *strpos; Merge_chunk *buffpek; QUEUE queue; - qsort2_cmp cmp; + qsort_cmp2 cmp; void *first_cmp_arg; element_count dupl_count= 0; uchar *src; @@ -1791,9 +1790,9 @@ bool merge_buffers(Sort_param *param, IO_CACHE *from_file, cmp= param->get_compare_function(); first_cmp_arg= param->get_compare_argument(&sort_length); } - if (unlikely(init_queue(&queue, (uint) (Tb-Fb)+1, - offsetof(Merge_chunk,m_current_key), 0, - (queue_compare) cmp, first_cmp_arg, 0, 0))) + if (unlikely(init_queue(&queue, (uint) (Tb - Fb) + 1, + offsetof(Merge_chunk, m_current_key), 0, cmp, + first_cmp_arg, 0, 0))) DBUG_RETURN(1); /* purecov: inspected */ const size_t chunk_sz= (sort_buffer.size()/((uint) (Tb-Fb) +1)); for (buffpek= Fb; buffpek <= Tb; buffpek++) @@ -2713,9 +2712,9 @@ void SORT_FIELD_ATTR::set_length_and_original_length(THD *thd, uint length_arg) Compare function used for packing sort keys */ -qsort2_cmp get_packed_keys_compare_ptr() +qsort_cmp2 get_packed_keys_compare_ptr() { - return (qsort2_cmp) compare_packed_sort_keys; + return compare_packed_sort_keys; } @@ -2729,8 +2728,8 @@ qsort2_cmp get_packed_keys_compare_ptr() suffix_bytes are used only for binary columns. */ -int SORT_FIELD_ATTR::compare_packed_varstrings(uchar *a, size_t *a_len, - uchar *b, size_t *b_len) +int SORT_FIELD_ATTR::compare_packed_varstrings(const uchar *a, size_t *a_len, + const uchar *b, size_t *b_len) { int retval; size_t a_length, b_length; @@ -2789,8 +2788,8 @@ int SORT_FIELD_ATTR::compare_packed_varstrings(uchar *a, size_t *a_len, packed-value format. 
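/*
  Illustrative aside, not part of the patch: the comparator shape these changes
  converge on.  Instead of defining the function with typed pointers and
  force-casting it to qsort_cmp2 at the call site (a cast between incompatible
  function-pointer types), the function keeps the generic signature and casts
  its arguments inside.  "cmp_uint32" is a hypothetical example, not a function
  from the tree.
*/
static int cmp_uint32(void *, const void *a_, const void *b_)
{
  const uint32 *a= static_cast<const uint32*>(a_);
  const uint32 *b= static_cast<const uint32*>(b_);
  return (*a > *b) - (*a < *b);
}

/* Usable as-is wherever a qsort_cmp2 is expected, e.g.
   my_qsort2(array, n_elements, sizeof(uint32), cmp_uint32, nullptr);         */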
*/ -int SORT_FIELD_ATTR::compare_packed_fixed_size_vals(uchar *a, size_t *a_len, - uchar *b, size_t *b_len) +int SORT_FIELD_ATTR::compare_packed_fixed_size_vals(const uchar *a, size_t *a_len, + const uchar *b, size_t *b_len) { if (maybe_null) { @@ -2835,15 +2834,15 @@ int SORT_FIELD_ATTR::compare_packed_fixed_size_vals(uchar *a, size_t *a_len, */ -int compare_packed_sort_keys(void *sort_param, - unsigned char **a_ptr, unsigned char **b_ptr) +int compare_packed_sort_keys(void *sort_param, const void *a_ptr, + const void *b_ptr) { int retval= 0; size_t a_len, b_len; - Sort_param *param= (Sort_param*)sort_param; + Sort_param *param= static_cast(sort_param); Sort_keys *sort_keys= param->sort_keys; - uchar *a= *a_ptr; - uchar *b= *b_ptr; + auto a= *(static_cast(a_ptr)); + auto b= *(static_cast(b_ptr)); a+= Sort_keys::size_of_length_field; b+= Sort_keys::size_of_length_field; diff --git a/sql/filesort_utils.h b/sql/filesort_utils.h index 776e986e14a..d8b64ad6031 100644 --- a/sql/filesort_utils.h +++ b/sql/filesort_utils.h @@ -342,7 +342,7 @@ double cost_of_filesort(TABLE *table, ORDER *order_by, ha_rows rows_to_read, ha_rows limit_rows, enum sort_type *used_sort_type); double get_qsort_sort_cost(ha_rows num_rows, bool with_addon_fields); -int compare_packed_sort_keys(void *sort_keys, unsigned char **a, - unsigned char **b); -qsort2_cmp get_packed_keys_compare_ptr(); +int compare_packed_sort_keys(void *sort_param, const void *a_ptr, + const void *b_ptr); +qsort_cmp2 get_packed_keys_compare_ptr(); #endif // FILESORT_UTILS_INCLUDED diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index e49da10d761..eef7d95926e 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -1474,8 +1474,8 @@ bool print_admin_msg(THD* thd, uint len, Also we likely need to lock mutex here (in both cases with protocol and push_warning). */ - DBUG_PRINT("info",("print_admin_msg: %s, %s, %s, %s", name, op_name, - msg_type, msgbuf)); + DBUG_PRINT("info",("print_admin_msg: %s, %s, %s, %s", name, op_name->str, + msg_type->str, msgbuf)); protocol->prepare_for_resend(); protocol->store(name, length, system_charset_info); protocol->store(op_name, system_charset_info); @@ -3478,9 +3478,9 @@ bool ha_partition::re_create_par_file(const char *name) @return Partition name */ -static uchar *get_part_name(PART_NAME_DEF *part, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *get_part_name(const void *part_, size_t *length, my_bool) { + auto part= reinterpret_cast(part_); *length= part->length; return part->partition_name; } @@ -3566,8 +3566,7 @@ bool ha_partition::populate_partition_name_hash() tot_names= m_is_sub_partitioned ? m_tot_parts + num_parts : num_parts; if (my_hash_init(key_memory_Partition_share, &part_share->partition_name_hash, system_charset_info, - tot_names, 0, 0, (my_hash_get_key) get_part_name, my_free, - HASH_UNIQUE)) + tot_names, 0, 0, get_part_name, my_free, HASH_UNIQUE)) { unlock_shared_ha_data(); DBUG_RETURN(TRUE); @@ -5644,7 +5643,7 @@ bool ha_partition::init_record_priority_queue() m_start_key.key= (const uchar*)ptr; /* Initialize priority queue, initialized to reading forward. 
*/ - int (*cmp_func)(void *, uchar *, uchar *); + int (*cmp_func)(void *, const void *, const void *); void *cmp_arg= (void*) this; if (!m_using_extended_keys && !(table_flags() & HA_SLOW_CMP_REF)) cmp_func= cmp_key_rowid_part_id; @@ -5892,8 +5891,10 @@ int ha_partition::index_read_map(uchar *buf, const uchar *key, /* Compare two part_no partition numbers */ -static int cmp_part_ids(uchar *ref1, uchar *ref2) +static int cmp_part_ids(const void *ref1_, const void *ref2_) { + auto ref1= static_cast(ref1_); + auto ref2= static_cast(ref2_); uint32 diff2= uint2korr(ref2); uint32 diff1= uint2korr(ref1); if (diff2 > diff1) @@ -5909,9 +5910,12 @@ static int cmp_part_ids(uchar *ref1, uchar *ref2) Provide ordering by (key_value, part_no). */ -extern "C" int cmp_key_part_id(void *ptr, uchar *ref1, uchar *ref2) +extern "C" int cmp_key_part_id(void *ptr, const void *ref1_, const void *ref2_) { - ha_partition *file= (ha_partition*)ptr; + const ha_partition *file= static_cast(ptr); + const uchar *ref1= static_cast(ref1_); + const uchar *ref2= static_cast(ref2_); + if (int res= key_rec_cmp(file->m_curr_key_info, ref1 + PARTITION_BYTES_IN_POS, ref2 + PARTITION_BYTES_IN_POS)) @@ -5923,9 +5927,13 @@ extern "C" int cmp_key_part_id(void *ptr, uchar *ref1, uchar *ref2) @brief Provide ordering by (key_value, underying_table_rowid, part_no). */ -extern "C" int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2) +extern "C" int cmp_key_rowid_part_id(void *ptr, const void *ref1_, + const void *ref2_) { - ha_partition *file= (ha_partition*)ptr; + const ha_partition *file= static_cast(ptr); + const uchar *ref1= static_cast(ref1_); + const uchar *ref2= static_cast(ref2_); + int res; if ((res= key_rec_cmp(file->m_curr_key_info, ref1 + PARTITION_BYTES_IN_POS, @@ -8477,10 +8485,12 @@ int ha_partition::handle_ordered_prev(uchar *buf) Helper function for sorting according to number of rows in descending order. */ -int ha_partition::compare_number_of_records(ha_partition *me, - const uint32 *a, - const uint32 *b) +int ha_partition::compare_number_of_records(void *me_, const void *a_, + const void *b_) { + const ha_partition *me= static_cast(me_); + const uint32 *a= static_cast(a_); + const uint32 *b= static_cast(b_); handler **file= me->m_file; /* Note: sorting in descending order! */ if (file[*a]->stats.records > file[*b]->stats.records) @@ -8780,7 +8790,7 @@ int ha_partition::info(uint flag) my_qsort2((void*) m_part_ids_sorted_by_num_of_records, m_tot_parts, sizeof(uint32), - (qsort2_cmp) compare_number_of_records, + compare_number_of_records, this); file= m_file[handler_instance]; diff --git a/sql/ha_partition.h b/sql/ha_partition.h index 1789b7d5964..7826d4a5d4e 100644 --- a/sql/ha_partition.h +++ b/sql/ha_partition.h @@ -279,8 +279,8 @@ typedef struct st_partition_part_key_multi_range_hld } PARTITION_PART_KEY_MULTI_RANGE_HLD; -extern "C" int cmp_key_part_id(void *key_p, uchar *ref1, uchar *ref2); -extern "C" int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2); +extern "C" int cmp_key_part_id(void *key_p, const void *ref1, const void *ref2); +extern "C" int cmp_key_rowid_part_id(void *ptr, const void *ref1, const void *ref2); class ha_partition final :public handler { @@ -449,9 +449,7 @@ private: /** Sorted array of partition ids in descending order of number of rows. */ uint32 *m_part_ids_sorted_by_num_of_records; /* Compare function for my_qsort2, for reversed order. 
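/*
  Illustrative aside, not part of the patch: the same generic comparator shape
  with its extra argument in use, sorting an index array in descending order of
  an external value table, the way compare_number_of_records() sorts partition
  ids by row count.  "cmp_desc_by_value" and "values" are hypothetical.
*/
static int cmp_desc_by_value(void *values_, const void *a_, const void *b_)
{
  const ulonglong *values= static_cast<const ulonglong*>(values_);
  uint32 a= *static_cast<const uint32*>(a_);
  uint32 b= *static_cast<const uint32*>(b_);
  if (values[a] > values[b])
    return -1;                          /* larger value sorts first */
  if (values[a] < values[b])
    return 1;
  return 0;
}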
*/ - static int compare_number_of_records(ha_partition *me, - const uint32 *a, - const uint32 *b); + static int compare_number_of_records(void *me, const void *a, const void *b); /** keep track of partitions to call ha_reset */ MY_BITMAP m_partitions_to_reset; /** partitions that returned HA_ERR_KEY_NOT_FOUND. */ @@ -1645,8 +1643,9 @@ public: int notify_tabledef_changed(LEX_CSTRING *db, LEX_CSTRING *table, LEX_CUSTRING *frm, LEX_CUSTRING *version); - friend int cmp_key_rowid_part_id(void *ptr, uchar *ref1, uchar *ref2); - friend int cmp_key_part_id(void *key_p, uchar *ref1, uchar *ref2); + friend int cmp_key_rowid_part_id(void *ptr, const void *ref1, + const void *ref2); + friend int cmp_key_part_id(void *key_p, const void *ref1, const void *ref2); bool can_convert_nocopy(const Field &field, const Column_definition &new_field) const override; diff --git a/sql/handler.cc b/sql/handler.cc index de6b7cc6883..1243c3395f3 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -562,13 +562,6 @@ static void update_discovery_counters(handlerton *hton, int val) engines_with_discover+= val; } -int ha_drop_table(THD *thd, handlerton *hton, const char *path) -{ - if (ha_check_if_updates_are_ignored(thd, hton, "DROP")) - return 0; // Simulate dropped - return hton->drop_table(hton, path); -} - static int hton_drop_table(handlerton *hton, const char *path) { Table_path_buffer tmp_path; @@ -588,8 +581,9 @@ static int hton_drop_table(handlerton *hton, const char *path) } -int ha_finalize_handlerton(st_plugin_int *plugin) +int ha_finalize_handlerton(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); int deinit_status= 0; handlerton *hton= (handlerton *)plugin->data; DBUG_ENTER("ha_finalize_handlerton"); @@ -670,8 +664,9 @@ static bool update_optimizer_costs(handlerton *hton) const char *hton_no_exts[]= { 0 }; static bool ddl_recovery_done= false; -int ha_initialize_handlerton(st_plugin_int *plugin) +int ha_initialize_handlerton(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); handlerton *hton; int ret= 0; DBUG_ENTER("ha_initialize_handlerton"); @@ -1816,7 +1811,13 @@ int ha_commit_trans(THD *thd, bool all) DBUG_PRINT("info", ("is_real_trans: %d rw_trans: %d rw_ha_count: %d", is_real_trans, rw_trans, rw_ha_count)); - if (rw_trans) + /* + backup_commit_lock may have already been set. + This can happen in case of spider that does xa_commit() by + calling ha_commit_trans() from spader_commit(). + */ + + if (rw_trans && !thd->backup_commit_lock) { /* Acquire a metadata lock which will ensure that COMMIT is blocked @@ -2095,8 +2096,8 @@ end: not needed. */ thd->mdl_context.release_lock(mdl_backup.ticket); + thd->backup_commit_lock= 0; } - thd->backup_commit_lock= 0; #ifdef WITH_WSREP if (wsrep_is_active(thd) && is_real_trans && !error && (rw_ha_count == 0 || all) && @@ -5680,27 +5681,6 @@ handler::ha_rename_table(const char *from, const char *to) } -/** - Drop table in the engine: public interface. - - @sa handler::drop_table() - - The difference between this and delete_table() is that the table is open in - drop_table(). -*/ - -void -handler::ha_drop_table(const char *name) -{ - DBUG_ASSERT(m_lock_type == F_UNLCK); - if (check_if_updates_are_ignored("DROP")) - return; - - mark_trx_read_write(); - drop_table(name); -} - - /** Structure used during force drop table. 
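/*
  Illustrative aside, not part of the patch: the locking shape ha_commit_trans()
  ends up with above.  If the caller already holds the backup commit lock (as
  Spider does when it re-enters ha_commit_trans() from its own commit path),
  the lock is neither re-acquired nor released here.  "Thd_stub" stands in for
  THD; the MDL calls are elided.
*/
struct Thd_stub { void *backup_commit_lock; };

static bool commit_with_backup_lock(Thd_stub *thd, bool rw_trans)
{
  bool acquired_here= false;
  if (rw_trans && !thd->backup_commit_lock)   /* skip if already held */
  {
    /* acquire the backup commit MDL lock here (elided) */
    acquired_here= true;
  }
  /* ... do the actual commit work ... */
  if (acquired_here)
    thd->backup_commit_lock= nullptr;         /* release only what we took */
  return false;
}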
*/ @@ -6719,15 +6699,19 @@ static int cmp_file_names(const void *a, const void *b) return cs->strnncoll(aa, strlen(aa), bb, strlen(bb)); } -static int cmp_table_names(LEX_CSTRING * const *a, LEX_CSTRING * const *b) +static int cmp_table_names(const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); return my_charset_bin.strnncoll((*a)->str, (*a)->length, (*b)->str, (*b)->length); } #ifndef DBUG_OFF -static int cmp_table_names_desc(LEX_CSTRING * const *a, LEX_CSTRING * const *b) +static int cmp_table_names_desc(const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); return -cmp_table_names(a, b); } #endif diff --git a/sql/handler.h b/sql/handler.h index 5e9431d8ab2..6b1c1825555 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3620,7 +3620,6 @@ public: int ha_enable_indexes(key_map map, bool persist); int ha_discard_or_import_tablespace(my_bool discard); int ha_rename_table(const char *from, const char *to); - void ha_drop_table(const char *name); int ha_create(const char *name, TABLE *form, HA_CREATE_INFO *info); @@ -5570,8 +5569,8 @@ static inline bool ha_storage_engine_is_enabled(const handlerton *db_type) int ha_init_errors(void); int ha_init(void); int ha_end(void); -int ha_initialize_handlerton(st_plugin_int *plugin); -int ha_finalize_handlerton(st_plugin_int *plugin); +int ha_initialize_handlerton(void *plugin); +int ha_finalize_handlerton(void *plugin); TYPELIB *ha_known_exts(void); int ha_panic(enum ha_panic_function flag); diff --git a/sql/hostname.cc b/sql/hostname.cc index 0641528f98b..d3e8ddc90af 100644 --- a/sql/hostname.cc +++ b/sql/hostname.cc @@ -151,7 +151,7 @@ bool hostname_cache_init() if (!(hostname_cache= new Hash_filo(key_memory_host_cache_hostname, host_cache_size, key_offset, HOST_ENTRY_KEY_SIZE, - NULL, (my_hash_free_key) my_free, &my_charset_bin))) + NULL, my_free, &my_charset_bin))) return 1; hostname_cache->clear(); diff --git a/sql/item.cc b/sql/item.cc index 271469ee672..fc91d2bfceb 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -165,20 +165,24 @@ longlong Item::val_time_packed_result(THD *thd) String *Item::val_str_ascii(String *str) { DBUG_ASSERT(str != &str_value); - - uint errors; - String *res= val_str(&str_value); + + if (!(collation.collation->state & MY_CS_NONASCII)) + return val_str(str); + + /* + We cannot use str_value as a buffer here, + because val_str() can use it. Let's have a local buffer. 
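/*
  Illustrative aside, not part of the patch: the new val_str_ascii() control
  flow written as a free function, assuming the surrounding item.cc context
  (Item, String, StringBuffer, my_charset_latin1).  "to_ascii" is a
  hypothetical helper name and the buffer size 64 is arbitrary.
*/
static String *to_ascii(Item *item, String *out)
{
  if (!(item->collation.collation->state & MY_CS_NONASCII))
    return item->val_str(out);            /* ASCII-compatible: no conversion */

  StringBuffer<64> tmp;                   /* local buffer: val_str() may write */
  String *res= item->val_str(&tmp);       /* into the item's own str_value     */
  if (!res)
    return nullptr;

  uint errors;
  if (out->copy(res->ptr(), res->length(), res->charset(),
                &my_charset_latin1, &errors))
    return nullptr;
  return out;
}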
+ */ + StringBuffer tmp; + String *res= val_str(&tmp); + if (!res) return 0; - - if (!(res->charset()->state & MY_CS_NONASCII)) - str= res; - else - { - if ((null_value= str->copy(res->ptr(), res->length(), collation.collation, - &my_charset_latin1, &errors))) - return 0; - } + + uint errors; + if ((null_value= str->copy(res->ptr(), res->length(), collation.collation, + &my_charset_latin1, &errors))) + return 0; return str; } @@ -10437,7 +10441,7 @@ String *Item_cache_int::val_str(String *str) { if (!has_value()) return NULL; - str->set_int(value, unsigned_flag, default_charset()); + str->set_int(value, unsigned_flag, &my_charset_numeric); return str; } @@ -10678,7 +10682,7 @@ String* Item_cache_double::val_str(String *str) { if (!has_value()) return NULL; - str->set_real(value, decimals, default_charset()); + str->set_real(value, decimals, &my_charset_numeric); return str; } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index eb43b18b91d..b4790402840 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1985,23 +1985,16 @@ bool Item_func_opt_neg::eq(const Item *item, bool binary_cmp) const } -bool Item_func_interval::fix_fields(THD *thd, Item **ref) +bool Item_func_interval::fix_length_and_dec(THD *thd) { - if (Item_long_func::fix_fields(thd, ref)) - return true; - for (uint i= 0 ; i < row->cols(); i++) + uint rows= row->cols(); + + for (uint i= 0 ; i < rows; i++) { if (row->element_index(i)->check_cols(1)) return true; } - return false; -} - -bool Item_func_interval::fix_length_and_dec(THD *thd) -{ - uint rows= row->cols(); - use_decimal_comparison= ((row->element_index(0)->result_type() == DECIMAL_RESULT) || (row->element_index(0)->result_type() == @@ -2018,10 +2011,9 @@ bool Item_func_interval::fix_length_and_dec(THD *thd) if (not_null_consts) { - intervals= (interval_range*) current_thd->alloc(sizeof(interval_range) * - (rows - 1)); + intervals= (interval_range*) thd->alloc(sizeof *intervals * (rows - 1)); if (!intervals) - return TRUE; + return true; if (use_decimal_comparison) { @@ -2061,7 +2053,7 @@ bool Item_func_interval::fix_length_and_dec(THD *thd) used_tables_and_const_cache_join(row); not_null_tables_cache= row->not_null_tables(); with_flags|= row->with_flags; - return FALSE; + return false; } @@ -3730,10 +3722,10 @@ static inline int cmp_ulongs (ulonglong a_val, ulonglong b_val) 0 left argument is equal to the right argument. 1 left argument is greater than the right argument. */ -int cmp_longlong(void *cmp_arg, - in_longlong::packed_longlong *a, - in_longlong::packed_longlong *b) +int cmp_longlong(void *, const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); if (a->unsigned_flag != b->unsigned_flag) { /* @@ -3755,19 +3747,26 @@ int cmp_longlong(void *cmp_arg, return cmp_longs(a->val, b->val); } -static int cmp_double(void *cmp_arg, double *a,double *b) +static int cmp_double(void *, const void *a_, const void *b_) { + const double *a= static_cast(a_); + const double *b= static_cast(b_); return *a < *b ? -1 : *a == *b ? 
0 : 1; } -static int cmp_row(void *cmp_arg, cmp_item_row *a, cmp_item_row *b) +static int cmp_row(void *, const void *a_, const void *b_) { + const cmp_item_row *a= static_cast(a_); + const cmp_item_row *b= static_cast(b_); return a->compare(b); } -static int cmp_decimal(void *cmp_arg, my_decimal *a, my_decimal *b) +static int cmp_decimal(void *, const void *a_, const void *b_) { + my_decimal *a= const_cast(static_cast(a_)); + my_decimal *b= const_cast(static_cast(b_)); + /* We need call of fixing buffer pointer, because fast sort just copy decimal buffers in memory and pointers left pointing on old buffer place @@ -3790,17 +3789,19 @@ bool in_vector::find(Item *item) { uint mid=(start+end+1)/2; int res; - if ((res=(*compare)(collation, base+mid*size, result)) == 0) + if ((res= (*compare)(const_cast(collation), + base + mid * size, result)) == 0) return true; if (res < 0) start=mid; else end=mid-1; } - return ((*compare)(collation, base+start*size, result) == 0); + return ((*compare)(const_cast(collation), + base + start * size, result) == 0); } -in_string::in_string(THD *thd, uint elements, qsort2_cmp cmp_func, +in_string::in_string(THD *thd, uint elements, qsort_cmp2 cmp_func, CHARSET_INFO *cs) :in_vector(thd, elements, sizeof(String), cmp_func, cs), tmp(buff, sizeof(buff), &my_charset_bin) @@ -3855,7 +3856,7 @@ in_row::in_row(THD *thd, uint elements, Item * item) { base= (char*) new (thd->mem_root) cmp_item_row[count= elements]; size= sizeof(cmp_item_row); - compare= (qsort2_cmp) cmp_row; + compare= cmp_row; /* We need to reset these as otherwise we will call sort() with uninitialized (even if not used) elements @@ -3887,8 +3888,7 @@ bool in_row::set(uint pos, Item *item) } in_longlong::in_longlong(THD *thd, uint elements) - :in_vector(thd, elements, sizeof(packed_longlong), - (qsort2_cmp) cmp_longlong, 0) + : in_vector(thd, elements, sizeof(packed_longlong), cmp_longlong, 0) {} bool in_longlong::set(uint pos, Item *item) @@ -3919,16 +3919,16 @@ Item *in_longlong::create_item(THD *thd) } -static int cmp_timestamp(void *cmp_arg, - Timestamp_or_zero_datetime *a, - Timestamp_or_zero_datetime *b) +static int cmp_timestamp(void *, const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); return a->cmp(*b); } in_timestamp::in_timestamp(THD *thd, uint elements) - :in_vector(thd, elements, sizeof(Value), (qsort2_cmp) cmp_timestamp, 0) + :in_vector(thd, elements, sizeof(Value), cmp_timestamp, 0) {} @@ -4012,7 +4012,7 @@ Item *in_temporal::create_item(THD *thd) in_double::in_double(THD *thd, uint elements) - :in_vector(thd, elements, sizeof(double), (qsort2_cmp) cmp_double, 0) + :in_vector(thd, elements, sizeof(double), cmp_double, 0) {} bool in_double::set(uint pos, Item *item) @@ -4036,7 +4036,7 @@ Item *in_double::create_item(THD *thd) in_decimal::in_decimal(THD *thd, uint elements) - :in_vector(thd, elements, sizeof(my_decimal), (qsort2_cmp) cmp_decimal, 0) + :in_vector(thd, elements, sizeof(my_decimal), cmp_decimal, 0) {} @@ -4282,9 +4282,9 @@ int cmp_item_row::cmp(Item *arg) } -int cmp_item_row::compare(cmp_item *c) +int cmp_item_row::compare(const cmp_item *c) const { - cmp_item_row *l_cmp= (cmp_item_row *) c; + auto l_cmp= static_cast(c); for (uint i=0; i < n; i++) { int res; @@ -4320,9 +4320,9 @@ int cmp_item_decimal::cmp(Item *arg) } -int cmp_item_decimal::compare(cmp_item *arg) +int cmp_item_decimal::compare(const cmp_item *arg) const { - cmp_item_decimal *l_cmp= (cmp_item_decimal*) arg; + auto l_cmp= static_cast(arg); return my_decimal_cmp(&value, 
&l_cmp->value); } @@ -4363,9 +4363,9 @@ int cmp_item_time::cmp(Item *arg) } -int cmp_item_temporal::compare(cmp_item *ci) +int cmp_item_temporal::compare(const cmp_item *ci) const { - cmp_item_temporal *l_cmp= (cmp_item_temporal *)ci; + auto l_cmp= static_cast(ci); return (value < l_cmp->value) ? -1 : ((value == l_cmp->value) ? 0 : 1); } @@ -4411,9 +4411,9 @@ int cmp_item_timestamp::cmp(Item *arg) } -int cmp_item_timestamp::compare(cmp_item *arg) +int cmp_item_timestamp::compare(const cmp_item *arg) const { - cmp_item_timestamp *tmp= static_cast(arg); + auto tmp= static_cast(arg); return type_handler_timestamp2.cmp_native(m_native, tmp->m_native); } @@ -6216,7 +6216,7 @@ bool Regexp_processor_pcre::compile(String *pattern, bool send_error) (PCRE2_UCHAR8 *)buff, sizeof(buff)); if (lmsg >= 0) my_snprintf(buff+lmsg, sizeof(buff)-lmsg, - " at offset %d", pcreErrorOffset); + " at offset %zu", pcreErrorOffset); my_error(ER_REGEXP_ERROR, MYF(0), buff); } return true; diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index d7fa3710ca1..29588730d4c 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -837,6 +837,7 @@ public: enum Functype functype() const override { return EQUAL_FUNC; } enum Functype rev_functype() const override { return EQUAL_FUNC; } cond_result eq_cmp_result() const override { return COND_TRUE; } + bool is_null() override { return false; } LEX_CSTRING func_name_cstring() const override { static LEX_CSTRING name= {STRING_WITH_LEN("<=>") }; @@ -1122,7 +1123,6 @@ public: Item_func_interval(THD *thd, Item_row *a): Item_long_func(thd, a), row(a), intervals(0) { } - bool fix_fields(THD *, Item **) override; longlong val_int() override; bool fix_length_and_dec(THD *thd) override; LEX_CSTRING func_name_cstring() const override @@ -1487,12 +1487,12 @@ class in_vector :public Sql_alloc public: char *base; uint size; - qsort2_cmp compare; + qsort_cmp2 compare; CHARSET_INFO *collation; uint count; uint used_count; in_vector() = default; - in_vector(THD *thd, uint elements, uint element_length, qsort2_cmp cmp_func, + in_vector(THD *thd, uint elements, uint element_length, qsort_cmp2 cmp_func, CHARSET_INFO *cmp_coll) :base((char*) thd_calloc(thd, elements * element_length)), size(element_length), compare(cmp_func), collation(cmp_coll), @@ -1536,7 +1536,8 @@ public: /* Compare values number pos1 and pos2 for equality */ bool compare_elems(uint pos1, uint pos2) { - return MY_TEST(compare(collation, base + pos1 * size, base + pos2 * size)); + return MY_TEST(compare(const_cast(collation), + base + pos1 * size, base + pos2 * size)); } virtual const Type_handler *type_handler() const= 0; }; @@ -1558,7 +1559,7 @@ class in_string :public in_vector } }; public: - in_string(THD *thd, uint elements, qsort2_cmp cmp_func, CHARSET_INFO *cs); + in_string(THD *thd, uint elements, qsort_cmp2 cmp_func, CHARSET_INFO *cs); ~in_string(); bool set(uint pos, Item *item) override; uchar *get_value(Item *item) override; @@ -1600,7 +1601,7 @@ public: const Type_handler *type_handler() const override { return &type_handler_slonglong; } - friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b); + friend int cmp_longlong(void *cmp_arg, const void *a, const void *b); }; @@ -1635,7 +1636,7 @@ public: Item_datetime *dt= static_cast(item); dt->set_from_packed(val->val, type_handler()->mysql_timestamp_type()); } - friend int cmp_longlong(void *cmp_arg, packed_longlong *a,packed_longlong *b); + friend int cmp_longlong(void *cmp_arg, const void *a, const void *b); }; @@ -1719,7 +1720,7 @@ public: 
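/*
  Illustrative aside, not part of the patch: the const-qualified virtual
  compare() pattern the cmp_item hierarchy adopts in the hunks around this
  point.  "My_cmp_base" and "My_cmp_int" are stand-alone examples, not classes
  from the tree.
*/
struct My_cmp_base
{
  virtual ~My_cmp_base() = default;
  virtual int compare(const My_cmp_base *other) const= 0;
};

struct My_cmp_int : My_cmp_base
{
  longlong value;
  int compare(const My_cmp_base *other) const override
  {
    auto o= static_cast<const My_cmp_int*>(other);
    return (value > o->value) - (value < o->value);
  }
};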
virtual int cmp(Item *item)= 0; virtual int cmp_not_null(const Value *value)= 0; // for optimized IN with row - virtual int compare(cmp_item *item)= 0; + virtual int compare(const cmp_item *item) const= 0; virtual cmp_item *make_same(THD *thd)= 0; /* Store a scalar or a ROW value into "this". @@ -1797,7 +1798,7 @@ public: else return TRUE; } - int compare(cmp_item *ci) override + int compare(const cmp_item *ci) const override { cmp_item_string *l_cmp= (cmp_item_string *) ci; return sortcmp(value_res, l_cmp->value_res, cmp_charset); @@ -1831,7 +1832,7 @@ public: const bool rc= value != arg->val_int(); return (m_null_value || arg->null_value) ? UNKNOWN : rc; } - int compare(cmp_item *ci) override + int compare(const cmp_item *ci) const override { cmp_item_int *l_cmp= (cmp_item_int *)ci; return (value < l_cmp->value) ? -1 : ((value == l_cmp->value) ? 0 : 1); @@ -1848,7 +1849,7 @@ protected: longlong value; public: cmp_item_temporal() = default; - int compare(cmp_item *ci) override; + int compare(const cmp_item *ci) const override; }; @@ -1894,7 +1895,7 @@ public: void store_value(Item *item) override; int cmp_not_null(const Value *val) override; int cmp(Item *arg) override; - int compare(cmp_item *ci) override; + int compare(const cmp_item *ci) const override; cmp_item *make_same(THD *thd) override; }; @@ -1920,7 +1921,7 @@ public: const bool rc= value != arg->val_real(); return (m_null_value || arg->null_value) ? UNKNOWN : rc; } - int compare(cmp_item *ci) override + int compare(const cmp_item *ci) const override { cmp_item_real *l_cmp= (cmp_item_real *) ci; return (value < l_cmp->value)? -1 : ((value == l_cmp->value) ? 0 : 1); @@ -1937,7 +1938,7 @@ public: void store_value(Item *item) override; int cmp(Item *arg) override; int cmp_not_null(const Value *val) override; - int compare(cmp_item *c) override; + int compare(const cmp_item *c) const override; cmp_item *make_same(THD *thd) override; }; @@ -1970,7 +1971,7 @@ public: DBUG_ASSERT(false); return TRUE; } - int compare(cmp_item *ci) override + int compare(const cmp_item *ci) const override { cmp_item_string *l_cmp= (cmp_item_string *) ci; return sortcmp(value_res, l_cmp->value_res, cmp_charset); @@ -2708,7 +2709,7 @@ public: DBUG_ASSERT(false); return TRUE; } - int compare(cmp_item *arg) override; + int compare(const cmp_item *arg) const override; cmp_item *make_same(THD *thd) override; bool store_value_by_template(THD *thd, cmp_item *tmpl, Item *) override; friend class Item_func_in; diff --git a/sql/item_create.cc b/sql/item_create.cc index 0dbbce6348a..bef9d24b512 100644 --- a/sql/item_create.cc +++ b/sql/item_create.cc @@ -39,13 +39,12 @@ #include -extern "C" uchar* -get_native_fct_hash_key(const uchar *buff, size_t *length, - my_bool /* unused */) +extern "C" const uchar *get_native_fct_hash_key(const void *buff, + size_t *length, my_bool) { - Native_func_registry *func= (Native_func_registry*) buff; + auto func= static_cast(buff); *length= func->name.length; - return (uchar*) func->name.str; + return reinterpret_cast(func->name.str); } @@ -6506,9 +6505,8 @@ bool Native_functions_hash::init(size_t count) { DBUG_ENTER("Native_functions_hash::init"); - if (my_hash_init(key_memory_native_functions, this, - system_charset_info, (ulong) count, 0, 0, (my_hash_get_key) - get_native_fct_hash_key, NULL, MYF(0))) + if (my_hash_init(key_memory_native_functions, this, system_charset_info, + (ulong) count, 0, 0, get_native_fct_hash_key, NULL, MYF(0))) DBUG_RETURN(true); DBUG_RETURN(false); diff --git a/sql/item_func.cc b/sql/item_func.cc index 
87192aa1f61..153375b07ab 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -2996,13 +2996,15 @@ String *Item_func_min_max::val_str_native(String *str) res=args[i]->val_str(str); else { - String *res2; - res2= args[i]->val_str(res == str ? &tmp_value : str); + String *res2= args[i]->val_str(&tmp_value); if (res2) { int cmp= sortcmp(res,res2,collation.collation); if ((cmp_sign < 0 ? cmp : -cmp) < 0) - res=res2; + { + str->copy(*res2); + res= str; + } } } if ((null_value= args[i]->null_value)) @@ -3054,6 +3056,27 @@ longlong Item_func_min_max::val_int_native() } +longlong Item_func_min_max::val_uint_native() +{ + DBUG_ASSERT(fixed()); + ulonglong value= 0; + for (uint i=0; i < arg_count ; i++) + { + if (i == 0) + value= (ulonglong) args[i]->val_int(); + else + { + ulonglong tmp= (ulonglong) args[i]->val_int(); + if (!args[i]->null_value && (tmp < value ? cmp_sign : -cmp_sign) > 0) + value= tmp; + } + if ((null_value= args[i]->null_value)) + return 0; + } + return (longlong) value; +} + + my_decimal *Item_func_min_max::val_decimal_native(my_decimal *dec) { DBUG_ASSERT(fixed()); @@ -4121,13 +4144,12 @@ public: /** Extract a hash key from User_level_lock. */ -uchar *ull_get_key(const uchar *ptr, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *ull_get_key(const void *ptr, size_t *length, my_bool) { User_level_lock *ull = (User_level_lock*) ptr; MDL_key *key = ull->lock->get_key(); *length= key->length(); - return (uchar*) key->ptr(); + return key->ptr(); } diff --git a/sql/item_func.h b/sql/item_func.h index e892969e8eb..098ac4a461d 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -2366,6 +2366,7 @@ public: String *val_str_native(String *str); double val_real_native(); longlong val_int_native(); + longlong val_uint_native(); my_decimal *val_decimal_native(my_decimal *); bool get_date_native(THD *thd, MYSQL_TIME *res, date_mode_t fuzzydate); bool get_time_native(THD *thd, MYSQL_TIME *res); diff --git a/sql/item_geofunc.cc b/sql/item_geofunc.cc index 722d96640d0..a22691d1252 100644 --- a/sql/item_geofunc.cc +++ b/sql/item_geofunc.cc @@ -182,8 +182,8 @@ String *Item_func_geometry_from_json::val_str(String *str) if (code) { THD *thd= current_thd; - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, code, - ER_THD(thd, code)); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, code, + ER_THD(thd, code)); } return 0; } @@ -449,7 +449,7 @@ String *Item_func_boundary::val_str(String *str_value) Transporter trn(&res_receiver); Geometry *g= Geometry::construct(&buffer, swkb->ptr(), swkb->length()); - if (!g) + if ((null_value= !g)) DBUG_RETURN(0); if (g->store_shapes(&trn)) diff --git a/sql/item_jsonfunc.cc b/sql/item_jsonfunc.cc index 236e7f45cab..02f7ed6fb56 100644 --- a/sql/item_jsonfunc.cc +++ b/sql/item_jsonfunc.cc @@ -5115,7 +5115,7 @@ static bool create_hash(json_engine_t *value, HASH *items, bool &hash_inited, { int level= value->stack_p; if (my_hash_init(PSI_INSTRUMENT_ME, items, value->s.cs, 0, 0, 0, - (my_hash_get_key) get_key_name, NULL, 0)) + get_key_name, NULL, 0)) return true; hash_inited= true; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 7311838c19a..a8f33ad7594 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -71,14 +71,21 @@ size_t username_char_length= USERNAME_CHAR_LENGTH; Calculate max length of string from length argument to LEFT and RIGHT */ -static uint32 max_length_for_string(Item *item) +static uint32 max_length_for_string(Item *item, bool *neg) { + *neg= false; ulonglong length= item->val_int(); - /* Note 
that if value is NULL, val_int() returned 0 */ + if (item->null_value) + return 0; + if (length > (ulonglong) LONGLONG_MAX && !item->unsigned_flag) + { + *neg= true; + return 0; // Negative + } if (length > (ulonglong) INT_MAX32) { /* Limit string length to maxium string length in MariaDB (2G) */ - length= item->unsigned_flag ? (ulonglong) INT_MAX32 : 0; + length= (ulonglong) INT_MAX32; } return (uint32) length; } @@ -840,7 +847,7 @@ String *Item_func_concat_operator_oracle::val_str(String *str) goto null; if (res != str) - str->copy(res->ptr(), res->length(), res->charset()); + str->copy_or_move(res->ptr(), res->length(), res->charset()); for (i++ ; i < arg_count ; i++) { @@ -1956,8 +1963,12 @@ bool Item_func_insert::fix_length_and_dec(THD *thd) // Handle character set for args[0] and args[3]. if (agg_arg_charsets_for_string_result(collation, args, 2, 3)) return TRUE; - char_length= ((ulonglong) args[0]->max_char_length() + - (ulonglong) args[3]->max_char_length()); + if (collation.collation == &my_charset_bin) + char_length= (ulonglong) args[0]->max_length + + (ulonglong) args[3]->max_length; + else + char_length= ((ulonglong) args[0]->max_char_length() + + (ulonglong) args[3]->max_char_length()); fix_char_length_ulonglong(char_length); return FALSE; } @@ -2054,7 +2065,8 @@ void Item_str_func::left_right_max_length() uint32 char_length= args[0]->max_char_length(); if (args[1]->can_eval_in_optimize()) { - uint32 length= max_length_for_string(args[1]); + bool neg; + uint32 length= max_length_for_string(args[1], &neg); set_if_smaller(char_length, length); } fix_char_length(char_length); @@ -2173,11 +2185,11 @@ bool Item_func_substr::fix_length_and_dec(THD *thd) } if (arg_count == 3 && args[2]->const_item()) { - int32 length= (int32) args[2]->val_int(); - if (args[2]->null_value || length <= 0) + longlong length= args[2]->val_int(); + if (args[2]->null_value || (length <= 0 && !args[2]->unsigned_flag)) max_length=0; /* purecov: inspected */ - else - set_if_smaller(max_length,(uint) length); + else if (length < UINT32_MAX) + set_if_smaller(max_length, (uint32) length); } max_length*= collation.collation->mbmaxlen; return FALSE; @@ -3468,7 +3480,8 @@ bool Item_func_repeat::fix_length_and_dec(THD *thd) DBUG_ASSERT(collation.collation != NULL); if (args[1]->can_eval_in_optimize()) { - uint32 length= max_length_for_string(args[1]); + bool neg; + uint32 length= max_length_for_string(args[1], &neg); ulonglong char_length= (ulonglong) args[0]->max_char_length() * length; fix_char_length_ulonglong(char_length); return false; @@ -3542,7 +3555,8 @@ bool Item_func_space::fix_length_and_dec(THD *thd) collation.set(default_charset(), DERIVATION_COERCIBLE, MY_REPERTOIRE_ASCII); if (args[0]->can_eval_in_optimize()) { - fix_char_length_ulonglong(max_length_for_string(args[0])); + bool neg; + fix_char_length_ulonglong(max_length_for_string(args[0], &neg)); return false; } max_length= MAX_BLOB_WIDTH; @@ -3668,7 +3682,10 @@ bool Item_func_pad::fix_length_and_dec(THD *thd) DBUG_ASSERT(collation.collation->mbmaxlen > 0); if (args[1]->can_eval_in_optimize()) { - fix_char_length_ulonglong(max_length_for_string(args[1])); + bool neg; + fix_char_length_ulonglong(max_length_for_string(args[1], &neg)); + if (neg) + set_maybe_null(); return false; } max_length= MAX_BLOB_WIDTH; @@ -3900,7 +3917,7 @@ String *Item_func_conv::val_str(String *str) { DBUG_ASSERT(fixed()); String *res= args[0]->val_str(str); - char *endptr,ans[65],*ptr; + char *endptr,ans[66],*ptr; longlong dec; int from_base= (int) args[1]->val_int(); int 
to_base= (int) args[2]->val_int(); @@ -4679,9 +4696,9 @@ longlong Item_func_uncompressed_length::val_int() if (res->length() <= 4) { THD *thd= current_thd; - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_ZLIB_Z_DATA_ERROR, - ER_THD(thd, ER_ZLIB_Z_DATA_ERROR)); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + ER_ZLIB_Z_DATA_ERROR, + ER_THD(thd, ER_ZLIB_Z_DATA_ERROR)); null_value= 1; return 0; } @@ -4816,7 +4833,7 @@ String *Item_func_uncompress::val_str(String *str) if (res->length() <= 4) { THD *thd= current_thd; - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_ZLIB_Z_DATA_ERROR, ER_THD(thd, ER_ZLIB_Z_DATA_ERROR)); goto err; diff --git a/sql/item_strfunc.h b/sql/item_strfunc.h index 1dbca6652b3..25f01b0f369 100644 --- a/sql/item_strfunc.h +++ b/sql/item_strfunc.h @@ -1649,7 +1649,7 @@ public: bool fix_length_and_dec(THD *thd) override { collation.set(default_charset()); - fix_char_length(64); + fix_char_length(65); set_maybe_null(); return FALSE; } diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index cc6739a74ee..520bc73a37e 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -5078,12 +5078,18 @@ subselect_hash_sj_engine::get_strategy_using_data() void subselect_hash_sj_engine::choose_partial_match_strategy( - bool has_non_null_key, bool has_covering_null_row, + uint field_count, bool has_non_null_key, bool has_covering_null_row, MY_BITMAP *partial_match_key_parts_arg) { ulonglong pm_buff_size; DBUG_ASSERT(strategy == PARTIAL_MATCH); + if (field_count == 1) + { + strategy= SINGLE_COLUMN_MATCH; + return; + } + /* Choose according to global optimizer switch. If only one of the switches is 'ON', then the remaining strategy is the only possible one. The only cases @@ -5519,7 +5525,8 @@ void subselect_hash_sj_engine::cleanup() related engines are created and chosen for each execution. */ item->get_IN_subquery()->engine= materialize_engine; - if (lookup_engine_type == TABLE_SCAN_ENGINE || + if (lookup_engine_type == SINGLE_COLUMN_ENGINE || + lookup_engine_type == TABLE_SCAN_ENGINE || lookup_engine_type == ROWID_MERGE_ENGINE) { subselect_engine *inner_lookup_engine; @@ -5843,8 +5850,9 @@ int subselect_hash_sj_engine::exec() item_in->null_value= 1; item_in->make_const(); item_in->set_first_execution(); - thd->lex->current_select= save_select; - DBUG_RETURN(FALSE); + res= 0; + strategy= CONST_RETURN_NULL; + goto err; } if (has_covering_null_row) @@ -5858,11 +5866,26 @@ int subselect_hash_sj_engine::exec() count_pm_keys= count_partial_match_columns - count_null_only_columns + (nn_key_parts ? 1 : 0); - choose_partial_match_strategy(MY_TEST(nn_key_parts), + choose_partial_match_strategy(field_count, MY_TEST(nn_key_parts), has_covering_null_row, &partial_match_key_parts); - DBUG_ASSERT(strategy == PARTIAL_MATCH_MERGE || + DBUG_ASSERT(strategy == SINGLE_COLUMN_MATCH || + strategy == PARTIAL_MATCH_MERGE || strategy == PARTIAL_MATCH_SCAN); + if (strategy == SINGLE_COLUMN_MATCH) + { + if (!(pm_engine= new subselect_single_column_match_engine(thd, + (subselect_uniquesubquery_engine*) lookup_engine, tmp_table, + item, result, semi_join_conds->argument_list(), + has_covering_null_row, has_covering_null_columns, + count_columns_with_nulls)) || + pm_engine->prepare(thd)) + { + /* This is an irrecoverable error. 
*/ + res= 1; + goto err; + } + } if (strategy == PARTIAL_MATCH_MERGE) { pm_engine= @@ -5892,7 +5915,6 @@ int subselect_hash_sj_engine::exec() strategy= PARTIAL_MATCH_SCAN; } } - if (strategy == PARTIAL_MATCH_SCAN) { if (!(pm_engine= @@ -5914,12 +5936,12 @@ int subselect_hash_sj_engine::exec() } } - item_in->get_materialization_tracker()->report_exec_strategy(strategy); if (pm_engine) lookup_engine= pm_engine; item_in->change_engine(lookup_engine); err: + item_in->get_materialization_tracker()->report_exec_strategy(strategy); thd->lex->current_select= save_select; DBUG_RETURN(res); } @@ -6125,7 +6147,7 @@ bool Ordered_key::alloc_keys_buffers() */ int -Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b) +Ordered_key::cmp_keys_by_row_data(const ha_rows a, const ha_rows b) const { uchar *rowid_a, *rowid_b; int error; @@ -6167,10 +6189,12 @@ Ordered_key::cmp_keys_by_row_data(ha_rows a, ha_rows b) } -int -Ordered_key::cmp_keys_by_row_data_and_rownum(Ordered_key *key, - rownum_t* a, rownum_t* b) +int Ordered_key::cmp_keys_by_row_data_and_rownum(void *key_, const void *a_, + const void *b_) { + Ordered_key *key= static_cast(key_); + const rownum_t *a= static_cast(a_); + const rownum_t *b= static_cast(b_); /* The result of comparing the two keys according to their row data. */ int cmp_row_res= key->cmp_keys_by_row_data(*a, *b); if (cmp_row_res) @@ -6184,7 +6208,7 @@ bool Ordered_key::sort_keys() if (tbl->file->ha_rnd_init_with_error(0)) return TRUE; my_qsort2(key_buff, (size_t) key_buff_elements, sizeof(rownum_t), - (qsort2_cmp) &cmp_keys_by_row_data_and_rownum, (void*) this); + &cmp_keys_by_row_data_and_rownum, (void *) this); /* Invalidate the current row position. */ cur_key_idx= HA_POS_ERROR; tbl->file->ha_rnd_end(); @@ -6200,7 +6224,7 @@ bool Ordered_key::sort_keys() @retval 0 if only NULLs */ -double Ordered_key::null_selectivity() +inline double Ordered_key::null_selectivity() const { /* We should not be processing empty tables. */ DBUG_ASSERT(tbl->file->stats.records); @@ -6332,7 +6356,7 @@ bool Ordered_key::next_same() } -void Ordered_key::print(String *str) +void Ordered_key::print(String *str) const { uint i; @@ -6704,10 +6728,11 @@ void subselect_rowid_merge_engine::cleanup() @retval -1 if k1 is more selective than k2 */ -int -subselect_rowid_merge_engine::cmp_keys_by_null_selectivity(Ordered_key **k1, - Ordered_key **k2) +int subselect_rowid_merge_engine::cmp_keys_by_null_selectivity(const void *k1_, + const void *k2_) { + auto k1= static_cast(k1_); + auto k2= static_cast(k2_); double k1_sel= (*k1)->null_selectivity(); double k2_sel= (*k2)->null_selectivity(); if (k1_sel < k2_sel) @@ -6721,12 +6746,14 @@ subselect_rowid_merge_engine::cmp_keys_by_null_selectivity(Ordered_key **k1, /* */ -int -subselect_rowid_merge_engine::cmp_keys_by_cur_rownum(void *arg, - uchar *k1, uchar *k2) +int subselect_rowid_merge_engine::cmp_keys_by_cur_rownum(void *, + const void *k1_, + const void *k2_) { - rownum_t r1= ((Ordered_key*) k1)->current(); - rownum_t r2= ((Ordered_key*) k2)->current(); + auto k1= static_cast(k1_); + auto k2= static_cast(k2_); + rownum_t r1= k1->current(); + rownum_t r2= k2->current(); return (r1 < r2) ? -1 : (r1 > r2) ? 
1 : 0; } @@ -7101,6 +7128,45 @@ void subselect_table_scan_engine::cleanup() } +subselect_single_column_match_engine::subselect_single_column_match_engine( + THD *thd, + subselect_uniquesubquery_engine *engine_arg, + TABLE *tmp_table_arg, + Item_subselect *item_arg, + select_result_interceptor *result_arg, + List *equi_join_conds_arg, + bool has_covering_null_row_arg, + bool has_covering_null_columns_arg, + uint count_columns_with_nulls_arg) + :subselect_partial_match_engine(thd, engine_arg, tmp_table_arg, item_arg, + result_arg, equi_join_conds_arg, + has_covering_null_row_arg, + has_covering_null_columns_arg, + count_columns_with_nulls_arg) +{} + + +bool subselect_single_column_match_engine::partial_match() +{ + /* + We get here if: + - there is only one column in the materialized table; + - its current value of left_expr is NULL (otherwise we would have hit + the earlier "index lookup" branch at subselect_partial_match::exec()); + - the materialized table does not have NULL values (for a similar reason); + - the materialized table is not empty. + The case when materialization produced no rows (empty table) is handled at + subselect_hash_sj_engine::exec(), the result of IN predicate is always + FALSE in that case. + After all those preconditions met, the result of the partial match is TRUE. + */ + DBUG_ASSERT(item->get_IN_subquery()->left_expr_has_null() && + !has_covering_null_row && + tmp_table->file->stats.records > 0); + return true; +} + + void Item_subselect::register_as_with_rec_ref(With_element *with_elem) { with_elem->sq_with_rec_ref.link_in_list(this, &this->next_with_rec_ref); diff --git a/sql/item_subselect.h b/sql/item_subselect.h index 5bac04e2d15..bf7e484303b 100644 --- a/sql/item_subselect.h +++ b/sql/item_subselect.h @@ -829,7 +829,8 @@ public: enum enum_engine_type {ABSTRACT_ENGINE, SINGLE_SELECT_ENGINE, UNION_ENGINE, UNIQUESUBQUERY_ENGINE, INDEXSUBQUERY_ENGINE, HASH_SJ_ENGINE, - ROWID_MERGE_ENGINE, TABLE_SCAN_ENGINE}; + ROWID_MERGE_ENGINE, TABLE_SCAN_ENGINE, + SINGLE_COLUMN_ENGINE}; subselect_engine(Item_subselect *si, select_result_interceptor *res): @@ -1172,6 +1173,9 @@ public: PARTIAL_MATCH, /* Use some partial matching strategy. */ PARTIAL_MATCH_MERGE, /* Use partial matching through index merging. */ PARTIAL_MATCH_SCAN, /* Use partial matching through table scan. */ + SINGLE_COLUMN_MATCH, /* Use simplified matching when there is only + one field involved. */ + CONST_RETURN_NULL, /* The result of IN predicate is constant NULL */ IMPOSSIBLE /* Subquery materialization is not applicable. */ }; @@ -1192,7 +1196,8 @@ protected: ulonglong rowid_merge_buff_size(bool has_non_null_key, bool has_covering_null_row, MY_BITMAP *partial_match_key_parts); - void choose_partial_match_strategy(bool has_non_null_key, + void choose_partial_match_strategy(uint field_count, + bool has_non_null_key, bool has_covering_null_row, MY_BITMAP *partial_match_key_parts); bool make_semi_join_conds(); @@ -1288,9 +1293,9 @@ protected: Quick sort comparison function that compares two rows of the same table indentfied with their row numbers. */ - int cmp_keys_by_row_data(rownum_t a, rownum_t b); - static int cmp_keys_by_row_data_and_rownum(Ordered_key *key, - rownum_t* a, rownum_t* b); + int cmp_keys_by_row_data(const rownum_t a, const rownum_t b) const; + static int cmp_keys_by_row_data_and_rownum(void *key, const void *a, + const void *b); int cmp_key_with_search_key(rownum_t row_num); @@ -1306,23 +1311,23 @@ public: /* Initialize a single-column index. 
*/ bool init(int col_idx); - uint get_column_count() { return key_column_count; } - uint get_keyid() { return keyid; } - Field *get_field(uint i) + uint get_column_count() const { return key_column_count; } + uint get_keyid() const { return keyid; } + Field *get_field(uint i) const { DBUG_ASSERT(i < key_column_count); return key_columns[i]->field; } - rownum_t get_min_null_row() { return min_null_row; } - rownum_t get_max_null_row() { return max_null_row; } + rownum_t get_min_null_row() const { return min_null_row; } + rownum_t get_max_null_row() const { return max_null_row; } MY_BITMAP * get_null_key() { return &null_key; } - ha_rows get_null_count() { return null_count; } - ha_rows get_key_buff_elements() { return key_buff_elements; } + ha_rows get_null_count() const { return null_count; } + ha_rows get_key_buff_elements() const { return key_buff_elements; } /* Get the search key element that corresponds to the i-th key part of this index. */ - Item *get_search_key(uint i) + Item *get_search_key(uint i) const { return search_key->element_index(key_columns[i]->field->field_index); } @@ -1335,7 +1340,7 @@ public: } bool sort_keys(); - double null_selectivity(); + inline double null_selectivity() const; /* Position the current element at the first row that matches the key. @@ -1363,7 +1368,7 @@ public: return FALSE; }; /* Return the current index element. */ - rownum_t current() + rownum_t current() const { DBUG_ASSERT(key_buff_elements && cur_key_idx < key_buff_elements); return key_buff[cur_key_idx]; @@ -1373,7 +1378,7 @@ public: { bitmap_set_bit(&null_key, (uint)row_num); } - bool is_null(rownum_t row_num) + bool is_null(rownum_t row_num) const { /* Indexes consisting of only NULLs do not have a bitmap buffer at all. @@ -1389,7 +1394,7 @@ public: return FALSE; return bitmap_is_set(&null_key, (uint)row_num); } - void print(String *str); + void print(String *str) const; }; @@ -1508,12 +1513,12 @@ protected: Comparison function to compare keys in order of decreasing bitmap selectivity. */ - static int cmp_keys_by_null_selectivity(Ordered_key **k1, Ordered_key **k2); + static int cmp_keys_by_null_selectivity(const void *k1, const void *k2); /* Comparison function used by the priority queue pq, the 'smaller' key is the one with the smaller current row number. */ - static int cmp_keys_by_cur_rownum(void *arg, uchar *k1, uchar *k2); + static int cmp_keys_by_cur_rownum(void *, const void *k1, const void *k2); bool test_null_row(rownum_t row_num); bool exists_complementing_null_row(MY_BITMAP *keys_to_complement); @@ -1559,6 +1564,37 @@ public: enum_engine_type engine_type() override { return TABLE_SCAN_ENGINE; } }; + +/* + An engine to handle NULL-aware Materialization for subqueries + that compare one column: + + col1 IN (SELECT t2.col2 FROM t2 ...) + + When only one column is used, we need to handle NULL values of + col1 and col2 but don't need to perform "partial" matches when only + a subset of compared columns is NULL. + This allows to save on some data structures. 
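/*
  Illustrative aside, not part of the patch: the three-valued result that the
  single-column engine, together with the surrounding lookup code, has to
  produce for "v IN (SELECT col FROM t)" once the subquery is materialized.
  "In_result" and "single_column_in" are hypothetical names for this sketch.
*/
enum class In_result { FALSE_, TRUE_, NULL_ };

static In_result single_column_in(bool table_is_empty, bool left_is_null,
                                  bool found_match, bool col_has_null)
{
  if (table_is_empty)
    return In_result::FALSE_;   /* handled before the engine runs */
  if (left_is_null)
    return In_result::NULL_;    /* the partial_match() case described above */
  if (found_match)
    return In_result::TRUE_;    /* plain index lookup succeeded */
  return col_has_null ? In_result::NULL_ : In_result::FALSE_;
}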
+*/ + +class subselect_single_column_match_engine: + public subselect_partial_match_engine +{ +protected: + bool partial_match() override; +public: + subselect_single_column_match_engine(THD *thd, + subselect_uniquesubquery_engine *engine_arg, + TABLE *tmp_table_arg, Item_subselect *item_arg, + select_result_interceptor *result_arg, + List *equi_join_conds_arg, + bool has_covering_null_row_arg, + bool has_covering_null_columns_arg, + uint count_columns_with_nulls_arg); + void cleanup() override {} + enum_engine_type engine_type() override { return SINGLE_COLUMN_ENGINE; } +}; + /** @brief Subquery materialization tracker @@ -1641,6 +1677,10 @@ private: return "index_lookup;array merge for partial match"; case Strategy::PARTIAL_MATCH_SCAN: return "index_lookup;full scan for partial match"; + case Strategy::SINGLE_COLUMN_MATCH: + return "null-aware index_lookup"; + case Strategy::CONST_RETURN_NULL: + return "return NULL"; default: return "unsupported"; } diff --git a/sql/item_sum.cc b/sql/item_sum.cc index 5d26a52e2dd..21f5eb607b4 100644 --- a/sql/item_sum.cc +++ b/sql/item_sum.cc @@ -681,10 +681,11 @@ bool Item_sum::check_vcol_func_processor(void *arg) @retval > 0 if key1 > key2 */ -int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2) +int simple_str_key_cmp(void *arg, const void *key1, const void *key2) { - Field *f= (Field*) arg; - return f->cmp(key1, key2); + Field *f= static_cast(arg); + return f->cmp(static_cast(key1), + static_cast(key2)); } @@ -714,9 +715,12 @@ C_MODE_END @retval >0 if key1 > key2 */ -int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2) +int Aggregator_distinct::composite_key_cmp(void *arg, const void *key1_, + const void *key2_) { - Aggregator_distinct *aggr= (Aggregator_distinct *) arg; + const uchar *key1= static_cast(key1_); + const uchar *key2= static_cast(key2_); + Aggregator_distinct *aggr= static_cast(arg); Field **field = aggr->table->field; Field **field_end= field + aggr->table->s->fields; uint32 *lengths=aggr->field_lengths; @@ -733,7 +737,6 @@ int Aggregator_distinct::composite_key_cmp(void* arg, uchar* key1, uchar* key2) return 0; } - /***************************************************************************/ C_MODE_START @@ -742,7 +745,7 @@ C_MODE_START int simple_raw_key_cmp(void* arg, const void* key1, const void* key2) { - return memcmp(key1, key2, *(uint *) arg); + return memcmp(key1, key2, *(static_cast(arg))); } @@ -855,7 +858,7 @@ bool Aggregator_distinct::setup(THD *thd) if (all_binary) { cmp_arg= (void*) &tree_key_length; - compare_key= (qsort_cmp2) simple_raw_key_cmp; + compare_key= simple_raw_key_cmp; } else { @@ -867,14 +870,14 @@ bool Aggregator_distinct::setup(THD *thd) compare method that can take advantage of not having to worry about other fields. 
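/*
  Illustrative aside, not part of the patch: the comparator choice made in
  Aggregator_distinct::setup() here, written out as a small helper.
  "pick_distinct_comparator" is a hypothetical name; the three callbacks are
  the ones converted in this hunk.
*/
static qsort_cmp2 pick_distinct_comparator(bool all_binary, uint field_count)
{
  if (all_binary)
    return simple_raw_key_cmp;                    /* memcmp on the packed key  */
  if (field_count == 1)
    return simple_str_key_cmp;                    /* compare through the Field */
  return Aggregator_distinct::composite_key_cmp;  /* walk fields with lengths  */
}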
*/ - compare_key= (qsort_cmp2) simple_str_key_cmp; + compare_key= simple_str_key_cmp; cmp_arg= (void*) table->field[0]; /* tree_key_length has been set already */ } else { uint32 *length; - compare_key= (qsort_cmp2) composite_key_cmp; + compare_key= composite_key_cmp; cmp_arg= (void*) this; field_lengths= (uint32*) thd->alloc(table->s->fields * sizeof(uint32)); for (tree_key_length= 0, length= field_lengths, field= table->field; @@ -3579,11 +3582,10 @@ String *Item_sum_udf_str::val_str(String *str) @retval 1 : key1 > key2 */ -extern "C" -int group_concat_key_cmp_with_distinct(void* arg, const void* key1, - const void* key2) +extern "C" int group_concat_key_cmp_with_distinct(void *arg, const void *key1, + const void *key2) { - Item_func_group_concat *item_func= (Item_func_group_concat*)arg; + auto item_func= static_cast(arg); for (uint i= 0; i < item_func->arg_count_field; i++) { @@ -3622,11 +3624,11 @@ int group_concat_key_cmp_with_distinct(void* arg, const void* key1, Used for JSON_ARRAYAGG function */ -int group_concat_key_cmp_with_distinct_with_nulls(void* arg, - const void* key1_arg, - const void* key2_arg) +int group_concat_key_cmp_with_distinct_with_nulls(void *arg, + const void *key1_arg, + const void *key2_arg) { - Item_func_group_concat *item_func= (Item_func_group_concat*)arg; + auto item_func= static_cast(arg); uchar *key1= (uchar*)key1_arg + item_func->table->s->null_bytes; uchar *key2= (uchar*)key2_arg + item_func->table->s->null_bytes; @@ -3675,11 +3677,10 @@ int group_concat_key_cmp_with_distinct_with_nulls(void* arg, function of sort for syntax: GROUP_CONCAT(expr,... ORDER BY col,... ) */ -extern "C" -int group_concat_key_cmp_with_order(void* arg, const void* key1, - const void* key2) +extern "C" int group_concat_key_cmp_with_order(void *arg, const void *key1, + const void *key2) { - Item_func_group_concat* grp_item= (Item_func_group_concat*) arg; + auto grp_item= static_cast(arg); ORDER **order_item, **end; for (order_item= grp_item->order, end=order_item+ grp_item->arg_count_order; @@ -3735,10 +3736,11 @@ int group_concat_key_cmp_with_order(void* arg, const void* key1, Used for JSON_ARRAYAGG function */ -int group_concat_key_cmp_with_order_with_nulls(void *arg, const void *key1_arg, +int group_concat_key_cmp_with_order_with_nulls(void *arg, + const void *key1_arg, const void *key2_arg) { - Item_func_group_concat* grp_item= (Item_func_group_concat*) arg; + auto grp_item= static_cast(arg); ORDER **order_item, **end; uchar *key1= (uchar*)key1_arg + grp_item->table->s->null_bytes; diff --git a/sql/item_sum.h b/sql/item_sum.h index 52cf926d09b..8724a2820ec 100644 --- a/sql/item_sum.h +++ b/sql/item_sum.h @@ -716,7 +716,7 @@ public: bool unique_walk_function(void *element); bool unique_walk_function_for_count(void *element); - static int composite_key_cmp(void* arg, uchar* key1, uchar* key2); + static int composite_key_cmp(void *arg, const void *key1, const void *key2); }; @@ -1935,12 +1935,12 @@ public: #endif /* HAVE_DLOPEN */ C_MODE_START -int group_concat_key_cmp_with_distinct(void* arg, const void* key1, - const void* key2); -int group_concat_key_cmp_with_distinct_with_nulls(void* arg, const void* key1, - const void* key2); -int group_concat_key_cmp_with_order(void* arg, const void* key1, - const void* key2); +int group_concat_key_cmp_with_distinct(void *arg, const void *key1, + const void *key2); +int group_concat_key_cmp_with_distinct_with_nulls(void *arg, const void *key1, + const void *key2); +int group_concat_key_cmp_with_order(void *arg, const void *key1, + const 
void *key2); int group_concat_key_cmp_with_order_with_nulls(void *arg, const void *key1, const void *key2); int dump_leaf_key(void* key_arg, @@ -2003,15 +2003,16 @@ protected: */ bool add(bool exclude_nulls); - friend int group_concat_key_cmp_with_distinct(void* arg, const void* key1, - const void* key2); - friend int group_concat_key_cmp_with_distinct_with_nulls(void* arg, - const void* key1, - const void* key2); - friend int group_concat_key_cmp_with_order(void* arg, const void* key1, - const void* key2); + friend int group_concat_key_cmp_with_distinct(void *arg, const void *key1, + const void *key2); + friend int group_concat_key_cmp_with_distinct_with_nulls(void *arg, + const void *key1, + const void *key2); + friend int group_concat_key_cmp_with_order(void *arg, const void *key1, + const void *key2); friend int group_concat_key_cmp_with_order_with_nulls(void *arg, - const void *key1, const void *key2); + const void *key1, + const void *key2); friend int dump_leaf_key(void* key_arg, element_count count __attribute__((unused)), void* item_arg); diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 4d3c0ac26a8..1200fa33382 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -3418,7 +3418,8 @@ Sql_mode_dependency Item_time_typecast::value_depends_on_sql_mode() const bool Item_date_typecast::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - date_mode_t tmp= (fuzzydate | sql_mode_for_dates(thd)) & ~TIME_TIME_ONLY; + date_mode_t tmp= (fuzzydate | sql_mode_for_dates(thd)) + & ~TIME_TIME_ONLY & ~TIME_INTERVAL_DAY; // Force truncation Date *d= new(ltime) Date(thd, args[0], Date::Options(date_conv_mode_t(tmp))); return (null_value= !d->is_valid_date()); @@ -3427,7 +3428,8 @@ bool Item_date_typecast::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzy bool Item_datetime_typecast::get_date(THD *thd, MYSQL_TIME *ltime, date_mode_t fuzzydate) { - date_mode_t tmp= (fuzzydate | sql_mode_for_dates(thd)) & ~TIME_TIME_ONLY; + date_mode_t tmp= (fuzzydate | sql_mode_for_dates(thd)) + & ~TIME_TIME_ONLY & ~TIME_INTERVAL_DAY; // Force rounding if the current sql_mode says so Datetime::Options opt(date_conv_mode_t(tmp), thd); Datetime *dt= new(ltime) Datetime(thd, args[0], opt, diff --git a/sql/json_schema.cc b/sql/json_schema.cc index 8f818abc9bb..c5dfdec409a 100644 --- a/sql/json_schema.cc +++ b/sql/json_schema.cc @@ -547,7 +547,7 @@ bool Json_schema_enum::handle_keyword(THD *thd, json_engine_t *je, int count= 0; if (my_hash_init(PSI_INSTRUMENT_ME, &this->enum_values, - je->s.cs, 1024, 0, 0, (my_hash_get_key) get_key_name, + je->s.cs, 1024, 0, 0, get_key_name, NULL, 0)) return true; @@ -1326,7 +1326,7 @@ bool Json_schema_unique_items::validate(const json_engine_t *je, return false; if (my_hash_init(PSI_INSTRUMENT_ME, &unique_items, curr_je.s.cs, - 1024, 0, 0, (my_hash_get_key) get_key_name, NULL, 0)) + 1024, 0, 0, get_key_name, NULL, 0)) return true; while(json_scan_next(&curr_je)==0 && level <= curr_je.stack_p) @@ -1538,7 +1538,7 @@ bool Json_schema_required::validate(const json_engine_t *je, return false; if(my_hash_init(PSI_INSTRUMENT_ME, &required, - curr_je.s.cs, 1024, 0, 0, (my_hash_get_key) get_key_name, + curr_je.s.cs, 1024, 0, 0, get_key_name, NULL, 0)) return true; while (json_scan_next(&curr_je)== 0 && curr_je.stack_p >= curr_level) @@ -1636,7 +1636,7 @@ bool Json_schema_dependent_required::validate(const json_engine_t *je, return false; if (my_hash_init(PSI_INSTRUMENT_ME, &properties, - curr_je.s.cs, 1024, 0, 0, (my_hash_get_key) get_key_name, + 
curr_je.s.cs, 1024, 0, 0, get_key_name, NULL, 0)) return true; @@ -2096,7 +2096,7 @@ bool Json_schema_properties::handle_keyword(THD *thd, json_engine_t *je, if (my_hash_init(PSI_INSTRUMENT_ME, &this->properties, je->s.cs, 1024, 0, 0, - (my_hash_get_key) get_key_name_for_property, + get_key_name_for_property, NULL, 0)) return true; is_hash_inited= true; @@ -2496,7 +2496,7 @@ bool Json_schema_dependent_schemas::handle_keyword(THD *thd, json_engine_t *je, if (my_hash_init(PSI_INSTRUMENT_ME, &this->properties, je->s.cs, 1024, 0, 0, - (my_hash_get_key) get_key_name_for_property, + get_key_name_for_property, NULL, 0)) return true; is_hash_inited= true; @@ -2829,23 +2829,22 @@ bool create_object_and_handle_keyword(THD *thd, json_engine_t *je, return je->s.error ? true : false; } -uchar* get_key_name_for_property(const char *key_name, size_t *length, - my_bool /* unused */) +const uchar *get_key_name_for_property(const void *key_name, size_t *length, + my_bool) { - st_property * curr_property= (st_property*)(key_name); - + auto curr_property= static_cast(key_name); *length= strlen(curr_property->key_name); - return (uchar*) curr_property->key_name; + return reinterpret_cast(curr_property->key_name); } -uchar* get_key_name_for_func(const char *key_name, size_t *length, - my_bool /* unused */) +const uchar *get_key_name_for_func(const void *key_name, size_t *length, + my_bool) { - st_json_schema_keyword_map * curr_keyword= - (st_json_schema_keyword_map*)(key_name); + auto curr_keyword= + static_cast(key_name); *length= curr_keyword->func_name.length; - return (uchar*)curr_keyword->func_name.str; + return reinterpret_cast(curr_keyword->func_name.str); } bool setup_json_schema_keyword_hash() @@ -2853,7 +2852,7 @@ bool setup_json_schema_keyword_hash() if (my_hash_init(PSI_INSTRUMENT_ME, &all_keywords_hash, system_charset_info, 1024, 0, 0, - (my_hash_get_key) get_key_name_for_func, + get_key_name_for_func, NULL, 0)) return true; diff --git a/sql/json_schema.h b/sql/json_schema.h index 96e5f8f6719..28cd8b411b9 100644 --- a/sql/json_schema.h +++ b/sql/json_schema.h @@ -793,10 +793,10 @@ class Json_schema_reference : public Json_schema_keyword bool create_object_and_handle_keyword(THD *thd, json_engine_t *je, List *keyword_list, List *all_keywords); -uchar* get_key_name_for_property(const char *key_name, size_t *length, - my_bool /* unused */); -uchar* get_key_name_for_func(const char *key_name, size_t *length, - my_bool /* unused */); +const uchar *get_key_name_for_property(const void *key_name, size_t *length, + my_bool); +const uchar *get_key_name_for_func(const void *key_name, size_t *length, + my_bool); enum keyword_flag { diff --git a/sql/json_schema_helper.cc b/sql/json_schema_helper.cc index 1b75b15699b..5d4d04ae6c1 100644 --- a/sql/json_schema_helper.cc +++ b/sql/json_schema_helper.cc @@ -51,11 +51,11 @@ bool json_assign_type(uint *curr_type, json_engine_t *je) return false; } -uchar* get_key_name(const char *key_name, size_t *length, - my_bool /* unused */) +const uchar *get_key_name(const void *key_name_, size_t *length, my_bool) { + auto key_name= static_cast(key_name_); *length= strlen(key_name); - return (uchar*) key_name; + return reinterpret_cast(key_name); } void json_get_normalized_string(json_engine_t *je, String *res, diff --git a/sql/json_schema_helper.h b/sql/json_schema_helper.h index 4a596b0b40d..d2accc9352f 100644 --- a/sql/json_schema_helper.h +++ b/sql/json_schema_helper.h @@ -23,8 +23,7 @@ bool json_key_equals(const char* key, LEX_CSTRING val, int key_len); bool 
json_assign_type(uint *curr_type, json_engine_t *je); -uchar* get_key_name(const char *key_name, size_t *length, - my_bool /* unused */); +const uchar *get_key_name(const void *key_name, size_t *length, my_bool); void json_get_normalized_string(json_engine_t *je, String *res, int *error); #endif diff --git a/sql/key.cc b/sql/key.cc index 0f5ea18eca3..c5eba4c33bf 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -552,10 +552,10 @@ int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length) @retval +1 first_rec is greater than second_rec */ -int key_rec_cmp(void *key_p, uchar *first_rec, uchar *second_rec) +int key_rec_cmp(const KEY *const *key, const uchar *first_rec, + const uchar *second_rec) { - KEY **key= (KEY**) key_p; - KEY *key_info= *(key++); // Start with first key + const KEY *key_info= *(key++); // Start with first key uint key_parts, key_part_num; KEY_PART_INFO *key_part= key_info->key_part; uchar *rec0= key_part->field->ptr - key_part->offset; @@ -646,10 +646,10 @@ next_loop: @retval +1 key1 > key2 */ -int key_tuple_cmp(KEY_PART_INFO *part, uchar *key1, uchar *key2, +int key_tuple_cmp(KEY_PART_INFO *part, const uchar *key1, const uchar *key2, uint tuple_length) { - uchar *key1_end= key1 + tuple_length; + const uchar *key1_end= key1 + tuple_length; int UNINIT_VAR(len); int res; for (;key1 < key1_end; key1 += len, key2 += len, part++) @@ -676,7 +676,6 @@ int key_tuple_cmp(KEY_PART_INFO *part, uchar *key1, uchar *key2, return 0; } - /** Get hash value for the key from a key buffer diff --git a/sql/key.h b/sql/key.h index 871373bfcd5..1af10168357 100644 --- a/sql/key.h +++ b/sql/key.h @@ -38,7 +38,9 @@ int key_cmp(KEY_PART_INFO *key_part, const uchar *key, uint key_length); ulong key_hashnr(KEY *key_info, uint used_key_parts, const uchar *key); bool key_buf_cmp(KEY *key_info, uint used_key_parts, const uchar *key1, const uchar *key2); -extern "C" int key_rec_cmp(void *key_info, uchar *a, uchar *b); -int key_tuple_cmp(KEY_PART_INFO *part, uchar *key1, uchar *key2, uint tuple_length); +extern "C" int key_rec_cmp(const KEY *const *key_info, const uchar *a, + const uchar *b); +int key_tuple_cmp(KEY_PART_INFO *part, const uchar *key1, const uchar *key2, + uint tuple_length); #endif /* KEY_INCLUDED */ diff --git a/sql/log.cc b/sql/log.cc index c949d959bd7..a91150d7d82 100644 --- a/sql/log.cc +++ b/sql/log.cc @@ -7325,8 +7325,8 @@ err: mysql_mutex_assert_not_owner(&LOCK_after_binlog_sync); mysql_mutex_assert_not_owner(&LOCK_commit_ordered); #ifdef HAVE_REPLICATION - if (repl_semisync_master.report_binlog_update( - thd, thd, log_file_name, file->pos_in_file)) + if (repl_semisync_master.report_binlog_update(thd, thd, + log_file_name, offset)) { sql_print_error("Failed to run 'after_flush' hooks"); error= 1; @@ -7353,13 +7353,14 @@ err: mysql_mutex_lock(&LOCK_after_binlog_sync); mysql_mutex_unlock(&LOCK_log); + DEBUG_SYNC(thd, "commit_after_release_LOCK_log"); + mysql_mutex_assert_not_owner(&LOCK_prepare_ordered); mysql_mutex_assert_not_owner(&LOCK_log); mysql_mutex_assert_owner(&LOCK_after_binlog_sync); mysql_mutex_assert_not_owner(&LOCK_commit_ordered); #ifdef HAVE_REPLICATION - if (repl_semisync_master.wait_after_sync(log_file_name, - file->pos_in_file)) + if (repl_semisync_master.wait_after_sync(log_file_name, offset)) { error=1; /* error is already printed inside hook */ @@ -12097,7 +12098,7 @@ int TC_LOG_BINLOG::recover(LOG_INFO *linfo, const char *last_log_name, Query_log_event *query_ev= (Query_log_event*) ev; if (query_ev->xid) { - DBUG_PRINT("QQ", ("xid: %llu xid")); + 
DBUG_PRINT("QQ", ("xid: %llu xid", query_ev->xid)); DBUG_ASSERT(sizeof(query_ev->xid) == sizeof(my_xid)); uchar *x= (uchar *) memdup_root(&mem_root, (uchar*) &query_ev->xid, diff --git a/sql/log_event_server.cc b/sql/log_event_server.cc index 84da9ab17aa..b645854ed6a 100644 --- a/sql/log_event_server.cc +++ b/sql/log_event_server.cc @@ -1847,10 +1847,7 @@ int Query_log_event::do_apply_event(rpl_group_info *rgi, { bool is_rb_alter= gtid_flags_extra & Gtid_log_event::FL_ROLLBACK_ALTER_E1; -#ifdef WITH_WSREP - if (!wsrep_thd_is_applying(thd)) -#endif - thd->set_time(when, when_sec_part); + thd->set_time(when, when_sec_part); thd->set_query_and_id((char*)query_arg, q_len_arg, thd->charset(), next_query_id()); thd->variables.pseudo_thread_id= thread_id; // for temp tables @@ -2468,10 +2465,10 @@ static void check_and_remove_stale_alter(Relay_log_info *rli) { DBUG_ASSERT(info->state == start_alter_state::REGISTERED); - sql_print_warning("ALTER query started at %u-%u-%llu could not " + sql_print_warning("ALTER query started at %u-%lu-%llu could not " "be completed because of unexpected master server " - "or its binlog change", info->sa_seq_no, // todo:gtid - 0, 0); + "or its binlog change", info->domain_id, + mi->master_id, info->sa_seq_no); info_iterator.remove(); mysql_mutex_lock(&mi->start_alter_lock); info->state= start_alter_state::ROLLBACK_ALTER; @@ -3672,7 +3669,7 @@ int Xid_apply_log_event::do_apply_event(rpl_group_info *rgi) #endif } - general_log_print(thd, COM_QUERY, get_query()); + general_log_print(thd, COM_QUERY, "%s", get_query()); thd->variables.option_bits&= ~OPTION_GTID_BEGIN; res= do_commit(); if (!res && rgi->gtid_pending) @@ -7399,7 +7396,7 @@ void issue_long_find_row_warning(Log_event_type type, "while looking up records to be processed. Consider adding a " "primary key (or unique key) to the table to improve " "performance.", - evt_type, table_name, (long) delta, scan_type); + evt_type, table_name, delta, scan_type); } } } diff --git a/sql/mdl.cc b/sql/mdl.cc index faccd1c9476..cffb0bd83e9 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -703,13 +703,12 @@ static MDL_map mdl_locks; extern "C" { -static uchar * -mdl_locks_key(const uchar *record, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *mdl_locks_key(const void *record, size_t *length, + my_bool) { - MDL_lock *lock=(MDL_lock*) record; + const MDL_lock *lock= static_cast(record); *length= lock->key.length(); - return (uchar*) lock->key.ptr(); + return lock->key.ptr(); } } /* extern "C" */ @@ -821,7 +820,7 @@ void MDL_map::init() mdl_locks_key, &my_charset_bin); m_locks.alloc.constructor= MDL_lock::lf_alloc_constructor; m_locks.alloc.destructor= MDL_lock::lf_alloc_destructor; - m_locks.initializer= (lf_hash_initializer) MDL_lock::lf_hash_initializer; + m_locks.initializer= MDL_lock::lf_hash_initializer; m_locks.hash_function= mdl_hash_function; } diff --git a/sql/multi_range_read.cc b/sql/multi_range_read.cc index a7e14811888..c8d19f6b9fa 100644 --- a/sql/multi_range_read.cc +++ b/sql/multi_range_read.cc @@ -835,9 +835,9 @@ int Mrr_ordered_index_reader::refill_buffer(bool initial) status_var_increment(thd->status_var.ha_mrr_key_refills_count); } - key_buffer->sort((key_buffer->type() == Lifo_buffer::FORWARD)? - (qsort2_cmp)Mrr_ordered_index_reader::compare_keys_reverse : - (qsort2_cmp)Mrr_ordered_index_reader::compare_keys, + key_buffer->sort((key_buffer->type() == Lifo_buffer::FORWARD) + ? 
Mrr_ordered_index_reader::compare_keys_reverse + : Mrr_ordered_index_reader::compare_keys, this); DBUG_RETURN(0); } @@ -869,9 +869,11 @@ int Mrr_ordered_index_reader::init(handler *h_arg, RANGE_SEQ_IF *seq_funcs, } -static int rowid_cmp_reverse(void *file, uchar *a, uchar *b) +static int rowid_cmp_reverse(void *file, const void *a, const void *b) { - return - ((handler*)file)->cmp_ref(a, b); + return -(static_cast(file)) + ->cmp_ref(static_cast(a), + static_cast(b)); } @@ -1007,7 +1009,7 @@ int Mrr_ordered_rndpos_reader::refill_from_index_reader() if (!index_reader_needs_refill) index_reader->interrupt_read(); /* Sort the buffer contents by rowid */ - rowid_buffer->sort((qsort2_cmp)rowid_cmp_reverse, (void*)file); + rowid_buffer->sort(rowid_cmp_reverse, (void*)file); rowid_buffer->setup_reading(file->ref_length, is_mrr_assoc ? sizeof(range_id_t) : 0); @@ -1476,14 +1478,16 @@ void DsMrr_impl::dsmrr_close() my_qsort2-compatible static member function to compare key tuples */ -int Mrr_ordered_index_reader::compare_keys(void* arg, uchar* key1_arg, - uchar* key2_arg) +int Mrr_ordered_index_reader::compare_keys(void *arg, const void *key1_arg_, + const void *key2_arg_) { - Mrr_ordered_index_reader *reader= (Mrr_ordered_index_reader*)arg; + auto key1_arg= static_cast(key1_arg_); + auto key2_arg= static_cast(key2_arg_); + auto reader= static_cast(arg); TABLE *table= reader->file->get_table(); KEY_PART_INFO *part= table->key_info[reader->file->active_index].key_part; - uchar *key1, *key2; - + const uchar *key1, *key2; + if (reader->keypar.use_key_pointers) { /* the buffer stores pointers to keys, get to the keys */ @@ -1500,8 +1504,8 @@ int Mrr_ordered_index_reader::compare_keys(void* arg, uchar* key1_arg, } -int Mrr_ordered_index_reader::compare_keys_reverse(void* arg, uchar* key1, - uchar* key2) +int Mrr_ordered_index_reader::compare_keys_reverse(void *arg, const void *key1, + const void *key2) { return -compare_keys(arg, key1, key2); } diff --git a/sql/multi_range_read.h b/sql/multi_range_read.h index c66c0abcca0..afdaf00389f 100644 --- a/sql/multi_range_read.h +++ b/sql/multi_range_read.h @@ -346,9 +346,10 @@ private: */ bool read_was_interrupted; - static int compare_keys(void* arg, uchar* key1, uchar* key2); - static int compare_keys_reverse(void* arg, uchar* key1, uchar* key2); - + static int compare_keys(void *arg, const void *key1, const void *key2); + static int compare_keys_reverse(void *arg, const void *key1, + const void *key2); + friend class Key_value_records_iterator; friend class DsMrr_impl; friend class Mrr_ordered_rndpos_reader; diff --git a/sql/my_decimal.cc b/sql/my_decimal.cc index 54b038ccb2d..bb12efaae29 100644 --- a/sql/my_decimal.cc +++ b/sql/my_decimal.cc @@ -59,7 +59,7 @@ int decimal_operation_results(int result, const char *value, const char *type) value, type); break; case E_DEC_DIV_ZERO: - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, ER_DIVISION_BY_ZERO, ER_THD(thd, ER_DIVISION_BY_ZERO)); break; case E_DEC_BAD_NUM: diff --git a/sql/mysqld.cc b/sql/mysqld.cc index fd28a2232ff..2d7fe632778 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1419,6 +1419,7 @@ static int systemd_sock_activation; /* systemd socket activation */ static int termination_event_fd= -1; + C_MODE_START #ifdef WITH_PERFSCHEMA_STORAGE_ENGINE /** @@ -3217,17 +3218,7 @@ static void start_signal_handler(void) DBUG_VOID_RETURN; } -/** Called only from signal_hand function. 
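/*
  Illustrative sketch, not part of the patch: the signal-handler change in
  this hunk drops pthread_exit(0) in favour of a plain return.  The sketch
  below shows that pattern in isolation: the thread releases its own
  resources and returns, which gives pthread_join() the same exit value
  without pthread_exit()'s forced unwinding, a poor fit for C++ stack
  objects.  example_worker and example_thread_running are hypothetical.
*/
static bool example_thread_running= true;

pthread_handler_t example_worker(void *)
{
  my_thread_init();                 /* per-thread bookkeeping */
  /* ... the thread's actual work would go here ... */
  my_thread_end();                  /* release thread-local state */
  example_thread_running= false;
  return nullptr;                   /* same effect as pthread_exit(0) */
}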
*/ -static void* exit_signal_handler() -{ - my_thread_end(); - signal_thread_in_use= 0; - pthread_exit(0); // Safety - return nullptr; // Avoid compiler warnings -} - - -/** This threads handles all signals and alarms. */ +/** This thread handles all signals. */ /* ARGSUSED */ pthread_handler_t signal_hand(void *) { @@ -3328,8 +3319,7 @@ pthread_handler_t signal_hand(void *) DBUG_PRINT("quit", ("signal_handler: calling my_thread_end()")); my_thread_end(); signal_thread_in_use= 0; - pthread_exit(0); // Safety - return exit_signal_handler(); + return nullptr; } static void check_data_home(const char *path) @@ -4840,7 +4830,7 @@ init_gtid_pos_auto_engines(void) #define us_to_ms(X) if (X > 0) X/= 1000; -static int adjust_optimizer_costs(void *, OPTIMIZER_COSTS *oc, void *) +static int adjust_optimizer_costs(const LEX_CSTRING *, OPTIMIZER_COSTS *oc, TABLE *) { us_to_ms(oc->disk_read_cost); us_to_ms(oc->index_block_copy_cost); @@ -5223,7 +5213,7 @@ static int init_server_components() if (ddl_log_initialize()) unireg_abort(1); - process_optimizer_costs((process_optimizer_costs_t)adjust_optimizer_costs, 0); + process_optimizer_costs(adjust_optimizer_costs, 0); us_to_ms(global_system_variables.optimizer_where_cost); us_to_ms(global_system_variables.optimizer_scan_setup_cost); } @@ -6361,7 +6351,6 @@ void handle_connections_sockets() for (int fd : termination_fds) (void)fcntl(fd, F_SETFD, FD_CLOEXEC); #endif - mysql_mutex_lock(&LOCK_start_thread); termination_event_fd= termination_fds[1]; mysql_mutex_unlock(&LOCK_start_thread); @@ -7384,6 +7373,21 @@ static int show_binlog_space_total(THD *thd, SHOW_VAR *var, char *buff, } +static int show_stack_usage(THD *thd, SHOW_VAR *var, void *buff, + system_status_var *, enum_var_type scope) +{ + var->type= SHOW_ULONGLONG; + var->value= buff; + // We cannot get stack usage for 'global' or for another thread + if (scope == OPT_GLOBAL || thd != current_thd) + *(ulonglong*) buff= 0; + else + *(ulonglong*) buff= (ulonglong) (available_stack_size((char*) thd->thread_stack, + my_get_stack_pointer(0))); + return 0; +} + + #ifndef DBUG_OFF static int debug_status_func(THD *thd, SHOW_VAR *var, void *buff, system_status_var *, enum_var_type) @@ -7661,6 +7665,7 @@ SHOW_VAR status_vars[]= { {"Ssl_version", (char*) &show_ssl_get_version, SHOW_SIMPLE_FUNC}, #endif #endif /* HAVE_OPENSSL */ + SHOW_FUNC_ENTRY("stack_usage", &show_stack_usage), {"Syncs", (char*) &my_sync_count, SHOW_LONG_NOFLUSH}, /* Expression cache used only for caching subqueries now, so its statistic @@ -7743,10 +7748,10 @@ static void print_version(void) } /** Compares two options' names, treats - and _ the same */ -static int option_cmp(my_option *a, my_option *b) +static int option_cmp(const void *a, const void *b) { - const char *sa= a->name; - const char *sb= b->name; + const char *sa= static_cast(a)->name; + const char *sb= static_cast(b)->name; for (; *sa || *sb; sa++, sb++) { if (*sa < *sb) diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 43ff358bbbc..44236fc9fee 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -1753,11 +1753,13 @@ QUICK_ROR_UNION_SELECT::QUICK_ROR_UNION_SELECT(THD *thd_param, C_MODE_START -static int QUICK_ROR_UNION_SELECT_queue_cmp(void *arg, uchar *val1, uchar *val2) +static int QUICK_ROR_UNION_SELECT_queue_cmp(void *arg, const void *val1_, + const void *val2_) { - QUICK_ROR_UNION_SELECT *self= (QUICK_ROR_UNION_SELECT*)arg; - return self->head->file->cmp_ref(((QUICK_SELECT_I*)val1)->last_rowid, - ((QUICK_SELECT_I*)val2)->last_rowid); + auto self= static_cast(arg); + 
auto val1= static_cast(val1_); + auto val2= static_cast(val2_); + return self->head->file->cmp_ref(val1->last_rowid, val2->last_rowid); } C_MODE_END @@ -3449,13 +3451,13 @@ double records_in_column_ranges(PARAM *param, uint idx, use histograms for columns b and c */ -static -int cmp_quick_ranges(TABLE::OPT_RANGE **a, TABLE::OPT_RANGE **b) +static int cmp_quick_ranges(const void *a_, const void *b_) { - int tmp=CMP_NUM((*a)->rows, (*b)->rows); - if (tmp) + const auto a= *static_cast(a_); + const auto b= *static_cast(b_); + if (int tmp= CMP_NUM(a->rows, b->rows)) return tmp; - return -CMP_NUM((*a)->key_parts, (*b)->key_parts); + return -CMP_NUM(a->key_parts, b->key_parts); } @@ -3556,9 +3558,8 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond) if (table->opt_range_keys.is_set(keynr)) optimal_key_order[ranges++]= table->opt_range + keynr; - my_qsort(optimal_key_order, ranges, - sizeof(optimal_key_order[0]), - (qsort_cmp) cmp_quick_ranges); + my_qsort(optimal_key_order, ranges, sizeof *optimal_key_order, + cmp_quick_ranges); for (range_index= 0 ; range_index < ranges ; range_index++) { @@ -5927,8 +5928,10 @@ bool create_fields_bitmap(PARAM *param, MY_BITMAP *fields_bitmap) /* Compare two indexes scans for sort before search for the best intersection */ static -int cmp_intersect_index_scan(INDEX_SCAN_INFO **a, INDEX_SCAN_INFO **b) +int cmp_intersect_index_scan(const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); return CMP_NUM((*a)->records, (*b)->records); } @@ -6179,7 +6182,7 @@ bool prepare_search_best_index_intersect(PARAM *param, return TRUE; my_qsort(selected_index_scans, n_search_scans, sizeof(INDEX_SCAN_INFO *), - (qsort_cmp) cmp_intersect_index_scan); + cmp_intersect_index_scan); Json_writer_array selected_idx_scans(thd, "selected_index_scans"); if (cpk_scan) @@ -6930,8 +6933,10 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg) 1 a > b */ -static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +static int cmp_ror_scan_info(const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); double val1= rows2double((*a)->records) * (*a)->key_rec_length; double val2= rows2double((*b)->records) * (*b)->key_rec_length; return (val1 < val2)? -1: (val1 == val2)? 0 : 1; @@ -6954,8 +6959,10 @@ static int cmp_ror_scan_info(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) 1 a > b */ -static int cmp_ror_scan_info_covering(ROR_SCAN_INFO** a, ROR_SCAN_INFO** b) +static int cmp_ror_scan_info_covering(const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); if ((*a)->used_fields_covered > (*b)->used_fields_covered) return -1; if ((*a)->used_fields_covered < (*b)->used_fields_covered) @@ -7450,7 +7457,7 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree, Step 2: Get best ROR-intersection using an approximate algorithm. 
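/*
  Illustrative sketch, not part of the patch: the general shape of the sort
  callbacks being converted in this file.  The comparator now has the exact
  three-argument form used by my_qsort2() and the queue/priority-queue code
  (opaque context plus two const void* elements) and casts internally, so
  the call sites no longer need a function-pointer cast.  Example_ctx and
  Example_elem are hypothetical types.
*/
struct Example_ctx  { bool reverse; };
struct Example_elem { ha_rows rows; };

static int example_cmp(void *arg, const void *a_, const void *b_)
{
  auto ctx= static_cast<Example_ctx*>(arg);
  auto a= static_cast<const Example_elem*>(a_);
  auto b= static_cast<const Example_elem*>(b_);
  int res= CMP_NUM(a->rows, b->rows);
  return ctx->reverse ? -res : res;
}

/* usage: my_qsort2(buf, n, sizeof(Example_elem), example_cmp, &ctx); */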
*/ my_qsort(tree->ror_scans, tree->n_ror_scans, sizeof(ROR_SCAN_INFO*), - (qsort_cmp)cmp_ror_scan_info); + cmp_ror_scan_info); DBUG_EXECUTE("info",print_ror_scans_arr(param->table, "ordered", tree->ror_scans, tree->ror_scans_end);); @@ -7718,7 +7725,7 @@ TRP_ROR_INTERSECT *get_best_covering_ror_intersect(PARAM *param, } my_qsort(ror_scan_mark, ror_scans_end-ror_scan_mark, sizeof(ROR_SCAN_INFO*), - (qsort_cmp)cmp_ror_scan_info_covering); + cmp_ror_scan_info_covering); DBUG_EXECUTE("info", print_ror_scans_arr(param->table, "remaining scans", @@ -8268,7 +8275,7 @@ SEL_TREE *Item_func_in::get_func_mm_tree(RANGE_OPT_PARAM *param, if (!tree) break; i++; - } while (i < array->count && tree->type == SEL_TREE::IMPOSSIBLE); + } while (i < array->used_count && tree->type == SEL_TREE::IMPOSSIBLE); if (!tree || tree->type == SEL_TREE::IMPOSSIBLE) { diff --git a/sql/opt_split.cc b/sql/opt_split.cc index 9ae3eb47cf3..627572ac17a 100644 --- a/sql/opt_split.cc +++ b/sql/opt_split.cc @@ -673,8 +673,10 @@ add_ext_keyuse_for_splitting(Dynamic_array *ext_keyuses, static int -sort_ext_keyuse(KEYUSE_EXT *a, KEYUSE_EXT *b) +sort_ext_keyuse(const void *a_, const void *b_) { + const KEYUSE_EXT *a= static_cast(a_); + const KEYUSE_EXT *b= static_cast(b_); if (a->table->tablenr != b->table->tablenr) return (int) (a->table->tablenr - b->table->tablenr); if (a->key != b->key) diff --git a/sql/optimizer_costs.h b/sql/optimizer_costs.h index 3b2300b9019..cd4ac888019 100644 --- a/sql/optimizer_costs.h +++ b/sql/optimizer_costs.h @@ -153,7 +153,7 @@ struct TABLE; extern "C" { typedef int (*process_optimizer_costs_t) (const LEX_CSTRING *, - const OPTIMIZER_COSTS *, + OPTIMIZER_COSTS *, TABLE *); bool process_optimizer_costs(process_optimizer_costs_t func, TABLE *param); } diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 7d1fb2b48ab..f7fdaba5c7b 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -180,7 +180,7 @@ bool partition_info::add_named_partition(const char *part_name, size_t length) } DBUG_PRINT("info", ("Found partition %u is_subpart %d for name %.*s", part_def->part_id, part_def->is_subpart, - length, part_name)); + static_cast(length), part_name)); DBUG_RETURN(false); } @@ -672,11 +672,11 @@ partition_element *partition_info::get_part_elem(const char *partition_name, Helper function to find_duplicate_name. 
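/*
  Illustrative sketch, not part of the patch: the hash "get key" callbacks
  in this change all take the element as const void* and return the key as
  const uchar*, matching what my_hash_init() expects, so the
  (my_hash_get_key) casts can be dropped.  Example_entry, example_get_key
  and example_hash are hypothetical.
*/
struct Example_entry { char *name; size_t name_len; };

static const uchar *example_get_key(const void *entry_, size_t *length,
                                    my_bool)
{
  auto entry= static_cast<const Example_entry*>(entry_);
  *length= entry->name_len;
  return reinterpret_cast<const uchar*>(entry->name);
}

/*
  usage:
  my_hash_init(PSI_INSTRUMENT_ME, &example_hash, system_charset_info,
               16, 0, 0, example_get_key, my_free, 0);
*/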
 */
-static const char *get_part_name_from_elem(const char *name, size_t *length,
-                                           my_bool not_used __attribute__((unused)))
+static const uchar *get_part_name_from_elem(const void *name, size_t *length,
+                                            my_bool)
 {
-  *length= strlen(name);
-  return name;
+  *length= strlen(static_cast<const char*>(name));
+  return static_cast<const uchar*>(name);
 }
 
 /*
@@ -714,8 +714,8 @@ char *partition_info::find_duplicate_name()
   max_names= num_parts;
   if (is_sub_partitioned())
     max_names+= num_parts * num_subparts;
-  if (my_hash_init(PSI_INSTRUMENT_ME, &partition_names, system_charset_info, max_names, 0, 0,
-                   (my_hash_get_key) get_part_name_from_elem, 0, HASH_UNIQUE))
+  if (my_hash_init(PSI_INSTRUMENT_ME, &partition_names, system_charset_info,
+                   max_names, 0, 0, get_part_name_from_elem, 0, HASH_UNIQUE))
   {
     DBUG_ASSERT(0);
     curr_name= (const uchar*) "Internal failure";
diff --git a/sql/records.cc b/sql/records.cc
index eb4d5f4d958..bf80ef77997 100644
--- a/sql/records.cc
+++ b/sql/records.cc
@@ -43,7 +43,7 @@ template<bool> static int rr_unpack_from_buffer(READ_RECORD *info);
 int rr_from_pointers(READ_RECORD *info);
 static int rr_from_cache(READ_RECORD *info);
 static int init_rr_cache(THD *thd, READ_RECORD *info);
-static int rr_cmp(uchar *a,uchar *b);
+static int rr_cmp(const void *a, const void *b);
 static int rr_index_first(READ_RECORD *info);
 static int rr_index_last(READ_RECORD *info);
 static int rr_index(READ_RECORD *info);
@@ -766,8 +766,10 @@ static int rr_from_cache(READ_RECORD *info)
 } /* rr_from_cache */
 
 
-static int rr_cmp(uchar *a,uchar *b)
+static int rr_cmp(const void *a_, const void *b_)
 {
+  auto a= static_cast<const uchar*>(a_);
+  auto b= static_cast<const uchar*>(b_);
   if (a[0] != b[0])
     return (int) a[0] - (int) b[0];
   if (a[1] != b[1])
diff --git a/sql/rowid_filter.cc b/sql/rowid_filter.cc
index d4fb958fb26..43198c6089e 100644
--- a/sql/rowid_filter.cc
+++ b/sql/rowid_filter.cc
@@ -191,12 +191,12 @@ Rowid_filter_container *Range_rowid_filter_cost_info::create_container()
 }
 
 
-static
-int compare_range_rowid_filter_cost_info_by_a(
-                        Range_rowid_filter_cost_info **filter_ptr_1,
-                        Range_rowid_filter_cost_info **filter_ptr_2)
+static int compare_range_rowid_filter_cost_info_by_a(const void *p1_,
+                                                     const void *p2_)
 {
-  double diff= (*filter_ptr_2)->get_gain() - (*filter_ptr_1)->get_gain();
+  auto p1= static_cast<Range_rowid_filter_cost_info *const *>(p1_);
+  auto p2= static_cast<Range_rowid_filter_cost_info *const *>(p2_);
+  double diff= (*p2)->get_gain() - (*p1)->get_gain();
   return (diff < 0 ? -1 : (diff > 0 ?
1 : 0)); } diff --git a/sql/rowid_filter.h b/sql/rowid_filter.h index 286d3b50b48..7a4858d0613 100644 --- a/sql/rowid_filter.h +++ b/sql/rowid_filter.h @@ -339,11 +339,9 @@ public: inline uint elements() const { return (uint) array.elements; } - void sort (int (*cmp) (void *ctxt, const void *el1, const void *el2), - void *cmp_arg) + void sort(qsort_cmp2 cmp, void *cmp_arg) { - my_qsort2(array.buffer, array.elements, - elem_size, (qsort2_cmp) cmp, cmp_arg); + my_qsort2(array.buffer, array.elements, elem_size, cmp, cmp_arg); } }; @@ -468,10 +466,10 @@ public: Rowid_filter_container *create_container(); - double get_setup_cost() { return cost_of_building_range_filter; } + double get_setup_cost() const { return cost_of_building_range_filter; } double get_lookup_cost(); - double get_gain() { return gain; } - uint get_key_no() { return key_no; } + double get_gain() const { return gain; } + uint get_key_no() const { return key_no; } void trace_info(THD *thd); diff --git a/sql/rpl_filter.cc b/sql/rpl_filter.cc index 76c5b8b3ca6..f98f30eab9f 100644 --- a/sql/rpl_filter.cc +++ b/sql/rpl_filter.cc @@ -652,16 +652,14 @@ Rpl_filter::set_ignore_db(const char* db_spec) } -extern "C" uchar *get_table_key(const uchar *, size_t *, my_bool); +extern "C" const uchar *get_table_key(const void *, size_t *, my_bool); extern "C" void free_table_ent(void* a); -uchar *get_table_key(const uchar* a, size_t *len, - my_bool __attribute__((unused))) +const uchar *get_table_key(const void *a, size_t *len, my_bool) { - TABLE_RULE_ENT *e= (TABLE_RULE_ENT *) a; - + auto e= static_cast(a); *len= e->key_len; - return (uchar*)e->db; + return reinterpret_cast(e->db); } diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index b9f012d3029..eca15ec19fa 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -3131,10 +3131,10 @@ gtid_waiting::destroy() static int -cmp_queue_elem(void *, uchar *a, uchar *b) +cmp_queue_elem(void *, const void *a, const void *b) { - uint64 seq_no_a= *(uint64 *)a; - uint64 seq_no_b= *(uint64 *)b; + auto seq_no_a= *(static_cast(a)); + auto seq_no_b= *(static_cast(b)); if (seq_no_a < seq_no_b) return -1; else if (seq_no_a == seq_no_b) diff --git a/sql/rpl_mi.cc b/sql/rpl_mi.cc index b7b018b8f63..8c81762dbcc 100644 --- a/sql/rpl_mi.cc +++ b/sql/rpl_mi.cc @@ -864,12 +864,12 @@ void end_master_info(Master_info* mi) } /* Multi-Master By P.Linux */ -uchar *get_key_master_info(Master_info *mi, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *get_key_master_info(const void *mi_, size_t *length, my_bool) { + auto mi= static_cast(mi_); /* Return lower case name */ *length= mi->cmp_connection_name.length; - return (uchar*) mi->cmp_connection_name.str; + return reinterpret_cast(mi->cmp_connection_name.str); } /* @@ -879,8 +879,9 @@ uchar *get_key_master_info(Master_info *mi, size_t *length, Stops associated slave threads and frees master_info */ -void free_key_master_info(Master_info *mi) +void free_key_master_info(void *mi_) { + Master_info *mi= static_cast(mi_); DBUG_ENTER("free_key_master_info"); mysql_mutex_unlock(&LOCK_active_mi); @@ -1117,10 +1118,9 @@ bool Master_info_index::init_all_master_info() } /* Initialize Master_info Hash Table */ - if (my_hash_init(PSI_INSTRUMENT_ME, &master_info_hash, system_charset_info, - MAX_REPLICATION_THREAD, 0, 0, - (my_hash_get_key) get_key_master_info, - (my_hash_free_key)free_key_master_info, HASH_UNIQUE)) + if (my_hash_init(PSI_INSTRUMENT_ME, &master_info_hash, system_charset_info, + MAX_REPLICATION_THREAD, 0, 0, get_key_master_info, + 
free_key_master_info, HASH_UNIQUE)) { sql_print_error("Initializing Master_info hash table failed"); DBUG_RETURN(1); diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index d650412cd73..9e187403065 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -1468,7 +1468,7 @@ void Repl_semi_sync_master::await_all_slave_replies(const char *msg) if (msg && first) { first= false; - sql_print_information(msg); + sql_print_information("%s", msg); } wait_result= diff --git a/sql/session_tracker.cc b/sql/session_tracker.cc index a0b88f60f22..4c65230ad8d 100644 --- a/sql/session_tracker.cc +++ b/sql/session_tracker.cc @@ -566,13 +566,13 @@ void Session_sysvars_tracker::mark_as_changed(THD *thd, const sys_var *var) @return Pointer to the key buffer. */ -uchar *Session_sysvars_tracker::sysvars_get_key(const char *entry, - size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *Session_sysvars_tracker::sysvars_get_key(const void *entry, + size_t *length, my_bool) { - auto key=&(((sysvar_node_st *) entry)->m_svar->offset); + ptrdiff_t *key= + &((static_cast(entry))->m_svar->offset); *length= sizeof(*key); - return (uchar *) key; + return reinterpret_cast(key); } diff --git a/sql/session_tracker.h b/sql/session_tracker.h index a02549262b8..3cfeab72dab 100644 --- a/sql/session_tracker.h +++ b/sql/session_tracker.h @@ -145,8 +145,9 @@ class Session_sysvars_tracker: public State_tracker void init() { my_hash_init(PSI_INSTRUMENT_ME, &m_registered_sysvars, &my_charset_bin, - 0, 0, 0, (my_hash_get_key) sysvars_get_key, my_free, - HASH_UNIQUE | (mysqld_server_initialized ? HASH_THREAD_SPECIFIC : 0)); + 0, 0, 0, sysvars_get_key, my_free, + HASH_UNIQUE | + (mysqld_server_initialized ? HASH_THREAD_SPECIFIC : 0)); } void free_hash() { @@ -209,8 +210,8 @@ public: void mark_as_changed(THD *thd, const sys_var *var); void deinit() { orig_list.deinit(); } /* callback */ - static uchar *sysvars_get_key(const char *entry, size_t *length, - my_bool not_used __attribute__((unused))); + static const uchar *sysvars_get_key(const void *entry, size_t *length, + my_bool); friend bool sysvartrack_global_update(THD *thd, char *str, size_t len); }; diff --git a/sql/set_var.cc b/sql/set_var.cc index 7f8f2490252..a925452f797 100644 --- a/sql/set_var.cc +++ b/sql/set_var.cc @@ -48,11 +48,12 @@ static ulonglong system_variable_hash_version= 0; Return variable name and length for hashing of variables. */ -static uchar *get_sys_var_length(const sys_var *var, size_t *length, - my_bool first) +static const uchar *get_sys_var_length(const void *var_, size_t *length, + my_bool) { + auto var= static_cast(var_); *length= var->name.length; - return (uchar*) var->name.str; + return reinterpret_cast(var->name.str); } sys_var_chain all_sys_vars = { NULL, NULL }; @@ -64,8 +65,9 @@ int sys_var_init() /* Must be already initialized. 
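/*
  Illustrative sketch, not part of the patch: the logging changes here pass
  variable text as an argument to an explicit "%s" format instead of using
  it as the format string, so a stray '%' in the message can never be
  interpreted as a conversion specification.  example_report() is
  hypothetical.
*/
static void example_report(const char *msg)
{
  sql_print_information("%s", msg);      /* safe for arbitrary msg */
  /* sql_print_information(msg);            misparses msg containing '%' */
}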
*/ DBUG_ASSERT(system_charset_info != NULL); - if (my_hash_init(PSI_INSTRUMENT_ME, &system_variable_hash, system_charset_info, 700, 0, - 0, (my_hash_get_key) get_sys_var_length, 0, HASH_UNIQUE)) + if (my_hash_init(PSI_INSTRUMENT_ME, &system_variable_hash, + system_charset_info, 700, 0, 0, get_sys_var_length, 0, + HASH_UNIQUE)) goto error; if (mysql_add_sys_var_chain(all_sys_vars.first)) @@ -601,8 +603,10 @@ int mysql_del_sys_var_chain(sys_var *first) } -static int show_cmp(SHOW_VAR *a, SHOW_VAR *b) +static int show_cmp(const void *a_, const void *b_) { + const SHOW_VAR *a= static_cast(a_); + const SHOW_VAR *b= static_cast(b_); return strcmp(a->name, b->name); } diff --git a/sql/slave.cc b/sql/slave.cc index 4d9de12691e..5cd41ba7d15 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -3444,9 +3444,11 @@ static bool send_show_master_info_data(THD *thd, Master_info *mi, bool full, /* Used to sort connections by name */ -static int cmp_mi_by_name(const Master_info **arg1, - const Master_info **arg2) +static int cmp_mi_by_name(const void *arg1_, + const void *arg2_) { + auto arg1= static_cast(arg1_); + auto arg2= static_cast(arg2_); return my_strcasecmp(system_charset_info, (*arg1)->connection_name.str, (*arg2)->connection_name.str); } @@ -5157,8 +5159,16 @@ Stopping slave I/O thread due to out-of-memory error from master"); mi->semi_sync_reply_enabled && (mi->semi_ack & SEMI_SYNC_NEED_ACK)) { - DBUG_EXECUTE_IF("simulate_delay_semisync_slave_reply", - my_sleep(800000);); +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("simulate_delay_semisync_slave_reply", { + const char act[]= "now " + "signal io_thd_at_slave_reply " + "wait_for io_thd_do_reply"; + DBUG_ASSERT(debug_sync_service); + DBUG_ASSERT( + !debug_sync_set_action(current_thd, STRING_WITH_LEN(act))); + };); +#endif if (repl_semisync_slave.slave_reply(mi)) { /* @@ -6903,7 +6913,7 @@ dbug_gtid_accept: "the last seen GTID is %u-%u-%llu", Log_event::get_type_str((Log_event_type) (uchar) buf[EVENT_TYPE_OFFSET]), - mi->last_queued_gtid); + PARAM_GTID(mi->last_queued_gtid)); goto err; } } @@ -7908,7 +7918,7 @@ end: #ifdef WITH_WSREP enum Log_event_type wsrep_peak_event(rpl_group_info *rgi, ulonglong* event_size) { - enum Log_event_type ev_type; + enum Log_event_type ev_type= UNKNOWN_EVENT; mysql_mutex_lock(&rgi->rli->data_lock); @@ -7919,6 +7929,11 @@ enum Log_event_type wsrep_peak_event(rpl_group_info *rgi, ulonglong* event_size) /* scan the log to read next event and we skip annotate events. */ do { + /* We've reached the end of log, return the last found event, if any. 
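/*
  Illustrative sketch, not part of the patch: the message fixes above keep
  printf-style formats and their argument lists in sync; a "%u-%u-%llu"
  GTID format needs three values (domain id, server id, sequence number),
  which is what the PARAM_GTID() expansion supplies.  example_print_gtid()
  is hypothetical.
*/
static void example_print_gtid(const rpl_gtid &gtid)
{
  sql_print_information("GTID %u-%u-%llu", gtid.domain_id, gtid.server_id,
                        (ulonglong) gtid.seq_no);
}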
*/ + if (future_pos >= rgi->rli->cur_log->end_of_file) + { + break; + } my_b_seek(rgi->rli->cur_log, future_pos); rgi->rli->event_relay_log_pos= future_pos; rgi->event_relay_log_pos= future_pos; diff --git a/sql/sp.cc b/sql/sp.cc index 242abe3d83e..f072a44ea19 100644 --- a/sql/sp.cc +++ b/sql/sp.cc @@ -2300,12 +2300,11 @@ Sp_handler::sp_exist_routines(THD *thd, TABLE_LIST *routines) const } -extern "C" uchar* sp_sroutine_key(const uchar *ptr, size_t *plen, - my_bool first) +extern "C" const uchar *sp_sroutine_key(const void *ptr, size_t *plen, my_bool) { - Sroutine_hash_entry *rn= (Sroutine_hash_entry *)ptr; + auto rn= static_cast(ptr); *plen= rn->mdl_request.key.length(); - return (uchar *)rn->mdl_request.key.ptr(); + return rn->mdl_request.key.ptr(); } diff --git a/sql/sp.h b/sql/sp.h index 8a90e2aab00..40cd40c39a0 100644 --- a/sql/sp.h +++ b/sql/sp.h @@ -691,8 +691,8 @@ void sp_update_stmt_used_routines(THD *thd, Query_tables_list *prelocking_ctx, SQL_I_List *src, TABLE_LIST *belong_to_view); -extern "C" uchar* sp_sroutine_key(const uchar *ptr, size_t *plen, - my_bool first); +extern "C" const uchar *sp_sroutine_key(const void *ptr, size_t *plen, + my_bool); /* Routines which allow open/lock and close mysql.proc table even when diff --git a/sql/sp_cache.cc b/sql/sp_cache.cc index 2a777c14d92..96e335d2eb4 100644 --- a/sql/sp_cache.cc +++ b/sql/sp_cache.cc @@ -270,16 +270,15 @@ sp_cache_enforce_limit(sp_cache *c, ulong upper_limit_for_elements) Internal functions *************************************************************************/ -extern "C" uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen, - my_bool first); +extern "C" const uchar *hash_get_key_for_sp_head(const void *ptr, size_t *plen, + my_bool); extern "C" void hash_free_sp_head(void *p); -uchar *hash_get_key_for_sp_head(const uchar *ptr, size_t *plen, - my_bool first) +const uchar *hash_get_key_for_sp_head(const void *ptr, size_t *plen, my_bool) { - sp_head *sp= (sp_head *)ptr; + auto sp= static_cast(ptr); *plen= sp->m_qname.length; - return (uchar*) sp->m_qname.str; + return reinterpret_cast(sp->m_qname.str); } diff --git a/sql/sp_head.cc b/sql/sp_head.cc index 8a6ff5718e3..aa51d212a8c 100644 --- a/sql/sp_head.cc +++ b/sql/sp_head.cc @@ -97,7 +97,7 @@ void init_sp_psi_keys() #define MYSQL_RUN_SP(SP, CODE) do { CODE; } while(0) #endif -extern "C" uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first); +extern "C" const uchar *sp_table_key(const void *ptr, size_t *plen, my_bool); /** Helper function which operates on a THD object to set the query start_time to @@ -3477,11 +3477,11 @@ typedef struct st_sp_table } SP_TABLE; -uchar *sp_table_key(const uchar *ptr, size_t *plen, my_bool first) +const uchar *sp_table_key(const void *ptr, size_t *plen, my_bool) { - SP_TABLE *tab= (SP_TABLE *)ptr; + auto tab= static_cast(ptr); *plen= tab->qname.length; - return (uchar *)tab->qname.str; + return reinterpret_cast(tab->qname.str); } diff --git a/sql/sp_instr.cc b/sql/sp_instr.cc index 36931592f1d..6b70199f513 100644 --- a/sql/sp_instr.cc +++ b/sql/sp_instr.cc @@ -16,9 +16,10 @@ */ static const int SP_STMT_PRINT_MAXLEN= 40; -static int cmp_rqp_locations(Rewritable_query_parameter * const *a, - Rewritable_query_parameter * const *b) +static int cmp_rqp_locations(const void *a_, const void *b_) { + auto a= static_cast(a_); + auto b= static_cast(b_); return (int)((*a)->pos_in_query - (*b)->pos_in_query); } diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index aa61b299514..35b291c90ff 100644 --- a/sql/sql_acl.cc +++ 
b/sql/sql_acl.cc @@ -569,18 +569,20 @@ public: }; -static uchar* acl_entry_get_key(acl_entry *entry, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *acl_entry_get_key(const void *entry_, size_t *length, + my_bool) { + auto entry= static_cast(entry_); *length=(uint) entry->length; - return (uchar*) entry->key; + return reinterpret_cast(entry->key); } -static uchar* acl_role_get_key(ACL_ROLE *entry, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *acl_role_get_key(const void *entry_, size_t *length, + my_bool) { + auto entry= static_cast(entry_); *length=(uint) entry->user.length; - return (uchar*) entry->user.str; + return reinterpret_cast(entry->user.str); } struct ROLE_GRANT_PAIR : public Sql_alloc @@ -595,11 +597,12 @@ struct ROLE_GRANT_PAIR : public Sql_alloc const char *rolename, bool with_admin_option); }; -static uchar* acl_role_map_get_key(ROLE_GRANT_PAIR *entry, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *acl_role_map_get_key(const void *entry_, size_t *length, + my_bool) { + auto entry= static_cast(entry_); *length=(uint) entry->hashkey.length; - return (uchar*) entry->hashkey.str; + return reinterpret_cast(entry->hashkey.str); } bool ROLE_GRANT_PAIR::init(MEM_ROOT *mem, const char *username, @@ -688,7 +691,6 @@ bool ROLE_GRANT_PAIR::init(MEM_ROOT *mem, const char *username, static DYNAMIC_ARRAY acl_hosts, acl_users, acl_proxy_users; static Dynamic_array acl_dbs(PSI_INSTRUMENT_MEM, 0, 50); -typedef Dynamic_array::CMP_FUNC acl_dbs_cmp; static HASH acl_roles; /* An hash containing mappings user <--> role @@ -706,10 +708,10 @@ static DYNAMIC_ARRAY acl_wild_hosts; static Hash_filo *acl_cache; static uint grant_version=0; /* Version of priv tables. 
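/*
  Illustrative sketch, not part of the patch: element destructors passed to
  my_hash_init() or delete_dynamic_with_callback() now take void* and cast
  internally (see free_acl_user()/free_acl_role() below), so no
  function-pointer cast is needed at the call site.  Example_entry and
  example_free_entry() are hypothetical.
*/
struct Example_entry { char *buf; };

static void example_free_entry(void *entry_)
{
  auto entry= static_cast<Example_entry*>(entry_);
  my_free(entry->buf);                /* release what the element owns */
  my_free(entry);
}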
incremented by acl_load */ static privilege_t get_access(TABLE *form, uint fieldnr, uint *next_field=0); -static int acl_compare(const ACL_ACCESS *a, const ACL_ACCESS *b); -static int acl_user_compare(const ACL_USER *a, const ACL_USER *b); +static int acl_compare(const void *a, const void *b); +static int acl_user_compare(const void *a, const void *b); static void rebuild_acl_users(); -static int acl_db_compare(const ACL_DB *a, const ACL_DB *b); +static int acl_db_compare(const void *a, const void *b); static void rebuild_acl_dbs(); static void init_check_host(void); static void rebuild_check_host(void); @@ -726,9 +728,9 @@ static bool add_role_user_mapping(const char *uname, const char *hname, const ch static bool get_YN_as_bool(Field *field); #define ROLE_CYCLE_FOUND 2 -static int traverse_role_graph_up(ACL_ROLE *, void *, - int (*) (ACL_ROLE *, void *), - int (*) (ACL_ROLE *, ACL_ROLE *, void *)); +static int +traverse_role_graph_up(ACL_ROLE *, void *, int (*)(ACL_USER_BASE *, void *), + int (*)(ACL_USER_BASE *, ACL_ROLE *, void *)); static int traverse_role_graph_down(ACL_USER_BASE *, void *, int (*) (ACL_USER_BASE *, void *), @@ -2260,13 +2262,15 @@ error: } -static void free_acl_user(ACL_USER *user) +static void free_acl_user(void *user_) { + ACL_USER *user= static_cast(user_); delete_dynamic(&(user->role_grants)); } -static void free_acl_role(ACL_ROLE *role) +static void free_acl_role(void *role_) { + ACL_ROLE *role= static_cast(role_); delete_dynamic(&(role->role_grants)); delete_dynamic(&(role->parent_grantee)); } @@ -2542,10 +2546,9 @@ bool acl_init(bool dont_read_acl_tables) bool return_val; DBUG_ENTER("acl_init"); - acl_cache= new Hash_filo(key_memory_acl_cache, ACL_CACHE_SIZE, 0, 0, - (my_hash_get_key) acl_entry_get_key, - (my_hash_free_key) my_free, - &my_charset_utf8mb3_bin); + acl_cache= new Hash_filo(key_memory_acl_cache, ACL_CACHE_SIZE, 0, + 0, acl_entry_get_key, my_free, + &my_charset_utf8mb3_bin); /* cache built-in native authentication plugins, @@ -2915,7 +2918,7 @@ void acl_free(bool end) acl_public= NULL; free_root(&acl_memroot,MYF(0)); delete_dynamic(&acl_hosts); - delete_dynamic_with_callback(&acl_users, (FREE_FUNC) free_acl_user); + delete_dynamic_with_callback(&acl_users, free_acl_user); acl_dbs.free_memory(); delete_dynamic(&acl_wild_hosts); delete_dynamic(&acl_proxy_users); @@ -2998,12 +3001,11 @@ bool acl_reload(THD *thd) my_init_dynamic_array(key_memory_acl_mem, &acl_users, sizeof(ACL_USER), 50, 100, MYF(0)); acl_dbs.init(key_memory_acl_mem, 50, 100); my_init_dynamic_array(key_memory_acl_mem, &acl_proxy_users, sizeof(ACL_PROXY_USER), 50, 100, MYF(0)); - my_hash_init2(key_memory_acl_mem, &acl_roles,50, &my_charset_utf8mb3_bin, - 0, 0, 0, (my_hash_get_key) acl_role_get_key, 0, - (void (*)(void *))free_acl_role, 0); + my_hash_init2(key_memory_acl_mem, &acl_roles, 50, &my_charset_utf8mb3_bin, 0, + 0, 0, acl_role_get_key, 0, free_acl_role, 0); my_hash_init2(key_memory_acl_mem, &acl_roles_mappings, 50, - &my_charset_utf8mb3_bin, 0, 0, 0, (my_hash_get_key) - acl_role_map_get_key, 0, 0, 0); + &my_charset_utf8mb3_bin, 0, 0, 0, acl_role_map_get_key, 0, 0, + 0); old_mem= acl_memroot; delete_dynamic(&acl_wild_hosts); my_hash_free(&acl_check_hosts); @@ -3029,7 +3031,7 @@ bool acl_reload(THD *thd) my_hash_free(&old_acl_roles); free_root(&old_mem,MYF(0)); delete_dynamic(&old_acl_hosts); - delete_dynamic_with_callback(&old_acl_users, (FREE_FUNC) free_acl_user); + delete_dynamic_with_callback(&old_acl_users, free_acl_user); delete_dynamic(&old_acl_proxy_users); 
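/*
  Illustrative sketch, not part of the patch: two-argument comparators such
  as acl_compare()/acl_user_compare() below now have the plain const void*
  shape that my_qsort() expects, so they are passed without a
  function-pointer cast.  Example_rec and example_compare() are
  hypothetical.
*/
struct Example_rec { ulong sort; };

static int example_compare(const void *a_, const void *b_)
{
  auto a= static_cast<const Example_rec*>(a_);
  auto b= static_cast<const Example_rec*>(b_);
  return (a->sort < b->sort) - (a->sort > b->sort);   /* highest sort first */
}

/* usage: my_qsort(recs, n, sizeof(Example_rec), example_compare); */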
my_hash_free(&old_acl_roles_mappings); } @@ -3079,8 +3081,10 @@ static privilege_t get_access(TABLE *form, uint fieldnr, uint *next_field) } -static int acl_compare(const ACL_ACCESS *a, const ACL_ACCESS *b) +static int acl_compare(const void *a_, const void *b_) { + const ACL_ACCESS *a= static_cast(a_); + const ACL_ACCESS *b= static_cast(b_); if (a->sort > b->sort) return -1; if (a->sort < b->sort) @@ -3088,8 +3092,11 @@ static int acl_compare(const ACL_ACCESS *a, const ACL_ACCESS *b) return 0; } -static int acl_user_compare(const ACL_USER *a, const ACL_USER *b) +static int acl_user_compare(const void *a_, const void *b_) { + const ACL_USER *a= static_cast(a_); + const ACL_USER *b= static_cast(b_); + int res= strcmp(a->user.str, b->user.str); if (res) return res; @@ -3108,8 +3115,10 @@ static int acl_user_compare(const ACL_USER *a, const ACL_USER *b) return -strcmp(a->host.hostname, b->host.hostname); } -static int acl_db_compare(const ACL_DB *a, const ACL_DB *b) +static int acl_db_compare(const void *a_, const void *b_) { + const ACL_DB *a= static_cast(a_); + const ACL_DB *b= static_cast(b_); int res= strcmp(a->user, b->user); if (res) return res; @@ -3505,11 +3514,11 @@ int acl_setrole(THD *thd, const char *rolename, privilege_t access) return 0; } -static uchar* check_get_key(ACL_USER *buff, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *check_get_key(const void *buff_, size_t *length, my_bool) { + auto buff= static_cast(buff_); *length=buff->hostname_length; - return (uchar*) buff->host.hostname; + return reinterpret_cast(buff->host.hostname); } @@ -3854,9 +3863,9 @@ static void init_check_host(void) (void) my_init_dynamic_array(key_memory_acl_mem, &acl_wild_hosts, sizeof(struct acl_host_and_ip), acl_users.elements, 1, MYF(0)); - (void) my_hash_init(key_memory_acl_mem, &acl_check_hosts,system_charset_info, - acl_users.elements, 0, 0, - (my_hash_get_key) check_get_key, 0, 0); + (void) my_hash_init(key_memory_acl_mem, &acl_check_hosts, + system_charset_info, acl_users.elements, 0, 0, + check_get_key, 0, 0); if (!allow_all_hosts) { for (size_t i=0 ; i < acl_users.elements ; i++) @@ -5455,11 +5464,12 @@ public: }; -static uchar* get_key_column(GRANT_COLUMN *buff, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *get_key_column(const void *buff_, size_t *length, my_bool) { + auto buff= + static_cast(buff_); *length=buff->key_length; - return (uchar*) buff->column; + return reinterpret_cast(buff->column); } class GRANT_NAME :public Sql_alloc @@ -5503,7 +5513,7 @@ public: void init_hash() { my_hash_init2(key_memory_acl_memex, &hash_columns, 4, system_charset_info, - 0, 0, 0, (my_hash_get_key) get_key_column, 0, 0, 0); + 0, 0, 0, get_key_column, 0, 0, 0); } }; @@ -5688,16 +5698,17 @@ GRANT_TABLE::~GRANT_TABLE() } -static uchar* get_grant_table(GRANT_NAME *buff, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *get_grant_table(const void *buff_, size_t *length, my_bool) { + auto buff= static_cast(buff_); *length=buff->key_length; - return (uchar*) buff->hash_key; + return reinterpret_cast(buff->hash_key); } -static void free_grant_table(GRANT_TABLE *grant_table) +static void free_grant_table(void *grant_table_) { + GRANT_TABLE *grant_table= static_cast(grant_table_); grant_table->~GRANT_TABLE(); } @@ -6334,19 +6345,20 @@ static enum PRIVS_TO_MERGE::what sp_privs_to_merge(enum_sp_type type) } -static int init_role_for_merging(ACL_ROLE *role, void *context) +static int 
init_role_for_merging(ACL_USER_BASE *role_, void *context) { + ACL_ROLE *role= static_cast(role_); role->counter= 0; return 0; } -static int count_subgraph_nodes(ACL_ROLE *role, ACL_ROLE *grantee, void *context) +static int count_subgraph_nodes(ACL_USER_BASE *, ACL_ROLE *grantee, void *context) { grantee->counter++; return 0; } -static int merge_role_privileges(ACL_ROLE *, ACL_ROLE *, void *); +static int merge_role_privileges(ACL_USER_BASE *, ACL_ROLE *, void *); static bool merge_one_role_privileges(ACL_ROLE *grantee, PRIVS_TO_MERGE what); /** @@ -6586,13 +6598,11 @@ end: */ static int traverse_role_graph_up(ACL_ROLE *role, void *context, - int (*on_node) (ACL_ROLE *role, void *context), - int (*on_edge) (ACL_ROLE *current, ACL_ROLE *neighbour, void *context)) + int (*on_node) (ACL_USER_BASE *role, void *context), + int (*on_edge) (ACL_USER_BASE *current, ACL_ROLE *neighbour, void *context)) { - return traverse_role_graph_impl(role, context, - my_offsetof(ACL_ROLE, parent_grantee), - (int (*)(ACL_USER_BASE *, void *))on_node, - (int (*)(ACL_USER_BASE *, ACL_ROLE *, void *))on_edge); + return traverse_role_graph_impl( + role, context, my_offsetof(ACL_ROLE, parent_grantee), on_node, on_edge); } /** @@ -6623,10 +6633,11 @@ static int traverse_role_graph_down(ACL_USER_BASE *user, void *context, entries using the role hash. We put all these "interesting" entries in a (suposedly small) dynamic array and them use it for merging. */ -static uchar* role_key(const ACL_ROLE *role, size_t *klen, my_bool) +static const uchar *role_key(const void *role_, size_t *klen, my_bool) { + auto role= static_cast(role_); *klen= role->user.length; - return (uchar*) role->user.str; + return reinterpret_cast(role->user.str); } typedef Hash_set role_hash_t; @@ -6645,8 +6656,10 @@ static bool merge_role_global_privileges(ACL_ROLE *grantee) return old != grantee->access; } -static int db_name_sort(const int *db1, const int *db2) +static int db_name_sort(const void *db1_, const void *db2_) { + auto db1= static_cast(db1_); + auto db2= static_cast(db2_); return strcmp(acl_dbs.at(*db1).db, acl_dbs.at(*db2).db); } @@ -6799,8 +6812,10 @@ static bool merge_role_db_privileges(ACL_ROLE *grantee, const char *dbname, return update_flags; } -static int table_name_sort(GRANT_TABLE * const *tbl1, GRANT_TABLE * const *tbl2) +static int table_name_sort(const void *tbl1_, const void *tbl2_) { + auto tbl1= static_cast(tbl1_); + auto tbl2= static_cast(tbl2_); int res = strcmp((*tbl1)->db, (*tbl2)->db); if (res) return res; return strcmp((*tbl1)->tname, (*tbl2)->tname); @@ -7001,8 +7016,10 @@ static bool merge_role_table_and_column_privileges(ACL_ROLE *grantee, return update_flags; } -static int routine_name_sort(GRANT_NAME * const *r1, GRANT_NAME * const *r2) +static int routine_name_sort(const void *r1_, const void *r2_) { + auto r1= static_cast(r1_); + auto r2= static_cast(r2_); int res= strcmp((*r1)->db, (*r2)->db); if (res) return res; return strcmp((*r1)->tname, (*r2)->tname); @@ -7125,7 +7142,7 @@ static bool merge_role_routine_grant_privileges(ACL_ROLE *grantee, /** update privileges of the 'grantee' from all roles, granted to it */ -static int merge_role_privileges(ACL_ROLE *role __attribute__((unused)), +static int merge_role_privileges(ACL_USER_BASE *, ACL_ROLE *grantee, void *context) { PRIVS_TO_MERGE *data= (PRIVS_TO_MERGE *)context; @@ -8111,20 +8128,16 @@ static bool grant_load(THD *thd, Sql_mode_instant_remove sms(thd, MODE_PAD_CHAR_TO_FULL_LENGTH); (void) my_hash_init(key_memory_acl_memex, &column_priv_hash, - 
&my_charset_utf8mb3_bin, 0,0,0, (my_hash_get_key) - get_grant_table, (my_hash_free_key) free_grant_table, 0); + &my_charset_utf8mb3_bin, 0, 0, 0, get_grant_table, + free_grant_table, 0); (void) my_hash_init(key_memory_acl_memex, &proc_priv_hash, - &my_charset_utf8mb3_bin, 0,0,0, (my_hash_get_key) - get_grant_table, 0,0); + &my_charset_utf8mb3_bin, 0, 0, 0, get_grant_table, 0, 0); (void) my_hash_init(key_memory_acl_memex, &func_priv_hash, - &my_charset_utf8mb3_bin, 0,0,0, (my_hash_get_key) - get_grant_table, 0,0); + &my_charset_utf8mb3_bin, 0, 0, 0, get_grant_table, 0, 0); (void) my_hash_init(key_memory_acl_memex, &package_spec_priv_hash, - &my_charset_utf8mb3_bin, 0,0,0, (my_hash_get_key) - get_grant_table, 0,0); + &my_charset_utf8mb3_bin, 0, 0, 0, get_grant_table, 0, 0); (void) my_hash_init(key_memory_acl_memex, &package_body_priv_hash, - &my_charset_utf8mb3_bin, 0,0,0, (my_hash_get_key) - get_grant_table, 0,0); + &my_charset_utf8mb3_bin, 0, 0, 0, get_grant_table, 0, 0); init_sql_alloc(key_memory_acl_mem, &grant_memroot, ACL_ALLOC_BLOCK_SIZE, 0, MYF(0)); t_table= tables_priv.table(); @@ -12294,9 +12307,9 @@ void Sql_cmd_grant::warn_hostname_requires_resolving(THD *thd, while ((user= it++)) { if (opt_skip_name_resolve && hostname_requires_resolving(user->host.str)) - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_WARN_HOSTNAME_WONT_WORK, - ER_THD(thd, ER_WARN_HOSTNAME_WONT_WORK)); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + ER_WARN_HOSTNAME_WONT_WORK, + ER_THD(thd, ER_WARN_HOSTNAME_WONT_WORK)); } } @@ -13465,7 +13478,7 @@ static bool secure_auth(THD *thd) else { my_error(ER_NOT_SUPPORTED_AUTH_MODE, MYF(0)); - general_log_print(thd, COM_CONNECT, + general_log_print(thd, COM_CONNECT, "%s", ER_THD(thd, ER_NOT_SUPPORTED_AUTH_MODE)); } return 1; @@ -13538,7 +13551,7 @@ static bool send_plugin_request_packet(MPVIO_EXT *mpvio, if (switch_from_short_to_long_scramble) { my_error(ER_NOT_SUPPORTED_AUTH_MODE, MYF(0)); - general_log_print(mpvio->auth_info.thd, COM_CONNECT, + general_log_print(mpvio->auth_info.thd, COM_CONNECT, "%s", ER_THD(mpvio->auth_info.thd, ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN (1); } @@ -13628,7 +13641,7 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio) !ignore_max_password_errors(mpvio->acl_user)) { my_error(ER_USER_IS_BLOCKED, MYF(0)); - general_log_print(mpvio->auth_info.thd, COM_CONNECT, + general_log_print(mpvio->auth_info.thd, COM_CONNECT, "%s", ER_THD(mpvio->auth_info.thd, ER_USER_IS_BLOCKED)); DBUG_RETURN(1); } @@ -13643,7 +13656,7 @@ static bool find_mpvio_user(MPVIO_EXT *mpvio) DBUG_ASSERT(my_strcasecmp(system_charset_info, mpvio->acl_user->auth->plugin.str, old_password_plugin_name.str)); my_error(ER_NOT_SUPPORTED_AUTH_MODE, MYF(0)); - general_log_print(mpvio->auth_info.thd, COM_CONNECT, + general_log_print(mpvio->auth_info.thd, COM_CONNECT, "%s", ER_THD(mpvio->auth_info.thd, ER_NOT_SUPPORTED_AUTH_MODE)); DBUG_RETURN (1); } diff --git a/sql/sql_analyse.cc b/sql/sql_analyse.cc index 7d34bc67f2e..0da8f2ca02a 100644 --- a/sql/sql_analyse.cc +++ b/sql/sql_analyse.cc @@ -38,32 +38,40 @@ #define MAX_TREEMEM 8192 #define MAX_TREE_ELEMENTS 256 -int sortcmp2(void* cmp_arg __attribute__((unused)), - const String *a,const String *b) +int sortcmp2(void *, const void *a_, const void *b_) { + const String *a= static_cast(a_); + const String *b= static_cast(b_); return sortcmp(a,b,a->charset()); } -int compare_double2(void* cmp_arg __attribute__((unused)), - const double *s, const double *t) +int compare_double2(void *, const void *s_, const void *t_) { + 
const double *s= static_cast(s_); + const double *t= static_cast(t_); + return compare_double(s,t); } -int compare_longlong2(void* cmp_arg __attribute__((unused)), - const longlong *s, const longlong *t) +int compare_longlong2(void *, const void *s_, const void *t_) { + const longlong *s= static_cast(s_); + const longlong *t= static_cast(t_); return compare_longlong(s,t); } -int compare_ulonglong2(void* cmp_arg __attribute__((unused)), - const ulonglong *s, const ulonglong *t) +int compare_ulonglong2(void *, const void *s_, const void *t_) { + const ulonglong *s= static_cast(s_); + const ulonglong *t= static_cast(t_); return compare_ulonglong(s,t); } -int compare_decimal2(int* len, const char *s, const char *t) +int compare_decimal2(void *_len, const void *s_, const void *t_) { + int *len= static_cast(_len); + const char *s= static_cast(s_); + const char *t= static_cast(t_); return memcmp(s, t, *len); } @@ -1075,10 +1083,10 @@ String *field_decimal::std(String *s, ha_rows rows) } -int collect_string(String *element, - element_count count __attribute__((unused)), - TREE_INFO *info) +int collect_string(void *element_, element_count, void *info_) { + String *element= static_cast(element_); + TREE_INFO *info= static_cast(info_); if (info->found) info->str->append(','); else @@ -1091,9 +1099,10 @@ int collect_string(String *element, } // collect_string -int collect_real(double *element, element_count count __attribute__((unused)), - TREE_INFO *info) +int collect_real(void *element_, element_count, void *info_) { + double *element= static_cast(element_); + TREE_INFO *info= static_cast(info_); char buff[MAX_FIELD_WIDTH]; String s(buff, sizeof(buff),current_thd->charset()); @@ -1109,9 +1118,10 @@ int collect_real(double *element, element_count count __attribute__((unused)), } // collect_real -int collect_decimal(uchar *element, element_count count, - TREE_INFO *info) +int collect_decimal(void *element_, element_count count, void *info_) { + uchar *element= static_cast(element_); + TREE_INFO *info= static_cast(info_); char buff[DECIMAL_MAX_STR_LENGTH]; String s(buff, sizeof(buff),&my_charset_bin); @@ -1128,10 +1138,10 @@ int collect_decimal(uchar *element, element_count count, } -int collect_longlong(longlong *element, - element_count count __attribute__((unused)), - TREE_INFO *info) +int collect_longlong(void *element_, element_count, void *info_) { + longlong *element= static_cast(element_); + TREE_INFO *info= static_cast(info_); char buff[MAX_FIELD_WIDTH]; String s(buff, sizeof(buff),&my_charset_bin); @@ -1147,10 +1157,10 @@ int collect_longlong(longlong *element, } // collect_longlong -int collect_ulonglong(ulonglong *element, - element_count count __attribute__((unused)), - TREE_INFO *info) +int collect_ulonglong(void *element_, element_count, void *info_) { + ulonglong *element= static_cast(element_); + TREE_INFO *info= static_cast(info_); char buff[MAX_FIELD_WIDTH]; String s(buff, sizeof(buff),&my_charset_bin); @@ -1214,7 +1224,7 @@ uint check_ulonglong(const char *str, uint length) const char *long_str = "2147483647", *ulonglong_str = "18446744073709551615"; const uint long_len = 10, ulonglong_len = 20; - while (*str == '0' && length) + while (length && *str == '0') { str++; length--; } diff --git a/sql/sql_analyse.h b/sql/sql_analyse.h index 7e3b66d024e..00d9500bbd9 100644 --- a/sql/sql_analyse.h +++ b/sql/sql_analyse.h @@ -57,15 +57,12 @@ uint check_ulonglong(const char *str, uint length); bool get_ev_num_info(EV_NUM_INFO *ev_info, NUM_INFO *info, const char *num); bool 
test_if_number(NUM_INFO *info, const char *str, uint str_len); int compare_double(const double *s, const double *t); -int compare_double2(void* cmp_arg __attribute__((unused)), - const double *s, const double *t); +int compare_double2(void *, const void *s, const void *t); int compare_longlong(const longlong *s, const longlong *t); -int compare_longlong2(void* cmp_arg __attribute__((unused)), - const longlong *s, const longlong *t); +int compare_longlong2(void *, const void *s, const void *t); int compare_ulonglong(const ulonglong *s, const ulonglong *t); -int compare_ulonglong2(void* cmp_arg __attribute__((unused)), - const ulonglong *s, const ulonglong *t); -int compare_decimal2(int* len, const char *s, const char *t); +int compare_ulonglong2(void *, const void *s, const void *t); +int compare_decimal2(void *len, const void *s, const void *t); Procedure *proc_analyse_init(THD *thd, ORDER *param, select_result *result, List &field_list); int free_string(void* str, TREE_FREE, void*); @@ -98,12 +95,9 @@ public: friend class analyse; }; +int collect_string(void *element, element_count count, void *info); -int collect_string(String *element, element_count count, - TREE_INFO *info); - -int sortcmp2(void* cmp_arg __attribute__((unused)), - const String *a,const String *b); +int sortcmp2(void *, const void *a, const void *b); class field_str :public field_info { @@ -120,8 +114,10 @@ public: max_arg("",0,default_charset_info), sum(0), must_be_blob(0), was_zero_fill(0), was_maybe_zerofill(0), can_be_still_num(1) - { init_tree(&tree, 0, 0, sizeof(String), (qsort_cmp2) sortcmp2, - free_string, NULL, MYF(MY_THREAD_SPECIFIC)); }; + { + init_tree(&tree, 0, 0, sizeof(String), sortcmp2, free_string, NULL, + MYF(MY_THREAD_SPECIFIC)); + }; void add() override; void get_opt_type(String*, ha_rows) override; @@ -141,15 +137,14 @@ public: friend int collect_string(String *element, element_count count, TREE_INFO *info); tree_walk_action collect_enum() override - { return (tree_walk_action) collect_string; } + { return collect_string; } String *std(String *s __attribute__((unused)), ha_rows rows __attribute__((unused))) override { return (String*) 0; } }; -int collect_decimal(uchar *element, element_count count, - TREE_INFO *info); +int collect_decimal(void *element, element_count count, void *info); class field_decimal :public field_info { @@ -161,8 +156,8 @@ public: field_decimal(Item* a, analyse* b) :field_info(a,b) { bin_size= my_decimal_get_binary_size(a->max_length, a->decimals); - init_tree(&tree, 0, 0, bin_size, (qsort_cmp2)compare_decimal2, - 0, (void *)&bin_size, MYF(MY_THREAD_SPECIFIC)); + init_tree(&tree, 0, 0, bin_size, compare_decimal2, 0, (void *) &bin_size, + MYF(MY_THREAD_SPECIFIC)); }; void add() override; @@ -173,12 +168,12 @@ public: friend int collect_decimal(uchar *element, element_count count, TREE_INFO *info); tree_walk_action collect_enum() override - { return (tree_walk_action) collect_decimal; } + { return collect_decimal; } String *std(String *s, ha_rows rows) override; }; -int collect_real(double *element, element_count count, TREE_INFO *info); +int collect_real(void *element, element_count count, void *info); class field_real: public field_info { @@ -189,9 +184,10 @@ class field_real: public field_info public: field_real(Item* a, analyse* b) :field_info(a,b), min_arg(0), max_arg(0), sum(0), sum_sqr(0), max_notzero_dec_len(0) - { init_tree(&tree, 0, 0, sizeof(double), - (qsort_cmp2) compare_double2, NULL, NULL, - MYF(MY_THREAD_SPECIFIC)); } + { + init_tree(&tree, 0, 0, 
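/*
  Sketch of the walk-callback shape used by the collect_*() functions above:
  the element and the per-walk argument both arrive as void*, and the callback
  casts them back to the concrete types it expects. The walker below is only a
  stand-in for tree_walk(), and element_count is assumed here to be an
  unsigned count; none of these names are the real MariaDB definitions.
*/
#include <cstdio>
#include <string>

typedef unsigned long element_count;                 /* assumption for the sketch */
typedef int (*walk_action)(void *element, element_count count, void *arg);

struct Collect_info { std::string out; bool found; };

static int collect_longlong_demo(void *element_, element_count, void *info_)
{
  long long *element= static_cast<long long*>(element_);
  Collect_info *info= static_cast<Collect_info*>(info_);
  if (info->found)
    info->out+= ',';
  info->found= true;
  info->out+= std::to_string(*element);
  return 0;
}

static int walk(long long *elems, size_t n, walk_action action, void *arg)
{
  for (size_t i= 0; i < n; i++)
    if (int err= action(&elems[i], 1, arg))
      return err;
  return 0;
}

int main()
{
  long long vals[]= {1, 5, 9};
  Collect_info info{std::string(), false};
  walk(vals, 3, collect_longlong_demo, &info);       /* callback passed uncast */
  printf("(%s)\n", info.out.c_str());                /* prints (1,5,9) */
  return 0;
}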
sizeof(double), compare_double2, NULL, NULL, + MYF(MY_THREAD_SPECIFIC)); + } void add() override; void get_opt_type(String*, ha_rows) override; @@ -230,11 +226,10 @@ public: friend int collect_real(double *element, element_count count, TREE_INFO *info); tree_walk_action collect_enum() override - { return (tree_walk_action) collect_real;} + { return collect_real;} }; -int collect_longlong(longlong *element, element_count count, - TREE_INFO *info); +int collect_longlong(void *element, element_count count, void *info); class field_longlong: public field_info { @@ -244,9 +239,10 @@ class field_longlong: public field_info public: field_longlong(Item* a, analyse* b) :field_info(a,b), min_arg(0), max_arg(0), sum(0), sum_sqr(0) - { init_tree(&tree, 0, 0, sizeof(longlong), - (qsort_cmp2) compare_longlong2, NULL, NULL, - MYF(MY_THREAD_SPECIFIC)); } + { + init_tree(&tree, 0, 0, sizeof(longlong), compare_longlong2, NULL, NULL, + MYF(MY_THREAD_SPECIFIC)); + } void add() override; void get_opt_type(String*, ha_rows) override; @@ -276,11 +272,10 @@ public: friend int collect_longlong(longlong *element, element_count count, TREE_INFO *info); tree_walk_action collect_enum() override - { return (tree_walk_action) collect_longlong;} + { return collect_longlong;} }; -int collect_ulonglong(ulonglong *element, element_count count, - TREE_INFO *info); +int collect_ulonglong(void *element, element_count count, void *info); class field_ulonglong: public field_info { @@ -290,9 +285,10 @@ class field_ulonglong: public field_info public: field_ulonglong(Item* a, analyse * b) :field_info(a,b), min_arg(0), max_arg(0), sum(0),sum_sqr(0) - { init_tree(&tree, 0, 0, sizeof(ulonglong), - (qsort_cmp2) compare_ulonglong2, NULL, NULL, - MYF(MY_THREAD_SPECIFIC)); } + { + init_tree(&tree, 0, 0, sizeof(ulonglong), compare_ulonglong2, NULL, NULL, + MYF(MY_THREAD_SPECIFIC)); + } void add() override; void get_opt_type(String*, ha_rows) override; String *get_min_arg(String *s) override { s->set(min_arg,my_thd_charset); return s; } @@ -323,7 +319,7 @@ public: friend int collect_ulonglong(ulonglong *element, element_count count, TREE_INFO *info); tree_walk_action collect_enum() override - { return (tree_walk_action) collect_ulonglong; } + { return collect_ulonglong; } }; diff --git a/sql/sql_array.h b/sql/sql_array.h index 5f095edfbfb..d3b20af605e 100644 --- a/sql/sql_array.h +++ b/sql/sql_array.h @@ -291,17 +291,14 @@ public: delete_dynamic(&array); } - typedef int (*CMP_FUNC)(const Elem *el1, const Elem *el2); - - void sort(CMP_FUNC cmp_func) + void sort(int (*cmp_func)(const void *, const void *)) { - my_qsort(array.buffer, array.elements, sizeof(Elem), (qsort_cmp)cmp_func); + my_qsort(array.buffer, array.elements, sizeof(Elem), cmp_func); } - typedef int (*CMP_FUNC2)(void *, const Elem *el1, const Elem *el2); - void sort(CMP_FUNC2 cmp_func, void *data) + void sort(qsort_cmp2 cmp_func, void *data) { - my_qsort2(array.buffer, array.elements, sizeof(Elem), (qsort2_cmp)cmp_func, data); + my_qsort2(array.buffer, array.elements, sizeof(Elem), cmp_func, data); } }; diff --git a/sql/sql_audit.cc b/sql/sql_audit.cc index c9c59c1b849..5e10ca17448 100644 --- a/sql/sql_audit.cc +++ b/sql/sql_audit.cc @@ -18,8 +18,8 @@ #include "mysqld.h" #include "sql_audit.h" -extern int initialize_audit_plugin(st_plugin_int *plugin); -extern int finalize_audit_plugin(st_plugin_int *plugin); +extern int initialize_audit_plugin(void *plugin); +extern int finalize_audit_plugin(void *plugin); #ifndef EMBEDDED_LIBRARY @@ -262,8 +262,9 @@ void 
mysql_audit_finalize() @retval TRUE There was an error. */ -int initialize_audit_plugin(st_plugin_int *plugin) +int initialize_audit_plugin(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); st_mysql_audit *data= (st_mysql_audit*) plugin->plugin->info; if (!data->event_notify || !data->class_mask[0]) @@ -346,8 +347,9 @@ static my_bool calc_class_mask(THD *thd, plugin_ref plugin, void *arg) @retval FALSE OK @retval TRUE There was an error. */ -int finalize_audit_plugin(st_plugin_int *plugin) +int finalize_audit_plugin(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); int deinit_status= 0; unsigned long event_class_mask[MYSQL_AUDIT_CLASS_MASK_SIZE]; @@ -455,13 +457,13 @@ void mysql_audit_finalize() } -int initialize_audit_plugin(st_plugin_int *plugin) +int initialize_audit_plugin(void *plugin) { return 1; } -int finalize_audit_plugin(st_plugin_int *plugin) +int finalize_audit_plugin(void *plugin) { return 0; } diff --git a/sql/sql_base.h b/sql/sql_base.h index 6a9f13ab0a3..24b939a1467 100644 --- a/sql/sql_base.h +++ b/sql/sql_base.h @@ -334,10 +334,9 @@ int dynamic_column_error_message(enum_dyncol_func_result rc); /* open_and_lock_tables with optional derived handling */ int open_and_lock_tables_derived(THD *thd, TABLE_LIST *tables, bool derived); -extern "C" int simple_raw_key_cmp(void* arg, const void* key1, - const void* key2); +extern "C" qsort_cmp2 simple_raw_key_cmp; extern "C" int count_distinct_walk(void *elem, element_count count, void *arg); -int simple_str_key_cmp(void* arg, uchar* key1, uchar* key2); +int simple_str_key_cmp(void *arg, const void *key1, const void *key2); extern Item **not_found_item; extern Field *not_found_field; diff --git a/sql/sql_cache.cc b/sql/sql_cache.cc index d3fbd034d1f..d19e7933beb 100644 --- a/sql/sql_cache.cc +++ b/sql/sql_cache.cc @@ -840,13 +840,13 @@ void Query_cache_block::destroy() DBUG_VOID_RETURN; } -uint Query_cache_block::headers_len() +uint Query_cache_block::headers_len() const { return (ALIGN_SIZE(sizeof(Query_cache_block_table)*n_tables) + ALIGN_SIZE(sizeof(Query_cache_block))); } -uchar* Query_cache_block::data(void) +uchar* Query_cache_block::data(void) const { return (uchar*)( ((uchar*)this) + headers_len() ); } @@ -893,14 +893,14 @@ Query_cache_block_table * Query_cache_block::table(TABLE_COUNTER_TYPE n) extern "C" { -uchar *query_cache_table_get_key(const uchar *record, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *query_cache_table_get_key(const void *record, size_t *length, + my_bool) { - Query_cache_block* table_block = (Query_cache_block*) record; - *length = (table_block->used - table_block->headers_len() - - ALIGN_SIZE(sizeof(Query_cache_table))); - return (((uchar *) table_block->data()) + - ALIGN_SIZE(sizeof(Query_cache_table))); + auto table_block= static_cast(record); + *length= (table_block->used - table_block->headers_len() - + ALIGN_SIZE(sizeof(Query_cache_table))); + return reinterpret_cast( + ((table_block->data()) + ALIGN_SIZE(sizeof(Query_cache_table)))); } } @@ -991,14 +991,14 @@ void Query_cache_query::unlock_n_destroy() extern "C" { -uchar *query_cache_query_get_key(const uchar *record, size_t *length, - my_bool not_used) +const uchar *query_cache_query_get_key(const void *record, size_t *length, + my_bool) { - Query_cache_block *query_block = (Query_cache_block*) record; - *length = (query_block->used - query_block->headers_len() - - ALIGN_SIZE(sizeof(Query_cache_query))); - return (((uchar *) query_block->data()) + - ALIGN_SIZE(sizeof(Query_cache_query))); 
+ auto query_block= static_cast(record); + *length= (query_block->used - query_block->headers_len() - + ALIGN_SIZE(sizeof(Query_cache_query))); + return reinterpret_cast + (((query_block->data()) + ALIGN_SIZE(sizeof(Query_cache_query)))); } } @@ -4329,10 +4329,10 @@ my_bool Query_cache::move_by_type(uchar **border, *new_block =(Query_cache_block *) *border; size_t tablename_offset = block->table()->table() - block->table()->db(); char *data = (char*) block->data(); - uchar *key; + const uchar *key; size_t key_length; - key=query_cache_table_get_key((uchar*) block, &key_length, 0); - my_hash_first(&tables, (uchar*) key, key_length, &record_idx); + key=query_cache_table_get_key( block, &key_length, 0); + my_hash_first(&tables, key, key_length, &record_idx); block->destroy(); new_block->init(len); @@ -4389,10 +4389,10 @@ my_bool Query_cache::move_by_type(uchar **border, char *data = (char*) block->data(); Query_cache_block *first_result_block = ((Query_cache_query *) block->data())->result(); - uchar *key; + const uchar *key; size_t key_length; - key=query_cache_query_get_key((uchar*) block, &key_length, 0); - my_hash_first(&queries, (uchar*) key, key_length, &record_idx); + key=query_cache_query_get_key( block, &key_length, 0); + my_hash_first(&queries, key, key_length, &record_idx); block->query()->unlock_n_destroy(); block->destroy(); // Move table of used tables @@ -5046,9 +5046,9 @@ my_bool Query_cache::check_integrity(bool locked) DBUG_PRINT("qcache", ("block %p, type %u...", block, (uint) block->type)); size_t length; - uchar *key = query_cache_query_get_key((uchar*) block, &length, 0); + const uchar *key= query_cache_query_get_key(block, &length, 0); uchar* val = my_hash_search(&queries, key, length); - if (((uchar*)block) != val) + if ((reinterpret_cast(block)) != val) { DBUG_PRINT("error", ("block %p found in queries hash like %p", block, val)); @@ -5081,9 +5081,9 @@ my_bool Query_cache::check_integrity(bool locked) DBUG_PRINT("qcache", ("block %p, type %u...", block, (uint) block->type)); size_t length; - uchar *key = query_cache_table_get_key((uchar*) block, &length, 0); + const uchar *key= query_cache_table_get_key(block, &length, 0); uchar* val = my_hash_search(&tables, key, length); - if (((uchar*)block) != val) + if (reinterpret_cast(block) != val) { DBUG_PRINT("error", ("block %p found in tables hash like %p", block, val)); diff --git a/sql/sql_cache.h b/sql/sql_cache.h index 1bdff55d963..a79e938fc88 100644 --- a/sql/sql_cache.h +++ b/sql/sql_cache.h @@ -141,8 +141,8 @@ struct Query_cache_block inline bool is_free(void) { return type == FREE; } void init(size_t length); void destroy(); - uint headers_len(); - uchar* data(void); + uint headers_len() const; + uchar* data(void) const; Query_cache_query *query(); Query_cache_table *table(); Query_cache_result *result(); @@ -256,10 +256,10 @@ struct Query_cache_result extern "C" { - uchar *query_cache_query_get_key(const uchar *record, size_t *length, - my_bool not_used); - uchar *query_cache_table_get_key(const uchar *record, size_t *length, - my_bool not_used); + const uchar *query_cache_query_get_key(const void *record, size_t *length, + my_bool); + const uchar *query_cache_table_get_key(const void *record, size_t *length, + my_bool); } extern "C" void query_cache_invalidate_by_MyISAM_filename(const char* filename); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index bcf699dca9a..08354e0fa2d 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -87,15 +87,17 @@ char empty_c_string[1]= {0}; /* used for not defined db */ ** 
User variables ****************************************************************************/ -extern "C" uchar *get_var_key(user_var_entry *entry, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" const uchar *get_var_key(const void *entry_, size_t *length, + my_bool) { + auto entry= static_cast<const user_var_entry*>(entry_); *length= entry->name.length; - return (uchar*) entry->name.str; + return reinterpret_cast<const uchar*>(entry->name.str); } -extern "C" void free_user_var(user_var_entry *entry) +extern "C" void free_user_var(void *entry_) { + user_var_entry *entry= static_cast<user_var_entry*>(entry_); char *pos= (char*) entry+ALIGN_SIZE(sizeof(*entry)); if (entry->value && entry->value != pos) my_free(entry->value); @@ -104,18 +106,17 @@ extern "C" void free_user_var(user_var_entry *entry) /* Functions for last-value-from-sequence hash */ -extern "C" uchar *get_sequence_last_key(SEQUENCE_LAST_VALUE *entry, - size_t *length, - my_bool not_used - __attribute__((unused))) +extern "C" const uchar *get_sequence_last_key(const void *entry_, + size_t *length, my_bool) { + auto *entry= static_cast<const SEQUENCE_LAST_VALUE*>(entry_); *length= entry->length; - return (uchar*) entry->key; + return entry->key; } -extern "C" void free_sequence_last(SEQUENCE_LAST_VALUE *entry) +extern "C" void free_sequence_last(void *entry) { - delete entry; + delete static_cast<SEQUENCE_LAST_VALUE*>(entry); } @@ -620,8 +621,9 @@ handle_condition(THD *thd, timeouts at end of query (and thus before THD is destroyed) */ -extern "C" void thd_kill_timeout(THD* thd) +extern "C" void thd_kill_timeout(void *thd_) { + THD *thd= static_cast<THD*>(thd_); thd->status_var.max_statement_time_exceeded++; /* Kill queries that can't cause data corruptions */ thd->awake(KILL_TIMEOUT); @@ -912,12 +914,11 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) #endif user_connect=(USER_CONN *)0; my_hash_init(key_memory_user_var_entry, &user_vars, system_charset_info, - USER_VARS_HASH_SIZE, 0, 0, (my_hash_get_key) get_var_key, - (my_hash_free_key) free_user_var, HASH_THREAD_SPECIFIC); - my_hash_init(PSI_INSTRUMENT_ME, &sequences, Lex_ident_fs::charset_info(), - SEQUENCES_HASH_SIZE, 0, 0, (my_hash_get_key) - get_sequence_last_key, (my_hash_free_key) free_sequence_last, + USER_VARS_HASH_SIZE, 0, 0, get_var_key, free_user_var, HASH_THREAD_SPECIFIC); + my_hash_init(PSI_INSTRUMENT_ME, &sequences, Lex_ident_fs::charset_info(), + SEQUENCES_HASH_SIZE, 0, 0, get_sequence_last_key, + free_sequence_last, HASH_THREAD_SPECIFIC); /* For user vars replication*/ if (opt_bin_log) @@ -1549,12 +1550,11 @@ void THD::change_user(void) init(); stmt_map.reset(); my_hash_init(key_memory_user_var_entry, &user_vars, system_charset_info, - USER_VARS_HASH_SIZE, 0, 0, (my_hash_get_key) get_var_key, - (my_hash_free_key) free_user_var, HASH_THREAD_SPECIFIC); + USER_VARS_HASH_SIZE, 0, 0, get_var_key, free_user_var, + HASH_THREAD_SPECIFIC); my_hash_init(key_memory_user_var_entry, &sequences, - Lex_ident_fs::charset_info(), - SEQUENCES_HASH_SIZE, 0, 0, (my_hash_get_key) - get_sequence_last_key, (my_hash_free_key) free_sequence_last, + Lex_ident_fs::charset_info(), SEQUENCES_HASH_SIZE, 0, 0, + get_sequence_last_key, free_sequence_last, HASH_THREAD_SPECIFIC); sp_caches_clear(); opt_trace.delete_traces(); @@ -4225,13 +4225,12 @@ Statement::~Statement() = default; C_MODE_START -static uchar * -get_statement_id_as_hash_key(const uchar *record, size_t *key_length, - my_bool not_used __attribute__((unused))) +static const uchar *get_statement_id_as_hash_key(const void *record, + size_t *key_length, my_bool) { - const Statement *statement= (const Statement *)
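/*
  Sketch of the get_key callback shape used by get_var_key() and
  get_sequence_last_key() above: the hash hands the stored element back as
  const void*, and the callback returns a pointer/length pair for the key
  bytes. The struct and the linear lookup below are simplified stand-ins, not
  the real my_hash_* interface.
*/
#include <cstdio>
#include <cstring>

typedef unsigned char uchar;

struct user_var { const char *name; size_t name_len; int value; };

static const uchar *get_var_key_demo(const void *entry_, size_t *length)
{
  const user_var *entry= static_cast<const user_var*>(entry_);
  *length= entry->name_len;
  return reinterpret_cast<const uchar*>(entry->name);
}

/* Linear "hash" lookup, just to exercise the callback. */
static const user_var *find(const user_var *arr, size_t n,
                            const char *key, size_t key_len)
{
  for (size_t i= 0; i < n; i++)
  {
    size_t len;
    const uchar *k= get_var_key_demo(&arr[i], &len);
    if (len == key_len && memcmp(k, key, len) == 0)
      return &arr[i];
  }
  return nullptr;
}

int main()
{
  user_var vars[]= {{"a", 1, 10}, {"bb", 2, 20}};
  const user_var *v= find(vars, 2, "bb", 2);
  printf("%d\n", v ? v->value : -1);   /* prints 20 */
  return 0;
}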
record; + auto statement= static_cast(record); *key_length= sizeof(statement->id); - return (uchar *) &((const Statement *) statement)->id; + return reinterpret_cast(&(statement)->id); } static void delete_statement_as_hash_key(void *key) @@ -4239,11 +4238,12 @@ static void delete_statement_as_hash_key(void *key) delete (Statement *) key; } -static uchar *get_stmt_name_hash_key(Statement *entry, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *get_stmt_name_hash_key(const void *entry_, size_t *length, + my_bool) { + auto entry= static_cast(entry_); *length= entry->name.length; - return (uchar*) entry->name.str; + return reinterpret_cast(entry->name.str); } C_MODE_END @@ -4259,9 +4259,9 @@ Statement_map::Statement_map() : my_hash_init(key_memory_prepared_statement_map, &st_hash, &my_charset_bin, START_STMT_HASH_SIZE, 0, 0, get_statement_id_as_hash_key, delete_statement_as_hash_key, MYF(0)); - my_hash_init(key_memory_prepared_statement_map, &names_hash, system_charset_info, START_NAME_HASH_SIZE, 0, 0, - (my_hash_get_key) get_stmt_name_hash_key, - NULL, MYF(0)); + my_hash_init(key_memory_prepared_statement_map, &names_hash, + system_charset_info, START_NAME_HASH_SIZE, 0, 0, + get_stmt_name_hash_key, NULL, MYF(0)); } @@ -5321,10 +5321,11 @@ extern "C" MYSQL_THD thd_increment_pending_ops(MYSQL_THD thd) end of async operation (such as end of group commit write flush) - @param thd THD + @param thd_ THD */ -extern "C" void thd_decrement_pending_ops(MYSQL_THD thd) +extern "C" void thd_decrement_pending_ops(void *thd_) { + THD *thd= static_cast(thd_); DBUG_ASSERT(thd); DBUG_ASSERT(thd->system_thread == NON_SYSTEM_THREAD); diff --git a/sql/sql_class.h b/sql/sql_class.h index b03a4a7ab92..c9e6d8d5f2d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -70,9 +70,9 @@ void set_thd_stage_info(void *thd, #include "wsrep.h" #include "wsrep_on.h" -#ifdef WITH_WSREP #include #include +#ifdef WITH_WSREP /* wsrep-lib */ #include "wsrep_client_service.h" #include "wsrep_client_state.h" @@ -5645,9 +5645,11 @@ public: query_id_t wsrep_last_query_id; XID wsrep_xid; - /** This flag denotes that record locking should be skipped during INSERT - and gap locking during SELECT. Only used by the streaming replication thread - that only modifies the wsrep_schema.SR table. */ + /** This flag denotes that record locking should be skipped during INSERT, + gap locking during SELECT, and write-write conflicts due to innodb + snapshot isolation during DELETE. + Only used by the streaming replication thread that only modifies the + mysql.wsrep_streaming_log table. 
*/ my_bool wsrep_skip_locking; mysql_cond_t COND_wsrep_thd; @@ -7277,10 +7279,10 @@ struct SORT_FIELD_ATTR CHARSET_INFO *cs; uint pack_sort_string(uchar *to, const Binary_string *str, CHARSET_INFO *cs) const; - int compare_packed_fixed_size_vals(uchar *a, size_t *a_len, - uchar *b, size_t *b_len); - int compare_packed_varstrings(uchar *a, size_t *a_len, - uchar *b, size_t *b_len); + int compare_packed_fixed_size_vals(const uchar *a, size_t *a_len, + const uchar *b, size_t *b_len); + int compare_packed_varstrings(const uchar *a, size_t *a_len, + const uchar *b, size_t *b_len); bool check_if_packing_possible(THD *thd) const; bool is_variable_sized() { return type == VARIABLE_SIZE; } void set_length_and_original_length(THD *thd, uint length_arg); diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index a0704a606c7..e3bca5ad967 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -305,17 +305,12 @@ end: started with corresponding variable that is greater then 0. */ -extern "C" uchar *get_key_conn(user_conn *buff, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" const uchar *get_key_conn(const void *buff_, size_t *length, + my_bool) { + auto buff= static_cast(buff_); *length= buff->len; - return (uchar*) buff->user; -} - - -extern "C" void free_user(struct user_conn *uc) -{ - my_free(uc); + return reinterpret_cast(buff->user); } @@ -323,8 +318,8 @@ void init_max_user_conn(void) { #ifndef NO_EMBEDDED_ACCESS_CHECKS my_hash_init(key_memory_user_conn, &hash_user_connections, - system_charset_info, max_connections, 0, 0, (my_hash_get_key) - get_key_conn, (my_hash_free_key) free_user, 0); + system_charset_info, max_connections, 0, 0, get_key_conn, + my_free, 0); #endif } @@ -403,16 +398,12 @@ static const char *get_client_host(THD *client) client->security_ctx->host ? 
client->security_ctx->host : ""; } -extern "C" uchar *get_key_user_stats(USER_STATS *user_stats, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" const uchar *get_key_user_stats(const void *user_stats_, + size_t *length, my_bool) { + auto user_stats= static_cast(user_stats_); *length= user_stats->user_name_length; - return (uchar*) user_stats->user; -} - -void free_user_stats(USER_STATS* user_stats) -{ - my_free(user_stats); + return reinterpret_cast(user_stats->user); } void init_user_stats(USER_STATS *user_stats, @@ -483,56 +474,44 @@ void init_user_stats(USER_STATS *user_stats, void init_global_user_stats(void) { - my_hash_init(PSI_INSTRUMENT_ME, &global_user_stats, system_charset_info, max_connections, - 0, 0, (my_hash_get_key) get_key_user_stats, - (my_hash_free_key) free_user_stats, 0); + my_hash_init(PSI_INSTRUMENT_ME, &global_user_stats, system_charset_info, + max_connections, 0, 0, get_key_user_stats, my_free, 0); } void init_global_client_stats(void) { - my_hash_init(PSI_INSTRUMENT_ME, &global_client_stats, system_charset_info, max_connections, - 0, 0, (my_hash_get_key) get_key_user_stats, - (my_hash_free_key) free_user_stats, 0); + my_hash_init(PSI_INSTRUMENT_ME, &global_client_stats, system_charset_info, + max_connections, 0, 0, get_key_user_stats, my_free, 0); } -extern "C" uchar *get_key_table_stats(TABLE_STATS *table_stats, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" const uchar *get_key_table_stats(const void *table_stats_, + size_t *length, my_bool) { + auto table_stats= static_cast(table_stats_); *length= table_stats->table_name_length; - return (uchar*) table_stats->table; -} - -extern "C" void free_table_stats(TABLE_STATS* table_stats) -{ - my_free(table_stats); + return reinterpret_cast(table_stats->table); } void init_global_table_stats(void) { my_hash_init(PSI_INSTRUMENT_ME, &global_table_stats, - Lex_ident_fs::charset_info(), - max_connections, 0, 0, (my_hash_get_key) get_key_table_stats, - (my_hash_free_key) free_table_stats, 0); + Lex_ident_fs::charset_info(), max_connections, 0, 0, + get_key_table_stats, my_free, 0); } -extern "C" uchar *get_key_index_stats(INDEX_STATS *index_stats, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" const uchar *get_key_index_stats(const void *index_stats_, + size_t *length, my_bool) { + auto index_stats= static_cast(index_stats_); *length= index_stats->index_name_length; - return (uchar*) index_stats->index; -} - -extern "C" void free_index_stats(INDEX_STATS* index_stats) -{ - my_free(index_stats); + return reinterpret_cast(index_stats->index); } void init_global_index_stats(void) { my_hash_init(PSI_INSTRUMENT_ME, &global_index_stats, - Lex_ident_fs::charset_info(), - max_connections, 0, 0, (my_hash_get_key) get_key_index_stats, - (my_hash_free_key) free_index_stats, 0); + Lex_ident_fs::charset_info(), max_connections, 0, 0, + get_key_index_stats, my_free, 0); } diff --git a/sql/sql_db.cc b/sql/sql_db.cc index a1198b538da..83a848d8d31 100644 --- a/sql/sql_db.cc +++ b/sql/sql_db.cc @@ -147,16 +147,17 @@ private: Hash_set m_set; mysql_rwlock_t m_lock; - static uchar *get_key(const LEX_STRING *ls, size_t *sz, my_bool) + static const uchar *get_key(const void *ls_, size_t *sz, my_bool) { + const LEX_STRING *ls= static_cast(ls_); *sz= ls->length; - return (uchar *) ls->str; + return reinterpret_cast(ls->str); } public: dbname_cache_t() : m_set(key_memory_dbnames_cache, table_alias_charset, 10, 0, - sizeof(char *), (my_hash_get_key) get_key, my_free, 0) + 
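/*
  Sketch of the free-callback simplification above, assuming the container
  only needs a void(*)(void*): elements allocated as a single malloc'ed block
  can pass the allocator's free function directly (which is why free_user,
  free_user_stats, free_table_stats and free_index_stats could be dropped in
  favour of my_free), while new'ed elements still need a small wrapper that
  casts and deletes.
*/
#include <cstdlib>
#include <string>

struct Handler { std::string name; };

static void free_handler(void *p)          /* wrapper needed: element was new'ed */
{
  delete static_cast<Handler*>(p);
}

typedef void (*free_fn)(void *);

static void destroy(void **elems, size_t n, free_fn f)
{
  for (size_t i= 0; i < n; i++)
    f(elems[i]);
}

int main()
{
  void *malloced[]= { std::malloc(16), std::malloc(32) };
  destroy(malloced, 2, std::free);         /* no wrapper, free() already matches */

  void *newed[]= { new Handler{"h1"}, new Handler{"h2"} };
  destroy(newed, 2, free_handler);
  return 0;
}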
sizeof(char *), get_key, my_free, 0) { mysql_rwlock_init(key_rwlock_LOCK_dbnames, &m_lock); } @@ -240,14 +241,14 @@ static int my_rmdir(const char *dir) Function we use in the creation of our hash to get key. */ -extern "C" uchar* dboptions_get_key(my_dbopt_t *opt, size_t *length, - my_bool not_used); +extern "C" const uchar *dboptions_get_key(const void *opt, size_t *length, + my_bool); -uchar* dboptions_get_key(my_dbopt_t *opt, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *dboptions_get_key(const void *opt_, size_t *length, my_bool) { + auto opt= static_cast(opt_); *length= opt->name_length; - return (uchar*) opt->name; + return reinterpret_cast(opt->name); } @@ -299,8 +300,8 @@ bool my_dboptions_cache_init(void) { dboptions_init= 1; error= my_hash_init(key_memory_dboptions_hash, &dboptions, - table_alias_charset, 32, 0, 0, (my_hash_get_key) - dboptions_get_key, free_dbopt, 0); + table_alias_charset, 32, 0, 0, dboptions_get_key, + free_dbopt, 0); } dbname_cache_init(); return error; @@ -333,7 +334,7 @@ void my_dbopt_cleanup(void) mysql_rwlock_wrlock(&LOCK_dboptions); my_hash_free(&dboptions); my_hash_init(key_memory_dboptions_hash, &dboptions, table_alias_charset, 32, - 0, 0, (my_hash_get_key) dboptions_get_key, free_dbopt, 0); + 0, 0, dboptions_get_key, free_dbopt, 0); mysql_rwlock_unlock(&LOCK_dboptions); } diff --git a/sql/sql_delete.cc b/sql/sql_delete.cc index 1e835efd9e6..68598c0e8ee 100644 --- a/sql/sql_delete.cc +++ b/sql/sql_delete.cc @@ -1032,10 +1032,11 @@ got_error: ***************************************************************************/ -extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b) +extern "C" int refpos_order_cmp(void *arg, const void *a, const void *b) { - handler *file= (handler*)arg; - return file->cmp_ref((const uchar*)a, (const uchar*)b); + auto file= static_cast(arg); + return file->cmp_ref(static_cast(a), + static_cast(b)); } diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 8e73662f3c6..ff61626e744 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -112,11 +112,12 @@ SQL_HANDLER::~SQL_HANDLER() Pointer to the TABLE_LIST struct. */ -static char *mysql_ha_hash_get_key(SQL_HANDLER *table, size_t *key_len, - my_bool first __attribute__((unused))) +static const uchar *mysql_ha_hash_get_key(const void *table_, size_t *key_len, + my_bool) { + auto table= static_cast(table_); *key_len= table->handler_name.length + 1 ; /* include '\0' in comparisons */ - return (char*) table->handler_name.str; + return reinterpret_cast(table->handler_name.str); } @@ -134,9 +135,9 @@ static char *mysql_ha_hash_get_key(SQL_HANDLER *table, size_t *key_len, Nothing */ -static void mysql_ha_hash_free(SQL_HANDLER *table) +static void mysql_ha_hash_free(void *table) { - delete table; + delete static_cast(table); } static void mysql_ha_close_childs(THD *thd, TABLE_LIST *current_table_list, @@ -291,8 +292,7 @@ bool mysql_ha_open(THD *thd, TABLE_LIST *tables, SQL_HANDLER *reopen) */ if (my_hash_init(key_memory_THD_handler_tables_hash, &thd->handler_tables_hash, &my_charset_latin1, - HANDLER_TABLES_HASH_SIZE, 0, 0, (my_hash_get_key) - mysql_ha_hash_get_key, (my_hash_free_key) + HANDLER_TABLES_HASH_SIZE, 0, 0, mysql_ha_hash_get_key, mysql_ha_hash_free, 0)) { DBUG_PRINT("exit",("ERROR")); diff --git a/sql/sql_hset.h b/sql/sql_hset.h index 41573fb5f03..95b52abec0a 100644 --- a/sql/sql_hset.h +++ b/sql/sql_hset.h @@ -31,11 +31,11 @@ public: /** Constructs an empty unique hash. 
*/ - Hash_set(PSI_memory_key psi_key, uchar *(*K)(const T *, size_t *, my_bool), + Hash_set(PSI_memory_key psi_key, + const uchar *(*K)(const void *, size_t *, my_bool), CHARSET_INFO *cs= &my_charset_bin) { - my_hash_init(psi_key, &m_hash, cs, START_SIZE, 0, 0, (my_hash_get_key)K, 0, - HASH_UNIQUE); + my_hash_init(psi_key, &m_hash, cs, START_SIZE, 0, 0, K, 0, HASH_UNIQUE); } Hash_set(PSI_memory_key psi_key, CHARSET_INFO *charset, ulong default_array_elements, diff --git a/sql/sql_lifo_buffer.h b/sql/sql_lifo_buffer.h index 04496ef74b8..afe47b5f415 100644 --- a/sql/sql_lifo_buffer.h +++ b/sql/sql_lifo_buffer.h @@ -117,7 +117,7 @@ public: bool is_empty() { return used_size() == 0; } virtual bool read() = 0; - void sort(qsort2_cmp cmp_func, void *cmp_func_arg) + void sort(qsort_cmp2 cmp_func, void *cmp_func_arg) { size_t elem_size= size1 + size2; size_t n_elements= used_size() / elem_size; diff --git a/sql/sql_list.h b/sql/sql_list.h index 55666f1358c..a55f4764145 100644 --- a/sql/sql_list.h +++ b/sql/sql_list.h @@ -129,8 +129,6 @@ struct list_node :public Sql_alloc } }; -typedef bool List_eq(void *a, void *b); - extern MYSQL_PLUGIN_IMPORT list_node end_of_list; class base_list :public Sql_alloc @@ -301,11 +299,12 @@ public: inline void **head_ref() { return first != &end_of_list ? &first->info : 0; } inline bool is_empty() { return first == &end_of_list ; } inline list_node *last_ref() { return &end_of_list; } - inline bool add_unique(void *info, List_eq *eq) + template + inline bool add_unique(T *info, bool (*eq)(T *a, T *b)) { list_node *node= first; for (; - node != &end_of_list && (!(*eq)(node->info, info)); + node != &end_of_list && (!(*eq)(static_cast(node->info), info)); node= node->next) ; if (node == &end_of_list) return push_back(info); @@ -513,7 +512,7 @@ public: inline void prepend(List *list) { base_list::prepend(list); } inline void disjoin(List *list) { base_list::disjoin(list); } inline bool add_unique(T *a, bool (*eq)(T *a, T *b)) - { return base_list::add_unique(a, (List_eq *)eq); } + { return base_list::add_unique(a, eq); } inline bool copy(const List *list, MEM_ROOT *root) { return base_list::copy(list, root); } void delete_elements(void) diff --git a/sql/sql_plugin.cc b/sql/sql_plugin.cc index ff8a93b1d21..0fa22875150 100644 --- a/sql/sql_plugin.cc +++ b/sql/sql_plugin.cc @@ -103,16 +103,16 @@ const LEX_CSTRING plugin_type_names[MYSQL_MAX_PLUGIN_TYPE_NUM]= { STRING_WITH_LEN("FUNCTION") } }; -extern int initialize_schema_table(st_plugin_int *plugin); -extern int finalize_schema_table(st_plugin_int *plugin); +extern int initialize_schema_table(void *plugin); +extern int finalize_schema_table(void *plugin); -extern int initialize_audit_plugin(st_plugin_int *plugin); -extern int finalize_audit_plugin(st_plugin_int *plugin); +extern int initialize_audit_plugin(void *plugin); +extern int finalize_audit_plugin(void *plugin); -extern int initialize_encryption_plugin(st_plugin_int *plugin); -extern int finalize_encryption_plugin(st_plugin_int *plugin); +extern int initialize_encryption_plugin(void *plugin); +extern int finalize_encryption_plugin(void *plugin); -extern int initialize_data_type_plugin(st_plugin_int *plugin); +extern int initialize_data_type_plugin(void *plugin); /* The number of elements in both plugin_type_initialize and @@ -1449,8 +1449,9 @@ static int plugin_do_initialize(struct st_plugin_int *plugin, uint &state) mysql_mutex_assert_not_owner(&LOCK_plugin); plugin_type_init init= plugin_type_initialize[plugin->plugin->type]; if (!init) - init= (plugin_type_init) 
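/*
  Sketch of the type-safe add_unique() template above: the equality predicate
  keeps its real element type, so the void*-based List_eq typedef and its cast
  are no longer needed. std::vector stands in for the intrusive List, and the
  return convention is simplified for the sketch.
*/
#include <cstdio>
#include <vector>

template <typename T>
bool add_unique(std::vector<T*> &list, T *info, bool (*eq)(T *a, T *b))
{
  for (T *elem : list)
    if (eq(elem, info))
      return false;                         /* already present, nothing added */
  list.push_back(info);
  return true;
}

struct Item { int id; };
static bool item_eq(Item *a, Item *b) { return a->id == b->id; }

int main()
{
  Item a{1}, b{2}, a2{1};
  std::vector<Item*> items;
  add_unique(items, &a, item_eq);
  add_unique(items, &b, item_eq);
  add_unique(items, &a2, item_eq);          /* duplicate id, not added */
  printf("%zu\n", items.size());            /* prints 2 */
  return 0;
}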
plugin->plugin->init; + init= plugin->plugin->init; if (init) + { if (int ret= init(plugin)) { /* Plugin init failed and did not requested a retry */ @@ -1458,6 +1459,7 @@ static int plugin_do_initialize(struct st_plugin_int *plugin, uint &state) print_init_failed_error(plugin); DBUG_RETURN(ret); } + } state= PLUGIN_IS_READY; // plugin->init() succeeded if (plugin->plugin->status_vars) @@ -1516,25 +1518,23 @@ static int plugin_initialize(MEM_ROOT *tmp_root, struct st_plugin_int *plugin, } -extern "C" uchar *get_plugin_hash_key(const uchar *, size_t *, my_bool); -extern "C" uchar *get_bookmark_hash_key(const uchar *, size_t *, my_bool); +extern "C" const uchar *get_plugin_hash_key(const void *, size_t *, my_bool); +extern "C" const uchar *get_bookmark_hash_key(const void *, size_t *, my_bool); -uchar *get_plugin_hash_key(const uchar *buff, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *get_plugin_hash_key(const void *buff, size_t *length, my_bool) { - struct st_plugin_int *plugin= (st_plugin_int *)buff; - *length= (uint)plugin->name.length; - return((uchar *)plugin->name.str); + auto plugin= static_cast(buff); + *length= plugin->name.length; + return reinterpret_cast(plugin->name.str); } -uchar *get_bookmark_hash_key(const uchar *buff, size_t *length, - my_bool not_used __attribute__((unused))) +const uchar *get_bookmark_hash_key(const void *buff, size_t *length, my_bool) { - struct st_bookmark *var= (st_bookmark *)buff; + auto var= static_cast(buff); *length= var->name_len + 1; - return (uchar*) var->key; + return reinterpret_cast(var->key); } static inline void convert_dash_to_underscore(char *str, size_t len) @@ -3402,7 +3402,7 @@ void plugin_thdvar_cleanup(THD *thd) if ((idx= thd->lex->plugins.elements)) { list= ((plugin_ref*) thd->lex->plugins.buffer) + idx - 1; - DBUG_PRINT("info",("unlocking %d plugins", idx)); + DBUG_PRINT("info",("unlocking %zu plugins", idx)); while ((uchar*) list >= thd->lex->plugins.buffer) intern_plugin_unlock(NULL, *list--); } diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h index d4df8c6468f..d9e48ff64b1 100644 --- a/sql/sql_plugin.h +++ b/sql/sql_plugin.h @@ -152,7 +152,7 @@ typedef struct st_plugin_int **plugin_ref; #define plugin_equals(p1,p2) ((p1) && (p2) && (p1)[0] == (p2)[0]) #endif -typedef int (*plugin_type_init)(struct st_plugin_int *); +typedef int (*plugin_type_init)(void *); extern I_List *opt_plugin_load_list_ptr; extern char *opt_plugin_dir_ptr; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 96500ff63e3..100237ba6ef 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -137,7 +137,7 @@ static bool update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse, uint tables, COND *conds, table_map table_map, SELECT_LEX *select_lex, SARGABLE_PARAM **sargables); -static int sort_keyuse(KEYUSE *a,KEYUSE *b); +static int sort_keyuse(const void *a, const void *b); static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables); static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse, bool allow_full_scan, table_map used_tables); @@ -165,9 +165,9 @@ best_extension_by_limited_search(JOIN *join, table_map *processed_eq_ref_tables); static uint determine_search_depth(JOIN* join); C_MODE_START -static int join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2); -static int join_tab_cmp_straight(const void *dummy, const void* ptr1, const void* ptr2); -static int join_tab_cmp_embedded_first(const void *emb, const void* ptr1, const void *ptr2); +static int join_tab_cmp(void *dummy, const void* 
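/*
  Sketch of the plugin_type_init shape above: once every type-specific
  initializer takes void* and casts to its own plugin struct internally,
  plugin->plugin->init can be stored in the same function pointer without a
  cast. The struct here is a simplified stand-in for st_plugin_int.
*/
#include <cstdio>

typedef int (*plugin_type_init)(void *);

struct st_plugin_demo { const char *name; int data; };

static int initialize_demo_plugin(void *plugin_)
{
  st_plugin_demo *plugin= static_cast<st_plugin_demo*>(plugin_);
  plugin->data= 42;                              /* pretend to set up the plugin */
  printf("initialized %s\n", plugin->name);
  return 0;                                      /* 0 = success */
}

int main()
{
  st_plugin_demo audit= {"audit_demo", 0};
  plugin_type_init init= initialize_demo_plugin; /* no function-pointer cast */
  return init(&audit);
}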
ptr1, const void* ptr2); +static int join_tab_cmp_straight(void *dummy, const void* ptr1, const void* ptr2); +static int join_tab_cmp_embedded_first(void *emb, const void* ptr1, const void *ptr2); C_MODE_END static uint cache_record_length(JOIN *join,uint index); static store_key *get_store_key(THD *thd, @@ -7381,8 +7381,10 @@ add_ft_keys(DYNAMIC_ARRAY *keyuse_array, static int -sort_keyuse(KEYUSE *a,KEYUSE *b) +sort_keyuse(const void *a_, const void *b_) { + const KEYUSE *a= static_cast(a_); + const KEYUSE *b= static_cast(b_); int res; if (a->table->tablenr != b->table->tablenr) return (int) (a->table->tablenr - b->table->tablenr); @@ -10056,7 +10058,7 @@ choose_plan(JOIN *join, table_map join_tables, TABLE_LIST *emb_sjm_nest) join->thd->variables.optimizer_use_condition_selectivity; bool straight_join= MY_TEST(join->select_options & SELECT_STRAIGHT_JOIN); THD *thd= join->thd; - qsort2_cmp jtab_sort_func; + qsort_cmp2 jtab_sort_func; DBUG_ENTER("choose_plan"); join->limit_optimization_mode= false; @@ -10203,7 +10205,7 @@ choose_plan(JOIN *join, table_map join_tables, TABLE_LIST *emb_sjm_nest) 1 - jt1 > jt2 */ -static int compare_embedding_subqueries(JOIN_TAB *jt1, JOIN_TAB *jt2) +static int compare_embedding_subqueries(const JOIN_TAB *jt1, const JOIN_TAB *jt2) { /* Determine if the first table is originally from a subquery */ TABLE_LIST *tbl1= jt1->table->pos_in_table_list; @@ -10278,10 +10280,10 @@ static int compare_embedding_subqueries(JOIN_TAB *jt1, JOIN_TAB *jt2) */ static int -join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2) +join_tab_cmp(void *, const void* ptr1, const void* ptr2) { - JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; - JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + auto jt1= *(static_cast(ptr1)); + auto jt2= *(static_cast(ptr2)); int cmp; if ((cmp= compare_embedding_subqueries(jt1, jt2)) != 0) @@ -10303,10 +10305,10 @@ join_tab_cmp(const void *dummy, const void* ptr1, const void* ptr2) */ static int -join_tab_cmp_straight(const void *dummy, const void* ptr1, const void* ptr2) +join_tab_cmp_straight(void *, const void* ptr1, const void* ptr2) { - JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; - JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + auto jt1= *(static_cast(ptr1)); + auto jt2= *(static_cast(ptr2)); /* We don't do subquery flattening if the parent or child select has @@ -10339,11 +10341,11 @@ join_tab_cmp_straight(const void *dummy, const void* ptr1, const void* ptr2) */ static int -join_tab_cmp_embedded_first(const void *emb, const void* ptr1, const void* ptr2) +join_tab_cmp_embedded_first(void *emb, const void* ptr1, const void* ptr2) { - const TABLE_LIST *emb_nest= (TABLE_LIST*) emb; - JOIN_TAB *jt1= *(JOIN_TAB**) ptr1; - JOIN_TAB *jt2= *(JOIN_TAB**) ptr2; + TABLE_LIST *emb_nest= static_cast(emb); + auto jt1= *(static_cast(ptr1)); + auto jt2= *(static_cast(ptr2)); if (jt1->emb_sj_nest == emb_nest && jt2->emb_sj_nest != emb_nest) return -1; @@ -11366,9 +11368,11 @@ struct SORT_POSITION (same table order as used in the original SQL query) */ -static int -sort_positions(SORT_POSITION *a, SORT_POSITION *b) +static int sort_positions(const void *a_, const void *b_) { + const SORT_POSITION *a= static_cast(a_); + const SORT_POSITION *b= static_cast(b_); + int cmp; if ((cmp= compare_embedding_subqueries(*a->join_tab, *b->join_tab)) != 0) return cmp; @@ -12018,8 +12022,7 @@ best_extension_by_limited_search(JOIN *join, Sort tables in ascending order of generated row combinations */ if (found_tables > 1) - my_qsort(sort, found_tables, sizeof(SORT_POSITION), - (qsort_cmp) sort_positions); + 
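/*
  Sketch of the three-argument comparator shape used by join_tab_cmp() and
  friends above: the first void* carries caller context, the other two point
  at the elements. The insertion sort below is only a stand-in for my_qsort2().
*/
#include <cstdio>
#include <cstring>

typedef int (*qsort_cmp2_demo)(void *ctx, const void *a, const void *b);

static void sort2(void *base, size_t n, size_t size,
                  qsort_cmp2_demo cmp, void *ctx)
{
  char *arr= static_cast<char*>(base);
  char tmp[64];                                    /* assumes size <= 64 */
  for (size_t i= 1; i < n; i++)
    for (size_t j= i; j > 0 && cmp(ctx, arr + j*size, arr + (j-1)*size) < 0; j--)
    {
      memcpy(tmp, arr + j*size, size);
      memcpy(arr + j*size, arr + (j-1)*size, size);
      memcpy(arr + (j-1)*size, tmp, size);
    }
}

/* Order integers by distance from a pivot passed as context. */
static int cmp_by_distance(void *ctx, const void *a_, const void *b_)
{
  int pivot= *static_cast<int*>(ctx);
  int a= *static_cast<const int*>(a_), b= *static_cast<const int*>(b_);
  int da= a > pivot ? a - pivot : pivot - a;
  int db= b > pivot ? b - pivot : pivot - b;
  return (da > db) - (da < db);
}

int main()
{
  int vals[]= {1, 9, 4, 7};
  int pivot= 5;
  sort2(vals, 4, sizeof(int), cmp_by_distance, &pivot);
  for (int v : vals)
    printf("%d ", v);                              /* 4 7 1 9 (ties keep order) */
  printf("\n");
  return 0;
}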
my_qsort(sort, found_tables, sizeof(SORT_POSITION), sort_positions); } DBUG_ASSERT(join->next_sort_position <= join->sort_positions + join->sort_space); @@ -23341,7 +23344,7 @@ free_tmp_table(THD *thd, TABLE *entry) thd->tmp_tables_size+= (entry->file->stats.data_file_length + entry->file->stats.index_file_length); } - entry->file->ha_drop_table(entry->s->path.str); + entry->file->drop_table(entry->s->path.str); delete entry->file; entry->file= NULL; entry->reset_created(); @@ -28045,8 +28048,7 @@ static int remove_dup_with_hash_index(THD *thd, TABLE *table, (*field_length++)= (*ptr)->sort_length(); if (my_hash_init(key_memory_hash_index_key_buffer, &hash, &my_charset_bin, - (uint) file->stats.records, 0, key_length, - (my_hash_get_key) 0, 0, 0)) + (uint) file->stats.records, 0, key_length, 0, 0, 0)) { my_free(key_buffer); DBUG_RETURN(1); diff --git a/sql/sql_select.h b/sql/sql_select.h index 242c54138a8..faea74b6ce7 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -2018,7 +2018,7 @@ int opt_sum_query(THD* thd, List &tables, List &all_fields, COND *conds); /* from sql_delete.cc, used by opt_range.cc */ -extern "C" int refpos_order_cmp(void* arg, const void *a,const void *b); +extern "C" int refpos_order_cmp(void *arg, const void *a,const void *b); /** class to copying an field/item to a key struct */ @@ -2709,15 +2709,7 @@ void propagate_new_equalities(THD *thd, Item *cond, COND_EQUAL *inherited, bool *is_simplifiable_cond); -template T prev_bits(T n_bits) -{ - if (!n_bits) - return 0; - T tmp= ((T)1 << (n_bits - 1)); - return (tmp - 1) | tmp; -} -// A wrapper for the above function: -#define PREV_BITS(type, A) prev_bits(A) +#define PREV_BITS(type, N_BITS) ((type)my_set_bits(N_BITS)) bool dbug_user_var_equals_str(THD *thd, const char *name, const char *value); diff --git a/sql/sql_servers.cc b/sql/sql_servers.cc index 8d716e85890..3ca5e0d1736 100644 --- a/sql/sql_servers.cc +++ b/sql/sql_servers.cc @@ -81,16 +81,17 @@ static int update_server_record_in_cache(FOREIGN_SERVER *existing, /* utility functions */ static void merge_server_struct(FOREIGN_SERVER *from, FOREIGN_SERVER *to); -static uchar *servers_cache_get_key(FOREIGN_SERVER *server, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *servers_cache_get_key(const void *server_, size_t *length, + my_bool) { + auto server= static_cast(server_); DBUG_ENTER("servers_cache_get_key"); DBUG_PRINT("info", ("server_name_length %zd server_name %s", server->server_name_length, server->server_name)); - *length= (uint) server->server_name_length; - DBUG_RETURN((uchar*) server->server_name); + *length= server->server_name_length; + DBUG_RETURN(reinterpret_cast(server->server_name)); } static PSI_memory_key key_memory_servers; @@ -232,8 +233,8 @@ bool servers_init(bool dont_read_servers_table) DBUG_RETURN(TRUE); /* initialise our servers cache */ - if (my_hash_init(key_memory_servers, &servers_cache, system_charset_info, 32, 0, 0, - (my_hash_get_key) servers_cache_get_key, 0, 0)) + if (my_hash_init(key_memory_servers, &servers_cache, system_charset_info, 32, + 0, 0, servers_cache_get_key, 0, 0)) { return_val= TRUE; /* we failed, out of memory? 
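/*
  Sketch of what the removed prev_bits() template computed and what
  PREV_BITS()/my_set_bits() is assumed to produce above: a value with the N
  lowest bits set, written so that N == 0 and N == full width never shift out
  of range. Assumes unsigned long long is at least 64 bits.
*/
#include <cstdio>

static unsigned long long low_bits(unsigned n_bits)
{
  if (n_bits == 0)
    return 0;
  unsigned long long top= 1ULL << (n_bits - 1);   /* highest requested bit */
  return (top - 1) | top;                         /* fill everything below it */
}

int main()
{
  printf("%llx\n", low_bits(0));    /* 0 */
  printf("%llx\n", low_bits(4));    /* f */
  printf("%llx\n", low_bits(64));   /* ffffffffffffffff */
  return 0;
}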
*/ goto end; diff --git a/sql/sql_show.cc b/sql/sql_show.cc index da2dbaea232..cd3f09b735b 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -69,6 +69,8 @@ #include "key.h" #include "lex_symbol.h" +#include "mysql/plugin_function.h" + #define KEYWORD_SIZE 64 extern SYMBOL symbols[]; @@ -641,14 +643,13 @@ ignore_db_dirs_init() @return a pointer to the key */ -static uchar * -db_dirs_hash_get_key(const uchar *data, size_t *len_ret, - my_bool __attribute__((unused))) +static const uchar *db_dirs_hash_get_key(const void *data, size_t *len_ret, + my_bool) { - LEX_CSTRING *e= (LEX_CSTRING *) data; + auto e= static_cast(data); *len_ret= e->length; - return (uchar *) e->str; + return reinterpret_cast(e->str); } @@ -8452,6 +8453,30 @@ int fill_i_s_keywords(THD *thd, TABLE_LIST *tables, COND *cond) DBUG_RETURN(0); } + +class Add_func_arg +{ +public: + TABLE *m_table; + Add_func_arg(TABLE *table) + :m_table(table) + { } +}; + + +static my_bool add_plugin_func(THD *thd, plugin_ref plugin, void *arg) +{ + Add_func_arg *add_func_arg= (Add_func_arg*) arg; + char buf[NAME_LEN + 1]; + const LEX_CSTRING name= plugin_name(plugin)[0]; + size_t length= my_charset_utf8mb3_bin.caseup(name.str, name.length, + buf, sizeof(buf)-1); + buf[length]= '\0'; + if (add_symbol_to_table(buf, add_func_arg->m_table)) + return 1; + return 0; +} + int fill_i_s_sql_functions(THD *thd, TABLE_LIST *tables, COND *cond) { DBUG_ENTER("fill_i_s_sql_functions"); @@ -8467,6 +8492,11 @@ int fill_i_s_sql_functions(THD *thd, TABLE_LIST *tables, COND *cond) table)) DBUG_RETURN(1); + Add_func_arg add_func_arg(table); + if (plugin_foreach(thd, add_plugin_func, + MariaDB_FUNCTION_PLUGIN, &add_func_arg)) + DBUG_RETURN(1); + DBUG_RETURN(0); } @@ -9542,7 +9572,7 @@ static double fix_cost(double cost) } static int run_fill_optimizer_costs_tables(const LEX_CSTRING *name, - const OPTIMIZER_COSTS *costs, + OPTIMIZER_COSTS *costs, TABLE *table) { THD *thd= table->in_use; @@ -10456,8 +10486,9 @@ ST_SCHEMA_TABLE schema_tables[]= static_assert(array_elements(schema_tables) == SCH_ENUM_SIZE + 1, "Update enum_schema_tables as well."); -int initialize_schema_table(st_plugin_int *plugin) +int initialize_schema_table(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); ST_SCHEMA_TABLE *schema_table; int err; DBUG_ENTER("initialize_schema_table"); @@ -10502,8 +10533,9 @@ int initialize_schema_table(st_plugin_int *plugin) DBUG_RETURN(0); } -int finalize_schema_table(st_plugin_int *plugin) +int finalize_schema_table(void *plugin_) { + st_plugin_int *plugin= static_cast(plugin_); int deinit_status= 0; ST_SCHEMA_TABLE *schema_table= (ST_SCHEMA_TABLE *)plugin->data; DBUG_ENTER("finalize_schema_table"); diff --git a/sql/sql_sort.h b/sql/sql_sort.h index 2b2c08b59cd..6905d472d5e 100644 --- a/sql/sql_sort.h +++ b/sql/sql_sort.h @@ -18,7 +18,7 @@ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */ #include "my_base.h" /* ha_rows */ -#include /* qsort2_cmp */ +#include #include "queues.h" #include "sql_string.h" #include "sql_class.h" @@ -578,7 +578,7 @@ public: bool not_killable; String tmp_buffer; // The fields below are used only by Unique class. - qsort2_cmp compare; + qsort_cmp2 compare; BUFFPEK_COMPARE_CONTEXT cmp_context; Sort_param() @@ -688,7 +688,7 @@ public: void try_to_pack_sortkeys(); - qsort2_cmp get_compare_function() const + qsort_cmp2 get_compare_function() const { return using_packed_sortkeys() ? 
get_packed_keys_compare_ptr() : diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 51f1595b83b..286d6a262d2 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1789,8 +1789,8 @@ public: table_field= field; tree_key_length= field->pack_length(); - tree= new Unique((qsort_cmp2) simple_str_key_cmp, (void*) field, - tree_key_length, max_heap_table_size, 1); + tree= new Unique(simple_str_key_cmp, field, tree_key_length, + max_heap_table_size, 1); } virtual ~Count_distinct_field() @@ -1877,13 +1877,13 @@ public: static -int simple_ulonglong_key_cmp(void* arg, uchar* key1, uchar* key2) +int simple_ulonglong_key_cmp(void*, const void* key1, const void* key2) { - ulonglong *val1= (ulonglong *) key1; - ulonglong *val2= (ulonglong *) key2; + const ulonglong *val1= static_cast(key1); + const ulonglong *val2= static_cast(key2); return *val1 > *val2 ? 1 : *val1 == *val2 ? 0 : -1; } - + /* The class Count_distinct_field_bit is derived from the class @@ -1900,8 +1900,7 @@ public: table_field= field; tree_key_length= sizeof(ulonglong); - tree= new Unique((qsort_cmp2) simple_ulonglong_key_cmp, - (void*) &tree_key_length, + tree= new Unique(simple_ulonglong_key_cmp, &tree_key_length, tree_key_length, max_heap_table_size, 1); } diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 748ae999087..0368bfa7194 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -2112,8 +2112,10 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db, PRIMARY keys are prioritized. */ -static int sort_keys(KEY *a, KEY *b) +static int sort_keys(const void *a_, const void *b_) { + const KEY *a= static_cast(a_); + const KEY *b= static_cast(b_); ulong a_flags= a->flags, b_flags= b->flags; /* @@ -2459,6 +2461,34 @@ bool Column_definition::prepare_stage1_typelib(THD *thd, bool Column_definition::prepare_stage1_string(THD *thd, MEM_ROOT *mem_root) { + if (real_field_type() == FIELD_TYPE_STRING && + length*charset->mbmaxlen > 1024) + { + DBUG_ASSERT(charset->mbmaxlen > 4); + /* + Convert long CHAR columns to VARCHAR. + CHAR has an octet length limit of 1024 bytes. + The code in Binlog_type_info_fixed_string::Binlog_type_info_fixed_string + relies on this limit. If octet length of a CHAR column is greater + than 1024, then it cannot write its metadata to binlog properly. + In case of the filename character set with mbmaxlen=5, + the maximum possible character length is 1024/5=204 characters. + Upgrade to VARCHAR if octet length is greater than 1024. + */ + char warn_buff[MYSQL_ERRMSG_SIZE]; + if (thd->is_strict_mode()) + { + my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), field_name.str, + static_cast(1024 / charset->mbmaxlen)); + return true; + } + set_handler(&type_handler_varchar); + my_snprintf(warn_buff, sizeof(warn_buff), ER_THD(thd, ER_AUTO_CONVERT), + field_name.str, "CHAR", "VARCHAR"); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, + warn_buff); + } + create_length_to_internal_length_string(); if (prepare_blob_field(thd)) return true; @@ -3958,33 +3988,6 @@ bool Column_definition::prepare_blob_field(THD *thd) { DBUG_ENTER("Column_definition::prepare_blob_field"); - if (real_field_type() == FIELD_TYPE_STRING && length > 1024) - { - DBUG_ASSERT(charset->mbmaxlen > 4); - /* - Convert long CHAR columns to VARCHAR. - CHAR has an octet length limit of 1024 bytes. - The code in Binlog_type_info_fixed_string::Binlog_type_info_fixed_string - relies on this limit. If octet length of a CHAR column is greater - than 1024, then it cannot write its metadata to binlog properly. 
- In case of the filename character set with mbmaxlen=5, - the maximum possible character length is 1024/5=204 characters. - Upgrade to VARCHAR if octet length is greater than 1024. - */ - char warn_buff[MYSQL_ERRMSG_SIZE]; - if (thd->is_strict_mode()) - { - my_error(ER_TOO_BIG_FIELDLENGTH, MYF(0), field_name.str, - static_cast(1024 / charset->mbmaxlen)); - DBUG_RETURN(1); - } - set_handler(&type_handler_varchar); - my_snprintf(warn_buff, sizeof(warn_buff), ER_THD(thd, ER_AUTO_CONVERT), - field_name.str, "CHAR", "VARCHAR"); - push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, ER_AUTO_CONVERT, - warn_buff); - } - if (length > MAX_FIELD_VARCHARLENGTH && !(flags & BLOB_FLAG)) { /* Convert long VARCHAR columns to TEXT or BLOB */ @@ -6404,9 +6407,9 @@ remove_key: } if (!part_elem) { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_PARTITION_DOES_NOT_EXIST, - ER_THD(thd, ER_PARTITION_DOES_NOT_EXIST)); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_PARTITION_DOES_NOT_EXIST, + ER_THD(thd, ER_PARTITION_DOES_NOT_EXIST)); names_it.remove(); } } @@ -6513,8 +6516,10 @@ static bool fix_constraints_names(THD *thd, List } -static int compare_uint(const uint *s, const uint *t) +static int compare_uint(const void *s_, const void *t_) { + const uint *s= static_cast(s_); + const uint *t= static_cast(t_); return (*s < *t) ? -1 : ((*s > *t) ? 1 : 0); } diff --git a/sql/sql_trigger.cc b/sql/sql_trigger.cc index 20c2afcb25c..e34e57ad7ef 100644 --- a/sql/sql_trigger.cc +++ b/sql/sql_trigger.cc @@ -2015,10 +2015,8 @@ static bool add_table_for_trigger_internal(THD *thd, { if (if_exists) { - push_warning_printf(thd, - Sql_condition::WARN_LEVEL_NOTE, - ER_TRG_DOES_NOT_EXIST, - ER_THD(thd, ER_TRG_DOES_NOT_EXIST)); + push_warning(thd, Sql_condition::WARN_LEVEL_NOTE, + ER_TRG_DOES_NOT_EXIST, ER_THD(thd, ER_TRG_DOES_NOT_EXIST)); *table= NULL; diff --git a/sql/sql_type.cc b/sql/sql_type.cc index 9ae993f1708..12c971b3bd7 100644 --- a/sql/sql_type.cc +++ b/sql/sql_type.cc @@ -5959,9 +5959,9 @@ cmp_item *Type_handler_timestamp_common::make_cmp_item(THD *thd, /***************************************************************************/ -static int srtcmp_in(const void *cs_, const void *x_, const void *y_) +static int srtcmp_in(void *cs_, const void *x_, const void *y_) { - const CHARSET_INFO *cs= static_cast(cs_); + CHARSET_INFO *cs= static_cast(cs_); const String *x= static_cast(x_); const String *y= static_cast(y_); return cs->strnncollsp(x->ptr(), x->length(), y->ptr(), y->length()); @@ -5971,12 +5971,10 @@ in_vector *Type_handler_string_result::make_in_vector(THD *thd, const Item_func_in *func, uint nargs) const { - return new (thd->mem_root) in_string(thd, nargs, (qsort2_cmp) srtcmp_in, - func->compare_collation()); - + return new (thd->mem_root) + in_string(thd, nargs, srtcmp_in, func->compare_collation()); } - in_vector *Type_handler_int_result::make_in_vector(THD *thd, const Item_func_in *func, uint nargs) const @@ -6267,7 +6265,7 @@ longlong Type_handler_timestamp_common:: longlong Type_handler_numeric:: Item_func_min_max_val_int(Item_func_min_max *func) const { - return func->val_int_native(); + return is_unsigned() ? 
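/*
  Worked sketch of the octet-length check moved into prepare_stage1_string()
  above: a CHAR column may occupy at most 1024 bytes, so the character limit
  depends on the character set's mbmaxlen. With mbmaxlen=5 (the filename
  charset) that is 1024/5 = 204 characters; anything longer is upgraded to
  VARCHAR, or rejected in strict mode. Plain integers stand in for the
  Column_definition fields.
*/
#include <cstdio>

static bool char_fits_in_1024(unsigned char_length, unsigned mbmaxlen)
{
  return char_length * mbmaxlen <= 1024;
}

int main()
{
  printf("%u\n", 1024 / 5);                         /* 204 */
  printf("%d\n", char_fits_in_1024(204, 5));        /* 1: stays CHAR */
  printf("%d\n", char_fits_in_1024(205, 5));        /* 0: becomes VARCHAR */
  return 0;
}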
func->val_uint_native() : func->val_int_native(); } @@ -9819,9 +9817,11 @@ Charset::eq_collation_specific_names(CHARSET_INFO *cs) const return name0.length && !cmp(&name0, &name1); } -int initialize_data_type_plugin(st_plugin_int *plugin) +int initialize_data_type_plugin(void *plugin_) { - st_mariadb_data_type *data= (st_mariadb_data_type*) plugin->plugin->info; + st_plugin_int *plugin= static_cast(plugin_); + st_mariadb_data_type *data= + static_cast(plugin->plugin->info); data->type_handler->set_name(Name(plugin->name)); if (plugin->plugin->init && plugin->plugin->init(NULL)) { diff --git a/sql/sql_type.h b/sql/sql_type.h index 47d16677cfa..24c766ffd81 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -723,7 +723,7 @@ public: { return m_usec ? my_snprintf(to, nbytes, "%s%llu.%06lu", - m_neg ? "-" : "", m_sec, (uint) m_usec) : + m_neg ? "-" : "", m_sec, m_usec) : my_snprintf(to, nbytes, "%s%llu", m_neg ? "-" : "", m_sec); } void make_truncated_warning(THD *thd, const char *type_str) const; diff --git a/sql/sql_type_fixedbin.h b/sql/sql_type_fixedbin.h index 46c92f03c55..0879ef040ad 100644 --- a/sql/sql_type_fixedbin.h +++ b/sql/sql_type_fixedbin.h @@ -772,9 +772,9 @@ public: Fbt_null tmp(arg); return m_null_value || tmp.is_null() ? UNKNOWN : m_native.cmp(tmp) != 0; } - int compare(cmp_item *ci) override + int compare(const cmp_item *ci) const override { - cmp_item_fbt *tmp= static_cast(ci); + const cmp_item_fbt *tmp= static_cast(ci); DBUG_ASSERT(!m_null_value); DBUG_ASSERT(!tmp->m_null_value); return m_native.cmp(tmp->m_native); @@ -788,13 +788,13 @@ public: class in_fbt :public in_vector { Fbt m_value; - static int cmp_fbt(void *cmp_arg, Fbt *a, Fbt *b) + static int cmp_fbt(void *cmp_arg, const void *a, const void *b) { - return a->cmp(*b); + return static_cast(a)->cmp(*static_cast(b)); } public: in_fbt(THD *thd, uint elements) - :in_vector(thd, elements, sizeof(Fbt), (qsort2_cmp) cmp_fbt, 0), + :in_vector(thd, elements, sizeof(Fbt), cmp_fbt, 0), m_value(Fbt::zero()) { } const Type_handler *type_handler() const override diff --git a/sql/sql_udf.cc b/sql/sql_udf.cc index f84a91e25ce..bb130ee7b10 100644 --- a/sql/sql_udf.cc +++ b/sql/sql_udf.cc @@ -104,12 +104,11 @@ static const char *init_syms(udf_func *tmp, char *nm) } -extern "C" uchar* get_hash_key(const uchar *buff, size_t *length, - my_bool not_used __attribute__((unused))) +extern "C" const uchar *get_hash_key(const void *buff, size_t *length, my_bool) { - udf_func *udf=(udf_func*) buff; - *length=(uint) udf->name.length; - return (uchar*) udf->name.str; + auto udf= static_cast(buff); + *length= udf->name.length; + return reinterpret_cast(udf->name.str); } static PSI_memory_key key_memory_udf_mem; diff --git a/sql/sql_udf.h b/sql/sql_udf.h index 5bd2c6e5445..83e2df48eda 100644 --- a/sql/sql_udf.h +++ b/sql/sql_udf.h @@ -30,7 +30,7 @@ typedef void (*Udf_func_clear)(UDF_INIT *, uchar *, uchar *); typedef void (*Udf_func_add)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *); typedef void (*Udf_func_deinit)(UDF_INIT*); typedef my_bool (*Udf_func_init)(UDF_INIT *, UDF_ARGS *, char *); -typedef void (*Udf_func_any)(); +typedef void *Udf_func_any; typedef double (*Udf_func_double)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *); typedef longlong (*Udf_func_longlong)(UDF_INIT *, UDF_ARGS *, uchar *, uchar *); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 85717713325..dc97f505ae1 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -2307,17 +2307,17 @@ master_def: if (unlikely(Lex->mi.heartbeat_period > slave_net_timeout)) { - 
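/*
  Sketch of why the LEAST/GREATEST change above distinguishes the signed and
  unsigned paths: a 64-bit unsigned value above LLONG_MAX turns negative when
  it is read back through the signed path (on the usual two's-complement
  platforms), which inverts the comparison result.
*/
#include <cstdio>

int main()
{
  unsigned long long big= 18446744073709551615ULL;  /* UINT64 max */
  unsigned long long small= 1;

  /* Correct, unsigned comparison: */
  printf("%llu\n", big < small ? big : small);      /* 1 */

  /* Same bits squeezed through signed long long: big becomes -1 and "wins". */
  long long sbig= (long long) big, ssmall= (long long) small;
  printf("%lld\n", sbig < ssmall ? sbig : ssmall);  /* -1 */
  return 0;
}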
push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX, - ER_THD(thd, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX)); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX, + ER_THD(thd, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX)); } if (unlikely(Lex->mi.heartbeat_period < 0.001)) { if (unlikely(Lex->mi.heartbeat_period != 0.0)) { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN, - ER_THD(thd, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN)); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN, + ER_THD(thd, ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN)); Lex->mi.heartbeat_period= 0.0; } Lex->mi.heartbeat_opt= LEX_MASTER_INFO::LEX_MI_DISABLE; @@ -6734,6 +6734,11 @@ collation_name: collation_name_or_default: collation_name { $$=$1; } | DEFAULT { $$.set_collate_default(); } + | BINARY // MySQL compatibility + { + const Lex_exact_collation bin(&my_charset_bin); + $$= Lex_extended_collation(bin); + } ; opt_default: @@ -6800,6 +6805,22 @@ binary: { $$.set_collate_default(); } + | charset_or_alias COLLATE_SYM BINARY // MySQL compatibility + { + const Lex_exact_collation bin(&my_charset_bin); + Lex_extended_collation tmp(bin); + if (tmp.merge_exact_charset(thd, + thd->variables.character_set_collations, + Lex_exact_charset($1))) + MYSQL_YYABORT; + $$= Lex_exact_charset_extended_collation_attrs(tmp); + } + | COLLATE_SYM BINARY // MySQL compatibility + { + const Lex_exact_collation bin(&my_charset_bin); + const Lex_extended_collation tmp(bin); + $$= Lex_exact_charset_extended_collation_attrs(tmp); + } ; opt_bin_mod: diff --git a/sql/sys_vars.inl b/sql/sys_vars.inl index ee18b8d7428..9b894ce51cc 100644 --- a/sql/sys_vars.inl +++ b/sql/sys_vars.inl @@ -336,7 +336,11 @@ public: if (var->value->result_type() == STRING_RESULT) { - if (!(res=var->value->val_str(&str))) + /* + Convert from the expression character set to ascii. + This is OK, as typelib values cannot have non-ascii characters. + */ + if (!(res= var->value->val_str_ascii(&str))) return true; else if (!(var->save_result.ulonglong_value= diff --git a/sql/table.cc b/sql/table.cc index 9e5c42cf7b6..7ad11749427 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -250,11 +250,11 @@ View_creation_ctx * View_creation_ctx::create(THD *thd, /* Get column name from column hash */ -static uchar *get_field_name(Field **buff, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *get_field_name(const void *buff_, size_t *length, my_bool) { - *length= (uint) (*buff)->field_name.length; - return (uchar*) (*buff)->field_name.str; + auto buff= static_cast(buff_); + *length= (*buff)->field_name.length; + return reinterpret_cast((*buff)->field_name.str); } @@ -2355,7 +2355,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, if (use_hash) use_hash= !my_hash_init(PSI_INSTRUMENT_ME, &share->name_hash, system_charset_info, share->fields, 0, 0, - (my_hash_get_key) get_field_name, 0, 0); + get_field_name, 0, 0); if (share->mysql_version >= 50700 && share->mysql_version < 100000 && vcol_screen_length) @@ -4057,50 +4057,54 @@ static void print_long_unique_table(TABLE *table) " fields->offset,field->null_bit, field->null_pos and key_info ... 
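/*
  Sketch of the heartbeat_period rules enforced by the parser actions above:
  a period larger than slave_net_timeout only draws a warning, while a
  non-zero period below the 0.001s resolution is warned about, reset to zero
  and heartbeats are disabled. Plain functions and printf() stand in for the
  grammar actions and push_warning().
*/
#include <cstdio>

struct Master_info_demo { double heartbeat_period; bool heartbeat_enabled; };

static void check_heartbeat(Master_info_demo *mi, double slave_net_timeout)
{
  if (mi->heartbeat_period > slave_net_timeout)
    printf("warning: heartbeat exceeds slave_net_timeout\n");
  if (mi->heartbeat_period < 0.001)
  {
    if (mi->heartbeat_period != 0.0)
    {
      printf("warning: heartbeat below minimum, resetting\n");
      mi->heartbeat_period= 0.0;
    }
    mi->heartbeat_enabled= false;
  }
}

int main()
{
  Master_info_demo mi= {0.0005, true};
  check_heartbeat(&mi, 60.0);
  printf("period=%g enabled=%d\n", mi.heartbeat_period, mi.heartbeat_enabled);
  return 0;
}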
\n" "\nPrinting Table keyinfo\n"); str.append(buff, strlen(buff)); - my_snprintf(buff, sizeof(buff), "\ntable->s->reclength %d\n" - "table->s->fields %d\n", + my_snprintf(buff, sizeof(buff), "\ntable->s->reclength %lu\n" + "table->s->fields %u\n", table->s->reclength, table->s->fields); str.append(buff, strlen(buff)); for (uint i= 0; i < table->s->keys; i++) { key_info_table= table->key_info + i; key_info_share= table->s->key_info + i; - my_snprintf(buff, sizeof(buff), "\ntable->key_info[%d] user_defined_key_parts = %d\n" - "table->key_info[%d] algorithm == HA_KEY_ALG_LONG_HASH = %d\n" - "table->key_info[%d] flags & HA_NOSAME = %d\n", - i, key_info_table->user_defined_key_parts, - i, key_info_table->algorithm == HA_KEY_ALG_LONG_HASH, - i, key_info_table->flags & HA_NOSAME); + my_snprintf(buff, sizeof(buff), + "\ntable->key_info[%u] user_defined_key_parts = %u\n" + "table->key_info[%u] algorithm == HA_KEY_ALG_LONG_HASH = %d\n" + "table->key_info[%u] flags & HA_NOSAME = %lu\n", + i, key_info_table->user_defined_key_parts, + i, key_info_table->algorithm == HA_KEY_ALG_LONG_HASH, + i, key_info_table->flags & HA_NOSAME); str.append(buff, strlen(buff)); - my_snprintf(buff, sizeof(buff), "\ntable->s->key_info[%d] user_defined_key_parts = %d\n" - "table->s->key_info[%d] algorithm == HA_KEY_ALG_LONG_HASH = %d\n" - "table->s->key_info[%d] flags & HA_NOSAME = %d\n", - i, key_info_share->user_defined_key_parts, - i, key_info_share->algorithm == HA_KEY_ALG_LONG_HASH, - i, key_info_share->flags & HA_NOSAME); + my_snprintf(buff, sizeof(buff), + "\ntable->s->key_info[%u] user_defined_key_parts = %u\n" + "table->s->key_info[%u] algorithm == HA_KEY_ALG_LONG_HASH = %d\n" + "table->s->key_info[%u] flags & HA_NOSAME = %lu\n", + i, key_info_share->user_defined_key_parts, + i, key_info_share->algorithm == HA_KEY_ALG_LONG_HASH, + i, key_info_share->flags & HA_NOSAME); str.append(buff, strlen(buff)); key_part = key_info_table->key_part; - my_snprintf(buff, sizeof(buff), "\nPrinting table->key_info[%d].key_part[0] info\n" - "key_part->offset = %d\n" - "key_part->field_name = %s\n" - "key_part->length = %d\n" - "key_part->null_bit = %d\n" - "key_part->null_offset = %d\n", - i, key_part->offset, key_part->field->field_name.str, key_part->length, - key_part->null_bit, key_part->null_offset); + my_snprintf(buff, sizeof(buff), + "\nPrinting table->key_info[%u].key_part[0] info\n" + "key_part->offset = %u\n" + "key_part->field_name = %s\n" + "key_part->length = %u\n" + "key_part->null_bit = %u\n" + "key_part->null_offset = %u\n", + i, key_part->offset, key_part->field->field_name.str, key_part->length, + key_part->null_bit, key_part->null_offset); str.append(buff, strlen(buff)); for (uint j= 0; j < key_info_share->user_defined_key_parts; j++) { key_part= key_info_share->key_part + j; - my_snprintf(buff, sizeof(buff), "\nPrinting share->key_info[%d].key_part[%d] info\n" - "key_part->offset = %d\n" - "key_part->field_name = %s\n" - "key_part->length = %d\n" - "key_part->null_bit = %d\n" - "key_part->null_offset = %d\n", - i,j,key_part->offset, key_part->field->field_name.str, key_part->length, - key_part->null_bit, key_part->null_offset); + my_snprintf(buff, sizeof(buff), + "\nPrinting share->key_info[%u].key_part[%u] info\n" + "key_part->offset = %u\n" + "key_part->field_name = %s\n" + "key_part->length = %u\n" + "key_part->null_bit = %u\n" + "key_part->null_offset = %u\n", + i, j, key_part->offset, key_part->field->field_name.str, + key_part->length, key_part->null_bit, key_part->null_offset); str.append(buff, 
strlen(buff)); } } @@ -4109,16 +4113,17 @@ static void print_long_unique_table(TABLE *table) for(uint i= 0; i < table->s->fields; i++) { field= table->field[i]; - my_snprintf(buff, sizeof(buff), "\ntable->field[%d]->field_name %s\n" - "table->field[%d]->offset = %d\n" - "table->field[%d]->field_length = %d\n" - "table->field[%d]->null_pos wrt to record 0 = %d\n" - "table->field[%d]->null_bit_pos = %d\n", - i, field->field_name.str, - i, field->ptr- table->record[0], - i, field->pack_length(), - i, field->null_bit ? field->null_ptr - table->record[0] : -1, - i, field->null_bit); + my_snprintf(buff, sizeof(buff), + "\ntable->field[%u]->field_name %s\n" + "table->field[%u]->offset = %" PRIdPTR "\n" // `%td` not available + "table->field[%u]->field_length = %d\n" + "table->field[%u]->null_pos wrt to record 0 = %" PRIdPTR "\n" + "table->field[%u]->null_bit_pos = %d\n", + i, field->field_name.str, + i, field->ptr- table->record[0], + i, field->pack_length(), + i, field->null_bit ? field->null_ptr - table->record[0] : -1, + i, field->null_bit); str.append(buff, strlen(buff)); } (*error_handler_hook)(1, str.ptr(), ME_NOTE); diff --git a/sql/table_cache.cc b/sql/table_cache.cc index 1700185b8f3..b804a3e0627 100644 --- a/sql/table_cache.cc +++ b/sql/table_cache.cc @@ -588,12 +588,11 @@ static void tdc_hash_initializer(LF_HASH *, } -static uchar *tdc_hash_key(const unsigned char *_element, size_t *length, - my_bool) +static const uchar *tdc_hash_key(const void *element_, size_t *length, my_bool) { - const TDC_element *element= (const TDC_element *) _element; + auto element= static_cast(element_); *length= element->m_key_length; - return (uchar*) element->m_key; + return reinterpret_cast(element->m_key); } @@ -616,14 +615,13 @@ bool tdc_init(void) tdc_inited= true; mysql_mutex_init(key_LOCK_unused_shares, &LOCK_unused_shares, MY_MUTEX_INIT_FAST); - lf_hash_init(&tdc_hash, sizeof(TDC_element) + - sizeof(Share_free_tables) * (tc_instances - 1), - LF_HASH_UNIQUE, 0, 0, - (my_hash_get_key) tdc_hash_key, - &my_charset_bin); + lf_hash_init(&tdc_hash, + sizeof(TDC_element) + + sizeof(Share_free_tables) * (tc_instances - 1), + LF_HASH_UNIQUE, 0, 0, tdc_hash_key, &my_charset_bin); tdc_hash.alloc.constructor= lf_alloc_constructor; tdc_hash.alloc.destructor= lf_alloc_destructor; - tdc_hash.initializer= (lf_hash_initializer) tdc_hash_initializer; + tdc_hash.initializer= tdc_hash_initializer; DBUG_RETURN(false); } @@ -1143,12 +1141,12 @@ struct eliminate_duplicates_arg }; -static uchar *eliminate_duplicates_get_key(const uchar *element, size_t *length, - my_bool) +static const uchar *eliminate_duplicates_get_key(const void *element, + size_t *length, my_bool) { - LEX_STRING *key= (LEX_STRING *) element; + auto key= static_cast(element); *length= key->length; - return (uchar *) key->str; + return reinterpret_cast(key->str); } diff --git a/sql/threadpool_common.cc b/sql/threadpool_common.cc index 407d73b0533..1dd30958883 100644 --- a/sql/threadpool_common.cc +++ b/sql/threadpool_common.cc @@ -154,16 +154,16 @@ static inline void set_thd_idle(THD *thd) */ struct OS_thread_info { + void *stack_start; + void *stack_end; pthread_t self; - ssize_t stack_size; uint32_t thread_id; + inline bool initialized() { return stack_start != 0; } - inline bool initialized() { return stack_size != 0; } - - void init(ssize_t ssize) + void init() { #if _WIN32 - self= thread_id= GetCurrentThreadId(); + self= thread_id= GetCurrentThreadId(); #else #ifdef __NR_gettid thread_id= (uint32) syscall(__NR_gettid); @@ -172,7 +172,10 @@ struct 
OS_thread_info #endif self= pthread_self(); #endif - stack_size= ssize; + char stack_var; + my_get_stack_bounds(&stack_start, &stack_end, &stack_var, my_thread_stack_size); + DBUG_ASSERT(stack_start); + DBUG_ASSERT(stack_end); } }; static thread_local OS_thread_info os_thread_info; @@ -181,7 +184,7 @@ static const OS_thread_info *get_os_thread_info() { auto *res= &os_thread_info; if (!res->initialized()) - res->init((ssize_t) (my_thread_stack_size * STACK_DIRECTION)); + res->init(); return res; } @@ -199,8 +202,8 @@ static void thread_attach(THD* thd) const OS_thread_info *tinfo= get_os_thread_info(); set_current_thd(thd); - my_get_stack_bounds(&thd->thread_stack, &thd->mysys_var->stack_ends_here, - (void*) &tinfo, my_thread_stack_size); + thd->thread_stack= tinfo->stack_start; + thd->mysys_var->stack_ends_here= tinfo->stack_end; thd->real_id= tinfo->self; thd->os_thread_id= tinfo->thread_id; PSI_CALL_set_thread(thd->get_psi()); diff --git a/sql/tztime.cc b/sql/tztime.cc index 7e437110727..e674376f8c9 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -1524,21 +1524,20 @@ public: they should obey C calling conventions. */ -extern "C" uchar * -my_tz_names_get_key(Tz_names_entry *entry, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *my_tz_names_get_key(const void *entry_, size_t *length, + my_bool) { + auto entry= static_cast(entry_); *length= entry->name.length(); - return (uchar*) entry->name.ptr(); + return reinterpret_cast(entry->name.ptr()); } -extern "C" uchar * -my_offset_tzs_get_key(Time_zone_offset *entry, - size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *my_offset_tzs_get_key(const void *entry_, + size_t *length, my_bool) { + auto entry= static_cast(entry_); *length= sizeof(long); - return (uchar*) &entry->offset; + return reinterpret_cast(&entry->offset); } @@ -1654,13 +1653,13 @@ my_tz_init(THD *org_thd, const char *default_tzname, my_bool bootstrap) /* Init all memory structures that require explicit destruction */ if (my_hash_init(key_memory_tz_storage, &tz_names, &my_charset_latin1, 20, 0, - 0, (my_hash_get_key) my_tz_names_get_key, 0, 0)) + 0, my_tz_names_get_key, 0, 0)) { sql_print_error("Fatal error: OOM while initializing time zones"); goto end; } if (my_hash_init(key_memory_tz_storage, &offset_tzs, &my_charset_latin1, 26, - 0, 0, (my_hash_get_key)my_offset_tzs_get_key, 0, 0)) + 0, 0, my_offset_tzs_get_key, 0, 0)) { sql_print_error("Fatal error: OOM while initializing time zones"); my_hash_free(&tz_names); diff --git a/sql/uniques.cc b/sql/uniques.cc index 74bf6d3caea..9f828dd63e9 100644 --- a/sql/uniques.cc +++ b/sql/uniques.cc @@ -40,8 +40,10 @@ #include "uniques.h" // Unique #include "sql_sort.h" -int unique_write_to_file(uchar* key, element_count count, Unique *unique) +int unique_write_to_file(void* key_, element_count, void *unique_) { + uchar *key= static_cast(key_); + Unique *unique= static_cast(unique_); /* Use unique->size (size of element stored in the tree) and not unique->tree.size_of_element. The latter is different from unique->size @@ -51,21 +53,30 @@ int unique_write_to_file(uchar* key, element_count count, Unique *unique) return my_b_write(&unique->file, key, unique->size) ? 
1 : 0; } -int unique_write_to_file_with_count(uchar* key, element_count count, Unique *unique) +int unique_write_to_file_with_count(void* key_, element_count count, void *unique_) { + uchar *key= static_cast(key_); + Unique *unique= static_cast(unique_); return my_b_write(&unique->file, key, unique->size) || - my_b_write(&unique->file, (uchar*)&count, sizeof(element_count)) ? 1 : 0; + my_b_write(&unique->file, reinterpret_cast(&count), + sizeof(element_count)) + ? 1 + : 0; } -int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique) +int unique_write_to_ptrs(void* key_, element_count, void *unique_) { + uchar *key= static_cast(key_); + Unique *unique= static_cast(unique_); memcpy(unique->sort.record_pointers, key, unique->size); unique->sort.record_pointers+=unique->size; return 0; } -int unique_intersect_write_to_ptrs(uchar* key, element_count count, Unique *unique) +int unique_intersect_write_to_ptrs(void* key_, element_count count, void *unique_) { + uchar *key= static_cast(key_); + Unique *unique= static_cast(unique_); if (count >= unique->min_dupl_count) { memcpy(unique->sort.record_pointers, key, unique->size); @@ -388,8 +399,8 @@ bool Unique::flush() file_ptr.set_file_position(my_b_tell(&file)); tree_walk_action action= min_dupl_count ? - (tree_walk_action) unique_write_to_file_with_count : - (tree_walk_action) unique_write_to_file; + unique_write_to_file_with_count : + unique_write_to_file; if (tree_walk(&tree, action, (void*) this, left_root_right) || insert_dynamic(&file_ptrs, (uchar*) &file_ptr)) @@ -434,11 +445,13 @@ Unique::reset() C_MODE_START -static int buffpek_compare(void *arg, uchar *key_ptr1, uchar *key_ptr2) +static int buffpek_compare(void *arg, const void *key_ptr1, + const void *key_ptr2) { - BUFFPEK_COMPARE_CONTEXT *ctx= (BUFFPEK_COMPARE_CONTEXT *) arg; + auto ctx= static_cast(arg); return ctx->key_compare(ctx->key_compare_arg, - *((uchar **) key_ptr1), *((uchar **)key_ptr2)); + *(static_cast(key_ptr1)), + *(static_cast(key_ptr2))); } C_MODE_END @@ -739,7 +752,7 @@ bool Unique::merge(TABLE *table, uchar *buff, size_t buff_size, sort_param.unique_buff= buff +(sort_param.max_keys_per_buffer * sort_param.sort_length); - sort_param.compare= (qsort2_cmp) buffpek_compare; + sort_param.compare= buffpek_compare; sort_param.cmp_context.key_compare= tree.compare; sort_param.cmp_context.key_compare_arg= tree.custom_arg; @@ -803,8 +816,8 @@ bool Unique::get(TABLE *table) { uchar *save_record_pointers= sort.record_pointers; tree_walk_action action= min_dupl_count ? 
- (tree_walk_action) unique_intersect_write_to_ptrs : - (tree_walk_action) unique_write_to_ptrs; + unique_intersect_write_to_ptrs : + unique_write_to_ptrs; filtered_out_elems= 0; (void) tree_walk(&tree, action, this, left_root_right); diff --git a/sql/uniques.h b/sql/uniques.h index ecc49794efe..60c8fc48b9f 100644 --- a/sql/uniques.h +++ b/sql/uniques.h @@ -98,13 +98,13 @@ public: uint get_size() const { return size; } size_t get_max_in_memory_size() const { return max_in_memory_size; } - friend int unique_write_to_file(uchar* key, element_count count, Unique *unique); - friend int unique_write_to_ptrs(uchar* key, element_count count, Unique *unique); + friend int unique_write_to_file(void* key, element_count count, void *unique); + friend int unique_write_to_ptrs(void* key, element_count count, void *unique); - friend int unique_write_to_file_with_count(uchar* key, element_count count, - Unique *unique); - friend int unique_intersect_write_to_ptrs(uchar* key, element_count count, - Unique *unique); + friend int unique_write_to_file_with_count(void *key, element_count count, + void *unique); + friend int unique_intersect_write_to_ptrs(void *key, element_count count, + void *unique); }; #endif /* UNIQUE_INCLUDED */ diff --git a/sql/wsrep_applier.cc b/sql/wsrep_applier.cc index 2dc6f47d06b..04a20311771 100644 --- a/sql/wsrep_applier.cc +++ b/sql/wsrep_applier.cc @@ -1,4 +1,4 @@ -/* Copyright (C) 2013-2019 Codership Oy +/* Copyright (C) 2013-2024 Codership Oy This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -211,7 +211,17 @@ int wsrep_apply_events(THD* thd, } ev->thd= thd; + thd->set_time(); + + if (!ev->when) + { + my_hrtime_t hrtime= my_hrtime(); + ev->when= hrtime_to_my_time(hrtime); + ev->when_sec_part= hrtime_sec_part(hrtime); + } + exec_res= ev->apply_event(thd->wsrep_rgi); + DBUG_PRINT("info", ("exec_event result: %d", exec_res)); if (exec_res) diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 49fbde865b4..bc0aa5ac99c 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -216,7 +216,6 @@ int Wsrep_high_priority_service::start_transaction( const wsrep::ws_handle& ws_handle, const wsrep::ws_meta& ws_meta) { DBUG_ENTER(" Wsrep_high_priority_service::start_transaction"); - m_thd->set_time(); DBUG_RETURN(m_thd->wsrep_cs().start_transaction(ws_handle, ws_meta) || trans_begin(m_thd)); } diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index e55e265cc71..7d591712a2c 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -841,9 +841,8 @@ void wsrep_init_globals() else { if (wsrep_gtid_mode && wsrep_gtid_server.server_id != global_system_variables.server_id) - { - WSREP_WARN("Ignoring server id for non bootstrap node."); - } + WSREP_INFO("Ignoring server id %ld for non bootstrap node, using %ld.", + global_system_variables.server_id, wsrep_gtid_server.server_id); } wsrep_init_schema(); if (WSREP_ON) @@ -2550,7 +2549,7 @@ bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table, If mariadb master has replicated a CTAS, we should not replicate the create table part separately as TOI, but to replicate both create table and following inserts as one write set. - Howver, if CTAS creates empty table, we should replicate the create table alone + However, if CTAS creates empty table, we should replicate the create table alone as TOI. 
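The wsrep_apply_events() hunk above back-fills ev->when and ev->when_sec_part from my_hrtime() when a replicated event arrives without a timestamp (the per-write-set set_time() call in start_transaction() is removed just below it). A stand-alone sketch of that seconds/sub-second split, using std::chrono in place of the my_hrtime_t helpers, whose behaviour is only paraphrased here:

#include <chrono>
#include <cstdint>
#include <cstdio>

int main()
{
  using namespace std::chrono;

  // Microseconds since the Unix epoch, playing the role of my_hrtime_t.val.
  const uint64_t hrtime= uint64_t(
      duration_cast<microseconds>(system_clock::now().time_since_epoch())
          .count());

  // The same split the applier performs: whole seconds become ev->when
  // (hrtime_to_my_time()), the remainder becomes ev->when_sec_part
  // (hrtime_sec_part()).
  const uint64_t when= hrtime / 1000000;
  const uint64_t when_sec_part= hrtime % 1000000;

  std::printf("when=%llu when_sec_part=%06llu\n",
              (unsigned long long) when, (unsigned long long) when_sec_part);
  return 0;
}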
We have to do relay log event lookup to see if row events follow the create table event. */ @@ -2563,6 +2562,7 @@ bool wsrep_can_run_in_toi(THD *thd, const char *db, const char *table, switch (ev_type) { case QUERY_EVENT: + case XID_EVENT: /* CTAS with empty table, we replicate create table as TOI */ break; @@ -3146,7 +3146,6 @@ void wsrep_to_isolation_end(THD *thd) } else if (wsrep_thd_is_in_rsu(thd)) { - thd->variables.lock_wait_timeout= thd->variables.saved_lock_wait_timeout; DBUG_ASSERT(wsrep_OSU_method_get(thd) == WSREP_OSU_RSU); wsrep_RSU_end(thd); } diff --git a/sql/wsrep_schema.cc b/sql/wsrep_schema.cc index 9d25ab64274..13780ede0e5 100644 --- a/sql/wsrep_schema.cc +++ b/sql/wsrep_schema.cc @@ -193,6 +193,25 @@ private: my_bool m_wsrep_ignore_table; }; +class wsrep_skip_locking +{ +public: + wsrep_skip_locking(THD *thd) + : m_thd(thd) + , m_wsrep_skip_locking(thd->wsrep_skip_locking) + { + thd->wsrep_skip_locking= true; + } + ~wsrep_skip_locking() + { + m_thd->wsrep_skip_locking= m_wsrep_skip_locking; + } + +private: + THD *m_thd; + my_bool m_wsrep_skip_locking; +}; + class thd_server_status { public: @@ -1258,6 +1277,7 @@ int Wsrep_schema::remove_fragments(THD* thd, Wsrep_schema_impl::wsrep_ignore_table wsrep_ignore_table(thd); Wsrep_schema_impl::binlog_off binlog_off(thd); Wsrep_schema_impl::sql_safe_updates sql_safe_updates(thd); + Wsrep_schema_impl::wsrep_skip_locking skip_locking(thd); Query_tables_list query_tables_list_backup; Open_tables_backup open_tables_backup; diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index c55d713ed18..83e017a2fc3 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -966,7 +966,7 @@ bool wsrep_max_ws_size_update(sys_var *self, THD *thd, enum_var_type) { char max_ws_size_opt[128]; my_snprintf(max_ws_size_opt, sizeof(max_ws_size_opt), - "repl.max_ws_size=%d", wsrep_max_ws_size); + "repl.max_ws_size=%lu", wsrep_max_ws_size); enum wsrep::provider::status ret= Wsrep_server_state::instance().provider().options(max_ws_size_opt); if (ret) { diff --git a/sql/xa.cc b/sql/xa.cc index 810df69aa00..730e312b746 100644 --- a/sql/xa.cc +++ b/sql/xa.cc @@ -148,7 +148,7 @@ public: DBUG_ASSERT(!reinterpret_cast(ptr + LF_HASH_OVERHEAD) ->is_set(ACQUIRED)); } - static uchar *key(const unsigned char *el, size_t *length, my_bool) + static const uchar *key(const void *el, size_t *length, my_bool) { const XID &xid= reinterpret_cast(el)->xid; *length= xid.key_length(); @@ -514,6 +514,40 @@ bool trans_xa_end(THD *thd) } +/* + Get the BACKUP_COMMIT lock for the duration of the XA. + + The metadata lock which will ensure that COMMIT is blocked + by active FLUSH TABLES WITH READ LOCK (and vice versa COMMIT in + progress blocks FTWRL) and also by MDL_BACKUP_WAIT_COMMIT. + We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does. + + Note that the function sets thd->backup_lock on sucess. The caller needs + to reset thd->backup_commit_lock before returning! 
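The comment above describes trans_xa_get_backup_lock(), which together with trans_xa_release_backup_lock() replaces several duplicated MDL acquire/release sequences in the XA code. A compilable toy version of the acquire, remember-on-THD, release pairing, with trivial stand-ins for THD and MDL_request (the real MDL waiting, timeouts and error handling are omitted):

#include <cstdio>

// Stand-ins for the server types, only to show the pairing; not real MDL.
struct MDL_request { bool ticket= false; };
struct THD { MDL_request *backup_commit_lock= nullptr; };

// Mirrors trans_xa_get_backup_lock(): returns false on success and leaves
// the request pointer on the THD so later code knows a lock is held.
static bool get_backup_lock(THD *thd, MDL_request *req)
{
  req->ticket= true;            // pretend the BACKUP_COMMIT wait succeeded
  thd->backup_commit_lock= req;
  return false;
}

// Mirrors trans_xa_release_backup_lock(): a no-op when nothing was acquired.
static void release_backup_lock(THD *thd)
{
  if (thd->backup_commit_lock)
  {
    thd->backup_commit_lock->ticket= false;
    thd->backup_commit_lock= nullptr;
  }
}

int main()
{
  THD thd;
  MDL_request req;
  if (!get_backup_lock(&thd, &req))
  {
    std::puts("XA PREPARE/COMMIT/ROLLBACK work would run here");
    release_backup_lock(&thd);  // the caller must always clear the THD pointer
  }
  return 0;
}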
+*/ + +static bool trans_xa_get_backup_lock(THD *thd, MDL_request *mdl_request) +{ + DBUG_ASSERT(thd->backup_commit_lock == 0); + MDL_REQUEST_INIT(mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, + MDL_EXPLICIT); + if (thd->mdl_context.acquire_lock(mdl_request, + thd->variables.lock_wait_timeout)) + return 1; + thd->backup_commit_lock= mdl_request; + return 0; +} + +static inline void trans_xa_release_backup_lock(THD *thd) +{ + if (thd->backup_commit_lock) + { + thd->mdl_context.release_lock(thd->backup_commit_lock->ticket); + thd->backup_commit_lock= 0; + } +} + + /** Put a XA transaction in the PREPARED state. @@ -536,21 +570,12 @@ bool trans_xa_prepare(THD *thd) my_error(ER_XAER_NOTA, MYF(0)); else { - /* - Acquire metadata lock which will ensure that COMMIT is blocked - by active FLUSH TABLES WITH READ LOCK (and vice versa COMMIT in - progress blocks FTWRL). - - We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does. - */ MDL_request mdl_request; - MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, - MDL_STATEMENT); - if (thd->mdl_context.acquire_lock(&mdl_request, - thd->variables.lock_wait_timeout) || + if (trans_xa_get_backup_lock(thd, &mdl_request) || ha_prepare(thd)) { if (!mdl_request.ticket) + /* Failed to get the backup lock */ ha_rollback_trans(thd, TRUE); thd->variables.option_bits&= ~(OPTION_BEGIN | OPTION_BINLOG_THIS_TRX); thd->transaction->all.reset(); @@ -579,6 +604,7 @@ bool trans_xa_prepare(THD *thd) res= thd->variables.pseudo_slave_mode || thd->slave_thread ? slave_applier_reset_xa_trans(thd) : 0; } + trans_xa_release_backup_lock(thd); } DBUG_RETURN(res); @@ -642,19 +668,8 @@ bool trans_xa_commit(THD *thd) res= 1; goto _end_external_xid; } - res= xa_trans_rolled_back(xs); - /* - Acquire metadata lock which will ensure that COMMIT is blocked - by active FLUSH TABLES WITH READ LOCK (and vice versa COMMIT in - progress blocks FTWRL). - - We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does. - */ - MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, - MDL_EXPLICIT); - if (thd->mdl_context.acquire_lock(&mdl_request, - thd->variables.lock_wait_timeout)) + if (trans_xa_get_backup_lock(thd, &mdl_request)) { /* We can't rollback an XA transaction on lock failure due to @@ -666,10 +681,6 @@ bool trans_xa_commit(THD *thd) res= true; goto _end_external_xid; } - else - { - thd->backup_commit_lock= &mdl_request; - } DBUG_ASSERT(!xid_state.xid_cache_element); xid_state.xid_cache_element= xs; @@ -689,11 +700,7 @@ bool trans_xa_commit(THD *thd) res= res || thd->is_error(); if (!xid_deleted) xs->acquired_to_recovered(); - if (mdl_request.ticket) - { - thd->mdl_context.release_lock(mdl_request.ticket); - thd->backup_commit_lock= 0; - } + trans_xa_release_backup_lock(thd); } else my_error(ER_XAER_NOTA, MYF(0)); @@ -716,7 +723,8 @@ bool trans_xa_commit(THD *thd) if ((res= MY_TEST(r))) my_error(r == 1 ? ER_XA_RBROLLBACK : ER_XAER_RMERR, MYF(0)); } - else if (thd->transaction->xid_state.xid_cache_element->xa_state == XA_PREPARED) + else if (thd->transaction->xid_state.xid_cache_element->xa_state == + XA_PREPARED) { MDL_request mdl_request; if (thd->lex->xa_opt != XA_NONE) @@ -725,18 +733,7 @@ bool trans_xa_commit(THD *thd) DBUG_RETURN(TRUE); } - /* - Acquire metadata lock which will ensure that COMMIT is blocked - by active FLUSH TABLES WITH READ LOCK (and vice versa COMMIT in - progress blocks FTWRL). - - We allow FLUSHer to COMMIT; we assume FLUSHer knows what it does. 
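In contrast to the explicit acquire/release pair above, the wsrep_skip_locking helper added to wsrep_schema.cc earlier in this patch restores THD state from a destructor, like the existing binlog_off and sql_safe_updates guards. A minimal generic sketch of that save-in-constructor, restore-in-destructor shape (the Session type and flag name are the sketch's own, not THD members):

#include <cstdio>

struct Session { bool skip_locking= false; };   // stand-in for THD

// Same shape as Wsrep_schema_impl::wsrep_skip_locking: remember the old
// value, force the flag for the scope, restore it on any exit path.
class skip_locking_guard
{
public:
  explicit skip_locking_guard(Session *s)
    : m_session(s), m_saved(s->skip_locking)
  {
    s->skip_locking= true;
  }
  ~skip_locking_guard() { m_session->skip_locking= m_saved; }

private:
  Session *m_session;
  bool m_saved;
};

int main()
{
  Session session;
  {
    skip_locking_guard guard(&session);
    std::printf("inside scope: %d\n", session.skip_locking);   // 1
  }
  std::printf("after scope:  %d\n", session.skip_locking);     // 0
  return 0;
}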
- */ - MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, - MDL_TRANSACTION); - - if (thd->mdl_context.acquire_lock(&mdl_request, - thd->variables.lock_wait_timeout)) + if (trans_xa_get_backup_lock(thd, &mdl_request)) { /* We can't rollback an XA transaction on lock failure due to @@ -763,6 +760,7 @@ bool trans_xa_commit(THD *thd) } thd->m_transaction_psi= NULL; + trans_xa_release_backup_lock(thd); } } else @@ -800,7 +798,8 @@ bool trans_xa_commit(THD *thd) bool trans_xa_rollback(THD *thd) { XID_STATE &xid_state= thd->transaction->xid_state; - + MDL_request mdl_request; + bool error; DBUG_ENTER("trans_xa_rollback"); if (!xid_state.is_explicit_XA() || @@ -821,7 +820,6 @@ bool trans_xa_rollback(THD *thd) { bool res; bool xid_deleted= false; - MDL_request mdl_request; bool rw_trans= (xs->rm_error != ER_XA_RBROLLBACK); if (rw_trans && thd->is_read_only_ctx()) @@ -831,10 +829,7 @@ bool trans_xa_rollback(THD *thd) goto _end_external_xid; } - MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, - MDL_EXPLICIT); - if (thd->mdl_context.acquire_lock(&mdl_request, - thd->variables.lock_wait_timeout)) + if (trans_xa_get_backup_lock(thd, &mdl_request)) { /* We can't rollback an XA transaction on lock failure due to @@ -845,10 +840,6 @@ bool trans_xa_rollback(THD *thd) goto _end_external_xid; } - else - { - thd->backup_commit_lock= &mdl_request; - } res= xa_trans_rolled_back(xs); DBUG_ASSERT(!xid_state.xid_cache_element); @@ -865,11 +856,7 @@ bool trans_xa_rollback(THD *thd) xid_state.xid_cache_element= 0; if (!xid_deleted) xs->acquired_to_recovered(); - if (mdl_request.ticket) - { - thd->mdl_context.release_lock(mdl_request.ticket); - thd->backup_commit_lock= 0; - } + trans_xa_release_backup_lock(thd); } else my_error(ER_XAER_NOTA, MYF(0)); @@ -886,11 +873,7 @@ bool trans_xa_rollback(THD *thd) DBUG_RETURN(TRUE); } - MDL_request mdl_request; - MDL_REQUEST_INIT(&mdl_request, MDL_key::BACKUP, "", "", MDL_BACKUP_COMMIT, - MDL_STATEMENT); - if (thd->mdl_context.acquire_lock(&mdl_request, - thd->variables.lock_wait_timeout)) + if (trans_xa_get_backup_lock(thd, &mdl_request)) { /* We can't rollback an XA transaction on lock failure due to @@ -901,7 +884,9 @@ bool trans_xa_rollback(THD *thd) DBUG_RETURN(true); } - DBUG_RETURN(xa_trans_force_rollback(thd)); + error= xa_trans_force_rollback(thd); + trans_xa_release_backup_lock(thd); + DBUG_RETURN(error); } diff --git a/storage/blackhole/ha_blackhole.cc b/storage/blackhole/ha_blackhole.cc index 343f3c70286..12339617dfa 100644 --- a/storage/blackhole/ha_blackhole.cc +++ b/storage/blackhole/ha_blackhole.cc @@ -365,17 +365,18 @@ static void free_share(st_blackhole_share *share) mysql_mutex_unlock(&blackhole_mutex); } -static void blackhole_free_key(st_blackhole_share *share) +static void blackhole_free_key(void *share) { - thr_lock_delete(&share->lock); + thr_lock_delete(&static_cast(share)->lock); my_free(share); } -static uchar* blackhole_get_key(st_blackhole_share *share, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *blackhole_get_key(const void *share_, size_t *length, + my_bool) { + auto share= static_cast(share_); *length= share->table_name_length; - return (uchar*) share->table_name; + return reinterpret_cast(share->table_name); } #ifdef HAVE_PSI_INTERFACE @@ -416,9 +417,8 @@ static int blackhole_init(void *p) mysql_mutex_init(bh_key_mutex_blackhole, &blackhole_mutex, MY_MUTEX_INIT_FAST); (void) my_hash_init(PSI_INSTRUMENT_ME, &blackhole_open_tables, - system_charset_info, 32, 0, 
0, - (my_hash_get_key) blackhole_get_key, - (my_hash_free_key) blackhole_free_key, 0); + system_charset_info, 32, 0, 0, blackhole_get_key, + blackhole_free_key, 0); return 0; } diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp index 268ad771ef9..b881a10628e 100644 --- a/storage/connect/domdoc.cpp +++ b/storage/connect/domdoc.cpp @@ -165,7 +165,8 @@ bool DOMDOC::NewDoc(PGLOBAL g, PCSZ ver) sprintf(buf, "version=\"%s\" encoding=\"%s\"", ver, Encoding); pip = Docp->createProcessingInstruction("xml", buf); - return(TestHr(g, Docp->appendChild(pip))); + Docp->appendChild(pip); + return false; } // end of NewDoc /******************************************************************/ @@ -173,7 +174,7 @@ bool DOMDOC::NewDoc(PGLOBAL g, PCSZ ver) /******************************************************************/ void DOMDOC::AddComment(PGLOBAL g, char *com) { - TestHr(g, Docp->appendChild(Docp->createComment(com))); + Docp->appendChild(Docp->createComment(com)); } // end of AddComment /******************************************************************/ @@ -196,9 +197,9 @@ PXNODE DOMDOC::NewRoot(PGLOBAL g, char *name) { MSXML2::IXMLDOMElementPtr ep = Docp->createElement(name); - if (ep == NULL || TestHr(g, Docp->appendChild(ep))) + if (ep == NULL) return NULL; - + Docp->appendChild(ep); return new(g) DOMNODE(this, ep); } // end of NewRoot @@ -552,9 +553,9 @@ PXNODE DOMNODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) _bstr_t pfx = ep->prefix; _bstr_t uri = ep->namespaceURI; - if (ep == NULL || TestHr(g, Nodep->appendChild(ep))) + if (ep == NULL) return NULL; - + Nodep->appendChild(ep); if (np) ((PDOMNODE)np)->Nodep = ep; else @@ -593,7 +594,7 @@ void DOMNODE::AddText(PGLOBAL g, PCSZ txtp) MSXML2::IXMLDOMTextPtr tp= Docp->createTextNode((_bstr_t)txtp); if (tp != NULL) - TestHr(g, Nodep->appendChild(tp)); + Nodep->appendChild(tp); } // end of AddText @@ -602,7 +603,7 @@ void DOMNODE::AddText(PGLOBAL g, PCSZ txtp) /******************************************************************/ void DOMNODE::DeleteChild(PGLOBAL g, PXNODE dnp) { - TestHr(g, Nodep->removeChild(((PDOMNODE)dnp)->Nodep)); + Nodep->removeChild(((PDOMNODE)dnp)->Nodep); // ((PDOMNODE)dnp)->Nodep->Release(); bad idea, causes a crash Delete(dnp); } // end of DeleteChild diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 4111fad3844..b749aaad0ba 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -96,8 +96,10 @@ static handler *tina_create_handler(handlerton *hton, /* Used for sorting chains with qsort(). */ -int sort_set (tina_set *a, tina_set *b) +int sort_set (const void *a_, const void *b_) { + const tina_set *a= static_cast(a_); + const tina_set *b= static_cast(b_); /* We assume that intervals do not intersect. So, it is enought to compare any two points. Here we take start of intervals for comparison. @@ -105,11 +107,11 @@ int sort_set (tina_set *a, tina_set *b) return ( a->begin > b->begin ? 1 : ( a->begin < b->begin ? 
-1 : 0 ) ); } -static uchar* tina_get_key(TINA_SHARE *share, size_t *length, - my_bool not_used __attribute__((unused))) +static const uchar *tina_get_key(const void *share_, size_t *length, my_bool) { + const TINA_SHARE *share= static_cast(share_); *length=share->table_name_length; - return (uchar*) share->table_name; + return reinterpret_cast(share->table_name); } static PSI_memory_key csv_key_memory_tina_share; @@ -181,12 +183,11 @@ static int tina_init_func(void *p) init_tina_psi_keys(); #endif - tina_hton= (handlerton *)p; + tina_hton= static_cast(p); mysql_mutex_init(csv_key_mutex_tina, &tina_mutex, MY_MUTEX_INIT_FAST); (void) my_hash_init(csv_key_memory_tina_share, &tina_open_tables, - Lex_ident_table::charset_info(), - 32, 0, 0, (my_hash_get_key) - tina_get_key, 0, 0); + Lex_ident_table::charset_info(), 32, 0, 0, tina_get_key, + 0, 0); tina_hton->db_type= DB_TYPE_CSV_DB; tina_hton->create= tina_create_handler; tina_hton->flags= (HTON_CAN_RECREATE | HTON_SUPPORT_LOG_TABLES | diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index ad5aa864f5b..a51796c78d7 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -430,11 +430,12 @@ static handler *federated_create_handler(handlerton *hton, /* Function we use in the creation of our hash to get key */ -static uchar *federated_get_key(FEDERATED_SHARE *share, size_t *length, - my_bool not_used __attribute__ ((unused))) +static const uchar *federated_get_key(const void *share_, size_t *length, + my_bool) { + auto share= static_cast(share_); *length= share->share_key_length; - return (uchar*) share->share_key; + return reinterpret_cast(share->share_key); } #ifdef HAVE_PSI_INTERFACE @@ -513,7 +514,7 @@ int federated_db_init(void *p) &federated_mutex, MY_MUTEX_INIT_FAST)) goto error; if (!my_hash_init(PSI_INSTRUMENT_ME, &federated_open_tables, &my_charset_bin, - 32, 0, 0, (my_hash_get_key) federated_get_key, 0, 0)) + 32, 0, 0, federated_get_key, 0, 0)) { DBUG_RETURN(FALSE); } diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index c5c38e31801..b2b9faf0aca 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -363,21 +363,20 @@ static handler *federatedx_create_handler(handlerton *hton, /* Function we use in the creation of our hash to get key */ -static uchar * -federatedx_share_get_key(FEDERATEDX_SHARE *share, size_t *length, - my_bool not_used __attribute__ ((unused))) +static const uchar *federatedx_share_get_key(const void *share_, + size_t *length, my_bool) { + auto share= static_cast(share_); *length= share->share_key_length; - return (uchar*) share->share_key; + return reinterpret_cast(share->share_key); } - -static uchar * -federatedx_server_get_key(FEDERATEDX_SERVER *server, size_t *length, - my_bool not_used __attribute__ ((unused))) +static const uchar *federatedx_server_get_key(const void *server_, + size_t *length, my_bool) { + auto server= static_cast(server_); *length= server->key_length; - return server->key; + return reinterpret_cast(server->key); } #ifdef HAVE_PSI_INTERFACE @@ -467,10 +466,12 @@ int federatedx_db_init(void *p) if (mysql_mutex_init(fe_key_mutex_federatedx, &federatedx_mutex, MY_MUTEX_INIT_FAST)) goto error; - if (!my_hash_init(PSI_INSTRUMENT_ME, &federatedx_open_tables, &my_charset_bin, 32, 0, 0, - (my_hash_get_key) federatedx_share_get_key, 0, 0) && - !my_hash_init(PSI_INSTRUMENT_ME, &federatedx_open_servers, &my_charset_bin, 32, 0, 0, - (my_hash_get_key) 
federatedx_server_get_key, 0, 0)) + if (!my_hash_init(PSI_INSTRUMENT_ME, &federatedx_open_tables, + &my_charset_bin, 32, 0, 0, federatedx_share_get_key, 0, + 0) && + !my_hash_init(PSI_INSTRUMENT_ME, &federatedx_open_servers, + &my_charset_bin, 32, 0, 0, federatedx_server_get_key, 0, + 0)) { DBUG_RETURN(FALSE); } diff --git a/storage/heap/hp_create.c b/storage/heap/hp_create.c index 935c6f8d0fd..c07a1e968c4 100644 --- a/storage/heap/hp_create.c +++ b/storage/heap/hp_create.c @@ -16,7 +16,7 @@ #include "heapdef.h" -static int keys_compare(heap_rb_param *param, uchar *key1, uchar *key2); +static int keys_compare(void *heap_rb, const void *key1, const void *key2); static void init_block(HP_BLOCK *block,uint reclength,ulong min_records, ulong max_records); @@ -190,7 +190,7 @@ int heap_create(const char *name, HP_CREATE_INFO *create_info, keyseg++; init_tree(&keyinfo->rb_tree, 0, 0, sizeof(uchar*), - (qsort_cmp2)keys_compare, NULL, NULL, + keys_compare, NULL, NULL, MYF((create_info->internal_table ? MY_THREAD_SPECIFIC : 0) | MY_TREE_WITH_DELETE)); keyinfo->delete_key= hp_rb_delete_key; @@ -255,11 +255,15 @@ err: } /* heap_create */ -static int keys_compare(heap_rb_param *param, uchar *key1, uchar *key2) +static int keys_compare(void *heap_rb_, const void *key1_, + const void *key2_) { + heap_rb_param *heap_rb= heap_rb_; + const uchar *key1= key1_; + const uchar *key2= key2_; uint not_used[2]; - return ha_key_cmp(param->keyseg, key1, key2, param->key_length, - param->search_flag, not_used); + return ha_key_cmp(heap_rb->keyseg, key1, key2, heap_rb->key_length, + heap_rb->search_flag, not_used); } static void init_block(HP_BLOCK *block, uint reclength, ulong min_records, diff --git a/storage/innobase/btr/btr0btr.cc b/storage/innobase/btr/btr0btr.cc index eb2cb2c9df2..8e3e8c3efb9 100644 --- a/storage/innobase/btr/btr0btr.cc +++ b/storage/innobase/btr/btr0btr.cc @@ -191,7 +191,7 @@ bool btr_root_fseg_validate(ulint offset, const buf_block_t &block, sql_print_error("InnoDB: Index root page " UINT32PF " in %s is corrupted " "at " ULINTPF, block.page.id().page_no(), - UT_LIST_GET_FIRST(space.chain)->name); + UT_LIST_GET_FIRST(space.chain)->name, offset); return false; } @@ -817,26 +817,31 @@ btr_page_get_father_block( mtr_t* mtr, /*!< in: mtr */ btr_cur_t* cursor) /*!< out: cursor on node pointer record, its page x-latched */ + noexcept { - rec_t *rec= - page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame)); + const page_t *page= btr_cur_get_page(cursor); + const rec_t *rec= page_is_comp(page) + ? page_rec_next_get(page, page + PAGE_NEW_INFIMUM) + : page_rec_next_get(page, page + PAGE_OLD_INFIMUM); if (UNIV_UNLIKELY(!rec)) return nullptr; - cursor->page_cur.rec= rec; + cursor->page_cur.rec= const_cast(rec); return btr_page_get_parent(offsets, heap, cursor, mtr); } /** Seek to the parent page of a B-tree page. -@param[in,out] mtr mini-transaction -@param[in,out] cursor cursor pointing to the x-latched parent page +@param mtr mini-transaction +@param cursor cursor pointing to the x-latched parent page @return whether the cursor was successfully positioned */ -bool btr_page_get_father(mtr_t* mtr, btr_cur_t* cursor) +bool btr_page_get_father(mtr_t *mtr, btr_cur_t *cursor) noexcept { - rec_t *rec= - page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame)); + page_t *page= btr_cur_get_page(cursor); + const rec_t *rec= page_is_comp(page) + ? 
page_rec_next_get(page, page + PAGE_NEW_INFIMUM) + : page_rec_next_get(page, page + PAGE_OLD_INFIMUM); if (UNIV_UNLIKELY(!rec)) return false; - cursor->page_cur.rec= rec; + cursor->page_cur.rec= const_cast(rec); mem_heap_t *heap= mem_heap_create(100); const bool got= btr_page_get_parent(nullptr, heap, cursor, mtr); mem_heap_free(heap); @@ -866,8 +871,7 @@ static void btr_free_root(buf_block_t *block, const fil_space_t &space, { /* Free the entire segment in small steps. */ ut_d(mtr->freeing_tree()); - while (!fseg_free_step(PAGE_HEADER + PAGE_BTR_SEG_TOP + - block->page.frame, mtr)); + while (!fseg_free_step(block, PAGE_HEADER + PAGE_BTR_SEG_TOP, mtr)); } } @@ -1028,8 +1032,8 @@ leaf_loop: /* NOTE: page hash indexes are dropped when a page is freed inside fsp0fsp. */ - bool finished = fseg_free_step(PAGE_HEADER + PAGE_BTR_SEG_LEAF - + block->page.frame, &mtr + bool finished = fseg_free_step(block, PAGE_HEADER + PAGE_BTR_SEG_LEAF, + &mtr #ifdef BTR_CUR_HASH_ADAPT , ahi #endif /* BTR_CUR_HASH_ADAPT */ @@ -1047,8 +1051,9 @@ top_loop: finished = !btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP, *block, *space) - || fseg_free_step_not_header(PAGE_HEADER + PAGE_BTR_SEG_TOP - + block->page.frame, &mtr + || fseg_free_step_not_header(block, + PAGE_HEADER + PAGE_BTR_SEG_TOP, + &mtr #ifdef BTR_CUR_HASH_ADAPT ,ahi #endif /* BTR_CUR_HASH_ADAPT */ @@ -1773,7 +1778,6 @@ btr_root_raise_and_insert( dberr_t* err) /*!< out: error code */ { dict_index_t* index; - rec_t* rec; dtuple_t* node_ptr; ulint level; rec_t* node_ptr_rec; @@ -1924,7 +1928,13 @@ btr_root_raise_and_insert( } const uint32_t new_page_no = new_block->page.id().page_no(); - rec = page_rec_get_next(page_get_infimum_rec(new_block->page.frame)); + const rec_t* rec= page_is_comp(new_block->page.frame) + ? page_rec_next_get(new_block->page.frame, + new_block->page.frame + + PAGE_NEW_INFIMUM) + : page_rec_next_get(new_block->page.frame, + new_block->page.frame + + PAGE_OLD_INFIMUM); ut_ad(rec); /* We just created the page. */ /* Build the node pointer (= node key and page address) for the @@ -1984,90 +1994,109 @@ btr_root_raise_and_insert( /** Decide if the page should be split at the convergence point of inserts converging to the left. -@param[in] cursor insert position +@param cursor insert position @return the first record to be moved to the right half page -@retval NULL if no split is recommended */ -rec_t* btr_page_get_split_rec_to_left(const btr_cur_t* cursor) +@retval nullptr if no split is recommended */ +rec_t *btr_page_get_split_rec_to_left(const btr_cur_t *cursor) noexcept { - rec_t* split_rec = btr_cur_get_rec(cursor); - const page_t* page = page_align(split_rec); + const rec_t *split_rec= btr_cur_get_rec(cursor); + const page_t *page= btr_cur_get_page(cursor); + const rec_t *const last= page + page_header_get_offs(page, PAGE_LAST_INSERT); - if (page_header_get_ptr(page, PAGE_LAST_INSERT) - != page_rec_get_next(split_rec)) { - return NULL; - } + if (page_is_comp(page)) + { + if (last != page_rec_next_get(page, split_rec)) + return nullptr; + /* The metadata record must be present in the leftmost leaf page + of the clustered index, if and only if index->is_instant(). + However, during innobase_instant_try(), index->is_instant() would + already hold when row_ins_clust_index_entry_low() is being invoked + to insert the the metadata record. So, we can only assert that + when the metadata record exists, index->is_instant() must hold. 
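The btr0btr.cc hunks above repeat one pattern: instead of page_get_infimum_rec() plus page_rec_get_next(), branch on page_is_comp() once, pick the format-specific infimum offset, and walk records with the format-aware accessor. A self-contained sketch of that dispatch; the offsets mirror PAGE_OLD_INFIMUM and PAGE_NEW_INFIMUM but should be treated as this sketch's assumption, and next_rec() is a trivial stand-in for page_rec_next_get():

#include <cstdint>
#include <cstdio>

static const unsigned PAGE_OLD_INFIMUM= 101;   // ROW_FORMAT=REDUNDANT
static const unsigned PAGE_NEW_INFIMUM= 99;    // COMPACT/DYNAMIC/COMPRESSED

// Toy replacement for page_rec_next_get(): steps one byte so the sketch
// stays standalone; the real function follows the record's "next" link.
static const uint8_t *next_rec(const uint8_t *page, const uint8_t *rec)
{
  (void) page;
  return rec + 1;
}

// Branch on the page format once, then use the format-specific infimum.
static const uint8_t *first_user_rec(const uint8_t *page, bool comp)
{
  const uint8_t *infimum=
      page + (comp ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM);
  return next_rec(page, infimum);
}

int main()
{
  uint8_t page[16384]= {};
  std::printf("REDUNDANT: first record offset %ld\n",
              long(first_user_rec(page, false) - page));
  std::printf("COMPACT:   first record offset %ld\n",
              long(first_user_rec(page, true) - page));
  return 0;
}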
*/ + const rec_t *const infimum= page + PAGE_NEW_INFIMUM; + ut_ad(!page_is_leaf(page) || page_has_prev(page) || + cursor->index()->is_instant() || + !(rec_get_info_bits(page_rec_next_get(page, infimum), true) & + REC_INFO_MIN_REC_FLAG)); + /* If the convergence is in the middle of a page, include also the + record immediately before the new insert to the upper page. + Otherwise, we could repeatedly move from page to page lots of + records smaller than the convergence point. */ + if (split_rec == infimum || + split_rec == page_rec_next_get(page, infimum)) + split_rec= page_rec_next_get(page, split_rec); + } + else + { + if (last != page_rec_next_get(page, split_rec)) + return nullptr; + const rec_t *const infimum= page + PAGE_OLD_INFIMUM; + ut_ad(!page_is_leaf(page) || page_has_prev(page) || + cursor->index()->is_instant() || + !(rec_get_info_bits(page_rec_next_get(page, infimum), false) & + REC_INFO_MIN_REC_FLAG)); + if (split_rec == infimum || + split_rec == page_rec_next_get(page, infimum)) + split_rec= page_rec_next_get(page, split_rec); + } - /* The metadata record must be present in the leftmost leaf page - of the clustered index, if and only if index->is_instant(). - However, during innobase_instant_try(), index->is_instant() - would already hold when row_ins_clust_index_entry_low() - is being invoked to insert the the metadata record. - So, we can only assert that when the metadata record exists, - index->is_instant() must hold. */ - ut_ad(!page_is_leaf(page) || page_has_prev(page) - || cursor->index()->is_instant() - || !(rec_get_info_bits(page_rec_get_next_const( - page_get_infimum_rec(page)), - cursor->index()->table->not_redundant()) - & REC_INFO_MIN_REC_FLAG)); - - const rec_t* infimum = page_get_infimum_rec(page); - - /* If the convergence is in the middle of a page, include also - the record immediately before the new insert to the upper - page. Otherwise, we could repeatedly move from page to page - lots of records smaller than the convergence point. */ - - if (split_rec == infimum - || split_rec == page_rec_get_next_const(infimum)) { - split_rec = page_rec_get_next(split_rec); - } - - return split_rec; + return const_cast(split_rec); } /** Decide if the page should be split at the convergence point of inserts converging to the right. -@param[in] cursor insert position -@param[out] split_rec if split recommended, the first record - on the right half page, or - NULL if the to-be-inserted record - should be first +@param cursor insert position +@param split_rec if split recommended, the first record on the right +half page, or nullptr if the to-be-inserted record should be first @return whether split is recommended */ bool -btr_page_get_split_rec_to_right(const btr_cur_t* cursor, rec_t** split_rec) +btr_page_get_split_rec_to_right(const btr_cur_t *cursor, rec_t **split_rec) + noexcept { - rec_t* insert_point = btr_cur_get_rec(cursor); - const page_t* page = page_align(insert_point); + const rec_t *insert_point= btr_cur_get_rec(cursor); + const page_t *page= btr_cur_get_page(cursor); - /* We use eager heuristics: if the new insert would be right after - the previous insert on the same page, we assume that there is a - pattern of sequential inserts here. */ + /* We use eager heuristics: if the new insert would be right after + the previous insert on the same page, we assume that there is a + pattern of sequential inserts here. 
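The heuristic that btr_page_get_split_rec_to_left() and btr_page_get_split_rec_to_right() implement is easiest to see with a toy model: for monotonically increasing keys, splitting at the insertion point keeps finished pages full, while a classic middle split leaves them about half empty. A small self-contained simulation (the page capacity and the 50/50 middle split are arbitrary choices of this sketch):

#include <cstdio>
#include <vector>

static const size_t CAPACITY= 100;   // records per page in this toy model

// Insert n_keys ascending keys and report the average page fill factor.
static double avg_fill(bool split_at_insert_point, int n_keys)
{
  std::vector<size_t> pages{0};      // record count of each page
  for (int key= 0; key < n_keys; key++)
  {
    size_t &last= pages.back();      // ascending keys always hit the last page
    if (last < CAPACITY)
    {
      ++last;
      continue;
    }
    if (split_at_insert_point)
      pages.push_back(1);            // the full left page is left untouched
    else
    {
      size_t moved= last / 2;        // classic middle split
      last-= moved;
      pages.push_back(moved + 1);
    }
  }
  double sum= 0;
  for (size_t n : pages)
    sum+= double(n) / double(CAPACITY);
  return sum / double(pages.size());
}

int main()
{
  std::printf("middle split:          %.0f%% average fill\n",
              100 * avg_fill(false, 100000));
  std::printf("split at insert point: %.0f%% average fill\n",
              100 * avg_fill(true, 100000));
  return 0;
}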
*/ + if (page + page_header_get_offs(page, PAGE_LAST_INSERT) != insert_point) + return false; - if (page_header_get_ptr(page, PAGE_LAST_INSERT) != insert_point) { - return false; - } + if (page_is_comp(page)) + { + const rec_t *const supremum= page + PAGE_NEW_SUPREMUM; + insert_point= page_rec_next_get(page, insert_point); + if (!insert_point); + else if (insert_point == supremum) + insert_point= nullptr; + else + { + insert_point= page_rec_next_get(page, insert_point); + if (insert_point == supremum) + insert_point= nullptr; + /* If there are >= 2 user records up from the insert point, + split all but 1 off. We want to keep one because then sequential + inserts can do the necessary checks of the right search position + just by looking at the records on this page. */ + } + } + else + { + const rec_t *const supremum= page + PAGE_OLD_SUPREMUM; + insert_point= page_rec_next_get(page, insert_point); + if (!insert_point); + else if (insert_point == supremum) + insert_point= nullptr; + else + { + insert_point= page_rec_next_get(page, insert_point); + if (insert_point == supremum) + insert_point= nullptr; + } + } - insert_point = page_rec_get_next(insert_point); - - if (!insert_point || page_rec_is_supremum(insert_point)) { - insert_point = NULL; - } else { - insert_point = page_rec_get_next(insert_point); - if (page_rec_is_supremum(insert_point)) { - insert_point = NULL; - } - - /* If there are >= 2 user records up from the insert - point, split all but 1 off. We want to keep one because - then sequential inserts can use the adaptive hash - index, as they can do the necessary checks of the right - search position just by looking at the records on this - page. */ - } - - *split_rec = insert_point; - return true; + *split_rec= const_cast(insert_point); + return true; } /*************************************************************//** @@ -4406,31 +4435,31 @@ btr_index_rec_validate_report( << " of table " << index->table->name << ", page " << page_id_t(page_get_space_id(page), page_get_page_no(page)) - << ", at offset " << page_offset(rec); + << ", at offset " << rec - page; } /************************************************************//** Checks the size and number of fields in a record based on the definition of the index. 
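Several hunks here (btr_index_rec_validate_report(), and the PageBulk changes further down) replace page_offset(rec) with an explicit rec - page subtraction against a frame pointer the caller already holds. A sketch contrasting the two ways of computing a record's offset; the 16 KiB page size and the alignment trick are assumptions of the sketch, not the server's definitions:

#include <cstdint>
#include <cstdio>
#include <vector>

static const uintptr_t PAGE_SIZE= 16384;

// What page_offset() does: recover the page start from pointer alignment,
// which silently assumes the frame is aligned to the page size.
static size_t offset_by_alignment(const void *ptr)
{
  return size_t(reinterpret_cast<uintptr_t>(ptr) & (PAGE_SIZE - 1));
}

// What the patched code does instead: subtract a frame pointer that the
// caller already has (block->page.frame), with no alignment assumption.
static size_t offset_by_frame(const uint8_t *rec, const uint8_t *frame)
{
  return size_t(rec - frame);
}

int main()
{
  // Obtain a page-aligned frame portably: over-allocate and round up.
  std::vector<uint8_t> buf(2 * PAGE_SIZE);
  uint8_t *frame= buf.data() +
      (PAGE_SIZE - reinterpret_cast<uintptr_t>(buf.data()) % PAGE_SIZE)
          % PAGE_SIZE;

  const uint8_t *rec= frame + 99;            // pretend: a COMPACT infimum
  std::printf("by alignment: %zu, by frame: %zu\n",
              offset_by_alignment(rec), offset_by_frame(rec, frame));
  return 0;
}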
@return TRUE if ok */ -ibool +bool btr_index_rec_validate( /*===================*/ - const rec_t* rec, /*!< in: index record */ + const page_cur_t& cur, /*!< in: cursor to index record */ const dict_index_t* index, /*!< in: index */ - ibool dump_on_error) /*!< in: TRUE if the function + bool dump_on_error) /*!< in: true if the function should print hex dump of record and page on error */ + noexcept { ulint len; - const page_t* page; + const rec_t* rec = page_cur_get_rec(&cur); + const page_t* page = cur.block->page.frame; mem_heap_t* heap = NULL; rec_offs offsets_[REC_OFFS_NORMAL_SIZE]; rec_offs* offsets = offsets_; rec_offs_init(offsets_); - page = page_align(rec); - ut_ad(index->n_core_fields); #ifdef VIRTUAL_INDEX_DEBUG @@ -4603,7 +4632,7 @@ btr_index_page_validate( return true; } - if (!btr_index_rec_validate(cur.rec, index, TRUE)) { + if (!btr_index_rec_validate(cur, index, TRUE)) { break; } diff --git a/storage/innobase/btr/btr0bulk.cc b/storage/innobase/btr/btr0bulk.cc index 094d7570dc0..6b385997f53 100644 --- a/storage/innobase/btr/btr0bulk.cc +++ b/storage/innobase/btr/btr0bulk.cc @@ -157,7 +157,8 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets) const ulint rec_size= rec_offs_size(offsets); const ulint extra_size= rec_offs_extra_size(offsets); ut_ad(page_align(m_heap_top + rec_size) == m_page); - ut_d(const bool is_leaf= page_rec_is_leaf(m_cur_rec)); + ut_ad(page_align(m_cur_rec) == m_page); + ut_d(const bool is_leaf= page_is_leaf(m_page)); #ifdef UNIV_DEBUG /* Check whether records are in order. */ @@ -179,8 +180,8 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets) /* Insert the record in the linked list. */ if (fmt != REDUNDANT) { - const rec_t *next_rec= m_page + - page_offset(m_cur_rec + mach_read_from_2(m_cur_rec - REC_NEXT)); + const rec_t *next_rec= + m_cur_rec + int16_t(mach_read_from_2(m_cur_rec - REC_NEXT)); if (fmt != COMPRESSED) m_mtr.write<2>(*m_block, m_cur_rec - REC_NEXT, static_cast(insert_rec - m_cur_rec)); @@ -203,7 +204,8 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets) else { memcpy(const_cast(rec) - REC_NEXT, m_cur_rec - REC_NEXT, 2); - m_mtr.write<2>(*m_block, m_cur_rec - REC_NEXT, page_offset(insert_rec)); + m_mtr.write<2>(*m_block, m_cur_rec - REC_NEXT, + uintptr_t(insert_rec - m_page)); rec_set_bit_field_1(const_cast(rec), 0, REC_OLD_N_OWNED, REC_N_OWNED_MASK, REC_N_OWNED_SHIFT); rec_set_bit_field_2(const_cast(rec), @@ -213,7 +215,7 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets) if (fmt == COMPRESSED) /* We already wrote the record. Log is written in PageBulk::compress(). */; - else if (page_offset(m_cur_rec) == + else if (m_cur_rec - m_page == (fmt == REDUNDANT ? 
PAGE_OLD_INFIMUM : PAGE_NEW_INFIMUM)) m_mtr.memcpy(*m_block, m_heap_top, rec - extra_size, rec_size); else @@ -245,7 +247,7 @@ inline void PageBulk::insertPage(rec_t *rec, rec_offs *offsets) if (len > 2) { memcpy(b, c, len); - m_mtr.memmove(*m_block, page_offset(b), page_offset(c), len); + m_mtr.memmove(*m_block, b - m_page, c - m_page, len); c= cm; b= bm; r= rm; @@ -284,7 +286,7 @@ no_data: { m_mtr.memcpy(*m_block, b, r, m_cur_rec - c); memcpy(bd, cd, len); - m_mtr.memmove(*m_block, page_offset(bd), page_offset(cd), len); + m_mtr.memmove(*m_block, bd - m_page, cd - m_page, len); c= cdm; b= rdm - rd + bd; r= rdm; @@ -429,7 +431,7 @@ inline void PageBulk::finishPage() if (count == (PAGE_DIR_SLOT_MAX_N_OWNED + 1) / 2) { slot-= PAGE_DIR_SLOT_SIZE; - mach_write_to_2(slot, page_offset(insert_rec)); + mach_write_to_2(slot, insert_rec - m_page); page_rec_set_n_owned(m_block, insert_rec, count, false, &m_mtr); count= 0; } @@ -468,7 +470,7 @@ inline void PageBulk::finishPage() m_mtr.memcpy(*m_block, PAGE_HEADER + m_page, page_header, sizeof page_header); m_mtr.write<2>(*m_block, PAGE_HEADER + PAGE_N_RECS + m_page, m_rec_no); - m_mtr.memcpy(*m_block, page_offset(slot), slot0 - slot); + m_mtr.memcpy(*m_block, slot - m_page, slot0 - slot); } else { @@ -697,7 +699,7 @@ PageBulk::copyOut( ULINT_UNDEFINED, &m_heap); mach_write_to_2(rec - REC_NEXT, m_is_comp ? static_cast - (PAGE_NEW_SUPREMUM - page_offset(rec)) + (PAGE_NEW_SUPREMUM - (rec - m_page)) : PAGE_OLD_SUPREMUM); /* Set related members */ diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 3cd58755af1..c8eb7fd163d 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -231,7 +231,7 @@ unreadable: } ut_ad(page_cur_is_before_first(&cur.page_cur)); - ut_ad(page_is_leaf(cur.page_cur.block->page.frame)); + ut_ad(page_is_leaf(btr_cur_get_page(&cur))); const rec_t* rec = page_cur_move_to_next(&cur.page_cur); const ulint comp = dict_table_is_comp(index->table); @@ -593,14 +593,17 @@ btr_intention_t btr_cur_get_and_clear_intention(btr_latch_mode *latch_mode) /** @return whether the distance between two records is at most the specified value */ +template static bool -page_rec_distance_is_at_most(const rec_t *left, const rec_t *right, ulint val) +page_rec_distance_is_at_most(const page_t *page, const rec_t *left, + const rec_t *right, ulint val) + noexcept { do { if (left == right) return true; - left= page_rec_get_next_const(left); + left= page_rec_next_get(page, left); } while (left && val--); return false; @@ -671,25 +674,52 @@ btr_cur_will_modify_tree( } /* check delete will cause. (BTR_INTENTION_BOTH or BTR_INTENTION_DELETE) */ - if (n_recs <= max_nodes_deleted * 2 - || page_rec_is_first(rec, page)) { + if (n_recs <= max_nodes_deleted * 2) { /* The cursor record can be the left most record in this page. 
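page_rec_distance_is_at_most() above simply walks the page's singly linked record list from `left` for at most `val` steps and reports whether `right` was reached. The same bounded walk over an ordinary linked list, so the semantics can be tested in isolation (the node type and names are the sketch's own):

#include <cstdio>

struct rec { const rec *next; };

static bool distance_is_at_most(const rec *left, const rec *right,
                                unsigned val)
{
  do
  {
    if (left == right)
      return true;
    left= left->next;      // page_rec_next_get() in the real code
  }
  while (left && val--);
  return false;
}

int main()
{
  rec r[5];
  for (int i= 0; i < 4; i++) r[i].next= &r[i + 1];
  r[4].next= nullptr;
  // r[0] to r[3] is three "next" steps away.
  std::printf("within 2 steps: %d\n", distance_is_at_most(&r[0], &r[3], 2));
  std::printf("within 3 steps: %d\n", distance_is_at_most(&r[0], &r[3], 3));
  return 0;
}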
*/ return true; } - if (page_has_prev(page) - && page_rec_distance_is_at_most( - page_get_infimum_rec(page), rec, - max_nodes_deleted)) { - return true; - } - - if (page_has_next(page) - && page_rec_distance_is_at_most( - rec, page_get_supremum_rec(page), - max_nodes_deleted)) { - return true; + if (page_is_comp(page)) { + const rec_t *const infimum + = page + PAGE_NEW_INFIMUM; + if (page_rec_next_get(page, infimum) + == rec) { + return true; + } + if (page_has_prev(page) + && page_rec_distance_is_at_most( + page, infimum, rec, + max_nodes_deleted)) { + return true; + } + if (page_has_next(page) + && page_rec_distance_is_at_most( + page, rec, + page + PAGE_NEW_SUPREMUM, + max_nodes_deleted)) { + return true; + } + } else { + const rec_t *const infimum + = page + PAGE_OLD_INFIMUM; + if (page_rec_next_get(page, infimum) + == rec) { + return true; + } + if (page_has_prev(page) + && page_rec_distance_is_at_most( + page, infimum, rec, + max_nodes_deleted)) { + return true; + } + if (page_has_next(page) + && page_rec_distance_is_at_most( + page, rec, + page + PAGE_OLD_SUPREMUM, + max_nodes_deleted)) { + return true; + } } /* Delete at leftmost record in a page causes delete @@ -2188,7 +2218,7 @@ btr_cur_ins_lock_and_undo( } } - if (!index->is_primary() || !page_is_leaf(page_align(rec))) { + if (!index->is_primary() || !page_is_leaf(btr_cur_get_page(cursor))) { return DB_SUCCESS; } @@ -2846,7 +2876,8 @@ static dberr_t btr_cur_upd_rec_sys(buf_block_t *block, rec_t *rec, To save space, we must have d>6, that is, the complete DB_TRX_ID and the first byte(s) of DB_ROLL_PTR must match the previous record. */ memcpy(dest, src, d); - mtr->memmove(*block, page_offset(dest), page_offset(src), d); + mtr->memmove(*block, dest - block->page.frame, src - block->page.frame, + d); dest+= d; len-= d; /* DB_TRX_ID,DB_ROLL_PTR must be unique in each record when @@ -3009,8 +3040,8 @@ void btr_cur_upd_rec_in_place(rec_t *rec, const dict_index_t *index, default: mtr->memset( block, - page_offset(rec_get_field_start_offs( - rec, n) + rec), + rec_get_field_start_offs(rec, n) + rec + - block->page.frame, size, 0); } ulint l = rec_get_1byte_offs_flag(rec) @@ -3703,7 +3734,9 @@ btr_cur_pess_upd_restore_supremum( lock_rec_reset_and_inherit_gap_locks(*prev_block, block_id, PAGE_HEAP_NO_SUPREMUM, - page_rec_get_heap_no(rec)); + page_is_comp(page) + ? rec_get_heap_no_new(rec) + : rec_get_heap_no_old(rec)); return DB_SUCCESS; } @@ -4139,7 +4172,7 @@ return_after_reservations: template void btr_rec_set_deleted(buf_block_t *block, rec_t *rec, mtr_t *mtr) { - if (page_rec_is_comp(rec)) + if (UNIV_LIKELY(page_is_comp(block->page.frame) != 0)) { byte *b= &rec[-REC_NEW_INFO_BITS]; const byte v= flag @@ -4320,6 +4353,9 @@ btr_cur_optimistic_delete( ULINT_UNDEFINED, &heap); dberr_t err = DB_SUCCESS; + DBUG_EXECUTE_IF("btr_force_pessimistic_delete", + err = DB_FAIL; goto func_exit;); + if (rec_offs_any_extern(offsets) || !btr_cur_can_delete_without_compress(cursor, rec_offs_size(offsets), @@ -4379,7 +4415,8 @@ btr_cur_optimistic_delete( } { - if (UNIV_UNLIKELY(rec_get_info_bits(rec, page_rec_is_comp(rec)) + if (UNIV_UNLIKELY(rec_get_info_bits(rec, page_is_comp( + block->page.frame)) & REC_INFO_MIN_REC_FLAG)) { /* This should be rolling back instant ADD COLUMN. 
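The new DBUG_EXECUTE_IF("btr_force_pessimistic_delete", ...) hook above lets tests force the optimistic delete path to report failure so the pessimistic path gets exercised. A stand-alone sketch of that keyword-driven fault-injection shape, driven by an environment variable instead of the dbug library (the macro name and lookup are this sketch's simplification):

#include <cstdio>
#include <cstdlib>
#include <cstring>

#define EXECUTE_IF(keyword, action)                         \
  do                                                        \
  {                                                         \
    const char *dbg= std::getenv("DEBUG_KEYWORDS");         \
    if (dbg && std::strstr(dbg, keyword))                   \
    {                                                       \
      action;                                               \
    }                                                       \
  } while (0)

// Shaped like btr_cur_optimistic_delete(): the injected action flips the
// result and jumps straight to the exit label, as the patch does with
// err = DB_FAIL; goto func_exit.
static int optimistic_delete()
{
  int err= 0;                                   // 0 plays the role of DB_SUCCESS
  EXECUTE_IF("force_pessimistic_delete", err= -1; goto func_exit;);
  // ... the normal optimistic path would run here ...
func_exit:
  return err;
}

int main()
{
  std::printf("optimistic_delete() -> %d\n", optimistic_delete());
  return 0;
}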
If this is a recovered transaction, then @@ -4515,7 +4552,7 @@ btr_cur_pessimistic_delete( if (page_is_leaf(page)) { const bool is_metadata = rec_is_metadata( - rec, page_rec_is_comp(rec)); + rec, page_is_comp(block->page.frame)); if (UNIV_UNLIKELY(is_metadata)) { /* This should be rolling back instant ALTER TABLE. If this is a recovered transaction, then @@ -5666,7 +5703,8 @@ struct btr_blob_log_check_t { uint32_t page_no = FIL_NULL; if (UNIV_UNLIKELY(m_op == BTR_STORE_INSERT_BULK)) { - offs = page_offset(*m_rec); + offs = *m_rec - (*m_block)->page.frame; + ut_ad(offs == page_offset(*m_rec)); page_no = (*m_block)->page.id().page_no(); (*m_block)->page.fix(); ut_ad(page_no != FIL_NULL); @@ -5774,7 +5812,7 @@ btr_store_big_rec_extern_fields( ut_ad(buf_block_get_frame(rec_block) == page_align(rec)); ut_a(dict_index_is_clust(index)); - if (!fil_page_index_page_check(page_align(rec))) { + if (!fil_page_index_page_check(btr_pcur_get_page(pcur))) { if (op != BTR_STORE_INSERT_BULK) { return DB_PAGE_CORRUPTED; } @@ -6507,14 +6545,9 @@ btr_copy_zblob_prefix( buf_page_t* bpage; uint32_t next_page_no; - /* There is no latch on bpage directly. Instead, - bpage is protected by the B-tree page latch that - is being held on the clustered index record, or, - in row_merge_copy_blobs(), by an exclusive table lock. */ bpage = buf_page_get_zip(id); if (UNIV_UNLIKELY(!bpage)) { - ib::error() << "Cannot load compressed BLOB " << id; goto func_exit; } @@ -6584,12 +6617,10 @@ inflate_error: end_of_blob: bpage->lock.s_unlock(); - bpage->unfix(); goto func_exit; } bpage->lock.s_unlock(); - bpage->unfix(); /* On other BLOB pages except the first the BLOB header always is at the page header: */ diff --git a/storage/innobase/btr/btr0sea.cc b/storage/innobase/btr/btr0sea.cc index c32d32abbda..89b638395a9 100644 --- a/storage/innobase/btr/btr0sea.cc +++ b/storage/innobase/btr/btr0sea.cc @@ -536,9 +536,7 @@ static void ha_delete_hash_node(hash_table_t *table, mem_heap_t *heap, ut_a(del_node->block->n_pointers-- < MAX_N_POINTERS); #endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */ - const ulint fold= del_node->fold; - - HASH_DELETE(ha_node_t, next, table, fold, del_node); + table->cell_get(del_node->fold)->remove(*del_node, &ha_node_t::next); ha_node_t *top= static_cast(mem_heap_get_top(heap, sizeof *top)); @@ -557,8 +555,7 @@ static void ha_delete_hash_node(hash_table_t *table, mem_heap_t *heap, /* We have to look for the predecessor */ ha_node_t *node= static_cast(cell->node); - while (top != HASH_GET_NEXT(next, node)) - node= static_cast(HASH_GET_NEXT(next, node)); + while (top != node->next) node= node->next; /* Now we have the predecessor node */ node->next= del_node; @@ -680,7 +677,7 @@ btr_search_update_hash_ref( ut_ad(cursor->flag == BTR_CUR_HASH_FAIL); ut_ad(block->page.lock.have_x() || block->page.lock.have_s()); - ut_ad(page_align(btr_cur_get_rec(cursor)) == block->page.frame); + ut_ad(btr_cur_get_page(cursor) == block->page.frame); ut_ad(page_is_leaf(block->page.frame)); assert_block_ahi_valid(block); @@ -1274,21 +1271,30 @@ retry: /* Calculate and cache fold values into an array for fast deletion from the hash index */ - rec = page_get_infimum_rec(page); - rec = page_rec_get_next_low(rec, page_is_comp(page)); - + const auto comp = page_is_comp(page); ulint* folds; ulint n_cached = 0; ulint prev_fold = 0; - if (rec && rec_is_metadata(rec, *index)) { - rec = page_rec_get_next_low(rec, page_is_comp(page)); - if (!--n_recs) { - /* The page only contains the hidden metadata record - for instant ALTER TABLE that the 
adaptive hash index - never points to. */ - folds = nullptr; - goto all_deleted; + if (UNIV_LIKELY(comp != 0)) { + rec = page_rec_next_get(page, page + PAGE_NEW_INFIMUM); + if (rec && rec_is_metadata(rec, TRUE)) { + rec = page_rec_next_get(page, rec); +skipped_metadata: + if (!--n_recs) { + /* The page only contains the hidden + metadata record for instant ALTER + TABLE that the adaptive hash index + never points to. */ + folds = nullptr; + goto all_deleted; + } + } + } else { + rec = page_rec_next_get(page, page + PAGE_OLD_INFIMUM); + if (rec && rec_is_metadata(rec, FALSE)) { + rec = page_rec_next_get(page, rec); + goto skipped_metadata; } } @@ -1319,9 +1325,16 @@ retry: folds[n_cached++] = fold; next_rec: - rec = page_rec_get_next_low(rec, page_rec_is_comp(rec)); - if (!rec || page_rec_is_supremum(rec)) { - break; + if (comp) { + rec = page_rec_next_get(page, rec); + if (!rec || rec == page + PAGE_NEW_SUPREMUM) { + break; + } + } else { + rec = page_rec_next_get(page, rec); + if (!rec || rec == page + PAGE_OLD_SUPREMUM) { + break; + } } prev_fold = fold; } diff --git a/storage/innobase/buf/buf0buddy.cc b/storage/innobase/buf/buf0buddy.cc index f43c6672a95..5e31790aad4 100644 --- a/storage/innobase/buf/buf0buddy.cc +++ b/storage/innobase/buf/buf0buddy.cc @@ -341,58 +341,50 @@ static buf_buddy_free_t* buf_buddy_alloc_zip(ulint i) } /** Deallocate a buffer frame of srv_page_size. -@param[in] buf buffer frame to deallocate */ -static -void -buf_buddy_block_free(void* buf) +@param buf buffer frame to deallocate */ +static void buf_buddy_block_free(void *buf) noexcept { - const ulint fold = BUF_POOL_ZIP_FOLD_PTR(buf); - buf_page_t* bpage; - buf_block_t* block; + mysql_mutex_assert_owner(&buf_pool.mutex); + ut_a(!ut_align_offset(buf, srv_page_size)); - mysql_mutex_assert_owner(&buf_pool.mutex); - ut_a(!ut_align_offset(buf, srv_page_size)); + const ulint fold= BUF_POOL_ZIP_FOLD_PTR(buf); + buf_page_t **prev= buf_pool.zip_hash.cell_get(fold)-> + search(&buf_page_t::hash, [buf](const buf_page_t *b) + { + ut_ad(b->in_zip_hash); + ut_ad(b->state() == buf_page_t::MEMORY); + return b->frame == buf; + }); - HASH_SEARCH(hash, &buf_pool.zip_hash, fold, buf_page_t*, bpage, - ut_ad(bpage->state() == buf_page_t::MEMORY - && bpage->in_zip_hash), - bpage->frame == buf); - ut_a(bpage); - ut_a(bpage->state() == buf_page_t::MEMORY); - ut_ad(bpage->in_zip_hash); - ut_d(bpage->in_zip_hash = false); - HASH_DELETE(buf_page_t, hash, &buf_pool.zip_hash, fold, bpage); - bpage->hash = nullptr; + buf_page_t *bpage= *prev; + ut_a(bpage); + ut_a(bpage->frame == buf); + ut_d(bpage->in_zip_hash= false); + *prev= bpage->hash; + bpage->hash= nullptr; - ut_d(memset(buf, 0, srv_page_size)); - MEM_UNDEFINED(buf, srv_page_size); + ut_d(memset(buf, 0, srv_page_size)); + MEM_UNDEFINED(buf, srv_page_size); - block = (buf_block_t*) bpage; - buf_LRU_block_free_non_file_page(block); - - ut_ad(buf_pool.buddy_n_frames > 0); - ut_d(buf_pool.buddy_n_frames--); + buf_LRU_block_free_non_file_page(reinterpret_cast(bpage)); + ut_ad(buf_pool.buddy_n_frames > 0); + ut_d(buf_pool.buddy_n_frames--); } -/**********************************************************************//** -Allocate a buffer block to the buddy allocator. */ -static -void -buf_buddy_block_register( -/*=====================*/ - buf_block_t* block) /*!< in: buffer frame to allocate */ +/** Allocate a buffer block to the buddy allocator. 
+@param block buffer block to register */ +static void buf_buddy_block_register(buf_block_t *block) noexcept { - const ulint fold = BUF_POOL_ZIP_FOLD(block); - ut_ad(block->page.state() == buf_page_t::MEMORY); + const ulint fold= BUF_POOL_ZIP_FOLD(block); + ut_ad(block->page.state() == buf_page_t::MEMORY); - ut_a(block->page.frame); - ut_a(!ut_align_offset(block->page.frame, srv_page_size)); + ut_a(block->page.frame); + ut_a(!ut_align_offset(block->page.frame, srv_page_size)); - ut_ad(!block->page.in_zip_hash); - ut_d(block->page.in_zip_hash = true); - HASH_INSERT(buf_page_t, hash, &buf_pool.zip_hash, fold, &block->page); - - ut_d(buf_pool.buddy_n_frames++); + ut_ad(!block->page.in_zip_hash); + ut_d(block->page.in_zip_hash= true); + buf_pool.zip_hash.cell_get(fold)->append(block->page, &buf_page_t::hash); + ut_d(buf_pool.buddy_n_frames++); } /** Allocate a block from a bigger object. diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 5f7d2e034c1..5343e01cafb 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2366,17 +2366,8 @@ static void buf_inc_get(ha_handler_stats *stats) ++buf_pool.stat.n_page_gets; } -/** Get read access to a compressed page (usually of type -FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2). -The page must be released with unfix(). -NOTE: the page is not protected by any latch. Mutual exclusion has to -be implemented at a higher level. In other words, all possible -accesses to a given page through this function must be protected by -the same set of mutexes or latches. -@param page_id page identifier -@return pointer to the block, s-latched */ TRANSACTIONAL_TARGET -buf_page_t* buf_page_get_zip(const page_id_t page_id) +buf_page_t *buf_page_get_zip(const page_id_t page_id) { ha_handler_stats *const stats= mariadb_stats; buf_inc_get(stats); @@ -2385,109 +2376,83 @@ buf_page_t* buf_page_get_zip(const page_id_t page_id) page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); buf_page_t *bpage; -lookup: - for (bool discard_attempted= false;;) + for (;;) { #ifndef NO_ELISION if (xbegin()) { if (hash_lock.is_locked()) - xabort(); - bpage= buf_pool.page_hash.get(page_id, chain); - if (!bpage) - { xend(); - goto must_read_page; - } - if (!bpage->zip.data) + else { - /* There is no ROW_FORMAT=COMPRESSED page. */ + bpage= buf_pool.page_hash.get(page_id, chain); + const bool got_s_latch= bpage && bpage->lock.s_lock_try(); xend(); - return nullptr; - } - if (discard_attempted || !bpage->frame) - { - if (!bpage->lock.s_lock_try()) - xabort(); - xend(); - break; - } - xend(); - } - else -#endif - { - hash_lock.lock_shared(); - bpage= buf_pool.page_hash.get(page_id, chain); - if (!bpage) - { - hash_lock.unlock_shared(); - goto must_read_page; - } - - ut_ad(bpage->in_file()); - ut_ad(page_id == bpage->id()); - - if (!bpage->zip.data) - { - /* There is no ROW_FORMAT=COMPRESSED page. */ - hash_lock.unlock_shared(); - return nullptr; - } - - if (discard_attempted || !bpage->frame) - { - const bool got_s_latch= bpage->lock.s_lock_try(); - hash_lock.unlock_shared(); - if (UNIV_LIKELY(got_s_latch)) + if (got_s_latch) break; - /* We may fail to acquire bpage->lock because - buf_page_t::read_complete() may be invoking - buf_pool_t::corrupted_evict() on this block, which it would - hold an exclusive latch on. - - Let us aqcuire and release buf_pool.mutex to ensure that any - buf_pool_t::corrupted_evict() will proceed before we reacquire - the hash_lock that it could be waiting for. 
*/ - mysql_mutex_lock(&buf_pool.mutex); - mysql_mutex_unlock(&buf_pool.mutex); - goto lookup; } + } +#endif + hash_lock.lock_shared(); + bpage= buf_pool.page_hash.get(page_id, chain); + if (!bpage) + { hash_lock.unlock_shared(); + switch (dberr_t err= buf_read_page(page_id, chain, false)) { + case DB_SUCCESS: + case DB_SUCCESS_LOCKED_REC: + mariadb_increment_pages_read(stats); + continue; + case DB_TABLESPACE_DELETED: + return nullptr; + default: + sql_print_error("InnoDB: Reading compressed page " + "[page id: space=" UINT32PF ", page number=" UINT32PF + "] failed with error: %s", + page_id.space(), page_id.page_no(), ut_strerr(err)); + return nullptr; + } } - discard_attempted= true; + ut_ad(bpage->in_file()); + ut_ad(page_id == bpage->id()); + + const bool got_s_latch= bpage->lock.s_lock_try(); + hash_lock.unlock_shared(); + if (UNIV_LIKELY(got_s_latch)) + break; + /* We may fail to acquire bpage->lock because a read is holding an + exclusive latch on this block and either in progress or invoking + buf_pool_t::corrupted_evict(). + + Let us aqcuire and release buf_pool.mutex to ensure that any + buf_pool_t::corrupted_evict() will proceed before we reacquire + the hash_lock that it could be waiting for. + + While we are at it, let us also try to discard any uncompressed + page frame of the compressed BLOB page, in case one had been + allocated for writing the BLOB. */ mysql_mutex_lock(&buf_pool.mutex); - if (buf_page_t *bpage= buf_pool.page_hash.get(page_id, chain)) + bpage= buf_pool.page_hash.get(page_id, chain); + if (bpage) buf_LRU_free_page(bpage, false); mysql_mutex_unlock(&buf_pool.mutex); } + if (UNIV_UNLIKELY(!bpage->zip.data)) { - ut_d(const auto s=) bpage->fix(); - ut_ad(s >= buf_page_t::UNFIXED); - ut_ad(s < buf_page_t::READ_FIX || s >= buf_page_t::WRITE_FIX); + ut_ad("no ROW_FORMAT=COMPRESSED page!" == 0); + bpage->lock.s_unlock(); + bpage= nullptr; } - - buf_page_make_young_if_needed(bpage); + else + buf_page_make_young_if_needed(bpage); #ifdef UNIV_DEBUG if (!(++buf_dbg_counter % 5771)) buf_pool.validate(); #endif /* UNIV_DEBUG */ return bpage; - -must_read_page: - switch (dberr_t err= buf_read_page(page_id, chain)) { - case DB_SUCCESS: - case DB_SUCCESS_LOCKED_REC: - mariadb_increment_pages_read(stats); - goto lookup; - default: - ib::error() << "Reading compressed page " << page_id - << " failed with error: " << err; - return nullptr; - } } /********************************************************************//** diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 7144f95ae58..4416d695a2f 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2665,6 +2665,7 @@ static void buf_flush_page_cleaner() ATTRIBUTE_COLD void buf_pool_t::LRU_warn() { mysql_mutex_assert_owner(&mutex); + try_LRU_scan= false; if (!LRU_warned.test_and_set(std::memory_order_acquire)) sql_print_warning("InnoDB: Could not free any blocks in the buffer pool!" " %zu blocks are in use and %zu free." 
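The hunks above (btr0sea.cc, buf0buddy.cc) and several below (dict0dict.cc, fil0fil.cc, ha0storage.cc) replace HASH_SEARCH/HASH_INSERT/HASH_DELETE macro calls with typed operations on a hash cell, such as cell_get(fold)->find(...), ->search(...), ->append(...) and ->remove(...). The following is a minimal, self-contained sketch of that intrusive-chain pattern; hash_cell_sketch, toy_node and the member signatures here are illustrative assumptions for this note, not the actual hash0hash.h interface.

#include <cassert>

// Toy element type standing in for fil_space_t, dict_table_t, ha_node_t, ...
// (assumed for this sketch only)
struct toy_node
{
  int key;
  toy_node *next;   // intrusive chain pointer, like fil_space_t::hash
};

// Illustrative bucket: chains elements of type T through a member pointer.
template<typename T>
struct hash_cell_sketch
{
  T *node= nullptr;               // head of the singly linked chain

  // Return the first element satisfying pred, or nullptr.
  template<typename Pred>
  T *find(T *T::*next, Pred pred) const
  {
    for (T *t= node; t; t= t->*next)
      if (pred(t))
        return t;
    return nullptr;
  }

  // Append an element at the end of the chain.
  void append(T &element, T *T::*next)
  {
    T **prev= &node;
    while (*prev)
      prev= &((*prev)->*next);
    *prev= &element;
    element.*next= nullptr;
  }

  // Unlink an element that is known to be in the chain.
  void remove(T &element, T *T::*next)
  {
    T **prev= &node;
    while (*prev != &element)
      prev= &((*prev)->*next);
    *prev= element.*next;
  }
};

int main()
{
  hash_cell_sketch<toy_node> cell;
  toy_node a{1, nullptr}, b{2, nullptr};
  cell.append(a, &toy_node::next);
  cell.append(b, &toy_node::next);
  const toy_node *found=
    cell.find(&toy_node::next, [](const toy_node *n) { return n->key == 2; });
  assert(found == &b);
  cell.remove(a, &toy_node::next);
  assert(cell.node == &b);
  return 0;
}

Passing the chain field as a member pointer keeps each operation type-checked against the element type, which is what allows the patch to drop the casts that the HASH_* macros required.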
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 9549eee6f4a..139154e6baa 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -399,7 +399,7 @@ read_ahead: } dberr_t buf_read_page(const page_id_t page_id, - buf_pool_t::hash_chain &chain) + buf_pool_t::hash_chain &chain, bool unzip) noexcept { fil_space_t *space= fil_space_t::get(page_id.space()); if (UNIV_UNLIKELY(!space)) @@ -425,7 +425,7 @@ dberr_t buf_read_page(const page_id_t page_id, block= buf_LRU_get_free_block(have_mutex); mysql_mutex_unlock(&buf_pool.mutex); } - else + else if (unzip) { zip_size|= 1; goto allocate_block; diff --git a/storage/innobase/dict/dict0crea.cc b/storage/innobase/dict/dict0crea.cc index e01e600ed41..c567c0a0112 100644 --- a/storage/innobase/dict/dict0crea.cc +++ b/storage/innobase/dict/dict0crea.cc @@ -901,7 +901,8 @@ rec_corrupted: static_assert(FIL_NULL == 0xffffffff, "compatibility"); static_assert(DICT_FLD__SYS_INDEXES__PAGE_NO == DICT_FLD__SYS_INDEXES__SPACE + 1, "compatibility"); - mtr->memset(btr_pcur_get_block(pcur), page_offset(p + 4), 4, 0xff); + mtr->memset(btr_pcur_get_block(pcur), p + 4 - btr_pcur_get_page(pcur), + 4, 0xff); btr_free_if_exists(s, root_page_no, mach_read_from_8(rec + 8), mtr); } s->release(); @@ -1310,7 +1311,7 @@ function_exit: return(thr); } -bool dict_sys_t::load_sys_tables() +bool dict_sys_t::load_sys_tables() noexcept { ut_ad(!srv_any_background_activity()); bool mismatch= false; @@ -1353,7 +1354,7 @@ bool dict_sys_t::load_sys_tables() return mismatch; } -dberr_t dict_sys_t::create_or_check_sys_tables() +dberr_t dict_sys_t::create_or_check_sys_tables() noexcept { if (sys_tables_exist()) return DB_SUCCESS; diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index c6d3514138d..434d8ed5b66 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -639,6 +639,46 @@ template bool dict_table_t::parse_name<>(char(&)[NAME_LEN + 1], char(&)[NAME_LEN + 1], size_t*, size_t*) const; +dict_table_t *dict_sys_t::acquire_temporary_table(table_id_t id) const noexcept +{ + ut_ad(frozen()); + ut_ad(id >= DICT_HDR_FIRST_ID); + return temp_id_hash.cell_get(ut_fold_ull(id))-> + find(&dict_table_t::id_hash, [id](dict_table_t *t) + { + ut_ad(t->is_temporary()); + ut_ad(t->cached); + if (t->id != id) + return false; + t->acquire(); + return true; + }); +} + +dict_table_t *dict_sys_t::find_table(table_id_t id) const noexcept +{ + ut_ad(frozen()); + return table_id_hash.cell_get(ut_fold_ull(id))-> + find(&dict_table_t::id_hash, [id](const dict_table_t *t) + { + ut_ad(!t->is_temporary()); + ut_ad(t->cached); + return t->id == id; + }); +} + +dict_table_t *dict_sys_t::find_table(const span &name) + const noexcept +{ + ut_ad(frozen()); + return table_hash.cell_get(my_crc32c(0, name.data(), name.size()))-> + find(&dict_table_t::name_hash, [name](const dict_table_t *t) + { + return strlen(t->name.m_name) == name.size() && + !memcmp(t->name.m_name, name.data(), name.size()); + }); +} + /** Acquire MDL shared for the table name. @tparam trylock whether to use non-blocking operation @param[in,out] table table object @@ -927,7 +967,7 @@ dict_table_col_in_clustered_key( } /** Initialise the data dictionary cache. 
*/ -void dict_sys_t::create() +void dict_sys_t::create() noexcept { ut_ad(this == &dict_sys); ut_ad(!is_initialised()); @@ -1135,60 +1175,35 @@ void dict_table_t::add_to_cache() } /** Add a table definition to the data dictionary cache */ -inline void dict_sys_t::add(dict_table_t* table) +inline void dict_sys_t::add(dict_table_t *table) noexcept { - ut_ad(!find(table)); - - ulint fold = my_crc32c(0, table->name.m_name, - strlen(table->name.m_name)); - - table->row_id = 0; - table->autoinc_mutex.init(); - table->lock_mutex_init(); - - /* Look for a table with the same name: error if such exists */ - { - dict_table_t* table2; - HASH_SEARCH(name_hash, &table_hash, fold, - dict_table_t*, table2, ut_ad(table2->cached), - !strcmp(table2->name.m_name, table->name.m_name)); - ut_a(table2 == NULL); - -#ifdef UNIV_DEBUG - /* Look for the same table pointer with a different name */ - HASH_SEARCH_ALL(name_hash, &table_hash, - dict_table_t*, table2, ut_ad(table2->cached), - table2 == table); - ut_ad(table2 == NULL); -#endif /* UNIV_DEBUG */ - } - HASH_INSERT(dict_table_t, name_hash, &table_hash, fold, table); - - /* Look for a table with the same id: error if such exists */ - hash_table_t* id_hash = table->is_temporary() - ? &temp_id_hash : &table_id_hash; - const ulint id_fold = ut_fold_ull(table->id); - { - dict_table_t* table2; - HASH_SEARCH(id_hash, id_hash, id_fold, - dict_table_t*, table2, ut_ad(table2->cached), - table2->id == table->id); - ut_a(table2 == NULL); - -#ifdef UNIV_DEBUG - /* Look for the same table pointer with a different id */ - HASH_SEARCH_ALL(id_hash, id_hash, - dict_table_t*, table2, ut_ad(table2->cached), - table2 == table); - ut_ad(table2 == NULL); -#endif /* UNIV_DEBUG */ - - HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, table); - } - - UT_LIST_ADD_FIRST(table->can_be_evicted ? table_LRU : table_non_LRU, - table); - ut_ad(dict_lru_validate()); + ut_ad(!table->name_hash); + ut_ad(!table->id_hash); + table->row_id= 0; + table->autoinc_mutex.init(); + table->lock_mutex_init(); + const char *name= table->name.m_name; + dict_table_t **prev= table_hash.cell_get(my_crc32c(0, name, strlen(name)))-> + search(&dict_table_t::name_hash, [name](const dict_table_t *t) + { + if (!t) return true; + ut_ad(t->cached); + ut_a(strcmp(t->name.m_name, name)); + return false; + }); + *prev= table; + prev= (table->is_temporary() ? temp_id_hash : table_id_hash). + cell_get(ut_fold_ull(table->id))-> + search(&dict_table_t::id_hash, [table](const dict_table_t *t) + { + if (!t) return true; + ut_ad(t->cached); + ut_a(t->id != table->id); + return false; + }); + *prev= table; + UT_LIST_ADD_FIRST(table->can_be_evicted ? table_LRU : table_non_LRU, table); + ut_ad(dict_lru_validate()); } /** Test whether a table can be evicted from dict_sys.table_LRU. @@ -1300,7 +1315,7 @@ dict_index_t *dict_index_t::clone_if_needed() /** Evict unused, unlocked tables from table_LRU. 
@param half whether to consider half the tables only (instead of all) @return number of tables evicted */ -ulint dict_sys_t::evict_table_LRU(bool half) +ulint dict_sys_t::evict_table_LRU(bool half) noexcept { #ifdef MYSQL_DYNAMIC_PLUGIN constexpr ulint max_tables = 400; @@ -1498,9 +1513,6 @@ dict_table_rename_in_cache( ut_a(old_name_len < sizeof old_name); strcpy(old_name, table->name.m_name); - const uint32_t fold= my_crc32c(0, new_name.data(), new_name.size()); - ut_a(!dict_sys.find_table(new_name)); - if (!dict_table_is_file_per_table(table)) { } else if (dberr_t err = table->rename_tablespace(new_name, replace_new_file)) { @@ -1508,10 +1520,11 @@ dict_table_rename_in_cache( } /* Remove table from the hash tables of tables */ - HASH_DELETE(dict_table_t, name_hash, &dict_sys.table_hash, - my_crc32c(0, table->name.m_name, old_name_len), table); + dict_sys.table_hash.cell_get(my_crc32c(0, table->name.m_name, + old_name_len)) + ->remove(*table, &dict_table_t::name_hash); - bool keep_mdl_name = !table->name.is_temporary(); + bool keep_mdl_name = !table->name.is_temporary(); if (!keep_mdl_name) { } else if (const char* s = static_cast @@ -1544,8 +1557,16 @@ dict_table_rename_in_cache( } /* Add table to hash table of tables */ - HASH_INSERT(dict_table_t, name_hash, &dict_sys.table_hash, fold, - table); + ut_ad(!table->name_hash); + dict_table_t** after = reinterpret_cast( + &dict_sys.table_hash.cell_get(my_crc32c(0, new_name.data(), + new_name.size())) + ->node); + for (; *after; after = &(*after)->name_hash) { + ut_ad((*after)->cached); + ut_a(strcmp((*after)->name.m_name, new_name.data())); + } + *after = table; if (table->name.is_temporary()) { /* In ALTER TABLE we think of the rename table operation @@ -1797,35 +1818,11 @@ dict_table_rename_in_cache( return(DB_SUCCESS); } -/**********************************************************************//** -Change the id of a table object in the dictionary cache. This is used in -DISCARD TABLESPACE. */ -void -dict_table_change_id_in_cache( -/*==========================*/ - dict_table_t* table, /*!< in/out: table object already in cache */ - table_id_t new_id) /*!< in: new id to set */ -{ - ut_ad(dict_sys.locked()); - ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); - ut_ad(!table->is_temporary()); - - /* Remove the table from the hash table of id's */ - - HASH_DELETE(dict_table_t, id_hash, &dict_sys.table_id_hash, - ut_fold_ull(table->id), table); - table->id = new_id; - - /* Add the table back to the hash table */ - HASH_INSERT(dict_table_t, id_hash, &dict_sys.table_id_hash, - ut_fold_ull(table->id), table); -} - /** Evict a table definition from the InnoDB data dictionary cache. @param[in,out] table cached table definition to be evicted @param[in] lru whether this is part of least-recently-used eviction @param[in] keep whether to keep (not free) the object */ -void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep) +void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep) noexcept { dict_foreign_t* foreign; dict_index_t* index; @@ -1861,16 +1858,12 @@ void dict_sys_t::remove(dict_table_t* table, bool lru, bool keep) } /* Remove table from the hash tables of tables */ - - HASH_DELETE(dict_table_t, name_hash, &table_hash, - my_crc32c(0, table->name.m_name, - strlen(table->name.m_name)), - table); - - hash_table_t* id_hash = table->is_temporary() - ? 
&temp_id_hash : &table_id_hash; - const ulint id_fold = ut_fold_ull(table->id); - HASH_DELETE(dict_table_t, id_hash, id_hash, id_fold, table); + table_hash.cell_get(my_crc32c(0, table->name.m_name, + strlen(table->name.m_name))) + ->remove(*table, &dict_table_t::name_hash); + (table->is_temporary() ? temp_id_hash : table_id_hash) + .cell_get(ut_fold_ull(table->id)) + ->remove(*table, &dict_table_t::id_hash); /* Remove table from LRU or non-LRU list. */ if (table->can_be_evicted) { @@ -4433,8 +4426,21 @@ dict_fs2utf8( } } +/** Insert a table into the hash tables +@param table the table +@param id_hash dict_sys.table_id_hash or dict_sys.temp_id_hash */ +static void hash_insert(dict_table_t *table, hash_table_t& id_hash) noexcept +{ + ut_ad(table->cached); + dict_sys.table_hash.cell_get(my_crc32c(0, table->name.m_name, + strlen(table->name.m_name)))-> + append(*table, &dict_table_t::name_hash); + id_hash.cell_get(ut_fold_ull(table->id))->append(*table, + &dict_table_t::id_hash); +} + /** Resize the hash tables based on the current buffer pool size. */ -void dict_sys_t::resize() +void dict_sys_t::resize() noexcept { ut_ad(this == &dict_sys); ut_ad(is_initialised()); @@ -4455,32 +4461,18 @@ void dict_sys_t::resize() table= UT_LIST_GET_NEXT(table_LRU, table)) { ut_ad(!table->is_temporary()); - ulint fold= my_crc32c(0, table->name.m_name, strlen(table->name.m_name)); - ulint id_fold= ut_fold_ull(table->id); - - HASH_INSERT(dict_table_t, name_hash, &table_hash, fold, table); - HASH_INSERT(dict_table_t, id_hash, &table_id_hash, id_fold, table); + hash_insert(table, table_id_hash); } for (dict_table_t *table = UT_LIST_GET_FIRST(table_non_LRU); table; table= UT_LIST_GET_NEXT(table_LRU, table)) - { - ulint fold= my_crc32c(0, table->name.m_name, strlen(table->name.m_name)); - ulint id_fold= ut_fold_ull(table->id); - - HASH_INSERT(dict_table_t, name_hash, &table_hash, fold, table); - - hash_table_t *id_hash= table->is_temporary() - ? &temp_id_hash : &table_id_hash; - - HASH_INSERT(dict_table_t, id_hash, id_hash, id_fold, table); - } + hash_insert(table, table->is_temporary() ? temp_id_hash : table_id_hash); unlock(); } /** Close the data dictionary cache on shutdown. */ -void dict_sys_t::close() +void dict_sys_t::close() noexcept { ut_ad(this == &dict_sys); if (!is_initialised()) return; @@ -4490,8 +4482,7 @@ void dict_sys_t::close() /* Free the hash elements. We don't remove them from table_hash because we are invoking table_hash.free() below. 
*/ for (ulint i= table_hash.n_cells; i--; ) - while (dict_table_t *table= static_cast<dict_table_t*> - (HASH_GET_FIRST(&table_hash, i))) + while (auto table= static_cast<dict_table_t*>(table_hash.array[i].node)) dict_sys.remove(table); table_hash.free(); diff --git a/storage/innobase/dict/dict0load.cc b/storage/innobase/dict/dict0load.cc index 83e9287803a..4ac53071282 100644 --- a/storage/innobase/dict/dict0load.cc +++ b/storage/innobase/dict/dict0load.cc @@ -2283,9 +2283,9 @@ dict_load_tablespace( table->file_unreadable = true; if (!(ignore_err & DICT_ERR_IGNORE_RECOVER_LOCK)) { - sql_print_error("InnoDB: Failed to load tablespace " - ULINTPF " for table %s", - table->space_id, table->name); + sql_print_error("InnoDB: Failed to load tablespace %" + PRIu32 " for table %s", + table->space_id, table->name.m_name); } } @@ -2540,7 +2540,7 @@ corrupted: } dict_table_t *dict_sys_t::load_table(const span<const char> &name, - dict_err_ignore_t ignore) + dict_err_ignore_t ignore) noexcept { if (dict_table_t *table= find_table(name)) return table; diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 06119620c80..62ac074e915 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -36,6 +36,9 @@ Created Jan 06, 2010 Vasil Dimov #include "que0que.h" #include "scope.h" #include "debug_sync.h" +#ifdef WITH_WSREP +# include <mysql/service_wsrep.h> +#endif #include <algorithm> #include <map> @@ -550,169 +553,6 @@ dberr_t dict_stats_exec_sql(pars_info_t *pinfo, const char* sql, trx_t *trx) return que_eval_sql(pinfo, sql, trx); } -/*********************************************************************//** -Duplicate a table object and its indexes. -This function creates a dummy dict_table_t object and initializes the -following table and index members: -dict_table_t::id (copied) -dict_table_t::heap (newly created) -dict_table_t::name (copied) -dict_table_t::corrupted (copied) -dict_table_t::indexes<> (newly created) -dict_table_t::magic_n -for each entry in dict_table_t::indexes, the following are initialized: -(indexes that have DICT_FTS set in index->type are skipped) -dict_index_t::id (copied) -dict_index_t::name (copied) -dict_index_t::table_name (points to the copied table name) -dict_index_t::table (points to the above semi-initialized object) -dict_index_t::type (copied) -dict_index_t::to_be_dropped (copied) -dict_index_t::online_status (copied) -dict_index_t::n_uniq (copied) -dict_index_t::fields[] (newly created, only first n_uniq, only fields[i].name) -dict_index_t::indexes<> (newly created) -dict_index_t::stat_n_diff_key_vals[] (only allocated, left uninitialized) -dict_index_t::stat_n_sample_sizes[] (only allocated, left uninitialized) -dict_index_t::stat_n_non_null_key_vals[] (only allocated, left uninitialized) -dict_index_t::magic_n -The returned object should be freed with dict_stats_table_clone_free() -when no longer needed. 
-@return incomplete table object */ -static -dict_table_t* -dict_stats_table_clone_create( -/*==========================*/ - const dict_table_t* table) /*!< in: table whose stats to copy */ -{ - size_t heap_size; - dict_index_t* index; - - /* Estimate the size needed for the table and all of its indexes */ - - heap_size = 0; - heap_size += sizeof(dict_table_t); - heap_size += strlen(table->name.m_name) + 1; - - for (index = dict_table_get_first_index(table); - index != NULL; - index = dict_table_get_next_index(index)) { - - if (dict_stats_should_ignore_index(index)) { - continue; - } - - ulint n_uniq = dict_index_get_n_unique(index); - - heap_size += sizeof(dict_index_t); - heap_size += strlen(index->name) + 1; - heap_size += n_uniq * sizeof(index->fields[0]); - for (ulint i = 0; i < n_uniq; i++) { - heap_size += strlen(index->fields[i].name) + 1; - } - heap_size += n_uniq * sizeof(index->stat_n_diff_key_vals[0]); - heap_size += n_uniq * sizeof(index->stat_n_sample_sizes[0]); - heap_size += n_uniq * sizeof(index->stat_n_non_null_key_vals[0]); - } - - /* Allocate the memory and copy the members */ - - mem_heap_t* heap; - - heap = mem_heap_create(heap_size); - - dict_table_t* t; - - t = (dict_table_t*) mem_heap_zalloc(heap, sizeof(*t)); - - t->stats_mutex_init(); - - MEM_CHECK_DEFINED(&table->id, sizeof(table->id)); - t->id = table->id; - - t->heap = heap; - - t->name.m_name = mem_heap_strdup(heap, table->name.m_name); - t->mdl_name.m_name = t->name.m_name; - - t->corrupted = table->corrupted; - - UT_LIST_INIT(t->indexes, &dict_index_t::indexes); -#ifdef BTR_CUR_HASH_ADAPT - UT_LIST_INIT(t->freed_indexes, &dict_index_t::indexes); -#endif /* BTR_CUR_HASH_ADAPT */ - - for (index = dict_table_get_first_index(table); - index != NULL; - index = dict_table_get_next_index(index)) { - - if (dict_stats_should_ignore_index(index)) { - continue; - } - - dict_index_t* idx; - - idx = (dict_index_t*) mem_heap_zalloc(heap, sizeof(*idx)); - - MEM_CHECK_DEFINED(&index->id, sizeof(index->id)); - idx->id = index->id; - - idx->name = mem_heap_strdup(heap, index->name); - - idx->table = t; - - idx->type = index->type; - - idx->to_be_dropped = 0; - - idx->online_status = ONLINE_INDEX_COMPLETE; - idx->set_committed(true); - - idx->n_uniq = index->n_uniq; - - idx->fields = (dict_field_t*) mem_heap_zalloc( - heap, idx->n_uniq * sizeof(idx->fields[0])); - - for (ulint i = 0; i < idx->n_uniq; i++) { - idx->fields[i].name = mem_heap_strdup( - heap, index->fields[i].name); - } - - /* hook idx into t->indexes */ - UT_LIST_ADD_LAST(t->indexes, idx); - - idx->stat_n_diff_key_vals = (ib_uint64_t*) mem_heap_zalloc( - heap, - idx->n_uniq * sizeof(idx->stat_n_diff_key_vals[0])); - - idx->stat_n_sample_sizes = (ib_uint64_t*) mem_heap_zalloc( - heap, - idx->n_uniq * sizeof(idx->stat_n_sample_sizes[0])); - - idx->stat_n_non_null_key_vals = (ib_uint64_t*) mem_heap_zalloc( - heap, - idx->n_uniq * sizeof(idx->stat_n_non_null_key_vals[0])); - ut_d(idx->magic_n = DICT_INDEX_MAGIC_N); - } - - ut_d(t->magic_n = DICT_TABLE_MAGIC_N); - - return(t); -} - -/*********************************************************************//** -Free the resources occupied by an object returned by -dict_stats_table_clone_create(). 
*/ -static -void -dict_stats_table_clone_free( -/*========================*/ - dict_table_t* t) /*!< in: dummy table object to free */ -{ - t->stats_mutex_destroy(); - mem_heap_free(t->heap); -} - /*********************************************************************//** Write all zeros (or 1 where it makes sense) into an index statistics members. The resulting stats correspond to an empty index. */ @@ -840,169 +680,6 @@ dict_stats_assert_initialized( } } -#define INDEX_EQ(i1, i2) \ - ((i1) != NULL \ - && (i2) != NULL \ - && (i1)->id == (i2)->id \ - && strcmp((i1)->name, (i2)->name) == 0) - -/*********************************************************************//** -Copy table and index statistics from one table to another, including index -stats. Extra indexes in src are ignored and extra indexes in dst are -initialized to correspond to an empty index. */ -static -void -dict_stats_copy( -/*============*/ - dict_table_t* dst, /*!< in/out: destination table */ - const dict_table_t* src, /*!< in: source table */ - bool reset_ignored_indexes) /*!< in: if true, set ignored indexes - to have the same statistics as if - the table was empty */ -{ - ut_ad(src->stats_mutex_is_owner()); - ut_ad(dst->stats_mutex_is_owner()); - - dst->stats_last_recalc = src->stats_last_recalc; - dst->stat_n_rows = src->stat_n_rows; - dst->stat_clustered_index_size = src->stat_clustered_index_size; - dst->stat_sum_of_other_index_sizes = src->stat_sum_of_other_index_sizes; - dst->stat_modified_counter = src->stat_modified_counter; - - dict_index_t* dst_idx; - dict_index_t* src_idx; - - for (dst_idx = dict_table_get_first_index(dst), - src_idx = dict_table_get_first_index(src); - dst_idx != NULL; - dst_idx = dict_table_get_next_index(dst_idx), - (src_idx != NULL - && (src_idx = dict_table_get_next_index(src_idx)))) { - - if (dict_stats_should_ignore_index(dst_idx)) { - if (reset_ignored_indexes) { - /* Reset index statistics for all ignored indexes, - unless they are FT indexes (these have no statistics)*/ - if (dst_idx->type & DICT_FTS) { - continue; - } - dict_stats_empty_index(dst_idx); - } else { - continue; - } - } - - if (!INDEX_EQ(src_idx, dst_idx)) { - for (src_idx = dict_table_get_first_index(src); - src_idx != NULL; - src_idx = dict_table_get_next_index(src_idx)) { - - if (INDEX_EQ(src_idx, dst_idx)) { - break; - } - } - } - - if (!INDEX_EQ(src_idx, dst_idx)) { - dict_stats_empty_index(dst_idx); - continue; - } - - ulint n_copy_el; - - if (dst_idx->n_uniq > src_idx->n_uniq) { - n_copy_el = src_idx->n_uniq; - /* Since src is smaller some elements in dst - will remain untouched by the following memmove(), - thus we init all of them here. */ - dict_stats_empty_index(dst_idx); - } else { - n_copy_el = dst_idx->n_uniq; - } - - memmove(dst_idx->stat_n_diff_key_vals, - src_idx->stat_n_diff_key_vals, - n_copy_el * sizeof(dst_idx->stat_n_diff_key_vals[0])); - - memmove(dst_idx->stat_n_sample_sizes, - src_idx->stat_n_sample_sizes, - n_copy_el * sizeof(dst_idx->stat_n_sample_sizes[0])); - - memmove(dst_idx->stat_n_non_null_key_vals, - src_idx->stat_n_non_null_key_vals, - n_copy_el * sizeof(dst_idx->stat_n_non_null_key_vals[0])); - - dst_idx->stat_index_size = src_idx->stat_index_size; - - dst_idx->stat_n_leaf_pages = src_idx->stat_n_leaf_pages; - } - - dst->stat_initialized = TRUE; -} - -/** Duplicate the stats of a table and its indexes. -This function creates a dummy dict_table_t object and copies the input -table's stats into it. 
The returned table object is not in the dictionary -cache and cannot be accessed by any other threads. In addition to the -members copied in dict_stats_table_clone_create() this function initializes -the following: -dict_table_t::stat_initialized -dict_table_t::stat_persistent -dict_table_t::stat_n_rows -dict_table_t::stat_clustered_index_size -dict_table_t::stat_sum_of_other_index_sizes -dict_table_t::stat_modified_counter -dict_index_t::stat_n_diff_key_vals[] -dict_index_t::stat_n_sample_sizes[] -dict_index_t::stat_n_non_null_key_vals[] -dict_index_t::stat_index_size -dict_index_t::stat_n_leaf_pages -The returned object should be freed with dict_stats_snapshot_free() -when no longer needed. -@param[in] table table whose stats to copy -@return incomplete table object */ -static -dict_table_t* -dict_stats_snapshot_create( - dict_table_t* table) -{ - dict_sys.lock(SRW_LOCK_CALL); - - dict_stats_assert_initialized(table); - - dict_table_t* t; - - t = dict_stats_table_clone_create(table); - - table->stats_mutex_lock(); - ut_d(t->stats_mutex_lock()); - - dict_stats_copy(t, table, false); - - ut_d(t->stats_mutex_unlock()); - table->stats_mutex_unlock(); - - t->stat_persistent = table->stat_persistent; - t->stats_auto_recalc = table->stats_auto_recalc; - t->stats_sample_pages = table->stats_sample_pages; - - dict_sys.unlock(); - - return(t); -} - -/*********************************************************************//** -Free the resources occupied by an object returned by -dict_stats_snapshot_create(). */ -static -void -dict_stats_snapshot_free( -/*=====================*/ - dict_table_t* t) /*!< in: dummy table object to free */ -{ - dict_stats_table_clone_free(t); -} - /** Statistics for one field of an index. */ struct index_field_stats_t { @@ -1278,7 +955,7 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index, rec = page_rec_get_next(cursor.page_cur.rec); const ulint n_core = index->n_core_fields; - if (rec && !page_rec_is_supremum(rec)) { + if (rec && rec != page_get_supremum_rec(page)) { not_empty_flag = 1; offsets_rec = rec_get_offsets(rec, index, offsets_rec, n_core, @@ -1290,10 +967,11 @@ btr_estimate_number_of_different_key_vals(dict_index_t* index, } } - while (!page_rec_is_supremum(rec)) { + while (rec != page_get_supremum_rec(page)) { ulint matched_fields; rec_t* next_rec = page_rec_get_next(rec); - if (!next_rec || page_rec_is_supremum(next_rec)) { + if (!next_rec + || next_rec == page_get_supremum_rec(page)) { total_external_size += btr_rec_get_externally_stored_len( rec, offsets_rec); @@ -1803,14 +1481,12 @@ dict_stats_analyze_index_level( /* If rec and prev_rec are on different pages, then prev_rec must have been copied, because we hold latch only on the page where rec resides. 
*/ - if (prev_rec != NULL - && page_align(rec) != page_align(prev_rec)) { + ut_ad(!prev_rec + || btr_pcur_get_page(&pcur) == page_align(prev_rec) + || prev_rec_is_copied); - ut_a(prev_rec_is_copied); - } - - rec_is_last_on_page = - page_rec_is_supremum(page_rec_get_next_const(rec)); + rec_is_last_on_page = page_rec_get_next_const(rec) + == page_get_supremum_rec(btr_pcur_get_page(&pcur)); /* increment the pages counter at the end of each page */ if (rec_is_last_on_page) { @@ -1827,7 +1503,8 @@ dict_stats_analyze_index_level( if (level == 0 && !srv_stats_include_delete_marked - && rec_get_deleted_flag(rec, page_rec_is_comp(rec))) { + && rec_get_deleted_flag( + rec, page_is_comp(btr_pcur_get_page(&pcur)))) { if (rec_is_last_on_page && !prev_rec_is_copied && prev_rec != NULL) { @@ -1992,34 +1669,23 @@ func_exit: mem_heap_free(heap); } - /************************************************************//** Gets the pointer to the next non delete-marked record on the page. If all subsequent records are delete-marked, then this function will return the supremum record. @return pointer to next non delete-marked record or pointer to supremum */ +template<bool comp> static const rec_t* -page_rec_get_next_non_del_marked( -/*=============================*/ - const rec_t* rec) /*!< in: pointer to record */ +page_rec_get_next_non_del_marked(const page_t *page, const rec_t *rec) { - const page_t *const page= page_align(rec); + ut_ad(!!page_is_comp(page) == comp); + ut_ad(page_align(rec) == page); - if (page_is_comp(page)) - { - for (rec= page_rec_get_next_low(rec, TRUE); - rec && rec_get_deleted_flag(rec, TRUE); - rec= page_rec_get_next_low(rec, TRUE)); - return rec ? rec : page + PAGE_NEW_SUPREMUM; - } - else - { - for (rec= page_rec_get_next_low(rec, FALSE); - rec && rec_get_deleted_flag(rec, FALSE); - rec= page_rec_get_next_low(rec, FALSE)); - return rec ? rec : page + PAGE_OLD_SUPREMUM; - } + for (rec= page_rec_next_get<comp>(page, rec); + rec && rec_get_deleted_flag(rec, comp); + rec= page_rec_next_get<comp>(page, rec)); + return rec ? rec : page + (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM); } /** Scan a page, reading records from left to right and counting the number @@ -2066,10 +1732,13 @@ dict_stats_scan_page( this memory heap should never be used. */ mem_heap_t* heap = NULL; ut_ad(!!n_core == page_is_leaf(page)); - const rec_t* (*get_next)(const rec_t*) + const rec_t* (*get_next)(const page_t*, const rec_t*) = !n_core || srv_stats_include_delete_marked - ? page_rec_get_next_const - : page_rec_get_next_non_del_marked; + ? (page_is_comp(page) + ? page_rec_next_get<true> : page_rec_next_get<false>) + : page_is_comp(page) + ? 
page_rec_get_next_non_del_marked<true> + : page_rec_get_next_non_del_marked<false>; const bool should_count_external_pages = n_external_pages != NULL; @@ -2077,9 +1746,9 @@ dict_stats_scan_page( *n_external_pages = 0; } - rec = get_next(page_get_infimum_rec(page)); + rec = get_next(page, page_get_infimum_rec(page)); - if (!rec || page_rec_is_supremum(rec)) { + if (!rec || rec == page_get_supremum_rec(page)) { /* the page is empty or contains only delete-marked records */ *n_diff = 0; *out_rec = NULL; @@ -2094,11 +1763,11 @@ dict_stats_scan_page( rec, offsets_rec); } - next_rec = get_next(rec); + next_rec = get_next(page, rec); *n_diff = 1; - while (next_rec && !page_rec_is_supremum(next_rec)) { + while (next_rec && next_rec != page_get_supremum_rec(page)) { ulint matched_fields; @@ -2139,7 +1808,7 @@ dict_stats_scan_page( rec, offsets_rec); } - next_rec = get_next(next_rec); + next_rec = get_next(page, next_rec); } /* offsets1,offsets2 should have been big enough */ @@ -2201,8 +1870,8 @@ dict_stats_analyze_index_below_cur( rec_offs_set_n_alloc(offsets2, size); rec = btr_cur_get_rec(cur); - page = page_align(rec); - ut_ad(!page_rec_is_leaf(rec)); + page = btr_cur_get_page(cur); + ut_ad(!page_is_leaf(page)); offsets_rec = rec_get_offsets(rec, index, offsets1, 0, ULINT_UNDEFINED, &heap); @@ -3170,7 +2839,7 @@ static dberr_t dict_stats_report_error(dict_table_t* table) } /** Save the table's statistics into the persistent statistics storage. -@param[in] table_orig table whose stats to save +@param[in] table table whose stats to save @param[in] only_for_index if this is non-NULL, then stats for indexes that are not equal to it will not be saved, if NULL, then all indexes' stats are saved @@ -3178,7 +2847,7 @@ are saved static dberr_t dict_stats_save( - dict_table_t* table_orig, + dict_table_t* table, const index_id_t* only_for_index) { pars_info_t* pinfo; @@ -3198,8 +2867,8 @@ dict_stats_save( return DB_READ_ONLY; } - if (!table_orig->is_readable()) { - return (dict_stats_report_error(table_orig)); + if (!table->is_readable()) { + return (dict_stats_report_error(table)); } THD* thd = current_thd; @@ -3237,8 +2906,6 @@ release_and_exit: goto release_and_exit; } - dict_table_t* table = dict_stats_snapshot_create(table_orig); - dict_fs2utf8(table->name.m_name, db_utf8, sizeof(db_utf8), table_utf8, sizeof(table_utf8)); const time_t now = time(NULL); @@ -3304,7 +2971,6 @@ free_and_exit: dict_sys.unlock(); unlocked_free_and_exit: trx->free(); - dict_stats_snapshot_free(table); dict_table_close(table_stats, false, thd, mdl_table); dict_table_close(index_stats, false, thd, mdl_index); return ret; @@ -4111,46 +3777,21 @@ dict_stats_update( goto transient; } - dict_table_t* t; - - /* Create a dummy table object with the same name and - indexes, suitable for fetching the stats into it. */ - t = dict_stats_table_clone_create(table); - - dberr_t err = dict_stats_fetch_from_ps(t); - - t->stats_last_recalc = table->stats_last_recalc; - t->stat_modified_counter = 0; + dberr_t err = dict_stats_fetch_from_ps(table); switch (err) { case DB_SUCCESS: - - table->stats_mutex_lock(); - /* t is localized to this thread so no need to - take stats mutex lock (limiting it to debug only) */ - ut_d(t->stats_mutex_lock()); - - /* Pass reset_ignored_indexes=true as parameter - to dict_stats_copy. 
This will cause statictics - for corrupted indexes to be set to empty values */ - dict_stats_copy(table, t, true); - - dict_stats_assert_initialized(table); - - ut_d(t->stats_mutex_unlock()); - table->stats_mutex_unlock(); - - dict_stats_table_clone_free(t); - return(DB_SUCCESS); case DB_STATS_DO_NOT_EXIST: - dict_stats_table_clone_free(t); - if (srv_read_only_mode) { goto transient; } - +#ifdef WITH_WSREP + if (wsrep_thd_skip_locking(current_thd)) { + goto transient; + } +#endif if (dict_stats_auto_recalc_is_enabled(table)) { return(dict_stats_update( table, @@ -4173,8 +3814,6 @@ dict_stats_update( goto transient; default: - dict_stats_table_clone_free(t); - if (innodb_table_stats_not_found == false && table->stats_error_printed == false) { ib::error() << "Error fetching persistent statistics" diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index b779d9148ec..e32e54abded 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -224,21 +224,12 @@ fil_validate_skip(void) } #endif /* UNIV_DEBUG */ -/** Look up a tablespace. -@param tablespace identifier -@return tablespace -@retval nullptr if not found */ -fil_space_t *fil_space_get_by_id(uint32_t id) +fil_space_t *fil_space_get_by_id(uint32_t id) noexcept { - fil_space_t* space; - - ut_ad(fil_system.is_initialised()); - mysql_mutex_assert_owner(&fil_system.mutex); - - HASH_SEARCH(hash, &fil_system.spaces, id, - fil_space_t*, space,, space->id == id); - - return(space); + ut_ad(fil_system.is_initialised()); + mysql_mutex_assert_owner(&fil_system.mutex); + return fil_system.spaces.cell_get(id)->find + (&fil_space_t::hash, [id](const fil_space_t *s) { return s->id == id; }); } /** Look up a tablespace. @@ -828,7 +819,7 @@ inline pfs_os_file_t fil_node_t::close_to_free(bool detach_handle) pfs_os_file_t fil_system_t::detach(fil_space_t *space, bool detach_handle) { mysql_mutex_assert_owner(&fil_system.mutex); - HASH_DELETE(fil_space_t, hash, &spaces, space->id, space); + spaces.cell_get(space->id)->remove(*space, &fil_space_t::hash); if (space->is_in_unflushed_spaces) { @@ -995,9 +986,15 @@ fil_space_t *fil_space_t::create(uint32_t id, uint32_t flags, DBUG_EXECUTE_IF("fil_space_create_failure", return(NULL);); + fil_space_t** after = reinterpret_cast( + &fil_system.spaces.cell_get(id)->node); + for (; *after; after = &(*after)->hash) { + ut_a((*after)->id != id); + } + /* FIXME: if calloc() is defined as an inline function that calls memset() or bzero(), then GCC 6 -flifetime-dse can optimize it away */ - space= new (ut_zalloc_nokey(sizeof(*space))) fil_space_t; + *after = space = new (ut_zalloc_nokey(sizeof(*space))) fil_space_t; space->id = id; @@ -1021,20 +1018,6 @@ fil_space_t *fil_space_t::create(uint32_t id, uint32_t flags, space->latch.SRW_LOCK_INIT(fil_space_latch_key); - if (const fil_space_t *old_space = fil_space_get_by_id(id)) { - ib::error() << "Trying to add tablespace with id " << id - << " to the cache, but tablespace '" - << (old_space->chain.start - ? old_space->chain.start->name - : "") - << "' already exists in the cache!"; - space->~fil_space_t(); - ut_free(space); - return(NULL); - } - - HASH_INSERT(fil_space_t, hash, &fil_system.spaces, id, space); - if (opened) fil_system.add_opened_last_to_space_list(space); else @@ -2616,7 +2599,7 @@ fil_ibd_load(uint32_t space_id, const char *filename, fil_space_t *&space) if (space) { sql_print_information("InnoDB: Ignoring data file '%s'" - " with space ID " ULINTPF + " with space ID %" PRIu32 ". 
Another data file called %s" " exists" " with the same space ID.", diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 7bb12a2d79a..97a3ca33b4d 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -447,8 +447,9 @@ Returns page offset of the first page in extent described by a descriptor. static uint32_t xdes_get_offset(const xdes_t *descr) { ut_ad(descr); - return page_get_page_no(page_align(descr)) + - uint32_t(((page_offset(descr) - XDES_ARR_OFFSET) / XDES_SIZE) * + const page_t *page= page_align(descr); + return page_get_page_no(page) + + uint32_t(((descr - page - XDES_ARR_OFFSET) / XDES_SIZE) * FSP_EXTENT_SIZE); } @@ -1509,7 +1510,8 @@ static dberr_t fsp_free_seg_inode(fil_space_t *space, fseg_inode_t *inode, return err; } - mtr->memset(iblock, page_offset(inode) + FSEG_ID, FSEG_INODE_SIZE, 0); + mtr->memset(iblock, inode - iblock->page.frame + FSEG_ID, + FSEG_INODE_SIZE, 0); if (ULINT_UNDEFINED != fsp_seg_inode_page_find_used(iblock->page.frame, physical_size)) @@ -1781,7 +1783,8 @@ page_alloc: } mtr->write<2>(*block, byte_offset + FSEG_HDR_OFFSET - + block->page.frame, page_offset(inode)); + + block->page.frame, + uintptr_t(inode - iblock->page.frame)); mtr->write<4>(*block, byte_offset + FSEG_HDR_PAGE_NO + block->page.frame, iblock->page.id().page_no()); @@ -1923,11 +1926,12 @@ fseg_alloc_free_extent( mtr_t* mtr, dberr_t* err) { - ut_ad(!((page_offset(inode) - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); + ut_ad(iblock->page.frame == page_align(inode)); + ut_ad(!((inode - iblock->page.frame - FSEG_ARR_OFFSET) % FSEG_INODE_SIZE)); ut_ad(!memcmp(FSEG_MAGIC_N_BYTES, FSEG_MAGIC_N + inode, 4)); ut_d(space->modify_check(*mtr)); - if (UNIV_UNLIKELY(page_offset(inode) < FSEG_ARR_OFFSET)) + if (UNIV_UNLIKELY(uintptr_t(inode - iblock->page.frame) < FSEG_ARR_OFFSET)) { corrupted: *err= DB_CORRUPTION; @@ -2651,7 +2655,7 @@ dberr_t fseg_free_page(fseg_header_t *seg_header, fil_space_t *space, mtr->x_lock_space(space); DBUG_PRINT("fseg_free_page", - ("space_id: " ULINTPF ", page_no: %u", space->id, offset)); + ("space_id: %" PRIu32 ", page_no: %" PRIu32, space->id, offset)); dberr_t err; if (fseg_inode_t *seg_inode= fseg_inode_try_get(seg_header, @@ -2822,14 +2826,14 @@ the segment. @param inode index node information @param iblock page where segment header are placed @param mtr mini-transaction -@param hdr_page_no segment header page number field +@param hdr_page segment header page @param ahi adaptive hash index @return DB_SUCCESS_LOCKED_REC when freeing wasn't completed @return DB_SUCCESS or other error code when freeing was completed */ static dberr_t fseg_free_step_low(fil_space_t *space, fseg_inode_t *inode, buf_block_t *iblock, mtr_t *mtr, - const byte *hdr_page_no + const page_t *hdr_page #ifdef BTR_CUR_HASH_ADAPT , bool ahi=false #endif /* BTR_CUR_HASH_ADAPT */ @@ -2854,14 +2858,14 @@ dberr_t fseg_free_step_low(fil_space_t *space, fseg_inode_t *inode, exist in the array then free the file segment inode */ ulint n = fseg_find_last_used_frag_page_slot(inode); if (UNIV_UNLIKELY(n == ULINT_UNDEFINED)) - return hdr_page_no + return hdr_page ? DB_SUCCESS : fsp_free_seg_inode(space, inode, iblock, mtr); - if (hdr_page_no && !memcmp_aligned<2>(hdr_page_no + FIL_PAGE_OFFSET, - inode + FSEG_FRAG_ARR + - n * FSEG_FRAG_SLOT_SIZE, 4)) - /* header_page_no is only passed by fseg_free_step_not_header(). 
+ if (hdr_page && + !memcmp_aligned<2>(hdr_page + FIL_PAGE_OFFSET, + inode + FSEG_FRAG_ARR + n * FSEG_FRAG_SLOT_SIZE, 4)) + /* hdr_page is only passed by fseg_free_step_not_header(). In that case, the header page must be preserved, to be freed when we're finally called by fseg_free_step(). */ return DB_SUCCESS; @@ -2875,38 +2879,23 @@ dberr_t fseg_free_step_low(fil_space_t *space, fseg_inode_t *inode, if (err != DB_SUCCESS) return err; buf_page_free(space, page_no, mtr); - if (!hdr_page_no && + if (!hdr_page && fseg_find_last_used_frag_page_slot(inode) == ULINT_UNDEFINED) return fsp_free_seg_inode(space, inode, iblock, mtr); return DB_SUCCESS_LOCKED_REC; } -/** Frees part of a segment. This function can be used to free -a segment by repeatedly calling this function in different -mini-transactions. Doing the freeing in a single mini-transaction -might result in too big a mini-transaction. -@param header segment header; NOTE: if the header resides on first - page of the frag list of the segment, this pointer - becomes obsolete after the last freeing step -@param mtr mini-transaction -@param ahi Drop the adaptive hash index -@return whether the freeing was completed */ -bool -fseg_free_step( - fseg_header_t* header, - mtr_t* mtr +bool fseg_free_step(buf_block_t *block, size_t header, mtr_t *mtr #ifdef BTR_CUR_HASH_ADAPT - ,bool ahi + , bool ahi #endif /* BTR_CUR_HASH_ADAPT */ - ) + ) noexcept { fseg_inode_t* inode; - const uint32_t space_id = page_get_space_id(page_align(header)); - const uint32_t header_page = page_get_page_no(page_align(header)); - - fil_space_t* space = mtr->x_lock_space(space_id); - xdes_t* descr = xdes_get_descriptor(space, header_page, mtr); + const page_id_t header_id{block->page.id()}; + fil_space_t* space = mtr->x_lock_space(header_id.space()); + xdes_t* descr = xdes_get_descriptor(space, header_id.page_no(), mtr); if (!descr) { return true; @@ -2916,14 +2905,17 @@ fseg_free_step( freed yet */ if (UNIV_UNLIKELY(xdes_is_free(descr, - header_page & (FSP_EXTENT_SIZE - 1)))) { + header_id.page_no() + & (FSP_EXTENT_SIZE - 1)))) { /* Some corruption was detected: stop the freeing in order to prevent a crash. 
*/ return true; } buf_block_t* iblock; const ulint zip_size = space->zip_size(); - inode = fseg_inode_try_get(header, space_id, zip_size, mtr, &iblock); + inode = fseg_inode_try_get(block->page.frame + header, + header_id.space(), zip_size, + mtr, &iblock); if (!inode || space->is_stopping()) { return true; } @@ -2939,33 +2931,31 @@ fseg_free_step( ) != DB_SUCCESS_LOCKED_REC; } -bool -fseg_free_step_not_header( - fseg_header_t* header, - mtr_t* mtr +bool fseg_free_step_not_header(buf_block_t *block, size_t header, mtr_t *mtr #ifdef BTR_CUR_HASH_ADAPT - ,bool ahi + , bool ahi #endif /* BTR_CUR_HASH_ADAPT */ - ) + ) noexcept { - const page_t *page = page_align(header); - const uint32_t space_id = page_get_space_id(page); - ut_ad(mtr->is_named_space(space_id)); + fseg_inode_t* inode; + const page_id_t header_id{block->page.id()}; + ut_ad(mtr->is_named_space(header_id.space())); - fil_space_t* space = mtr->x_lock_space(space_id); - buf_block_t* iblock; + fil_space_t* space = mtr->x_lock_space(header_id.space()); + buf_block_t* iblock; - fseg_inode_t *inode = fseg_inode_try_get(header, space_id, - space->zip_size(), - mtr, &iblock); + inode = fseg_inode_try_get(block->page.frame + header, + header_id.space(), space->zip_size(), + mtr, &iblock); if (space->is_stopping()) { return true; } - if (!inode) { - ib::warn() << "Double free of " - << page_id_t(space_id, - page_get_page_no(page)); + if (UNIV_UNLIKELY(!inode)) { + sql_print_warning("InnoDB: Double free of page " UINT32PF + " in file %s", + header_id.page_no(), + space->chain.start->name); return true; } @@ -2973,7 +2963,7 @@ fseg_free_step_not_header( fil_block_check_type(*iblock, FIL_PAGE_INODE, mtr); } - return fseg_free_step_low(space, inode, iblock, mtr, page + return fseg_free_step_low(space, inode, iblock, mtr, block->page.frame #ifdef BTR_CUR_HASH_ADAPT , ahi #endif /* BTR_CUR_HASH_ADAPT */ @@ -3044,8 +3034,9 @@ static void fseg_print_low(const fseg_inode_t *inode) ulint page_no; ib_id_t seg_id; - space = page_get_space_id(page_align(inode)); - page_no = page_get_page_no(page_align(inode)); + const page_t* inode_page = page_align(inode); + space = page_get_space_id(inode_page); + page_no = page_get_page_no(inode_page); reserved = fseg_n_reserved_pages_low(inode, &used); diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index e8fb7bbd5e4..5c291b068ff 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -3190,7 +3190,7 @@ fts_fetch_doc_from_rec( parser = get_doc->index_cache->index->parser; clust_rec = btr_pcur_get_rec(pcur); - ut_ad(!page_rec_is_comp(clust_rec) + ut_ad(!page_is_comp(btr_pcur_get_page(pcur)) || rec_get_status(clust_rec) == REC_STATUS_ORDINARY); for (ulint i = 0; i < index->n_fields; i++) { diff --git a/storage/innobase/fut/fut0lst.cc b/storage/innobase/fut/fut0lst.cc index 84b38bc6cf4..ff876801242 100644 --- a/storage/innobase/fut/fut0lst.cc +++ b/storage/innobase/fut/fut0lst.cc @@ -80,13 +80,13 @@ void flst_write_addr(const buf_block_t &block, byte *faddr, @param[in,out] mtr mini-transaction */ static void flst_zero_both(const buf_block_t& b, byte *addr, mtr_t *mtr) { + const ulint boffset= ulint(addr - b.page.frame); if (mach_read_from_4(addr + FIL_ADDR_PAGE) != FIL_NULL) - mtr->memset(&b, ulint(addr - b.page.frame) + FIL_ADDR_PAGE, 4, 0xff); + mtr->memset(&b, boffset + FIL_ADDR_PAGE, 4, 0xff); mtr->write<2,mtr_t::MAYBE_NOP>(b, addr + FIL_ADDR_BYTE, 0U); /* Initialize the other address by (MEMMOVE|0x80,offset,FIL_ADDR_SIZE,source) which is 4 bytes, or 
less than FIL_ADDR_SIZE. */ memcpy(addr + FIL_ADDR_SIZE, addr, FIL_ADDR_SIZE); - const uint16_t boffset= page_offset(addr); mtr->memmove(b, boffset + FIL_ADDR_SIZE, boffset, FIL_ADDR_SIZE); } diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc index f75eab07cf3..8a2d04b6731 100644 --- a/storage/innobase/gis/gis0rtree.cc +++ b/storage/innobase/gis/gis0rtree.cc @@ -220,7 +220,7 @@ rtr_update_mbr_field( rec_offs* offsets2; rec = btr_cur_get_rec(cursor); - page = page_align(rec); + page = btr_cur_get_page(cursor); rec_info = rec_get_info_bits(rec, rec_offs_comp(offsets)); @@ -1549,7 +1549,7 @@ rtr_page_copy_rec_list_end_no_locks( return DB_CORRUPTION; } - ut_a(page_is_comp(new_page) == page_rec_is_comp(rec)); + ut_a(page_is_comp(new_page) == page_is_comp(block->page.frame)); ut_a(mach_read_from_2(new_page + srv_page_size - 10) == (ulint) (page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM)); diff --git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc index d084db25996..20fdfb49e7d 100644 --- a/storage/innobase/gis/gis0sea.cc +++ b/storage/innobase/gis/gis0sea.cc @@ -1292,11 +1292,13 @@ rtr_page_get_father_block( que_thr_t* thr, /*!< in/out: query thread */ mtr_t* mtr) /*!< in/out: mtr */ { - rec_t *rec= - page_rec_get_next(page_get_infimum_rec(cursor->block()->page.frame)); + const page_t *const page= cursor->block()->page.frame; + const rec_t *rec= page_is_comp(page) + ? page_rec_next_get(page, page + PAGE_NEW_INFIMUM) + : page_rec_next_get(page, page + PAGE_OLD_INFIMUM); if (!rec) return nullptr; - cursor->page_cur.rec= rec; + cursor->page_cur.rec= const_cast(rec); return rtr_page_get_father_node_ptr(offsets, heap, sea_cur, cursor, thr, mtr); } diff --git a/storage/innobase/ha/ha0storage.cc b/storage/innobase/ha/ha0storage.cc index acde71b0557..ef7cc78a4dd 100644 --- a/storage/innobase/ha/ha0storage.cc +++ b/storage/innobase/ha/ha0storage.cc @@ -29,46 +29,6 @@ Created September 22, 2007 Vasil Dimov #include "ha0storage.h" #include "hash0hash.h" #include "mem0mem.h" -#include "ut0rnd.h" - -/*******************************************************************//** -Retrieves a data from a storage. If it is present, a pointer to the -stored copy of data is returned, otherwise NULL is returned. */ -static -const void* -ha_storage_get( -/*===========*/ - ha_storage_t* storage, /*!< in: hash storage */ - const void* data, /*!< in: data to check for */ - ulint data_len) /*!< in: data length */ -{ - ha_storage_node_t* node; - ulint fold; - - /* avoid repetitive calls to ut_fold_binary() in the HASH_SEARCH - macro */ - fold = ut_fold_binary(static_cast(data), data_len); - -#define IS_FOUND \ - node->data_len == data_len && memcmp(node->data, data, data_len) == 0 - - HASH_SEARCH( - next, /* node->"next" */ - &storage->hash, /* the hash table */ - fold, /* key */ - ha_storage_node_t*, /* type of node->next */ - node, /* auxiliary variable */ - , /* assertion */ - IS_FOUND); /* search criteria */ - - if (node == NULL) { - - return(NULL); - } - /* else */ - - return(node->data); -} /*******************************************************************//** Copies data into the storage and returns a pointer to the copy. 
If the @@ -87,54 +47,30 @@ ha_storage_put_memlim( ulint data_len, /*!< in: data length */ ulint memlim) /*!< in: memory limit to obey */ { - void* raw; - ha_storage_node_t* node; - const void* data_copy; - ulint fold; + const uint32_t fold= my_crc32c(0, data, data_len); + ha_storage_node_t** after = reinterpret_cast<ha_storage_node_t**> + (&storage->hash.cell_get(fold)->node); + for (; *after; after= &(*after)->next) + if ((*after)->data_len == data_len && + !memcmp((*after)->data, data, data_len)) + return (*after)->data; - /* check if data chunk is already present */ - data_copy = ha_storage_get(storage, data, data_len); - if (data_copy != NULL) { + /* not present */ - return(data_copy); - } + /* check if we are allowed to allocate data_len bytes */ + if (memlim > 0 && ha_storage_get_size(storage) + data_len > memlim) + return nullptr; - /* not present */ - - /* check if we are allowed to allocate data_len bytes */ - if (memlim > 0 - && ha_storage_get_size(storage) + data_len > memlim) { - - return(NULL); - } - - /* we put the auxiliary node struct and the data itself in one - continuous block */ - raw = mem_heap_alloc(storage->heap, - sizeof(ha_storage_node_t) + data_len); - - node = (ha_storage_node_t*) raw; - data_copy = (byte*) raw + sizeof(*node); - - memcpy((byte*) raw + sizeof(*node), data, data_len); - - node->data_len = data_len; - node->data = data_copy; - - /* avoid repetitive calls to ut_fold_binary() in the HASH_INSERT - macro */ - fold = ut_fold_binary(static_cast<const byte*>(data), data_len); - - HASH_INSERT( - ha_storage_node_t, /* type used in the hash chain */ - next, /* node->"next" */ - &storage->hash, /* the hash table */ - fold, /* key */ - node); /* add this data to the hash */ - - /* the output should not be changed because it will spoil the - hash table */ - return(data_copy); + /* we put the auxiliary node struct and the data itself in one + continuous block */ + ha_storage_node_t *node= static_cast<ha_storage_node_t*> + (mem_heap_alloc(storage->heap, sizeof *node + data_len)); + node->data_len= data_len; + node->data= &node[1]; + node->next= nullptr; + memcpy(const_cast<void*>(node->data), data, data_len); + *after= node; + return node->data; } #ifdef UNIV_COMPILE_TEST_FUNCS diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 6928f6658cb..ce209c1c73e 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1939,7 +1939,7 @@ static int innodb_check_version(handlerton *hton, const char *path, const trx_id_t trx_id= table->def_trx_id; DBUG_ASSERT(trx_id <= create_id); dict_table_close(table); - DBUG_PRINT("info", ("create_id: %llu trx_id: %llu", create_id, trx_id)); + DBUG_PRINT("info", ("create_id: %llu trx_id: %" PRIu64, create_id, trx_id)); DBUG_RETURN(create_id != trx_id); } else @@ -3739,7 +3739,7 @@ compression_algorithm_is_not_loaded(ulong compression_algorithm, myf flags) if (is_loaded[compression_algorithm]) return 0; - my_printf_error(HA_ERR_UNSUPPORTED, "InnoDB: compression algorithm %s (%u)" + my_printf_error(HA_ERR_UNSUPPORTED, "InnoDB: compression algorithm %s (%lu)" " is not available. 
Please, load the corresponding provider plugin.", flags, page_compression_algorithms[compression_algorithm], compression_algorithm); return 1; @@ -3982,7 +3982,7 @@ static int innodb_init_params() "InnoDB: innodb_open_files=%lu is not greater " "than the number of system tablespace files, " "temporary tablespace files, " - "innodb_undo_tablespaces=%lu; adjusting " + "innodb_undo_tablespaces=%u; adjusting " "to innodb_open_files=%zu", innobase_open_files, srv_undo_tablespaces, min_open_files_limit); @@ -5655,15 +5655,15 @@ innobase_build_v_templ( } /** Check consistency between .frm indexes and InnoDB indexes. -@param[in] table table object formed from .frm @param[in] ib_table InnoDB table definition @retval true if not errors were found */ -static bool -check_index_consistency(const TABLE* table, const dict_table_t* ib_table) +bool +ha_innobase::check_index_consistency(const dict_table_t* ib_table) noexcept { ulint mysql_num_index = table->s->keys; ulint ib_num_index = UT_LIST_GET_LEN(ib_table->indexes); bool ret = true; + ulint last_unique = 0; /* If there exists inconsistency between MySQL and InnoDB dictionary (metadata) information, the number of index defined in MySQL @@ -5698,8 +5698,21 @@ check_index_consistency(const TABLE* table, const dict_table_t* ib_table) ret = false; goto func_exit; } - } + if (index->is_unique()) { + ulint i = 0; + while ((index = UT_LIST_GET_PREV(indexes, index))) i++; + /* Check if any unique index in InnoDB + dictionary is re-ordered compared to + the index in .frm */ + if (last_unique > i) { + m_int_table_flags + |= HA_DUPLICATE_KEY_NOT_IN_ORDER; + } + + last_unique = i; + } + } func_exit: return ret; } @@ -5954,7 +5967,7 @@ ha_innobase::open(const char* name, int, uint) ib_table->lock_mutex_unlock(); } - if (!check_index_consistency(table, ib_table)) { + if (!check_index_consistency(ib_table)) { sql_print_error("InnoDB indexes are inconsistent with what " "defined in .frm for table %s", name); @@ -11435,7 +11448,7 @@ create_table_info_t::check_table_options() push_warning_printf( m_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, - "InnoDB: invalid PAGE_COMPRESSION_LEVEL = %lu." + "InnoDB: invalid PAGE_COMPRESSION_LEVEL = %llu." " Valid values are [1, 2, 3, 4, 5, 6, 7, 8, 9]", options->page_compression_level); return "PAGE_COMPRESSION_LEVEL"; @@ -18560,7 +18573,7 @@ static void innodb_log_file_size_update(THD *thd, st_mysql_sys_var*, *static_cast(save) < log_sys.buf_size) my_printf_error(ER_WRONG_ARGUMENTS, "innodb_log_file_size must be at least" - " innodb_log_buffer_size=%zu", MYF(0), log_sys.buf_size); + " innodb_log_buffer_size=%u", MYF(0), log_sys.buf_size); else { switch (log_sys.resize_start(*static_cast(save))) { @@ -21208,7 +21221,7 @@ ib_foreign_warn(trx_t* trx, /*!< in: trx */ if (trx && trx->mysql_thd) { THD* thd = (THD*)trx->mysql_thd; - push_warning_printf( + push_warning( thd, Sql_condition::WARN_LEVEL_WARN, uint(convert_error_code_to_mysql(error, 0, thd)), buf); } diff --git a/storage/innobase/handler/ha_innodb.h b/storage/innobase/handler/ha_innodb.h index ef7b067206a..4f9a3c64665 100644 --- a/storage/innobase/handler/ha_innodb.h +++ b/storage/innobase/handler/ha_innodb.h @@ -439,6 +439,12 @@ public: const KEY_PART_INFO& old_part, const KEY_PART_INFO& new_part) const override; + /** Check consistency between .frm indexes and InnoDB indexes + Set HA_DUPLICATE_KEY_NOT_IN_ORDER if multiple unique indexes + are not in the correct order. 
+ @param ib_table InnoDB table definition + @retval true if no errors were found */ + bool check_index_consistency(const dict_table_t* ib_table) noexcept; protected: bool can_convert_string(const Field_string* field, diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index 67a89f9402d..fe716157b32 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -366,16 +366,27 @@ ibuf_insert_to_index_page( ut_ad(!block->index); #endif /* BTR_CUR_HASH_ADAPT */ ut_ad(mtr->is_named_space(block->page.id().space())); + const auto comp = page_is_comp(page); if (UNIV_UNLIKELY(index->table->not_redundant() != !!page_is_comp(page))) { return DB_CORRUPTION; } - rec = page_rec_get_next(page_get_infimum_rec(page)); - - if (!rec || page_rec_is_supremum(rec)) { - return DB_CORRUPTION; + if (comp) { + rec = const_cast( + page_rec_next_get(page, + page + PAGE_NEW_INFIMUM)); + if (!rec || rec == page + PAGE_NEW_SUPREMUM) { + return DB_CORRUPTION; + } + } else { + rec = const_cast( + page_rec_next_get(page, + page + PAGE_OLD_INFIMUM)); + if (!rec || rec == page + PAGE_OLD_SUPREMUM) { + return DB_CORRUPTION; + } } if (!rec_n_fields_is_sane(index, rec, entry)) { @@ -787,7 +798,8 @@ static dberr_t ibuf_merge(fil_space_t *space, btr_cur_t *cur, mtr_t *mtr) { page_header_reset_last_insert(block, mtr); page_update_max_trx_id(block, buf_block_get_page_zip(block), - page_get_max_trx_id(page_align(rec)), mtr); + page_get_max_trx_id(btr_cur_get_page(cur)), + mtr); dict_index_t *index; mem_heap_t *heap = mem_heap_create(512); dtuple_t *entry= ibuf_entry_build(rec, not_redundant, n_fields, diff --git a/storage/innobase/include/btr0btr.h b/storage/innobase/include/btr0btr.h index 132cc4c654c..7a4e9ca19cd 100644 --- a/storage/innobase/include/btr0btr.h +++ b/storage/innobase/include/btr0btr.h @@ -264,20 +264,19 @@ dberr_t btr_page_reorganize(page_cur_t *cursor, mtr_t *mtr) MY_ATTRIBUTE((nonnull, warn_unused_result)); /** Decide if the page should be split at the convergence point of inserts converging to the left. -@param[in] cursor insert position +@param cursor insert position @return the first record to be moved to the right half page -@retval NULL if no split is recommended */ -rec_t* btr_page_get_split_rec_to_left(const btr_cur_t* cursor); +@retval nullptr if no split is recommended */ +rec_t *btr_page_get_split_rec_to_left(const btr_cur_t *cursor) noexcept; /** Decide if the page should be split at the convergence point of inserts converging to the right. -@param[in] cursor insert position -@param[out] split_rec if split recommended, the first record - on the right half page, or - NULL if the to-be-inserted record - should be first +@param cursor insert position +@param split_rec if split recommended, the first record on the right +half page, or nullptr if the to-be-inserted record should be first @return whether split is recommended */ bool -btr_page_get_split_rec_to_right(const btr_cur_t* cursor, rec_t** split_rec); +btr_page_get_split_rec_to_right(const btr_cur_t *cursor, rec_t **split_rec) + noexcept; /*************************************************************//** Splits an index page to halves and inserts the tuple. It is assumed @@ -328,7 +327,7 @@ inline void btr_set_min_rec_mark(rec_t *rec, const buf_block_t &block, ut_ad(!page_is_leaf(block.page.frame)); ut_ad(has_prev == page_has_prev(block.page.frame)); - rec-= page_rec_is_comp(rec) ? REC_NEW_INFO_BITS : REC_OLD_INFO_BITS; + rec-= page_is_comp(block.page.frame) ?
REC_NEW_INFO_BITS : REC_OLD_INFO_BITS; if (block.page.zip.data) /* This flag is computed from other contents on a ROW_FORMAT=COMPRESSED @@ -339,11 +338,11 @@ inline void btr_set_min_rec_mark(rec_t *rec, const buf_block_t &block, } /** Seek to the parent page of a B-tree page. -@param[in,out] mtr mini-transaction -@param[in,out] cursor cursor pointing to the x-latched parent page +@param mtr mini-transaction +@param cursor cursor pointing to the x-latched parent page @return whether the cursor was successfully positioned */ -bool btr_page_get_father(mtr_t* mtr, btr_cur_t* cursor) - MY_ATTRIBUTE((nonnull,warn_unused_result)); +bool btr_page_get_father(mtr_t *mtr, btr_cur_t *cursor) noexcept + MY_ATTRIBUTE((nonnull,warn_unused_result)); #ifdef UNIV_DEBUG /************************************************************//** Checks that the node pointer to a page is appropriate. @@ -491,15 +490,15 @@ btr_print_index( Checks the size and number of fields in a record based on the definition of the index. @return TRUE if ok */ -ibool +bool btr_index_rec_validate( /*===================*/ - const rec_t* rec, /*!< in: index record */ + const page_cur_t& cur, /*!< in: index record */ const dict_index_t* index, /*!< in: index */ - ibool dump_on_error) /*!< in: TRUE if the function + bool dump_on_error) /*!< in: true if the function should print hex dump of record and page on error */ - MY_ATTRIBUTE((warn_unused_result)); + noexcept MY_ATTRIBUTE((warn_unused_result)); /**************************************************************//** Checks the consistency of an index tree. @return DB_SUCCESS if ok, error code if not */ diff --git a/storage/innobase/include/btr0cur.h b/storage/innobase/include/btr0cur.h index c8c8269fe60..6b6aaceb3d9 100644 --- a/storage/innobase/include/btr0cur.h +++ b/storage/innobase/include/btr0cur.h @@ -705,10 +705,6 @@ struct btr_cur_t { ulint fold; /*!< fold value used in the search if flag is BTR_CUR_HASH */ /* @} */ - btr_path_t* path_arr; /*!< in estimating the number of - rows in range, we store in this array - information of the path through - the tree */ rtr_info_t* rtr_info; /*!< rtree search info */ btr_cur_t() { memset((void*) this, 0, sizeof *this); } diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 32624424f16..da1a4001055 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -185,11 +185,7 @@ buf_block_t *buf_page_try_get(const page_id_t page_id, mtr_t *mtr); /** Get read access to a compressed page (usually of type FIL_PAGE_TYPE_ZBLOB or FIL_PAGE_TYPE_ZBLOB2). -The page must be released with unfix(). -NOTE: the page is not protected by any latch. Mutual exclusion has to -be implemented at a higher level. In other words, all possible -accesses to a given page through this function must be protected by -the same set of mutexes or latches. +The page must be released with s_unlock(). 
@param page_id page identifier @return pointer to the block, s-latched */ buf_page_t *buf_page_get_zip(const page_id_t page_id); @@ -1530,13 +1526,10 @@ public: return 1 + latches + empty_slots + h; } private: - /** @return the hash value before any ELEMENTS_PER_LATCH padding */ - static ulint hash(ulint fold, ulint n) { return ut_hash_ulint(fold, n); } - /** @return the index of an array element */ - static ulint calc_hash(ulint fold, ulint n_cells) + static ulint calc_hash(ulint fold, ulint n_cells) noexcept { - return pad(hash(fold, n_cells)); + return pad(fold % n_cells); } public: /** @return the latch covering a hash table chain */ diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h index 68a4269e59c..78537606f2d 100644 --- a/storage/innobase/include/buf0rea.h +++ b/storage/innobase/include/buf0rea.h @@ -30,8 +30,9 @@ Created 11/5/1995 Heikki Tuuri /** Read a page synchronously from a file. buf_page_t::read_complete() will be invoked on read completion. -@param page_id page id -@param chain buf_pool.page_hash cell for page_id +@param page_id page identifier +@param chain buf_pool.page_hash cell for page_id +@param unzip whether to decompress ROW_FORMAT=COMPRESSED pages @retval DB_SUCCESS if the page was read and is not corrupted @retval DB_SUCCESS_LOCKED_REC if the page was not read @retval DB_PAGE_CORRUPTED if page based on checksum check is corrupted, @@ -39,7 +40,8 @@ will be invoked on read completion. after decryption normal page checksum does not match. @retval DB_TABLESPACE_DELETED if tablespace .ibd file is missing */ dberr_t buf_read_page(const page_id_t page_id, - buf_pool_t::hash_chain &chain); + buf_pool_t::hash_chain &chain, bool unzip= true) + noexcept; /** High-level function which reads a page asynchronously from a file to the buffer buf_pool if it is not already there. Sets the io_fix flag and sets diff --git a/storage/innobase/include/dict0dict.h b/storage/innobase/include/dict0dict.h index 910fe884415..67742b5cb24 100644 --- a/storage/innobase/include/dict0dict.h +++ b/storage/innobase/include/dict0dict.h @@ -395,15 +395,6 @@ dict_index_remove_from_cache( dict_index_t* index); /**********************************************************************//** -Change the id of a table object in the dictionary cache. This is used in -DISCARD TABLESPACE. */ -void -dict_table_change_id_in_cache( -/*==========================*/ - dict_table_t* table, /*!< in/out: table object already in cache */ - table_id_t new_id) /*!< in: new id to set */ - MY_ATTRIBUTE((nonnull)); -/**********************************************************************//** Removes a foreign constraint struct from the dictionary cache. 
*/ void dict_foreign_remove_from_cache( @@ -1375,7 +1366,7 @@ public: static const char fatal_msg[]; /** @return a new temporary table ID */ - table_id_t acquire_temporary_table_id() + table_id_t acquire_temporary_table_id() noexcept { return temp_table_id.fetch_add(1, std::memory_order_relaxed); } @@ -1385,55 +1376,32 @@ public: @return temporary table @retval nullptr if the table does not exist (should only happen during the rollback of CREATE...SELECT) */ - dict_table_t *acquire_temporary_table(table_id_t id) - { - ut_ad(frozen()); - dict_table_t *table; - ulint fold = ut_fold_ull(id); - HASH_SEARCH(id_hash, &temp_id_hash, fold, dict_table_t*, table, - ut_ad(table->cached), table->id == id); - if (UNIV_LIKELY(table != nullptr)) - { - DBUG_ASSERT(table->is_temporary()); - DBUG_ASSERT(table->id >= DICT_HDR_FIRST_ID); - table->acquire(); - } - return table; - } + dict_table_t *acquire_temporary_table(table_id_t id) const noexcept; /** Look up a persistent table. @param id table ID @return table @retval nullptr if not cached */ - dict_table_t *find_table(table_id_t id) - { - ut_ad(frozen()); - dict_table_t *table; - ulint fold= ut_fold_ull(id); - HASH_SEARCH(id_hash, &table_id_hash, fold, dict_table_t*, table, - ut_ad(table->cached), table->id == id); - DBUG_ASSERT(!table || !table->is_temporary()); - return table; - } + dict_table_t *find_table(table_id_t id) const noexcept; - bool is_initialised() const { return m_initialised; } + bool is_initialised() const noexcept { return m_initialised; } /** Initialise the data dictionary cache. */ - void create(); + void create() noexcept; /** Close the data dictionary cache on shutdown. */ - void close(); + void close() noexcept; /** Resize the hash tables based on the current buffer pool size. */ - void resize(); + void resize() noexcept; /** Add a table definition to the data dictionary cache */ - inline void add(dict_table_t* table); + inline void add(dict_table_t *table) noexcept; /** Remove a table definition from the data dictionary cache. @param[in,out] table cached table definition to be evicted @param[in] lru whether this is part of least-recently-used evictiono @param[in] keep whether to keep (not free) the object */ - void remove(dict_table_t* table, bool lru = false, bool keep = false); + void remove(dict_table_t *table, bool lru= false, bool keep= false) noexcept; #ifdef UNIV_DEBUG /** Find a table */ @@ -1530,24 +1498,13 @@ public: /** Evict unused, unlocked tables from table_LRU. @param half whether to consider half the tables only (instead of all) @return number of tables evicted */ - ulint evict_table_LRU(bool half); + ulint evict_table_LRU(bool half) noexcept; /** Look up a table in the dictionary cache. 
@param name table name @return table handle @retval nullptr if not found */ - dict_table_t *find_table(const span &name) const - { - ut_ad(frozen()); - for (dict_table_t *table= static_cast - (HASH_GET_FIRST(&table_hash, table_hash.calc_hash - (my_crc32c(0, name.data(), name.size())))); - table; table= table->name_hash) - if (strlen(table->name.m_name) == name.size() && - !memcmp(table->name.m_name, name.data(), name.size())) - return table; - return nullptr; - } + dict_table_t *find_table(const span &name) const noexcept; /** Look up or load a table definition @param name table name @@ -1555,13 +1512,14 @@ public: @return table handle @retval nullptr if not found */ dict_table_t *load_table(const span &name, - dict_err_ignore_t ignore= DICT_ERR_IGNORE_NONE); + dict_err_ignore_t ignore= DICT_ERR_IGNORE_NONE) + noexcept; /** Attempt to load the system tables on startup @return whether any discrepancy with the expected definition was found */ - bool load_sys_tables(); + bool load_sys_tables() noexcept; /** Create or check system tables on startup */ - dberr_t create_or_check_sys_tables(); + dberr_t create_or_check_sys_tables() noexcept; }; /** the data dictionary cache */ diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index e78494fe909..585a18c8196 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1747,11 +1747,11 @@ fil_delete_file( /*============*/ const char* path); /*!< in: filepath of the ibd tablespace */ -/** Look up a tablespace. -@param tablespace identifier -@return tablespace +/** Look up a table space by a given id. +@param id tablespace identifier +@return tablespace object @retval nullptr if not found */ -fil_space_t *fil_space_get_by_id(uint32_t id); +fil_space_t *fil_space_get_by_id(uint32_t id) noexcept; /** Note that a non-predefined persistent tablespace has been modified by redo log. diff --git a/storage/innobase/include/fsp0fsp.h b/storage/innobase/include/fsp0fsp.h index 779387fbe1f..cc50fe011f0 100644 --- a/storage/innobase/include/fsp0fsp.h +++ b/storage/innobase/include/fsp0fsp.h @@ -460,42 +460,36 @@ fseg_free_page( dberr_t fseg_page_is_allocated(fil_space_t *space, unsigned page) MY_ATTRIBUTE((nonnull, warn_unused_result)); +MY_ATTRIBUTE((nonnull, warn_unused_result)) /** Frees part of a segment. This function can be used to free a segment by repeatedly calling this function in different mini-transactions. Doing the freeing in a single mini-transaction might result in too big a mini-transaction. -@param header segment header; NOTE: if the header resides on first - page of the frag list of the segment, this pointer - becomes obsolete after the last freeing step -@param mtr mini-transaction -@param ahi Drop the adaptive hash index +@param block segment header block +@param header segment header offset in the block; +NOTE: if the header resides on first page of the frag list of the segment, +this pointer becomes obsolete after the last freeing step +@param mtr mini-transaction @return whether the freeing was completed */ -bool -fseg_free_step( - fseg_header_t* header, - mtr_t* mtr +bool fseg_free_step(buf_block_t *block, size_t header, mtr_t *mtr #ifdef BTR_CUR_HASH_ADAPT - ,bool ahi=false + , bool ahi=false /*!< whether to drop the AHI */ #endif /* BTR_CUR_HASH_ADAPT */ - ) - MY_ATTRIBUTE((warn_unused_result)); + ) noexcept; +MY_ATTRIBUTE((nonnull, warn_unused_result)) /** Frees part of a segment. Differs from fseg_free_step because this function leaves the header page unfreed. 
-@param header segment header which must reside on the first - fragment page of the segment -@param mtr mini-transaction -@param ahi drop the adaptive hash index +@param block segment header block; must reside on the first +fragment page of the segment +@param header segment header offset in the block +@param mtr mini-transaction @return whether the freeing was completed, except for the header page */ -bool -fseg_free_step_not_header( - fseg_header_t* header, - mtr_t* mtr +bool fseg_free_step_not_header(buf_block_t *block, size_t header, mtr_t *mtr #ifdef BTR_CUR_HASH_ADAPT - ,bool ahi=false + , bool ahi=false /*!< whether to drop the AHI */ #endif /* BTR_CUR_HASH_ADAPT */ - ) - MY_ATTRIBUTE((warn_unused_result)); + ) noexcept; /** Reset the page type. Data files created before MySQL 5.1.48 may contain garbage in FIL_PAGE_TYPE. diff --git a/storage/innobase/include/hash0hash.h b/storage/innobase/include/hash0hash.h index 867ad9e0109..4d45c0bf772 100644 --- a/storage/innobase/include/hash0hash.h +++ b/storage/innobase/include/hash0hash.h @@ -28,142 +28,91 @@ Created 5/20/1997 Heikki Tuuri #include "ut0rnd.h" #include "ut0new.h" -struct hash_table_t; struct hash_cell_t { /** singly-linked, nullptr terminated list of hash buckets */ void *node; +private: + /** @return pointer to the first element + @tparam T type of the element */ + template T **begin() noexcept + { return reinterpret_cast(&node); } + /** @return pointer to the last element + @tparam T type of the element + @param next the next-element pointer in T */ + template T **end(T *T::*next) noexcept + { + T **prev; + for (prev= begin(); *prev; prev= &((*prev)->*next)); + return prev; + } + +public: /** Append an element. @tparam T type of the element @param insert the being-inserted element @param next the next-element pointer in T */ template - void append(T &insert, T *T::*next) + void append(T &insert, T *T::*next) noexcept { - void **after; - for (after= &node; *after; - after= reinterpret_cast(&(static_cast(*after)->*next))); insert.*next= nullptr; - *after= &insert; + *end(next)= &insert; + } + + /** Find for an element. + @tparam T type of the element + @tparam UnaryPred unary predicate + @param next the next-element pointer in T + @param u unary predicate for searching the element + @return the first matching element + @retval nullptr if not found */ + template + T *find(T *T::*next, UnaryPred u) const noexcept + { + T *n; + for (n= static_cast(node); n && !u(n); n= n->*next); + return n; + } + + /** Search for a pointer to an element. + @tparam T type of the element + @tparam UnaryPred unary predicate + @param next the next-element pointer in T + @param u unary predicate for searching the element + @return pointer to the first matching element, + or to the last element in the chain */ + template + T **search(T *T::*next, UnaryPred u) noexcept + { + T **prev; + for (prev= begin(); !u(*prev); prev= &((*prev)->*next)); + return prev; + } + + /** Remove an element. + @tparam T type of the element + @param prev pointer to the element to be removed + @param next the next-element pointer in T */ + template + void remove(T **prev, T *T::*next) noexcept + { + T &element= **prev; + *prev= element.*next; + element.*next= nullptr; + } + + /** Remove an element. 
+ @tparam T type of the element + @param element the being-removed element + @param next the next-element pointer in T */ + template + void remove(T &element, T *T::*next) noexcept + { + remove(search(next, [&element](const T *p){return p==&element;}), next); } }; -/*******************************************************************//** -Inserts a struct to a hash table. */ - -#define HASH_INSERT(TYPE, NAME, TABLE, FOLD, DATA)\ -do {\ - hash_cell_t* cell3333;\ - TYPE* struct3333;\ -\ - (DATA)->NAME = NULL;\ -\ - cell3333 = &(TABLE)->array[(TABLE)->calc_hash(FOLD)]; \ -\ - if (cell3333->node == NULL) {\ - cell3333->node = DATA;\ - } else {\ - struct3333 = (TYPE*) cell3333->node;\ -\ - while (struct3333->NAME != NULL) {\ -\ - struct3333 = (TYPE*) struct3333->NAME;\ - }\ -\ - struct3333->NAME = DATA;\ - }\ -} while (0) - -#ifdef UNIV_HASH_DEBUG -# define HASH_ASSERT_VALID(DATA) ut_a((void*) (DATA) != (void*) -1) -# define HASH_INVALIDATE(DATA, NAME) *(void**) (&DATA->NAME) = (void*) -1 -#else -# define HASH_ASSERT_VALID(DATA) do {} while (0) -# define HASH_INVALIDATE(DATA, NAME) do {} while (0) -#endif - -/*******************************************************************//** -Deletes a struct from a hash table. */ - -#define HASH_DELETE(TYPE, NAME, TABLE, FOLD, DATA)\ -do {\ - hash_cell_t* cell3333;\ - TYPE* struct3333;\ -\ - cell3333 = &(TABLE)->array[(TABLE)->calc_hash(FOLD)]; \ -\ - if (cell3333->node == DATA) {\ - HASH_ASSERT_VALID(DATA->NAME);\ - cell3333->node = DATA->NAME;\ - } else {\ - struct3333 = (TYPE*) cell3333->node;\ -\ - while (struct3333->NAME != DATA) {\ -\ - struct3333 = (TYPE*) struct3333->NAME;\ - ut_a(struct3333);\ - }\ -\ - struct3333->NAME = DATA->NAME;\ - }\ - HASH_INVALIDATE(DATA, NAME);\ -} while (0) - -/*******************************************************************//** -Gets the first struct in a hash chain, NULL if none. */ - -#define HASH_GET_FIRST(TABLE, HASH_VAL) (TABLE)->array[HASH_VAL].node - -/*******************************************************************//** -Gets the next struct in a hash chain, NULL if none. */ - -#define HASH_GET_NEXT(NAME, DATA) ((DATA)->NAME) - -/********************************************************************//** -Looks for a struct in a hash table. */ -#define HASH_SEARCH(NAME, TABLE, FOLD, TYPE, DATA, ASSERTION, TEST)\ -{\ - (DATA) = (TYPE) HASH_GET_FIRST(TABLE, (TABLE)->calc_hash(FOLD)); \ - HASH_ASSERT_VALID(DATA);\ -\ - while ((DATA) != NULL) {\ - ASSERTION;\ - if (TEST) {\ - break;\ - } else {\ - HASH_ASSERT_VALID(HASH_GET_NEXT(NAME, DATA));\ - (DATA) = (TYPE) HASH_GET_NEXT(NAME, DATA);\ - }\ - }\ -} - -/********************************************************************//** -Looks for an item in all hash buckets. */ -#define HASH_SEARCH_ALL(NAME, TABLE, TYPE, DATA, ASSERTION, TEST) \ -do { \ - ulint i3333; \ - \ - for (i3333 = (TABLE)->n_cells; i3333--; ) { \ - (DATA) = (TYPE) HASH_GET_FIRST(TABLE, i3333); \ - \ - while ((DATA) != NULL) { \ - HASH_ASSERT_VALID(DATA); \ - ASSERTION; \ - \ - if (TEST) { \ - break; \ - } \ - \ - (DATA) = (TYPE) HASH_GET_NEXT(NAME, DATA); \ - } \ - \ - if ((DATA) != NULL) { \ - break; \ - } \ - } \ -} while (0) - /** Hash table with singly-linked overflow lists */ struct hash_table_t { @@ -174,17 +123,20 @@ struct hash_table_t /** Create the hash table. @param n the lower bound of n_cells */ - void create(ulint n) + void create(ulint n) noexcept { n_cells= ut_find_prime(n); array= static_cast(ut_zalloc_nokey(n_cells * sizeof *array)); } /** Clear the hash table. 
*/ - void clear() { memset(array, 0, n_cells * sizeof *array); } + void clear() noexcept { memset(array, 0, n_cells * sizeof *array); } /** Free the hash table. */ - void free() { ut_free(array); array= nullptr; } + void free() noexcept { ut_free(array); array= nullptr; } - ulint calc_hash(ulint fold) const { return ut_hash_ulint(fold, n_cells); } + ulint calc_hash(ulint fold) const noexcept { return fold % n_cells; } + + hash_cell_t *cell_get(ulint fold) const noexcept + { return &array[calc_hash(fold)]; } }; diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index d1e142069f5..9da6f1680cd 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -453,7 +453,7 @@ lock_rec_unlock( /*============*/ trx_t* trx, /*!< in/out: transaction that has set a record lock */ - const page_id_t id, /*!< in: page containing rec */ + const buf_block_t& block, /*!< in: page containing rec */ const rec_t* rec, /*!< in: record */ lock_mode lock_mode);/*!< in: LOCK_S or LOCK_X */ @@ -710,13 +710,10 @@ public: #endif private: - /** @return the hash value before any ELEMENTS_PER_LATCH padding */ - static ulint hash(ulint fold, ulint n) { return ut_hash_ulint(fold, n); } - /** @return the index of an array element */ - static ulint calc_hash(ulint fold, ulint n_cells) + static ulint calc_hash(ulint fold, ulint n_cells) noexcept { - return pad(hash(fold, n_cells)); + return pad(fold % n_cells); } }; @@ -1167,9 +1164,9 @@ lock_rec_create( trx mutex */ /** Remove a record lock request, waiting or granted, on a discarded page -@param hash hash table -@param in_lock lock object */ -void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock); +@param in_lock lock object +@param cell hash table cell containing in_lock */ +void lock_rec_discard(lock_t *in_lock, hash_cell_t &cell) noexcept; /** Create a new record lock and inserts it to the lock queue, without checking for deadlocks or conflicts. 
diff --git a/storage/innobase/include/lock0priv.inl b/storage/innobase/include/lock0priv.inl index 67826fb1001..3c8ec01367b 100644 --- a/storage/innobase/include/lock0priv.inl +++ b/storage/innobase/include/lock0priv.inl @@ -180,7 +180,7 @@ lock_rec_get_next_on_page_const( const page_id_t page_id{lock->un_member.rec_lock.page_id}; - while (!!(lock= static_cast(HASH_GET_NEXT(hash, lock)))) + while (!!(lock= static_cast(lock->hash))) if (lock->un_member.rec_lock.page_id == page_id) break; return lock; diff --git a/storage/innobase/include/mtr0log.h b/storage/innobase/include/mtr0log.h index e2419309764..2186f2a002a 100644 --- a/storage/innobase/include/mtr0log.h +++ b/storage/innobase/include/mtr0log.h @@ -211,8 +211,7 @@ inline bool mtr_t::write(const buf_block_t &block, void *ptr, V val) p--; } ::memcpy(ptr, buf, l); - memcpy_low(block, static_cast - (ut_align_offset(p, srv_page_size)), p, end - p); + memcpy_low(block, uint16_t(p - block.page.frame), p, end - p); return true; } @@ -491,12 +490,12 @@ inline void mtr_t::memcpy(const buf_block_t &b, void *dest, const void *str, ulint len) { ut_ad(ut_align_down(dest, srv_page_size) == b.page.frame); - char *d= static_cast(dest); + byte *d= static_cast(dest); const char *s= static_cast(str); if (w != FORCED && is_logged()) { ut_ad(len); - const char *const end= d + len; + const byte *const end= d + len; while (*d++ == *s++) { if (d == end) @@ -510,7 +509,7 @@ inline void mtr_t::memcpy(const buf_block_t &b, void *dest, const void *str, len= static_cast(end - d); } ::memcpy(d, s, len); - memcpy(b, ut_align_offset(d, srv_page_size), len); + memcpy(b, d - b.page.frame, len); } /** Write an EXTENDED log record. diff --git a/storage/innobase/include/page0page.h b/storage/innobase/include/page0page.h index 8686c0f8861..945414dac01 100644 --- a/storage/innobase/include/page0page.h +++ b/storage/innobase/include/page0page.h @@ -196,13 +196,14 @@ extern my_bool srv_immediate_scrub_data_uncompressed; /** Get the start of a page frame. @param[in] ptr pointer within a page frame @return start of the page frame */ -MY_ATTRIBUTE((const)) -inline page_t* page_align(void *ptr) +MY_ATTRIBUTE((const,nonnull)) +inline page_t *page_align(void *ptr) noexcept { return my_assume_aligned (reinterpret_cast(ut_align_down(ptr, srv_page_size))); } -inline const page_t *page_align(const void *ptr) + +inline const page_t *page_align(const void *ptr) noexcept { return page_align(const_cast(ptr)); } @@ -210,8 +211,8 @@ inline const page_t *page_align(const void *ptr) /** Gets the byte offset within a page frame. @param[in] ptr pointer within a page frame @return offset from the start of the page */ -MY_ATTRIBUTE((const)) -inline uint16_t page_offset(const void* ptr) +MY_ATTRIBUTE((const,nonnull)) +inline uint16_t page_offset(const void *ptr) noexcept { return static_cast(ut_align_offset(ptr, srv_page_size)); } @@ -687,6 +688,7 @@ page_dir_find_owner_slot( /*=====================*/ const rec_t* rec); /*!< in: the physical record */ +#ifdef UNIV_DEBUG /***************************************************************//** Returns the heap number of a record. @return heap number */ @@ -695,6 +697,7 @@ ulint page_rec_get_heap_no( /*=================*/ const rec_t* rec); /*!< in: the physical record */ +#endif /** Determine whether a page has any siblings. 
@param[in] page page frame @return true if the page has any siblings */ @@ -738,15 +741,28 @@ inline uint64_t page_get_autoinc(const page_t *page) return mach_read_from_8(p); } -/************************************************************//** -Gets the pointer to the next record on the page. -@return pointer to next record */ -UNIV_INLINE -const rec_t* -page_rec_get_next_low( -/*==================*/ - const rec_t* rec, /*!< in: pointer to record */ - ulint comp); /*!< in: nonzero=compact page layout */ +/** Get the pointer to the next record on the page. +@tparam comp whether ROW_FORMAT is not REDUNDANT +@param page index page +@param rec index record +@return successor of rec in the page +@retval nullptr on corruption */ +template +inline const rec_t *page_rec_next_get(const page_t *page, const rec_t *rec) +{ + ut_ad(!!page_is_comp(page) == comp); + ut_ad(page_align(rec) == page); + ulint offs= rec_get_next_offs(rec, comp); + if (UNIV_UNLIKELY(offs < (comp ? PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM))) + return nullptr; + if (UNIV_UNLIKELY(offs > page_header_get_field(page, PAGE_HEAP_TOP))) + return nullptr; + ut_ad(page_rec_is_infimum(rec) || + (!page_is_leaf(page) && !page_has_prev(page)) || + !(rec_get_info_bits(page + offs, comp) & REC_INFO_MIN_REC_FLAG)); + return page + offs; +} + /************************************************************//** Gets the pointer to the next record on the page. @return pointer to next record */ @@ -755,6 +771,7 @@ rec_t* page_rec_get_next( /*==============*/ rec_t* rec); /*!< in: pointer to record */ + /************************************************************//** Gets the pointer to the next record on the page. @return pointer to next record */ diff --git a/storage/innobase/include/page0page.inl b/storage/innobase/include/page0page.inl index 6c0167edcf9..687ed93f447 100644 --- a/storage/innobase/include/page0page.inl +++ b/storage/innobase/include/page0page.inl @@ -128,6 +128,7 @@ inline void page_header_reset_last_insert(buf_block_t *block, mtr_t *mtr) memset_aligned<2>(&block->page.zip.data[field], 0, 2); } +#ifdef UNIV_DEBUG /***************************************************************//** Returns the heap number of a record. @return heap number */ @@ -143,6 +144,7 @@ page_rec_get_heap_no( return(rec_get_heap_no_old(rec)); } } +#endif /** Determine whether an index page record is a user record. @param[in] rec record in an index page @@ -235,7 +237,9 @@ page_get_page_no( /*=============*/ const page_t* page) /*!< in: page */ { - ut_ad(page == page_align((page_t*) page)); +#ifndef UNIV_INNOCHECKSUM + ut_ad(page == page_align(page)); +#endif /* !UNIV_INNOCHECKSUM */ return mach_read_from_4(my_assume_aligned<4>(page + FIL_PAGE_OFFSET)); } @@ -249,7 +253,7 @@ page_get_space_id( /*==============*/ const page_t* page) /*!< in: page */ { - ut_ad(page == page_align((page_t*) page)); + ut_ad(page == page_align(page)); return mach_read_from_4(my_assume_aligned<2> (page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID)); } @@ -357,8 +361,6 @@ page_rec_get_next_low( const page_t *page= page_align(rec); ut_ad(page_rec_check(rec)); ulint offs= rec_get_next_offs(rec, comp); - if (!offs) - return nullptr; if (UNIV_UNLIKELY(offs < (comp ? 
PAGE_NEW_SUPREMUM : PAGE_OLD_SUPREMUM))) return nullptr; if (UNIV_UNLIKELY(offs > page_header_get_field(page, PAGE_HEAP_TOP))) diff --git a/storage/innobase/include/trx0i_s.h b/storage/innobase/include/trx0i_s.h index caacfa0972a..f549745baa2 100644 --- a/storage/innobase/include/trx0i_s.h +++ b/storage/innobase/include/trx0i_s.h @@ -70,20 +70,6 @@ do { \ } \ } while (0) -/** A row of INFORMATION_SCHEMA.innodb_locks */ -struct i_s_locks_row_t; - -/** Objects of trx_i_s_cache_t::locks_hash */ -struct i_s_hash_chain_t; - -/** Objects of this type are added to the hash table -trx_i_s_cache_t::locks_hash */ -struct i_s_hash_chain_t { - i_s_locks_row_t* value; /*!< row of - INFORMATION_SCHEMA.innodb_locks*/ - i_s_hash_chain_t* next; /*!< next item in the hash chain */ -}; - /** This structure represents INFORMATION_SCHEMA.innodb_locks row */ struct i_s_locks_row_t { trx_id_t lock_trx_id; /*!< transaction identifier */ @@ -106,7 +92,7 @@ struct i_s_locks_row_t { table_id_t lock_table_id; /*!< table identifier from lock_get_table_id */ - i_s_hash_chain_t hash_chain; /*!< hash table chain node for + i_s_locks_row_t *next; /*!< hash table chain node for trx_i_s_cache_t::locks_hash */ /* @} */ }; diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h index 5d6c58517c6..79d9d155fd0 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -947,16 +947,58 @@ public: @return whether any transaction not newer than id might be active */ - bool find_same_or_older(trx_t *trx, trx_id_t id) + bool find_same_or_older_low(trx_t *trx, trx_id_t id) noexcept; + + /** + Determine if the specified transaction or any older one might be active. + + @param trx transaction whose max_inactive_id will be consulted + @param id identifier of another transaction + @return whether any transaction not newer than id might be active + */ + + bool find_same_or_older(trx_t *trx, trx_id_t id) noexcept { if (trx->max_inactive_id >= id) return false; - bool found= rw_trx_hash.iterate(trx, find_same_or_older_callback, &id); + const bool found{find_same_or_older_low(trx, id)}; if (!found) trx->max_inactive_id= id; return found; } + /** + Determine if the specified transaction or any older one might be active. + + @param trx purge_sys.query->trx (may be used by multiple threads) + @param id transaction identifier to check + @return whether any transaction not newer than id might be active + */ + + bool find_same_or_older_in_purge(trx_t *trx, trx_id_t id) noexcept + { +#if SIZEOF_SIZE_T < 8 && !defined __i386__ + /* On systems that lack native 64-bit loads and stores, + it should be more efficient to acquire a futex-backed mutex + earlier than to invoke a loop or a complex library function. + + Our IA-32 target is not "i386" but at least "i686", that is, at least + Pentium MMX, which has a 64-bit data bus and 64-bit XMM registers. */ + trx->mutex_lock(); + trx_id_t &max_inactive_id= trx->max_inactive_id; + const bool hot{max_inactive_id < id && find_same_or_older(trx, id)}; +#else + Atomic_relaxed &max_inactive_id= trx->max_inactive_id_atomic; + if (max_inactive_id >= id) + return false; + trx->mutex_lock(); + const bool hot{find_same_or_older(trx, id)}; +#endif + if (hot) + max_inactive_id= id; + trx->mutex_unlock(); + return hot; + } /** Determines the maximum transaction id. 
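trx_sys_t::find_same_or_older() above caches a negative answer in trx_t::max_inactive_id so that repeated visibility checks can skip the rw_trx_hash scan performed by find_same_or_older_low(). The sketch below is a simplified standalone model of that caching idea; trx_stub and expensive_lookup() are hypothetical stand-ins, and the extra trx_t::mutex serialization used by the purge-thread variant is omitted.

// Standalone sketch of the max_inactive_id caching in find_same_or_older().
// trx_stub and expensive_lookup() are stand-ins; the real code scans
// trx_sys.rw_trx_hash and, for the shared purge_sys trx, also takes trx_t::mutex.
#include <atomic>
#include <cassert>
#include <cstdint>

static int lookup_calls= 0;

static bool expensive_lookup(std::uint64_t)   // stand-in: "no such active trx"
{
  ++lookup_calls;
  return false;
}

struct trx_stub
{
  // Largest id proven to have no active transaction at or below it.
  std::atomic<std::uint64_t> max_inactive_id{0};

  bool find_same_or_older(std::uint64_t id)
  {
    if (max_inactive_id.load(std::memory_order_relaxed) >= id)
      return false;                           // cache hit: skip the scan
    const bool found= expensive_lookup(id);
    if (!found)                               // cache the negative result
      max_inactive_id.store(id, std::memory_order_relaxed);
    return found;
  }
};

int main()
{
  trx_stub trx;
  assert(!trx.find_same_or_older(42));        // performs the expensive scan
  assert(!trx.find_same_or_older(40));        // 40 <= 42: answered from cache
  assert(lookup_calls == 1);
  return 0;
}

A stale relaxed value can only make the check pessimistic (one extra scan), never incorrect, which appears to be why the patch can use an Atomic_relaxed member for the purge_sys.query->trx case.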
@@ -1211,12 +1253,7 @@ public: @return error code */ inline dberr_t reset_page(mtr_t *mtr); private: - static my_bool find_same_or_older_callback(void *el, void *i) - { - auto element= static_cast(el); - auto id= static_cast(i); - return element->id <= *id; - } + static my_bool find_same_or_older_callback(void *el, void *i) noexcept; struct snapshot_ids_arg diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 477c071ef0a..508e7858362 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -644,10 +644,25 @@ public: Cleared in commit_in_memory() after commit_state(), trx_sys_t::deregister_rw(), release_locks(). */ trx_id_t id; - /** The largest encountered transaction identifier for which no - transaction was observed to be active. This is a cache to speed up - trx_sys_t::find_same_or_older(). */ - trx_id_t max_inactive_id; + union + { + /** The largest encountered transaction identifier for which no + transaction was observed to be active. This is a cache to speed up + trx_sys_t::find_same_or_older(). + + This will be zero-initialized in Pool::Pool() and not initialized + when a transaction object in the pool is freed and reused. The + idea is that new transactions can reuse the result of + an expensive trx_sys_t::find_same_or_older_low() invocation that + was performed in an earlier transaction that used the same + memory area. */ + trx_id_t max_inactive_id; + /** Same as max_inactive_id, for purge_sys.query->trx which may be + accessed by multiple concurrent threads in + trx_sys_t::find_same_or_older_in_purge(). Writes are protected by + trx_t::mutex. */ + Atomic_relaxed max_inactive_id_atomic; + }; private: /** mutex protecting state and some of lock diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index 2954cf73ecc..a86cc346a14 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -326,7 +326,7 @@ public: page_id_t get_page_id() const { return page_id; } /** Handle the DML undo log and apply it on online indexes */ - inline void apply_undo_rec(const trx_undo_rec_t *rec); + inline void apply_undo_rec(const trx_undo_rec_t *rec, uint16_t offset); ~UndorecApplier() { diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 7e1312e3f7e..91433e86fbd 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -134,7 +134,6 @@ using the call command. */ ut_ad(lock_rec_validate_page()) assertions. */ #define UNIV_LRU_DEBUG /* debug the buffer pool LRU */ -#define UNIV_HASH_DEBUG /* debug HASH_ macros */ #define UNIV_PERF_DEBUG /* debug flag that enables light weight performance related stuff. */ diff --git a/storage/innobase/include/ut0pool.h b/storage/innobase/include/ut0pool.h index aa0cfb9e060..e5df50fa071 100644 --- a/storage/innobase/include/ut0pool.h +++ b/storage/innobase/include/ut0pool.h @@ -68,6 +68,15 @@ struct Pool { aligned_malloc(m_size, CPU_LEVEL1_DCACHE_LINESIZE)); memset_aligned( m_start, 0, m_size); + /* Note: The above would zero-initialize some + std::atomic data members in trx_t, such as + trx_t::lock, which will not be initialized further in + TrxFactory::init(). It may be implementation defined + whether such zero initialization works. On some + hypothetical platform (not one that seems to be + supported by a mainstream C++ compiler), std::atomic + might wrap the data member as well as a + non-zero-initialized mutex.
*/ m_last = m_start; diff --git a/storage/innobase/include/ut0rnd.h b/storage/innobase/include/ut0rnd.h index 511eb21fd11..5cba6ee534f 100644 --- a/storage/innobase/include/ut0rnd.h +++ b/storage/innobase/include/ut0rnd.h @@ -70,26 +70,14 @@ inline ulint ut_rnd_interval(ulint n) return n > 1 ? static_cast(ut_rnd_gen() % n) : 0; } -/*******************************************************//** -The following function generates a hash value for a ulint integer -to a hash table of size table_size, which should be a prime or some -random number to work reliably. -@return hash value */ -UNIV_INLINE -ulint -ut_hash_ulint( -/*==========*/ - ulint key, /*!< in: value to be hashed */ - ulint table_size); /*!< in: hash table size */ -/*************************************************************//** -Folds a 64-bit integer. -@return folded value */ -UNIV_INLINE -ulint -ut_fold_ull( -/*========*/ - ib_uint64_t d) /*!< in: 64-bit integer */ - MY_ATTRIBUTE((const)); +# if SIZEOF_SIZE_T < 8 +inline size_t ut_fold_ull(uint64_t d) noexcept +{ + return size_t(d) * 31 + size_t(d >> (SIZEOF_SIZE_T * CHAR_BIT)); +} +# else +# define ut_fold_ull(d) d +# endif /***********************************************************//** Looks for a prime number slightly greater than the given argument. The prime is chosen so that it is not near any power of 2. diff --git a/storage/innobase/include/ut0rnd.inl b/storage/innobase/include/ut0rnd.inl index 37da323f8f3..c9f7373dd1b 100644 --- a/storage/innobase/include/ut0rnd.inl +++ b/storage/innobase/include/ut0rnd.inl @@ -27,40 +27,6 @@ Created 5/30/1994 Heikki Tuuri #define UT_HASH_RANDOM_MASK 1463735687 #define UT_HASH_RANDOM_MASK2 1653893711 -#ifndef UNIV_INNOCHECKSUM - -/*******************************************************//** -The following function generates a hash value for a ulint integer -to a hash table of size table_size, which should be a prime -or some random number for the hash table to work reliably. -@return hash value */ -UNIV_INLINE -ulint -ut_hash_ulint( -/*==========*/ - ulint key, /*!< in: value to be hashed */ - ulint table_size) /*!< in: hash table size */ -{ - ut_ad(table_size); - key = key ^ UT_HASH_RANDOM_MASK2; - - return(key % table_size); -} - -/*************************************************************//** -Folds a 64-bit integer. -@return folded value */ -UNIV_INLINE -ulint -ut_fold_ull( -/*========*/ - ib_uint64_t d) /*!< in: 64-bit integer */ -{ - return(ut_fold_ulint_pair((ulint) d & ULINT32_MASK, - (ulint) (d >> 32))); -} -#endif /* !UNIV_INNOCHECKSUM */ - /*************************************************************//** Folds a pair of ulints. 
@return folded value */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 071724c41f0..5c11a83f886 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -2363,8 +2363,7 @@ static void lock_rec_dequeue_from_page(lock_t *in_lock, bool owns_wait_mutex) const ulint rec_fold = page_id.fold(); hash_cell_t &cell = *lock_hash.cell_get(rec_fold); lock_sys.assert_locked(cell); - - HASH_DELETE(lock_t, hash, &lock_hash, rec_fold, in_lock); + cell.remove(*in_lock, &lock_t::hash); UT_LIST_REMOVE(in_lock->trx->lock.trx_locks, in_lock); MONITOR_INC(MONITOR_RECLOCK_REMOVED); @@ -2414,16 +2413,14 @@ static void lock_rec_dequeue_from_page(lock_t *in_lock, bool owns_wait_mutex) } /** Remove a record lock request, waiting or granted, on a discarded page -@param hash hash table -@param in_lock lock object */ +@param in_lock lock object +@param cell hash table cell containing in_lock */ TRANSACTIONAL_TARGET -void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock) +void lock_rec_discard(lock_t *in_lock, hash_cell_t &cell) noexcept { ut_ad(!in_lock->is_table()); - lock_hash.assert_locked(in_lock->un_member.rec_lock.page_id); - HASH_DELETE(lock_t, hash, &lock_hash, - in_lock->un_member.rec_lock.page_id.fold(), in_lock); + cell.remove(*in_lock, &lock_t::hash); ut_d(uint32_t old_locks); { trx_t *trx= in_lock->trx; @@ -2441,17 +2438,16 @@ void lock_rec_discard(lock_sys_t::hash_table &lock_hash, lock_t *in_lock) Removes record lock objects set on an index page which is discarded. This function does not move locks, or check for waiting locks, therefore the lock bitmaps must already be reset when this function is called. */ +template static void -lock_rec_free_all_from_discard_page(page_id_t id, const hash_cell_t &cell, - lock_sys_t::hash_table &lock_hash) +lock_rec_free_all_from_discard_page(page_id_t id, hash_cell_t &cell) noexcept { for (lock_t *lock= lock_sys_t::get_first(cell, id); lock; ) { - ut_ad(&lock_hash != &lock_sys.rec_hash || - lock_rec_find_set_bit(lock) == ULINT_UNDEFINED); + ut_ad(!assert || lock_rec_find_set_bit(lock) == ULINT_UNDEFINED); ut_ad(!lock->is_waiting()); lock_t *next_lock= lock_rec_get_next_on_page(lock); - lock_rec_discard(lock_hash, lock); + lock_rec_discard(lock, cell); lock= next_lock; } } @@ -2468,15 +2464,15 @@ ATTRIBUTE_COLD void lock_discard_for_index(const dict_index_t &index) const ulint n= lock_sys.rec_hash.pad(lock_sys.rec_hash.n_cells); for (ulint i= 0; i < n; i++) { - for (lock_t *lock= static_cast(lock_sys.rec_hash.array[i].node); - lock; ) + hash_cell_t &cell= lock_sys.rec_hash.array[i]; + for (lock_t *lock= static_cast(cell.node); lock; ) { ut_ad(!lock->is_table()); if (lock->index == &index) { ut_ad(!lock->is_waiting()); - lock_rec_discard(lock_sys.rec_hash, lock); - lock= static_cast(lock_sys.rec_hash.array[i].node); + lock_rec_discard(lock, cell); + lock= static_cast(cell.node); } else lock= lock->hash; @@ -2776,8 +2772,8 @@ lock_move_reorganize_page( old_heap_no= rec_get_heap_no_new(rec2); new_heap_no= rec_get_heap_no_new(rec1); - rec1= page_rec_get_next_low(rec1, TRUE); - rec2= page_rec_get_next_low(rec2, TRUE); + rec1= page_rec_next_get(block->page.frame, rec1); + rec2= page_rec_next_get(oblock->page.frame, rec2); } else { @@ -2785,8 +2781,8 @@ lock_move_reorganize_page( new_heap_no= rec_get_heap_no_old(rec1); ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2))); - rec1= page_rec_get_next_low(rec1, FALSE); - rec2= page_rec_get_next_low(rec2, FALSE); + rec1= 
page_rec_next_get(block->page.frame, rec1); + rec2= page_rec_next_get(oblock->page.frame, rec2); } trx_t *lock_trx= lock->trx; @@ -2842,9 +2838,10 @@ lock_move_rec_list_end( const rec_t* rec) /*!< in: record on page: this is the first record moved */ { - const ulint comp= page_rec_is_comp(rec); - - ut_ad(block->page.frame == page_align(rec)); + const page_t *const page= block->page.frame; + const page_t *const new_page= new_block->page.frame; + const ulint comp= page_is_comp(page); + ut_ad(page == page_align(rec)); ut_ad(comp == page_is_comp(new_block->page.frame)); const page_id_t id{block->page.id()}; @@ -2867,17 +2864,15 @@ lock_move_rec_list_end( if (comp) { - if (page_offset(rec1) == PAGE_NEW_INFIMUM) - rec1= page_rec_get_next_low(rec1, TRUE); - rec2= page_rec_get_next_low(new_block->page.frame + PAGE_NEW_INFIMUM, - TRUE); + if (rec1 - page == PAGE_NEW_INFIMUM) + rec1= page_rec_next_get(page, rec1); + rec2= page_rec_next_get(new_page, PAGE_NEW_INFIMUM + new_page); } else { - if (page_offset(rec1) == PAGE_OLD_INFIMUM) - rec1= page_rec_get_next_low(rec1, FALSE); - rec2= page_rec_get_next_low(new_block->page.frame + PAGE_OLD_INFIMUM, - FALSE); + if (rec1 - page == PAGE_OLD_INFIMUM) + rec1= page_rec_next_get(page, rec1); + rec2= page_rec_next_get(new_page, PAGE_OLD_INFIMUM + new_page); } if (UNIV_UNLIKELY(!rec1 || !rec2)) @@ -2899,19 +2894,19 @@ lock_move_rec_list_end( if (comp) { rec1_heap_no= rec_get_heap_no_new(rec1); - if (!(rec1= page_rec_get_next_low(rec1, TRUE))) + if (!(rec1= page_rec_next_get(page, rec1))) { ut_ad(rec1_heap_no == PAGE_HEAP_NO_SUPREMUM); break; } rec2_heap_no= rec_get_heap_no_new(rec2); - rec2= page_rec_get_next_low(rec2, TRUE); + rec2= page_rec_next_get(new_page, rec2); } else { ut_d(const rec_t *old1= rec1); rec1_heap_no= rec_get_heap_no_old(rec1); - if (!(rec1= page_rec_get_next_low(rec1, FALSE))) + if (!(rec1= page_rec_next_get(page, rec1))) { ut_ad(rec1_heap_no == PAGE_HEAP_NO_SUPREMUM); break; @@ -2921,7 +2916,7 @@ lock_move_rec_list_end( ut_ad(!memcmp(old1, rec2, rec_get_data_size_old(old1))); rec2_heap_no= rec_get_heap_no_old(rec2); - rec2= page_rec_get_next_low(rec2, FALSE); + rec2= page_rec_next_get(new_page, rec2); } if (UNIV_UNLIKELY(!rec2)) @@ -2945,7 +2940,7 @@ lock_move_rec_list_end( } lock_rec_add_to_queue(type_mode, g.cell2(), new_id, - new_block->page.frame, + new_page, rec2_heap_no, lock->index, lock_trx, true); } @@ -2984,7 +2979,7 @@ lock_move_rec_list_start( before the records were copied */ { - const ulint comp= page_rec_is_comp(rec); + const ulint comp= page_is_comp(block->page.frame); ut_ad(block->page.frame == page_align(rec)); ut_ad(comp == page_is_comp(new_block->page.frame)); @@ -3006,15 +3001,15 @@ lock_move_rec_list_start( if (comp) { - rec1= page_rec_get_next_low(block->page.frame + PAGE_NEW_INFIMUM, - TRUE); - rec2= page_rec_get_next_low(old_end, TRUE); + rec1= page_rec_next_get(block->page.frame, + block->page.frame + PAGE_NEW_INFIMUM); + rec2= page_rec_next_get(new_block->page.frame, old_end); } else { - rec1= page_rec_get_next_low(block->page.frame + PAGE_OLD_INFIMUM, - FALSE); - rec2= page_rec_get_next_low(old_end, FALSE); + rec1= page_rec_next_get(block->page.frame, + block->page.frame + PAGE_OLD_INFIMUM); + rec2= page_rec_next_get(new_block->page.frame, old_end); } /* Copy lock requests on user records to new page and @@ -3039,8 +3034,8 @@ lock_move_rec_list_start( rec1_heap_no= rec_get_heap_no_new(rec1); rec2_heap_no= rec_get_heap_no_new(rec2); - rec1= page_rec_get_next_low(rec1, TRUE); - rec2= page_rec_get_next_low(rec2, 
TRUE); + rec1= page_rec_next_get(block->page.frame, rec1); + rec2= page_rec_next_get(new_block->page.frame, rec2); } else { @@ -3049,8 +3044,8 @@ lock_move_rec_list_start( ut_ad(!memcmp(rec1, rec2, rec_get_data_size_old(rec2))); - rec1= page_rec_get_next_low(rec1, FALSE); - rec2= page_rec_get_next_low(rec2, FALSE); + rec1= page_rec_next_get(block->page.frame, rec1); + rec2= page_rec_next_get(new_block->page.frame, rec2); } trx_t *lock_trx= lock->trx; @@ -3105,8 +3100,7 @@ lock_rtr_move_rec_list( if (!num_move) return; - const ulint comp= page_rec_is_comp(rec_move[0].old_rec); - + const ulint comp= page_is_comp(block->page.frame); ut_ad(block->page.frame == page_align(rec_move[0].old_rec)); ut_ad(new_block->page.frame == page_align(rec_move[0].new_rec)); ut_ad(comp == page_rec_is_comp(rec_move[0].new_rec)); @@ -3237,6 +3231,17 @@ static void lock_assert_no_spatial(const page_id_t id) } #endif +/** Determine the heap number of an index record +@param block index page +@param rec index record +@return the heap number of the record */ +static ulint lock_get_heap_no(const buf_block_t &block, const rec_t *rec) +{ + ut_ad(page_align(rec) == block.page.frame); + return page_is_comp(block.page.frame) + ? rec_get_heap_no_new(rec) : rec_get_heap_no_old(rec); +} + /*************************************************************//** Updates the lock table when a page is merged to the right. */ void @@ -3256,6 +3261,7 @@ lock_update_merge_right( const page_id_t l{left_block->page.id()}; const page_id_t r{right_block->page.id()}; + const ulint h= lock_get_heap_no(*right_block, orig_succ); /* This would likely be too large for a memory transaction. */ LockMultiGuard g{lock_sys.rec_hash, l, r}; @@ -3263,13 +3269,12 @@ lock_update_merge_right( original successor of infimum on the right page, to which the left page was merged */ lock_rec_inherit_to_gap(g.cell2(), r, g.cell1(), l, right_block->page.frame, - page_rec_get_heap_no(orig_succ), - PAGE_HEAP_NO_SUPREMUM); + h, PAGE_HEAP_NO_SUPREMUM); /* Reset the locks on the supremum of the left page, releasing waiting transactions */ lock_rec_reset_and_release_wait(g.cell1(), l, PAGE_HEAP_NO_SUPREMUM); - lock_rec_free_all_from_discard_page(l, g.cell1(), lock_sys.rec_hash); + lock_rec_free_all_from_discard_page(l, g.cell1()); ut_d(lock_assert_no_spatial(l)); } @@ -3301,7 +3306,7 @@ void lock_update_copy_and_discard(const buf_block_t &new_block, page_id_t old) /* Move the locks on the supremum of the old page to the supremum of new */ lock_rec_move(g.cell1(), new_block, id, g.cell2(), old, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM); - lock_rec_free_all_from_discard_page(old, g.cell2(), lock_sys.rec_hash); + lock_rec_free_all_from_discard_page(old, g.cell2()); } /*************************************************************//** @@ -3333,21 +3338,38 @@ void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred, ut_ad(left.page.frame == page_align(orig_pred)); const page_id_t l{left.page.id()}; - const rec_t *left_next_rec= page_rec_get_next_const(orig_pred); - if (UNIV_UNLIKELY(!left_next_rec)) + const auto comp= page_is_comp(left.page.frame); + const rec_t *left_next_rec; + ulint heap_no; + if (comp) { - ut_ad("corrupted page" == 0); - return; + left_next_rec= page_rec_next_get(left.page.frame, orig_pred); + if (UNIV_UNLIKELY(!left_next_rec)) + { + ut_ad("corrupted page" == 0); + return; + } + heap_no= rec_get_heap_no_new(left_next_rec); + } + else + { + left_next_rec= page_rec_next_get(left.page.frame, orig_pred); + if 
(UNIV_UNLIKELY(!left_next_rec)) + { + ut_ad("corrupted page" == 0); + return; + } + heap_no= rec_get_heap_no_old(left_next_rec); } /* This would likely be too large for a memory transaction. */ LockMultiGuard g{lock_sys.rec_hash, l, right}; - if (!page_rec_is_supremum(left_next_rec)) + if (heap_no != PAGE_HEAP_NO_SUPREMUM) { /* Inherit the locks on the supremum of the left page to the first record which was moved from the right page */ lock_rec_inherit_to_gap(g.cell1(), l, g.cell1(), l, left.page.frame, - page_rec_get_heap_no(left_next_rec), + heap_no, PAGE_HEAP_NO_SUPREMUM); /* Reset the locks on the supremum of the left page, @@ -3359,7 +3381,7 @@ void lock_update_merge_left(const buf_block_t& left, const rec_t *orig_pred, of the left page */ lock_rec_move(g.cell1(), left, l, g.cell2(), right, PAGE_HEAP_NO_SUPREMUM, PAGE_HEAP_NO_SUPREMUM); - lock_rec_free_all_from_discard_page(right, g.cell2(), lock_sys.rec_hash); + lock_rec_free_all_from_discard_page(right, g.cell2()); /* there should exist no page lock on the right page, otherwise, it will be blocked from merge */ @@ -3450,21 +3472,18 @@ lock_update_discard( } while (heap_no != PAGE_HEAP_NO_SUPREMUM); } - lock_rec_free_all_from_discard_page(page_id, g.cell2(), - lock_sys.rec_hash); + lock_rec_free_all_from_discard_page(page_id, g.cell2()); } else { const auto fold = page_id.fold(); auto cell = lock_sys.prdt_hash.cell_get(fold); auto latch = lock_sys_t::hash_table::latch(cell); latch->acquire(); - lock_rec_free_all_from_discard_page(page_id, *cell, - lock_sys.prdt_hash); + lock_rec_free_all_from_discard_page(page_id, *cell); latch->release(); cell = lock_sys.prdt_page_hash.cell_get(fold); latch = lock_sys_t::hash_table::latch(cell); latch->acquire(); - lock_rec_free_all_from_discard_page(page_id, *cell, - lock_sys.prdt_page_hash); + lock_rec_free_all_from_discard_page(page_id, *cell); latch->release(); } } @@ -3486,16 +3505,16 @@ lock_update_insert( /* Inherit the gap-locking locks for rec, in gap mode, from the next record */ - if (page_rec_is_comp(rec)) { + if (page_is_comp(block->page.frame)) { receiver_heap_no = rec_get_heap_no_new(rec); - rec = page_rec_get_next_low(rec, TRUE); + rec = page_rec_next_get(block->page.frame, rec); if (UNIV_UNLIKELY(!rec)) { return; } donator_heap_no = rec_get_heap_no_new(rec); } else { receiver_heap_no = rec_get_heap_no_old(rec); - rec = page_rec_get_next_low(rec, FALSE); + rec = page_rec_next_get(block->page.frame, rec); if (UNIV_UNLIKELY(!rec)) { return; } @@ -3562,9 +3581,7 @@ lock_rec_store_on_page_infimum( bits are reset on the record */ { - const ulint heap_no= page_rec_get_heap_no(rec); - - ut_ad(block->page.frame == page_align(rec)); + const ulint heap_no= lock_get_heap_no(*block, rec); const page_id_t id{block->page.id()}; #ifdef ENABLED_DEBUG_SYNC SCOPE_EXIT([]() { DEBUG_SYNC_C("lock_rec_store_on_page_infimum_end"); }); @@ -3584,7 +3601,7 @@ whose infimum stored the lock state; lock bits are reset on the infimum */ void lock_rec_restore_from_page_infimum(const buf_block_t &block, const rec_t *rec, page_id_t donator) { - const ulint heap_no= page_rec_get_heap_no(rec); + const ulint heap_no= lock_get_heap_no(block, rec); const page_id_t id{block.page.id()}; LockMultiGuard g{lock_sys.rec_hash, id, donator}; lock_rec_move(g.cell1(), block, id, g.cell2(), donator, heap_no, @@ -4269,21 +4286,22 @@ lock_rec_unlock( /*============*/ trx_t* trx, /*!< in/out: transaction that has set a record lock */ - const page_id_t id, /*!< in: page containing rec */ + const buf_block_t& block, /*!< in: page 
containing rec */ const rec_t* rec, /*!< in: record */ lock_mode lock_mode)/*!< in: LOCK_S or LOCK_X */ { lock_t* first_lock; lock_t* lock; - ulint heap_no; ut_ad(trx); ut_ad(rec); ut_ad(!trx->lock.wait_lock); ut_ad(trx_state_eq(trx, TRX_STATE_ACTIVE)); + ut_ad(page_rec_is_leaf(rec)); ut_ad(!page_rec_is_metadata(rec)); - heap_no = page_rec_get_heap_no(rec); + const ulint heap_no = lock_get_heap_no(block, rec); + const page_id_t id{block.page.id()}; LockGuard g{lock_sys.rec_hash, id}; @@ -5118,25 +5136,13 @@ Calculates the number of record lock structs in the record lock hash table. TRANSACTIONAL_TARGET static ulint lock_get_n_rec_locks() { - ulint n_locks = 0; - ulint i; - - lock_sys.assert_locked(); - - for (i = 0; i < lock_sys.rec_hash.n_cells; i++) { - const lock_t* lock; - - for (lock = static_cast( - HASH_GET_FIRST(&lock_sys.rec_hash, i)); - lock != 0; - lock = static_cast( - HASH_GET_NEXT(hash, lock))) { - - n_locks++; - } - } - - return(n_locks); + ulint n_locks= 0; + lock_sys.assert_locked(); + for (ulint i= 0; i < lock_sys.rec_hash.n_cells; i++) + for (auto lock= static_cast(lock_sys.rec_hash.array[i].node); + lock; lock= lock->hash) + n_locks++; + return n_locks; } #endif /* PRINT_NUM_OF_LOCK_STRUCTS */ @@ -5645,10 +5651,8 @@ lock_rec_validate( lock_sys.assert_locked(); for (const lock_t* lock = static_cast( - HASH_GET_FIRST(&lock_sys.rec_hash, start)); - lock != NULL; - lock = static_cast(HASH_GET_NEXT(hash, lock))) { - + lock_sys.rec_hash.array[start].node); + lock; lock = lock->hash) { ut_ad(!lock->trx->read_only || !lock->trx->is_autocommit_non_locking()); ut_ad(!lock->is_table()); @@ -5773,14 +5777,26 @@ lock_rec_insert_check_and_lock( ut_ad(page_is_leaf(block->page.frame)); ut_ad(!index->table->is_temporary()); - const rec_t *next_rec= page_rec_get_next_const(rec); - if (UNIV_UNLIKELY(!next_rec || rec_is_metadata(next_rec, *index))) - return DB_CORRUPTION; + const auto comp= page_is_comp(block->page.frame); + const rec_t *next_rec; + if (UNIV_LIKELY(comp != 0)) + { + next_rec= page_rec_next_get(block->page.frame, rec); + if (UNIV_UNLIKELY(!next_rec || rec_is_metadata(next_rec, TRUE))) + return DB_CORRUPTION; + } + else + { + next_rec= page_rec_next_get(block->page.frame, rec); + if (UNIV_UNLIKELY(!next_rec || rec_is_metadata(next_rec, FALSE))) + return DB_CORRUPTION; + } dberr_t err= DB_SUCCESS; bool inherit_in= *inherit; trx_t *trx= thr_get_trx(thr); - ulint heap_no= page_rec_get_heap_no(next_rec); + const ulint heap_no= comp + ? rec_get_heap_no_new(next_rec) : rec_get_heap_no_old(next_rec); const page_id_t id{block->page.id()}; { @@ -5863,12 +5879,12 @@ lock_rec_insert_check_and_lock( /** Create an explicit record lock for a transaction that currently only has an implicit lock on the record. @param trx referenced, active transaction, or nullptr -@param id page identifier +@param block index leaf page @param rec record in the page @param index the index B-tree that the record belongs to @return trx, with the reference released */ static trx_t *lock_rec_convert_impl_to_expl_for_trx(trx_t *trx, - const page_id_t id, + const buf_block_t &block, const rec_t *rec, dict_index_t *index) { @@ -5878,7 +5894,8 @@ static trx_t *lock_rec_convert_impl_to_expl_for_trx(trx_t *trx, ut_ad(page_rec_is_leaf(rec)); ut_ad(!rec_is_metadata(rec, *index)); - ulint heap_no= page_rec_get_heap_no(rec); + const ulint heap_no= lock_get_heap_no(block, rec); + const page_id_t id{block.page.id()}; { LockGuard g{lock_sys.rec_hash, id}; @@ -5983,7 +6000,7 @@ should be created. 
@tparam is_primary whether the index is the primary key @param[in,out] caller_trx current transaction -@param[in] id index tree leaf page identifier +@param[in] block index tree leaf page @param[in] rec record on the leaf page @param[in] index the index of the record @param[in] offsets rec_get_offsets(rec,index) @@ -5994,7 +6011,7 @@ static const trx_t * lock_rec_convert_impl_to_expl( trx_t* caller_trx, - page_id_t id, + const buf_block_t& block, const rec_t* rec, dict_index_t* index, const rec_offs* offsets) @@ -6032,10 +6049,11 @@ lock_rec_convert_impl_to_expl( return trx; } - ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec, id)); + ut_d(lock_rec_other_trx_holds_expl(caller_trx, trx, rec, + block.page.id())); } - return lock_rec_convert_impl_to_expl_for_trx(trx, id, rec, index); + return lock_rec_convert_impl_to_expl_for_trx(trx, block, rec, index); } /*********************************************************************//** @@ -6076,7 +6094,7 @@ lock_clust_rec_modify_check_and_lock( trx_t *trx = thr_get_trx(thr); if (const trx_t *owner = - lock_rec_convert_impl_to_expl(trx, block->page.id(), + lock_rec_convert_impl_to_expl(trx, *block, rec, index, offsets)) { if (owner == trx) { /* We already hold an exclusive lock. */ @@ -6126,7 +6144,6 @@ lock_sec_rec_modify_check_and_lock( ut_ad(!dict_index_is_clust(index)); ut_ad(!dict_index_is_online_ddl(index) || (flags & BTR_CREATE_FLAG)); - ut_ad(block->page.frame == page_align(rec)); ut_ad(mtr->is_named_space(index->table->space)); ut_ad(page_rec_is_leaf(rec)); ut_ad(!rec_is_metadata(rec, *index)); @@ -6137,7 +6154,7 @@ lock_sec_rec_modify_check_and_lock( } ut_ad(!index->table->is_temporary()); - heap_no = page_rec_get_heap_no(rec); + heap_no = lock_get_heap_no(*block, rec); #ifdef WITH_WSREP trx_t *trx= thr_get_trx(thr); @@ -6250,7 +6267,7 @@ lock_sec_rec_read_check_and_lock( if (page_rec_is_supremum(rec)) { } else if (const trx_t *owner = - lock_rec_convert_impl_to_expl(trx, block->page.id(), + lock_rec_convert_impl_to_expl(trx, *block, rec, index, offsets)) { if (owner == trx) { if (gap_mode == LOCK_REC_NOT_GAP) { @@ -6276,7 +6293,7 @@ lock_sec_rec_read_check_and_lock( #endif /* WITH_WSREP */ err = lock_rec_lock(false, gap_mode | mode, - block, page_rec_get_heap_no(rec), index, thr); + block, lock_get_heap_no(*block, rec), index, thr); #ifdef WITH_WSREP if (trx->wsrep == 3) trx->wsrep = 1; @@ -6335,15 +6352,13 @@ lock_clust_rec_read_check_and_lock( return(DB_SUCCESS); } - const page_id_t id{block->page.id()}; - - ulint heap_no = page_rec_get_heap_no(rec); + const ulint heap_no = lock_get_heap_no(*block, rec); trx_t *trx = thr_get_trx(thr); if (lock_table_has(trx, index->table, LOCK_X) || heap_no == PAGE_HEAP_NO_SUPREMUM) { } else if (const trx_t *owner = - lock_rec_convert_impl_to_expl(trx, id, + lock_rec_convert_impl_to_expl(trx, *block, rec, index, offsets)) { if (owner == trx) { if (gap_mode == LOCK_REC_NOT_GAP) { @@ -6360,14 +6375,17 @@ lock_clust_rec_read_check_and_lock( && trx->snapshot_isolation && trx->read_view.is_open() && !trx->read_view.changes_visible( - trx_read_trx_id(rec + row_trx_id_offset(rec, index)))) { + trx_read_trx_id(rec + row_trx_id_offset(rec, index))) + && IF_WSREP(!(trx->is_wsrep() + && wsrep_thd_skip_locking(trx->mysql_thd)), true)) { return DB_RECORD_CHANGED; } dberr_t err = lock_rec_lock(false, gap_mode | mode, block, heap_no, index, thr); - ut_ad(lock_rec_queue_validate(false, id, rec, index, offsets)); + ut_ad(lock_rec_queue_validate(false, block->page.id(), + rec, index, offsets)); 
DEBUG_SYNC_C("after_lock_clust_rec_read_check_and_lock"); diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index 2975659138d..3ea05ddb741 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -895,7 +895,7 @@ void lock_sys_t::prdt_page_free_from_discard(const page_id_t id, bool all) for (lock_t *lock= get_first(*cell, id), *next; lock; lock= next) { next= lock_rec_get_next_on_page(lock); - lock_rec_discard(prdt_page_hash, lock); + lock_rec_discard(lock, *cell); } if (all) @@ -907,7 +907,7 @@ void lock_sys_t::prdt_page_free_from_discard(const page_id_t id, bool all) for (lock_t *lock= get_first(*cell, id), *next; lock; lock= next) { next= lock_rec_get_next_on_page(lock); - lock_rec_discard(prdt_hash, lock); + lock_rec_discard(lock, *cell); } } @@ -919,7 +919,7 @@ void lock_sys_t::prdt_page_free_from_discard(const page_id_t id, bool all) for (lock_t *lock= get_first(*cell, id), *next; lock; lock= next) { next= lock_rec_get_next_on_page(lock); - lock_rec_discard(rec_hash, lock); + lock_rec_discard(lock, *cell); } latch->release(); diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 443a3b692d4..1c0a2fe7249 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -1387,7 +1387,6 @@ void recv_sys_t::debug_free() inline void recv_sys_t::free(const void *data) { ut_ad(!ut_align_offset(data, ALIGNMENT)); - data= page_align(data); mysql_mutex_assert_owner(&mutex); /* MDEV-14481 FIXME: To prevent race condition with buf_pool.resize(), @@ -1404,7 +1403,7 @@ inline void recv_sys_t::free(const void *data) if (offs >= chunk->size) continue; buf_block_t *block= &chunk->blocks[offs]; - ut_ad(block->page.frame == data); + ut_ad(block->page.frame == page_align(data)); ut_ad(block->page.state() == buf_page_t::MEMORY); ut_ad(static_cast(block->page.access_time - 1) < srv_page_size); @@ -4472,7 +4471,7 @@ static dberr_t recv_rename_files() err= space->rename(new_name, false); if (err != DB_SUCCESS) sql_print_error("InnoDB: Cannot replay rename of tablespace " - UINT32PF " to '%s: %s", new_name, ut_strerr(err)); + UINT32PF " to '%s': %s", id, new_name, ut_strerr(err)); goto done; } mysql_mutex_unlock(&fil_system.mutex); diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index ff2ac7ea9ac..35f1c31d7b4 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -2634,7 +2634,7 @@ os_file_io( if (type.type != IORequest::READ_MAYBE_PARTIAL) { sql_print_warning("InnoDB: %zu bytes should have been" - " %s at %llu from %s," + " %s at %" PRIu64 " from %s," " but got only %zd." " Retrying.", n, type.is_read() @@ -2808,7 +2808,7 @@ os_file_read_func( os_file_handle_error_no_exit(type.node ? type.node->name : nullptr, "read", false); - sql_print_error("InnoDB: Tried to read %zu bytes at offset %llu" + sql_print_error("InnoDB: Tried to read %zu bytes at offset %" PRIu64 " of file %s, but was only able to read %zd", n, offset, type.node ? 
type.node->name : "(unknown)", n_bytes); @@ -3792,23 +3792,6 @@ void fil_node_t::find_metadata(IF_WIN(,bool create)) noexcept punch_hole= 2; else punch_hole= IF_WIN(, !create ||) os_is_sparse_file_supported(file); - -#ifdef _WIN32 - on_ssd= is_file_on_ssd(file, name); - FILE_STORAGE_INFO info; - if (GetFileInformationByHandleEx(file, FileStorageInfo, &info, sizeof info)) - block_size= info.PhysicalBytesPerSectorForAtomicity; - else - block_size= 512; - if (space->purpose != FIL_TYPE_TABLESPACE) - { - /* For temporary tablespace or during IMPORT TABLESPACE, we - disable neighbour flushing and do not care about atomicity. */ - on_ssd= true; - atomic_write= true; - return; - } -#else if (space->purpose != FIL_TYPE_TABLESPACE) { /* For temporary tablespace or during IMPORT TABLESPACE, we @@ -3818,6 +3801,14 @@ void fil_node_t::find_metadata(IF_WIN(,bool create)) noexcept if (space->purpose == FIL_TYPE_TEMPORARY || !space->is_compressed()) return; } +#ifdef _WIN32 + on_ssd= is_file_on_ssd(file, name); + FILE_STORAGE_INFO info; + if (GetFileInformationByHandleEx(file, FileStorageInfo, &info, sizeof info)) + block_size= info.PhysicalBytesPerSectorForAtomicity; + else + block_size= 512; +#else struct stat statbuf; if (!fstat(file, &statbuf)) { @@ -3914,7 +3905,8 @@ bool fil_node_t::read_page0(const byte *dpage, bool no_lsn) noexcept invalid: if (dpage) goto retry_dpage; - sql_print_error("InnoDB: Expected tablespace flags 0x%zx but found 0x%zx" + sql_print_error("InnoDB: Expected tablespace flags 0x%" PRIx32 + " but found 0x%" PRIx32 " in the file %s", space->flags, flags, name); goto err_exit; } diff --git a/storage/innobase/page/page0cur.cc b/storage/innobase/page/page0cur.cc index 2562ae1d0e4..d1263a1489a 100644 --- a/storage/innobase/page/page0cur.cc +++ b/storage/innobase/page/page0cur.cc @@ -926,7 +926,7 @@ static void page_zip_dir_split_slot(buf_block_t *block, ulint s, mtr_t* mtr) /* Log changes to the compressed page header and the dense page directory. */ memcpy_aligned<2>(&block->page.zip.data[n_slots_f], n_slots_p, 2); - mach_write_to_2(slot, page_offset(rec)); + mach_write_to_2(slot, rec - block->page.frame); page_rec_set_n_owned(block, page_dir_slot_get_rec(slot), half_owned, true, mtr); page_rec_set_n_owned(block, @@ -991,7 +991,8 @@ static void page_zip_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr) /* Transfer one record to the underfilled slot */ page_rec_set_n_owned(block, slot_rec, 0, true, mtr); - const rec_t* new_rec = page_rec_get_next_low(slot_rec, TRUE); + const rec_t* new_rec = page_rec_next_get(block->page.frame, + slot_rec); /* We do not try to prevent crash on corruption here. For ROW_FORMAT=COMPRESSED pages, the next-record links should be validated in page_zip_decompress(). 
Corruption should only @@ -999,7 +1000,7 @@ static void page_zip_dir_balance_slot(buf_block_t *block, ulint s, mtr_t *mtr) page_rec_set_n_owned(block, const_cast(new_rec), PAGE_DIR_SLOT_MIN_N_OWNED, true, mtr); - mach_write_to_2(slot, page_offset(new_rec)); + mach_write_to_2(slot, new_rec - block->page.frame); page_rec_set_n_owned(up_rec, up_n_owned - 1, true); } @@ -1061,7 +1062,8 @@ static void page_dir_balance_slot(const buf_block_t &block, ulint s) if (comp) { if (UNIV_UNLIKELY(!(new_rec = - page_rec_get_next_low(slot_rec, true)))) { + page_rec_next_get(block.page.frame, + slot_rec)))) { ut_ad("corrupted page" == 0); return; } @@ -1071,7 +1073,8 @@ static void page_dir_balance_slot(const buf_block_t &block, ulint s) page_rec_set_n_owned(up_rec, up_n_owned - 1, true); } else { if (UNIV_UNLIKELY(!(new_rec = - page_rec_get_next_low(slot_rec, false)))) { + page_rec_next_get(block.page.frame, + slot_rec)))) { ut_ad("corrupted page" == 0); return; } @@ -1081,7 +1084,7 @@ static void page_dir_balance_slot(const buf_block_t &block, ulint s) page_rec_set_n_owned(up_rec, up_n_owned - 1, false); } - mach_write_to_2(slot, page_offset(new_rec)); + mach_write_to_2(slot, new_rec - block.page.frame); } /** Allocate space for inserting an index record. @@ -1476,7 +1479,8 @@ use_heap: rec_get_node_ptr_flag(rec)); /* Write PAGE_LAST_INSERT */ - mach_write_to_2(page_last_insert, page_offset(insert_buf + extra_size)); + mach_write_to_2(page_last_insert, + insert_buf + extra_size - block->page.frame); /* Update PAGE_DIRECTION_B, PAGE_N_DIRECTION if needed */ if (block->page.frame[FIL_PAGE_TYPE + 1] != byte(FIL_PAGE_RTREE)) @@ -1609,7 +1613,7 @@ inc_dir: rec_set_bit_field_2(insert_rec, heap_no, REC_OLD_HEAP_NO, REC_HEAP_NO_MASK, REC_HEAP_NO_SHIFT); memcpy(insert_rec - REC_NEXT, cur->rec - REC_NEXT, 2); - mach_write_to_2(cur->rec - REC_NEXT, page_offset(insert_rec)); + mach_write_to_2(cur->rec - REC_NEXT, insert_rec - block->page.frame); while (!(n_owned= rec_get_n_owned_old(next_rec))) { next_rec= block->page.frame + rec_get_next_offs(next_rec, false); @@ -2044,7 +2048,7 @@ use_heap: } /* next record after current before the insertion */ - const rec_t *next_rec = page_rec_get_next_low(cursor->rec, TRUE); + const rec_t *next_rec = page_rec_next_get(page, cursor->rec); if (UNIV_UNLIKELY(!next_rec || rec_get_status(next_rec) == REC_STATUS_INFIMUM || rec_get_status(cursor->rec) > REC_STATUS_INFIMUM)) @@ -2083,7 +2087,7 @@ use_heap: ut_ad(!last_insert_rec || rec_get_node_ptr_flag(page + last_insert_rec) == rec_get_node_ptr_flag(insert_rec)); - mach_write_to_2(last_insert, page_offset(insert_rec)); + mach_write_to_2(last_insert, insert_rec - page); if (!index->is_spatial()) { @@ -2103,7 +2107,7 @@ no_direction: inc_dir: mach_write_to_2(n, mach_read_from_2(n) + 1); } - else if (*dir != PAGE_RIGHT && page_rec_get_next(insert_rec) == + else if (*dir != PAGE_RIGHT && page_rec_next_get(page, insert_rec) == page + last_insert_rec) { *dir= PAGE_LEFT; @@ -2124,7 +2128,7 @@ inc_dir: ulint n_owned; while (!(n_owned= rec_get_n_owned_new(next_rec))) - if (!(next_rec= page_rec_get_next_low(next_rec, true))) + if (!(next_rec= page_rec_next_get(page, next_rec))) return nullptr; rec_set_bit_field_1(const_cast(next_rec), n_owned + 1, @@ -2174,12 +2178,13 @@ static void page_mem_free(const buf_block_t &block, rec_t *rec, byte *page_heap_top= my_assume_aligned<2>(PAGE_HEAP_TOP + PAGE_HEADER + block.page.frame); const uint16_t heap_top= mach_read_from_2(page_heap_top); - const size_t extra_savings= heap_top - page_offset(rec + 
data_size); + const size_t extra_savings= heap_top - + (rec + data_size - block.page.frame); ut_ad(extra_savings < heap_top); /* When deleting the last record, do not add it to the PAGE_FREE list. Instead, decrement PAGE_HEAP_TOP and PAGE_N_HEAP. */ - mach_write_to_2(page_heap_top, page_offset(rec - extra_size)); + mach_write_to_2(page_heap_top, rec - extra_size - block.page.frame); mach_write_to_2(my_assume_aligned<2>(page_heap_top + 2), n_heap); static_assert(PAGE_N_HEAP == PAGE_HEAP_TOP + 2, "compatibility"); if (extra_savings) @@ -2197,7 +2202,7 @@ static void page_mem_free(const buf_block_t &block, rec_t *rec, block.page.frame); byte *page_garbage= my_assume_aligned<2>(PAGE_GARBAGE + PAGE_HEADER + block.page.frame); - mach_write_to_2(page_free, page_offset(rec)); + mach_write_to_2(page_free, rec - block.page.frame); mach_write_to_2(page_garbage, mach_read_from_2(page_garbage) + extra_size + data_size); } @@ -2340,7 +2345,8 @@ page_cur_delete_rec( page_zip_rec_set_owned(block, prev_rec, 1, mtr); page_zip_rec_set_owned(block, slot_rec, 0, mtr); slot_rec = prev_rec; - mach_write_to_2(cur_dir_slot, page_offset(slot_rec)); + mach_write_to_2(cur_dir_slot, + slot_rec - block->page.frame); } else if (cur_n_owned == 1 && !page_rec_is_supremum(slot_rec)) { page_zip_rec_set_owned(block, slot_rec, 0, mtr); @@ -2365,14 +2371,14 @@ page_cur_delete_rec( if (current_rec == slot_rec) { slot_rec = prev_rec; - mach_write_to_2(cur_dir_slot, page_offset(slot_rec)); + mach_write_to_2(cur_dir_slot, slot_rec - block->page.frame); } const size_t data_size = rec_offs_data_size(offsets); const size_t extra_size = rec_offs_extra_size(offsets); if (page_is_comp(block->page.frame)) { - mtr->page_delete(*block, page_offset(prev_rec) + mtr->page_delete(*block, prev_rec - block->page.frame - PAGE_NEW_INFIMUM, extra_size - REC_N_NEW_EXTRA_BYTES, data_size); @@ -2382,7 +2388,7 @@ page_cur_delete_rec( (slot_rec[-REC_NEW_N_OWNED] & ~REC_N_OWNED_MASK) | (cur_n_owned - 1) << REC_N_OWNED_SHIFT); } else { - mtr->page_delete(*block, page_offset(prev_rec) + mtr->page_delete(*block, prev_rec - block->page.frame - PAGE_OLD_INFIMUM); memcpy(prev_rec - REC_NEXT, current_rec - REC_NEXT, 2); slot_rec[-REC_OLD_N_OWNED] = static_cast( diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index e98022fb56b..43475882943 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -99,7 +99,7 @@ page_dir_find_owner_slot( if (page_is_comp(page)) { while (rec_get_n_owned_new(r) == 0) { - r = page_rec_get_next_low(r, true); + r = page_rec_next_get(page, r); if (UNIV_UNLIKELY(r < page + PAGE_NEW_SUPREMUM || r >= slot)) { return ULINT_UNDEFINED; @@ -107,7 +107,7 @@ page_dir_find_owner_slot( } } else { while (rec_get_n_owned_old(r) == 0) { - r = page_rec_get_next_low(r, false); + r = page_rec_next_get(page, r); if (UNIV_UNLIKELY(r < page + PAGE_OLD_SUPREMUM || r >= slot)) { return ULINT_UNDEFINED; @@ -455,7 +455,8 @@ page_copy_rec_list_end_no_locks( return DB_CORRUPTION; } - if (UNIV_UNLIKELY(page_is_comp(new_page) != page_rec_is_comp(rec) + if (UNIV_UNLIKELY(page_is_comp(new_page) + != page_is_comp(block->page.frame) || mach_read_from_2(new_page + srv_page_size - 10) != ulint(page_is_comp(new_page) ? PAGE_NEW_INFIMUM : PAGE_OLD_INFIMUM))) { @@ -872,11 +873,12 @@ page_delete_rec_list_end( return DB_SUCCESS; } - if (page_rec_is_infimum(rec) || - n_recs == page_get_n_recs(page) || - rec == (page_is_comp(page) - ? 
page_rec_get_next_low(page + PAGE_NEW_INFIMUM, 1) - : page_rec_get_next_low(page + PAGE_OLD_INFIMUM, 0))) + if (n_recs == page_get_n_recs(page) || + (page_is_comp(page) + ? (rec == page + PAGE_NEW_INFIMUM || + rec == page_rec_next_get(page, page + PAGE_NEW_INFIMUM)) + : (rec == page + PAGE_OLD_INFIMUM || + rec == page_rec_next_get(page, page + PAGE_OLD_INFIMUM)))) { /* We are deleting all records. */ page_create_empty(block, index, mtr); @@ -912,13 +914,13 @@ page_delete_rec_list_end( cur.index= index; offsets= rec_get_offsets(rec, index, offsets, n_core, ULINT_UNDEFINED, &heap); - rec= const_cast(page_rec_get_next_low(rec, true)); + rec= const_cast(page_rec_next_get(page, rec)); #ifdef UNIV_ZIP_DEBUG ut_a(page_zip_validate(&block->page.zip, page, index)); #endif /* UNIV_ZIP_DEBUG */ page_cur_delete_rec(&cur, offsets, mtr); } - while (page_offset(rec) != PAGE_NEW_SUPREMUM); + while (rec - page != PAGE_NEW_SUPREMUM); if (UNIV_LIKELY_NULL(heap)) mem_heap_free(heap); @@ -955,7 +957,7 @@ page_delete_rec_list_end( n_recs++; if (scrub) - mtr->memset(block, page_offset(rec2), rec_offs_data_size(offsets), 0); + mtr->memset(block, rec2 - page, rec_offs_data_size(offsets), 0); rec2= page_rec_get_next(rec2); } @@ -979,14 +981,14 @@ page_delete_rec_list_end( while (!(n_owned= rec_get_n_owned_new(owner_rec))) { count++; - if (!(owner_rec= page_rec_get_next_low(owner_rec, true))) + if (!(owner_rec= page_rec_next_get(page, owner_rec))) return DB_CORRUPTION; } else while (!(n_owned= rec_get_n_owned_old(owner_rec))) { count++; - if (!(owner_rec= page_rec_get_next_low(owner_rec, false))) + if (!(owner_rec= page_rec_next_get(page, owner_rec))) return DB_CORRUPTION; } @@ -1009,7 +1011,7 @@ page_delete_rec_list_end( const uint16_t free= page_header_get_field(page, PAGE_FREE); static_assert(PAGE_FREE + 2 == PAGE_GARBAGE, "compatibility"); - mach_write_to_2(page_header, page_offset(rec)); + mach_write_to_2(page_header, rec - page); mach_write_to_2(my_assume_aligned<2>(page_header + 2), mach_read_from_2(my_assume_aligned<2>(page_free + 2)) + size); @@ -1039,19 +1041,17 @@ page_delete_rec_list_end( PAGE_N_RECS + 2 - PAGE_N_DIR_SLOTS); // TODO: the equivalent of page_zip_dir_delete() for all records mach_write_to_2(prev_rec - REC_NEXT, static_cast - (PAGE_NEW_SUPREMUM - page_offset(prev_rec))); + (PAGE_NEW_SUPREMUM - (prev_rec - page))); mach_write_to_2(last_rec - REC_NEXT, free - ? static_cast(free - page_offset(last_rec)) - : 0U); + ? uint16_t(free - (last_rec - block->page.frame)) : 0U); return DB_SUCCESS; } #endif mtr->write<1,mtr_t::MAYBE_NOP>(*block, owned, new_owned); mtr->write<2>(*block, prev_rec - REC_NEXT, static_cast - (PAGE_NEW_SUPREMUM - page_offset(prev_rec))); + (PAGE_NEW_SUPREMUM - (prev_rec - block->page.frame))); mtr->write<2>(*block, last_rec - REC_NEXT, free - ? static_cast(free - page_offset(last_rec)) - : 0U); + ? 
uint16_t(free - (last_rec - block->page.frame)) : 0U); } else { @@ -1175,11 +1175,11 @@ page_rec_get_nth_const( if (page_is_comp(page)) { do { - rec = page_rec_get_next_low(rec, TRUE); + rec = page_rec_next_get(page, rec); } while (rec && nth--); } else { do { - rec = page_rec_get_next_low(rec, FALSE); + rec = page_rec_next_get(page, rec); } while (rec && nth--); } @@ -1279,7 +1279,7 @@ ulint page_rec_get_n_recs_before(const rec_t *rec) if (page_is_comp(page)) { for (; rec_get_n_owned_new(rec) == 0; n--) - if (UNIV_UNLIKELY(!(rec= page_rec_get_next_low(rec, true)))) + if (UNIV_UNLIKELY(!(rec= page_rec_next_get(page, rec)))) return ULINT_UNDEFINED; do @@ -1297,7 +1297,7 @@ ulint page_rec_get_n_recs_before(const rec_t *rec) else { for (; rec_get_n_owned_old(rec) == 0; n--) - if (UNIV_UNLIKELY(!(rec= page_rec_get_next_low(rec, false)))) + if (UNIV_UNLIKELY(!(rec= page_rec_next_get(page, rec)))) return ULINT_UNDEFINED; do @@ -1362,9 +1362,8 @@ page_dir_print( fprintf(stderr, "--------------------------------\n" "PAGE DIRECTORY\n" "Page address %p\n" - "Directory stack top at offs: %lu; number of slots: %lu\n", - page, (ulong) page_offset(page_dir_get_nth_slot(page, n - 1)), - (ulong) n); + "Directory stack top at offs: %zu; number of slots: %zu\n", + page, page_dir_get_nth_slot(page, n - 1) - page, n); for (i = 0; i < n; i++) { slot = page_dir_get_nth_slot(page, i); if ((i == pr_n) && (i < n - pr_n)) { @@ -1372,17 +1371,16 @@ page_dir_print( } if ((i < pr_n) || (i >= n - pr_n)) { fprintf(stderr, - "Contents of slot: %lu: n_owned: %lu," - " rec offs: %lu\n", - (ulong) i, - (ulong) page_dir_slot_get_n_owned(slot), - (ulong) - page_offset(page_dir_slot_get_rec(slot))); + "Contents of slot: %zu: n_owned: %zu," + " rec offs: %zu\n", + i, + page_dir_slot_get_n_owned(slot), + page_dir_slot_get_rec(slot) - page); } } - fprintf(stderr, "Total of %lu records\n" + fprintf(stderr, "Total of %zu records\n" "--------------------------------\n", - (ulong) (PAGE_HEAP_NO_USER_LOW + page_get_n_recs(page))); + PAGE_HEAP_NO_USER_LOW + page_get_n_recs(page)); } /***************************************************************//** @@ -1525,7 +1523,7 @@ page_rec_validate( page_rec_check(rec); rec_validate(rec, offsets); - if (page_rec_is_comp(rec)) { + if (page_is_comp(page)) { n_owned = rec_get_n_owned_new(rec); heap_no = rec_get_heap_no_new(rec); } else { @@ -1534,13 +1532,13 @@ page_rec_validate( } if (UNIV_UNLIKELY(!(n_owned <= PAGE_DIR_SLOT_MAX_N_OWNED))) { - ib::warn() << "Dir slot of rec " << page_offset(rec) + ib::warn() << "Dir slot of rec " << rec - page << ", n owned too big " << n_owned; return(FALSE); } if (UNIV_UNLIKELY(!(heap_no < page_dir_get_n_heap(page)))) { - ib::warn() << "Heap no of rec " << page_offset(rec) + ib::warn() << "Heap no of rec " << rec - page << " too big " << heap_no << " " << page_dir_get_n_heap(page); return(FALSE); @@ -1622,8 +1620,7 @@ page_simple_validate_old( << "Record heap and dir overlap on a page, heap top " << page_header_get_field(page, PAGE_HEAP_TOP) << ", dir " - << page_offset(page_dir_get_nth_slot(page, - n_slots - 1)); + << page_dir_get_nth_slot(page, n_slots - 1) - page; goto func_exit; } @@ -1700,7 +1697,7 @@ page_simple_validate_old( goto func_exit; } - rec = page_rec_get_next_const(rec); + rec = page_rec_next_get(page, rec); own_count++; } @@ -1820,9 +1817,9 @@ page_simple_validate_new( ib::error() << "Record heap and dir overlap on a page," " heap top " - << page_header_get_field(page, PAGE_HEAP_TOP) - << ", dir " << page_offset( - page_dir_get_nth_slot(page, 
n_slots - 1)); + << page_header_get_field(page, PAGE_HEAP_TOP) + << ", dir " + << page_dir_get_nth_slot(page, n_slots - 1) - page; goto func_exit; } @@ -1840,9 +1837,9 @@ page_simple_validate_new( for (;;) { if (UNIV_UNLIKELY(rec < page + PAGE_NEW_INFIMUM || rec > rec_heap_top)) { - ib::error() << "Record " << page_offset(rec) + ib::error() << "Record " << rec - page << " is out of bounds: " - << page_offset(rec_heap_top); + << rec_heap_top - page; goto func_exit; } @@ -1854,7 +1851,7 @@ page_simple_validate_new( ib::error() << "Wrong owned count " << rec_get_n_owned_new(rec) << ", " << own_count << ", rec " - << page_offset(rec); + << rec - page; goto func_exit; } @@ -1862,7 +1859,7 @@ page_simple_validate_new( if (UNIV_UNLIKELY (page_dir_slot_get_rec(slot) != rec)) { ib::error() << "Dir slot does not point" - " to right rec " << page_offset(rec); + " to right rec " << rec - page; goto func_exit; } @@ -1886,7 +1883,7 @@ page_simple_validate_new( ib::error() << "Next record offset nonsensical " << rec_get_next_offs(rec, TRUE) - << " for rec " << page_offset(rec); + << " for rec " << rec - page; goto func_exit; } @@ -1899,7 +1896,7 @@ page_simple_validate_new( goto func_exit; } - rec = page_rec_get_next_const(rec); + rec = page_rec_next_get(page, rec); own_count++; } @@ -1933,15 +1930,15 @@ page_simple_validate_new( || rec >= page + srv_page_size)) { ib::error() << "Free list record has" - " a nonsensical offset " << page_offset(rec); + " a nonsensical offset " << rec - page; goto func_exit; } if (UNIV_UNLIKELY(rec > rec_heap_top)) { - ib::error() << "Free list record " << page_offset(rec) + ib::error() << "Free list record " << rec - page << " is above rec heap top " - << page_offset(rec_heap_top); + << rec_heap_top - page; goto func_exit; } @@ -2251,7 +2248,7 @@ wrong_page_type: #endif /* UNIV_GIS_DEBUG */ } - offs = page_offset(rec_get_start(rec, offsets)); + offs = rec_get_start(rec, offsets) - page; i = rec_offs_size(offsets); if (UNIV_UNLIKELY(offs + i >= srv_page_size)) { ib::error() << "Record offset out of bounds: " @@ -2381,7 +2378,7 @@ next_free: } count++; - offs = page_offset(rec_get_start(rec, offsets)); + offs = rec_get_start(rec, offsets) - page; i = rec_offs_size(offsets); if (UNIV_UNLIKELY(offs + i >= srv_page_size)) { ib::error() << "Free record offset out of bounds: " @@ -2480,7 +2477,7 @@ const rec_t *page_find_rec_last_not_deleted(const page_t *page) if (!(rec[-REC_NEW_INFO_BITS] & (REC_INFO_DELETED_FLAG | REC_INFO_MIN_REC_FLAG))) prev_rec= rec; - if (!(rec= page_rec_get_next_low(rec, true))) + if (!(rec= page_rec_next_get(page, rec))) return page + PAGE_NEW_INFIMUM; } while (rec != page + PAGE_NEW_SUPREMUM); return prev_rec; @@ -2494,7 +2491,7 @@ const rec_t *page_find_rec_last_not_deleted(const page_t *page) if (!(rec[-REC_OLD_INFO_BITS] & (REC_INFO_DELETED_FLAG | REC_INFO_MIN_REC_FLAG))) prev_rec= rec; - if (!(rec= page_rec_get_next_low(rec, false))) + if (!(rec= page_rec_next_get(page, rec))) return page + PAGE_OLD_INFIMUM; } while (rec != page + PAGE_OLD_SUPREMUM); return prev_rec; diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index 4eda832253b..945792e4ffd 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -3400,17 +3400,16 @@ page_zip_validate_low( goto func_exit; } - rec = page_rec_get_next_low(rec, TRUE); - trec = page_rec_get_next_low(trec, TRUE); + rec = page_rec_next_get(page, rec); + trec = page_rec_next_get(temp_page, trec); } /* Compare the records. 
*/ heap = NULL; offsets = NULL; - rec = page_rec_get_next_low( - page + PAGE_NEW_INFIMUM, TRUE); - trec = page_rec_get_next_low( - temp_page + PAGE_NEW_INFIMUM, TRUE); + rec = page_rec_next_get(page, page + PAGE_NEW_INFIMUM); + trec = page_rec_next_get(temp_page, + temp_page + PAGE_NEW_INFIMUM); const ulint n_core = (index && page_is_leaf(page)) ? index->n_fields : 0; @@ -3442,8 +3441,8 @@ page_zip_validate_low( } } - rec = page_rec_get_next_low(rec, TRUE); - trec = page_rec_get_next_low(trec, TRUE); + rec = page_rec_next_get(page, rec); + trec = page_rec_next_get(temp_page, trec); } while (rec || trec); if (heap) { diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 7ac134d68b1..7884d90be2a 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -1634,7 +1634,7 @@ row_ins_check_foreign_constraint( const rec_t* rec = btr_pcur_get_rec(&pcur); const buf_block_t* block = btr_pcur_get_block(&pcur); - if (page_rec_is_infimum(rec)) { + if (page_rec_is_infimum_low(rec - block->page.frame)) { continue; } @@ -1643,7 +1643,7 @@ row_ins_check_foreign_constraint( check_index->n_core_fields, ULINT_UNDEFINED, &heap); - if (page_rec_is_supremum(rec)) { + if (page_rec_is_supremum_low(rec - block->page.frame)) { if (skip_gap_lock) { @@ -2124,7 +2124,7 @@ row_ins_scan_sec_index_for_duplicate( const buf_block_t* block = btr_pcur_get_block(&pcur); const ulint lock_type = LOCK_ORDINARY; - if (page_rec_is_infimum(rec)) { + if (page_rec_is_infimum_low(rec - block->page.frame)) { continue; } @@ -2160,7 +2160,7 @@ row_ins_scan_sec_index_for_duplicate( goto end_scan; } - if (page_rec_is_supremum(rec)) { + if (page_rec_is_supremum_low(rec - block->page.frame)) { continue; } @@ -2277,7 +2277,8 @@ row_ins_duplicate_error_in_clust_online( ut_ad(!cursor->index()->is_instant()); - if (cursor->low_match >= n_uniq && !page_rec_is_infimum(rec)) { + if (cursor->low_match >= n_uniq + && !page_rec_is_infimum_low(rec - btr_cur_get_page(cursor))) { *offsets = rec_get_offsets(rec, cursor->index(), *offsets, cursor->index()->n_fields, ULINT_UNDEFINED, heap); @@ -2288,7 +2289,7 @@ row_ins_duplicate_error_in_clust_online( } } - if (!(rec = page_rec_get_next_const(btr_cur_get_rec(cursor)))) { + if (!(rec = page_rec_get_next_const(rec))) { return DB_CORRUPTION; } @@ -2349,7 +2350,7 @@ row_ins_duplicate_error_in_clust( rec = btr_cur_get_rec(cursor); - if (!page_rec_is_infimum(rec)) { + if (!page_rec_is_infimum_low(rec - btr_cur_get_page(cursor))) { offsets = rec_get_offsets(rec, cursor->index(), offsets, cursor->index() @@ -2802,7 +2803,8 @@ row_level_insert: if (UNIV_UNLIKELY(entry->info_bits != 0)) { const rec_t* rec = btr_pcur_get_rec(&pcur); - if (rec_get_info_bits(rec, page_rec_is_comp(rec)) + if (rec_get_info_bits(rec, + page_is_comp(btr_pcur_get_page(&pcur))) & REC_INFO_MIN_REC_FLAG) { trx->error_info = index; err = DB_DUPLICATE_KEY; @@ -3231,7 +3233,8 @@ row_ins_clust_index_entry( #ifdef WITH_WSREP const bool skip_locking - = wsrep_thd_skip_locking(thr_get_trx(thr)->mysql_thd); + = thr_get_trx(thr)->is_wsrep() && + wsrep_thd_skip_locking(thr_get_trx(thr)->mysql_thd); ulint flags = index->table->no_rollback() ? BTR_NO_ROLLBACK : (index->table->is_temporary() || skip_locking) ? 
BTR_NO_LOCKING_FLAG : 0; diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index 4f088eb5d31..4005c474772 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -778,7 +778,10 @@ error: const byte* buf = row_ext_lookup(ext, col->ind, &len); if (UNIV_LIKELY_NULL(buf)) { - ut_a(buf != field_ref_zero); + if (UNIV_UNLIKELY(buf == field_ref_zero)) { + *err = DB_CORRUPTION; + goto error; + } if (i < dict_index_get_n_unique(index)) { dfield_set_data(field, buf, len); } else { @@ -791,7 +794,10 @@ error: const byte* buf = row_ext_lookup(ext, col->ind, &len); if (UNIV_LIKELY_NULL(buf)) { - ut_a(buf != field_ref_zero); + if (UNIV_UNLIKELY(buf == field_ref_zero)) { + *err = DB_CORRUPTION; + goto error; + } dfield_set_data(field, buf, len); } } @@ -2007,23 +2013,29 @@ err_exit: trx->error_key_num = 0; goto func_exit; } else { - rec_t* rec = page_rec_get_next(btr_pcur_get_rec(&pcur)); + const page_t* const page = btr_pcur_get_page(&pcur); + const auto comp = page_is_comp(page); + const rec_t* const rec = comp + ? page_rec_next_get(page, + btr_pcur_get_rec(&pcur)) + : page_rec_next_get(page, + btr_pcur_get_rec(&pcur)); if (!rec) { corrupted_metadata: err = DB_CORRUPTION; goto err_exit; } - if (rec_get_info_bits(rec, page_rec_is_comp(rec)) - & REC_INFO_MIN_REC_FLAG) { + if (rec_get_info_bits(rec, comp) & REC_INFO_MIN_REC_FLAG) { if (!clust_index->is_instant()) { goto corrupted_metadata; } - if (page_rec_is_comp(rec) + if (comp && rec_get_status(rec) != REC_STATUS_INSTANT) { goto corrupted_metadata; } /* Skip the metadata pseudo-record. */ - btr_pcur_get_page_cur(&pcur)->rec = rec; + btr_pcur_get_page_cur(&pcur)->rec = + const_cast(rec); } else if (clust_index->is_instant()) { goto corrupted_metadata; } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 15d959ae601..74aec0aac8a 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -1792,7 +1792,7 @@ row_unlock_for_mysql( lock_rec_unlock( trx, - btr_pcur_get_block(pcur)->page.id(), + *btr_pcur_get_block(pcur), rec, static_cast( prebuilt->select_lock_type)); @@ -2298,6 +2298,9 @@ row_discard_tablespace( trx_t* trx, /*!< in/out: transaction handle */ dict_table_t* table) /*!< in/out: table to be discarded */ { + ut_ad(table->magic_n == DICT_TABLE_MAGIC_N); + ut_ad(!table->is_temporary()); + dberr_t err; /* How do we prevent crashes caused by ongoing operations on @@ -2350,8 +2353,14 @@ row_discard_tablespace( /* All persistent operations successful, update the data dictionary memory cache. */ + ut_ad(dict_sys.locked()); - dict_table_change_id_in_cache(table, new_id); + /* Remove the table from the hash table of id's */ + dict_sys.table_id_hash.cell_get(ut_fold_ull(table->id)) + ->remove(*table, &dict_table_t::id_hash); + table->id = new_id; + dict_sys.table_id_hash.cell_get(ut_fold_ull(table->id)) + ->append(*table, &dict_table_t::id_hash); dict_index_t* index = UT_LIST_GET_FIRST(table->indexes); diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index c7d12f04d50..bc05987c439 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -757,6 +757,22 @@ static bool row_purge_poss_sec(purge_node_t *node, dict_index_t *index, return can_delete; } +/** Report an error about not delete-marked secondary index record +that was about to be purged. 
+@param cur cursor on the secondary index record +@param entry search key */ +ATTRIBUTE_COLD ATTRIBUTE_NOINLINE +static void row_purge_del_mark_error(const btr_cur_t &cursor, + const dtuple_t &entry) +{ + const dict_index_t *index= cursor.index(); + ib::error() << "tried to purge non-delete-marked record in index " + << index->name << " of table " << index->table->name + << ": tuple: " << entry + << ", record: " << rec_index_print(cursor.page_cur.rec, index); + ut_ad(0); +} + __attribute__((nonnull, warn_unused_result)) /** Remove a secondary index entry if possible, by modifying the index tree. @param node purge node @@ -776,6 +792,16 @@ static bool row_purge_remove_sec_if_poss_tree(purge_node_t *node, mtr_t mtr; log_free_check(); +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("enable_row_purge_sec_tree_sync", + debug_sync_set_action(current_thd, STRING_WITH_LEN( + "now SIGNAL " + "purge_sec_tree_begin")); + debug_sync_set_action(current_thd, STRING_WITH_LEN( + "now WAIT_FOR " + "purge_sec_tree_execute")); + ); +#endif mtr.start(); index->set_modified(mtr); pcur.btr_cur.page_cur.index = index; @@ -808,26 +834,13 @@ static bool row_purge_remove_sec_if_poss_tree(purge_node_t *node, /* Remove the index record, which should have been marked for deletion. */ - if (!rec_get_deleted_flag(btr_cur_get_rec( - btr_pcur_get_btr_cur(&pcur)), - dict_table_is_comp(index->table))) { - ib::error() - << "tried to purge non-delete-marked record" - " in index " << index->name - << " of table " << index->table->name - << ": tuple: " << *entry - << ", record: " << rec_index_print( - btr_cur_get_rec( - btr_pcur_get_btr_cur(&pcur)), - index); - - ut_ad(0); - + if (!rec_get_deleted_flag(btr_pcur_get_rec(&pcur), + index->table->not_redundant())) { + row_purge_del_mark_error(pcur.btr_cur, *entry); goto func_exit; } - btr_cur_pessimistic_delete(&err, FALSE, - btr_pcur_get_btr_cur(&pcur), + btr_cur_pessimistic_delete(&err, FALSE, &pcur.btr_cur, 0, false, &mtr); switch (UNIV_EXPECT(err, DB_SUCCESS)) { case DB_SUCCESS: @@ -846,12 +859,34 @@ func_exit: return success; } +/** Compute a nonzero return value of row_purge_remove_sec_if_poss_leaf(). +@param page latched secondary index page +@return PAGE_MAX_TRX_ID for row_purge_remove_sec_if_poss_tree() +@retval 1 if a further row_purge_poss_sec() check is necessary */ +ATTRIBUTE_NOINLINE ATTRIBUTE_COLD +static trx_id_t row_purge_check(const page_t *page) noexcept +{ + trx_id_t id= page_get_max_trx_id(page); + ut_ad(id); + if (trx_sys.find_same_or_older_in_purge(purge_sys.query->trx, id)) + /* Because an active transaction may modify the secondary index + but not PAGE_MAX_TRX_ID, row_purge_poss_sec() must be invoked + again after re-latching the page. Let us return a bogus ID. Yes, + an actual transaction with ID 1 would create the InnoDB dictionary + tables in dict_sys_t::create_or_check_sys_tables(), but it would + exclusively write TRX_UNDO_INSERT_REC records. Purging those + records never involves row_purge_remove_sec_if_poss_tree(). */ + id= 1; + return id; +} + __attribute__((nonnull, warn_unused_result)) /** Remove a secondary index entry if possible, without modifying the tree. 
@param node purge node @param index secondary index @param entry index entry @return PAGE_MAX_TRX_ID for row_purge_remove_sec_if_poss_tree() +@retval 1 if a further row_purge_poss_sec() check is necessary @retval 0 if success or if not found */ static trx_id_t row_purge_remove_sec_if_poss_leaf(purge_node_t *node, dict_index_t *index, @@ -884,30 +919,19 @@ found: /* Before attempting to purge a record, check if it is safe to do so. */ if (row_purge_poss_sec(node, index, entry, &mtr)) { - btr_cur_t* btr_cur = btr_pcur_get_btr_cur(&pcur); - /* Only delete-marked records should be purged. */ - if (!rec_get_deleted_flag( - btr_cur_get_rec(btr_cur), - dict_table_is_comp(index->table))) { - - ib::error() - << "tried to purge non-delete-marked" - " record" " in index " << index->name - << " of table " << index->table->name - << ": tuple: " << *entry - << ", record: " - << rec_index_print( - btr_cur_get_rec(btr_cur), - index); + if (!rec_get_deleted_flag(btr_pcur_get_rec(&pcur), + index->table + ->not_redundant())) { + row_purge_del_mark_error(pcur.btr_cur, *entry); mtr.commit(); dict_set_corrupted(index, "purge"); goto cleanup; } if (index->is_spatial()) { - const buf_block_t* block = btr_cur_get_block( - btr_cur); + const buf_block_t* block = btr_pcur_get_block( + &pcur); const page_id_t id{block->page.id()}; if (id.page_no() != index->page @@ -924,11 +948,11 @@ found: } } - if (btr_cur_optimistic_delete(btr_cur, 0, &mtr) - == DB_FAIL) { - page_max_trx_id = page_get_max_trx_id( - btr_cur_get_page(btr_cur)); - } + if (btr_cur_optimistic_delete(&pcur.btr_cur, 0, &mtr) + == DB_FAIL) { + page_max_trx_id = row_purge_check( + btr_pcur_get_page(&pcur)); + } } } @@ -1065,7 +1089,7 @@ static void row_purge_reset_trx_id(purge_node_t* node, mtr_t* mtr) byte* ptr = rec_get_nth_field( rec, offsets, trx_id_pos, &len); ut_ad(len == DATA_TRX_ID_LEN); - size_t offs = page_offset(ptr); + size_t offs = ptr - block->page.frame; mtr->memset(block, offs, DATA_TRX_ID_LEN, 0); offs += DATA_TRX_ID_LEN; mtr->write<1,mtr_t::MAYBE_NOP>( diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 4442f6f43ba..c865737c3df 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -1619,7 +1619,8 @@ row_sel_try_search_shortcut( return SEL_RETRY; } } else if (!srv_read_only_mode) { - trx_id_t trx_id = page_get_max_trx_id(page_align(rec)); + trx_id_t trx_id = + page_get_max_trx_id(btr_pcur_get_page(&plan->pcur)); ut_ad(trx_id); if (!node->read_view->sees(trx_id)) { return SEL_RETRY; @@ -2040,7 +2041,8 @@ skip_lock: rec = old_vers; } } else if (!srv_read_only_mode) { - trx_id_t trx_id = page_get_max_trx_id(page_align(rec)); + trx_id_t trx_id = page_get_max_trx_id( + btr_pcur_get_page(&plan->pcur)); ut_ad(trx_id); if (!node->read_view->sees(trx_id)) { cons_read_requires_clust_rec = TRUE; @@ -3407,8 +3409,9 @@ Row_sel_get_clust_rec_for_mysql::operator()( page and verify that */ if (dict_index_is_spatial(sec_index) && btr_cur->rtr_info->matches - && (page_align(rec) - == btr_cur->rtr_info->matches->block->page.frame + && (!(ulint(rec + - btr_cur->rtr_info->matches->block->page.frame) + >> srv_page_size_shift) || rec != btr_pcur_get_rec(prebuilt->pcur))) { #ifdef UNIV_DEBUG rtr_info_t* rtr_info = btr_cur->rtr_info; @@ -3535,7 +3538,7 @@ Row_sel_get_clust_rec_for_mysql::operator()( prebuilt->clust_pcur)->page; const lsn_t lsn = mach_read_from_8( - page_align(clust_rec) + FIL_PAGE_LSN); + bpage.frame + FIL_PAGE_LSN); if (lsn != cached_lsn || bpage.id() != cached_page_id @@ -5005,7 
+5008,8 @@ wrong_offs: .buf_fix_count(); ib::error() << "Index corruption: rec offs " - << page_offset(rec) << " next offs " + << rec - btr_pcur_get_page(pcur) + << " next offs " << next_offs << btr_pcur_get_block(pcur)->page.id() << ", index " << index->name @@ -5022,7 +5026,8 @@ wrong_offs: over the corruption to recover as much as possible. */ ib::info() << "Index corruption: rec offs " - << page_offset(rec) << " next offs " + << rec - btr_pcur_get_page(pcur) + << " next offs " << next_offs << btr_pcur_get_block(pcur)->page.id() << ", index " << index->name @@ -5047,10 +5052,12 @@ wrong_offs: if (UNIV_UNLIKELY(srv_force_recovery > 0)) { if (!rec_validate(rec, offsets) - || !btr_index_rec_validate(rec, index, FALSE)) { + || !btr_index_rec_validate(pcur->btr_cur.page_cur, + index, FALSE)) { ib::error() << "Index corruption: rec offs " - << page_offset(rec) << " next offs " + << rec - btr_pcur_get_page(pcur) + << " next offs " << next_offs << btr_pcur_get_block(pcur)->page.id() << ", index " << index->name @@ -5414,7 +5421,7 @@ no_gap_lock: if (!srv_read_only_mode) { trx_id_t trx_id = page_get_max_trx_id( - page_align(rec)); + btr_pcur_get_page(pcur)); ut_ad(trx_id); if (trx->read_view.sees(trx_id)) { goto locks_ok; @@ -6397,7 +6404,8 @@ rec_loop: goto count_or_not; } - else if (const trx_id_t page_trx_id= page_get_max_trx_id(page_align(rec))) + else if (const trx_id_t page_trx_id= + page_get_max_trx_id(btr_pcur_get_page(prebuilt->pcur))) { if (page_trx_id >= trx_sys.get_max_trx_id()) goto invalid_PAGE_MAX_TRX_ID; @@ -6775,7 +6783,7 @@ rec_loop: { push_warning_printf(prebuilt->trx->mysql_thd, Sql_condition::WARN_LEVEL_WARN, ER_NOT_KEYFILE, - "InnoDB: Invalid PAGE_MAX_TRX_ID=%llu" + "InnoDB: Invalid PAGE_MAX_TRX_ID=%" PRIu64 " in index '%-.200s'", page_trx_id, index->name()); prebuilt->autoinc_error= DB_INDEX_CORRUPT; diff --git a/storage/innobase/row/row0umod.cc b/storage/innobase/row/row0umod.cc index 3493d624f1f..dc62be9c554 100644 --- a/storage/innobase/row/row0umod.cc +++ b/storage/innobase/row/row0umod.cc @@ -444,7 +444,8 @@ row_undo_mod_clust( 0, 1ULL << ROLL_PTR_INSERT_FLAG_POS, &mtr); } else { - size_t offs = page_offset(rec + trx_id_offset); + size_t offs = rec + trx_id_offset + - block->page.frame; mtr.memset(block, offs, DATA_TRX_ID_LEN, 0); offs += DATA_TRX_ID_LEN; mtr.write<1,mtr_t::MAYBE_NOP>(*block, @@ -490,15 +491,14 @@ static bool row_undo_mod_sec_is_unsafe(const rec_t *rec, dict_index_t *index, mem_heap_t* heap2; dtuple_t* row; const dtuple_t* entry; - ulint comp; dtuple_t* vrow = NULL; mem_heap_t* v_heap = NULL; dtuple_t* cur_vrow = NULL; + const bool comp = index->table->not_redundant(); clust_index = dict_table_get_first_index(index->table); - comp = page_rec_is_comp(rec); - ut_ad(!dict_table_is_comp(index->table) == !comp); + ut_ad(!!page_rec_is_comp(rec) == comp); heap = mem_heap_create(1024); clust_offsets = rec_get_offsets(rec, clust_index, NULL, clust_index->n_core_fields, diff --git a/storage/innobase/row/row0undo.cc b/storage/innobase/row/row0undo.cc index f14673c173f..610fcf58ad2 100644 --- a/storage/innobase/row/row0undo.cc +++ b/storage/innobase/row/row0undo.cc @@ -331,7 +331,9 @@ static buf_block_t* row_undo_rec_get(undo_node_t* node) } undo->top_page_no = prev_page->page.id().page_no(); - undo->top_offset = page_offset(prev_rec); + undo->top_offset = uint16_t(prev_rec - prev_page->page.frame); + ut_ad(prev_rec - prev_page->page.frame + == page_offset(prev_rec)); undo->top_undo_no = trx_undo_rec_get_undo_no(prev_rec); ut_ad(!undo->empty()); } else { diff 
--git a/storage/innobase/row/row0vers.cc b/storage/innobase/row/row0vers.cc index 67a638ceeb4..10111aac533 100644 --- a/storage/innobase/row/row0vers.cc +++ b/storage/innobase/row/row0vers.cc @@ -149,9 +149,9 @@ row_vers_impl_x_locked_low( } } - const ulint comp = page_rec_is_comp(rec); + const bool comp = index->table->not_redundant(); + ut_ad(!!page_rec_is_comp(rec) == comp); ut_ad(index->table == clust_index->table); - ut_ad(!!comp == dict_table_is_comp(index->table)); ut_ad(!comp == !page_rec_is_comp(clust_rec)); const ulint rec_del = rec_get_deleted_flag(rec, comp); @@ -402,6 +402,10 @@ row_vers_impl_x_locked( const rec_t* clust_rec; dict_index_t* clust_index; + /* The function must not be invoked under lock_sys latch to prevent + latching order violation, i.e. page latch must be acquired before + lock_sys latch */ + lock_sys.assert_unlocked(); /* The current function can be called from lock_rec_unlock_unmodified() under lock_sys.wr_lock() */ diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 5f872c49e33..9e00dc418dc 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -481,8 +481,7 @@ ATTRIBUTE_COLD static dberr_t srv_undo_tablespaces_reinit() buf_block_t *block= buf_page_get_gen( rseg->page_id(), 0, RW_X_LATCH, nullptr, BUF_GET, &mtr); if (!block) break; - while (!fseg_free_step(TRX_RSEG + TRX_RSEG_FSEG_HEADER + - block->page.frame, &mtr)); + while (!fseg_free_step(block, TRX_RSEG + TRX_RSEG_FSEG_HEADER, &mtr)); } } diff --git a/storage/innobase/trx/trx0i_s.cc b/storage/innobase/trx/trx0i_s.cc index 2dc39118d3d..4a661c51183 100644 --- a/storage/innobase/trx/trx0i_s.cc +++ b/storage/innobase/trx/trx0i_s.cc @@ -59,47 +59,6 @@ now, then 39th chunk would accommodate 1677416425 rows and all chunks would accommodate 3354832851 rows. */ #define MEM_CHUNKS_IN_TABLE_CACHE 39 -/** The following are some testing auxiliary macros. Do not enable them -in a production environment. */ -/* @{ */ - -#if 0 -/** If this is enabled then lock folds will always be different -resulting in equal rows being put in a different cells of the hash -table. Checking for duplicates will be flawed because different -fold will be calculated when a row is searched in the hash table. */ -#define TEST_LOCK_FOLD_ALWAYS_DIFFERENT -#endif - -#if 0 -/** This effectively kills the search-for-duplicate-before-adding-a-row -function, but searching in the hash is still performed. It will always -be assumed that lock is not present and insertion will be performed in -the hash table. */ -#define TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T -#endif - -#if 0 -/** This aggressively repeats adding each row many times. Depending on -the above settings this may be noop or may result in lots of rows being -added. */ -#define TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES -#endif - -#if 0 -/** Very similar to TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T but hash -table search is not performed at all. */ -#define TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS -#endif - -#if 0 -/** Do not insert each row into the hash table, duplicates may appear -if this is enabled, also if this is enabled searching into the hash is -noop because it will be empty. */ -#define TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE -#endif -/* @} */ - /** Memory limit passed to ha_storage_put_memlim(). 
@param cache hash storage @return maximum allowed allocation size */ @@ -162,6 +121,13 @@ struct trx_i_s_cache_t { bool is_truncated; /*!< this is true if the memory limit was hit and thus the data in the cache is truncated */ + + /** Adds an element. + @param lock element to be added + @param heap_no record lock heap number, or 0xFFFF for table lock + @return the existing or added lock + @retval nullptr if memory cannot be allocated */ + i_s_locks_row_t *add(const lock_t &lock, uint16_t heap_no) noexcept; }; /** This is the intermediate buffer where data needed to fill the @@ -778,7 +744,7 @@ static bool fill_locks_row( row->lock_table_id = table->id; - row->hash_chain.value = row; + row->next = nullptr; ut_ad(i_s_locks_row_validate(row)); return true; @@ -819,112 +785,18 @@ static ulint fold_lock( /*======*/ - const lock_t* lock, /*!< in: lock object to fold */ - ulint heap_no)/*!< in: lock's record number + const lock_t& lock, /*!< in: lock object to fold */ + uint16_t heap_no)/*!< in: lock's record number or 0xFFFF if the lock is a table lock */ { -#ifdef TEST_LOCK_FOLD_ALWAYS_DIFFERENT - static ulint fold = 0; - - return(fold++); -#else - ulint ret; - - if (!lock->is_table()) { - ut_a(heap_no != 0xFFFF); - ret = ut_fold_ulint_pair((ulint) lock->trx->id, - lock->un_member.rec_lock.page_id. - fold()); - ret = ut_fold_ulint_pair(ret, heap_no); - } else { - /* this check is actually not necessary for continuing - correct operation, but something must have gone wrong if - it fails. */ - ut_a(heap_no == 0xFFFF); - - ret = (ulint) lock_get_table(*lock)->id; - } - - return(ret); -#endif -} - -/*******************************************************************//** -Checks whether i_s_locks_row_t object represents a lock_t object. -@return TRUE if they match */ -static -ibool -locks_row_eq_lock( -/*==============*/ - const i_s_locks_row_t* row, /*!< in: innodb_locks row */ - const lock_t* lock, /*!< in: lock object */ - ulint heap_no)/*!< in: lock's record number - or 0xFFFF if the lock - is a table lock */ -{ - ut_ad(i_s_locks_row_validate(row)); -#ifdef TEST_NO_LOCKS_ROW_IS_EVER_EQUAL_TO_LOCK_T - return(0); -#else - if (!lock->is_table()) { - ut_a(heap_no != 0xFFFF); - - return(row->lock_trx_id == lock->trx->id - && row->lock_page == lock->un_member.rec_lock.page_id - && row->lock_rec == heap_no); - } else { - /* this check is actually not necessary for continuing - correct operation, but something must have gone wrong if - it fails. */ - ut_a(heap_no == 0xFFFF); - - return(row->lock_trx_id == lock->trx->id - && row->lock_table_id == lock_get_table(*lock)->id); - } -#endif -} - -/*******************************************************************//** -Searches for a row in the innodb_locks cache that has a specified id. -This happens in O(1) time since a hash table is used. Returns pointer to -the row or NULL if none is found. 
-@return row or NULL */ -static -i_s_locks_row_t* -search_innodb_locks( -/*================*/ - trx_i_s_cache_t* cache, /*!< in: cache */ - const lock_t* lock, /*!< in: lock to search for */ - uint16_t heap_no)/*!< in: lock's record number - or 0xFFFF if the lock - is a table lock */ -{ - i_s_hash_chain_t* hash_chain; - - HASH_SEARCH( - /* hash_chain->"next" */ - next, - /* the hash table */ - &cache->locks_hash, - /* fold */ - fold_lock(lock, heap_no), - /* the type of the next variable */ - i_s_hash_chain_t*, - /* auxiliary variable */ - hash_chain, - /* assertion on every traversed item */ - ut_ad(i_s_locks_row_validate(hash_chain->value)), - /* this determines if we have found the lock */ - locks_row_eq_lock(hash_chain->value, lock, heap_no)); - - if (hash_chain == NULL) { - - return(NULL); - } - /* else */ - - return(hash_chain->value); + ut_ad((heap_no == 0xFFFF) == lock.is_table()); + if (heap_no == 0xFFFF) + return ulint(lock.un_member.tab_lock.table->id); + char buf[8 + 8]; + memcpy(buf, &lock.trx->id, 8); + memcpy(buf + 8, &lock.un_member.rec_lock.page_id, 8); + return my_crc32c(heap_no, buf, sizeof buf); } /*******************************************************************//** @@ -933,67 +805,40 @@ Returns a pointer to the added row. If the row is already present then no row is added and a pointer to the existing row is returned. If row can not be allocated then NULL is returned. @return row */ -static -i_s_locks_row_t* -add_lock_to_cache( -/*==============*/ - trx_i_s_cache_t* cache, /*!< in/out: cache */ - const lock_t* lock, /*!< in: the element to add */ - uint16_t heap_no)/*!< in: lock's record number - or 0 if the lock - is a table lock */ +i_s_locks_row_t * +trx_i_s_cache_t::add(const lock_t &lock, uint16_t heap_no) noexcept { - i_s_locks_row_t* dst_row; + ut_ad(lock.is_table() == (heap_no == 0xFFFF)); + i_s_locks_row_t** after= reinterpret_cast<i_s_locks_row_t**> + (&locks_hash.cell_get(fold_lock(lock, heap_no))->node); + while (i_s_locks_row_t *row= *after) + { + ut_ad(i_s_locks_row_validate(row)); + if (row->lock_trx_id == lock.trx->id && + (heap_no == 0xFFFF + ? 
row->lock_table_id == lock.un_member.tab_lock.table->id + : (row->lock_rec == heap_no && + row->lock_page == lock.un_member.rec_lock.page_id))) + return row; + after= &row->next; + } + i_s_locks_row_t *dst_row= static_cast<i_s_locks_row_t*> + (table_cache_create_empty_row(&innodb_locks, this)); + if (dst_row) + { + if (!fill_locks_row(dst_row, &lock, heap_no, this)) + { + innodb_locks.rows_used--; + dst_row= nullptr; + } + else + { + *after= dst_row; + ut_ad(i_s_locks_row_validate(dst_row)); + } + } -#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES - ulint i; - for (i = 0; i < 10000; i++) { -#endif -#ifndef TEST_DO_NOT_CHECK_FOR_DUPLICATE_ROWS - /* quit if this lock is already present */ - dst_row = search_innodb_locks(cache, lock, heap_no); - if (dst_row != NULL) { - - ut_ad(i_s_locks_row_validate(dst_row)); - return(dst_row); - } -#endif - - dst_row = (i_s_locks_row_t*) - table_cache_create_empty_row(&cache->innodb_locks, cache); - - /* memory could not be allocated */ - if (dst_row == NULL) { - - return(NULL); - } - - if (!fill_locks_row(dst_row, lock, heap_no, cache)) { - - /* memory could not be allocated */ - cache->innodb_locks.rows_used--; - return(NULL); - } - -#ifndef TEST_DO_NOT_INSERT_INTO_THE_HASH_TABLE - HASH_INSERT( - /* the type used in the hash chain */ - i_s_hash_chain_t, - /* hash_chain->"next" */ - next, - /* the hash table */ - &cache->locks_hash, - /* fold */ - fold_lock(lock, heap_no), - /* add this data to the hash */ - &dst_row->hash_chain); -#endif -#ifdef TEST_ADD_EACH_LOCKS_ROW_MANY_TIMES - } /* for()-loop */ -#endif - - ut_ad(i_s_locks_row_validate(dst_row)); - return(dst_row); + return dst_row; } /*******************************************************************//** @@ -1057,12 +902,10 @@ add_trx_relevant_locks_to_cache( i_s_locks_row_t* blocking_lock_row; lock_queue_iterator_t iter; - uint16_t wait_lock_heap_no - = wait_lock_get_heap_no(wait_lock); + uint16_t heap_no = wait_lock_get_heap_no(wait_lock); /* add the requested lock */ - *requested_lock_row = add_lock_to_cache(cache, wait_lock, - wait_lock_heap_no); + *requested_lock_row = cache->add(*wait_lock, heap_no); /* memory could not be allocated */ if (*requested_lock_row == NULL) { @@ -1083,13 +926,8 @@ add_trx_relevant_locks_to_cache( /* add the lock that is blocking wait_lock */ - blocking_lock_row - = add_lock_to_cache( - cache, curr_lock, - /* heap_no is the same - for the wait and waited - locks */ - wait_lock_heap_no); + blocking_lock_row = cache->add(*curr_lock, + heap_no); /* memory could not be allocated */ if (blocking_lock_row == NULL) { diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index c135b1afd02..e27e87b67da 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -188,7 +188,8 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) /* This function is invoked during transaction commit, which is not allowed to fail. If we get a corrupted undo header, we will crash here. */ ut_a(undo_page); - trx_ulogf_t *undo_header= undo_page->page.frame + undo->hdr_offset; + const uint16_t undo_header_offset= undo->hdr_offset; + trx_ulogf_t *undo_header= undo_page->page.frame + undo_header_offset; ut_ad(rseg->needs_purge > trx->id); ut_ad(rseg->last_page_no != FIL_NULL); @@ -265,9 +266,8 @@ trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) than to intentionally violate ACID by committing something that is known to be corrupted. 
*/ ut_a(flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY, undo_page, - uint16_t(page_offset(undo_header) + - TRX_UNDO_HISTORY_NODE), rseg->space->free_limit, - mtr) == DB_SUCCESS); + uint16_t(undo_header_offset + TRX_UNDO_HISTORY_NODE), + rseg->space->free_limit, mtr) == DB_SUCCESS); mtr->write<2>(*undo_page, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + undo_page->page.frame, undo_state); @@ -285,8 +285,9 @@ static void trx_purge_free_segment(buf_block_t *rseg_hdr, buf_block_t *block, ut_ad(mtr.memo_contains_flagged(rseg_hdr, MTR_MEMO_PAGE_X_FIX)); ut_ad(mtr.memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX)); - while (!fseg_free_step_not_header(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + - block->page.frame, &mtr)) + while (!fseg_free_step_not_header(block, + TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, + &mtr)) { rseg_hdr->fix(); block->fix(); @@ -309,8 +310,8 @@ static void trx_purge_free_segment(buf_block_t *rseg_hdr, buf_block_t *block, mtr.memo_push(block, MTR_MEMO_PAGE_X_FIX); } - while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + - block->page.frame, &mtr)); + while (!fseg_free_step(block, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, + &mtr)); } void purge_sys_t::rseg_enable(trx_rseg_t &rseg) @@ -927,7 +928,8 @@ bool purge_sys_t::choose_next_log() goto purge_nothing; } - offset= page_offset(undo_rec); + offset= uint16_t(undo_rec - b->page.frame); + ut_ad(undo_rec - b->page.frame == page_offset(undo_rec)); tail.undo_no= trx_undo_rec_get_undo_no(undo_rec); page_no= id.page_no(); } @@ -969,12 +971,14 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr) return {nullptr, 0}; } + buf_block_t *rec2_page= b; if (const trx_undo_rec_t *rec2= trx_undo_page_get_next_rec(b, offset, hdr_page_no, hdr_offset)) { got_rec: ut_ad(page_no == page_id.page_no()); - offset= page_offset(rec2); + ut_ad(page_offset(rec2) == rec2 - rec2_page->page.frame); + offset= uint16_t(rec2 - rec2_page->page.frame); tail.undo_no= trx_undo_rec_get_undo_no(rec2); } else if (hdr_page_no != page_no || @@ -990,6 +994,7 @@ inline trx_purge_rec_t purge_sys_t::get_next_rec(roll_ptr_t roll_ptr) rec2= trx_undo_page_get_first_rec(next_page, hdr_page_no, hdr_offset); if (rec2) { + rec2_page= next_page; page_no= next; goto got_rec; } diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index 14a980175a7..7b6073c598c 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -206,6 +206,19 @@ TPOOL_SUPPRESS_TSAN size_t trx_sys_t::history_size_approx() const return size; } +my_bool trx_sys_t::find_same_or_older_callback(void *el, void *i) noexcept +{ + auto element= static_cast(el); + auto id= static_cast(i); + return element->id <= *id; +} + + +bool trx_sys_t::find_same_or_older_low(trx_t *trx, trx_id_t id) noexcept +{ + return rw_trx_hash.iterate(trx, find_same_or_older_callback, &id); +} + /** Create a persistent rollback segment. 
@param space_id system or undo tablespace id @return pointer to new rollback segment diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index e280d64f9ff..55c283859cb 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -588,9 +588,8 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo) { buf_page_make_young_if_needed(&block->page); buf_block_t *undo_block= block; - const trx_undo_rec_t *undo_rec= block->page.frame + undo.top_offset; - - do + uint16_t undo_rec_offset= undo.top_offset; + for (const trx_undo_rec_t *undo_rec= block->page.frame + undo_rec_offset;;) { byte type; byte cmpl_info; @@ -606,11 +605,14 @@ static dberr_t trx_resurrect_table_locks(trx_t *trx, const trx_undo_t &undo) trx_undo_rec_get_pars(undo_rec, &type, &cmpl_info, &updated_extern, &undo_no, &table_id); tables.emplace(table_id, type == TRX_UNDO_EMPTY); - undo_rec= trx_undo_get_prev_rec(block, page_offset(undo_rec), + ut_ad(page_offset(undo_rec) == undo_rec_offset); + undo_rec= trx_undo_get_prev_rec(block, undo_rec_offset, undo.hdr_page_no, undo.hdr_offset, true, &mtr); + if (!undo_rec) + break; + undo_rec_offset= uint16_t(undo_rec - block->page.frame); } - while (undo_rec); } mtr.commit(); @@ -1054,13 +1056,13 @@ void trx_t::commit_empty(mtr_t *mtr) { mtr->memcpy(*u, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_START + u->page.frame, prev + TRX_UNDO_LOG_START, 2); - const ulint free= page_offset(last); + const ulint free= last - u->page.frame; mtr->write<2>(*u, TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + u->page.frame, free); mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + u->page.frame, TRX_UNDO_CACHED); mtr->write<2>(*u, TRX_UNDO_SEG_HDR + TRX_UNDO_LAST_LOG + u->page.frame, - page_offset(prev)); + uintptr_t(prev - u->page.frame)); mtr->write<2>(*u, prev + TRX_UNDO_NEXT_LOG, 0U); mtr->memset(u, free, srv_page_size - FIL_PAGE_DATA_END - free, 0); @@ -1240,7 +1242,7 @@ trx_finalize_for_fts( } extern "C" MYSQL_THD thd_increment_pending_ops(MYSQL_THD); -extern "C" void thd_decrement_pending_ops(MYSQL_THD); +extern "C" void thd_decrement_pending_ops(void*); #include "../log/log0sync.h" @@ -1269,7 +1271,7 @@ static void trx_flush_log_if_needed(lsn_t lsn, trx_t *trx) if ((cb.m_param= thd_increment_pending_ops(trx->mysql_thd))) { - cb.m_callback= (void (*)(void *)) thd_decrement_pending_ops; + cb.m_callback= thd_decrement_pending_ops; log_write_up_to(lsn, flush, &cb); return; } @@ -1357,10 +1359,8 @@ ATTRIBUTE_NOINLINE static void trx_commit_cleanup(trx_undo_t *&undo) buf_page_get(page_id_t(SRV_TMP_SPACE_ID, undo->hdr_page_no), 0, RW_X_LATCH, &mtr)) { - fseg_header_t *file_seg= TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + - block->page.frame; - - finished= fseg_free_step(file_seg, &mtr); + finished= fseg_free_step(block, TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER, + &mtr); if (!finished); else if (buf_block_t *rseg_header= rseg->get(&mtr, nullptr)) diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index 3068904dd04..21c0c10482a 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -292,12 +292,12 @@ trx_undo_get_first_rec(const fil_space_t &space, uint32_t page_no, mtr); } -inline void UndorecApplier::apply_undo_rec(const trx_undo_rec_t *rec) +inline +void UndorecApplier::apply_undo_rec(const trx_undo_rec_t *rec, uint16_t offset) { undo_rec= rec; - if (!undo_rec) - return; - offset= page_offset(undo_rec); + ut_ad(page_offset(undo_rec) == offset); + this->offset= offset; bool 
updated_extern= false; undo_no_t undo_no= 0; @@ -365,10 +365,11 @@ ATTRIBUTE_COLD void trx_t::apply_log() undo->hdr_offset); while (rec) { + const uint16_t offset= uint16_t(rec - block->page.frame); /* Since we are the only thread who could write to this undo page, it is safe to dereference rec while only holding a buffer-fix. */ - log_applier.apply_undo_rec(rec); - rec= trx_undo_page_get_next_rec(block, page_offset(rec), + log_applier.apply_undo_rec(rec, offset); + rec= trx_undo_page_get_next_rec(block, offset, page_id.page_no(), undo->hdr_offset); } @@ -1003,7 +1004,7 @@ corrupted_type: const trx_id_t trx_id= mach_read_from_8(undo_header + TRX_UNDO_TRX_ID); if (trx_id >> 48) { - sql_print_error("InnoDB: corrupted TRX_ID %llx", trx_id); + sql_print_error("InnoDB: corrupted TRX_ID %" PRIx64, trx_id); goto corrupted; } /* We will increment rseg->needs_purge, like trx_undo_reuse_cached() @@ -1038,7 +1039,7 @@ corrupted_type: read_trx_no: trx_no = mach_read_from_8(TRX_UNDO_TRX_NO + undo_header); if (trx_no >> 48) { - sql_print_error("InnoDB: corrupted TRX_NO %llx", + sql_print_error("InnoDB: corrupted TRX_NO %" PRIx64, trx_no); goto corrupted; } diff --git a/storage/maria/aria_pack.c b/storage/maria/aria_pack.c index ee694931a6d..43150d6e02c 100644 --- a/storage/maria/aria_pack.c +++ b/storage/maria/aria_pack.c @@ -137,8 +137,8 @@ static void free_counts_and_tree_and_queue(HUFF_TREE *huff_trees, uint trees, HUFF_COUNTS *huff_counts, uint fields); -static int compare_tree(void* cmp_arg __attribute__((unused)), - const uchar *s,const uchar *t); +static int compare_tree(void *cmp_arg __attribute__((unused)), + const void *s, const void *t); static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts); static void check_counts(HUFF_COUNTS *huff_counts,uint trees, my_off_t records); @@ -148,9 +148,9 @@ static int test_space_compress(HUFF_COUNTS *huff_counts,my_off_t records, enum en_fieldtype field_type); static HUFF_TREE* make_huff_trees(HUFF_COUNTS *huff_counts,uint trees); static int make_huff_tree(HUFF_TREE *tree,HUFF_COUNTS *huff_counts); -static int compare_huff_elements(void *not_used, uchar *a,uchar *b); -static int save_counts_in_queue(uchar *key,element_count count, - HUFF_TREE *tree); +static int compare_huff_elements(void *not_used, const void *a, + const void *b); +static int save_counts_in_queue(void *key, element_count count, void *tree); static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,uint flag); static uint join_same_trees(HUFF_COUNTS *huff_counts,uint trees); static int make_huff_decode_table(HUFF_TREE *huff_tree,uint trees); @@ -182,7 +182,7 @@ static int mrg_rrnd(PACK_MRG_INFO *info,uchar *buf); static void mrg_reset(PACK_MRG_INFO *mrg); #if !defined(DBUG_OFF) static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count); -static int fakecmp(my_off_t **count1, my_off_t **count2); +static int fakecmp(const void *count1, const void *count2); #endif /* @@ -902,8 +902,8 @@ static HUFF_COUNTS *init_huff_count(MARIA_HA *info,my_off_t records) 'tree_pos'. It's keys are implemented by pointers into 'tree_buff'. This is accomplished by '-1' as the element size. 
*/ - init_tree(&count[col_nr].int_tree,0,0,-1,(qsort_cmp2) compare_tree, NULL, - NULL, MYF(0)); + init_tree(&count[col_nr].int_tree, 0, 0, -1, compare_tree, NULL, NULL, + MYF(0)); if (records && type != FIELD_BLOB && type != FIELD_VARCHAR) count[col_nr].tree_pos=count[col_nr].tree_buff = my_malloc(PSI_NOT_INSTRUMENTED, @@ -1255,12 +1255,14 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) } static int compare_huff_elements(void *not_used __attribute__((unused)), - uchar *a, uchar *b) + const void *a_, const void *b_) { - return *((my_off_t*) a) < *((my_off_t*) b) ? -1 : - (*((my_off_t*) a) == *((my_off_t*) b) ? 0 : 1); + const my_off_t *a= a_; + const my_off_t *b= b_; + return *a < *b ? -1 : (*a == *b ? 0 : 1); } + /* Check each tree if we should use pre-space-compress, end-space- compress, empty-field-compress or zero-field-compress */ @@ -1767,9 +1769,11 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) return 0; } -static int compare_tree(void* cmp_arg __attribute__((unused)), - register const uchar *s, register const uchar *t) +static int compare_tree(void *cmp_arg __attribute__((unused)), const void *s_, + const void *t_) { + const uchar *s= s_; + const uchar *t= t_; uint length; for (length=global_count->field_length; length-- ;) if (*s++ != *t++) @@ -1798,9 +1802,10 @@ static int compare_tree(void* cmp_arg __attribute__((unused)), 0 */ -static int save_counts_in_queue(uchar *key, element_count count, - HUFF_TREE *tree) +static int save_counts_in_queue(void *key_, element_count count, void *tree_) { + uchar *key= key_; + HUFF_TREE *tree= tree_; HUFF_ELEMENT *new_huff_el; new_huff_el=tree->element_buffer+(tree->elements++); @@ -3320,8 +3325,10 @@ static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count) -1 count1 > count2 */ -static int fakecmp(my_off_t **count1, my_off_t **count2) +static int fakecmp(const void *count1_, const void *count2_) { + const my_off_t *const *count1= count1_; + const my_off_t *const *count2= count2_; return ((**count1 < **count2) ? 1 : (**count1 > **count2) ? 
-1 : 0); } diff --git a/storage/maria/aria_read_log.c b/storage/maria/aria_read_log.c index cde28e91c0a..6fac4fce7db 100644 --- a/storage/maria/aria_read_log.c +++ b/storage/maria/aria_read_log.c @@ -340,11 +340,12 @@ static void usage(void) } -static uchar* my_hash_get_string(const uchar *record, size_t *length, - my_bool first __attribute__ ((unused))) +static const uchar *my_hash_get_string(const void *record_, size_t *length, + my_bool first __attribute__((unused))) { - *length= (size_t) (strcend((const char*) record,',')- (const char*) record); - return (uchar*) record; + const char *record= record_; + *length= (strcend(record, ',') - record); + return record_; } diff --git a/storage/maria/ha_maria.cc b/storage/maria/ha_maria.cc index 3e498316653..c121491675d 100644 --- a/storage/maria/ha_maria.cc +++ b/storage/maria/ha_maria.cc @@ -2888,6 +2888,7 @@ int ha_maria::delete_table(const char *name) void ha_maria::drop_table(const char *name) { DBUG_ASSERT(!file || file->s->temporary); + file->s->deleting= 1; // Do not flush data (void) ha_close(); (void) maria_delete_table_files(name, 1, MY_WME); } diff --git a/storage/maria/ma_bitmap.c b/storage/maria/ma_bitmap.c index db59f31b5d4..8a9480ed72f 100644 --- a/storage/maria/ma_bitmap.c +++ b/storage/maria/ma_bitmap.c @@ -1068,23 +1068,30 @@ static my_bool _ma_read_bitmap_page(MARIA_HA *info, adjust_total_size(info, page); bitmap->full_head_size= bitmap->full_tail_size= 0; DBUG_ASSERT(share->pagecache->block_size == bitmap->block_size); - res= pagecache_read(share->pagecache, - &bitmap->file, page, 0, - bitmap->map, PAGECACHE_PLAIN_PAGE, - PAGECACHE_LOCK_LEFT_UNLOCKED, 0) == NULL; - if (!res) + if (share->internal_table && + page == 0 && share->state.state.data_file_length == bitmap->block_size) { - /* Calculate used_size */ - const uchar *data, *end= bitmap->map; - for (data= bitmap->map + bitmap->total_size; --data >= end && *data == 0; ) - {} - bitmap->used_size= (uint) ((data + 1) - end); - DBUG_ASSERT(bitmap->used_size <= bitmap->total_size); + /* Avoid read of bitmap for internal temporary tables */ + bzero(bitmap->map, bitmap->block_size); + bitmap->used_size= 0; + res= 0; } else { - _ma_set_fatal_error(info, my_errno); + res= pagecache_read(share->pagecache, + &bitmap->file, page, 0, + bitmap->map, PAGECACHE_PLAIN_PAGE, + PAGECACHE_LOCK_LEFT_UNLOCKED, 0) == NULL; + if (!res) + { + /* Calculate used_size */ + const uchar *data, *end= bitmap->map; + for (data= bitmap->map + bitmap->total_size; --data >= end && *data == 0; ) + {} + bitmap->used_size= (uint) ((data + 1) - end); + DBUG_ASSERT(bitmap->used_size <= bitmap->total_size); + } } /* We can't check maria_bitmap_marker here as if the bitmap page diff --git a/storage/maria/ma_check.c b/storage/maria/ma_check.c index e7af33cfe66..52dc5632332 100644 --- a/storage/maria/ma_check.c +++ b/storage/maria/ma_check.c @@ -68,8 +68,7 @@ static int sort_one_index(HA_CHECK *param, MARIA_HA *info, static int sort_key_read(MARIA_SORT_PARAM *sort_param, uchar *key); static int sort_maria_ft_key_read(MARIA_SORT_PARAM *sort_param, uchar *key); static int sort_get_next_record(MARIA_SORT_PARAM *sort_param); -static int sort_key_cmp(MARIA_SORT_PARAM *sort_param, const void *a, - const void *b); +static int sort_key_cmp(void *sort_param, const void *a, const void *b); static int sort_maria_ft_key_write(MARIA_SORT_PARAM *sort_param, const uchar *a); static int sort_key_write(MARIA_SORT_PARAM *sort_param, const uchar *a); @@ -5652,9 +5651,9 @@ int _ma_sort_write_record(MARIA_SORT_PARAM *sort_param) /* 
Compare two keys from _ma_create_index_by_sort */ -static int sort_key_cmp(MARIA_SORT_PARAM *sort_param, const void *a, - const void *b) +static int sort_key_cmp(void *sort_param_, const void *a, const void *b) { + const MARIA_SORT_PARAM *sort_param= sort_param_; uint not_used[2]; return (ha_key_cmp(sort_param->seg, *((uchar* const *) a), *((uchar* const *) b), diff --git a/storage/maria/ma_create.c b/storage/maria/ma_create.c index 9ce48ae9e7f..655c7798dc1 100644 --- a/storage/maria/ma_create.c +++ b/storage/maria/ma_create.c @@ -28,7 +28,7 @@ #endif #include -static int compare_columns(MARIA_COLUMNDEF **a, MARIA_COLUMNDEF **b); +static int compare_columns(const void *a, const void *b); static ulonglong update_tot_length(ulonglong tot_length, ulonglong max_rows, uint length) @@ -1335,9 +1335,11 @@ static inline int sign(long a) } -static int compare_columns(MARIA_COLUMNDEF **a_ptr, MARIA_COLUMNDEF **b_ptr) +static int compare_columns(const void *a_ptr_, const void *b_ptr_) { - MARIA_COLUMNDEF *a= *a_ptr, *b= *b_ptr; + const MARIA_COLUMNDEF *const *a_ptr= a_ptr_; + const MARIA_COLUMNDEF *const *b_ptr= b_ptr_; + const MARIA_COLUMNDEF *a= *a_ptr, *b= *b_ptr; enum en_fieldtype a_type, b_type; a_type= (a->type == FIELD_CHECK) ? FIELD_NORMAL : a->type; diff --git a/storage/maria/ma_ft_boolean_search.c b/storage/maria/ma_ft_boolean_search.c index a7bc2a7f318..4a4bccf9ce5 100644 --- a/storage/maria/ma_ft_boolean_search.c +++ b/storage/maria/ma_ft_boolean_search.c @@ -144,9 +144,12 @@ typedef struct st_ft_info enum { UNINITIALIZED, READY, INDEX_SEARCH, INDEX_DONE } state; } FTB; -static int FTB_WORD_cmp(my_off_t *v, FTB_WORD *a, FTB_WORD *b) +static int FTB_WORD_cmp(void *v_, const void *a_, const void *b_) { int i; + const my_off_t *v= v_; + const FTB_WORD *a= a_; + const FTB_WORD *b= b_; /* if a==curdoc, take it as a < b */ if (v && a->docid[0] == *v) @@ -159,11 +162,15 @@ static int FTB_WORD_cmp(my_off_t *v, FTB_WORD *a, FTB_WORD *b) return i; } -static int FTB_WORD_cmp_list(CHARSET_INFO *cs, FTB_WORD **a, FTB_WORD **b) +static int FTB_WORD_cmp_list(void *cs_, const void *a_, const void *b_) { + CHARSET_INFO *cs= cs_; + const FTB_WORD *const *a= a_; + const FTB_WORD *const *b= b_; + /* ORDER BY word, ndepth */ - int i= ha_compare_word(cs, (uchar*) (*a)->word + 1, (*a)->len - 1, - (uchar*) (*b)->word + 1, (*b)->len - 1); + int i= ha_compare_word(cs, (*a)->word + 1, (*a)->len - 1, (*b)->word + 1, + (*b)->len - 1); if (!i) i=CMP_NUM((*a)->ndepth, (*b)->ndepth); return i; @@ -325,10 +332,12 @@ static int _ftb_parse_query(FTB *ftb, uchar *query, uint len, } -static int _ftb_no_dupes_cmp(void* not_used __attribute__((unused)), - const void *a,const void *b) +static int _ftb_no_dupes_cmp(void *not_used __attribute__((unused)), + const void *a_, const void *b_) { - return CMP_NUM((*((my_off_t*)a)), (*((my_off_t*)b))); + const my_off_t *a= a_; + const my_off_t *b= b_; + return CMP_NUM((*a), (*b)); } @@ -597,14 +606,14 @@ FT_INFO * maria_ft_init_boolean_search(MARIA_HA *info, uint keynr, sizeof(void *)))) goto err; reinit_queue(&ftb->queue, ftb->queue.max_elements, 0, 0, - (int (*)(void*, uchar*, uchar*))FTB_WORD_cmp, 0, 0, 0); + FTB_WORD_cmp, 0, 0, 0); for (ftbw= ftb->last_word; ftbw; ftbw= ftbw->prev) queue_insert(&ftb->queue, (uchar *)ftbw); ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root, sizeof(FTB_WORD *)*ftb->queue.elements); memcpy(ftb->list, ftb->queue.root+1, sizeof(FTB_WORD *)*ftb->queue.elements); my_qsort2(ftb->list, ftb->queue.elements, sizeof(FTB_WORD *), - (qsort2_cmp)FTB_WORD_cmp_list, 
(void*) ftb->charset); + FTB_WORD_cmp_list, (void*) ftb->charset); if (ftb->queue.elements<2) ftb->with_scan &= ~FTB_FLAG_TRUNC; ftb->state=READY; return ftb; diff --git a/storage/maria/ma_ft_nlq_search.c b/storage/maria/ma_ft_nlq_search.c index 890de3db0ad..db3ebc220f6 100644 --- a/storage/maria/ma_ft_nlq_search.c +++ b/storage/maria/ma_ft_nlq_search.c @@ -52,9 +52,11 @@ typedef struct st_ft_superdoc } FT_SUPERDOC; -static int FT_SUPERDOC_cmp(void* cmp_arg __attribute__((unused)), - FT_SUPERDOC *p1, FT_SUPERDOC *p2) +static int FT_SUPERDOC_cmp(void *cmp_arg __attribute__((unused)), + const void *p1_, const void *p2_) { + const FT_SUPERDOC *p1= p1_; + const FT_SUPERDOC *p2= p2_; if (p1->doc.dpos < p2->doc.dpos) return -1; if (p1->doc.dpos == p2->doc.dpos) @@ -62,8 +64,10 @@ static int FT_SUPERDOC_cmp(void* cmp_arg __attribute__((unused)), return 1; } -static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) +static int walk_and_match(void *word_, element_count count, void *aio_) { + FT_WORD *word= word_; + ALL_IN_ONE *aio= aio_; FT_WEIGTH subkeys; int r; uint doc_cnt; @@ -186,9 +190,11 @@ do_skip: } -static int walk_and_copy(FT_SUPERDOC *from, - uint32 count __attribute__((unused)), FT_DOC **to) +static int walk_and_copy(void *from_, uint32 count __attribute__((unused)), + void *to_) { + FT_SUPERDOC *from= from_; + FT_DOC **to= to_; DBUG_ENTER("walk_and_copy"); from->doc.weight+=from->tmp_weight*from->word_ptr->weight; (*to)->dpos=from->doc.dpos; @@ -197,9 +203,13 @@ static int walk_and_copy(FT_SUPERDOC *from, DBUG_RETURN(0); } -static int walk_and_push(FT_SUPERDOC *from, - uint32 count __attribute__((unused)), QUEUE *best) +static int walk_and_push(void *from_, + element_count count __attribute__((unused)), + void *best_) { + FT_SUPERDOC *from= from_; + QUEUE *best= best_; + DBUG_ENTER("walk_and_copy"); from->doc.weight+=from->tmp_weight*from->word_ptr->weight; set_if_smaller(best->elements, ft_query_expansion_limit-1); @@ -209,8 +219,10 @@ static int walk_and_push(FT_SUPERDOC *from, static int FT_DOC_cmp(void *unused __attribute__((unused)), - FT_DOC *a, FT_DOC *b) + const void *a_, const void *b_) { + const FT_DOC *a= a_; + const FT_DOC *b= b_; return CMP_NUM(b->weight, a->weight); } @@ -244,8 +256,8 @@ FT_INFO *maria_ft_init_nlq_search(MARIA_HA *info, uint keynr, uchar *query, bzero(&wtree,sizeof(wtree)); - init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),(qsort_cmp2)&FT_SUPERDOC_cmp, - NULL, NULL, MYF(0)); + init_tree(&aio.dtree, 0, 0, sizeof(FT_SUPERDOC), &FT_SUPERDOC_cmp, NULL, + NULL, MYF(0)); maria_ft_parse_init(&wtree, aio.charset); ftparser_param->flags= 0; @@ -253,16 +265,15 @@ FT_INFO *maria_ft_init_nlq_search(MARIA_HA *info, uint keynr, uchar *query, &wtree.mem_root)) goto err; - if (tree_walk(&wtree, (tree_walk_action)&walk_and_match, &aio, + if (tree_walk(&wtree, &walk_and_match, &aio, left_root_right)) goto err; if (flags & FT_EXPAND && ft_query_expansion_limit) { QUEUE best; - init_queue(&best,ft_query_expansion_limit,0,0, (queue_compare) &FT_DOC_cmp, - 0, 0, 0); - tree_walk(&aio.dtree, (tree_walk_action) &walk_and_push, + init_queue(&best, ft_query_expansion_limit, 0, 0, &FT_DOC_cmp, 0, 0, 0); + tree_walk(&aio.dtree, &walk_and_push, &best, left_root_right); while (best.elements) { @@ -281,7 +292,7 @@ FT_INFO *maria_ft_init_nlq_search(MARIA_HA *info, uint keynr, uchar *query, } delete_queue(&best); reset_tree(&aio.dtree); - if (tree_walk(&wtree, (tree_walk_action)&walk_and_match, &aio, + if (tree_walk(&wtree, &walk_and_match, &aio, left_root_right)) goto err; 
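The maria/myisam full-text hunks above all apply the same idiom: comparator and tree-walk callbacks that used to take typed arguments (and were cast to the generic callback type at each call site, e.g. (qsort2_cmp)FTB_WORD_cmp_list or (queue_compare)&FT_DOC_cmp) now take untyped pointers and convert them back to the element type inside the function body, so the function pointer can be handed to init_tree/init_queue/tree_walk/my_qsort2 without any cast. Below is a minimal, self-contained sketch of that idiom in plain C, using the standard qsort comparator type rather than the MariaDB-internal qsort2_cmp/tree_walk_action typedefs; the word struct, word_cmp and the sample data are invented purely for illustration and are not part of this patch.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical element type, standing in for FT_WORD / FT_DOC etc. */
struct word
{
  const char *pos;
  size_t len;
};

/*
  Comparator with the generic qsort() signature: the argument pointers
  are converted back to the real element type inside the function, so
  the function pointer itself is passed to qsort() without a cast.
  (The older style declared typed parameters and cast the function
  pointer at the call site instead.)
*/
static int word_cmp(const void *a_, const void *b_)
{
  const struct word *a= a_;
  const struct word *b= b_;
  size_t min_len= a->len < b->len ? a->len : b->len;
  int diff= memcmp(a->pos, b->pos, min_len);
  if (diff)
    return diff;
  return (a->len > b->len) - (a->len < b->len);
}

int main(void)
{
  struct word words[]= { { "tree", 4 }, { "queue", 5 }, { "hash", 4 } };
  qsort(words, sizeof words / sizeof words[0], sizeof words[0], word_cmp);
  for (size_t i= 0; i < sizeof words / sizeof words[0]; i++)
    printf("%.*s\n", (int) words[i].len, words[i].pos);
  return 0;
}

Converting the data pointers inside the callback is well defined, whereas calling a function through a function-pointer type it was not declared with is undefined behaviour and is flagged by newer compilers, which appears to be why the call-site casts are being removed throughout these files.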
@@ -304,12 +315,11 @@ FT_INFO *maria_ft_init_nlq_search(MARIA_HA *info, uint keynr, uchar *query, dlist->info=aio.info; dptr=dlist->doc; - tree_walk(&aio.dtree, (tree_walk_action) &walk_and_copy, + tree_walk(&aio.dtree, &walk_and_copy, &dptr, left_root_right); if (flags & FT_SORTED) - my_qsort2(dlist->doc, dlist->ndocs, sizeof(FT_DOC), - (qsort2_cmp)&FT_DOC_cmp, 0); + my_qsort2(dlist->doc, dlist->ndocs, sizeof(FT_DOC), &FT_DOC_cmp, 0); err: delete_tree(&aio.dtree, 0); diff --git a/storage/maria/ma_ft_parser.c b/storage/maria/ma_ft_parser.c index f600873d54d..8680f5bfdc5 100644 --- a/storage/maria/ma_ft_parser.c +++ b/storage/maria/ma_ft_parser.c @@ -32,14 +32,19 @@ typedef struct st_my_maria_ft_parser_param } MY_FT_PARSER_PARAM; -static int FT_WORD_cmp(CHARSET_INFO* cs, FT_WORD *w1, FT_WORD *w2) +static int FT_WORD_cmp(void *cs_, const void *w1_, const void *w2_) { - return ha_compare_word(cs, (uchar*) w1->pos, w1->len, - (uchar*) w2->pos, w2->len); + CHARSET_INFO *cs= cs_; + const FT_WORD *w1= w1_; + const FT_WORD *w2= w2_; + return ha_compare_word(cs, w1->pos, w1->len, w2->pos, w2->len); } -static int walk_and_copy(FT_WORD *word,uint32 count,FT_DOCSTAT *docstat) + +static int walk_and_copy(void *word_, element_count count, void *docstat_) { + FT_WORD *word= word_; + FT_DOCSTAT *docstat= docstat_; word->weight=LWS_IN_USE; docstat->sum+=word->weight; memcpy((docstat->list)++, word, sizeof(FT_WORD)); @@ -60,7 +65,7 @@ FT_WORD * maria_ft_linearize(TREE *wtree, MEM_ROOT *mem_root) docstat.list=wlist; docstat.uniq=wtree->elements_in_tree; docstat.sum=0; - tree_walk(wtree,(tree_walk_action)&walk_and_copy,&docstat,left_root_right); + tree_walk(wtree,&walk_and_copy,&docstat,left_root_right); } delete_tree(wtree, 0); if (!wlist) @@ -234,8 +239,8 @@ void maria_ft_parse_init(TREE *wtree, CHARSET_INFO *cs) { DBUG_ENTER("maria_ft_parse_init"); if (!is_tree_inited(wtree)) - init_tree(wtree,0,0,sizeof(FT_WORD),(qsort_cmp2)&FT_WORD_cmp, NULL, - (void*) cs, MYF(0)); + init_tree(wtree, 0, 0, sizeof(FT_WORD), &FT_WORD_cmp, NULL, (void *) cs, + MYF(0)); DBUG_VOID_RETURN; } diff --git a/storage/maria/ma_info.c b/storage/maria/ma_info.c index 3de6b8b74c5..5e92dc2a41f 100644 --- a/storage/maria/ma_info.c +++ b/storage/maria/ma_info.c @@ -141,7 +141,7 @@ void _ma_report_error(int errcode, const LEX_STRING *name, myf flags) } } my_printf_error(errcode, "Got error '%M' for '%s'", - flags, (int) errcode, file_name); + flags, errcode, file_name); DBUG_VOID_RETURN; } diff --git a/storage/maria/ma_init.c b/storage/maria/ma_init.c index 14c4c9963f1..6873c94a0ed 100644 --- a/storage/maria/ma_init.c +++ b/storage/maria/ma_init.c @@ -22,8 +22,9 @@ #include "ma_checkpoint.h" #include -void history_state_free(MARIA_STATE_HISTORY_CLOSED *closed_history) +void history_state_free(void *closed_history_) { + MARIA_STATE_HISTORY_CLOSED *closed_history= closed_history_; MARIA_STATE_HISTORY *history, *next; /* @@ -72,7 +73,7 @@ int maria_init(void) maria_create_trn_hook= dummy_maria_create_trn_hook; } my_hash_init(PSI_INSTRUMENT_ME, &maria_stored_state, &my_charset_bin, 32, 0, - sizeof(LSN), 0, (my_hash_free_key) history_state_free, 0); + sizeof(LSN), 0, history_state_free, 0); DBUG_PRINT("info",("dummy_transaction_object: %p", &dummy_transaction_object)); return 0; } diff --git a/storage/maria/ma_loghandler.c b/storage/maria/ma_loghandler.c index 717bb3a4899..3db555f62a0 100644 --- a/storage/maria/ma_loghandler.c +++ b/storage/maria/ma_loghandler.c @@ -5734,7 +5734,7 @@ translog_write_variable_record_mgroup(LSN *lsn, goto 
err_unlock; } - DBUG_PRINT("info", ("chunk: #%u first_page: %u (%u) " + DBUG_PRINT("info", ("chunk: #%zu first_page: %u (%u) " "full_pages: %lu (%lu) " "Left %lu", groups.elements, @@ -5902,8 +5902,8 @@ translog_write_variable_record_mgroup(LSN *lsn, record_rest + header_fixed_part + (groups.elements - groups_per_page * (chunk0_pages - 1)) * (7 + 1)) chunk0_pages++; - DBUG_PRINT("info", ("chunk0_pages: %u groups %u groups per full page: %u " - "Group on last page: %u", + DBUG_PRINT("info", ("chunk0_pages: %u groups %zu groups per full page: %u " + "Group on last page: %zu", chunk0_pages, groups.elements, groups_per_page, (groups.elements - @@ -8731,7 +8731,7 @@ my_bool translog_purge(TRANSLOG_ADDRESS low) log_descriptor.open_files.elements); DBUG_ASSERT(log_descriptor.min_file == i); file= *((TRANSLOG_FILE **)pop_dynamic(&log_descriptor.open_files)); - DBUG_PRINT("info", ("Files : %d", log_descriptor.open_files.elements)); + DBUG_PRINT("info", ("Files : %zu", log_descriptor.open_files.elements)); DBUG_ASSERT(i == file->number); log_descriptor.min_file++; DBUG_ASSERT(log_descriptor.max_file - log_descriptor.min_file + 1 == diff --git a/storage/maria/ma_pagecache.c b/storage/maria/ma_pagecache.c index bc92eb66168..9fe262c1847 100644 --- a/storage/maria/ma_pagecache.c +++ b/storage/maria/ma_pagecache.c @@ -4724,8 +4724,10 @@ static my_bool free_block(PAGECACHE *pagecache, PAGECACHE_BLOCK_LINK *block, } -static int cmp_sec_link(PAGECACHE_BLOCK_LINK **a, PAGECACHE_BLOCK_LINK **b) +static int cmp_sec_link(const void *a_, const void *b_) { + PAGECACHE_BLOCK_LINK *const *a= a_; + PAGECACHE_BLOCK_LINK *const *b= b_; return (((*a)->hash_link->pageno < (*b)->hash_link->pageno) ? -1 : ((*a)->hash_link->pageno > (*b)->hash_link->pageno) ? 1 : 0); } diff --git a/storage/maria/ma_sort.c b/storage/maria/ma_sort.c index de6f5b8b288..4fcc40b460c 100644 --- a/storage/maria/ma_sort.c +++ b/storage/maria/ma_sort.c @@ -752,8 +752,8 @@ static int write_keys(MARIA_SORT_PARAM *info, register uchar **sort_keys, if (!buffpek) DBUG_RETURN(1); /* Out of memory */ - my_qsort2((uchar*) sort_keys,(size_t) count, sizeof(uchar*), - (qsort2_cmp) info->key_cmp, info); + my_qsort2(sort_keys, count, sizeof(uchar*), + info->key_cmp, info); if (!my_b_inited(tempfile) && open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST", DISK_BUFFER_SIZE, info->sort_info->param->myf_rw)) @@ -798,8 +798,8 @@ static int write_keys_varlen(MARIA_SORT_PARAM *info, if (!buffpek) DBUG_RETURN(1); /* Out of memory */ - my_qsort2((uchar*) sort_keys, (size_t) count, sizeof(uchar*), - (qsort2_cmp) info->key_cmp, info); + my_qsort2(sort_keys, count, sizeof(uchar*), + info->key_cmp, info); if (!my_b_inited(tempfile) && open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST", DISK_BUFFER_SIZE, info->sort_info->param->myf_rw)) @@ -841,8 +841,8 @@ static int write_index(MARIA_SORT_PARAM *info, register uchar **sort_keys, { DBUG_ENTER("write_index"); - my_qsort2((uchar*) sort_keys,(size_t) count,sizeof(uchar*), - (qsort2_cmp) info->key_cmp,info); + my_qsort2(sort_keys, count,sizeof(uchar*), + info->key_cmp,info); while (count--) { if ((*info->key_write)(info, *sort_keys++)) @@ -1044,8 +1044,8 @@ merge_buffers(MARIA_SORT_PARAM *info, ha_keys keys, IO_CACHE *from_file, sort_length=info->key_length; if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0, - (int (*)(void*, uchar *,uchar*)) info->key_cmp, - (void*) info, 0, 0)) + info->key_cmp, + info, 0, 0)) DBUG_RETURN(1); /* purecov: inspected */ for (buffpek= Fb ; buffpek <= Tb ; buffpek++) diff 
--git a/storage/maria/ma_write.c b/storage/maria/ma_write.c index 9a6859bfd4d..3f8931bbc3d 100644 --- a/storage/maria/ma_write.c +++ b/storage/maria/ma_write.c @@ -1685,8 +1685,12 @@ static my_bool _ma_ck_write_tree(register MARIA_HA *info, MARIA_KEY *key) /* typeof(_ma_keys_compare)=qsort_cmp2 */ -static int keys_compare(bulk_insert_param *param, uchar *key1, uchar *key2) +static int keys_compare(void *param_, const void *key1_, + const void *key2_) { + const bulk_insert_param *param= param_; + const uchar *key1= key1_; + const uchar *key2= key2_; uint not_used[2]; return ha_key_cmp(param->info->s->keyinfo[param->keynr].seg, key1, key2, USE_WHOLE_KEY, SEARCH_SAME, @@ -1792,7 +1796,7 @@ int maria_init_bulk_insert(MARIA_HA *info, size_t cache_size, ha_rows rows) init_tree(&info->bulk_insert[i], cache_size * key[i].maxlength, cache_size * key[i].maxlength, 0, - (qsort_cmp2) keys_compare, keys_free, (void *)params++, MYF(0)); + keys_compare, keys_free, params++, MYF(0)); } else info->bulk_insert[i].root=0; diff --git a/storage/maria/maria_def.h b/storage/maria/maria_def.h index 147dc83d78a..cda2532bbe2 100644 --- a/storage/maria/maria_def.h +++ b/storage/maria/maria_def.h @@ -405,7 +405,7 @@ typedef struct st_maria_sort_param my_bool calc_checksum; /* calculate table checksum */ size_t rec_buff_size; - int (*key_cmp)(struct st_maria_sort_param *, const void *, const void *); + int (*key_cmp)(void *, const void *, const void *); int (*key_read)(struct st_maria_sort_param *, uchar *); int (*key_write)(struct st_maria_sort_param *, const uchar *); void (*lock_in_memory)(HA_CHECK *); diff --git a/storage/maria/s3_func.c b/storage/maria/s3_func.c index b8918c48d4b..eeacc3deb71 100644 --- a/storage/maria/s3_func.c +++ b/storage/maria/s3_func.c @@ -1199,7 +1199,7 @@ my_bool s3_rename_object(ms3_st *s3_client, const char *aws_bucket, if (!(errmsg= ms3_server_error(s3_client))) errmsg= ms3_error(error); - my_printf_error(EE_READ, "Got error from move_object(%s -> %s): %d %", + my_printf_error(EE_READ, "Got error from move_object(%s -> %s): %d %s", error_flags, from_name, to_name, error, errmsg); } diff --git a/storage/maria/trnman.c b/storage/maria/trnman.c index 7cac6a2d0c6..e0509f4544f 100644 --- a/storage/maria/trnman.c +++ b/storage/maria/trnman.c @@ -126,11 +126,12 @@ default_trnman_end_trans_hook(TRN *trn __attribute__ ((unused)), } -static uchar *trn_get_hash_key(const uchar *trn, size_t *len, - my_bool unused __attribute__ ((unused))) +static const uchar *trn_get_hash_key(const void *trn_, size_t *len, + my_bool unused __attribute__((unused))) { + const TRN *const *trn= trn_; *len= sizeof(TrID); - return (uchar *) & ((*((TRN **)trn))->trid); + return (const uchar *) &((*trn)->trid); } diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index 4b035bc11a9..0985d7459fc 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -603,25 +603,22 @@ static const char *mrn_inspect_extra_function(enum ha_extra_function operation) } #endif -static uchar *mrn_open_tables_get_key(const uchar *record, - size_t *length, - my_bool not_used __attribute__ ((unused))) +static const uchar *mrn_open_tables_get_key(const void *record, size_t *length, + my_bool) { MRN_DBUG_ENTER_FUNCTION(); - MRN_SHARE *share = reinterpret_cast(const_cast(record)); + auto share = static_cast(record); *length = share->table_name_length; - DBUG_RETURN(reinterpret_cast(share->table_name)); + DBUG_RETURN(reinterpret_cast(share->table_name)); } -static uchar 
*mrn_long_term_share_get_key(const uchar *record, - size_t *length, - my_bool not_used __attribute__ ((unused))) +static const uchar *mrn_long_term_share_get_key(const void *record, + size_t *length, my_bool) { MRN_DBUG_ENTER_FUNCTION(); - MRN_LONG_TERM_SHARE *long_term_share = - reinterpret_cast(const_cast(record)); + auto long_term_share= static_cast(record); *length = long_term_share->table_name_length; - DBUG_RETURN(reinterpret_cast(long_term_share->table_name)); + DBUG_RETURN(reinterpret_cast(long_term_share->table_name)); } /* status */ @@ -699,13 +696,12 @@ static grn_logger mrn_logger = { NULL }; -static uchar *mrn_allocated_thds_get_key(const uchar *record, - size_t *length, - my_bool not_used __attribute__ ((unused))) +static const uchar *mrn_allocated_thds_get_key(const void *record, + size_t *length, my_bool) { MRN_DBUG_ENTER_FUNCTION(); *length = sizeof(THD *); - DBUG_RETURN(const_cast(record)); + DBUG_RETURN(static_cast(record)); } /* system functions */ diff --git a/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp b/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp index dd3165cdadf..51145440135 100644 --- a/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp +++ b/storage/mroonga/lib/mrn_multiple_column_key_codec.cpp @@ -693,7 +693,7 @@ namespace mrn { "data: <%.*s>", normalized_length, UINT_MAX16, - field->field_name, + field->field_name.str, blob_data_length, blob_data); memcpy(grn_key, normalized, blob_data_length); new_blob_data_length = blob_data_length; diff --git a/storage/myisam/ft_boolean_search.c b/storage/myisam/ft_boolean_search.c index 3d95fffacaf..00ed5ce3163 100644 --- a/storage/myisam/ft_boolean_search.c +++ b/storage/myisam/ft_boolean_search.c @@ -144,9 +144,12 @@ typedef struct st_ft_info enum { UNINITIALIZED, READY, INDEX_SEARCH, INDEX_DONE } state; } FTB; -static int FTB_WORD_cmp(my_off_t *v, FTB_WORD *a, FTB_WORD *b) +static int FTB_WORD_cmp(void *v_, const void *a_, const void *b_) { int i; + const my_off_t *v= v_; + const FTB_WORD *a= a_; + const FTB_WORD *b= b_; /* if a==curdoc, take it as a < b */ if (v && a->docid[0] == *v) @@ -159,11 +162,14 @@ static int FTB_WORD_cmp(my_off_t *v, FTB_WORD *a, FTB_WORD *b) return i; } -static int FTB_WORD_cmp_list(CHARSET_INFO *cs, FTB_WORD **a, FTB_WORD **b) +static int FTB_WORD_cmp_list(void *cs_, const void *a_, const void *b_) { + CHARSET_INFO *cs= cs_; + const FTB_WORD *const *a= a_; + const FTB_WORD *const *b= b_; /* ORDER BY word, ndepth */ - int i= ha_compare_word(cs, (uchar*) (*a)->word + 1, (*a)->len - 1, - (uchar*) (*b)->word + 1, (*b)->len - 1); + int i= ha_compare_word(cs, (*a)->word + 1, (*a)->len - 1, (*b)->word + 1, + (*b)->len - 1); if (!i) i= CMP_NUM((*a)->ndepth, (*b)->ndepth); return i; @@ -327,8 +333,8 @@ static int _ftb_parse_query(FTB *ftb, uchar *query, uint len, } -static int _ftb_no_dupes_cmp(void* not_used __attribute__((unused)), - const void *a,const void *b) +static int _ftb_no_dupes_cmp(void *not_used __attribute__((unused)), + const void *a, const void *b) { return CMP_NUM((*((my_off_t*)a)), (*((my_off_t*)b))); } @@ -607,14 +613,14 @@ FT_INFO * ft_init_boolean_search(MI_INFO *info, uint keynr, uchar *query, sizeof(void *)))) goto err; reinit_queue(&ftb->queue, ftb->queue.max_elements, 0, 0, - (int (*)(void*, uchar*, uchar*))FTB_WORD_cmp, 0, 0, 0); + FTB_WORD_cmp, 0, 0, 0); for (ftbw= ftb->last_word; ftbw; ftbw= ftbw->prev) queue_insert(&ftb->queue, (uchar *)ftbw); ftb->list=(FTB_WORD **)alloc_root(&ftb->mem_root, sizeof(FTB_WORD *)*ftb->queue.elements); memcpy(ftb->list, 
&queue_top(&ftb->queue), sizeof(FTB_WORD *)*ftb->queue.elements); my_qsort2(ftb->list, ftb->queue.elements, sizeof(FTB_WORD *), - (qsort2_cmp)FTB_WORD_cmp_list, (void*)ftb->charset); + FTB_WORD_cmp_list, (void*)ftb->charset); if (ftb->queue.elements<2) ftb->with_scan &= ~FTB_FLAG_TRUNC; ftb->state=READY; return ftb; diff --git a/storage/myisam/ft_nlq_search.c b/storage/myisam/ft_nlq_search.c index 90a509057cf..dd89e36f41a 100644 --- a/storage/myisam/ft_nlq_search.c +++ b/storage/myisam/ft_nlq_search.c @@ -51,9 +51,10 @@ typedef struct st_ft_superdoc double tmp_weight; } FT_SUPERDOC; -static int FT_SUPERDOC_cmp(void* cmp_arg __attribute__((unused)), - FT_SUPERDOC *p1, FT_SUPERDOC *p2) +static int FT_SUPERDOC_cmp(void *cmp_arg __attribute__((unused)), + const void *p1_, const void *p2_) { + const FT_SUPERDOC *p1= p1_, *p2= p2_; if (p1->doc.dpos < p2->doc.dpos) return -1; if (p1->doc.dpos == p2->doc.dpos) @@ -61,8 +62,10 @@ static int FT_SUPERDOC_cmp(void* cmp_arg __attribute__((unused)), return 1; } -static int walk_and_match(FT_WORD *word, uint32 count, ALL_IN_ONE *aio) +static int walk_and_match(void *word_, element_count count, void *aio_) { + FT_WORD *word= word_; + ALL_IN_ONE *aio= aio_; FT_WEIGTH subkeys; int r; uint keylen, doc_cnt; @@ -184,9 +187,11 @@ do_skip: } -static int walk_and_copy(FT_SUPERDOC *from, - uint32 count __attribute__((unused)), FT_DOC **to) +static int walk_and_copy(void *from_, uint32 count __attribute__((unused)), + void *to_) { + FT_SUPERDOC *from= from_; + FT_DOC **to= to_; DBUG_ENTER("walk_and_copy"); from->doc.weight+=from->tmp_weight*from->word_ptr->weight; (*to)->dpos=from->doc.dpos; @@ -195,9 +200,12 @@ static int walk_and_copy(FT_SUPERDOC *from, DBUG_RETURN(0); } -static int walk_and_push(FT_SUPERDOC *from, - uint32 count __attribute__((unused)), QUEUE *best) +static int walk_and_push(void *from_, + element_count count __attribute__((unused)), + void *best_) { + FT_SUPERDOC *from= from_; + QUEUE *best= best_; DBUG_ENTER("walk_and_copy"); from->doc.weight+=from->tmp_weight*from->word_ptr->weight; set_if_smaller(best->elements, ft_query_expansion_limit-1); @@ -206,9 +214,10 @@ static int walk_and_push(FT_SUPERDOC *from, } -static int FT_DOC_cmp(void *unused __attribute__((unused)), - FT_DOC *a, FT_DOC *b) +static int FT_DOC_cmp(void *unused __attribute__((unused)), const void *a_, + const void *b_) { + const FT_DOC *a= a_, *b= b_; return CMP_NUM(b->weight, a->weight); } @@ -242,7 +251,7 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query, bzero(&wtree,sizeof(wtree)); - init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),(qsort_cmp2)&FT_SUPERDOC_cmp, + init_tree(&aio.dtree,0,0,sizeof(FT_SUPERDOC),&FT_SUPERDOC_cmp, NULL, NULL, MYF(0)); ft_parse_init(&wtree, aio.charset); @@ -251,16 +260,15 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query, &wtree.mem_root)) goto err; - if (tree_walk(&wtree, (tree_walk_action)&walk_and_match, &aio, + if (tree_walk(&wtree, &walk_and_match, &aio, left_root_right)) goto err; if (flags & FT_EXPAND && ft_query_expansion_limit) { QUEUE best; - init_queue(&best,ft_query_expansion_limit,0,0, (queue_compare) &FT_DOC_cmp, - 0, 0, 0); - tree_walk(&aio.dtree, (tree_walk_action) &walk_and_push, + init_queue(&best, ft_query_expansion_limit, 0, 0, &FT_DOC_cmp, 0, 0, 0); + tree_walk(&aio.dtree, &walk_and_push, &best, left_root_right); while (best.elements) { @@ -279,7 +287,7 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query, } delete_queue(&best); reset_tree(&aio.dtree); - if 
(tree_walk(&wtree, (tree_walk_action)&walk_and_match, &aio, + if (tree_walk(&wtree, &walk_and_match, &aio, left_root_right)) goto err; @@ -302,11 +310,11 @@ FT_INFO *ft_init_nlq_search(MI_INFO *info, uint keynr, uchar *query, dlist->info=aio.info; dptr=dlist->doc; - tree_walk(&aio.dtree, (tree_walk_action) &walk_and_copy, + tree_walk(&aio.dtree, &walk_and_copy, &dptr, left_root_right); if (flags & FT_SORTED) - my_qsort2(dlist->doc, dlist->ndocs, sizeof(FT_DOC), (qsort2_cmp)&FT_DOC_cmp, + my_qsort2(dlist->doc, dlist->ndocs, sizeof(FT_DOC), &FT_DOC_cmp, 0); err: diff --git a/storage/myisam/ft_parser.c b/storage/myisam/ft_parser.c index ec392b6ecd8..25ef65d33af 100644 --- a/storage/myisam/ft_parser.c +++ b/storage/myisam/ft_parser.c @@ -30,14 +30,17 @@ typedef struct st_my_ft_parser_param MEM_ROOT *mem_root; } MY_FT_PARSER_PARAM; -static int FT_WORD_cmp(CHARSET_INFO* cs, FT_WORD *w1, FT_WORD *w2) +static int FT_WORD_cmp(void *cs_, const void *w1_, const void *w2_) { - return ha_compare_word(cs, (uchar*) w1->pos, w1->len, - (uchar*) w2->pos, w2->len); + CHARSET_INFO *cs= cs_; + const FT_WORD *w1= w1_, *w2= w2_; + return ha_compare_word(cs, w1->pos, w1->len, w2->pos, w2->len); } -static int walk_and_copy(FT_WORD *word,uint32 count,FT_DOCSTAT *docstat) +static int walk_and_copy(void *word_, element_count count, void *docstat_) { + FT_WORD *word= word_; + FT_DOCSTAT *docstat= docstat_; word->weight=LWS_IN_USE; docstat->sum+=word->weight; memcpy((docstat->list)++, word, sizeof(FT_WORD)); @@ -58,7 +61,7 @@ FT_WORD * ft_linearize(TREE *wtree, MEM_ROOT *mem_root) docstat.list=wlist; docstat.uniq=wtree->elements_in_tree; docstat.sum=0; - tree_walk(wtree,(tree_walk_action)&walk_and_copy,&docstat,left_root_right); + tree_walk(wtree,&walk_and_copy,&docstat,left_root_right); } delete_tree(wtree, 0); if (!wlist) @@ -257,8 +260,8 @@ void ft_parse_init(TREE *wtree, CHARSET_INFO *cs) { DBUG_ENTER("ft_parse_init"); if (!is_tree_inited(wtree)) - init_tree(wtree, 0, 0, sizeof(FT_WORD), (qsort_cmp2)&FT_WORD_cmp, 0, - (void*)cs, MYF(0)); + init_tree(wtree, 0, 0, sizeof(FT_WORD), &FT_WORD_cmp, 0, (void *) cs, + MYF(0)); DBUG_VOID_RETURN; } diff --git a/storage/myisam/ft_stopwords.c b/storage/myisam/ft_stopwords.c index b666c1f3a79..4ada6cecc58 100644 --- a/storage/myisam/ft_stopwords.c +++ b/storage/myisam/ft_stopwords.c @@ -30,17 +30,19 @@ typedef struct st_ft_stopwords static TREE *stopwords3=NULL; -static int FT_STOPWORD_cmp(void* cmp_arg __attribute__((unused)), - FT_STOPWORD *w1, FT_STOPWORD *w2) +static int FT_STOPWORD_cmp(void *cmp_arg __attribute__((unused)), + const void *w1_, const void *w2_) { + const FT_STOPWORD *w1= w1_, *w2= w2_; return ha_compare_word(ft_stopword_cs, (uchar *) w1->pos, w1->len, (uchar *) w2->pos, w2->len); } -static int FT_STOPWORD_free(FT_STOPWORD *w, TREE_FREE action, +static int FT_STOPWORD_free(void *w_, TREE_FREE action, void *arg __attribute__((unused))) { + FT_STOPWORD *w= w_; if (action == free_free) my_free((void*)w->pos); return 0; @@ -62,7 +64,7 @@ int ft_init_stopwords() if (!(stopwords3=(TREE *)my_malloc(mi_key_memory_ft_stopwords, sizeof(TREE), MYF(0)))) DBUG_RETURN(-1); - init_tree(stopwords3,0,0,sizeof(FT_STOPWORD),(qsort_cmp2)&FT_STOPWORD_cmp, + init_tree(stopwords3,0,0,sizeof(FT_STOPWORD),&FT_STOPWORD_cmp, (ft_stopword_file ? 
(tree_element_free)&FT_STOPWORD_free : 0), NULL, MYF(0)); /* diff --git a/storage/myisam/ha_myisam.cc b/storage/myisam/ha_myisam.cc index 8460534e3e0..364404bb939 100644 --- a/storage/myisam/ha_myisam.cc +++ b/storage/myisam/ha_myisam.cc @@ -1500,7 +1500,7 @@ int ha_myisam::assign_to_keycache(THD* thd, HA_CHECK_OPT *check_opt) param->db_name= table->s->db.str; param->table_name= table->s->table_name.str; param->testflag= 0; - mi_check_print_error(param, errmsg); + mi_check_print_error(param, "%s", errmsg); } DBUG_RETURN(error); } @@ -1566,7 +1566,7 @@ int ha_myisam::preload_keys(THD* thd, HA_CHECK_OPT *check_opt) param->db_name= table->s->db.str; param->table_name= table->s->table_name.str; param->testflag= 0; - mi_check_print_error(param, errmsg); + mi_check_print_error(param, "%s", errmsg); DBUG_RETURN(error); } } diff --git a/storage/myisam/mi_check.c b/storage/myisam/mi_check.c index 33fab259a9c..2bf400d08f3 100644 --- a/storage/myisam/mi_check.c +++ b/storage/myisam/mi_check.c @@ -61,7 +61,7 @@ static int sort_one_index(HA_CHECK *, MI_INFO *, MI_KEYDEF *, my_off_t, File); static int sort_key_read(MI_SORT_PARAM *sort_param,void *key); static int sort_ft_key_read(MI_SORT_PARAM *sort_param,void *key); static int sort_get_next_record(MI_SORT_PARAM *sort_param); -static int sort_key_cmp(MI_SORT_PARAM *sort_param, const void *a,const void *b); +static int sort_key_cmp(void *sort_param, const void *a, const void *b); static int sort_ft_key_write(MI_SORT_PARAM *sort_param, const void *a); static int sort_key_write(MI_SORT_PARAM *sort_param, const void *a); static my_off_t get_record_for_key(MI_INFO *, MI_KEYDEF *, uchar *); @@ -874,7 +874,7 @@ static int chk_index(HA_CHECK *param, MI_INFO *info, MI_KEYDEF *keyinfo, if (keypos != endpos) { mi_check_print_error(param,"Keyblock size at page %s is not correct. Block length: %d key length: %d", - llstr(page,llbuff), used_length, (keypos - buff)); + llstr(page,llbuff), used_length, (int) (keypos - buff)); goto err; } my_afree((uchar*) temp_buff); @@ -1167,7 +1167,7 @@ int chk_data_link(HA_CHECK *param, MI_INFO *info, my_bool extend) block_info.rec_len > (uint) info->s->max_pack_length) { mi_check_print_error(param, - "Found block with wrong recordlength: %d at %s", + "Found block with wrong recordlength: %lu at %s", block_info.rec_len, llstr(start_recpos,llbuff)); got_error=1; break; @@ -3389,7 +3389,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) { if (!searching) mi_check_print_info(param, - "Deleted block with impossible length %u at %s", + "Deleted block with impossible length %lu at %s", block_info.block_len,llstr(pos,llbuff)); error=1; } @@ -3625,7 +3625,7 @@ static int sort_get_next_record(MI_SORT_PARAM *sort_param) block_info.rec_len > (uint) share->max_pack_length) { if (! 
searching) - mi_check_print_info(param,"Found block with wrong recordlength: %d at %s\n", + mi_check_print_info(param,"Found block with wrong recordlength: %ld at %s\n", block_info.rec_len, llstr(sort_param->pos,llbuff)); continue; @@ -3805,12 +3805,14 @@ int sort_write_record(MI_SORT_PARAM *sort_param) /* Compare two keys from _create_index_by_sort */ -static int sort_key_cmp(MI_SORT_PARAM *sort_param, const void *a, - const void *b) +static int sort_key_cmp(void *sort_param_, const void *a_, const void *b_) { + const MI_SORT_PARAM *sort_param= sort_param_; uint not_used[2]; - return (ha_key_cmp(sort_param->seg, *((uchar**) a), *((uchar**) b), - USE_WHOLE_KEY, SEARCH_SAME, not_used)); + const void *const *a= a_; + const void *const *b= b_; + return (ha_key_cmp(sort_param->seg, *a, *b, + USE_WHOLE_KEY, SEARCH_SAME, not_used)); } /* sort_key_cmp */ @@ -4133,7 +4135,7 @@ static int sort_delete_record(MI_SORT_PARAM *sort_param) if (info->s->options & HA_OPTION_COMPRESS_RECORD) { mi_check_print_error(param, - "Recover aborted; Can't run standard recovery on compressed tables with errors in data-file. Use switch 'myisamchk --safe-recover' to fix it\n",stderr);; + "Recover aborted; Can't run standard recovery on compressed tables with errors in data-file. Use switch 'myisamchk --safe-recover' to fix it\n"); DBUG_RETURN(1); } diff --git a/storage/myisam/mi_locking.c b/storage/myisam/mi_locking.c index cee1c326b3e..ec1d7bf4782 100644 --- a/storage/myisam/mi_locking.c +++ b/storage/myisam/mi_locking.c @@ -452,8 +452,10 @@ my_bool mi_check_status(void *param) structure. */ -void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table) +void mi_fix_status(void *ord_table_, void *new_table_) { + MI_INFO *org_table= ord_table_; + MI_INFO *new_table= new_table_; DBUG_ENTER("mi_fix_status"); if (!new_table) { diff --git a/storage/myisam/mi_write.c b/storage/myisam/mi_write.c index e8a985a5fd4..f7e5ee8d345 100644 --- a/storage/myisam/mi_write.c +++ b/storage/myisam/mi_write.c @@ -923,8 +923,11 @@ int _mi_ck_write_tree(register MI_INFO *info, uint keynr, uchar *key, /* typeof(_mi_keys_compare)=qsort_cmp2 */ -static int keys_compare(bulk_insert_param *param, uchar *key1, uchar *key2) +static int keys_compare(void *param_, const void *key1_, const void *key2_) { + const bulk_insert_param *param= param_; + const uchar *key1= key1_; + const uchar *key2= key2_; uint not_used[2]; return ha_key_cmp(param->info->s->keyinfo[param->keynr].seg, key1, key2, USE_WHOLE_KEY, SEARCH_SAME, @@ -1020,7 +1023,7 @@ int mi_init_bulk_insert(MI_INFO *info, size_t cache_size, ha_rows rows) init_tree(&info->bulk_insert[i], cache_size * key[i].maxlength, cache_size * key[i].maxlength, 0, - (qsort_cmp2)keys_compare, keys_free, (void *)params++, MYF(0)); + keys_compare, keys_free, params++, MYF(0)); } else info->bulk_insert[i].root=0; diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index 17d2eef898a..da570f516c2 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -1572,7 +1572,7 @@ static int mi_sort_records(HA_CHECK *param, share->state.key_root[sort_key], MYF(MY_NABP+MY_WME))) { - mi_check_print_error(param,"Can't read indexpage from filepos: %s", + mi_check_print_error(param,"Can't read indexpage from filepos: %lu", (ulong) share->state.key_root[sort_key]); goto err; } diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index 852df82c7e8..1fb420aaf98 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -731,7 +731,7 @@ void mi_update_status(void 
*param); void mi_restore_status(void *param); void mi_copy_status(void *to, void *from); my_bool mi_check_status(void *param); -void mi_fix_status(MI_INFO *org_table, MI_INFO *new_table); +void mi_fix_status(void *org_table, void *new_table); extern MI_INFO *test_if_reopen(char *filename); my_bool check_table_is_closed(const char *name, const char *where); int mi_open_datafile(MI_INFO *info, MYISAM_SHARE *share); diff --git a/storage/myisam/myisamlog.c b/storage/myisam/myisamlog.c index 4e51af4ac3d..cb0096a878a 100644 --- a/storage/myisam/myisamlog.c +++ b/storage/myisam/myisamlog.c @@ -59,12 +59,11 @@ extern int main(int argc,char * *argv); static void get_options(int *argc,char ***argv); static int examine_log(char * file_name,char **table_names); static int read_string(IO_CACHE *file,uchar* *to,uint length); -static int file_info_compare(void *cmp_arg, void *a,void *b); -static int test_if_open(struct file_info *key,element_count count, - struct test_if_open_param *param); +static int file_info_compare(void *cmp_arg, const void *a, const void *b); +static int test_if_open(void *key, element_count count, void *param); static void fix_blob_pointers(MI_INFO *isam,uchar *record); -static int test_when_accessed(struct file_info *key,element_count count, - struct st_access_param *access_param); +static int test_when_accessed(void *key, element_count count, + void *access_param); static int file_info_free(void*, TREE_FREE, void *); static int close_some_file(TREE *tree); static int reopen_closed_file(TREE *tree,struct file_info *file_info); @@ -330,7 +329,7 @@ static int examine_log(char * file_name, char **table_names) init_io_cache(&cache,file,0,READ_CACHE,start_offset,0,MYF(0)); bzero((uchar*) com_count,sizeof(com_count)); - init_tree(&tree,0,0,sizeof(file_info),(qsort_cmp2) file_info_compare, + init_tree(&tree,0,0,sizeof(file_info), file_info_compare, file_info_free, NULL, MYF(MY_TREE_WITH_DELETE)); (void) init_key_cache(dflt_key_cache,KEY_CACHE_BLOCK_SIZE,KEY_CACHE_SIZE, 0, 0, 0, 0); @@ -412,7 +411,7 @@ static int examine_log(char * file_name, char **table_names) } open_param.name=file_info.name; open_param.max_id=0; - (void) tree_walk(&tree,(tree_walk_action) test_if_open,(void*) &open_param, + (void) tree_walk(&tree, test_if_open,(void*) &open_param, left_root_right); file_info.id=open_param.max_id+1; /* @@ -697,8 +696,8 @@ static int read_string(IO_CACHE *file, register uchar* *to, register uint length } /* read_string */ -static int file_info_compare(void* cmp_arg __attribute__((unused)), - void *a, void *b) +static int file_info_compare(void *cmp_arg __attribute__((unused)), + const void *a, const void *b) { long lint; @@ -710,10 +709,12 @@ static int file_info_compare(void* cmp_arg __attribute__((unused)), /* ARGSUSED */ -static int test_if_open (struct file_info *key, +static int test_if_open (void *key_, element_count count __attribute__((unused)), - struct test_if_open_param *param) + void *param_) { + struct file_info *key= key_; + struct test_if_open_param *param= param_; if (!strcmp(key->name,param->name) && key->id > param->max_id) param->max_id=key->id; return 0; @@ -738,10 +739,12 @@ static void fix_blob_pointers(MI_INFO *info, uchar *record) /* close the file with hasn't been accessed for the longest time */ /* ARGSUSED */ -static int test_when_accessed (struct file_info *key, +static int test_when_accessed (void *key_, element_count count __attribute__((unused)), - struct st_access_param *access_param) + void *access_param_) { + struct file_info *key= key_; + struct 
st_access_param *access_param= access_param_; if (key->accessed < access_param->min_accessed && ! key->closed) { access_param->min_accessed=key->accessed; @@ -777,7 +780,7 @@ static int close_some_file(TREE *tree) access_param.min_accessed=LONG_MAX; access_param.found=0; - (void) tree_walk(tree,(tree_walk_action) test_when_accessed, + (void) tree_walk(tree, test_when_accessed, (void*) &access_param,left_root_right); if (!access_param.found) return 1; /* No open file that is possibly to close */ diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 709530d915c..077507e897c 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -133,8 +133,8 @@ static void free_counts_and_tree_and_queue(HUFF_TREE *huff_trees, uint trees, HUFF_COUNTS *huff_counts, uint fields); -static int compare_tree(void* cmp_arg __attribute__((unused)), - const uchar *s,const uchar *t); +static int compare_tree(void *cmp_arg __attribute__((unused)), + const void *s, const void *t); static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts); static void check_counts(HUFF_COUNTS *huff_counts,uint trees, my_off_t records); @@ -144,9 +144,9 @@ static int test_space_compress(HUFF_COUNTS *huff_counts,my_off_t records, enum en_fieldtype field_type); static HUFF_TREE* make_huff_trees(HUFF_COUNTS *huff_counts,uint trees); static int make_huff_tree(HUFF_TREE *tree,HUFF_COUNTS *huff_counts); -static int compare_huff_elements(void *not_used, uchar *a,uchar *b); -static int save_counts_in_queue(uchar *key,element_count count, - HUFF_TREE *tree); +static int compare_huff_elements(void *not_used, const void *a, const void *b); +static int save_counts_in_queue(void *key, element_count count, + void *tree); static my_off_t calc_packed_length(HUFF_COUNTS *huff_counts,uint flag); static uint join_same_trees(HUFF_COUNTS *huff_counts,uint trees); static int make_huff_decode_table(HUFF_TREE *huff_tree,uint trees); @@ -178,7 +178,7 @@ static int mrg_rrnd(PACK_MRG_INFO *info,uchar *buf); static void mrg_reset(PACK_MRG_INFO *mrg); #if !defined(DBUG_OFF) static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count); -static int fakecmp(my_off_t **count1, my_off_t **count2); +static int fakecmp(const void *count1, const void *count2); #endif @@ -817,8 +817,8 @@ static HUFF_COUNTS *init_huff_count(MI_INFO *info,my_off_t records) 'tree_pos'. It's keys are implemented by pointers into 'tree_buff'. This is accomplished by '-1' as the element size. */ - init_tree(&count[i].int_tree,0,0,-1,(qsort_cmp2) compare_tree, NULL, - NULL, MYF(0)); + init_tree(&count[i].int_tree, 0, 0, -1, compare_tree, NULL, NULL, + MYF(0)); if (records && type != FIELD_BLOB && type != FIELD_VARCHAR) count[i].tree_pos=count[i].tree_buff = my_malloc(PSI_NOT_INSTRUMENTED, count[i].field_length > 1 ? tree_buff_length : 2, @@ -1177,10 +1177,11 @@ static int get_statistic(PACK_MRG_INFO *mrg,HUFF_COUNTS *huff_counts) } static int compare_huff_elements(void *not_used __attribute__((unused)), - uchar *a, uchar *b) + const void *a_, const void *b_) { - return *((my_off_t*) a) < *((my_off_t*) b) ? -1 : - (*((my_off_t*) a) == *((my_off_t*) b) ? 0 : 1); + const my_off_t *a= a_; + const my_off_t *b= b_; + return *a < *b ? -1 : (*a == *b ? 
0 : 1); } /* Check each tree if we should use pre-space-compress, end-space- @@ -1689,9 +1690,11 @@ static int make_huff_tree(HUFF_TREE *huff_tree, HUFF_COUNTS *huff_counts) return 0; } -static int compare_tree(void* cmp_arg __attribute__((unused)), - register const uchar *s, register const uchar *t) +static int compare_tree(void *cmp_arg __attribute__((unused)), const void *s_, + const void *t_) { + const uchar *s= s_; + const uchar *t= t_; uint length; for (length=global_count->field_length; length-- ;) if (*s++ != *t++) @@ -1720,9 +1723,10 @@ static int compare_tree(void* cmp_arg __attribute__((unused)), 0 */ -static int save_counts_in_queue(uchar *key, element_count count, - HUFF_TREE *tree) +static int save_counts_in_queue(void *key_, element_count count, void *tree_) { + uchar *key= key_; + HUFF_TREE *tree= tree_; HUFF_ELEMENT *new_huff_el; new_huff_el=tree->element_buffer+(tree->elements++); @@ -3222,8 +3226,10 @@ static void fakebigcodes(HUFF_COUNTS *huff_counts, HUFF_COUNTS *end_count) -1 count1 > count2 */ -static int fakecmp(my_off_t **count1, my_off_t **count2) +static int fakecmp(const void *count1_, const void *count2_) { + const my_off_t *const *count1= count1_; + const my_off_t *const *count2= count2_; return ((**count1 < **count2) ? 1 : (**count1 > **count2) ? -1 : 0); } diff --git a/storage/myisam/sort.c b/storage/myisam/sort.c index 0fb475c4d72..60de1f972cf 100644 --- a/storage/myisam/sort.c +++ b/storage/myisam/sort.c @@ -731,8 +731,8 @@ static int write_keys(MI_SORT_PARAM *info, register uchar **sort_keys, if (!buffpek) DBUG_RETURN(1); /* Out of memory */ - my_qsort2((uchar*) sort_keys,(size_t) count, sizeof(uchar*), - (qsort2_cmp) info->key_cmp, info); + my_qsort2(sort_keys, count, sizeof(uchar *), + info->key_cmp, info); if (!my_b_inited(tempfile) && open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST", DISK_BUFFER_SIZE, info->sort_info->param->myf_rw)) @@ -777,8 +777,8 @@ static int write_keys_varlen(MI_SORT_PARAM *info, if (!buffpek) DBUG_RETURN(1); /* Out of memory */ - my_qsort2((uchar*) sort_keys, (size_t) count, sizeof(uchar*), - (qsort2_cmp) info->key_cmp, info); + my_qsort2(sort_keys, count, sizeof(uchar *), + info->key_cmp, info); if (!my_b_inited(tempfile) && open_cached_file(tempfile, my_tmpdir(info->tmpdir), "ST", DISK_BUFFER_SIZE, info->sort_info->param->myf_rw)) @@ -819,8 +819,8 @@ static int write_index(MI_SORT_PARAM *info, register uchar **sort_keys, { DBUG_ENTER("write_index"); - my_qsort2((uchar*) sort_keys,(size_t) count,sizeof(uchar*), - (qsort2_cmp) info->key_cmp,info); + my_qsort2(sort_keys, count, sizeof(uchar *), + info->key_cmp, info); while (count--) { if ((*info->key_write)(info,*sort_keys++)) @@ -1005,7 +1005,7 @@ merge_buffers(MI_SORT_PARAM *info, ha_keys keys, IO_CACHE *from_file, sort_length=info->key_length; if (init_queue(&queue,(uint) (Tb-Fb)+1,offsetof(BUFFPEK,key),0, - (int (*)(void*, uchar *,uchar*)) info->key_cmp, + info->key_cmp, (void*) info, 0, 0)) DBUG_RETURN(1); /* purecov: inspected */ diff --git a/storage/myisammrg/myrg_queue.c b/storage/myisammrg/myrg_queue.c index 08d02bd5b12..e1cbeaec6f4 100644 --- a/storage/myisammrg/myrg_queue.c +++ b/storage/myisammrg/myrg_queue.c @@ -15,10 +15,10 @@ #include "myrg_def.h" -static int queue_key_cmp(void *keyseg, uchar *a, uchar *b) +static int queue_key_cmp(void *keyseg, const void *a, const void *b) { - MYRG_TABLE *ma= (MYRG_TABLE *)a; - MYRG_TABLE *mb= (MYRG_TABLE *)b; + const MYRG_TABLE *ma= a; + const MYRG_TABLE *mb= b; MI_INFO *aa= ma->table; MI_INFO *bb= mb->table; uint 
not_used[2]; diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc index db8337c60f1..bd3e27a5515 100644 --- a/storage/oqgraph/ha_oqgraph.cc +++ b/storage/oqgraph/ha_oqgraph.cc @@ -319,7 +319,7 @@ int ha_oqgraph::oqgraph_check_table_structure (TABLE *table_arg) { DBUG_PRINT( "oq-debug", ("Allowing integer no more!")); badColumn = true; - push_warning_printf( current_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, "Integer latch is not supported for new tables.", i); + push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, HA_WRONG_CREATE_OPTION, "Integer latch is not supported for new tables."); } else /* Check Column Type */ if ((*field)->type() != skel[i].coltype) { diff --git a/storage/perfschema/pfs_account.cc b/storage/perfschema/pfs_account.cc index 84257d9dc38..2a877836649 100644 --- a/storage/perfschema/pfs_account.cc +++ b/storage/perfschema/pfs_account.cc @@ -67,19 +67,19 @@ void cleanup_account(void) } C_MODE_START -static uchar *account_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *account_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_account * const *typed_entry; const PFS_account *account; const void *result; - typed_entry= reinterpret_cast<const PFS_account * const *> (entry); + typed_entry= static_cast<const PFS_account * const *> (entry); assert(typed_entry != NULL); account= *typed_entry; assert(account != NULL); *length= account->m_key.m_key_length; result= account->m_key.m_hash_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_digest.cc b/storage/perfschema/pfs_digest.cc index 47fa82dcb9a..296680c356f 100644 --- a/storage/perfschema/pfs_digest.cc +++ b/storage/perfschema/pfs_digest.cc @@ -146,19 +146,19 @@ void cleanup_digest(void) } C_MODE_START -static uchar *digest_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *digest_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_statements_digest_stat * const *typed_entry; const PFS_statements_digest_stat *digest; const void *result; - typed_entry= reinterpret_cast<const PFS_statements_digest_stat * const *>(entry); + typed_entry= static_cast<const PFS_statements_digest_stat * const *>(entry); assert(typed_entry != NULL); digest= *typed_entry; assert(digest != NULL); *length= sizeof (PFS_digest_key); result= & digest->m_digest_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_host.cc b/storage/perfschema/pfs_host.cc index 42c0620a39f..bf8421909ba 100644 --- a/storage/perfschema/pfs_host.cc +++ b/storage/perfschema/pfs_host.cc @@ -65,19 +65,19 @@ void cleanup_host(void) } C_MODE_START -static uchar *host_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *host_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_host * const *typed_entry; const PFS_host *host; const void *result; - typed_entry= reinterpret_cast<const PFS_host * const *> (entry); + typed_entry= static_cast<const PFS_host * const *> (entry); assert(typed_entry != NULL); host= *typed_entry; assert(host != NULL); *length= host->m_key.m_key_length; result= host->m_key.m_hash_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_instr.cc b/storage/perfschema/pfs_instr.cc index d34afcb1393..dad6125fa83 100644 --- a/storage/perfschema/pfs_instr.cc +++ b/storage/perfschema/pfs_instr.cc @@ -253,19 +253,19 @@ void cleanup_instruments(void) C_MODE_START /** Get hash table key for instrumented files.
*/ -static uchar *filename_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *filename_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_file * const *typed_entry; const PFS_file *file; const void *result; - typed_entry= reinterpret_cast<const PFS_file * const *> (entry); + typed_entry= static_cast<const PFS_file * const *> (entry); assert(typed_entry != NULL); file= *typed_entry; assert(file != NULL); *length= file->m_filename_length; result= file->m_filename; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_instr_class.cc b/storage/perfschema/pfs_instr_class.cc index 49aa73f078a..86cc6365c31 100644 --- a/storage/perfschema/pfs_instr_class.cc +++ b/storage/perfschema/pfs_instr_class.cc @@ -403,19 +403,19 @@ void cleanup_table_share(void) C_MODE_START /** get_key function for @c table_share_hash. */ -static uchar *table_share_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *table_share_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_table_share * const *typed_entry; const PFS_table_share *share; const void *result; - typed_entry= reinterpret_cast<const PFS_table_share * const *> (entry); + typed_entry= static_cast<const PFS_table_share * const *> (entry); assert(typed_entry != NULL); share= *typed_entry; assert(share != NULL); *length= share->m_key.m_key_length; result= &share->m_key.m_hash_key[0]; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_program.cc b/storage/perfschema/pfs_program.cc index de456610519..20b0aaa8489 100644 --- a/storage/perfschema/pfs_program.cc +++ b/storage/perfschema/pfs_program.cc @@ -63,19 +63,19 @@ void cleanup_program(void) } C_MODE_START -static uchar *program_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *program_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_program * const *typed_entry; const PFS_program *program; const void *result; - typed_entry= reinterpret_cast<const PFS_program * const *> (entry); + typed_entry= static_cast<const PFS_program * const *> (entry); assert(typed_entry != NULL); program= *typed_entry; assert(program != NULL); *length= program->m_key.m_key_length; result= program->m_key.m_hash_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_setup_actor.cc b/storage/perfschema/pfs_setup_actor.cc index c2b53bf2d1e..638b2e5648d 100644 --- a/storage/perfschema/pfs_setup_actor.cc +++ b/storage/perfschema/pfs_setup_actor.cc @@ -63,19 +63,19 @@ void cleanup_setup_actor(void) } C_MODE_START -static uchar *setup_actor_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *setup_actor_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_setup_actor * const *typed_entry; const PFS_setup_actor *setup_actor; const void *result; - typed_entry= reinterpret_cast<const PFS_setup_actor * const *> (entry); + typed_entry= static_cast<const PFS_setup_actor * const *> (entry); assert(typed_entry != NULL); setup_actor= *typed_entry; assert(setup_actor != NULL); *length= setup_actor->m_key.m_key_length; result= setup_actor->m_key.m_hash_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_setup_object.cc b/storage/perfschema/pfs_setup_object.cc index b18bc2fd5ba..a961c51d0e9 100644 --- a/storage/perfschema/pfs_setup_object.cc +++ b/storage/perfschema/pfs_setup_object.cc @@ -63,19 +63,19 @@ void cleanup_setup_object(void) } C_MODE_START -static uchar
*setup_object_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *setup_object_hash_get_key(const void *entry, + size_t *length, my_bool) { const PFS_setup_object * const *typed_entry; const PFS_setup_object *setup_object; const void *result; - typed_entry= reinterpret_cast<const PFS_setup_object * const *> (entry); + typed_entry= static_cast<const PFS_setup_object * const *> (entry); assert(typed_entry != NULL); setup_object= *typed_entry; assert(setup_object != NULL); *length= setup_object->m_key.m_key_length; result= setup_object->m_key.m_hash_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/perfschema/pfs_user.cc b/storage/perfschema/pfs_user.cc index fa39b330f75..d83ee52a77d 100644 --- a/storage/perfschema/pfs_user.cc +++ b/storage/perfschema/pfs_user.cc @@ -64,19 +64,19 @@ void cleanup_user(void) } C_MODE_START -static uchar *user_hash_get_key(const uchar *entry, size_t *length, - my_bool) +static const uchar *user_hash_get_key(const void *entry, size_t *length, + my_bool) { const PFS_user * const *typed_entry; const PFS_user *user; const void *result; - typed_entry= reinterpret_cast<const PFS_user * const *> (entry); + typed_entry= static_cast<const PFS_user * const *> (entry); assert(typed_entry != NULL); user= *typed_entry; assert(user != NULL); *length= user->m_key.m_key_length; result= user->m_key.m_hash_key; - return const_cast<uchar *> (reinterpret_cast<const uchar *> (result)); + return reinterpret_cast<const uchar *>(result); } C_MODE_END diff --git a/storage/rocksdb/CMakeLists.txt b/storage/rocksdb/CMakeLists.txt index 4a190755728..ce68d31c28c 100644 --- a/storage/rocksdb/CMakeLists.txt +++ b/storage/rocksdb/CMakeLists.txt @@ -11,6 +11,7 @@ at maximising storage efficiency while maintaining InnoDB-like performance." PAR MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-range-loop-construct) MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-invalid-offsetof) +MY_CHECK_AND_SET_COMPILER_FLAG(-Wno-effc++ DEBUG RELWITHDEBINFO) MACRO(SKIP_ROCKSDB_PLUGIN msg) MESSAGE_ONCE(SKIP_ROCKSDB_PLUGIN "Can't build rocksdb engine - ${msg}") diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index 103ce50dd63..a68b49571df 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -720,11 +720,12 @@ typedef size_t GetKeyLength_t; typedef uint GetKeyLength_t; #endif -static byte * sphinx_get_key ( const byte * pSharePtr, GetKeyLength_t * pLength, my_bool ) +static const uchar *sphinx_get_key(const void *pSharePtr, + GetKeyLength_t *pLength, my_bool) { - CSphSEShare * pShare = (CSphSEShare *) pSharePtr; - *pLength = (size_t) pShare->m_iTableNameLen; - return (byte*) pShare->m_sTable; + const CSphSEShare *pShare= static_cast<const CSphSEShare *>(pSharePtr); + *pLength= pShare->m_iTableNameLen; + return reinterpret_cast<const uchar *>(pShare->m_sTable); } #if MYSQL_VERSION_ID<50100 diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_27186.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_27186.result index 9c2a7a60829..0b60fa1f0d6 100644 --- a/storage/spider/mysql-test/spider/bugfix/r/mdev_27186.result +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_27186.result @@ -6,6 +6,8 @@ test.t preload_keys Error Unable to connect to foreign data source: localhost test.t preload_keys Error Unable to connect to foreign data source: localhost test.t preload_keys Error Unable to connect to foreign data source: localhost test.t preload_keys Error Unable to connect to foreign data source: localhost +test.t preload_keys Error Unable to connect to foreign data source: localhost +test.t preload_keys Error Unable to connect to foreign data source: localhost test.t preload_keys
error Corrupt DROP TABLE t; CREATE TABLE t (c INT PRIMARY KEY) ENGINE=SPIDER PARTITION BY KEY() PARTITIONS 2; diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_27902.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_27902.result index 7460bbca85e..9b051274363 100644 --- a/storage/spider/mysql-test/spider/bugfix/r/mdev_27902.result +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_27902.result @@ -11,6 +11,8 @@ HANDLER t OPEN; Warnings: Error 1429 Unable to connect to foreign data source: localhost Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost HANDLER t READ next; ERROR HY000: Unable to connect to foreign data source: localhost dummy; @@ -23,6 +25,8 @@ HANDLER t OPEN; Warnings: Error 1429 Unable to connect to foreign data source: localhost Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost HANDLER t READ FIRST; ERROR HY000: Unable to connect to foreign data source: localhost HANDLER t READ NEXT; @@ -33,6 +37,8 @@ HANDLER t OPEN; Warnings: Error 1429 Unable to connect to foreign data source: localhost Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost HANDLER t READ NEXT; ERROR HY000: Unable to connect to foreign data source: localhost SELECT * FROM t; diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_30014.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_30014.result index 94fdcd2033c..25e165fbbea 100644 --- a/storage/spider/mysql-test/spider/bugfix/r/mdev_30014.result +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_30014.result @@ -20,6 +20,8 @@ FLUSH NO_WRITE_TO_BINLOG TABLES t4 WITH READ LOCK; Warnings: Error 1429 Unable to connect to foreign data source: localhost Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost +Error 1429 Unable to connect to foreign data source: localhost UNLOCK TABLES; drop table t1, t2, t3, t4; create table t1 (c int); diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_30649.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_30649.result new file mode 100644 index 00000000000..4824dfdd44f --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_30649.result @@ -0,0 +1,49 @@ +for master_1 +for child2 +child2_1 +child2_2 +child2_3 +for child3 +connection child2_1; +CREATE DATABASE auto_test_remote; +USE auto_test_remote; +CREATE TABLE src (c INT, d DATE, PRIMARY KEY(c)); +INSERT INTO src VALUES (42, '2024-08-27'), (73, '2024-07-12'); +connection master_1; +CREATE DATABASE auto_test_local; +USE auto_test_local; +SET spider_same_server_link=1; +CREATE TABLE dst (c INT, d DATE, PRIMARY KEY(c)) ENGINE=INNODB; +INSERT INTO dst VALUES (555, '1999-12-12'); +CREATE TABLE t (c INT, d DATE, PRIMARY KEY(c)) ENGINE=SPIDER +COMMENT='table "src dst", srv "s_2_1 s_1"'; +Warnings: +Warning 138 Spider table params in COMMENT or CONNECTION strings have been deprecated and will be removed in a future release. Please use table options instead. 
+Warning 138 The high availability feature of Spider has been deprecated and will be removed in a future release +Warning 138 The high availability feature of Spider has been deprecated and will be removed in a future release +SELECT spider_copy_tables('t', '0', '1'); +spider_copy_tables('t', '0', '1') +1 +SELECT * FROM dst; +c d +42 2024-08-27 +73 2024-07-12 +555 1999-12-12 +connection child2_1; +DROP TABLE src; +DROP DATABASE auto_test_remote; +connection master_1; +DROP TABLE t; +SELECT * FROM dst; +c d +42 2024-08-27 +73 2024-07-12 +555 1999-12-12 +DROP TABLE dst; +DROP DATABASE auto_test_local; +for master_1 +for child2 +child2_1 +child2_2 +child2_3 +for child3 diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_35064.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_35064.result new file mode 100644 index 00000000000..2a78962c696 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_35064.result @@ -0,0 +1,16 @@ +for master_1 +for child2 +for child3 +CREATE SERVER s FOREIGN DATA WRAPPER mysql OPTIONS(HOST '1'); +CREATE TABLE t(c INT)ENGINE=Spider COMMENT='WRAPPER "mysql",SRV "s",TABLE "foo"'; +Warnings: +Warning 138 Spider table params in COMMENT or CONNECTION strings have been deprecated and will be removed in a future release. Please use table options instead. +INSERT INTO t VALUES(1); +ERROR HY000: Unable to connect to foreign data source: s +INSERT INTO t VALUES(1); +ERROR HY000: Unable to connect to foreign data source: s +drop table t; +drop server s; +for master_1 +for child2 +for child3 diff --git a/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result b/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result index 2282eceeeaf..99ee1cb2876 100644 --- a/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result +++ b/storage/spider/mysql-test/spider/bugfix/r/slave_trx_isolation.result @@ -55,6 +55,8 @@ set session time_zone = '+00:00';set @`spider_lc_./auto_test_remote/tbl_a` = '-x SET NAMES utf8mb3 set @old_lock_wait_timeout=@@session.lock_wait_timeout;set session lock_wait_timeout=1 set session lock_wait_timeout=@old_lock_wait_timeout +set @old_lock_wait_timeout=@@session.lock_wait_timeout;set session lock_wait_timeout=1 +set session lock_wait_timeout=@old_lock_wait_timeout set session transaction isolation level read committed;set session autocommit = 1;set session wait_timeout = 604800;set session sql_mode = 'strict_trans_tables,error_for_division_by_zero,no_auto_create_user,no_engine_substitution';start transaction SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%set %' SELECT pkey FROM tbl_a ORDER BY pkey; diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_30649.cnf b/storage/spider/mysql-test/spider/bugfix/t/mdev_30649.cnf new file mode 100644 index 00000000000..05dfd8a0bce --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_30649.cnf @@ -0,0 +1,3 @@ +!include include/default_mysqld.cnf +!include ../my_1_1.cnf +!include ../my_2_1.cnf diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_30649.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_30649.test new file mode 100644 index 00000000000..02df6032887 --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_30649.test @@ -0,0 +1,47 @@ +--disable_query_log +--disable_result_log +--source ../../t/test_init.inc +--enable_result_log +--enable_query_log + +--connection child2_1 +CREATE DATABASE auto_test_remote; +USE auto_test_remote; +# works without primary key +CREATE 
TABLE src (c INT, d DATE, PRIMARY KEY(c)); +INSERT INTO src VALUES (42, '2024-08-27'), (73, '2024-07-12'); + +--connection master_1 +CREATE DATABASE auto_test_local; +USE auto_test_local; +SET spider_same_server_link=1; + +# works without primary key +CREATE TABLE dst (c INT, d DATE, PRIMARY KEY(c)) ENGINE=INNODB; +INSERT INTO dst VALUES (555, '1999-12-12'); + +# requires primary key, but it is ok if the data node tables have no +# primary key and the corresponding column has duplicate values. +# possibly a bug, e.g. an unnecessary requirement. +evalp CREATE TABLE t (c INT, d DATE, PRIMARY KEY(c)) ENGINE=SPIDER +COMMENT='table "src dst", srv "s_2_1 s_1"'; +SELECT spider_copy_tables('t', '0', '1'); + +SELECT * FROM dst; + +--connection child2_1 +DROP TABLE src; +DROP DATABASE auto_test_remote; + +--connection master_1 + +DROP TABLE t; +SELECT * FROM dst; +DROP TABLE dst; +DROP DATABASE auto_test_local; + +--disable_query_log +--disable_result_log +--source ../../t/test_deinit.inc +--enable_result_log +--enable_query_log diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_35064.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_35064.test new file mode 100644 index 00000000000..fcf2bb361af --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_35064.test @@ -0,0 +1,20 @@ +--disable_query_log +--disable_result_log +--source ../../t/test_init.inc +--enable_result_log +--enable_query_log + +CREATE SERVER s FOREIGN DATA WRAPPER mysql OPTIONS(HOST '1'); +CREATE TABLE t(c INT)ENGINE=Spider COMMENT='WRAPPER "mysql",SRV "s",TABLE "foo"'; +--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE +INSERT INTO t VALUES(1); +--error ER_CONNECT_TO_FOREIGN_DATA_SOURCE +INSERT INTO t VALUES(1); # Hangs +drop table t; +drop server s; + +--disable_query_log +--disable_result_log +--source ../../t/test_deinit.inc +--enable_result_log +--enable_query_log diff --git a/storage/spider/mysql-test/spider/r/slave_trx_isolation.result b/storage/spider/mysql-test/spider/r/slave_trx_isolation.result index c65a62e0108..26cddde0d07 100644 --- a/storage/spider/mysql-test/spider/r/slave_trx_isolation.result +++ b/storage/spider/mysql-test/spider/r/slave_trx_isolation.result @@ -58,6 +58,8 @@ set session time_zone = '+00:00';set @`spider_lc_./auto_test_remote/tbl_a` = '-x SET NAMES utf8mb3 set @old_lock_wait_timeout=@@session.lock_wait_timeout;set session lock_wait_timeout=1 set session lock_wait_timeout=@old_lock_wait_timeout +set @old_lock_wait_timeout=@@session.lock_wait_timeout;set session lock_wait_timeout=1 +set session lock_wait_timeout=@old_lock_wait_timeout set session transaction isolation level read committed;set session autocommit = 1;set session wait_timeout = 604800;set session sql_mode = 'strict_trans_tables,error_for_division_by_zero,no_auto_create_user,no_engine_substitution';start transaction SELECT argument FROM mysql.general_log WHERE command_type != 'Execute' AND argument LIKE '%set %' SELECT pkey FROM tbl_a ORDER BY pkey; diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc index 3706f1b8427..96df128b5fb 100644 --- a/storage/spider/spd_conn.cc +++ b/storage/spider/spd_conn.cc @@ -100,48 +100,52 @@ ulong spider_open_connections_line_no; pthread_mutex_t spider_conn_mutex; /* for spider_open_connections and trx_conn_hash */ -uchar *spider_conn_get_key( - SPIDER_CONN *conn, +const uchar *spider_conn_get_key( + const void *conn_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto conn= static_cast(conn_); DBUG_ENTER("spider_conn_get_key"); *length = 
conn->conn_key_length; #ifdef DBUG_TRACE spider_print_keys(conn->conn_key, conn->conn_key_length); #endif - DBUG_RETURN((uchar*) conn->conn_key); + DBUG_RETURN(reinterpret_cast(conn->conn_key)); } -uchar *spider_ipport_conn_get_key( - SPIDER_IP_PORT_CONN *ip_port, +const uchar *spider_ipport_conn_get_key( + const void *ip_port_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto ip_port= static_cast(ip_port_); DBUG_ENTER("spider_ipport_conn_get_key"); *length = ip_port->key_len; - DBUG_RETURN((uchar*) ip_port->key); + DBUG_RETURN(reinterpret_cast(ip_port->key)); } -static uchar *spider_loop_check_full_get_key( - SPIDER_CONN_LOOP_CHECK *ptr, +static const uchar *spider_loop_check_full_get_key( + const void *ptr_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto ptr= static_cast(ptr_); DBUG_ENTER("spider_loop_check_full_get_key"); *length = ptr->full_name.length; - DBUG_RETURN((uchar*) ptr->full_name.str); + DBUG_RETURN(reinterpret_cast(ptr->full_name.str)); } -static uchar *spider_loop_check_to_get_key( - SPIDER_CONN_LOOP_CHECK *ptr, +static const uchar *spider_loop_check_to_get_key( + const void *ptr_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto ptr= static_cast(ptr_); DBUG_ENTER("spider_loop_check_to_get_key"); *length = ptr->to_name.length; - DBUG_RETURN((uchar*) ptr->to_name.str); + DBUG_RETURN(reinterpret_cast(ptr->to_name.str)); } int spider_conn_init( @@ -154,10 +158,10 @@ int spider_conn_init( { goto error_loop_check_mutex_init; } - if ( - my_hash_init(PSI_INSTRUMENT_ME, &conn->loop_checked, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_loop_check_full_get_key, 0, 0) - ) { + if (my_hash_init(PSI_INSTRUMENT_ME, &conn->loop_checked, + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_loop_check_full_get_key, 0, 0)) + { goto error_loop_checked_hash_init; } spider_alloc_calc_mem_init(conn->loop_checked, SPD_MID_CONN_INIT_1); @@ -165,10 +169,10 @@ int spider_conn_init( conn->loop_checked, conn->loop_checked.array.max_element * conn->loop_checked.array.size_of_element); - if ( - my_hash_init(PSI_INSTRUMENT_ME, &conn->loop_check_queue, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_loop_check_to_get_key, 0, 0) - ) { + if (my_hash_init(PSI_INSTRUMENT_ME, &conn->loop_check_queue, + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_loop_check_to_get_key, 0, 0)) + { goto error_loop_check_queue_hash_init; } spider_alloc_calc_mem_init(conn->loop_check_queue, SPD_MID_CONN_INIT_2); diff --git a/storage/spider/spd_conn.h b/storage/spider/spd_conn.h index 1152bd808f5..f8859365a66 100644 --- a/storage/spider/spd_conn.h +++ b/storage/spider/spd_conn.h @@ -82,16 +82,16 @@ typedef struct st_spider_conn_loop_check LEX_CSTRING merged_value; } SPIDER_CONN_LOOP_CHECK; -uchar *spider_conn_get_key( - SPIDER_CONN *conn, +const uchar *spider_conn_get_key( + const void *conn, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ); -uchar *spider_ipport_conn_get_key( - SPIDER_IP_PORT_CONN *ip_port, +const uchar *spider_ipport_conn_get_key( + const void *ip_port, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ); int spider_conn_init( diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index 69c4026eb2d..59467b98ee6 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -1845,10 +1845,10 @@ int spider_db_mbase::init() { DBUG_ENTER("spider_db_mbase::init"); DBUG_PRINT("info",("spider this=%p", this)); - if ( 
- my_hash_init(PSI_INSTRUMENT_ME, &lock_table_hash, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_link_get_key, 0, 0) - ) { + if (my_hash_init(PSI_INSTRUMENT_ME, &lock_table_hash, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_link_get_key, 0, + 0)) + { DBUG_RETURN(HA_ERR_OUT_OF_MEM); } spider_alloc_calc_mem_init(lock_table_hash, SPD_MID_DB_MBASE_INIT_1); @@ -2023,7 +2023,7 @@ int spider_db_mbase::connect( DBUG_RETURN(ER_CONNECT_TO_FOREIGN_DATA_SOURCE); } connect_retry_count--; - my_sleep((ulong) connect_retry_interval); + my_sleep((ulong) connect_retry_interval * 1000); } else { db_conn->net.thd = NULL; if (connect_mutex) @@ -2296,7 +2296,7 @@ int spider_db_mbase::fetch_and_print_warnings(struct tm *l_time) longlong res_num = (longlong) my_strtoll10(row[1], (char**) NULL, &error_num); DBUG_PRINT("info",("spider res_num=%lld", res_num)); - my_printf_error((int) res_num, row[2], MYF(0)); + my_printf_error((int) res_num, "%s", MYF(0), row[2]); error_num = (int) res_num; row = mysql_fetch_row(res); } diff --git a/storage/spider/spd_group_by_handler.cc b/storage/spider/spd_group_by_handler.cc index 359e4fb24e6..eb0ab563314 100644 --- a/storage/spider/spd_group_by_handler.cc +++ b/storage/spider/spd_group_by_handler.cc @@ -1435,6 +1435,7 @@ group_by_handler *spider_create_group_by_handler( if (!(table_holder= spider_create_table_holder(table_count))) DBUG_RETURN(NULL); + my_bitmap_init(&skips, NULL, query->select->elements); table_idx = 0; from = query->from; if (from->table->part_info) @@ -1538,7 +1539,6 @@ group_by_handler *spider_create_group_by_handler( fields_arg->set_table_holder(table_holder, table_count); keep_going = TRUE; it.init(*query->select); - my_bitmap_init(&skips, NULL, query->select->elements); int i= -1, n_aux= query->n_aux; while ((item = it++)) { diff --git a/storage/spider/spd_param.cc b/storage/spider/spd_param.cc index bfe6afc400e..df3161eb42d 100644 --- a/storage/spider/spd_param.cc +++ b/storage/spider/spd_param.cc @@ -1740,7 +1740,7 @@ static MYSQL_THDVAR_INT( "Connect retry count", /* comment */ NULL, /* check */ NULL, /* update */ - 1000, /* def */ + 2, /* def */ 0, /* min */ 2147483647, /* max */ 0 /* blk */ diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index b0d3d41d246..6d4a46e8662 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -449,76 +449,73 @@ static char spider_unique_id_buf[1 + 12 + 1 + (16 * 2) + 1 + 1]; LEX_CSTRING spider_unique_id; // for spider_open_tables -uchar *spider_tbl_get_key( - SPIDER_SHARE *share, +const uchar *spider_tbl_get_key( + const void *share_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto share= static_cast(share_); DBUG_ENTER("spider_tbl_get_key"); *length = share->table_name_length; - DBUG_RETURN((uchar*) share->table_name); + DBUG_RETURN(reinterpret_cast(share->table_name)); } -uchar *spider_wide_share_get_key( - SPIDER_WIDE_SHARE *share, +const uchar *spider_wide_share_get_key( + const void *share_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto share= static_cast(share_); DBUG_ENTER("spider_wide_share_get_key"); *length = share->table_name_length; - DBUG_RETURN((uchar*) share->table_name); + DBUG_RETURN(reinterpret_cast(share->table_name)); } -uchar *spider_lgtm_tblhnd_share_hash_get_key( - SPIDER_LGTM_TBLHND_SHARE *share, +const uchar *spider_lgtm_tblhnd_share_hash_get_key( + const void *share_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto share= 
static_cast(share_); DBUG_ENTER("spider_lgtm_tblhnd_share_hash_get_key"); *length = share->table_name_length; - DBUG_RETURN((uchar*) share->table_name); + DBUG_RETURN(reinterpret_cast(share->table_name)); } -uchar *spider_link_get_key( - SPIDER_LINK_FOR_HASH *link_for_hash, +const uchar *spider_link_get_key( + const void *link_for_hash_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto link_for_hash= + static_cast(link_for_hash_); DBUG_ENTER("spider_link_get_key"); *length = link_for_hash->db_table_str->length(); - DBUG_RETURN((uchar*) link_for_hash->db_table_str->ptr()); + DBUG_RETURN(reinterpret_cast(link_for_hash->db_table_str->ptr())); } -uchar *spider_ha_get_key( - ha_spider *spider, +const uchar *spider_udf_tbl_mon_list_key( + const void *table_mon_list_, size_t *length, - my_bool not_used __attribute__ ((unused)) -) { - DBUG_ENTER("spider_ha_get_key"); - *length = spider->share->table_name_length; - DBUG_RETURN((uchar*) spider->share->table_name); -} - -uchar *spider_udf_tbl_mon_list_key( - SPIDER_TABLE_MON_LIST *table_mon_list, - size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto table_mon_list= + static_cast(table_mon_list_); DBUG_ENTER("spider_udf_tbl_mon_list_key"); DBUG_PRINT("info",("spider hash key=%s", table_mon_list->key)); DBUG_PRINT("info",("spider hash key length=%u", table_mon_list->key_length)); *length = table_mon_list->key_length; - DBUG_RETURN((uchar*) table_mon_list->key); + DBUG_RETURN(reinterpret_cast(table_mon_list->key)); } -uchar *spider_allocated_thds_get_key( - THD *thd, +const uchar *spider_allocated_thds_get_key( + const void *thd, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { DBUG_ENTER("spider_allocated_thds_get_key"); *length = sizeof(THD *); - DBUG_RETURN((uchar*) thd); + DBUG_RETURN(reinterpret_cast(thd)); } #ifdef HAVE_PSI_INTERFACE @@ -6643,8 +6640,9 @@ int spider_db_init( &spider_mem_calc_mutex, MY_MUTEX_INIT_FAST)) goto error_mem_calc_mutex_init; - if (my_hash_init(PSI_INSTRUMENT_ME, &spider_open_tables, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_tbl_get_key, 0, 0)) + if (my_hash_init(PSI_INSTRUMENT_ME, &spider_open_tables, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_tbl_get_key, 0, + 0)) goto error_open_tables_hash_init; spider_alloc_calc_mem_init(spider_open_tables, SPD_MID_DB_INIT_1); @@ -6652,8 +6650,9 @@ int spider_db_init( spider_open_tables, spider_open_tables.array.max_element * spider_open_tables.array.size_of_element); - if (my_hash_init(PSI_INSTRUMENT_ME, &spider_init_error_tables, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_tbl_get_key, 0, 0)) + if (my_hash_init(PSI_INSTRUMENT_ME, &spider_init_error_tables, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_tbl_get_key, 0, + 0)) goto error_init_error_tables_hash_init; spider_alloc_calc_mem_init(spider_init_error_tables, SPD_MID_DB_INIT_2); @@ -6661,10 +6660,9 @@ int spider_db_init( spider_init_error_tables, spider_init_error_tables.array.max_element * spider_init_error_tables.array.size_of_element); - if( - my_hash_init(PSI_INSTRUMENT_ME, &spider_open_wide_share, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_wide_share_get_key, 0, 0) - ) + if (my_hash_init(PSI_INSTRUMENT_ME, &spider_open_wide_share, + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_wide_share_get_key, 0, 0)) goto error_open_wide_share_hash_init; spider_alloc_calc_mem_init(spider_open_wide_share, SPD_MID_DB_INIT_3); @@ -6674,7 +6672,7 @@ int spider_db_init( 
spider_open_wide_share.array.size_of_element); if (my_hash_init(PSI_INSTRUMENT_ME, &spider_lgtm_tblhnd_share_hash, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_lgtm_tblhnd_share_hash_get_key, 0, 0)) + spider_lgtm_tblhnd_share_hash_get_key, 0, 0)) goto error_lgtm_tblhnd_share_hash_init; spider_alloc_calc_mem_init(spider_lgtm_tblhnd_share_hash, SPD_MID_DB_INIT_4); @@ -6682,22 +6680,24 @@ int spider_db_init( spider_lgtm_tblhnd_share_hash, spider_lgtm_tblhnd_share_hash.array.max_element * spider_lgtm_tblhnd_share_hash.array.size_of_element); - if (my_hash_init(PSI_INSTRUMENT_ME, &spider_open_connections, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_conn_get_key, 0, 0)) + if (my_hash_init(PSI_INSTRUMENT_ME, &spider_open_connections, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_conn_get_key, 0, + 0)) goto error_open_connections_hash_init; - if (my_hash_init(PSI_INSTRUMENT_ME, &spider_ipport_conns, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_ipport_conn_get_key, - spider_free_ipport_conn, 0)) - goto error_ipport_conn__hash_init; + if (my_hash_init(PSI_INSTRUMENT_ME, &spider_ipport_conns, + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_ipport_conn_get_key, spider_free_ipport_conn, 0)) + goto error_ipport_conn__hash_init; spider_alloc_calc_mem_init(spider_open_connections, SPD_MID_DB_INIT_5); spider_alloc_calc_mem(NULL, spider_open_connections, spider_open_connections.array.max_element * spider_open_connections.array.size_of_element); - if (my_hash_init(PSI_INSTRUMENT_ME, &spider_allocated_thds, spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_allocated_thds_get_key, 0, 0)) + if (my_hash_init(PSI_INSTRUMENT_ME, &spider_allocated_thds, + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_allocated_thds_get_key, 0, 0)) goto error_allocated_thds_hash_init; spider_alloc_calc_mem_init(spider_allocated_thds, SPD_MID_DB_INIT_8); @@ -6748,9 +6748,10 @@ int spider_db_init( roop_count < (int) spider_udf_table_mon_mutex_count; roop_count++) { - if (my_hash_init(PSI_INSTRUMENT_ME, &spider_udf_table_mon_list_hash[roop_count], - spd_charset_utf8mb3_bin, 32, 0, 0, - (my_hash_get_key) spider_udf_tbl_mon_list_key, 0, 0)) + if (my_hash_init(PSI_INSTRUMENT_ME, + &spider_udf_table_mon_list_hash[roop_count], + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_udf_tbl_mon_list_key, 0, 0)) goto error_init_udf_table_mon_list_hash; spider_alloc_calc_mem_init(spider_udf_table_mon_list_hash, SPD_MID_DB_INIT_11); @@ -7211,6 +7212,12 @@ int spider_get_crd( enum ha_sts_crd_get_type get_type = spider_get_crd_type(share, crd_interval, crd_sync); + if (get_type == HA_GET_COPY) + memcpy(share->cardinality, share->wide_share->cardinality, + sizeof(longlong) * table->s->fields); + else + error_num= spider_db_show_index(spider, link_idx, table, crd_mode); + if (get_type >= HA_GET_AFTER_LOCK) pthread_mutex_unlock(&share->wide_share->crd_mutex); if (error_num) @@ -8307,9 +8314,11 @@ bool spider_check_index_merge( } int spider_compare_for_sort( - SPIDER_SORT *a, - SPIDER_SORT *b + const void *a_, + const void *b_ ) { + const SPIDER_SORT *a= static_cast(a_); + const SPIDER_SORT *b= static_cast(b_); DBUG_ENTER("spider_compare_for_sort"); if (a->sort > b->sort) DBUG_RETURN(-1); diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h index 10e89683976..3c2cd7823ab 100644 --- a/storage/spider/spd_table.h +++ b/storage/spider/spd_table.h @@ -47,28 +47,22 @@ typedef struct st_spider_param_string_parse bool locate_param_def(char*& start_param); } SPIDER_PARAM_STRING_PARSE; -uchar 
*spider_tbl_get_key( - SPIDER_SHARE *share, +const uchar *spider_tbl_get_key( + const void *share, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ); -uchar *spider_wide_share_get_key( - SPIDER_WIDE_SHARE *share, +const uchar *spider_wide_share_get_key( + const void *share, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ); -uchar *spider_link_get_key( - SPIDER_LINK_FOR_HASH *link_for_hash, +const uchar *spider_link_get_key( + const void *link_for_hash, size_t *length, - my_bool not_used __attribute__ ((unused)) -); - -uchar *spider_ha_get_key( - ha_spider *spider, - size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ); int spider_get_server( @@ -416,8 +410,8 @@ bool spider_check_index_merge( ); int spider_compare_for_sort( - SPIDER_SORT *a, - SPIDER_SORT *b + const void *a, + const void *b ); ulong spider_calc_for_sort( diff --git a/storage/spider/spd_trx.cc b/storage/spider/spd_trx.cc index bf612e9689b..b5ad22b1f25 100644 --- a/storage/spider/spd_trx.cc +++ b/storage/spider/spd_trx.cc @@ -54,29 +54,31 @@ extern ulong spider_allocated_thds_line_no; extern pthread_mutex_t spider_allocated_thds_mutex; // for spider_alter_tables -uchar *spider_alter_tbl_get_key( - SPIDER_ALTER_TABLE *alter_table, +const uchar *spider_alter_tbl_get_key( + const void *alter_table_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto alter_table= static_cast(alter_table_); DBUG_ENTER("spider_alter_tbl_get_key"); *length = alter_table->table_name_length; DBUG_PRINT("info",("spider table_name_length=%zu", *length)); DBUG_PRINT("info",("spider table_name=%s", alter_table->table_name)); - DBUG_RETURN((uchar*) alter_table->table_name); + DBUG_RETURN(reinterpret_cast(alter_table->table_name)); } // for SPIDER_TRX_HA -uchar *spider_trx_ha_get_key( - SPIDER_TRX_HA *trx_ha, +const uchar *spider_trx_ha_get_key( + const void *trx_ha_, size_t *length, - my_bool not_used __attribute__ ((unused)) + my_bool ) { + auto trx_ha= static_cast(trx_ha_); DBUG_ENTER("spider_trx_ha_get_key"); *length = trx_ha->table_name_length; DBUG_PRINT("info",("spider table_name_length=%zu", *length)); DBUG_PRINT("info",("spider table_name=%s", trx_ha->table_name)); - DBUG_RETURN((uchar*) trx_ha->table_name); + DBUG_RETURN(reinterpret_cast(trx_ha->table_name)); } /* @@ -1165,11 +1167,9 @@ SPIDER_TRX *spider_get_trx( goto error_init_udf_table_mutex; } - if ( - my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_conn_hash, - spd_charset_utf8mb3_bin, 32, 0, 0, (my_hash_get_key) - spider_conn_get_key, 0, 0) - ) + if (my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_conn_hash, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_conn_get_key, 0, + 0)) goto error_init_hash; spider_alloc_calc_mem_init(trx->trx_conn_hash, SPD_MID_GET_TRX_2); spider_alloc_calc_mem( @@ -1178,11 +1178,9 @@ SPIDER_TRX *spider_get_trx( trx->trx_conn_hash.array.max_element * trx->trx_conn_hash.array.size_of_element); - if ( - my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_another_conn_hash, - spd_charset_utf8mb3_bin, 32, 0, 0, (my_hash_get_key) - spider_conn_get_key, 0, 0) - ) + if (my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_another_conn_hash, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_conn_get_key, 0, + 0)) goto error_init_another_hash; spider_alloc_calc_mem_init(trx->trx_another_conn_hash, SPD_MID_GET_TRX_3); spider_alloc_calc_mem( @@ -1191,11 +1189,9 @@ SPIDER_TRX *spider_get_trx( trx->trx_another_conn_hash.array.max_element * trx->trx_another_conn_hash.array.size_of_element); - if ( - 
my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_alter_table_hash, - spd_charset_utf8mb3_bin, 32, 0, 0, (my_hash_get_key) - spider_alter_tbl_get_key, 0, 0) - ) + if (my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_alter_table_hash, + spd_charset_utf8mb3_bin, 32, 0, 0, + spider_alter_tbl_get_key, 0, 0)) goto error_init_alter_hash; spider_alloc_calc_mem_init(trx->trx_alter_table_hash, SPD_MID_GET_TRX_8); spider_alloc_calc_mem( @@ -1204,11 +1200,9 @@ SPIDER_TRX *spider_get_trx( trx->trx_alter_table_hash.array.max_element * trx->trx_alter_table_hash.array.size_of_element); - if ( - my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_ha_hash, - spd_charset_utf8mb3_bin, 32, 0, 0, (my_hash_get_key) - spider_trx_ha_get_key, 0, 0) - ) + if (my_hash_init(PSI_INSTRUMENT_ME, &trx->trx_ha_hash, + spd_charset_utf8mb3_bin, 32, 0, 0, spider_trx_ha_get_key, + 0, 0)) goto error_init_trx_ha_hash; spider_alloc_calc_mem_init(trx->trx_ha_hash, SPD_MID_GET_TRX_9); spider_alloc_calc_mem( diff --git a/strings/json_normalize.c b/strings/json_normalize.c index 2c66c712e81..a7849d95b3c 100644 --- a/strings/json_normalize.c +++ b/strings/json_normalize.c @@ -308,10 +308,9 @@ json_norm_value_string_init(struct json_norm_value *val, } -static int -json_norm_kv_comp(const struct json_norm_kv *a, - const struct json_norm_kv *b) +static int json_norm_kv_comp(const void *a_, const void *b_) { + const struct json_norm_kv *a= a_, *b= b_; return my_strnncoll(&my_charset_utf8mb4_bin, (const uchar *)a->key.str, a->key.length, (const uchar *)b->key.str, b->key.length); @@ -333,8 +332,7 @@ json_normalize_sort(struct json_norm_value *val) } my_qsort(dynamic_element(pairs, 0, struct json_norm_kv*), - pairs->elements, sizeof(struct json_norm_kv), - (qsort_cmp) json_norm_kv_comp); + pairs->elements, sizeof(struct json_norm_kv), json_norm_kv_comp); break; } case JSON_VALUE_ARRAY: diff --git a/support-files/rpm/server-prein.sh b/support-files/rpm/server-prein.sh index 393532dcf43..3d6b089366f 100644 --- a/support-files/rpm/server-prein.sh +++ b/support-files/rpm/server-prein.sh @@ -4,8 +4,8 @@ installed=`rpm -q --whatprovides mysql-server 2> /dev/null` if [ $? -eq 0 -a -n "$installed" ]; then installed=`echo "$installed"|sed -n 1p` - vendor=`rpm -q --queryformat='%''{VENDOR}' "$installed" 2>&1 | sed 's/Monty Program AB/MariaDB Foundation/'` - version=`rpm -q --queryformat='%''{VERSION}' "$installed" 2>&1` + vendor=`rpm -q --queryformat='%''{VENDOR}' "$installed" 2> /dev/null | sed 's/Monty Program AB/MariaDB Foundation/'` + version=`rpm -q --queryformat='%''{VERSION}' "$installed" 2> /dev/null` myvendor='%{mysql_vendor}' myversion='%{mysqlversion}' diff --git a/tpool/tpool_generic.cc b/tpool/tpool_generic.cc index 9ed832dd1ed..55a7f55bf7d 100644 --- a/tpool/tpool_generic.cc +++ b/tpool/tpool_generic.cc @@ -129,7 +129,7 @@ enum worker_wake_reason /* A per-worker thread structure.*/ -struct alignas(CPU_LEVEL1_DCACHE_LINESIZE) worker_data +struct worker_data { /** Condition variable to wakeup this worker.*/ std::condition_variable m_cv; @@ -156,6 +156,8 @@ struct alignas(CPU_LEVEL1_DCACHE_LINESIZE) worker_data }; int m_state; + /* Padding to avoid false sharing */ + char m_pad[CPU_LEVEL1_DCACHE_LINESIZE]; bool is_executing_task() { @@ -179,16 +181,6 @@ struct alignas(CPU_LEVEL1_DCACHE_LINESIZE) worker_data m_state(NONE), m_task_start_time() {} - - /*Define custom new/delete because of overaligned structure. 
*/ - static void *operator new(size_t size) - { - return aligned_malloc(size, CPU_LEVEL1_DCACHE_LINESIZE); - } - static void operator delete(void* p) - { - aligned_free(p); - } }; diff --git a/unittest/mysys/queues-t.c b/unittest/mysys/queues-t.c index 23cb0da1a32..ec62f58cd45 100644 --- a/unittest/mysys/queues-t.c +++ b/unittest/mysys/queues-t.c @@ -19,8 +19,10 @@ #include #include "tap.h" -int cmp(void *arg __attribute__((unused)), uchar *a, uchar *b) +int cmp(void *arg __attribute__((unused)), const void *a_, const void *b_) { + const uchar *a= a_; + const uchar *b= b_; return *a < *b ? -1 : *a > *b; }
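The bulk of this patch is one mechanical pattern: the hash get_key callbacks (account_hash_get_key, spider_conn_get_key, sphinx_get_key and the other *_get_key functions above) now take const void * parameters and convert inside the function body, so the (my_hash_get_key) function-pointer casts at the my_hash_init registration sites can simply be dropped. For readers unfamiliar with the idiom, a minimal standalone C++ sketch of the key-extraction half follows; Share, get_key_fn and keys_match are illustrative stand-ins, not MariaDB's real HASH/my_hash_init API (the hunks above show the real registration calls).

// Minimal sketch of the get_key callback pattern used throughout this patch.
// Share, get_key_fn and keys_match are made-up stand-ins; the real code
// registers callbacks of this shape with my_hash_init() instead.
#include <cstddef>
#include <cstring>
#include <iostream>

typedef char my_bool;          /* local stand-in for MariaDB's my_bool */
typedef unsigned char uchar;   /* local stand-in for MariaDB's uchar   */

struct Share
{
  const char *table_name;
  size_t table_name_length;
};

/* Generic callback type: every callback converted by this patch matches it
   exactly, so no function-pointer cast is needed when registering it. */
typedef const uchar *(*get_key_fn)(const void *entry, size_t *length, my_bool);

/* Old style (what the patch removes): a typed parameter list forced callers
   to cast the function pointer itself, e.g.
     my_hash_init(..., (my_hash_get_key) share_get_key, ...);
   New style: the callback has the generic signature and casts the object
   pointer instead of the function pointer. */
static const uchar *share_get_key(const void *entry, size_t *length, my_bool)
{
  const Share *share= static_cast<const Share *>(entry);
  *length= share->table_name_length;
  return reinterpret_cast<const uchar *>(share->table_name);
}

/* Toy consumer standing in for the hash lookup: it only knows get_key_fn. */
static bool keys_match(get_key_fn get_key, const void *a, const void *b)
{
  size_t la, lb;
  const uchar *ka= get_key(a, &la, 0);
  const uchar *kb= get_key(b, &lb, 0);
  return la == lb && memcmp(ka, kb, la) == 0;
}

int main()
{
  Share s1= {"t1", 2}, s2= {"t1", 2};
  std::cout << keys_match(share_get_key, &s1, &s2) << '\n';  /* prints 1 */
  return 0;
}

The design point is that the void-pointer conversion now happens inside the callback, where the compiler can check both types, rather than through a function-pointer cast that silently changes the call signature.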
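The comparator half of the patch (compare_huff_elements, compare_tree, fakecmp, queue_key_cmp, json_norm_kv_comp, spider_compare_for_sort, and cmp in queues-t.c) follows the same rule: the element pointers become const void *, the cast moves inside the function, and the (qsort_cmp)/(qsort2_cmp) casts at the my_qsort/my_qsort2/init_queue call sites disappear, as the json_normalize.c and sort.c hunks show. Below is a small self-contained analogue of the two-argument case using the standard qsort; SortEntry and by_weight_desc are made-up names, and std::qsort merely stands in for my_qsort.

// Standalone analogue of the const void * comparator pattern.
// SortEntry and by_weight_desc are illustrative; std::qsort stands in for
// my_qsort here, taking an int(const void *, const void *) comparator.
#include <cstdlib>
#include <iostream>

struct SortEntry
{
  int weight;
  const char *name;
};

/* Descending by weight, mirroring the shape of fakecmp() and
   spider_compare_for_sort() above: cast the void pointers to the element
   type inside the comparator, then compare fields. */
static int by_weight_desc(const void *a_, const void *b_)
{
  const SortEntry *a= static_cast<const SortEntry *>(a_);
  const SortEntry *b= static_cast<const SortEntry *>(b_);
  return a->weight < b->weight ? 1 : a->weight > b->weight ? -1 : 0;
}

int main()
{
  SortEntry rows[]= {{2, "b"}, {7, "a"}, {5, "c"}};
  std::qsort(rows, 3, sizeof(SortEntry), by_weight_desc);  /* no cast needed */
  for (const SortEntry &r : rows)
    std::cout << r.weight << ' ' << r.name << '\n';        /* 7 a, 5 c, 2 b */
  return 0;
}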