From 888663ce12647c5aefee5e18accd80843d726741 Mon Sep 17 00:00:00 2001
From: Vladislav Vaintroub
Date: Tue, 3 Jun 2025 10:32:22 +0200
Subject: [PATCH 01/61] MDEV-36280 ALTER TABLE with DEFAULT NEXTVAL(sequence)
 fails due to insufficient grants

Defer privilege checking until fix_fields. This way ALTER will behave
consistently with CREATE, and require the same privileges (SELECT/INSERT)
on the sequence column.
---
 mysql-test/suite/sql_sequence/grant.result | 13 +++++++++++++
 mysql-test/suite/sql_sequence/grant.test   | 16 ++++++++++++++++
 sql/sql_acl.cc                             | 14 +++++++++++---
 3 files changed, 40 insertions(+), 3 deletions(-)

diff --git a/mysql-test/suite/sql_sequence/grant.result b/mysql-test/suite/sql_sequence/grant.result
index fc3421efcb6..d631772c740 100644
--- a/mysql-test/suite/sql_sequence/grant.result
+++ b/mysql-test/suite/sql_sequence/grant.result
@@ -97,6 +97,19 @@ ERROR 42000: SELECT, INSERT command denied to user 'u'@'localhost' for table `my
 disconnect con1;
 connection default;
 drop user u;
+create user u_alter;
+create table t1 (id int);
+grant alter on t1 to u_alter;
+connect con_alter,localhost,u_alter,,mysqltest_1;
+alter table t1 modify id int default nextval(s1);
+ERROR 42000: SELECT, INSERT command denied to user 'u_alter'@'localhost' for table `mysqltest_1`.`s1`
+connection default;
+grant insert, select on s1 to u_alter;
+connection con_alter;
+alter table t1 modify id int default nextval(s1);
+disconnect con_alter;
+connection default;
+drop user u_alter;
 drop database mysqltest_1;
 #
 # End of 10.11 tests
diff --git a/mysql-test/suite/sql_sequence/grant.test b/mysql-test/suite/sql_sequence/grant.test
index c205bd34223..8c56de16525 100644
--- a/mysql-test/suite/sql_sequence/grant.test
+++ b/mysql-test/suite/sql_sequence/grant.test
@@ -106,6 +106,22 @@ create table t1 (a int not null default(nextval(s1)),
 --connection default
 drop user u;
 
+# ALTER for table with DEFAULT NEXTVAL(seq) column needs INSERT/SELECT on seq
+# just like CREATE does in the example above
+create user u_alter;
+create table t1 (id int);
+grant alter on t1 to u_alter;
+--connect(con_alter,localhost,u_alter,,mysqltest_1)
+--error ER_TABLEACCESS_DENIED_ERROR
+alter table t1 modify id int default nextval(s1);
+--connection default
+grant insert, select on s1 to u_alter;
+--connection con_alter
+alter table t1 modify id int default nextval(s1);
+--disconnect con_alter
+--connection default
+drop user u_alter;
+
 #
 # Cleanup
 #
diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc
index b61f18c41cc..422ea6b008b 100644
--- a/sql/sql_acl.cc
+++ b/sql/sql_acl.cc
@@ -8347,9 +8347,17 @@ bool check_grant(THD *thd, privilege_t want_access, TABLE_LIST *tables,
       Direct SELECT of a sequence table doesn't set t_ref->sequence, so
       privileges will be checked normally, as for any table.
     */
-    if (t_ref->sequence &&
-        !(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL)))
-      continue;
+    if (t_ref->sequence)
+    {
+      if (!(want_access & ~(SELECT_ACL | INSERT_ACL | UPDATE_ACL | DELETE_ACL)))
+        continue;
+      /*
+        If it is ALTER..SET DEFAULT= nextval(sequence), also defer checks
+        until ::fix_fields().
+      */
+      if (tl != tables && want_access == ALTER_ACL)
+        continue;
+    }
 
       const ACL_internal_table_access *access=
         get_cached_table_access(&t_ref->grant.m_internal,

From 6ec57588bd822182cc4095a2f5c2f1446569c0fd Mon Sep 17 00:00:00 2001
From: Thirunarayanan Balathandayuthapani
Date: Wed, 11 Jun 2025 20:47:43 +0530
Subject: [PATCH 02/61] MDEV-30363 InnoDB: Failing assertion: trx->error_state
 == DB_SUCCESS in que_run_threads

Problem:
=========
- During truncation of a fulltext table, InnoDB creates the table and
inserts the default fts config values into the fulltext common config
table using the create table transaction.
- Before committing the create table transaction, InnoDB updates the
dictionary by loading the stopwords into the fts cache and writes the
stopword configuration into the fulltext common config table in a
separate transaction. This leads to a lock wait timeout error and
rolls back the transaction.
- But TRUNCATE TABLE holds dict_sys.lock, and the rollback also tries
to acquire dict_sys.lock. This leads to an assertion failure during
rollback.

Solution:
=========
ha_innobase::truncate(): Commit the create table transaction before
updating the dictionary.
---
 .../suite/innodb_fts/r/innodb_fts_misc_1.result     | 12 ++++++++++++
 .../suite/innodb_fts/t/innodb_fts_misc_1.test       | 13 +++++++++++++
 storage/innobase/handler/ha_innodb.cc               |  2 +-
 3 files changed, 26 insertions(+), 1 deletion(-)

diff --git a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
index 52cbede7314..52bd819286f 100644
--- a/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
+++ b/mysql-test/suite/innodb_fts/r/innodb_fts_misc_1.result
@@ -993,3 +993,15 @@ FTS_DOC_ID f1 f2
 4294967298 txt bbb
 100000000000 aaa bbb
 DROP TABLE t1;
+#
+# MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS
+# in que_run_threads
+#
+CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb;
+SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword';
+CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci,
+FULLTEXT (t))engine=innodb;
+TRUNCATE TABLE t;
+DROP TABLE t;
+DROP TABLE server_stopword;
+SET GLOBAL innodb_ft_server_stopword_table= default;
diff --git a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
index 4eaf5b2e0bd..7f2c21ee404 100644
--- a/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
+++ b/mysql-test/suite/innodb_fts/t/innodb_fts_misc_1.test
@@ -967,3 +967,16 @@ CREATE FULLTEXT INDEX i ON t1 (f2);
 SELECT * FROM t1 WHERE match(f2) against("bbb");
 # Cleanup
 DROP TABLE t1;
+
+--echo #
+--echo # MDEV-30363 Failing assertion: trx->error_state == DB_SUCCESS
+--echo # in que_run_threads
+--echo #
+CREATE TABLE server_stopword (value VARCHAR(1))engine=innodb;
+SET GLOBAL innodb_ft_server_stopword_table='test/server_stopword';
+CREATE TABLE t (t VARCHAR(1) COLLATE utf8_unicode_ci,
+	FULLTEXT (t))engine=innodb;
+TRUNCATE TABLE t;
+DROP TABLE t;
+DROP TABLE server_stopword;
+SET GLOBAL innodb_ft_server_stopword_table= default;
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 7e8eef467cf..3f84070064f 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -14088,10 +14088,10 @@ int ha_innobase::truncate()
 				      trx);
 	if (!err) {
+		trx->commit(deleted);
 		m_prebuilt->table->acquire();
 		create_table_info_t::create_table_update_dict(m_prebuilt->table,
 							      m_user_thd,
 							      info, *table);
-		trx->commit(deleted);
 	} else {

From 
cda1826201cadd8f0d3a2bcbdbfd1ed7118de4b5 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 21 May 2025 14:56:16 +0200 Subject: [PATCH 03/61] MDEV-36852 Table definition gets corrupt after adding unique hash key --- mysql-test/main/long_unique_bugs.result | 26 +++++++++++++++++++++++++ mysql-test/main/long_unique_bugs.test | 12 ++++++++++++ sql/sql_table.cc | 5 +++-- sql/table.cc | 1 + 4 files changed, 42 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index 6bf3a7dcc44..3f65fb18014 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -809,3 +809,29 @@ hex(c1) hex(c2) c3 hex(c4) NULL NULL NULL NULL drop table t1; # End of 10.5 tests +# +# MDEV-36852 Table definition gets corrupt after adding unique hash key +# +create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam; +Warnings: +Note 1071 Specified key was too long; max key length is 1000 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text DEFAULT NULL, + `b` int(11) DEFAULT NULL, + KEY `a` (`a`(1000)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +alter table t1 add unique(a), add key(a); +Warnings: +Note 1071 Specified key was too long; max key length is 1000 bytes +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` text DEFAULT NULL, + `b` int(11) DEFAULT NULL, + UNIQUE KEY `a` (`a`) USING HASH, + KEY `a_2` (`a`(1000)) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index b1daf97d194..d8d3462c1da 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -772,3 +772,15 @@ select hex(c1), hex(c2), c3, hex(c4) from t1; drop table t1; --echo # End of 10.5 tests + +--echo # +--echo # MDEV-36852 Table definition gets corrupt after adding unique hash key +--echo # + +create table t1 (a text, b int, foreign key(a) references x(x)) engine=myisam; +show create table t1; +alter table t1 add unique(a), add key(a); +show create table t1; +drop table t1; + +--echo # End of 10.6 tests diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 3a963403d65..69ddea8e5e9 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3337,8 +3337,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, Create_field *auto_increment_key= 0; Key_part_spec *column; - bool is_hash_field_needed= key->key_create_info.algorithm - == HA_KEY_ALG_LONG_HASH; if (key->type == Key::IGNORE_KEY) { /* ignore redundant keys */ @@ -3349,6 +3347,9 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, break; } + bool is_hash_field_needed= key->key_create_info.algorithm + == HA_KEY_ALG_LONG_HASH; + if (key_check_without_overlaps(thd, create_info, alter_info, *key)) DBUG_RETURN(true); diff --git a/sql/table.cc b/sql/table.cc index 9bbc29d4ecf..119d949388a 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -2838,6 +2838,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write, hash_keypart->fieldnr= hash_field_used_no + 1; hash_field= share->field[hash_field_used_no]; hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs + DBUG_ASSERT(hash_field->invisible == INVISIBLE_FULL); keyinfo->flags|= HA_NOSAME; share->virtual_fields++; share->stored_fields--; From e706324205ac5a9258c2d5e52b4c8ec22598a07d Mon Sep 17 00:00:00 2001 From: 
Thirunarayanan Balathandayuthapani
Date: Mon, 23 Jun 2025 15:48:13 +0530
Subject: [PATCH 04/61] MDEV-35863 innodb.doublewrite_debug test case fails to
 start the server

Problem:
=======
- Two failures occur in this test case:
(1) set global innodb_buf_flush_list_now=1 doesn't make sure that
pages are being flushed.
(2) The InnoDB page cleaner thread aborts while writing the checkpoint
information.
The problem is that when InnoDB startup aborts, InnoDB changes the
shutdown state to SRV_SHUTDOWN_EXIT_THREADS. By changing the shutdown
state, InnoDB doesn't advance log_sys.lsn (avoids fil_names_clear()).
After InnoDB shutdown (innodb_shutdown()) is initiated, the shutdown
state is changed back to SRV_SHUTDOWN_INITIATED. This leads the page
cleaner thread to fail with the assertion
ut_ad(srv_shutdown_state > SRV_SHUTDOWN_INITIATED) in
log_write_checkpoint_info()

Solution:
=========
(1) To avoid failure (1), set the variables
innodb_max_dirty_pages_pct_lwm and innodb_max_dirty_pages_pct to 0.
Also make sure that InnoDB doesn't have any dirty pages in the buffer
pool by adding a wait_condition.
(2) Avoid changing srv_shutdown_state to SRV_SHUTDOWN_EXIT_THREADS
when InnoDB startup aborts
---
 .../suite/innodb/r/doublewrite_debug.result      |  2 +-
 mysql-test/suite/innodb/t/doublewrite_debug.test |  8 +++++++-
 storage/innobase/srv/srv0start.cc                | 15 +++++++++++----
 3 files changed, 19 insertions(+), 6 deletions(-)

diff --git a/mysql-test/suite/innodb/r/doublewrite_debug.result b/mysql-test/suite/innodb/r/doublewrite_debug.result
index a743217f34e..e1d2b0137e1 100644
--- a/mysql-test/suite/innodb/r/doublewrite_debug.result
+++ b/mysql-test/suite/innodb/r/doublewrite_debug.result
@@ -26,13 +26,13 @@ SET GLOBAL innodb_fast_shutdown = 0;
 # restart: --debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_flush_sync=0
 begin;
 insert into t1 values (6, repeat('%', 400));
+SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0;
 # Make the first page dirty for system tablespace
 set global innodb_saved_page_number_debug = 0;
 set global innodb_fil_make_page_dirty_debug = 0;
 # Make the second page dirty for system tablespace
 set global innodb_saved_page_number_debug = 1;
 set global innodb_fil_make_page_dirty_debug = 0;
-set global innodb_buf_flush_list_now = 1;
 # Kill the server
 # Make the 1st page (page_no=0) and 2nd page (page_no=1)
 # of the system tablespace all zeroes.
diff --git a/mysql-test/suite/innodb/t/doublewrite_debug.test b/mysql-test/suite/innodb/t/doublewrite_debug.test index b207823e3d1..e31cf34dbc1 100644 --- a/mysql-test/suite/innodb/t/doublewrite_debug.test +++ b/mysql-test/suite/innodb/t/doublewrite_debug.test @@ -51,6 +51,8 @@ let $restart_parameters=--debug_dbug=+d,ib_log_checkpoint_avoid_hard --innodb_fl begin; insert into t1 values (6, repeat('%', 400)); +SET GLOBAL innodb_max_dirty_pages_pct_lwm=0, innodb_max_dirty_pages_pct=0; + --echo # Make the first page dirty for system tablespace set global innodb_saved_page_number_debug = 0; set global innodb_fil_make_page_dirty_debug = 0; @@ -59,7 +61,11 @@ set global innodb_fil_make_page_dirty_debug = 0; set global innodb_saved_page_number_debug = 1; set global innodb_fil_make_page_dirty_debug = 0; -set global innodb_buf_flush_list_now = 1; +let $wait_condition = +SELECT variable_value = 0 +FROM information_schema.global_status +WHERE variable_name = 'INNODB_BUFFER_POOL_PAGES_DIRTY'; +--source include/wait_condition.inc --let CLEANUP_IF_CHECKPOINT=drop table t1, unexpected_checkpoint; --source ../include/no_checkpoint_end.inc diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index ff1a363565a..322f3bfca51 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -843,12 +843,19 @@ srv_open_tmp_tablespace(bool create_new_db) return(err); } -/** Shutdown background threads, except the page cleaner. */ -static void srv_shutdown_threads() +/** Shutdown background threads, except the page cleaner. +@param init_abort set to true when InnoDB startup aborted */ +static void srv_shutdown_threads(bool init_abort= false) { ut_ad(!srv_undo_sources); srv_master_timer.reset(); - srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS; + /* In case of InnoDB start up aborted, Don't change + the srv_shutdown_state. Because innodb_shutdown() + does call innodb_preshutdown() which changes the + srv_shutdown_state back to SRV_SHUTDOWN_INITIATED */ + if (!init_abort) { + srv_shutdown_state = SRV_SHUTDOWN_EXIT_THREADS; + } if (purge_sys.enabled()) { srv_purge_shutdown(); @@ -918,7 +925,7 @@ srv_init_abort_low( } srv_shutdown_bg_undo_sources(); - srv_shutdown_threads(); + srv_shutdown_threads(true); return(err); } From 3c67d73aad76fc47200de314ec659af67278bd8e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 26 Jun 2025 10:05:36 +0300 Subject: [PATCH 05/61] MDEV-36482: Make libaio work WITH_MSAN=ON As noted in commit c36834c8324974f26770d64192898f4f45d9f772 the MemorySanitizer instrumented builds so far only work with the synchronous I/O interface (innodb_use_native_aio=OFF). It is not that hard to make WITH_MSAN=ON work with the Linux libaio, without even instrumenting that library itself. aio_linux::getevent_thread_routine(): Declare the buffer that is returned by my_getevents() as initialized. Declare the data returned by a successful aio_opcode::AIO_PREAD as initialized. 
Reviewed by: Daniel Black --- tpool/aio_linux.cc | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tpool/aio_linux.cc b/tpool/aio_linux.cc index 0955a6dded4..993d06f11bb 100644 --- a/tpool/aio_linux.cc +++ b/tpool/aio_linux.cc @@ -15,6 +15,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111 - 1301 USA*/ #include "tpool_structs.h" #include "tpool.h" +#include "my_valgrind.h" # include # include @@ -117,6 +118,9 @@ class aio_linux final : public aio abort(); goto end; } +#if __has_feature(memory_sanitizer) + MEM_MAKE_DEFINED(events, ret * sizeof *events); +#endif for (int i= 0; i < ret; i++) { const io_event &event= events[i]; @@ -128,6 +132,10 @@ class aio_linux final : public aio } else { +#if __has_feature(memory_sanitizer) + if (iocb->m_opcode == aio_opcode::AIO_PREAD) + MEM_MAKE_DEFINED(iocb->m_buffer, event.res); +#endif iocb->m_ret_len= event.res; iocb->m_err= 0; finish_synchronous(iocb); From c3578720e6b5ef82482a8b4ed57db54c95e34633 Mon Sep 17 00:00:00 2001 From: Monty Date: Sat, 28 Jun 2025 14:32:31 +0300 Subject: [PATCH 06/61] Removed safemalloc warnings from myisamchk --version --- storage/myisam/myisamchk.c | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index 3e312269b7f..1c52f89021e 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -129,6 +129,17 @@ int main(int argc, char **argv) #endif } /* main */ + +/* Free memory and exit */ + +void __attribute__ ((noreturn)) my_exit(int exit_state) +{ + free_defaults(default_argv); + my_end(MY_CHECK_ERROR); + exit(exit_state); +} + + enum options_mc { OPT_CHARSETS_DIR=256, OPT_SET_COLLATION,OPT_START_CHECK_POS, OPT_CORRECT_CHECKSUM, OPT_CREATE_MISSING_KEYS, OPT_KEY_BUFFER_SIZE, @@ -660,7 +671,7 @@ get_one_option(const struct my_option *opt, fprintf(stderr, "The value of the sort key is bigger than max key: %d.\n", MI_MAX_KEY); - exit(1); + my_exit(1); } } break; @@ -694,7 +705,9 @@ get_one_option(const struct my_option *opt, break; case 'V': print_version(); - exit(0); + free_defaults(default_argv); + my_end(MY_CHECK_ERROR); + my_exit(0); case OPT_CORRECT_CHECKSUM: if (argument == disabled_my_option) check_param.testflag&= ~T_CALC_CHECKSUM; @@ -711,7 +724,7 @@ get_one_option(const struct my_option *opt, FIND_TYPE_BASIC)) <= 0) { fprintf(stderr, "Invalid value of stats_method: %s.\n", argument); - exit(1); + my_exit(1); } switch (method-1) { case 0: @@ -735,10 +748,10 @@ get_one_option(const struct my_option *opt, #endif case 'H': my_print_help(my_long_options); - exit(0); + my_exit(0); case '?': usage(); - exit(0); + my_exit(0); } return 0; } @@ -754,7 +767,7 @@ static void get_options(register int *argc,register char ***argv) check_param.testflag|=T_WRITE_LOOP; if ((ho_error=handle_options(argc, argv, my_long_options, get_one_option))) - exit(ho_error); + my_exit(ho_error); /* If using repair, then update checksum if one uses --update-state */ if ((check_param.testflag & T_UPDATE_STATE) && @@ -764,7 +777,7 @@ static void get_options(register int *argc,register char ***argv) if (*argc == 0) { usage(); - exit(-1); + my_exit(-1); } if ((check_param.testflag & T_UNPACK) && @@ -773,7 +786,7 @@ static void get_options(register int *argc,register char ***argv) (void) fprintf(stderr, "%s: --unpack can't be used with --quick or --sort-records\n", my_progname_short); - exit(1); + my_exit(1); } if ((check_param.testflag & T_READONLY) && (check_param.testflag & @@ -783,11 +796,11 @@ 
static void get_options(register int *argc,register char ***argv) (void) fprintf(stderr, "%s: Can't use --readonly when repairing or sorting\n", my_progname_short); - exit(1); + my_exit(1); } if (init_tmpdir(&myisamchk_tmpdir, opt_tmpdir)) - exit(1); + my_exit(1); check_param.tmpdir=&myisamchk_tmpdir; check_param.key_cache_block_size= opt_key_cache_block_size; @@ -795,7 +808,7 @@ static void get_options(register int *argc,register char ***argv) if (set_collation_name) if (!(set_collation= get_charset_by_name(set_collation_name, MYF(MY_UTF8_IS_UTF8MB3 | MY_WME)))) - exit(1); + my_exit(1); myisam_block_size=(uint) 1 << my_bit_log2_uint64(opt_myisam_block_size); return; From f41acb555d246aa3266604573a765aa0f161ec34 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Mon, 23 Jun 2025 08:56:00 +0300 Subject: [PATCH 07/61] MDEV-35523 : Server crashes with "WSREP: Unknown writeset version: -1" Cluster configuration was incorrect e.g. wsrep_node_address was missing. Therefore, Galera replication was not properly initialized and TOI is not supported. Fix is to check when user tries to start Galera replication with wsrep_on=ON that Galera replication is properly initialized and node is ready to receive operations. If Galera replication is not properly initialized return a error. Signed-off-by: Julius Goryavsky --- mysql-test/suite/wsrep/r/MDEV-20625.result | 1 + mysql-test/suite/wsrep/r/wsrep_off.result | 6 ++++++ mysql-test/suite/wsrep/t/MDEV-20625.test | 1 + mysql-test/suite/wsrep/t/wsrep_off.cnf | 17 +++++++++++++++++ mysql-test/suite/wsrep/t/wsrep_off.test | 8 ++++++++ sql/wsrep_mysqld.cc | 10 +++++++++- sql/wsrep_var.cc | 9 +++++++++ 7 files changed, 51 insertions(+), 1 deletion(-) create mode 100644 mysql-test/suite/wsrep/r/wsrep_off.result create mode 100644 mysql-test/suite/wsrep/t/wsrep_off.cnf create mode 100644 mysql-test/suite/wsrep/t/wsrep_off.test diff --git a/mysql-test/suite/wsrep/r/MDEV-20625.result b/mysql-test/suite/wsrep/r/MDEV-20625.result index 3e2b621c8f9..d5e9df07374 100644 --- a/mysql-test/suite/wsrep/r/MDEV-20625.result +++ b/mysql-test/suite/wsrep/r/MDEV-20625.result @@ -1,4 +1,5 @@ SET GLOBAL wsrep_on=ON; +ERROR HY000: Galera replication not supported SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'; Variable_name Value wsrep_cluster_size 0 diff --git a/mysql-test/suite/wsrep/r/wsrep_off.result b/mysql-test/suite/wsrep/r/wsrep_off.result new file mode 100644 index 00000000000..95cd804e35c --- /dev/null +++ b/mysql-test/suite/wsrep/r/wsrep_off.result @@ -0,0 +1,6 @@ +SET GLOBAL wsrep_on=ON; +ERROR HY000: Galera replication not supported +REPAIR TABLE performance_schema.setup_objects; +Table Op Msg_type Msg_text +performance_schema.setup_objects repair note The storage engine for the table doesn't support repair +SET GLOBAL wsrep_on=OFF; diff --git a/mysql-test/suite/wsrep/t/MDEV-20625.test b/mysql-test/suite/wsrep/t/MDEV-20625.test index 2a537fe432e..7dcb622fde0 100644 --- a/mysql-test/suite/wsrep/t/MDEV-20625.test +++ b/mysql-test/suite/wsrep/t/MDEV-20625.test @@ -5,6 +5,7 @@ --source include/have_wsrep_provider.inc --source include/have_binlog_format_row.inc +--error ER_GALERA_REPLICATION_NOT_SUPPORTED SET GLOBAL wsrep_on=ON; SHOW GLOBAL STATUS LIKE 'wsrep_cluster_size'; SET GLOBAL wsrep_on=OFF; diff --git a/mysql-test/suite/wsrep/t/wsrep_off.cnf b/mysql-test/suite/wsrep/t/wsrep_off.cnf new file mode 100644 index 00000000000..77eae0c4acd --- /dev/null +++ b/mysql-test/suite/wsrep/t/wsrep_off.cnf @@ -0,0 +1,17 @@ +# Use default setting for mysqld processes 
+!include include/default_mysqld.cnf
+
+[mysqld]
+wsrep-on=OFF
+wsrep-provider=@ENV.WSREP_PROVIDER
+log-bin
+binlog-format=row
+loose-wsrep_cluster_address=gcomm://
+loose-wsrep_node_address='127.0.0.1:@mysqld.1.#galera_port'
+loose-wsrep-incoming-address=127.0.0.1:@mysqld.1.port
+
+[mysqld.1]
+wsrep-on=OFF
+#galera_port=@OPT.port
+#ist_port=@OPT.port
+#sst_port=@OPT.port
diff --git a/mysql-test/suite/wsrep/t/wsrep_off.test b/mysql-test/suite/wsrep/t/wsrep_off.test
new file mode 100644
index 00000000000..27e64c92e93
--- /dev/null
+++ b/mysql-test/suite/wsrep/t/wsrep_off.test
@@ -0,0 +1,8 @@
+--source include/have_innodb.inc
+--source include/have_wsrep_provider.inc
+--source include/have_binlog_format_row.inc
+
+--error ER_GALERA_REPLICATION_NOT_SUPPORTED
+SET GLOBAL wsrep_on=ON;
+REPAIR TABLE performance_schema.setup_objects;
+SET GLOBAL wsrep_on=OFF;
diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc
index 010cab56614..4ba3364267e 100644
--- a/sql/wsrep_mysqld.cc
+++ b/sql/wsrep_mysqld.cc
@@ -2780,9 +2780,17 @@ static int wsrep_TOI_begin(THD *thd, const char *db, const char *table,
   WSREP_DEBUG("TOI Begin: %s", wsrep_thd_query(thd));
   DEBUG_SYNC(thd, "wsrep_before_toi_begin");
 
-  if (wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false)
+  if (!wsrep_ready ||
+      wsrep_can_run_in_toi(thd, db, table, table_list, create_info) == false)
   {
     WSREP_DEBUG("No TOI for %s", wsrep_thd_query(thd));
+    if (!wsrep_ready)
+    {
+      my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0));
+      push_warning_printf(thd, Sql_state_errno_level::WARN_LEVEL_WARN,
+                          ER_GALERA_REPLICATION_NOT_SUPPORTED,
+                          "Galera cluster is not ready to execute replication");
+    }
     return 1;
   }
 
diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc
index 3d381afa704..152563a061d 100644
--- a/sql/wsrep_var.cc
+++ b/sql/wsrep_var.cc
@@ -27,6 +27,7 @@
 #include
 #include "wsrep_trans_observer.h"
 #include "wsrep_server_state.h"
+#include "wsrep_mysqld.h"
 
 ulong wsrep_reject_queries;
 
@@ -123,6 +124,14 @@ bool wsrep_on_update (sys_var *self, THD* thd, enum_var_type var_type)
     saved_wsrep_on= false;
   }
 
+  if (!wsrep_ready_get())
+  {
+    my_error(ER_GALERA_REPLICATION_NOT_SUPPORTED, MYF(0));
+    WSREP_INFO("Failed to start Galera replication. Please check your "
+               "configuration.");
+    saved_wsrep_on= false;
+  }
+
   free(tmp);
   mysql_mutex_lock(&LOCK_global_system_variables);
 }

From fd1266a9803325dafdebbff641c128da9dacc8b3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?=
Date: Wed, 11 Jun 2025 15:57:42 +0300
Subject: [PATCH 08/61] MDEV-34761 : Assertion `client_state_.mode() ==
 wsrep::client_state::m_local' failed in int
 wsrep::transaction::after_statement(wsrep::unique_lock&)

@@enforce_storage_engine is a local setting, and there is no knowledge
of how other nodes are configured. The statement CREATE TABLE xxx
ENGINE=yyy is replicated as it is, and if required engine != enforced
engine, it could lead to inconsistent storage engines being used in
the cluster.

The fix is to return an error and a warning if the required engine is
not the same as the enforced engine.
Signed-off-by: Julius Goryavsky --- .../galera/r/enforce_storage_engine2.result | 12 ++------- mysql-test/suite/galera/r/galera_aria.result | 25 +++++++++++++++++++ .../galera/t/enforce_storage_engine2.test | 12 +++++++-- mysql-test/suite/galera/t/galera_aria.test | 19 ++++++++++++++ sql/sql_table.cc | 17 +++++++++++++ 5 files changed, 73 insertions(+), 12 deletions(-) create mode 100644 mysql-test/suite/galera/r/galera_aria.result create mode 100644 mysql-test/suite/galera/t/galera_aria.test diff --git a/mysql-test/suite/galera/r/enforce_storage_engine2.result b/mysql-test/suite/galera/r/enforce_storage_engine2.result index b652eacd4f4..9239a4fdb93 100644 --- a/mysql-test/suite/galera/r/enforce_storage_engine2.result +++ b/mysql-test/suite/galera/r/enforce_storage_engine2.result @@ -7,23 +7,15 @@ connection node_1; connection node_1; CREATE TABLE t1(i INT) ENGINE=INNODB; CREATE TABLE t2(i INT) ENGINE=MYISAM; -Warnings: -Note 1266 Using storage engine InnoDB for table 't2' -Note 1266 Using storage engine InnoDB for table 't2' +ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement connection node_2; SHOW TABLES; Tables_in_test t1 -t2 SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `i` int(11) DEFAULT NULL ) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci -SHOW CREATE TABLE t2; -Table Create Table -t2 CREATE TABLE `t2` ( - `i` int(11) DEFAULT NULL -) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci -DROP TABLE t1, t2; +DROP TABLE t1; # End of tests diff --git a/mysql-test/suite/galera/r/galera_aria.result b/mysql-test/suite/galera/r/galera_aria.result new file mode 100644 index 00000000000..435a0525a0f --- /dev/null +++ b/mysql-test/suite/galera/r/galera_aria.result @@ -0,0 +1,25 @@ +connection node_2; +connection node_1; +set session sql_mode=''; +SET @@enforce_storage_engine=INNODB; +CREATE TABLE t1 (c INT ) ENGINE=ARIA; +ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +SHOW WARNINGS; +Level Code Message +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set +CREATE TABLE t1 (c INT ); +DROP TABLE t1; +CREATE TABLE t1 (c INT ) ENGINE=INNODB; +DROP TABLE t1; +SET @@enforce_storage_engine=ARIA; +CREATE TABLE t1 (c INT ) ENGINE=INNODB; +ERROR HY000: The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +SHOW WARNINGS; +Level Code Message +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set +Error 1290 The MariaDB server is running with the ENFORCE_STORAGE_ENGINE option so it cannot execute this statement +Note 1290 Do not use ENGINE=x when @@enforce_storage_engine is set diff --git a/mysql-test/suite/galera/t/enforce_storage_engine2.test b/mysql-test/suite/galera/t/enforce_storage_engine2.test index 7a822bced59..dd52ea9e239 100644 --- a/mysql-test/suite/galera/t/enforce_storage_engine2.test +++ b/mysql-test/suite/galera/t/enforce_storage_engine2.test @@ -1,5 +1,6 @@ --source include/galera_cluster.inc --source 
include/have_innodb.inc
+--source include/have_aria.inc
 
 --echo #
 --echo # MDEV-9312: storage engine not enforced during galera cluster
 --echo #
 --connection node_1
 CREATE TABLE t1(i INT) ENGINE=INNODB;
+#
+# This is no longer supported because enforce_storage_engine
+# is a local setting and the storage engine finally used
+# on other members of the cluster depends on their configuration.
+# Currently, there is no way to query a remote node's
+# configuration.
+#
+--error ER_OPTION_PREVENTS_STATEMENT
 CREATE TABLE t2(i INT) ENGINE=MYISAM;
 
 --connection node_2
 SHOW TABLES;
 SHOW CREATE TABLE t1;
-SHOW CREATE TABLE t2;
 
 # Cleanup
-DROP TABLE t1, t2;
+DROP TABLE t1;
 
 --echo # End of tests
diff --git a/mysql-test/suite/galera/t/galera_aria.test b/mysql-test/suite/galera/t/galera_aria.test
new file mode 100644
index 00000000000..24dd2e5048b
--- /dev/null
+++ b/mysql-test/suite/galera/t/galera_aria.test
@@ -0,0 +1,19 @@
+--source include/galera_cluster.inc
+--source include/have_aria.inc
+--source include/log_bin.inc
+
+set session sql_mode='';
+SET @@enforce_storage_engine=INNODB;
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (c INT ) ENGINE=ARIA;
+SHOW WARNINGS;
+
+CREATE TABLE t1 (c INT );
+DROP TABLE t1;
+CREATE TABLE t1 (c INT ) ENGINE=INNODB;
+DROP TABLE t1;
+
+SET @@enforce_storage_engine=ARIA;
+--error ER_OPTION_PREVENTS_STATEMENT
+CREATE TABLE t1 (c INT ) ENGINE=INNODB;
+SHOW WARNINGS;
diff --git a/sql/sql_table.cc b/sql/sql_table.cc
index 69ddea8e5e9..4014f4101dd 100644
--- a/sql/sql_table.cc
+++ b/sql/sql_table.cc
@@ -12120,6 +12120,23 @@ bool check_engine(THD *thd, const char *db_name,
       my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "NO_ENGINE_SUBSTITUTION");
       DBUG_RETURN(TRUE);
     }
+#ifdef WITH_WSREP
+    /* @@enforce_storage_engine is local, if the user has used
+       ENGINE=XXX we can't allow it in a cluster, in this
+       case enf_engine != new_engine. This is because the
+       original stmt is replicated as it is, including
+       ENGINE=XXX. */
+    if ((create_info->used_fields & HA_CREATE_USED_ENGINE) &&
+        WSREP(thd))
+    {
+      my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "ENFORCE_STORAGE_ENGINE");
+      push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE,
+                          ER_OPTION_PREVENTS_STATEMENT,
+                          "Do not use ENGINE=x when @@enforce_storage_engine is set");
+
+      DBUG_RETURN(TRUE);
+    }
+#endif
     *new_engine= enf_engine;
   }

From f49546068914187c5582a51a0eeedce3ccf849af Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?=
Date: Mon, 9 Jun 2025 11:00:27 +0300
Subject: [PATCH 09/61] MDEV-36968 : galera_3nodes.inconsistency_shutdown test
 occasionally hangs

Test changes only. Remove unnecessary wsrep_on=[ON|OFF] and use
sync wait instead.
Signed-off-by: Julius Goryavsky --- .../r/inconsistency_shutdown.result | 27 +++++++----- .../t/inconsistency_shutdown.test | 41 ++++++++++--------- 2 files changed, 37 insertions(+), 31 deletions(-) diff --git a/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result b/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result index cd4087e01ca..bbcad5ee4db 100644 --- a/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result +++ b/mysql-test/suite/galera_3nodes/r/inconsistency_shutdown.result @@ -32,8 +32,8 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6; UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; connection node_2; -SET wsrep_on=OFF; -SET wsrep_on=ON; +# make sure all events landed to slave queue +set wsrep_sync_wait=0; UNLOCK TABLES; SET SESSION wsrep_on = ON; SET SESSION wsrep_sync_wait = 15; @@ -56,7 +56,8 @@ f1 f2 7 1 8 1 connection node_2; -SET GLOBAL wsrep_on=OFF; +# Gracefully restart the node +set wsrep_on=OFF; # restart DROP TABLE t1; connection node_1; @@ -73,11 +74,15 @@ INSERT INTO t1 VALUES (8, 0); COMMIT; CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT); connection node_2; +# Allow 1K slave queue without flow control SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K'; +# Introduce inconsistency SET wsrep_on=OFF; DROP TABLE t2; SET wsrep_on=ON; +# set up sync point to ensure DROP TABLE replication order below SET GLOBAL wsrep_provider_options = 'dbug=d,after_replicate_sync'; +# Build up slave queue: LOCK TABLES t1 READ; connection node_1; UPDATE t1 SET f2 = 1 WHERE f1 = 1; @@ -86,18 +91,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3; UPDATE t1 SET f2 = 1 WHERE f1 = 4; UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */; +# interleave a failing statement connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2; connection node_2a; DROP TABLE t2;; +# make sure DROP TABLE from above has replicated connection node_2; -SET wsrep_on=OFF; +set wsrep_sync_wait=0; "Wait for DROP TABLE to replicate" SET SESSION wsrep_on = 0; -SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; SET GLOBAL wsrep_provider_options = 'signal=after_replicate_sync'; SET GLOBAL wsrep_provider_options = 'dbug='; "DROP TABLE replicated" -SET wsrep_on=ON; connection node_1; UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */ @@ -106,8 +112,7 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 6; UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; connection node_2; -SET wsrep_on=OFF; -SET wsrep_on=ON; +# make sure all events landed to slave queue UNLOCK TABLES; connection node_2a; ERROR 42S02: Unknown table 'test.t2' @@ -128,11 +133,11 @@ f1 f2 7 1 8 1 connection node_2; -SET SESSION wsrep_on = ON; +set wsrep_on=OFF; SET SESSION wsrep_sync_wait = 15; -SET SESSION wsrep_on = ON; +# Wait for the node to shutdown replication SET SESSION wsrep_sync_wait = 15; -SET GLOBAL wsrep_on=OFF; +# Gracefully restart the node # restart DROP TABLE t1; CALL mtr.add_suppression("Can't find record in 't1'"); diff --git a/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test b/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test index 347433a6f14..dcd8a7b15ca 100644 --- a/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test +++ b/mysql-test/suite/galera_3nodes/t/inconsistency_shutdown.test @@ -33,6 +33,7 @@ SET wsrep_on=OFF; DELETE FROM t1 WHERE f1 = 2; DELETE FROM t1 WHERE f1 = 4; SET wsrep_on=ON; +--source include/galera_wait_ready.inc # Build up slave queue: # - first 8 events will be picked by slave threads @@ -51,11 +52,11 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7; 
UPDATE t1 SET f2 = 1 WHERE f1 = 8; --connection node_2 -# make sure all events landed to slave queue -SET wsrep_on=OFF; +--echo # make sure all events landed to slave queue +set wsrep_sync_wait=0; --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; --source include/wait_condition.inc -SET wsrep_on=ON; + UNLOCK TABLES; --source include/wsrep_wait_disconnect.inc # Wait for the node to shutdown replication @@ -70,8 +71,8 @@ SHOW STATUS LIKE 'wsrep_cluster_size'; SELECT * FROM t1; --connection node_2 -#Gracefully restart the node -SET GLOBAL wsrep_on=OFF; +--echo # Gracefully restart the node +set wsrep_on=OFF; --source include/shutdown_mysqld.inc --source include/start_mysqld.inc --source include/galera_wait_ready.inc @@ -98,20 +99,21 @@ COMMIT; CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 INT); --connection node_2 -# Allow 1K slave queue without flow control +--echo # Allow 1K slave queue without flow control SET GLOBAL wsrep_provider_options='gcs.fc_limit=1K'; -# Introduce inconsistency -SET wsrep_on=OFF; --let $wait_condition = SELECT COUNT(*)=1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2'; --source include/wait_condition.inc +--echo # Introduce inconsistency +SET wsrep_on=OFF; DROP TABLE t2; SET wsrep_on=ON; +--source include/galera_wait_ready.inc -# set up sync point to ensure DROP TABLE replication order below +--echo # set up sync point to ensure DROP TABLE replication order below --let galera_sync_point = after_replicate_sync --source include/galera_set_sync_point.inc -# Build up slave queue: +--echo # Build up slave queue: # - first 8 events will be picked by slave threads # - one more event will be waiting in slave queue LOCK TABLES t1 READ; @@ -123,20 +125,19 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 3; UPDATE t1 SET f2 = 1 WHERE f1 = 4; UPDATE t1 SET f2 = 2 WHERE f1 = 4; /* dependent applier */; -# interleave a failing statement +--echo # interleave a failing statement --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2 --connection node_2a --send DROP TABLE t2; -# make sure DROP TABLE from above has replicated +--echo # make sure DROP TABLE from above has replicated --connection node_2 -SET wsrep_on=OFF; +set wsrep_sync_wait=0; --echo "Wait for DROP TABLE to replicate" --source include/galera_wait_sync_point.inc --source include/galera_signal_sync_point.inc --source include/galera_clear_sync_point.inc --echo "DROP TABLE replicated" -SET wsrep_on=ON; --connection node_1 UPDATE t1 SET f2 = 3 WHERE f1 = 4; /* dependent applier */ @@ -146,11 +147,10 @@ UPDATE t1 SET f2 = 1 WHERE f1 = 7; UPDATE t1 SET f2 = 1 WHERE f1 = 8; --connection node_2 -# make sure all events landed to slave queue -SET wsrep_on=OFF; +--echo # make sure all events landed to slave queue --let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_recv_queue'; --source include/wait_condition.inc -SET wsrep_on=ON; + UNLOCK TABLES; --connection node_2a @@ -165,12 +165,13 @@ SHOW STATUS LIKE 'wsrep_cluster_size'; SELECT * FROM t1; --connection node_2 +set wsrep_on=OFF; --source include/wsrep_wait_disconnect.inc -# Wait for the node to shutdown replication +--echo # Wait for the node to shutdown replication --let $members=0 --source include/wsrep_wait_membership.inc -# Gracefully restart the node -SET GLOBAL wsrep_on=OFF; +--echo # Gracefully restart the node + --source include/shutdown_mysqld.inc --source include/start_mysqld.inc --source 
include/galera_wait_ready.inc

From 56fbc0cdd79bbbcb84ef0d3a20ffaf72267186a2 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?=
Date: Fri, 6 Jun 2025 10:42:40 +0300
Subject: [PATCH 10/61] MDEV-36953 : mysql-wsrep#198 test hangs

Test changes only.

INSERT may fail with a lock wait timeout because the other node holds
LOCK TABLE, or it may succeed if the next statement, i.e. UNLOCK
TABLES, is fast enough.

Signed-off-by: Julius Goryavsky
---
 .../suite/galera/r/mysql-wsrep#198.result     |  9 +++++++--
 .../suite/galera/t/mysql-wsrep#198.test       | 19 ++++++++++++++++---
 2 files changed, 23 insertions(+), 5 deletions(-)

diff --git a/mysql-test/suite/galera/r/mysql-wsrep#198.result b/mysql-test/suite/galera/r/mysql-wsrep#198.result
index 7759c4f1982..95fb3e67fd1 100644
--- a/mysql-test/suite/galera/r/mysql-wsrep#198.result
+++ b/mysql-test/suite/galera/r/mysql-wsrep#198.result
@@ -7,14 +7,18 @@ SELECT 1 FROM DUAL;
 1
 1
 LOCK TABLE t2 WRITE;
+connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2;
+connection node_2_ctrl;
+SET SESSION wsrep_sync_wait=0;
 connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
 connection node_2a;
 OPTIMIZE TABLE t1,t2;;
+connection node_2_ctrl;
+SET SESSION wsrep_sync_wait = 0;
 connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2;
 connection node_2b;
 REPAIR TABLE t1,t2;;
-connection node_2;
-SET SESSION wsrep_sync_wait = 0;
+connection node_2_ctrl;
 connection node_1;
 INSERT INTO t2 VALUES (1);
 connection node_2;
@@ -34,3 +38,4 @@ DROP TABLE t2;
 connection node_1;
 disconnect node_2a;
 disconnect node_2b;
+disconnect node_2_ctrl;
diff --git a/mysql-test/suite/galera/t/mysql-wsrep#198.test b/mysql-test/suite/galera/t/mysql-wsrep#198.test
index 98dea684f0d..78facd64356 100644
--- a/mysql-test/suite/galera/t/mysql-wsrep#198.test
+++ b/mysql-test/suite/galera/t/mysql-wsrep#198.test
@@ -10,21 +10,33 @@ SELECT 1 FROM DUAL;
 
 LOCK TABLE t2 WRITE;
 
+--connect node_2_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_2
+--connection node_2_ctrl
+SET SESSION wsrep_sync_wait=0;
+
 --connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
 --connection node_2a
 --send OPTIMIZE TABLE t1,t2;
 
+--connection node_2_ctrl
+SET SESSION wsrep_sync_wait = 0;
+--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%';
+--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
+--source include/wait_condition_with_debug_and_kill.inc
+
 --connect node_2b, 127.0.0.1, root, , test, $NODE_MYPORT_2
 --connection node_2b
 --send REPAIR TABLE t1,t2;
 
---connection node_2
-SET SESSION wsrep_sync_wait = 0;
---let $wait_condition = SELECT COUNT(*) BETWEEN 1 AND 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'Waiting to execute in isolation%';
+--connection node_2_ctrl
+--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Waiting for table metadata lock%' OR STATE LIKE 'acquiring total order isolation%';
 --let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.PROCESSLIST
 --source include/wait_condition_with_debug_and_kill.inc
 
 --connection node_1
+# We have LOCK TABLE in node_2 so this could fail on lock wait,
+# or succeed if the next statement is fast enough
+--error 0,ER_LOCK_WAIT_TIMEOUT
 INSERT INTO t2 VALUES (1);
 
 --connection node_2
@@ -43,3 +55,4 @@ DROP TABLE t2;
 
 --disconnect node_2a
 --disconnect node_2b
+--disconnect node_2_ctrl

From 
2d5dfc47a9303012b89e61cd739a9053c6cf9dff Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 30 Jun 2025 15:48:26 +0300 Subject: [PATCH 11/61] Define error message for HA_ERR_INCOMPATIBLE_DEFINITION --- sql/handler.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sql/handler.cc b/sql/handler.cc index 44a9cf8b173..bc96c35a306 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -503,6 +503,8 @@ int ha_init_errors(void) SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search"); SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, "Foreign key cascade delete/update exceeds"); SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING)); + SETMSG(HA_ERR_INCOMPATIBLE_DEFINITION, + "Mismatch between table definitions in sql and storage layer"); /* Register the error messages for use with my_error(). */ return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST); From 0dd6566ee47ac75cdedb91b1da46273f410ce823 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Wed, 2 Jul 2025 14:25:38 +0530 Subject: [PATCH 12/61] MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch While updating the persistent defragmentation statistics for the table, InnoDB opens the table only if it is in cache. If dict_table_open_on_id() fails to find the table in cache then it fails to unfreeze dict_sys.latch. This lead to crash --- mysql-test/suite/innodb/r/innodb_defrag_stats.result | 8 ++++++++ mysql-test/suite/innodb/t/innodb_defrag_stats.test | 9 +++++++++ storage/innobase/dict/dict0dict.cc | 2 ++ 3 files changed, 19 insertions(+) diff --git a/mysql-test/suite/innodb/r/innodb_defrag_stats.result b/mysql-test/suite/innodb/r/innodb_defrag_stats.result index c6fd7006f9e..934405c33cd 100644 --- a/mysql-test/suite/innodb/r/innodb_defrag_stats.result +++ b/mysql-test/suite/innodb/r/innodb_defrag_stats.result @@ -131,3 +131,11 @@ ALTER TABLE t2 STATS_PERSISTENT=1; DROP TABLE t2; SELECT * FROM mysql.innodb_index_stats; database_name table_name index_name last_update stat_name stat_value sample_size stat_description +# +# MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch +# +SET GLOBAL innodb_defragment_stats_accuracy=1; +CREATE TABLE t (f INT,f2 CHAR(1),KEY k1 (f2),FULLTEXT KEY(f2), +FOREIGN KEY(f2) REFERENCES t (f3)) ENGINE=InnoDB; +ERROR HY000: Can't create table `test`.`t` (errno: 150 "Foreign key constraint is incorrectly formed") +SET GLOBAL innodb_defragment_stats_accuracy=default; diff --git a/mysql-test/suite/innodb/t/innodb_defrag_stats.test b/mysql-test/suite/innodb/t/innodb_defrag_stats.test index 3730eb657af..56477e8f17e 100644 --- a/mysql-test/suite/innodb/t/innodb_defrag_stats.test +++ b/mysql-test/suite/innodb/t/innodb_defrag_stats.test @@ -86,3 +86,12 @@ ALTER TABLE t2 STATS_PERSISTENT=1; DROP TABLE t2; SELECT * FROM mysql.innodb_index_stats; + +--echo # +--echo # MDEV-37123 dict_table_open_on_id() fails to release dict_sys.latch +--echo # +SET GLOBAL innodb_defragment_stats_accuracy=1; +--error ER_CANT_CREATE_TABLE +CREATE TABLE t (f INT,f2 CHAR(1),KEY k1 (f2),FULLTEXT KEY(f2), + FOREIGN KEY(f2) REFERENCES t (f3)) ENGINE=InnoDB; +SET GLOBAL innodb_defragment_stats_accuracy=default; diff --git a/storage/innobase/dict/dict0dict.cc b/storage/innobase/dict/dict0dict.cc index ef4c24f6fb5..bced539a0eb 100644 --- a/storage/innobase/dict/dict0dict.cc +++ b/storage/innobase/dict/dict0dict.cc @@ -927,6 +927,8 @@ retry: else if (table) table->acquire(); } + else if (!dict_locked) + dict_sys.unfreeze(); return table; } From 
9059385262b02ce77d1841d99ae168511a891ca3 Mon Sep 17 00:00:00 2001
From: Daniel Black
Date: Fri, 20 Jun 2025 10:41:50 +1000
Subject: [PATCH 13/61] MDEV-37048 revert MSAN my_vsnprintf_ex for double
 workaround

5 years ago e843033d0233927b8f51d7dbe21993bdfb01ecdf worked around
a MSAN bug when retrieving a double from a va_list.

The construct {{__msan_check_mem_is_initialized(ap,size)}} where
ap is a va_list is undefined, as {{__msan_check_mem_is_initialized}}
expects a pointer. The implementation detail of va_list is
architecture dependent, and on aarch64 this isn't a pointer.

The need for any msan action is now unnecessary since this has
been corrected in recent clang versions.

As such, the additions from e843033d0233927b8f51d7dbe21993bdfb01ecdf
have been reverted.

Tested with clang-20.1 and the test cases from MDEV-22690 and
MDEV-22691.
---
 strings/my_vsnprintf.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/strings/my_vsnprintf.c b/strings/my_vsnprintf.c
index f7cbb507e35..a23e9904109 100644
--- a/strings/my_vsnprintf.c
+++ b/strings/my_vsnprintf.c
@@ -739,13 +739,7 @@ size_t my_vsnprintf_ex(CHARSET_INFO *cs, char *to, size_t n,
       else if (*fmt == 'f' || *fmt == 'g')
       {
         double d;
-#if __has_feature(memory_sanitizer) /* QQ: MSAN has double trouble? */
-        __msan_check_mem_is_initialized(ap, sizeof(double));
-#endif
         d= va_arg(ap, double);
-#if __has_feature(memory_sanitizer) /* QQ: MSAN has double trouble? */
-        __msan_unpoison(&d, sizeof(double));
-#endif
         to= process_dbl_arg(to, end, width, d, *fmt);
         continue;
       }

From 27660ff2e935e04b11a310b4940c43e7c5dc6b69 Mon Sep 17 00:00:00 2001
From: Thirunarayanan Balathandayuthapani
Date: Tue, 1 Jul 2025 10:59:20 +0530
Subject: [PATCH 14/61] MDEV-37121 Change buffer freed pages are not removed
 during slow shutdown

Problem:
=======
- During a slow shutdown, the expectation is that the change buffer
merge processes all the change buffer pages and frees all the change
buffer pages except the root. But InnoDB fails to free the freed
change buffer pages from the segment and return them to the system
tablespace.

Solution:
=========
- During a slow shutdown, remove all pages from the free list of the
change buffer and free them to the system tablespace
---
 storage/innobase/ibuf/ibuf0ibuf.cc | 30 ++++++++++++++++++++----------
 1 file changed, 20 insertions(+), 10 deletions(-)

diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc
index 3e8f7888959..685c68033fe 100644
--- a/storage/innobase/ibuf/ibuf0ibuf.cc
+++ b/storage/innobase/ibuf/ibuf0ibuf.cc
@@ -1855,12 +1855,17 @@ corrupted:
 	return true;
 }
 
-/*********************************************************************//**
-Removes a page from the free list and frees it to the fsp system. */
-static void ibuf_remove_free_page()
+/** Removes a page from the free list and frees it to the fsp system.
+@param all Free all freed pages. This should be useful only during slow
+shutdown
+@return error code when InnoDB fails to free the page
+@retval DB_SUCCESS_LOCKED_REC if all free pages are freed
+@retval DB_SUCCESS if page is freed */
+static dberr_t ibuf_remove_free_page(bool all = false)
 {
 	mtr_t	mtr;
 	page_t*	header_page;
+	dberr_t	err = DB_SUCCESS;
 
 	log_free_check();
@@ -1876,17 +1881,17 @@ static void ibuf_remove_free_page()
 	mysql_mutex_lock(&ibuf_pessimistic_insert_mutex);
 	mysql_mutex_lock(&ibuf_mutex);
 
-	if (!header_page || !ibuf_data_too_much_free()) {
+	if (!header_page || (!all && !ibuf_data_too_much_free())) {
 early_exit:
 		mysql_mutex_unlock(&ibuf_mutex);
 		mysql_mutex_unlock(&ibuf_pessimistic_insert_mutex);
-
+exit:
 		ibuf_mtr_commit(&mtr);
-		return;
+		return err;
 	}
 
-	buf_block_t* root = ibuf_tree_root_get(&mtr);
+	buf_block_t* root = ibuf_tree_root_get(&mtr, &err);
 
 	if (UNIV_UNLIKELY(!root)) {
 		goto early_exit;
 	}
 	+ PAGE_BTR_IBUF_FREE_LIST
 	+ root->page.frame).page;
 
+	/* If all the freed pages are removed during slow shutdown
+	then exit early with DB_SUCCESS_LOCKED_REC */
 	if (page_no >= fil_system.sys_space->free_limit) {
+		err = DB_SUCCESS_LOCKED_REC;
 		goto early_exit;
 	}
@@ -1919,7 +1927,7 @@ early_exit:
 	compile_time_assert(IBUF_SPACE_ID == 0);
 	const page_id_t	page_id{IBUF_SPACE_ID, page_no};
 	buf_block_t*	bitmap_page = nullptr;
-	dberr_t err = fseg_free_page(
+	err = fseg_free_page(
 		header_page + IBUF_HEADER + IBUF_TREE_SEG_HEADER,
 		fil_system.sys_space, page_no, &mtr);
@@ -1964,7 +1972,7 @@ func_exit:
 		buf_page_free(fil_system.sys_space, page_no, &mtr);
 	}
 
-	ibuf_mtr_commit(&mtr);
+	goto exit;
 }
 
 /***********************************************************************//**
@@ -2433,7 +2441,9 @@ ATTRIBUTE_COLD ulint ibuf_contract()
 		    == page_id_t(IBUF_SPACE_ID, FSP_IBUF_TREE_ROOT_PAGE_NO));
 		ibuf_mtr_commit(&mtr);
-
+		/* Remove all free pages from the free list and
+		free them to the system tablespace */
+		while (ibuf_remove_free_page(true) == DB_SUCCESS);
 		return(0);
 	}

From a293dfd92a8cf2bf3bf279b7e68b19e6e80d6e48 Mon Sep 17 00:00:00 2001
From: Kostadin Shishmanov
Date: Sun, 1 Jun 2025 17:35:07 +0300
Subject: [PATCH 15/61] Fix building with gcc 16 (evex512 removal)

Recently, evex512 was removed from gcc trunk [1] which will eventually
become gcc 16, and that leads to a build failure in mariadb, originally
reported downstream in a Gentoo bug [2]. This is reproducible across
all versions from 10.6 to current master.

The change is as simple as adding an upper bound on which gcc
versions can use evex512.
[1] https://gcc.gnu.org/git/?p=gcc.git;a=commitdiff;h=c052a6f4 [2] https://bugs.gentoo.org/956632 Signed-off-by: Kostadin Shishmanov --- mysys/crc32/crc32c_x86.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysys/crc32/crc32c_x86.cc b/mysys/crc32/crc32c_x86.cc index fb5dc19f7a5..134596db2cc 100644 --- a/mysys/crc32/crc32c_x86.cc +++ b/mysys/crc32/crc32c_x86.cc @@ -25,7 +25,7 @@ #else # include # ifdef __APPLE__ /* AVX512 states are not enabled in XCR0 */ -# elif __GNUC__ >= 14 || (defined __clang_major__ && __clang_major__ >= 18) +# elif (__GNUC__ >= 14 && __GNUC__ < 16) || (defined __clang_major__ && __clang_major__ >= 18) # define TARGET "pclmul,evex512,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq" # define USE_VPCLMULQDQ __attribute__((target(TARGET))) # elif __GNUC__ >= 11 || (defined __clang_major__ && __clang_major__ >= 9) From 31aa8b6939ee9326b4145a9cceae7e5a3711d7bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 7 Jul 2025 09:30:34 +0300 Subject: [PATCH 16/61] MDEV-37170 Enable AVX10.1 CRC-32 on GCC 16 The AVX512 accelerated CRC-32 computation that had been added in commit 9ec7819c585d139c8fe64d0f7f0f0f51dcafa01f was disabled in commit a293dfd92a8cf2bf3bf279b7e68b19e6e80d6e48 for GCC 16. Let us enable that logic by applying dr-m/crc32_simd@075bacb0cc7da8df18afa15a827ae4d11166fe32 which makes use of the avx10.1 target attribute that had been introduced in GCC 15. --- mysys/crc32/crc32c_x86.cc | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mysys/crc32/crc32c_x86.cc b/mysys/crc32/crc32c_x86.cc index 134596db2cc..a66093e54cc 100644 --- a/mysys/crc32/crc32c_x86.cc +++ b/mysys/crc32/crc32c_x86.cc @@ -25,7 +25,10 @@ #else # include # ifdef __APPLE__ /* AVX512 states are not enabled in XCR0 */ -# elif (__GNUC__ >= 14 && __GNUC__ < 16) || (defined __clang_major__ && __clang_major__ >= 18) +# elif __GNUC__ >= 15 +# define TARGET "pclmul,avx10.1,vpclmulqdq" +# define USE_VPCLMULQDQ __attribute__((target(TARGET))) +# elif __GNUC__ >= 14 || (defined __clang_major__ && __clang_major__ >= 18) # define TARGET "pclmul,evex512,avx512f,avx512dq,avx512bw,avx512vl,vpclmulqdq" # define USE_VPCLMULQDQ __attribute__((target(TARGET))) # elif __GNUC__ >= 11 || (defined __clang_major__ && __clang_major__ >= 9) From c4a2688328f886ce35bfa5ebc2be4e77cea8228c Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Sun, 25 May 2025 23:23:29 +0300 Subject: [PATCH 17/61] MDEV-24726 Assertion on compressed varstring as key field in optimizer temporary table Compressed field cannot be part of a key by its nature: there is no data to order, only the compressed data. For optimizer temporary table we create uncompressed substitute. In all other cases (MDEV-16808) we don't use key: add_keyuse() is skipped by !field->compression_method() condition. 
--- mysql-test/main/column_compression.result | 56 +++++++++++++++++++++++ mysql-test/main/column_compression.test | 53 +++++++++++++++++++++ sql/field.cc | 53 +++++++++++++++++++++ sql/field.h | 2 + sql/sql_select.cc | 8 +++- sql/table.h | 5 ++ 6 files changed, 176 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/column_compression.result b/mysql-test/main/column_compression.result index 15976939f70..7f176290637 100644 --- a/mysql-test/main/column_compression.result +++ b/mysql-test/main/column_compression.result @@ -2978,4 +2978,60 @@ SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; f nc,mmmmmmmmmmd DROP TABLE t1; +# +# MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp +# +# VARCHAR +create table t1 (a varchar(8) compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +a +foo +foo +bar +drop view v1; +drop table t1; +create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +f1 + +drop table t1, t2; +# BLOB +create table t1 (a text compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +a +foo +foo +bar +drop view v1; +drop table t1; +create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +f1 + +drop table t1, t2; +# +# MDEV-16808 Assertion on compressed blob as key field +# +set join_cache_level= 3; +create table t1 (col_blob text) engine=innodb; +create table t2 (col_blob text compressed) engine=innodb; +select * from t1 join t2 using ( col_blob ); +col_blob +drop tables t1, t2; +create table t (a text compressed,b text) engine=innodb; +create table t4 like t; +set session join_cache_level=3; +select * from (select * from t) as t natural join (select * from t) as t1; +a b +drop tables t, t4; # End of 10.5 tests diff --git a/mysql-test/main/column_compression.test b/mysql-test/main/column_compression.test index f9b7cd31355..874f3c3580b 100644 --- a/mysql-test/main/column_compression.test +++ b/mysql-test/main/column_compression.test @@ -519,4 +519,57 @@ INSERT INTO t1 VALUES ('c','n'),('d','mmmmmmmmmm'); SELECT GROUP_CONCAT( b, a ORDER BY 2 ) AS f FROM t1; DROP TABLE t1; +--echo # +--echo # MDEV-24726 Assertion `0' failed in Field_varstring_compressed::key_cmp +--echo # + +--echo # VARCHAR +create table t1 (a varchar(8) compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +# cleanup +drop view v1; +drop table t1; + +create table t1 (f1 varchar(8)) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 varchar(8) compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +# cleanup +drop table t1, t2; + +--echo # BLOB +create 
table t1 (a text compressed) character set utf8mb4; +create algorithm=temptable view v1 as select * from t1; +insert into t1 values ('foo'),('bar'),('foo'); +select * from v1 where a in (select a from t1); +# cleanup +drop view v1; +drop table t1; + +create table t1 (f1 text) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t1 values (''); +create table t2 (f2 text compressed) charset=eucjpms collate=eucjpms_nopad_bin; +insert into t2 values ('a'),('b'); +select t1.* from t1 left join (select distinct f2 from t2) sq on sq.f2 = t1.f1; +# cleanup +drop table t1, t2; + +--echo # +--echo # MDEV-16808 Assertion on compressed blob as key field +--echo # +set join_cache_level= 3; +create table t1 (col_blob text) engine=innodb; +create table t2 (col_blob text compressed) engine=innodb; +select * from t1 join t2 using ( col_blob ); +drop tables t1, t2; + +create table t (a text compressed,b text) engine=innodb; +create table t4 like t; +set session join_cache_level=3; +select * from (select * from t) as t natural join (select * from t) as t1; +drop tables t, t4; + --echo # End of 10.5 tests diff --git a/sql/field.cc b/sql/field.cc index a1c4fd8f4cf..849bb7d33c7 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -8374,6 +8374,59 @@ Field *Field_varstring::make_new_field(MEM_ROOT *root, TABLE *new_table, } +Field *Field_varstring_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table, + bool keep_type) +{ + Field_varstring *res; + if (new_table->s->is_optimizer_tmp_table()) + { + /* + Compressed field cannot be part of a key. For optimizer temporary + table we create uncompressed substitute. + */ + res= new (root) Field_varstring(ptr, field_length, length_bytes, null_ptr, + null_bit, Field::NONE, &field_name, + new_table->s, charset()); + if (res) + { + res->init_for_make_new_field(new_table, orig_table); + /* See Column_definition::create_length_to_internal_length_string() */ + res->field_length--; + } + } + else + res= (Field_varstring*) Field::make_new_field(root, new_table, keep_type); + if (res) + res->length_bytes= length_bytes; + return res; +} + +Field *Field_blob_compressed::make_new_field(MEM_ROOT *root, TABLE *new_table, + bool keep_type) +{ + Field_blob *res; + if (new_table->s->is_optimizer_tmp_table()) + { + /* + Compressed field cannot be part of a key. For optimizer temporary + table we create uncompressed substitute. 
+  */
+  res= new (root) Field_blob(ptr, null_ptr, null_bit, Field::NONE, &field_name,
+                             new_table->s, packlength, charset());
+  if (res)
+  {
+    res->init_for_make_new_field(new_table, orig_table);
+    /* See Column_definition::create_length_to_internal_length_string() */
+    res->field_length--;
+  }
+  }
+  else
+    res= (Field_blob *) Field::make_new_field(root, new_table, keep_type);
+  return res;
+}
+
+
+
 Field *Field_varstring::new_key_field(MEM_ROOT *root, TABLE *new_table,
                                       uchar *new_ptr, uint32 length,
                                       uchar *new_null_ptr, uint new_null_bit)
diff --git a/sql/field.h b/sql/field.h
index 2224d1e4ab1..ce61788c653 100644
--- a/sql/field.h
+++ b/sql/field.h
@@ -4309,6 +4309,7 @@ private:
   { DBUG_ASSERT(0); return 0; }
   using Field_varstring::key_cmp;
   Binlog_type_info binlog_type_info() const override;
+  Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override;
 };
 
 
@@ -4750,6 +4751,7 @@ private:
     override
   { DBUG_ASSERT(0); return 0; }
   Binlog_type_info binlog_type_info() const override;
+  Field *make_new_field(MEM_ROOT *root, TABLE *new_table, bool keep_type) override;
 };
 
 
diff --git a/sql/sql_select.cc b/sql/sql_select.cc
index 7f1921de480..f7c0b4cc884 100644
--- a/sql/sql_select.cc
+++ b/sql/sql_select.cc
@@ -7082,7 +7082,13 @@ add_key_part(DYNAMIC_ARRAY *keyuse_array, KEY_FIELD *key_field)
       }
     }
   }
-  if (field->hash_join_is_possible() &&
+  /*
+    Compressed field cannot be part of a key. For optimizer temporary table
+    compressed fields are replaced by uncompressed, see
+    is_optimizer_tmp_table() and Field_*_compressed::make_new_field().
+  */
+  if (!field->compression_method() &&
+      field->hash_join_is_possible() &&
       (key_field->optimize & KEY_OPTIMIZE_EQ) &&
       key_field->val->used_tables())
   {
diff --git a/sql/table.h b/sql/table.h
index b68f93f5e5c..ee26e2cd38b 100644
--- a/sql/table.h
+++ b/sql/table.h
@@ -1092,6 +1092,11 @@ struct TABLE_SHARE
     return (tmp_table == SYSTEM_TMP_TABLE) ? 0 : table_map_id;
   }
 
+  bool is_optimizer_tmp_table()
+  {
+    return tmp_table == INTERNAL_TMP_TABLE && !db.length && table_name.length;
+  }
+
   bool visit_subgraph(Wait_for_flush *waiting_ticket,
                       MDL_wait_for_graph_visitor *gvisitor);

From 4c8af2007d48930fea2609326db8b9de5dbd1c83 Mon Sep 17 00:00:00 2001
From: Kristian Nielsen
Date: Thu, 19 Jun 2025 11:32:40 +0200
Subject: [PATCH 18/61] MDEV-36934: semi sync makes the master unresponsive
 when a replica is stopped

Ensure that a pending semi-sync transaction can only be signalled on its
THD while it is waiting in commit_trx(); not if the wait in commit_trx()
is skipped for some reason.

There was a bug that if no semi-sync slaves were connected and
rpl_semi_sync_master_wait_no_slave was off, the THD pointer could be
left in the list of pending transactions after the THD was deleted, and
an invalid THD/condition variable could be signalled, causing a hang or
other corruption.

Testcase based on work by Brandon Nesterenko.
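The invariant can be modelled with standard C++ primitives. This is a
minimal sketch, not server code: Waiter, acked and the method names are
invented for the illustration; valid plays the role of
Tranx_node::thd_valid, lock of the semi-sync master lock, and cond of
THD::COND_wakeup_ready.

  #include <condition_variable>
  #include <mutex>
  #include <thread>

  struct Waiter
  {
    std::mutex lock;               // stands in for the semi-sync master lock
    std::condition_variable cond;  // stands in for THD::COND_wakeup_ready
    bool valid= false;             // stands in for Tranx_node::thd_valid
    bool acked= false;

    void wait_for_ack()            // the commit_trx() side
    {
      std::unique_lock<std::mutex> g(lock);
      valid= true;                 // signalling is allowed only from here...
      cond.wait(g, [this] { return acked; });
      valid= false;                // ...to here; never after the wait ends
    }

    void ack_arrived()             // the ACK receiver side
    {
      std::lock_guard<std::mutex> g(lock);
      acked= true;
      if (valid)                   // skip entries that are not really waiting
        cond.notify_one();
    }
  };

  int main()
  {
    Waiter w;
    std::thread acker([&] { w.ack_arrived(); });
    w.wait_for_ack();              // returns at once if the ACK came first
    acker.join();
  }

Even if an error path were to leave a stale entry in the list, the worst
the ACK thread can now do is skip a wakeup; it can no longer signal a
condition variable of a THD that has been freed.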
Reviewed-by: Brandon Nesterenko Signed-off-by: Kristian Nielsen --- .../r/rpl_semi_sync_cond_var_per_thd.result | 63 ++++++++++- .../rpl/t/rpl_semi_sync_cond_var_per_thd.test | 104 +++++++++++++++++- sql/semisync_master.cc | 50 +++++++-- sql/semisync_master.h | 10 +- 4 files changed, 210 insertions(+), 17 deletions(-) diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result index 18ad5d3d2cc..96e7aa43fbf 100644 --- a/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_cond_var_per_thd.result @@ -7,6 +7,8 @@ call mtr.add_suppression("Could not read packet"); call mtr.add_suppression("Could not write packet"); set @save_bgc_count= @@global.binlog_commit_wait_count; set @save_bgc_usec= @@global.binlog_commit_wait_usec; +set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; +set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @save_debug_dbug= @@global.debug_dbug; set @@global.binlog_commit_wait_count=3; set @@global.binlog_commit_wait_usec=10000000; @@ -46,8 +48,6 @@ drop table t1, t2, t3; # the binlogging to semi-sync, and starting the wait for ACK; and during # this pause, semi-sync is manually switched off and on. connection master; -set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; -set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC; set @@global.rpl_semi_sync_master_wait_no_slave= 1; set @@global.debug_dbug="+d,semisync_log_skip_trx_wait"; @@ -100,7 +100,66 @@ commit; # Cleanup connection master; drop table tn; +set @@global.debug_dbug=@save_debug_dbug; +# +# MDEV-36934 +# The server could indefinitely hang due to a memory leak which tried to +# pthread signal on a destroyed condition variable. In effect, no +# connections could commit transactions because there would be a thread +# stuck on a never-returning call to pthread_cond_signal() while +# holding Repl_semi_sync_master::LOCK_log. +connection master; +set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT; +set @@global.rpl_semi_sync_master_wait_no_slave= 0; +# Ensure servers are in proper state +connection master; +connection slave; +# Test case initial set-up +connection master; +create table t_36934 (a int) engine=innodb; +include/save_master_gtid.inc +connection slave; +include/sync_with_master_gtid.inc +# Pause the user transaction before inserting into Active_tranx +connect user_con,localhost,root,,; +SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog'; +insert into t_36934 values (1); +connection server_1; +set debug_sync="now wait_for at_write_tranx_in_binlog"; +# Disconnect the slave (note that the binlog dump thread won't yet be +# notified of a binlog update from the last transaction, so the slave +# should neither receiver nor ACK the transaction). +connection slave; +include/stop_slave.inc +# Waiting for master to realize the slave has disconnected.. +connection server_1; +# ..done +# Resuming transaction (it will exit commit_trx early without waiting) +set debug_sync="now signal resume_write_tranx_in_binlog"; +connection user_con; +disconnect user_con; +# Force delete the user thread (FLUSH THREADS ensures the thread won't +# stay in the thread cache) +connection master; +FLUSH THREADS; +# BUG: Re-connect slave. 
MDEV-36934 reports that the master would hang +# when the slave would re-connect and try to ACK the last transaction +# who's thread has been deleted +connection slave; +include/start_slave.inc +# Try to commit another transaction (prior to MDEV-36934 fixes, this +# would hang indefinitely) +connection master; +set debug_sync="RESET"; +insert into t_36934 values (2); +connection server_1; +# Waiting 30s for last query to complete.. +connection master; +# ..done +# Cleanup +connection master; set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point; set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave; set @@global.debug_dbug=@save_debug_dbug; +drop table t_36934; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test index 537a4bf7fa8..5d6702f8c42 100644 --- a/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_cond_var_per_thd.test @@ -34,6 +34,8 @@ call mtr.add_suppression("Could not read packet"); call mtr.add_suppression("Could not write packet"); set @save_bgc_count= @@global.binlog_commit_wait_count; set @save_bgc_usec= @@global.binlog_commit_wait_usec; +set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; +set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @save_debug_dbug= @@global.debug_dbug; set @@global.binlog_commit_wait_count=3; set @@global.binlog_commit_wait_usec=10000000; @@ -98,8 +100,6 @@ drop table t1, t2, t3; --connection master -set @old_master_wait_point= @@global.rpl_semi_sync_master_wait_point; -set @old_master_wait_no_slave= @@global.rpl_semi_sync_master_wait_no_slave; set @@global.rpl_semi_sync_master_wait_point= AFTER_SYNC; set @@global.rpl_semi_sync_master_wait_no_slave= 1; --eval set @@global.debug_dbug="+d,semisync_log_skip_trx_wait" @@ -190,8 +190,108 @@ commit; --echo # Cleanup --connection master drop table tn; +set @@global.debug_dbug=@save_debug_dbug; + + +--echo # +--echo # MDEV-36934 +--echo # The server could indefinitely hang due to a memory leak which tried to +--echo # pthread signal on a destroyed condition variable. In effect, no +--echo # connections could commit transactions because there would be a thread +--echo # stuck on a never-returning call to pthread_cond_signal() while +--echo # holding Repl_semi_sync_master::LOCK_log. 
+ +--connection master +set @@global.rpl_semi_sync_master_wait_point= AFTER_COMMIT; +set @@global.rpl_semi_sync_master_wait_no_slave= 0; + +--echo # Ensure servers are in proper state +--connection master +let $status_var= rpl_semi_sync_master_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; +--connection slave +let $status_var= rpl_semi_sync_slave_status; +let $status_var_value= ON; +source include/wait_for_status_var.inc; + +--echo # Test case initial set-up +--connection master +create table t_36934 (a int) engine=innodb; +--source include/save_master_gtid.inc +--connection slave +--source include/sync_with_master_gtid.inc + +--echo # Pause the user transaction before inserting into Active_tranx +--connect(user_con,localhost,root,,) +SET debug_sync= 'semisync_at_write_tranx_in_binlog SIGNAL at_write_tranx_in_binlog WAIT_FOR resume_write_tranx_in_binlog'; +--send insert into t_36934 values (1) + +--connection server_1 +set debug_sync="now wait_for at_write_tranx_in_binlog"; + +--echo # Disconnect the slave (note that the binlog dump thread won't yet be +--echo # notified of a binlog update from the last transaction, so the slave +--echo # should neither receiver nor ACK the transaction). +--connection slave +--source include/stop_slave.inc + +--echo # Waiting for master to realize the slave has disconnected.. +--connection server_1 +let $status_var= rpl_semi_sync_master_clients; +let $status_var_value= 0; +source include/wait_for_status_var.inc; +--echo # ..done + +--echo # Resuming transaction (it will exit commit_trx early without waiting) +set debug_sync="now signal resume_write_tranx_in_binlog"; + +--connection user_con +--reap +--let $user_con_tid= `SELECT connection_id()` +--disconnect user_con +--source include/wait_until_disconnected.inc + +--echo # Force delete the user thread (FLUSH THREADS ensures the thread won't +--echo # stay in the thread cache) +--connection master +FLUSH THREADS; + +--echo # BUG: Re-connect slave. MDEV-36934 reports that the master would hang +--echo # when the slave would re-connect and try to ACK the last transaction +--echo # who's thread has been deleted +--connection slave +--source include/start_slave.inc + +--echo # Try to commit another transaction (prior to MDEV-36934 fixes, this +--echo # would hang indefinitely) +--connection master +set debug_sync="RESET"; +--send insert into t_36934 values (2) + +--connection server_1 +--echo # Waiting 30s for last query to complete.. 
+--let $wait_timeout= 30 +--let $wait_condition= SELECT count(*)=0 FROM information_schema.processlist WHERE info LIKE 'insert into t_36934%'; +--source include/wait_condition.inc + +# Variable `success` is set by wait_condition.inc +if (!$success) +{ + --echo # ..error + --die Query is hung +} + +--connection master +--reap +--echo # ..done + + +--echo # Cleanup +--connection master set @@global.rpl_semi_sync_master_wait_point= @old_master_wait_point; set @@global.rpl_semi_sync_master_wait_no_slave= @old_master_wait_no_slave; set @@global.debug_dbug=@save_debug_dbug; +drop table t_36934; --source include/rpl_end.inc diff --git a/sql/semisync_master.cc b/sql/semisync_master.cc index 43afb8f996e..854b6a61eeb 100644 --- a/sql/semisync_master.cc +++ b/sql/semisync_master.cc @@ -68,15 +68,20 @@ static ulonglong timespec_to_usec(const struct timespec *ts) return (ulonglong) ts->tv_sec * TIME_MILLION + ts->tv_nsec / TIME_THOUSAND; } -int signal_waiting_transaction(THD *waiting_thd, const char *binlog_file, - my_off_t binlog_pos) +static int +signal_waiting_transaction(THD *waiting_thd, bool thd_valid, + const char *binlog_file, my_off_t binlog_pos) { /* It is possible that the connection thd waiting for an ACK was killed. In such circumstance, the connection thread will nullify the thd member of its Active_tranx node. So before we try to signal, ensure the THD exists. + + The thd_valid is only set while the THD is waiting in commit_trx(); this + is defensive coding to not signal an invalid THD if we somewhere + accidentally did not remove the transaction from the list. */ - if (waiting_thd) + if (waiting_thd && thd_valid) mysql_cond_signal(&waiting_thd->COND_wakeup_ready); return 0; } @@ -182,6 +187,7 @@ int Active_tranx::insert_tranx_node(THD *thd_to_wait, ins_node->log_name[FN_REFLEN-1] = 0; /* make sure it ends properly */ ins_node->log_pos = log_file_pos; ins_node->thd= thd_to_wait; + ins_node->thd_valid= false; if (!m_trx_front) { @@ -263,7 +269,8 @@ void Active_tranx::clear_active_tranx_nodes( if ((log_file_name != NULL) && compare(new_front, log_file_name, log_file_pos) > 0) break; - pre_delete_hook(new_front->thd, new_front->log_name, new_front->log_pos); + pre_delete_hook(new_front->thd, new_front->thd_valid, + new_front->log_name, new_front->log_pos); new_front = new_front->next; } @@ -355,13 +362,17 @@ void Active_tranx::unlink_thd_as_waiter(const char *log_file_name, } if (entry) + { entry->thd= NULL; + entry->thd_valid= false; + } DBUG_VOID_RETURN; } -bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, - my_off_t log_file_pos) +Tranx_node * +Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, + my_off_t log_file_pos) { DBUG_ENTER("Active_tranx::assert_thd_is_waiter"); mysql_mutex_assert_owner(m_lock); @@ -377,7 +388,7 @@ bool Active_tranx::is_thd_waiter(THD *thd_to_check, const char *log_file_name, entry = entry->hash_next; } - DBUG_RETURN(static_cast(entry)); + DBUG_RETURN(entry); } /******************************************************************************* @@ -863,6 +874,10 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, if (!rpl_semi_sync_master_clients && !rpl_semi_sync_master_wait_no_slave) { + lock(); + m_active_tranxs->unlink_thd_as_waiter(trx_wait_binlog_name, + trx_wait_binlog_pos); + unlock(); rpl_semi_sync_master_no_transactions++; DBUG_RETURN(0); } @@ -922,6 +937,9 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, } } + Tranx_node *tranx_entry= + 
m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name, + trx_wait_binlog_pos); /* In between the binlogging of this transaction and this wait, it is * possible that our entry in Active_tranx was removed (i.e. if * semi-sync was switched off and on). It is also possible that the @@ -932,8 +950,7 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, * rpl_semi_sync_master_yes/no_tx consistent with it, we check for a * semi-sync restart _after_ checking the reply state. */ - if (unlikely(!m_active_tranxs->is_thd_waiter(thd, trx_wait_binlog_name, - trx_wait_binlog_pos))) + if (unlikely(!tranx_entry)) { DBUG_EXECUTE_IF( "semisync_log_skip_trx_wait", @@ -952,6 +969,16 @@ int Repl_semi_sync_master::commit_trx(const char *trx_wait_binlog_name, break; } + /* + Mark that our THD is now valid for signalling to by the ack thread. + It is important to ensure that we can never leave a no longer valid + THD in the transaction list and signal it, eg. MDEV-36934. This way, + we ensure the THD will only be signalled while this function is + running, even in case of some incorrect error handling or similar + that might leave a dangling THD in the list. + */ + tranx_entry->thd_valid= true; + /* Let us update the info about the minimum binlog position of waiting * threads. */ @@ -1284,6 +1311,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd, DBUG_ENTER("Repl_semi_sync_master::write_tranx_in_binlog"); + DEBUG_SYNC(current_thd, "semisync_at_write_tranx_in_binlog"); + lock(); /* This is the real check inside the mutex. */ @@ -1317,7 +1346,8 @@ int Repl_semi_sync_master::write_tranx_in_binlog(THD *thd, m_commit_file_name_inited = true; } - if (is_on()) + if (is_on() && + (rpl_semi_sync_master_clients || rpl_semi_sync_master_wait_no_slave)) { DBUG_ASSERT(m_active_tranxs != NULL); if(m_active_tranxs->insert_tranx_node(thd, log_file_name, log_file_pos)) diff --git a/sql/semisync_master.h b/sql/semisync_master.h index a1c57959165..28de3ecf480 100644 --- a/sql/semisync_master.h +++ b/sql/semisync_master.h @@ -30,6 +30,7 @@ extern PSI_cond_key key_COND_binlog_send; struct Tranx_node { char log_name[FN_REFLEN]; + bool thd_valid; /* thd is valid for signalling */ my_off_t log_pos; THD *thd; /* The thread awaiting an ACK */ struct Tranx_node *next; /* the next node in the sorted list */ @@ -126,7 +127,9 @@ public: trx_node= &(current_block->nodes[++last_node]); trx_node->log_name[0] = '\0'; + trx_node->thd_valid= false; trx_node->log_pos= 0; + trx_node->thd= nullptr; trx_node->next= 0; trx_node->hash_next= 0; return trx_node; @@ -298,7 +301,8 @@ private: its invocation. See the context in which it is called to know. */ -typedef int (*active_tranx_action)(THD *trx_thd, const char *log_file_name, +typedef int (*active_tranx_action)(THD *trx_thd, bool thd_valid, + const char *log_file_name, my_off_t trx_log_file_pos); /** @@ -381,8 +385,8 @@ public: * matches the thread of the respective Tranx_node::thd of the passed in * log_file_name and log_file_pos. */ - bool is_thd_waiter(THD *thd_to_check, const char *log_file_name, - my_off_t log_file_pos); + Tranx_node * is_thd_waiter(THD *thd_to_check, const char *log_file_name, + my_off_t log_file_pos); /* Given a position, check to see whether the position is an active * transaction's ending position by probing the hash table. 
From 1c7685f5fc5693312a7244f7575584ef59f096a4 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 30 Jun 2025 10:35:48 +0200 Subject: [PATCH 19/61] bugfix: nextval() in default, and UPDATE SET x=DEFAULT set thd->lex->default_used accordingly --- mysql-test/suite/sql_sequence/default.result | 13 +++++++++++-- mysql-test/suite/sql_sequence/default.test | 15 +++++++++++++-- sql/item.cc | 1 + sql/item_func.cc | 3 ++- sql/sql_prepare.cc | 2 ++ sql/sql_yacc.yy | 2 ++ 6 files changed, 31 insertions(+), 5 deletions(-) diff --git a/mysql-test/suite/sql_sequence/default.result b/mysql-test/suite/sql_sequence/default.result index eecef1d3527..fb8fed00a9a 100644 --- a/mysql-test/suite/sql_sequence/default.result +++ b/mysql-test/suite/sql_sequence/default.result @@ -292,6 +292,15 @@ a b 10 j DROP TABLE t1; DROP SEQUENCE s1; -# # End of 10.3 tests -# +# in UPDATE +create sequence s1 cache 0; +create table t1 (id int unsigned default nextval(s1)); +insert t1 values (); +update t1 set id=default; +prepare stmt from "update t1 set id=?"; +execute stmt using default; +deallocate prepare stmt; +drop table t1; +drop sequence s1; +# End of 10.6 tests diff --git a/mysql-test/suite/sql_sequence/default.test b/mysql-test/suite/sql_sequence/default.test index f965089d992..d7bc978ed9e 100644 --- a/mysql-test/suite/sql_sequence/default.test +++ b/mysql-test/suite/sql_sequence/default.test @@ -216,6 +216,17 @@ SELECT a, b FROM t1; DROP TABLE t1; DROP SEQUENCE s1; ---echo # --echo # End of 10.3 tests ---echo # + +--echo # in UPDATE +create sequence s1 cache 0; +create table t1 (id int unsigned default nextval(s1)); +insert t1 values (); +update t1 set id=default; +prepare stmt from "update t1 set id=?"; +execute stmt using default; +deallocate prepare stmt; +drop table t1; +drop sequence s1; + +--echo # End of 10.6 tests diff --git a/sql/item.cc b/sql/item.cc index a221d27dd34..33e62f32667 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5064,6 +5064,7 @@ Item_param::set_param_type_and_swap_value(Item_param *src) void Item_param::set_default(bool set_type_handler_null) { m_is_settable_routine_parameter= false; + current_thd->lex->default_used= true; state= DEFAULT_VALUE; /* When Item_param is set to DEFAULT_VALUE: diff --git a/sql/item_func.cc b/sql/item_func.cc index 08614e836f4..85dd193a929 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -7108,7 +7108,8 @@ longlong Item_func_nextval::val_int() String key_buff(buff,sizeof(buff), &my_charset_bin); DBUG_ENTER("Item_func_nextval::val_int"); update_table(); - DBUG_ASSERT(table && table->s->sequence); + DBUG_ASSERT(table); + DBUG_ASSERT(table->s->sequence); thd= table->in_use; if (thd->count_cuted_fields == CHECK_FIELD_EXPRESSION) diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 059e3d7c510..431991c1d7a 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -4595,6 +4595,8 @@ Prepared_statement::set_parameters(String *expanded_query, res= set_params_data(this, expanded_query); #endif } + lex->default_used= thd->lex->default_used; + thd->lex->default_used= false; if (res) { my_error(ER_WRONG_ARGUMENTS, MYF(0), diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 03b37f5dec9..f41e352999a 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -13502,6 +13502,7 @@ expr_or_ignore_or_default: | DEFAULT { $$= new (thd->mem_root) Item_default_specification(thd); + Lex->default_used= TRUE; if (unlikely($$ == NULL)) MYSQL_YYABORT; } @@ -13585,6 +13586,7 @@ update_elem: { Item *def= new (thd->mem_root) Item_default_value(thd, Lex->current_context(), $1, 
1);
+            Lex->default_used= TRUE;
             if (!def || add_item_to_list(thd, $1) || add_value_to_list(thd, def))
               MYSQL_YYABORT;
           }

From c27d78beb59b49bee7697a489743b4abe17bebe5 Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Mon, 30 Jun 2025 15:44:50 +0200
Subject: [PATCH 20/61] MDEV-36870 Spurious unrelated permission error when
 selecting from table with default that uses nextval(sequence)

Lots of different cases: SELECT, SELECT DEFAULT(), UPDATE t SET
x=DEFAULT, prepared statements, opening of a table for the I_S,
prelocking (so TL_WRITE), insert with subquery (so SQLCOM_SELECT), etc.

Don't check NEXTVAL privileges in fix_fields() anymore, as it cannot
possibly handle all the cases correctly. Make a special method
Item_func_nextval::check_access() for that and invoke it from

* fix_fields on explicit SELECT NEXTVAL()
  (but not if NEXTVAL() is used in a DEFAULT clause)
* when the DEFAULT bareword is used in, say, UPDATE t SET x=DEFAULT
  (but not if DEFAULT() itself is used in a DEFAULT clause)
* in CREATE TABLE
* in ALTER TABLE ALGORITHM=INPLACE (that doesn't go the CREATE TABLE path)
* on INSERT

helpers:

* Virtual_column_info::check_access() to walk the item tree and invoke
  Item::check_access()
* TABLE::check_sequence_privileges() to iterate default expressions and
  invoke Virtual_column_info::check_access()

Also, single-table UPDATE in prepared statements now associates value
items with fields just as multi-update already did; this fixes the case
of PREPARE s "UPDATE t SET x=?"; EXECUTE s USING DEFAULT.
---
 mysql-test/suite/sql_sequence/grant.result |  73 ++++++++++++++-
 mysql-test/suite/sql_sequence/grant.test   | 104 +++++++++++++++++++--
 sql/field.h                                |   1 +
 sql/handler.cc                             |   3 +-
 sql/item.cc                                |   5 +
 sql/item.h                                 |   1 +
 sql/item_func.cc                           |   5 +-
 sql/item_func.h                            |  18 ++--
 sql/sql_insert.cc                          |   3 +
 sql/sql_prepare.cc                         |   5 +
 sql/sql_table.cc                           |   3 +-
 sql/table.cc                               |  20 ++++
 sql/table.h                                |   1 +
 13 files changed, 224 insertions(+), 18 deletions(-)

diff --git a/mysql-test/suite/sql_sequence/grant.result b/mysql-test/suite/sql_sequence/grant.result
index d631772c740..f0b1bea6865 100644
--- a/mysql-test/suite/sql_sequence/grant.result
+++ b/mysql-test/suite/sql_sequence/grant.result
@@ -112,5 +112,76 @@ connection default;
 drop user u_alter;
 drop database mysqltest_1;
 #
-# End of 10.11 tests
+# MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence)
 #
+create database db1;
+use db1;
+create sequence s1 cache 0;
+create table t1 (id int unsigned default (10+nextval(s1)));
+insert t1 values ();
+create table t2 (id int unsigned default nextval(s1), b int default(default(id)));
+insert t2 values ();
+create function f1(x int) returns int sql security invoker
+begin
+select id+x into x from t1;
+return x;
+insert t1 values ();
+end|
+create user u1@localhost;
+grant select on db1.* to u1@localhost;
+grant execute on db1.* to u1@localhost;
+use test;
+create table t3 (id int unsigned default (20+nextval(db1.s1)), b int);
+insert t3 values ();
+create sequence s2 cache 0;
+create table t4 (id int unsigned default (10+nextval(s2)), b int);
+insert t4 values ();
+connect u1,localhost,u1,,db1;
+select * from t1;
+id
+11
+connection default;
+flush tables;
+connection u1;
+select * from t1;
+id
+11
+select default(id) from t1;
+ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1`
+select * from t2;
+id b
+2 3
+select f1(100);
+f1(100)
+111
+select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1';
+column_name data_type column_default +id int (10 + nextval(`db1`.`s1`)) +use test; +insert t3 values (); +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +insert t4 values (); +insert t3 (b) select 5; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +insert t4 (b) select 5; +update t3 set id=default; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +update t4 set id=default; +prepare stmt from "update t3 set id=?"; +execute stmt using default; +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +prepare stmt from "update t4 set id=?"; +execute stmt using default; +deallocate prepare stmt; +insert t4 (b) values ((select * from db1.t1)); +insert t4 (b) values ((select default(id) from db1.t1)); +ERROR 42000: INSERT command denied to user 'u1'@'localhost' for table `db1`.`s1` +connection default; +disconnect u1; +select nextval(db1.s1) as 'must be 5'; +must be 5 +5 +drop user u1@localhost; +drop database db1; +drop table t3, t4, s2; +# End of 10.6 tests diff --git a/mysql-test/suite/sql_sequence/grant.test b/mysql-test/suite/sql_sequence/grant.test index 8c56de16525..becaf6a31fb 100644 --- a/mysql-test/suite/sql_sequence/grant.test +++ b/mysql-test/suite/sql_sequence/grant.test @@ -121,13 +121,105 @@ alter table t1 modify id int default nextval(s1); --disconnect con_alter --connection default drop user u_alter; - -# -# Cleanup -# - drop database mysqltest_1; --echo # ---echo # End of 10.11 tests +--echo # MDEV-36870 Spurious unrelated permission error when selecting from table with default that uses nextval(sequence) --echo # + +# various tests for permission checking on sequences +create database db1; +use db1; +create sequence s1 cache 0; +create table t1 (id int unsigned default (10+nextval(s1))); +insert t1 values (); + +create table t2 (id int unsigned default nextval(s1), b int default(default(id))); +insert t2 values (); + +# INSERT affects prelocking, but is never actually executed +delimiter |; +create function f1(x int) returns int sql security invoker +begin + select id+x into x from t1; + return x; + insert t1 values (); +end| +delimiter ;| + +create user u1@localhost; +grant select on db1.* to u1@localhost; +grant execute on db1.* to u1@localhost; + +use test; +create table t3 (id int unsigned default (20+nextval(db1.s1)), b int); +insert t3 values (); + +create sequence s2 cache 0; +create table t4 (id int unsigned default (10+nextval(s2)), b int); +insert t4 values (); + +connect u1,localhost,u1,,db1; + +# table already in the cache. must be re-fixed +# SELECT * - no error +select * from t1; + +# not in cache +connection default; +flush tables; +connection u1; +# SELECT * - no error +select * from t1; + +# SELECT DEFAULT() - error +--error ER_TABLEACCESS_DENIED_ERROR +select default(id) from t1; + +# default(default(nextval)) +select * from t2; + +# SELECT but table has TL_WRITE because of prelocking +select f1(100); + +# opening the table for I_S +select column_name, data_type, column_default from information_schema.columns where table_schema='db1' and table_name='t1'; + +use test; +# insert +--error ER_TABLEACCESS_DENIED_ERROR +insert t3 values (); +insert t4 values (); +#insert select +--error ER_TABLEACCESS_DENIED_ERROR +insert t3 (b) select 5; +insert t4 (b) select 5; +#update +--error ER_TABLEACCESS_DENIED_ERROR +update t3 set id=default; +update t4 set id=default; + +# PS UPDATE with ? 
= DEFAULT +prepare stmt from "update t3 set id=?"; +--error ER_TABLEACCESS_DENIED_ERROR +execute stmt using default; +prepare stmt from "update t4 set id=?"; +execute stmt using default; +deallocate prepare stmt; + +# SELECT * in a subquery, like INSERT t3 VALUES ((SELECT * FROM t1)); +# with sequences both on t3 and t1 +insert t4 (b) values ((select * from db1.t1)); +--error ER_TABLEACCESS_DENIED_ERROR +insert t4 (b) values ((select default(id) from db1.t1)); + +connection default; +disconnect u1; +--disable_ps2_protocol +select nextval(db1.s1) as 'must be 5'; +--enable_ps2_protocol +drop user u1@localhost; +drop database db1; +drop table t3, t4, s2; + +--echo # End of 10.6 tests diff --git a/sql/field.h b/sql/field.h index ce61788c653..59dcd229b52 100644 --- a/sql/field.h +++ b/sql/field.h @@ -658,6 +658,7 @@ public: bool fix_session_expr(THD *thd); bool cleanup_session_expr(); bool fix_and_check_expr(THD *thd, TABLE *table); + bool check_access(THD *thd); inline bool is_equal(const Virtual_column_info* vcol) const; /* Same as is_equal() but for comparing with different table */ bool is_equivalent(THD *thd, TABLE_SHARE *share, TABLE_SHARE *vcol_share, diff --git a/sql/handler.cc b/sql/handler.cc index bc96c35a306..754b5a7e5f8 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -6113,7 +6113,8 @@ int ha_create_table(THD *thd, const char *path, const char *db, name= get_canonical_filename(table.file, share.path.str, name_buff); - error= table.file->ha_create(name, &table, create_info); + error= table.check_sequence_privileges(thd) ? 1 : + table.file->ha_create(name, &table, create_info); if (unlikely(error)) { diff --git a/sql/item.cc b/sql/item.cc index 33e62f32667..4a47b268f52 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5222,6 +5222,11 @@ static Field *make_default_field(THD *thd, Field *field_arg) if (!newptr) return nullptr; + /* Don't check privileges, if it's parse_vcol_defs() */ + if (def_field->table->pos_in_table_list && + def_field->default_value->check_access(thd)) + return nullptr; + if (should_mark_column(thd->column_usage)) def_field->default_value->expr->update_used_tables(); def_field->move_field(newptr + 1, def_field->maybe_null() ? newptr : 0, 1); diff --git a/sql/item.h b/sql/item.h index 84c2d64218a..58f01cfb73d 100644 --- a/sql/item.h +++ b/sql/item.h @@ -2432,6 +2432,7 @@ public: If there is some, sets a bit for this key in the proper key map. 
*/ virtual bool check_index_dependence(void *arg) { return 0; } + virtual bool check_sequence_privileges(void *arg) { return 0; } /*============== End of Item processor list ======================*/ /* diff --git a/sql/item_func.cc b/sql/item_func.cc index 85dd193a929..5592f96fa5e 100644 --- a/sql/item_func.cc +++ b/sql/item_func.cc @@ -7085,15 +7085,14 @@ longlong Item_func_cursor_rowcount::val_int() /***************************************************************************** SEQUENCE functions *****************************************************************************/ -bool Item_func_nextval::check_access_and_fix_fields(THD *thd, Item **ref, - privilege_t want_access) +bool Item_func_nextval::check_access(THD *thd, privilege_t want_access) { table_list->sequence= false; bool error= check_single_table_access(thd, want_access, table_list, false); table_list->sequence= true; if (error && table_list->belong_to_view) table_list->replace_view_error_with_generic(thd); - return error || Item_longlong_func::fix_fields(thd, ref); + return error; } longlong Item_func_nextval::val_int() diff --git a/sql/item_func.h b/sql/item_func.h index 43ea18069aa..5f9b02b32e7 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -4228,7 +4228,7 @@ class Item_func_nextval :public Item_longlong_func protected: TABLE_LIST *table_list; TABLE *table; - bool check_access_and_fix_fields(THD *, Item **ref, privilege_t); + bool check_access(THD *, privilege_t); public: Item_func_nextval(THD *thd, TABLE_LIST *table_list_arg): Item_longlong_func(thd), table_list(table_list_arg) {} @@ -4239,7 +4239,13 @@ public: return name; } bool fix_fields(THD *thd, Item **ref) override - { return check_access_and_fix_fields(thd, ref, INSERT_ACL | SELECT_ACL); } + { + /* Don't check privileges, if it's parse_vcol_defs() */ + return (table_list->table && check_sequence_privileges(thd)) || + Item_longlong_func::fix_fields(thd, ref); + } + bool check_sequence_privileges(void *thd) override + { return check_access((THD*)thd, INSERT_ACL | SELECT_ACL); } bool fix_length_and_dec() override { unsigned_flag= 0; @@ -4281,8 +4287,8 @@ class Item_func_lastval :public Item_func_nextval public: Item_func_lastval(THD *thd, TABLE_LIST *table_list_arg): Item_func_nextval(thd, table_list_arg) {} - bool fix_fields(THD *thd, Item **ref) override - { return check_access_and_fix_fields(thd, ref, SELECT_ACL); } + bool check_sequence_privileges(void *thd) override + { return check_access((THD*)thd, SELECT_ACL); } longlong val_int() override; LEX_CSTRING func_name_cstring() const override { @@ -4307,8 +4313,8 @@ public: : Item_func_nextval(thd, table_list_arg), nextval(nextval_arg), round(round_arg), is_used(is_used_arg) {} - bool fix_fields(THD *thd, Item **ref) override - { return check_access_and_fix_fields(thd, ref, INSERT_ACL); } + bool check_sequence_privileges(void *thd) override + { return check_access((THD*)thd, INSERT_ACL); } longlong val_int() override; LEX_CSTRING func_name_cstring() const override { diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index ffc68305fa0..fa52164445f 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -1622,6 +1622,9 @@ static bool mysql_prepare_insert_check_table(THD *thd, TABLE_LIST *table_list, DBUG_RETURN(insert_view_fields(thd, &fields, table_list)); } + if (table_list->table->check_sequence_privileges(thd)) + DBUG_RETURN(TRUE); + DBUG_RETURN(FALSE); } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index 431991c1d7a..b2bcf03820c 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -1506,6 
+1506,11 @@ static int mysql_test_update(Prepared_statement *stmt, 0, NULL, 0, THD_WHERE::SET_LIST) || check_unique_table(thd, table_list)) goto error; + { + List_iterator_fast fs(select->item_list), vs(stmt->lex->value_list); + while (Item *f= fs++) + vs++->associate_with_target_field(thd, static_cast(f)); + } /* TODO: here we should send types of placeholders to the client. */ DBUG_RETURN(0); error: diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 4014f4101dd..a32bd87b223 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -10789,7 +10789,8 @@ do_continue:; thd->count_cuted_fields= CHECK_FIELD_EXPRESSION; altered_table.reset_default_fields(); if (altered_table.default_field && - altered_table.update_default_fields(true)) + (altered_table.check_sequence_privileges(thd) || + altered_table.update_default_fields(true))) { cleanup_table_after_inplace_alter(&altered_table); goto err_new_table_cleanup; diff --git a/sql/table.cc b/sql/table.cc index 119d949388a..683da12f7d9 100644 --- a/sql/table.cc +++ b/sql/table.cc @@ -3744,6 +3744,19 @@ Vcol_expr_context::~Vcol_expr_context() } +bool TABLE::check_sequence_privileges(THD *thd) +{ + if (internal_tables) + for (Field **fp= field; *fp; fp++) + { + Virtual_column_info *vcol= (*fp)->default_value; + if (vcol && vcol->check_access(thd)) + return 1; + } + return 0; +} + + bool TABLE::vcol_fix_expr(THD *thd) { if (pos_in_table_list->placeholder() || vcol_refix_list.is_empty()) @@ -3880,6 +3893,13 @@ bool Virtual_column_info::fix_and_check_expr(THD *thd, TABLE *table) } +bool Virtual_column_info::check_access(THD *thd) +{ + return flags & VCOL_NEXTVAL && + expr->walk(&Item::check_sequence_privileges, 0, thd); +} + + /* @brief Unpack the definition of a virtual column from its linear representation diff --git a/sql/table.h b/sql/table.h index ee26e2cd38b..efa369743ce 100644 --- a/sql/table.h +++ b/sql/table.h @@ -1716,6 +1716,7 @@ public: TABLE *tmp_table, TMP_TABLE_PARAM *tmp_table_param, bool with_cleanup); + bool check_sequence_privileges(THD *thd); bool vcol_fix_expr(THD *thd); bool vcol_cleanup_expr(THD *thd); Field *find_field_by_name(LEX_CSTRING *str) const; From 3b140fed0d70709bbb2a9d71cecf39e6ba8f3ce0 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 11 Jul 2025 09:52:17 +0200 Subject: [PATCH 21/61] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 42ffde83307..5406e03c982 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=6 -MYSQL_VERSION_PATCH=22 +MYSQL_VERSION_PATCH=23 SERVER_MATURITY=stable From f73ffd1150064fb03be417126f1257b4612989a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 11 Jul 2025 15:20:06 +0300 Subject: [PATCH 22/61] MDEV-37183 Scrubbing empty record breaks recovery page_delete_rec_list_end(): Do not attempt to scrub the data of an empty record. The test case would reproduce a debug assertion failure in branches where commit 358921ce32203a9a8dd277a5ba7ac177c9e79e53 (MDEV-26938) is present. MariaDB Server 10.6 only supports ascending indexes, and in those, the empty string would always be sorted first, never last in a page. Nevertheless, we fix the bug also in 10.6, in case it would be reproducible in a slightly different scenario. 
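The failure mode can be modelled outside InnoDB. In this toy (plain C++,
not InnoDB code; LogRec, emit_memset() and replay() are invented names —
the real counterparts are the MEMSET redo log action written by
mtr_t::memset() and its replay during recovery), the assert stands in for
the debug assertion that a zero-length write would trip:

  #include <cassert>
  #include <cstdint>
  #include <vector>

  struct LogRec { uint16_t offset, len; };   // a logged "write zeroes" action

  static void emit_memset(std::vector<LogRec> &log, std::vector<uint8_t> &page,
                          uint16_t off, uint16_t len)
  {
    if (len == 0)
      return;              // the fix: an empty record has nothing to scrub
    for (uint16_t i= 0; i < len; i++)
      page[off + i]= 0;
    log.push_back({off, len});
  }

  static void replay(const std::vector<LogRec> &log, std::vector<uint8_t> &page)
  {
    for (const LogRec &r : log)
    {
      assert(r.len > 0);   // replay expects every action to have a payload
      for (uint16_t i= 0; i < r.len; i++)
        page[r.offset + i]= 0;
    }
  }

  int main()
  {
    std::vector<uint8_t> page(64, 0xff), copy= page;
    std::vector<LogRec> log;
    emit_memset(log, page, 8, 4);   // record with payload: scrubbed and logged
    emit_memset(log, page, 16, 0);  // empty record: skipped entirely
    replay(log, copy);              // "recovery" replays without tripping
  }

This is the shape of the one-line fix below: when rec_offs_data_size()
is 0, no write and no log record is emitted at all.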
Reviewed by: Thirunarayanan Balathandayuthapani --- mysql-test/suite/innodb/r/scrub_debug.result | 16 ++++++++++++++++ mysql-test/suite/innodb/t/scrub_debug.test | 15 +++++++++++++++ storage/innobase/page/page0page.cc | 5 +++-- 3 files changed, 34 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/innodb/r/scrub_debug.result b/mysql-test/suite/innodb/r/scrub_debug.result index 7b0a9fd501c..7da2e0c51b0 100644 --- a/mysql-test/suite/innodb/r/scrub_debug.result +++ b/mysql-test/suite/innodb/r/scrub_debug.result @@ -15,5 +15,21 @@ FLUSH TABLE t1 FOR EXPORT; NOT FOUND /repairman/ in t1.ibd UNLOCK TABLES; DROP TABLE t1; +# +# MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break +# crash recovery +# +SET GLOBAL innodb_limit_optimistic_insert_debug=0; +CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB; +INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3'); +SET GLOBAL innodb_limit_optimistic_insert_debug=3; +INSERT INTO t VALUES('8'); +CHECK TABLE t; +Table Op Msg_type Msg_text +test.t check status OK +SELECT COUNT(*) FROM t; +COUNT(*) +7 +DROP TABLE t; SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; diff --git a/mysql-test/suite/innodb/t/scrub_debug.test b/mysql-test/suite/innodb/t/scrub_debug.test index 8cebfca6106..b1603e961fd 100644 --- a/mysql-test/suite/innodb/t/scrub_debug.test +++ b/mysql-test/suite/innodb/t/scrub_debug.test @@ -24,5 +24,20 @@ FLUSH TABLE t1 FOR EXPORT; -- source include/search_pattern_in_file.inc UNLOCK TABLES; DROP TABLE t1; + +--echo # +--echo # MDEV-37183 innodb_immediate_scrub_data_uncompressed=ON may break +--echo # crash recovery +--echo # +SET GLOBAL innodb_limit_optimistic_insert_debug=0; +# Note: MariaDB 10.6 fails to reproduce the crash; it maps DESC to ASC. +CREATE TABLE t(a VARCHAR(1) PRIMARY KEY,INDEX(a DESC)) ENGINE=InnoDB; +INSERT INTO t VALUES('2'),('1'),(''),('6'),('4'),('3'); +SET GLOBAL innodb_limit_optimistic_insert_debug=3; +INSERT INTO t VALUES('8'); +CHECK TABLE t; +SELECT COUNT(*) FROM t; +DROP TABLE t; + SET GLOBAL INNODB_LIMIT_OPTIMISTIC_INSERT_DEBUG=@save_debug; SET GLOBAL INNODB_IMMEDIATE_SCRUB_DATA_UNCOMPRESSED=@save_scrub; diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index e78ea8b75d4..ae9031f3ba1 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -977,8 +977,9 @@ page_delete_rec_list_end( size+= s; n_recs++; - if (scrub) - mtr->memset(block, rec2 - page, rec_offs_data_size(offsets), 0); + if (UNIV_LIKELY(!scrub)); + else if (size_t size= rec_offs_data_size(offsets)) + mtr->memset(block, rec2 - page, size, 0); rec2= page_rec_get_next(rec2); } From 7fbbbc983f054a9c9c7f36ea8a9778ff3793a151 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 11 Jul 2025 16:07:08 +0300 Subject: [PATCH 23/61] MDEV-36330: SERIALIZABLE read inconsistency At TRANSACTION ISOLATION LEVEL SERIALIZABLE, InnoDB would fail to flag a write/read conflict, which would be a violation already at the more relaxed REPEATABLE READ level when innodb_snapshot_isolation=ON. Fix: Create a read view and start the transaction at the same time. Thus, lock checks will be able to consult the correct read view to flag ER_CHECKREAD if we are about to lock a record that was committed after the start of our transaction. innobase_start_trx_and_assign_read_view(): At any other isolation level than READ UNCOMMITTED, do create a read view. 
This is needed for the correct operation of START TRANSACTION WITH CONSISTENT SNAPSHOT. ha_innobase::store_lock(): At SERIALIZABLE isolation level, if the transaction was not started yet, start it and open a read view. An alternative way to achieve this would be to make trans_begin() treat START TRANSACTION (or BEGIN) in the same way as START TRANSACTION WITH CONSISTENT SNAPSHOT when the isolation level is SERIALIZABLE. innodb_isolation_level(const THD*): A simpler version of innobase_map_isolation_level(). Compared to earlier, we will return READ UNCOMMITTED also if the :newraw option is set for the InnoDB system tablespace. Reviewed by: Vladislav Lesin --- .../suite/innodb/r/lock_isolation.result | 52 +++++++++++- mysql-test/suite/innodb/t/lock_isolation.test | 46 ++++++++++- storage/innobase/handler/ha_innodb.cc | 82 ++++++++----------- .../trx/cons_snapshot_serializable.rdiff | 10 +-- .../trx/level_read_committed.rdiff | 11 --- .../trx/level_read_uncommitted.rdiff | 2 +- 6 files changed, 128 insertions(+), 75 deletions(-) delete mode 100644 storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff diff --git a/mysql-test/suite/innodb/r/lock_isolation.result b/mysql-test/suite/innodb/r/lock_isolation.result index 1e1625ae458..7c24ed01f4c 100644 --- a/mysql-test/suite/innodb/r/lock_isolation.result +++ b/mysql-test/suite/innodb/r/lock_isolation.result @@ -166,7 +166,6 @@ SELECT * FROM t FORCE INDEX (b) FOR UPDATE; a b 1 NULL COMMIT; -disconnect con_weird; connection consistent; SELECT * FROM t FORCE INDEX (b) FOR UPDATE; a b @@ -230,9 +229,58 @@ UPDATE t SET b=4 WHERE a=1; connection consistent; SELECT * FROM t WHERE a=1 FOR UPDATE; ERROR HY000: Record has changed since last read in table 't' -disconnect consistent; disconnect disable_purging; connection default; SET DEBUG_SYNC="RESET"; DROP TABLE t; +CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +BEGIN; +INSERT INTO t1 SET a=1; +connection con_weird; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; +INSERT INTO t2 SET a=1; +connection consistent; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; +INSERT INTO t2 SET a=2; +connection default; +COMMIT; +connection con_weird; +SELECT * FROM t1; +a +1 +COMMIT; +connection consistent; +SELECT * FROM t1; +ERROR HY000: Record has changed since last read in table 't1' +COMMIT; +connection default; +BEGIN; +INSERT INTO t1 SET a=2; +connection con_weird; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +INSERT INTO t2 SET a=3; +connection consistent; +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +INSERT INTO t2 SET a=2; +connection default; +COMMIT; +connection con_weird; +SELECT * FROM t1; +a +1 +2 +COMMIT; +disconnect con_weird; +connection consistent; +SELECT * FROM t1; +ERROR HY000: Record has changed since last read in table 't1' +COMMIT; +disconnect consistent; +connection default; +DROP TABLE t1,t2; # End of 10.6 tests diff --git a/mysql-test/suite/innodb/t/lock_isolation.test b/mysql-test/suite/innodb/t/lock_isolation.test index b332f2e867a..3c5544321c7 100644 --- a/mysql-test/suite/innodb/t/lock_isolation.test +++ b/mysql-test/suite/innodb/t/lock_isolation.test @@ -174,7 +174,6 @@ ROLLBACK; --reap SELECT * FROM t FORCE INDEX (b) FOR UPDATE; COMMIT; ---disconnect con_weird --connection consistent SELECT * FROM t FORCE INDEX (b) FOR UPDATE; @@ -246,12 +245,55 @@ UPDATE t SET b=4 WHERE a=1; --connection 
consistent --error ER_CHECKREAD SELECT * FROM t WHERE a=1 FOR UPDATE; ---disconnect consistent --disconnect disable_purging --connection default SET DEBUG_SYNC="RESET"; DROP TABLE t; +CREATE TABLE t1(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +CREATE TABLE t2(a INT) ENGINE=InnoDB STATS_PERSISTENT=0; +BEGIN; INSERT INTO t1 SET a=1; +--connection con_weird +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; INSERT INTO t2 SET a=1; +--connection consistent +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +BEGIN; INSERT INTO t2 SET a=2; +--connection default +COMMIT; +--connection con_weird +SELECT * FROM t1; +COMMIT; +--connection consistent +--disable_ps2_protocol +--error ER_CHECKREAD +SELECT * FROM t1; +--enable_ps2_protocol +COMMIT; +--connection default +BEGIN; INSERT INTO t1 SET a=2; +--connection con_weird +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=3; +--connection consistent +SET TRANSACTION ISOLATION LEVEL SERIALIZABLE; +START TRANSACTION WITH CONSISTENT SNAPSHOT; INSERT INTO t2 SET a=2; +--connection default +COMMIT; +--connection con_weird +SELECT * FROM t1; +COMMIT; +--disconnect con_weird +--connection consistent +--disable_ps2_protocol +--error ER_CHECKREAD +SELECT * FROM t1; +--enable_ps2_protocol +COMMIT; +--disconnect consistent +--connection default +DROP TABLE t1,t2; + --source include/wait_until_count_sessions.inc --echo # End of 10.6 tests diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 3f84070064f..f7aafbbf9ef 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -116,8 +116,6 @@ simple_thread_local ha_handler_stats *mariadb_stats; #include #include // TT_FOR_UPGRADE -#define thd_get_trx_isolation(X) ((enum_tx_isolation)thd_tx_isolation(X)) - extern "C" void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all); unsigned long long thd_get_query_id(const MYSQL_THD thd); void thd_clear_error(MYSQL_THD thd); @@ -821,14 +819,16 @@ innodb_tmpdir_validate( return(0); } -/******************************************************************//** -Maps a MySQL trx isolation level code to the InnoDB isolation level code -@return InnoDB isolation level */ -static inline -uint -innobase_map_isolation_level( -/*=========================*/ - enum_tx_isolation iso); /*!< in: MySQL isolation level code */ +/** @return the current transaction isolation level */ +static inline uint innodb_isolation_level(const THD *thd) noexcept +{ + static_assert(ISO_REPEATABLE_READ == TRX_ISO_REPEATABLE_READ, ""); + static_assert(ISO_SERIALIZABLE == TRX_ISO_SERIALIZABLE, ""); + static_assert(ISO_READ_COMMITTED == TRX_ISO_READ_COMMITTED, ""); + static_assert(ISO_READ_UNCOMMITTED == TRX_ISO_READ_UNCOMMITTED, ""); + return high_level_read_only + ? ISO_READ_UNCOMMITTED : (thd_tx_isolation(thd) & 3); +} /** Gets field offset for a field in a table. @param[in] table MySQL table object @@ -4470,21 +4470,18 @@ innobase_start_trx_and_assign_read_view( trx_start_if_not_started_xa(trx, false); - /* Assign a read view if the transaction does not have it yet. - Do this only if transaction is using REPEATABLE READ isolation - level. */ - trx->isolation_level = innobase_map_isolation_level( - thd_get_trx_isolation(thd)) & 3; + /* Assign a read view if the transaction does not have one yet. + Skip this for the READ UNCOMMITTED isolation level. 
*/ + trx->isolation_level = innodb_isolation_level(thd) & 3; - if (trx->isolation_level == TRX_ISO_REPEATABLE_READ) { + if (trx->isolation_level != TRX_ISO_READ_UNCOMMITTED) { trx->read_view.open(trx); } else { push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, HA_ERR_UNSUPPORTED, "InnoDB: WITH CONSISTENT SNAPSHOT" - " was ignored because this phrase" - " can only be used with" - " REPEATABLE READ isolation level."); + " is ignored at READ UNCOMMITTED" + " isolation level."); } /* Set the MySQL flag to mark that there is an active transaction */ @@ -16031,31 +16028,6 @@ ha_innobase::start_stmt( DBUG_RETURN(0); } -/******************************************************************//** -Maps a MySQL trx isolation level code to the InnoDB isolation level code -@return InnoDB isolation level */ -static inline -uint -innobase_map_isolation_level( -/*=========================*/ - enum_tx_isolation iso) /*!< in: MySQL isolation level code */ -{ - if (UNIV_UNLIKELY(srv_force_recovery >= SRV_FORCE_NO_UNDO_LOG_SCAN) - || UNIV_UNLIKELY(srv_read_only_mode)) { - return TRX_ISO_READ_UNCOMMITTED; - } - switch (iso) { - case ISO_REPEATABLE_READ: return(TRX_ISO_REPEATABLE_READ); - case ISO_READ_COMMITTED: return(TRX_ISO_READ_COMMITTED); - case ISO_SERIALIZABLE: return(TRX_ISO_SERIALIZABLE); - case ISO_READ_UNCOMMITTED: return(TRX_ISO_READ_UNCOMMITTED); - } - - ut_error; - - return(0); -} - /******************************************************************//** As MySQL will execute an external lock for every new table it uses when it starts to process an SQL statement (an exception is when MySQL calls @@ -16520,19 +16492,29 @@ ha_innobase::store_lock( Be careful to ignore TL_IGNORE if we are going to do something with only 'real' locks! */ - /* If no MySQL table is in use, we need to set the isolation level + /* If no table handle is open, we need to set the isolation level of the transaction. */ if (lock_type != TL_IGNORE && trx->n_mysql_tables_in_use == 0) { - trx->isolation_level = innobase_map_isolation_level( - (enum_tx_isolation) thd_tx_isolation(thd)) & 3; - - if (trx->isolation_level <= TRX_ISO_READ_COMMITTED) { - + switch ((trx->isolation_level + = innodb_isolation_level(thd) & 3)) { + case ISO_REPEATABLE_READ: + break; + case ISO_READ_COMMITTED: + case ISO_READ_UNCOMMITTED: /* At low transaction isolation levels we let each consistent read set its own snapshot */ trx->read_view.close(); + break; + case ISO_SERIALIZABLE: + auto trx_state = trx->state; + if (trx_state == TRX_STATE_NOT_STARTED) { + trx_start_if_not_started(trx, false); + trx->read_view.open(trx); + } else { + ut_ad(trx_state == TRX_STATE_ACTIVE); + } } } diff --git a/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff b/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff index 2860d5cb0b8..b2251a7222a 100644 --- a/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff +++ b/storage/innobase/mysql-test/storage_engine/trx/cons_snapshot_serializable.rdiff @@ -1,14 +1,6 @@ --- suite/storage_engine/trx/cons_snapshot_serializable.result +++ suite/storage_engine/trx/cons_snapshot_serializable.reject -@@ -5,12 +5,15 @@ - CREATE TABLE t1 (a ) ENGINE= ; - SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE; - START TRANSACTION WITH CONSISTENT SNAPSHOT; -+Warnings: -+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level. 
- connection con2;
- INSERT INTO t1 (a) VALUES (1);
- connection con1;
+@@ -11,6 +11,7 @@
  # If consistent read works on this isolation level (SERIALIZABLE), the following SELECT should not return the value we inserted (1)
  SELECT a FROM t1;
  a
diff --git a/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff b/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff
deleted file mode 100644
index d0a846ee1f7..00000000000
--- a/storage/innobase/mysql-test/storage_engine/trx/level_read_committed.rdiff
+++ /dev/null
@@ -1,11 +0,0 @@
---- suite/storage_engine/trx/level_read_committed.result
-+++ suite/storage_engine/trx/level_read_committed.reject
-@@ -77,6 +77,8 @@
- CREATE TABLE t1 (a ) ENGINE= ;
- SET SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED;
- START TRANSACTION WITH CONSISTENT SNAPSHOT;
-+Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
- connection con2;
- INSERT INTO t1 (a) VALUES (1);
- connection con1;
diff --git a/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff b/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
index ee483dd64bb..756b8626f76 100644
--- a/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
+++ b/storage/innobase/mysql-test/storage_engine/trx/level_read_uncommitted.rdiff
@@ -5,7 +5,7 @@
  SET SESSION TRANSACTION ISOLATION LEVEL READ UNCOMMITTED;
  START TRANSACTION WITH CONSISTENT SNAPSHOT;
 +Warnings:
-+Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT was ignored because this phrase can only be used with REPEATABLE READ isolation level.
++Warning 138 InnoDB: WITH CONSISTENT SNAPSHOT is ignored at READ UNCOMMITTED isolation level.
 connection con2;
 INSERT INTO t1 (a) VALUES (1);
 connection con1;

From 4d19e55441fe92fc483c795b020240884d78f46f Mon Sep 17 00:00:00 2001
From: Monty
Date: Sat, 12 Jul 2025 14:59:11 +0300
Subject: [PATCH 24/61] MDEV-36858 MariaDB MyISAM secondary indexes silently
 break for tables > 10B rows

The problem was a bug in my_qsort from 1980 where we converted the
difference of two pointers to int instead of longlong. This causes the
sort to fail if the size of the sort array is > 2147483648 bytes.

In the case of MyISAM recovery, we are sorting pointers, which limited
the maximum number of elements to 268435456. In the case of a longlong
and 6-byte pointers, this corresponds to a sort buffer of about 5G.

Fixed by changing the cast to longlong.
---
 mysys/mf_qsort.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mysys/mf_qsort.c b/mysys/mf_qsort.c
index 4dee20750c0..fbd75451d9f 100644
--- a/mysys/mf_qsort.c
+++ b/mysys/mf_qsort.c
@@ -38,7 +38,7 @@ do { \
    if (swap_ptrs) \
    { \
      reg1 char **a = (char**) (A), **b = (char**) (B); \
-     char *tmp = *a; *a++ = *b; *b++ = tmp; \
+     char *tmp = *a; *a = *b; *b = tmp; \
    } \
    else \
    { \
@@ -190,16 +190,16 @@ qsort_t my_qsort(void *base_ptr, size_t count, size_t size, qsort_cmp cmp)
     This ensures that the stack is keept small.
   */
 
-    if ((int) (high_ptr - low) <= 0)
+    if ((longlong) (high_ptr - low) <= 0)
     {
-      if ((int) (high - low_ptr) <= 0)
+      if ((longlong) (high - low_ptr) <= 0)
       {
         POP(low, high);			/* Nothing more to sort */
       }
       else
        low = low_ptr;			/* Ignore small left part. */
     }
-    else if ((int) (high - low_ptr) <= 0)
+    else if ((longlong) (high - low_ptr) <= 0)
      high = high_ptr;			/* Ignore small right part.
---
 mysys/mf_qsort.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/mysys/mf_qsort.c b/mysys/mf_qsort.c
index 4dee20750c0..fbd75451d9f 100644
--- a/mysys/mf_qsort.c
+++ b/mysys/mf_qsort.c
@@ -38,7 +38,7 @@ do { \
   if (swap_ptrs) \
   { \
     reg1 char **a = (char**) (A), **b = (char**) (B); \
-    char *tmp = *a; *a++ = *b; *b++ = tmp; \
+    char *tmp = *a; *a = *b; *b = tmp; \
   } \
   else \
   { \
@@ -190,16 +190,16 @@ qsort_t my_qsort(void *base_ptr, size_t count, size_t size, qsort_cmp cmp)
       This ensures that the stack is keept small.
     */
-    if ((int) (high_ptr - low) <= 0)
+    if ((longlong) (high_ptr - low) <= 0)
     {
-      if ((int) (high - low_ptr) <= 0)
+      if ((longlong) (high - low_ptr) <= 0)
       {
 	POP(low, high);			/* Nothing more to sort */
       }
       else
 	low = low_ptr;			/* Ignore small left part. */
     }
-    else if ((int) (high - low_ptr) <= 0)
+    else if ((longlong) (high - low_ptr) <= 0)
      high = high_ptr;			/* Ignore small right part. */
    else if ((high_ptr - low) > (high - low_ptr))
    {

From c78e906ed549077a0cca58ae2145164f269cee77 Mon Sep 17 00:00:00 2001
From: Yuchen Pei
Date: Thu, 5 Jun 2025 16:26:10 +1000
Subject: [PATCH 25/61] MDEV-30264 Remove unused method spider_db_result::fetch_row_from_result_buffer

---
 storage/spider/spd_db_include.h | 3 ---
 storage/spider/spd_db_mysql.cc | 24 ------------------------
 storage/spider/spd_db_mysql.h | 3 ---
 3 files changed, 30 deletions(-)

diff --git a/storage/spider/spd_db_include.h b/storage/spider/spd_db_include.h
index 9b042d567ce..dd4aa02a6dc 100644
--- a/storage/spider/spd_db_include.h
+++ b/storage/spider/spd_db_include.h
@@ -850,9 +850,6 @@ public:
   virtual void free_result() = 0;
   virtual SPIDER_DB_ROW *current_row() = 0;
   virtual SPIDER_DB_ROW *fetch_row(MY_BITMAP *skips = NULL) = 0;
-  virtual SPIDER_DB_ROW *fetch_row_from_result_buffer(
-    spider_db_result_buffer *spider_res_buf
-  ) = 0;
   virtual SPIDER_DB_ROW *fetch_row_from_tmp_table(
     TABLE *tmp_table
   ) = 0;
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index f02895e644e..d0c377a5870 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -761,30 +761,6 @@ SPIDER_DB_ROW *spider_db_mbase_result::fetch_row(MY_BITMAP *skips)
   DBUG_RETURN((SPIDER_DB_ROW *) &row);
 }
 
-SPIDER_DB_ROW *spider_db_mbase_result::fetch_row_from_result_buffer(
-  spider_db_result_buffer *spider_res_buf
-) {
-  DBUG_ENTER("spider_db_mbase_result::fetch_row_from_result_buffer");
-  DBUG_PRINT("info",("spider this=%p", this));
-  if (!(row.row = mysql_fetch_row(db_result)))
-  {
-    if (mysql_errno(((spider_db_mbase *) db_conn)->db_conn))
-    {
-      store_error_num = mysql_errno(((spider_db_mbase *) db_conn)->db_conn);
-      my_message(store_error_num,
-        mysql_error(((spider_db_mbase *) db_conn)->db_conn), MYF(0));
-    } else
-      store_error_num = HA_ERR_END_OF_FILE;
-    DBUG_RETURN(NULL);
-  }
-  row.lengths = mysql_fetch_lengths(db_result);
-  row.field_count = mysql_num_fields(db_result);
-  row.row_first = row.row;
-  row.lengths_first = row.lengths;
-  row.record_size = 0;
-  DBUG_RETURN((SPIDER_DB_ROW *) &row);
-}
-
 SPIDER_DB_ROW *spider_db_mbase_result::fetch_row_from_tmp_table(
   TABLE *tmp_table
 ) {
diff --git a/storage/spider/spd_db_mysql.h b/storage/spider/spd_db_mysql.h
index d60a02e51e9..32f6f7ed9a9 100644
--- a/storage/spider/spd_db_mysql.h
+++ b/storage/spider/spd_db_mysql.h
@@ -301,9 +301,6 @@ public:
   void free_result() override;
   SPIDER_DB_ROW *current_row() override;
   SPIDER_DB_ROW *fetch_row(MY_BITMAP *) override;
-  SPIDER_DB_ROW *fetch_row_from_result_buffer(
-    spider_db_result_buffer *spider_res_buf
-  ) override;
   SPIDER_DB_ROW *fetch_row_from_tmp_table(
     TABLE *tmp_table
   ) override;

From a3aab082ff6441c181682ed73cfd32bc533e2310 Mon Sep 17 00:00:00 2001
From: Yuchen Pei
Date: Thu, 5 Jun 2025 17:45:54 +1000
Subject: [PATCH 26/61] MDEV-27474 Spider: remove #WITH_PARTITION_STORAGE_ENGINE

---
 storage/spider/ha_spider.cc | 120 ----------------------
 storage/spider/ha_spider.h | 4 -
 storage/spider/spd_conn.cc | 14 ---
 storage/spider/spd_copy_tables.cc | 2 -
 storage/spider/spd_group_by_handler.cc | 12 ---
 storage/spider/spd_include.h | 14 ---
 storage/spider/spd_param.cc | 8 --
 storage/spider/spd_param.h | 4 -
 storage/spider/spd_ping_table.cc | 4 -
 storage/spider/spd_table.cc | 134 -------------------------
 storage/spider/spd_table.h | 12 ---
 storage/spider/spd_trx.cc | 2 -
 12 files changed, 330 deletions(-)

diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc
index 05f3a9d904f..6e980a014a7 100644
---
a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -63,9 +63,7 @@ void ha_spider::init_fields() spider_thread_id = 0; trx_conn_adjustment = 0; search_link_query_id = 0; -#ifdef WITH_PARTITION_STORAGE_ENGINE partition_handler = NULL; -#endif #ifdef HA_MRR_USE_DEFAULT_IMPL multi_range_keys = NULL; mrr_key_buff = NULL; @@ -143,9 +141,7 @@ ha_spider::~ha_spider() { DBUG_ENTER("ha_spider::~ha_spider"); DBUG_PRINT("info",("spider this=%p", this)); -#ifdef WITH_PARTITION_STORAGE_ENGINE partition_handler = NULL; -#endif if (wide_handler_owner) { spider_free(spider_current_trx, wide_handler, MYF(0)); @@ -199,17 +195,14 @@ int ha_spider::open( ha_spider *spider, *owner; bool wide_handler_alloc = FALSE; SPIDER_WIDE_SHARE *wide_share; -#ifdef WITH_PARTITION_STORAGE_ENGINE uint part_num; bool partition_handler_alloc = FALSE; ha_spider **wide_handler_handlers = NULL; ha_partition *clone_source; -#endif DBUG_ENTER("ha_spider::open"); DBUG_PRINT("info",("spider this=%p", this)); dup_key_idx = (uint) -1; -#ifdef WITH_PARTITION_STORAGE_ENGINE table->file->get_no_parts("", &part_num); if (part_num) { @@ -223,13 +216,10 @@ int ha_spider::open( is_clone = TRUE; } } else { -#endif spider = this; owner = this; -#ifdef WITH_PARTITION_STORAGE_ENGINE clone_source = NULL; } -#endif if (!spider->wide_handler) { uchar *searched_bitmap; @@ -240,7 +230,6 @@ int ha_spider::open( uchar *rnd_read_bitmap; uchar *rnd_write_bitmap; if (!(wide_handler = (SPIDER_WIDE_HANDLER *) -#ifdef WITH_PARTITION_STORAGE_ENGINE spider_bulk_malloc(spider_current_trx, SPD_MID_HA_SPIDER_OPEN_1, MYF(MY_WME | MY_ZEROFILL), &wide_handler, sizeof(SPIDER_WIDE_HANDLER), &searched_bitmap, @@ -260,25 +249,6 @@ int ha_spider::open( &partition_handler, (uint) sizeof(SPIDER_PARTITION_HANDLER), NullS) -#else - spider_bulk_malloc(spider_current_trx, SPD_MID_HA_SPIDER_OPEN_2, MYF(MY_WME | MY_ZEROFILL), - &wide_handler, sizeof(SPIDER_WIDE_HANDLER), - &searched_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - &ft_discard_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - &position_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - &idx_read_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - &idx_write_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - &rnd_read_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - &rnd_write_bitmap, - (uint) sizeof(uchar) * my_bitmap_buffer_size(table->read_set), - NullS) -#endif ) ) { error_num = HA_ERR_OUT_OF_MEM; @@ -293,9 +263,7 @@ int ha_spider::open( wide_handler->idx_write_bitmap = idx_write_bitmap; wide_handler->rnd_read_bitmap = rnd_read_bitmap; wide_handler->rnd_write_bitmap = rnd_write_bitmap; -#ifdef WITH_PARTITION_STORAGE_ENGINE wide_handler->partition_handler = partition_handler; -#endif wide_handler->owner = owner; if (table_share->tmp_table == NO_TMP_TABLE) wide_handler->top_share = table->s; @@ -311,7 +279,6 @@ int ha_spider::open( wide_share = share->wide_share; -#ifdef WITH_PARTITION_STORAGE_ENGINE DBUG_PRINT("info",("spider create partition_handler")); DBUG_PRINT("info",("spider table=%p", table)); partition_handler->table = table; @@ -336,7 +303,6 @@ int ha_spider::open( thr_lock_data_init(&wide_share->lock, &wide_handler->lock, NULL); } -#endif init_sql_alloc_size = spider_param_init_sql_alloc_size(thd, share->init_sql_alloc_size); @@ -408,7 +374,6 @@ int ha_spider::open( if (is_clone) { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (part_num) { for 
(roop_count = 0; roop_count < (int) part_num; roop_count++) @@ -421,7 +386,6 @@ int ha_spider::open( } } } -#endif wide_handler->external_lock_type = pt_clone_source_handler->wide_handler->external_lock_type; @@ -457,7 +421,6 @@ error_reset: blob_buff = NULL; error_init_blob_buff: error_init_result_list: -#ifdef WITH_PARTITION_STORAGE_ENGINE if (partition_handler_alloc) { wide_share = share->wide_share; @@ -465,7 +428,6 @@ error_init_result_list: owner->partition_handler = NULL; } partition_handler = NULL; -#endif spider_free_share(share); share = NULL; if (conn_keys) @@ -477,12 +439,10 @@ error_get_share: if (wide_handler_alloc) { spider_free(spider_current_trx, wide_handler, MYF(0)); -#ifdef WITH_PARTITION_STORAGE_ENGINE if (wide_handler_handlers) { wide_handler_handlers[0]->wide_handler = NULL; } -#endif spider->wide_handler = NULL; owner->wide_handler = NULL; owner->wide_handler_owner = FALSE; @@ -586,9 +546,7 @@ int ha_spider::close() spider_free(spider_current_trx, conn_keys, MYF(0)); conn_keys = NULL; } -#ifdef WITH_PARTITION_STORAGE_ENGINE partition_handler = NULL; -#endif if (wide_handler_owner) { spider_free(spider_current_trx, wide_handler, MYF(0)); @@ -683,7 +641,6 @@ THR_LOCK_DATA **ha_spider::store_lock( ) { DBUG_ENTER("ha_spider::store_lock"); DBUG_PRINT("info",("spider this=%p", this)); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( wide_handler->stage == SPD_HND_STAGE_STORE_LOCK && wide_handler->stage_executor != this) @@ -692,7 +649,6 @@ THR_LOCK_DATA **ha_spider::store_lock( } wide_handler->stage = SPD_HND_STAGE_STORE_LOCK; wide_handler->stage_executor = this; -#endif wide_handler->lock_table_type = 0; if (lock_type == TL_IGNORE) { @@ -793,7 +749,6 @@ THR_LOCK_DATA **ha_spider::store_lock( !spider_param_local_lock_table(thd) ) { wide_handler->lock_table_type = 1; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (partition_handler && partition_handler->handlers) { uint roop_count; @@ -808,11 +763,8 @@ THR_LOCK_DATA **ha_spider::store_lock( } } } else { -#endif store_error_num = append_lock_tables_list(); -#ifdef WITH_PARTITION_STORAGE_ENGINE } -#endif } } else { DBUG_PRINT("info",("spider default lock route")); @@ -828,7 +780,6 @@ THR_LOCK_DATA **ha_spider::store_lock( spider_param_semi_table_lock(thd, wide_handler->semi_table_lock) ) { wide_handler->lock_table_type = 2; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (partition_handler && partition_handler->handlers) { uint roop_count; @@ -844,11 +795,8 @@ THR_LOCK_DATA **ha_spider::store_lock( } } } else { -#endif store_error_num = append_lock_tables_list(); -#ifdef WITH_PARTITION_STORAGE_ENGINE } -#endif } } if ( @@ -987,7 +935,6 @@ int ha_spider::start_stmt( thr_lock_type lock_type ) { DBUG_ENTER("ha_spider::start_stmt"); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( wide_handler->stage == SPD_HND_STAGE_START_STMT && wide_handler->stage_executor != this) @@ -996,7 +943,6 @@ int ha_spider::start_stmt( } wide_handler->stage = SPD_HND_STAGE_START_STMT; wide_handler->stage_executor = this; -#endif DBUG_RETURN(0); } @@ -1064,10 +1010,8 @@ int ha_spider::reset() #ifdef INFO_KIND_FORCE_LIMIT_BEGIN wide_handler->info_limit = 9223372036854775807LL; #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE wide_handler->stage = SPD_HND_STAGE_NONE; wide_handler->stage_executor = NULL; -#endif } if (!(tmp_trx = spider_get_trx(thd, TRUE, &error_num2))) { @@ -1154,7 +1098,6 @@ int ha_spider::extra( DBUG_ENTER("ha_spider::extra"); DBUG_PRINT("info",("spider this=%p", this)); DBUG_PRINT("info",("spider operation=%d", (int) operation)); -#ifdef 
WITH_PARTITION_STORAGE_ENGINE if ( wide_handler->stage == SPD_HND_STAGE_EXTRA && wide_handler->stage_executor != this) @@ -1163,7 +1106,6 @@ int ha_spider::extra( } wide_handler->stage = SPD_HND_STAGE_EXTRA; wide_handler->stage_executor = this; -#endif switch (operation) { case HA_EXTRA_QUICK: @@ -1173,13 +1115,11 @@ int ha_spider::extra( if (!is_clone) { wide_handler->keyread = TRUE; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (wide_handler->update_request) { if (check_partitioned()) wide_handler->keyread = FALSE; } -#endif } break; case HA_EXTRA_NO_KEYREAD: @@ -1505,9 +1445,7 @@ int ha_spider::index_read_map_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -1708,9 +1646,7 @@ int ha_spider::index_read_last_map_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -1947,9 +1883,7 @@ int ha_spider::index_first_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -2134,9 +2068,7 @@ int ha_spider::index_last_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -2364,9 +2296,7 @@ int ha_spider::read_range_first_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -2755,9 +2685,7 @@ int ha_spider::read_multi_range_first_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -5027,9 +4955,7 @@ int ha_spider::rnd_next_internal( if ((error_num = rnd_handler_init())) DBUG_RETURN(check_error_mode_eof(error_num)); check_direct_order_limit(); -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(TRUE); -#endif if (this->result_list.direct_limit_offset) { @@ -5499,9 +5425,7 @@ int ha_spider::ft_read_internal( if ((error_num = spider_set_conn_bg_param(this))) DBUG_RETURN(error_num); #endif -#ifdef WITH_PARTITION_STORAGE_ENGINE check_select_column(FALSE); -#endif DBUG_PRINT("info",("spider result_list.finish_flg = FALSE")); result_list.finish_flg = FALSE; result_list.record_num = 0; @@ -5656,9 +5580,7 @@ int ha_spider::info( THD *thd = ha_thd(); double sts_interval = spider_param_sts_interval(thd, share->sts_interval); int sts_mode = spider_param_sts_mode(thd, share->sts_mode); -#ifdef WITH_PARTITION_STORAGE_ENGINE int sts_sync = spider_param_sts_sync(thd, share->sts_sync); -#endif #ifndef 
WITHOUT_SPIDER_BG_SEARCH int sts_bg_mode = spider_param_sts_bg_mode(thd, share->sts_bg_mode); #endif @@ -5738,10 +5660,8 @@ int ha_spider::info( } pthread_mutex_unlock(&share->sts_mutex); sts_interval = 0; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (tmp_auto_increment_mode == 1) sts_sync = 0; -#endif } } if (flag & HA_STATUS_AUTO) @@ -5752,9 +5672,7 @@ int ha_spider::info( !share->lgtm_tblhnd_share->auto_increment_init ) { sts_interval = 0; -#ifdef WITH_PARTITION_STORAGE_ENGINE sts_sync = 0; -#endif } } if (difftime(tmp_time, share->sts_get_time) >= sts_interval) @@ -5805,9 +5723,7 @@ int ha_spider::info( } if ((error_num = spider_get_sts(share, search_link_idx, tmp_time, this, sts_interval, sts_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE sts_sync, -#endif share->sts_init ? 2 : 1, flag | (share->sts_init ? 0 : HA_STATUS_AUTO))) ) { @@ -5852,9 +5768,7 @@ int ha_spider::info( share->bg_sts_try_time = tmp_time; share->bg_sts_interval = sts_interval; share->bg_sts_mode = sts_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->bg_sts_sync = sts_sync; -#endif if (!share->bg_sts_init) { if ((error_num = spider_create_sts_thread(share))) @@ -5879,9 +5793,7 @@ int ha_spider::info( share->bg_sts_try_time = tmp_time; share->bg_sts_interval = sts_interval; share->bg_sts_mode = sts_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->bg_sts_sync = sts_sync; -#endif spider_table_add_share_to_sts_thread(share); } #endif @@ -5988,9 +5900,7 @@ ha_rows ha_spider::records_in_range( double crd_interval = spider_param_crd_interval(thd, share->crd_interval); int crd_mode = spider_param_crd_mode(thd, share->crd_mode); int crd_type = spider_param_crd_type(thd, share->crd_type); -#ifdef WITH_PARTITION_STORAGE_ENGINE int crd_sync = spider_param_crd_sync(thd, share->crd_sync); -#endif #ifndef WITHOUT_SPIDER_BG_SEARCH int crd_bg_mode = spider_param_crd_bg_mode(thd, share->crd_bg_mode); #endif @@ -6073,9 +5983,7 @@ ha_rows ha_spider::records_in_range( { if ((error_num = spider_get_crd(share, search_link_idx, tmp_time, this, table, crd_interval, crd_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE crd_sync, -#endif share->crd_init ? 2 : 1))) { pthread_mutex_unlock(&share->crd_mutex); @@ -6115,9 +6023,7 @@ ha_rows ha_spider::records_in_range( share->bg_crd_try_time = tmp_time; share->bg_crd_interval = crd_interval; share->bg_crd_mode = crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->bg_crd_sync = crd_sync; -#endif if (!share->bg_crd_init) { if ((error_num = spider_create_crd_thread(share))) @@ -6133,9 +6039,7 @@ ha_rows ha_spider::records_in_range( share->bg_crd_try_time = tmp_time; share->bg_crd_interval = crd_interval; share->bg_crd_mode = crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->bg_crd_sync = crd_sync; -#endif spider_table_add_share_to_crd_thread(share); } #endif @@ -6294,9 +6198,7 @@ int ha_spider::check_crd() THD *thd = ha_thd(); double crd_interval = spider_param_crd_interval(thd, share->crd_interval); int crd_mode = spider_param_crd_mode(thd, share->crd_mode); -#ifdef WITH_PARTITION_STORAGE_ENGINE int crd_sync = spider_param_crd_sync(thd, share->crd_sync); -#endif #ifndef WITHOUT_SPIDER_BG_SEARCH int crd_bg_mode = spider_param_crd_bg_mode(thd, share->crd_bg_mode); #endif @@ -6361,9 +6263,7 @@ int ha_spider::check_crd() { if ((error_num = spider_get_crd(share, search_link_idx, tmp_time, this, table, crd_interval, crd_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE crd_sync, -#endif share->crd_init ? 
2 : 1))) { pthread_mutex_unlock(&share->crd_mutex); @@ -6396,9 +6296,7 @@ int ha_spider::check_crd() share->bg_crd_try_time = tmp_time; share->bg_crd_interval = crd_interval; share->bg_crd_mode = crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->bg_crd_sync = crd_sync; -#endif if (!share->bg_crd_init) { if ((error_num = spider_create_crd_thread(share))) @@ -6413,9 +6311,7 @@ int ha_spider::check_crd() share->bg_crd_try_time = tmp_time; share->bg_crd_interval = crd_interval; share->bg_crd_mode = crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->bg_crd_sync = crd_sync; -#endif spider_table_add_share_to_crd_thread(share); } #endif @@ -7917,9 +7813,7 @@ int ha_spider::create( tmp_share.key_hint[roop_count].init_calc_mem(SPD_MID_HA_SPIDER_CREATE_2); DBUG_PRINT("info",("spider tmp_share.key_hint=%p", tmp_share.key_hint)); if ((error_num = spider_parse_connect_info(&tmp_share, form->s, -#ifdef WITH_PARTITION_STORAGE_ENGINE form->part_info, -#endif 1))) goto error; DBUG_PRINT("info",("spider tmp_table=%d", form->s->tmp_table)); @@ -8572,7 +8466,6 @@ const COND *ha_spider::cond_push( const COND *cond ) { DBUG_ENTER("ha_spider::cond_push"); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( wide_handler->stage == SPD_HND_STAGE_COND_PUSH && wide_handler->stage_executor != this) @@ -8581,7 +8474,6 @@ const COND *ha_spider::cond_push( } wide_handler->stage = SPD_HND_STAGE_COND_PUSH; wide_handler->stage_executor = this; -#endif wide_handler->cond_check = FALSE; if (cond) { @@ -8600,7 +8492,6 @@ const COND *ha_spider::cond_push( void ha_spider::cond_pop() { DBUG_ENTER("ha_spider::cond_pop"); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( wide_handler->stage == SPD_HND_STAGE_COND_POP && wide_handler->stage_executor != this) @@ -8609,7 +8500,6 @@ void ha_spider::cond_pop() } wide_handler->stage = SPD_HND_STAGE_COND_POP; wide_handler->stage_executor = this; -#endif if (wide_handler->condition) { SPIDER_CONDITION *tmp_cond = wide_handler->condition->next; @@ -8626,7 +8516,6 @@ int ha_spider::info_push( int error_num = 0; DBUG_ENTER("ha_spider::info_push"); DBUG_PRINT("info",("spider this=%p", this)); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( wide_handler->stage == SPD_HND_STAGE_INFO_PUSH && wide_handler->stage_executor != this) @@ -8635,7 +8524,6 @@ int ha_spider::info_push( } wide_handler->stage = SPD_HND_STAGE_INFO_PUSH; wide_handler->stage_executor = this; -#endif switch (info_type) { @@ -8644,10 +8532,8 @@ int ha_spider::info_push( DBUG_PRINT("info",("spider INFO_KIND_UPDATE_FIELDS")); wide_handler->direct_update_fields = (List *) info; wide_handler->update_request = TRUE; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (wide_handler->keyread && check_partitioned()) wide_handler->keyread = FALSE; -#endif break; #endif #ifdef INFO_KIND_UPDATE_VALUES @@ -8911,15 +8797,11 @@ void ha_spider::set_select_column_mode() if (wide_handler->external_lock_type == F_WRLCK && wide_handler->sql_command != SQLCOM_SELECT) { -#ifdef WITH_PARTITION_STORAGE_ENGINE uint part_num = 0; if (wide_handler->update_request) part_num = check_partitioned(); -#endif if ( -#ifdef WITH_PARTITION_STORAGE_ENGINE part_num || -#endif table_share->primary_key == MAX_KEY ) { /* need all columns */ @@ -8950,7 +8832,6 @@ void ha_spider::set_select_column_mode() DBUG_VOID_RETURN; } -#ifdef WITH_PARTITION_STORAGE_ENGINE void ha_spider::check_select_column(bool rnd) { THD *thd = wide_handler->trx->thd; @@ -9009,7 +8890,6 @@ void ha_spider::check_select_column(bool rnd) } DBUG_VOID_RETURN; } -#endif bool ha_spider::check_and_start_bulk_update( 
spider_bulk_upd_start bulk_upd_start diff --git a/storage/spider/ha_spider.h b/storage/spider/ha_spider.h index 5bdbdb187fa..074339c2aca 100644 --- a/storage/spider/ha_spider.h +++ b/storage/spider/ha_spider.h @@ -79,9 +79,7 @@ public: spider_string *blob_buff; SPIDER_POSITION *pushed_pos; SPIDER_POSITION pushed_pos_buf; -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARTITION_HANDLER *partition_handler; -#endif /* Whether this ha_spider is the owner of its wide_handler. */ bool wide_handler_owner = FALSE; SPIDER_WIDE_HANDLER *wide_handler = NULL; @@ -612,9 +610,7 @@ public: void set_clone_searched_bitmap(); void set_searched_bitmap_from_item_list(); void set_select_column_mode(); -#ifdef WITH_PARTITION_STORAGE_ENGINE void check_select_column(bool rnd); -#endif bool check_and_start_bulk_update( spider_bulk_upd_start bulk_upd_start ); diff --git a/storage/spider/spd_conn.cc b/storage/spider/spd_conn.cc index 67fb8f72578..67515c25053 100644 --- a/storage/spider/spd_conn.cc +++ b/storage/spider/spd_conn.cc @@ -3025,18 +3025,11 @@ void *spider_bg_sts_action( } if (spider.search_link_idx != -1 && conns[spider.search_link_idx]) { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (spider_get_sts(share, spider.search_link_idx, share->bg_sts_try_time, &spider, share->bg_sts_interval, share->bg_sts_mode, share->bg_sts_sync, 2, HA_STATUS_CONST | HA_STATUS_VARIABLE)) -#else - if (spider_get_sts(share, spider.search_link_idx, - share->bg_sts_try_time, &spider, - share->bg_sts_interval, share->bg_sts_mode, - 2, HA_STATUS_CONST | HA_STATUS_VARIABLE)) -#endif { spider.search_link_idx = -1; } @@ -3299,18 +3292,11 @@ void *spider_bg_crd_action( } if (spider.search_link_idx != -1 && conns[spider.search_link_idx]) { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (spider_get_crd(share, spider.search_link_idx, share->bg_crd_try_time, &spider, &table, share->bg_crd_interval, share->bg_crd_mode, share->bg_crd_sync, 2)) -#else - if (spider_get_crd(share, spider.search_link_idx, - share->bg_crd_try_time, &spider, &table, - share->bg_crd_interval, share->bg_crd_mode, - 2)) -#endif { spider.search_link_idx = -1; } diff --git a/storage/spider/spd_copy_tables.cc b/storage/spider/spd_copy_tables.cc index 0ea7b0dcf0c..1ae37067d0c 100644 --- a/storage/spider/spd_copy_tables.cc +++ b/storage/spider/spd_copy_tables.cc @@ -315,10 +315,8 @@ int spider_udf_get_copy_tgt_tables( if ( (error_num = spider_set_connect_info_default( tmp_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE NULL, NULL, -#endif NULL )) || (error_num = spider_set_connect_info_default_db_table( diff --git a/storage/spider/spd_group_by_handler.cc b/storage/spider/spd_group_by_handler.cc index b08108ac26e..a612ec5e382 100644 --- a/storage/spider/spd_group_by_handler.cc +++ b/storage/spider/spd_group_by_handler.cc @@ -1429,7 +1429,6 @@ group_by_handler *spider_create_group_by_handler( do { DBUG_PRINT("info",("spider from=%p", from)); ++table_count; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (from->table->part_info) { DBUG_PRINT("info",("spider partition handler")); @@ -1442,7 +1441,6 @@ group_by_handler *spider_create_group_by_handler( DBUG_RETURN(NULL); } } -#endif } while ((from = from->next_local)); if (!(table_holder= spider_create_table_holder(table_count))) @@ -1451,7 +1449,6 @@ group_by_handler *spider_create_group_by_handler( my_bitmap_init(&skips, NULL, query->select->elements, TRUE); table_idx = 0; from = query->from; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (from->table->part_info) { partition_info *part_info = from->table->part_info; @@ -1462,7 +1459,6 @@ group_by_handler 
*spider_create_group_by_handler( } else { spider = (ha_spider *) from->table->file; } -#endif share = spider->share; spider->idx_for_direct_join = table_idx; ++table_idx; @@ -1484,7 +1480,6 @@ group_by_handler *spider_create_group_by_handler( } while ((from = from->next_local)) { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (from->table->part_info) { partition_info *part_info = from->table->part_info; @@ -1495,7 +1490,6 @@ group_by_handler *spider_create_group_by_handler( } else { spider = (ha_spider *) from->table->file; } -#endif share = spider->share; spider->idx_for_direct_join = table_idx; ++table_idx; @@ -1524,7 +1518,6 @@ group_by_handler *spider_create_group_by_handler( from = query->from; do { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (from->table->part_info) { partition_info *part_info = from->table->part_info; @@ -1535,7 +1528,6 @@ group_by_handler *spider_create_group_by_handler( } else { spider = (ha_spider *) from->table->file; } -#endif share = spider->share; if (spider_param_skip_default_condition(thd, share->skip_default_condition)) @@ -1693,7 +1685,6 @@ group_by_handler *spider_create_group_by_handler( goto skip_free_table_holder; from = query->from; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (from->table->part_info) { partition_info *part_info = from->table->part_info; @@ -1704,7 +1695,6 @@ group_by_handler *spider_create_group_by_handler( } else { spider = (ha_spider *) from->table->file; } -#endif share = spider->share; lock_mode = spider_conn_lock_mode(spider); if (lock_mode) @@ -1770,7 +1760,6 @@ group_by_handler *spider_create_group_by_handler( { fields->clear_conn_holder_from_conn(); -#ifdef WITH_PARTITION_STORAGE_ENGINE if (from->table->part_info) { partition_info *part_info = from->table->part_info; @@ -1781,7 +1770,6 @@ group_by_handler *spider_create_group_by_handler( } else { spider = (ha_spider *) from->table->file; } -#endif share = spider->share; DBUG_PRINT("info",("spider s->db=%s", from->table->s->db.str)); DBUG_PRINT("info",("spider s->table_name=%s", from->table->s->table_name.str)); diff --git a/storage/spider/spd_include.h b/storage/spider/spd_include.h index d8bfbc5e342..a99c40079f1 100644 --- a/storage/spider/spd_include.h +++ b/storage/spider/spd_include.h @@ -810,7 +810,6 @@ typedef struct st_spider_lgtm_tblhnd_share ulonglong auto_increment_value; } SPIDER_LGTM_TBLHND_SHARE; -#ifdef WITH_PARTITION_STORAGE_ENGINE typedef struct st_spider_patition_handler { bool clone_bitmap_init; @@ -820,7 +819,6 @@ typedef struct st_spider_patition_handler ha_spider *owner; ha_spider **handlers; } SPIDER_PARTITION_HANDLER; -#endif typedef struct st_spider_wide_share { @@ -873,9 +871,7 @@ typedef struct st_spider_wide_handler uchar *rnd_write_bitmap; SPIDER_CONDITION *condition; void *owner; -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARTITION_HANDLER *partition_handler; -#endif List *direct_update_fields; List *direct_update_values; TABLE_SHARE *top_share; @@ -1022,9 +1018,7 @@ typedef struct st_spider_share TABLE_SHARE *table_share; SPIDER_LGTM_TBLHND_SHARE *lgtm_tblhnd_share; my_hash_value_type table_name_hash_value; -#ifdef WITH_PARTITION_STORAGE_ENGINE my_hash_value_type table_path_hash_value; -#endif volatile bool init; volatile bool init_error; @@ -1037,9 +1031,7 @@ typedef struct st_spider_share volatile time_t bg_sts_try_time; volatile double bg_sts_interval; volatile int bg_sts_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE volatile int bg_sts_sync; -#endif volatile bool bg_sts_init; volatile bool bg_sts_kill; volatile bool bg_sts_thd_wait; @@ -1054,9 +1046,7 @@ 
typedef struct st_spider_share volatile time_t bg_crd_try_time; volatile double bg_crd_interval; volatile int bg_crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE volatile int bg_crd_sync; -#endif volatile bool bg_crd_init; volatile bool bg_crd_kill; volatile bool bg_crd_thd_wait; @@ -1114,9 +1104,7 @@ typedef struct st_spider_share #endif double sts_interval; int sts_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE int sts_sync; -#endif int store_last_sts; int load_sts_at_startup; #ifndef WITHOUT_SPIDER_BG_SEARCH @@ -1124,9 +1112,7 @@ typedef struct st_spider_share #endif double crd_interval; int crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE int crd_sync; -#endif int store_last_crd; int load_crd_at_startup; int crd_type; diff --git a/storage/spider/spd_param.cc b/storage/spider/spd_param.cc index a051526981b..bbcd880fd52 100644 --- a/storage/spider/spd_param.cc +++ b/storage/spider/spd_param.cc @@ -1303,7 +1303,6 @@ static MYSQL_THDVAR_INT( SPIDER_THDVAR_OVERRIDE_VALUE_FUNC(int, crd_mode) -#ifdef WITH_PARTITION_STORAGE_ENGINE /* -1 :fallback to default 0 :No synchronization. @@ -1324,7 +1323,6 @@ static MYSQL_THDVAR_INT( ); SPIDER_THDVAR_OVERRIDE_VALUE_FUNC(int, crd_sync) -#endif /* -1 :fallback to default @@ -1425,7 +1423,6 @@ static MYSQL_THDVAR_INT( SPIDER_THDVAR_OVERRIDE_VALUE_FUNC(int, sts_mode) -#ifdef WITH_PARTITION_STORAGE_ENGINE /* -1 :fallback to default 0 :No synchronization. @@ -1446,7 +1443,6 @@ static MYSQL_THDVAR_INT( ); SPIDER_THDVAR_OVERRIDE_VALUE_FUNC(int, sts_sync) -#endif #ifndef WITHOUT_SPIDER_BG_SEARCH /* @@ -2581,9 +2577,7 @@ static struct st_mysql_sys_var* spider_system_variables[] = { MYSQL_SYSVAR(second_read), MYSQL_SYSVAR(crd_interval), MYSQL_SYSVAR(crd_mode), -#ifdef WITH_PARTITION_STORAGE_ENGINE MYSQL_SYSVAR(crd_sync), -#endif MYSQL_SYSVAR(store_last_crd), MYSQL_SYSVAR(load_crd_at_startup), MYSQL_SYSVAR(crd_type), @@ -2593,9 +2587,7 @@ static struct st_mysql_sys_var* spider_system_variables[] = { #endif MYSQL_SYSVAR(sts_interval), MYSQL_SYSVAR(sts_mode), -#ifdef WITH_PARTITION_STORAGE_ENGINE MYSQL_SYSVAR(sts_sync), -#endif MYSQL_SYSVAR(store_last_sts), MYSQL_SYSVAR(load_sts_at_startup), #ifndef WITHOUT_SPIDER_BG_SEARCH diff --git a/storage/spider/spd_param.h b/storage/spider/spd_param.h index c93d41022d5..491c2ac729c 100644 --- a/storage/spider/spd_param.h +++ b/storage/spider/spd_param.h @@ -212,12 +212,10 @@ int spider_param_crd_mode( THD *thd, int crd_mode ); -#ifdef WITH_PARTITION_STORAGE_ENGINE int spider_param_crd_sync( THD *thd, int crd_sync ); -#endif int spider_param_crd_type( THD *thd, int crd_type @@ -240,12 +238,10 @@ int spider_param_sts_mode( THD *thd, int sts_mode ); -#ifdef WITH_PARTITION_STORAGE_ENGINE int spider_param_sts_sync( THD *thd, int sts_sync ); -#endif #ifndef WITHOUT_SPIDER_BG_SEARCH int spider_param_sts_bg_mode( THD *thd, diff --git a/storage/spider/spd_ping_table.cc b/storage/spider/spd_ping_table.cc index 68254a636e5..4aeba9d77d6 100644 --- a/storage/spider/spd_ping_table.cc +++ b/storage/spider/spd_ping_table.cc @@ -406,10 +406,8 @@ create_table_mon: if ( (error_num = spider_set_connect_info_default( tmp_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE NULL, NULL, -#endif NULL )) || (error_num = spider_set_connect_info_default_dbtable( @@ -572,10 +570,8 @@ SPIDER_TABLE_MON_LIST *spider_get_ping_table_tgt( if ( (*error_num = spider_set_connect_info_default( tmp_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE NULL, NULL, -#endif NULL )) || (*error_num = spider_set_connect_info_default_dbtable( diff --git a/storage/spider/spd_table.cc 
b/storage/spider/spd_table.cc index f0184aaef62..cb6396dad60 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -141,9 +141,7 @@ extern ulonglong spider_thread_id; #ifdef HAVE_PSI_INTERFACE PSI_mutex_key spd_key_mutex_tbl; PSI_mutex_key spd_key_mutex_init_error_tbl; -#ifdef WITH_PARTITION_STORAGE_ENGINE PSI_mutex_key spd_key_mutex_wide_share; -#endif PSI_mutex_key spd_key_mutex_lgtm_tblhnd_share; PSI_mutex_key spd_key_mutex_conn; PSI_mutex_key spd_key_mutex_open_conn; @@ -167,10 +165,8 @@ PSI_mutex_key spd_key_mutex_share; PSI_mutex_key spd_key_mutex_share_sts; PSI_mutex_key spd_key_mutex_share_crd; PSI_mutex_key spd_key_mutex_share_auto_increment; -#ifdef WITH_PARTITION_STORAGE_ENGINE PSI_mutex_key spd_key_mutex_wide_share_sts; PSI_mutex_key spd_key_mutex_wide_share_crd; -#endif PSI_mutex_key spd_key_mutex_udf_table; PSI_mutex_key spd_key_mutex_mem_calc; PSI_mutex_key spd_key_thread_id; @@ -187,9 +183,7 @@ static PSI_mutex_info all_spider_mutexes[]= { { &spd_key_mutex_tbl, "tbl", PSI_FLAG_GLOBAL}, { &spd_key_mutex_init_error_tbl, "init_error_tbl", PSI_FLAG_GLOBAL}, -#ifdef WITH_PARTITION_STORAGE_ENGINE { &spd_key_mutex_wide_share, "wide_share", PSI_FLAG_GLOBAL}, -#endif { &spd_key_mutex_lgtm_tblhnd_share, "lgtm_tblhnd_share", PSI_FLAG_GLOBAL}, { &spd_key_mutex_conn, "conn", PSI_FLAG_GLOBAL}, { &spd_key_mutex_open_conn, "open_conn", PSI_FLAG_GLOBAL}, @@ -222,10 +216,8 @@ static PSI_mutex_info all_spider_mutexes[]= { &spd_key_mutex_share_sts, "share_sts", 0}, { &spd_key_mutex_share_crd, "share_crd", 0}, { &spd_key_mutex_share_auto_increment, "share_auto_increment", 0}, -#ifdef WITH_PARTITION_STORAGE_ENGINE { &spd_key_mutex_wide_share_sts, "wide_share_sts", 0}, { &spd_key_mutex_wide_share_crd, "wide_share_crd", 0}, -#endif { &spd_key_mutex_udf_table, "udf_table", 0}, { &spd_key_mutex_conn_loop_check, "conn_loop_check", 0}, }; @@ -332,14 +324,12 @@ extern pthread_mutex_t spider_thread_id_mutex; extern pthread_mutex_t spider_conn_id_mutex; extern pthread_mutex_t spider_ipport_conn_mutex; -#ifdef WITH_PARTITION_STORAGE_ENGINE HASH spider_open_wide_share; uint spider_open_wide_share_id; const char *spider_open_wide_share_func_name; const char *spider_open_wide_share_file_name; ulong spider_open_wide_share_line_no; pthread_mutex_t spider_wide_share_mutex; -#endif HASH spider_lgtm_tblhnd_share_hash; uint spider_lgtm_tblhnd_share_hash_id; @@ -1796,9 +1786,7 @@ static void spider_minus_1(SPIDER_SHARE *share, TABLE_SHARE *table_share) #endif share->sts_interval = -1; share->sts_mode = -1; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->sts_sync = -1; -#endif share->store_last_sts = -1; share->load_sts_at_startup = -1; #ifndef WITHOUT_SPIDER_BG_SEARCH @@ -1806,9 +1794,7 @@ static void spider_minus_1(SPIDER_SHARE *share, TABLE_SHARE *table_share) #endif share->crd_interval = -1; share->crd_mode = -1; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->crd_sync = -1; -#endif share->store_last_crd = -1; share->load_crd_at_startup = -1; share->crd_type = -1; @@ -1896,7 +1882,6 @@ static int spider_get_connect_info(const int type, { switch (type) { -#ifdef WITH_PARTITION_STORAGE_ENGINE case 4: if (!sub_elem || !sub_elem->part_comment) return 1; @@ -1911,7 +1896,6 @@ static int spider_get_connect_info(const int type, part_elem->part_comment, strlen(part_elem->part_comment)))) return HA_ERR_OUT_OF_MEM; break; -#endif case 2: if (table_share->comment.length == 0) return 1; @@ -2138,11 +2122,7 @@ int spider_parse_connect_info( spider_get_partition_info(share->table_name, 
share->table_name_length, table_share, part_info, &part_elem, &sub_elem); spider_minus_1(share, table_share); -#ifdef WITH_PARTITION_STORAGE_ENGINE for (int i = 4; i > 0; i--) -#else - for (roop_count = 2; roop_count > 0; roop_count--) -#endif { if (connect_string) { @@ -2203,9 +2183,7 @@ int spider_parse_connect_info( SPIDER_PARAM_DOUBLE("civ", crd_interval, 0); SPIDER_PARAM_INT_WITH_MAX("cmd", crd_mode, 0, 3); SPIDER_PARAM_INT_WITH_MAX("csr", casual_read, 0, 63); -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARAM_INT_WITH_MAX("csy", crd_sync, 0, 2); -#endif SPIDER_PARAM_LONG_LIST_WITH_MAX("cto", connect_timeouts, 0, 2147483647); SPIDER_PARAM_INT_WITH_MAX("ctp", crd_type, 0, 2); @@ -2291,9 +2269,7 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("srv", server_names); SPIDER_PARAM_DOUBLE("ssr", semi_split_read, 0); SPIDER_PARAM_LONGLONG("ssl", semi_split_read_limit, 0); -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARAM_INT_WITH_MAX("ssy", sts_sync, 0, 2); -#endif SPIDER_PARAM_INT_WITH_MAX("stc", semi_table_lock_conn, 0, 1); SPIDER_PARAM_INT_WITH_MAX("stl", semi_table_lock, 0, 1); SPIDER_PARAM_LONGLONG("srs", static_records_for_status, 0); @@ -2337,13 +2313,9 @@ int spider_parse_connect_info( SPIDER_PARAM_STR_LIST("database", tgt_dbs); SPIDER_PARAM_STR_LIST("password", tgt_passwords); SPIDER_PARAM_INT_WITH_MAX("sts_mode", sts_mode, 1, 2); -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARAM_INT_WITH_MAX("sts_sync", sts_sync, 0, 2); -#endif SPIDER_PARAM_INT_WITH_MAX("crd_mode", crd_mode, 0, 3); -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARAM_INT_WITH_MAX("crd_sync", crd_sync, 0, 2); -#endif SPIDER_PARAM_INT_WITH_MAX("crd_type", crd_type, 0, 2); SPIDER_PARAM_LONGLONG("priority", priority, 0); #ifndef WITHOUT_SPIDER_BG_SEARCH @@ -3134,10 +3106,8 @@ int spider_parse_connect_info( if ((error_num = spider_set_connect_info_default( share, -#ifdef WITH_PARTITION_STORAGE_ENGINE part_elem, sub_elem, -#endif table_share ))) goto error; @@ -3473,10 +3443,8 @@ error_alloc_conn_string: int spider_set_connect_info_default( SPIDER_SHARE *share, /* The `SPIDER_SHARE' to set default connect info */ -#ifdef WITH_PARTITION_STORAGE_ENGINE partition_element *part_elem, /* partition info used as input */ partition_element *sub_elem, /* subpartition info used as input */ -#endif TABLE_SHARE *table_share /* table share info used as input */ ) { bool check_socket; @@ -3647,13 +3615,8 @@ int spider_set_connect_info_default( if ( !(share->tgt_table_names[roop_count] = spider_create_table_name_string( table_share->table_name.str, -#ifdef WITH_PARTITION_STORAGE_ENGINE (part_elem ? part_elem->partition_name : NULL), (sub_elem ? 
sub_elem->partition_name : NULL) -#else - NULL, - NULL -#endif )) ) { DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -4159,9 +4122,7 @@ int spider_create_conn_keys( SPIDER_SHARE *spider_create_share( const char *table_name, TABLE_SHARE *table_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info, -#endif my_hash_value_type hash_value, int *error_num ) { @@ -4210,10 +4171,8 @@ SPIDER_SHARE *spider_create_share( share->bitmap_size = bitmap_size; share->table_share = table_share; share->table_name_hash_value = hash_value; -#ifdef WITH_PARTITION_STORAGE_ENGINE share->table_path_hash_value = my_calc_hash(&spider_open_tables, (uchar*) table_share->path.str, table_share->path.length); -#endif #ifndef WITHOUT_SPIDER_BG_SEARCH share->table.s = table_share; share->table.field = table_share->field; @@ -4232,9 +4191,7 @@ SPIDER_SHARE *spider_create_share( DBUG_PRINT("info",("spider share->key_hint=%p", share->key_hint)); if ((*error_num = spider_parse_connect_info(share, table_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE part_info, -#endif 0))) goto error_parse_connect_string; @@ -4372,15 +4329,11 @@ SPIDER_SHARE *spider_get_share( int roop_count; double sts_interval; int sts_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE int sts_sync; int auto_increment_mode; -#endif double crd_interval; int crd_mode; -#ifdef WITH_PARTITION_STORAGE_ENGINE int crd_sync; -#endif int search_link_idx; uint sql_command = thd_sql_command(thd); SPIDER_Open_tables_backup open_tables_backup; @@ -4448,9 +4401,7 @@ SPIDER_SHARE *spider_get_share( { if (!(share = spider_create_share( table_name, table_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE table->part_info, -#endif hash_value, error_num ))) { @@ -4844,20 +4795,16 @@ SPIDER_SHARE *spider_get_share( SPIDER_INIT_ERROR_TABLE *spider_init_error_table; sts_interval = spider_param_sts_interval(thd, share->sts_interval); sts_mode = spider_param_sts_mode(thd, share->sts_mode); -#ifdef WITH_PARTITION_STORAGE_ENGINE sts_sync = spider_param_sts_sync(thd, share->sts_sync); auto_increment_mode = spider_param_auto_increment_mode(thd, share->auto_increment_mode); if (auto_increment_mode == 1) sts_sync = 0; -#endif crd_interval = spider_param_crd_interval(thd, share->crd_interval); crd_mode = spider_param_crd_mode(thd, share->crd_mode); if (crd_mode == 3) crd_mode = 1; -#ifdef WITH_PARTITION_STORAGE_ENGINE crd_sync = spider_param_crd_sync(thd, share->crd_sync); -#endif time_t tmp_time = (time_t) time((time_t*) 0); pthread_mutex_lock(&share->sts_mutex); pthread_mutex_lock(&share->crd_mutex); @@ -4889,9 +4836,7 @@ SPIDER_SHARE *spider_get_share( ) && (*error_num = spider_get_sts(share, spider->search_link_idx, tmp_time, spider, sts_interval, sts_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE sts_sync, -#endif 1, HA_STATUS_VARIABLE | HA_STATUS_CONST | HA_STATUS_AUTO)) ) { if (*error_num != ER_SPIDER_SYS_TABLE_VERSION_NUM) @@ -4913,9 +4858,7 @@ SPIDER_SHARE *spider_get_share( ) && (*error_num = spider_get_crd(share, spider->search_link_idx, tmp_time, spider, table, crd_interval, crd_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE crd_sync, -#endif 1)) ) { if (*error_num != ER_SPIDER_SYS_TABLE_VERSION_NUM) @@ -5296,20 +5239,16 @@ SPIDER_SHARE *spider_get_share( SPIDER_INIT_ERROR_TABLE *spider_init_error_table; sts_interval = spider_param_sts_interval(thd, share->sts_interval); sts_mode = spider_param_sts_mode(thd, share->sts_mode); -#ifdef WITH_PARTITION_STORAGE_ENGINE sts_sync = spider_param_sts_sync(thd, share->sts_sync); auto_increment_mode = spider_param_auto_increment_mode(thd, 
share->auto_increment_mode); if (auto_increment_mode == 1) sts_sync = 0; -#endif crd_interval = spider_param_crd_interval(thd, share->crd_interval); crd_mode = spider_param_crd_mode(thd, share->crd_mode); if (crd_mode == 3) crd_mode = 1; -#ifdef WITH_PARTITION_STORAGE_ENGINE crd_sync = spider_param_crd_sync(thd, share->crd_sync); -#endif time_t tmp_time = (time_t) time((time_t*) 0); if ((spider_init_error_table = spider_get_init_error_table(spider->wide_handler->trx, share, @@ -5338,9 +5277,7 @@ SPIDER_SHARE *spider_get_share( ) && (*error_num = spider_get_sts(share, spider->search_link_idx, tmp_time, spider, sts_interval, sts_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE sts_sync, -#endif 1, HA_STATUS_VARIABLE | HA_STATUS_CONST | HA_STATUS_AUTO)) ) { if (*error_num != ER_SPIDER_SYS_TABLE_VERSION_NUM) @@ -5359,9 +5296,7 @@ SPIDER_SHARE *spider_get_share( ) && (*error_num = spider_get_crd(share, spider->search_link_idx, tmp_time, spider, table, crd_interval, crd_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE crd_sync, -#endif 1)) ) { if (*error_num != ER_SPIDER_SYS_TABLE_VERSION_NUM) @@ -5795,10 +5730,8 @@ int spider_open_all_tables( table_tables, &tmp_share, &mem_root)) || (error_num = spider_set_connect_info_default( &tmp_share, -#ifdef WITH_PARTITION_STORAGE_ENGINE NULL, NULL, -#endif NULL )) ) { @@ -6196,13 +6129,11 @@ int spider_db_done( spider_lgtm_tblhnd_share_hash.array.max_element * spider_lgtm_tblhnd_share_hash.array.size_of_element); my_hash_free(&spider_lgtm_tblhnd_share_hash); -#ifdef WITH_PARTITION_STORAGE_ENGINE spider_free_mem_calc(spider_current_trx, spider_open_wide_share_id, spider_open_wide_share.array.max_element * spider_open_wide_share.array.size_of_element); my_hash_free(&spider_open_wide_share); -#endif pthread_mutex_lock(&spider_init_error_tbl_mutex); while ((spider_init_error_table = (SPIDER_INIT_ERROR_TABLE*) my_hash_element(&spider_init_error_tables, 0))) @@ -6228,9 +6159,7 @@ int spider_db_done( pthread_mutex_destroy(&spider_open_conn_mutex); pthread_mutex_destroy(&spider_conn_mutex); pthread_mutex_destroy(&spider_lgtm_tblhnd_share_mutex); -#ifdef WITH_PARTITION_STORAGE_ENGINE pthread_mutex_destroy(&spider_wide_share_mutex); -#endif pthread_mutex_destroy(&spider_init_error_tbl_mutex); pthread_mutex_destroy(&spider_conn_id_mutex); pthread_mutex_destroy(&spider_ipport_conn_mutex); @@ -6439,12 +6368,10 @@ int spider_db_init( &spider_init_error_tbl_mutex, MY_MUTEX_INIT_FAST)) goto error_init_error_tbl_mutex_init; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (mysql_mutex_init(spd_key_mutex_wide_share, &spider_wide_share_mutex, MY_MUTEX_INIT_FAST)) goto error_wide_share_mutex_init; -#endif if (mysql_mutex_init(spd_key_mutex_lgtm_tblhnd_share, &spider_lgtm_tblhnd_share_mutex, MY_MUTEX_INIT_FAST)) goto error_lgtm_tblhnd_share_mutex_init; @@ -6489,7 +6416,6 @@ int spider_db_init( spider_init_error_tables, spider_init_error_tables.array.max_element * spider_init_error_tables.array.size_of_element); -#ifdef WITH_PARTITION_STORAGE_ENGINE if (my_hash_init(PSI_INSTRUMENT_ME, &spider_open_wide_share, spd_charset_utf8mb3_bin, 32, 0, 0, spider_wide_share_get_key, 0, 0)) @@ -6500,7 +6426,6 @@ int spider_db_init( spider_open_wide_share, spider_open_wide_share.array.max_element * spider_open_wide_share.array.size_of_element); -#endif if (my_hash_init(PSI_INSTRUMENT_ME, &spider_lgtm_tblhnd_share_hash, spd_charset_utf8mb3_bin, 32, 0, 0, spider_lgtm_tblhnd_share_hash_get_key, 0, 0)) @@ -6713,14 +6638,12 @@ error_open_connections_hash_init: spider_lgtm_tblhnd_share_hash.array.size_of_element); 
my_hash_free(&spider_lgtm_tblhnd_share_hash); error_lgtm_tblhnd_share_hash_init: -#ifdef WITH_PARTITION_STORAGE_ENGINE spider_free_mem_calc(NULL, spider_open_wide_share_id, spider_open_wide_share.array.max_element * spider_open_wide_share.array.size_of_element); my_hash_free(&spider_open_wide_share); error_open_wide_share_hash_init: -#endif spider_free_mem_calc(NULL, spider_init_error_tables_id, spider_init_error_tables.array.max_element * @@ -6745,10 +6668,8 @@ error_open_conn_mutex_init: error_conn_mutex_init: pthread_mutex_destroy(&spider_lgtm_tblhnd_share_mutex); error_lgtm_tblhnd_share_mutex_init: -#ifdef WITH_PARTITION_STORAGE_ENGINE pthread_mutex_destroy(&spider_wide_share_mutex); error_wide_share_mutex_init: -#endif pthread_mutex_destroy(&spider_init_error_tbl_mutex); error_init_error_tbl_mutex_init: pthread_mutex_destroy(&spider_ipport_conn_mutex); @@ -6815,7 +6736,6 @@ char *spider_create_table_name_string( DBUG_RETURN(res); } -#ifdef WITH_PARTITION_STORAGE_ENGINE void spider_get_partition_info( const char *table_name, uint table_name_length, @@ -6899,7 +6819,6 @@ void spider_get_partition_info( DBUG_PRINT("info",("spider no hit")); DBUG_VOID_RETURN; } -#endif int spider_get_sts( SPIDER_SHARE *share, @@ -6908,19 +6827,14 @@ int spider_get_sts( ha_spider *spider, double sts_interval, int sts_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE int sts_sync, -#endif int sts_sync_level, uint flag ) { -#ifdef WITH_PARTITION_STORAGE_ENGINE int get_type; -#endif int error_num = 0; DBUG_ENTER("spider_get_sts"); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( sts_sync == 0 ) { @@ -6954,24 +6868,16 @@ int spider_get_sts( /* copy */ get_type = 0; } -#endif -#ifdef WITH_PARTITION_STORAGE_ENGINE if (get_type == 0) spider_copy_sts_to_share(share, share->wide_share); else { -#endif error_num = spider_db_show_table_status(spider, link_idx, sts_mode, flag); -#ifdef WITH_PARTITION_STORAGE_ENGINE } -#endif -#ifdef WITH_PARTITION_STORAGE_ENGINE if (get_type >= 2) pthread_mutex_unlock(&share->wide_share->sts_mutex); -#endif if (error_num) { -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARTITION_HANDLER *partition_handler = spider->partition_handler; if ( @@ -7013,17 +6919,14 @@ int spider_get_sts( } } if (error_num) -#endif DBUG_RETURN(error_num); } -#ifdef WITH_PARTITION_STORAGE_ENGINE if (sts_sync >= sts_sync_level && get_type > 0) { spider_copy_sts_to_wide_share(share->wide_share, share); share->wide_share->sts_get_time = tmp_time; share->wide_share->sts_init = TRUE; } -#endif share->sts_get_time = tmp_time; share->sts_init = TRUE; DBUG_RETURN(0); @@ -7037,18 +6940,13 @@ int spider_get_crd( TABLE *table, double crd_interval, int crd_mode, -#ifdef WITH_PARTITION_STORAGE_ENGINE int crd_sync, -#endif int crd_sync_level ) { -#ifdef WITH_PARTITION_STORAGE_ENGINE int get_type; -#endif int error_num = 0; DBUG_ENTER("spider_get_crd"); -#ifdef WITH_PARTITION_STORAGE_ENGINE if ( crd_sync == 0 ) { @@ -7082,24 +6980,16 @@ int spider_get_crd( /* copy */ get_type = 0; } -#endif -#ifdef WITH_PARTITION_STORAGE_ENGINE if (get_type == 0) spider_copy_crd_to_share(share, share->wide_share, table->s->fields); else { -#endif error_num = spider_db_show_index(spider, link_idx, table, crd_mode); -#ifdef WITH_PARTITION_STORAGE_ENGINE } -#endif -#ifdef WITH_PARTITION_STORAGE_ENGINE if (get_type >= 2) pthread_mutex_unlock(&share->wide_share->crd_mutex); -#endif if (error_num) { -#ifdef WITH_PARTITION_STORAGE_ENGINE SPIDER_PARTITION_HANDLER *partition_handler = spider->partition_handler; if ( @@ -7142,10 +7032,8 @@ int spider_get_crd( } } 
if (error_num) -#endif DBUG_RETURN(error_num); } -#ifdef WITH_PARTITION_STORAGE_ENGINE if (crd_sync >= crd_sync_level && get_type > 0) { spider_copy_crd_to_wide_share(share->wide_share, share, @@ -7153,7 +7041,6 @@ int spider_get_crd( share->wide_share->crd_get_time = tmp_time; share->wide_share->crd_init = TRUE; } -#endif share->crd_get_time = tmp_time; share->crd_init = TRUE; DBUG_RETURN(0); @@ -8302,14 +8189,10 @@ int spider_discover_table_structure( const char *table_name = share->path.str; uint table_name_length = (uint) strlen(table_name); SPIDER_TRX *trx; -#ifdef WITH_PARTITION_STORAGE_ENGINE partition_info *part_info = thd->work_part_info; -#endif SPIDER_Open_tables_backup open_tables_backup; TABLE *table_tables; -#ifdef WITH_PARTITION_STORAGE_ENGINE uint str_len; -#endif char buf[MAX_FIELD_WIDTH]; spider_string str(buf, sizeof(buf), system_charset_info); DBUG_ENTER("spider_discover_table_structure"); @@ -8331,9 +8214,7 @@ int spider_discover_table_structure( str.q_append(share->table_name.str, share->table_name.length); str.q_append(SPIDER_SQL_LCL_NAME_QUOTE_STR, SPIDER_SQL_LCL_NAME_QUOTE_LEN); str.q_append(SPIDER_SQL_OPEN_PAREN_STR, SPIDER_SQL_OPEN_PAREN_LEN); -#ifdef WITH_PARTITION_STORAGE_ENGINE str_len = str.length(); -#endif my_hash_value_type hash_value = my_calc_hash(&spider_open_tables, (uchar*) table_name, table_name_length); if (!(trx = spider_get_trx(thd, TRUE, &error_num))) @@ -8344,14 +8225,10 @@ int spider_discover_table_structure( } share->table_charset = info->default_table_charset; share->comment = info->comment; -#ifdef WITH_PARTITION_STORAGE_ENGINE if (!part_info) { -#endif if (!(spider_share = spider_create_share(table_name, share, -#ifdef WITH_PARTITION_STORAGE_ENGINE NULL, -#endif hash_value, &error_num ))) { @@ -8383,7 +8260,6 @@ int spider_discover_table_structure( } spider_free_share_resource_only(spider_share); -#ifdef WITH_PARTITION_STORAGE_ENGINE } else { char tmp_name[FN_REFLEN + 1]; List_iterator part_it(part_info->partitions); @@ -8531,7 +8407,6 @@ int spider_discover_table_structure( &open_tables_backup, FALSE); } } -#endif if (!error_num) thd->clear_error(); @@ -8579,7 +8454,6 @@ int spider_discover_table_structure( DBUG_RETURN(HA_ERR_OUT_OF_MEM); } str.q_append(SPIDER_SQL_VALUE_QUOTE_STR, SPIDER_SQL_VALUE_QUOTE_LEN); -#ifdef WITH_PARTITION_STORAGE_ENGINE DBUG_PRINT("info",("spider part_info=%p", part_info)); if (part_info) { @@ -8615,7 +8489,6 @@ int spider_discover_table_structure( str.q_append(part_syntax, part_syntax_len); SPIDER_free_part_syntax(part_syntax, MYF(0)); } -#endif DBUG_PRINT("info",("spider str=%s", str.c_ptr_safe())); error_num = share->init_from_sql_statement_string(thd, TRUE, str.ptr(), @@ -9002,18 +8875,11 @@ void *spider_table_bg_sts_action( ("spider search_link_idx=%d", spider->search_link_idx)); if (spider->search_link_idx >= 0 && conns[spider->search_link_idx]) { -#ifdef WITH_PARTITION_STORAGE_ENGINE if (spider_get_sts(share, spider->search_link_idx, share->bg_sts_try_time, spider, share->bg_sts_interval, share->bg_sts_mode, share->bg_sts_sync, 2, HA_STATUS_CONST | HA_STATUS_VARIABLE)) -#else - if (spider_get_sts(share, spider->search_link_idx, - share->bg_sts_try_time, spider, - share->bg_sts_interval, share->bg_sts_mode, - 2, HA_STATUS_CONST | HA_STATUS_VARIABLE)) -#endif { spider->search_link_idx = -1; } diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h index 04d75065051..721a2894488 100644 --- a/storage/spider/spd_table.h +++ b/storage/spider/spd_table.h @@ -127,18 +127,14 @@ int 
spider_increase_longlong_list(
 int spider_parse_connect_info(
   SPIDER_SHARE *share,
   TABLE_SHARE *table_share,
-#ifdef WITH_PARTITION_STORAGE_ENGINE
   partition_info *part_info,
-#endif
   uint create_table
 );
 
 int spider_set_connect_info_default(
   SPIDER_SHARE *share,
-#ifdef WITH_PARTITION_STORAGE_ENGINE
   partition_element *part_elem,
   partition_element *sub_elem,
-#endif
   TABLE_SHARE *table_share
 );
@@ -186,9 +182,7 @@ void spider_free_lgtm_tblhnd_share_alloc(
 SPIDER_SHARE *spider_create_share(
   const char *table_name,
   TABLE_SHARE *table_share,
-#ifdef WITH_PARTITION_STORAGE_ENGINE
   partition_info *part_info,
-#endif
   my_hash_value_type hash_value,
   int *error_num
 );
@@ -299,7 +293,6 @@ char *spider_create_table_name_string(
   const char *sub_name
 );
 
-#ifdef WITH_PARTITION_STORAGE_ENGINE
 void spider_get_partition_info(
   const char *table_name,
   uint table_name_length,
@@ -308,7 +301,6 @@ void spider_get_partition_info(
   partition_element **part_elem,
   partition_element **sub_elem
 );
-#endif
 
 int spider_get_sts(
   SPIDER_SHARE *share,
@@ -317,9 +309,7 @@ int spider_get_sts(
   ha_spider *spider,
   double sts_interval,
   int sts_mode,
-#ifdef WITH_PARTITION_STORAGE_ENGINE
   int sts_sync,
-#endif
   int sts_sync_level,
   uint flag
 );
@@ -332,9 +322,7 @@ int spider_get_crd(
   TABLE *table,
   double crd_interval,
   int crd_mode,
-#ifdef WITH_PARTITION_STORAGE_ENGINE
   int crd_sync,
-#endif
   int crd_sync_level
 );
diff --git a/storage/spider/spd_trx.cc b/storage/spider/spd_trx.cc
index 9c70c1eb869..42d45bc8c3e 100644
--- a/storage/spider/spd_trx.cc
+++ b/storage/spider/spd_trx.cc
@@ -1023,10 +1023,8 @@ SPIDER_TRX *spider_get_trx(
   if (
     spider_set_connect_info_default(
       trx->tmp_share,
-#ifdef WITH_PARTITION_STORAGE_ENGINE
       NULL,
       NULL,
-#endif
       NULL
     ) ||
     spider_set_connect_info_default_db_table(

From 3e9aa07cce58eb73ec70cd01dc3f72f943275984 Mon Sep 17 00:00:00 2001
From: Yuchen Pei
Date: Thu, 5 Jun 2025 17:38:17 +1000
Subject: [PATCH 27/61] MDEV-30436 Spider: deduplicate some sts/crd code.

We leave spider_table_remove_share_from_crd/sts_thread and
spider_table_add_share_to_crd/sts_thread as they are, because their
bodies reference too many fields with crd/sts in the name, and merging
them with a macro would hurt debuggability.
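As a rough sketch of the deduplication pattern (hypothetical code, not
from the Spider source; the struct and helper names below are invented,
while spider_create_sts_threads()/spider_create_crd_threads() are the
functions merged by this patch):

  #include <stdio.h>
  #include <stdbool.h>

  /* Stand-ins for the PSI keys and thread entry points that differ
     between the statistics (sts) and cardinality (crd) variants. */
  struct bg_thread { const char *mutex_key, *cond_key, *entry; };

  static int create_bg_thread(struct bg_thread *t, bool is_sts)
  {
    /* Select every variant-specific piece up front... */
    t->mutex_key = is_sts ? "bg_stss" : "bg_crds";
    t->cond_key  = is_sts ? "bg_sts_syncs" : "bg_crd_syncs";
    t->entry     = is_sts ? "bg_sts_action" : "bg_crd_action";
    /* ...so the mutex/cond/thread setup and its error-unwinding path
       exist once instead of in two copied bodies. */
    printf("starting %s thread (%s, %s, %s)\n",
           is_sts ? "sts" : "crd", t->mutex_key, t->cond_key, t->entry);
    return 0;
  }

  int main(void)
  {
    struct bg_thread sts, crd;
    create_bg_thread(&sts, true);   /* replaces spider_create_sts_threads() */
    create_bg_thread(&crd, false);  /* replaces spider_create_crd_threads() */
    return 0;
  }

The merged spider_table_bg_sts_crd_action() below follows the same
shape: one body, with an is_sts flag selecting the sts- or crd-specific
fields.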
--- storage/spider/spd_table.cc | 372 +++++++++++------------------------- storage/spider/spd_table.h | 15 +- 2 files changed, 112 insertions(+), 275 deletions(-) diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc index cb6396dad60..4e85ae545b8 100644 --- a/storage/spider/spd_table.cc +++ b/storage/spider/spd_table.cc @@ -6044,12 +6044,12 @@ int spider_db_done( for (roop_count = spider_param_table_crd_thread_count() - 1; roop_count >= 0; roop_count--) { - spider_free_crd_threads(&spider_table_crd_threads[roop_count]); + spider_free_sts_crd_threads(&spider_table_crd_threads[roop_count]); } for (roop_count = spider_param_table_sts_thread_count() - 1; roop_count >= 0; roop_count--) { - spider_free_sts_threads(&spider_table_sts_threads[roop_count]); + spider_free_sts_crd_threads(&spider_table_sts_threads[roop_count]); } spider_free(NULL, spider_table_sts_threads, MYF(0)); #endif @@ -6532,7 +6532,8 @@ int spider_db_init( roop_count < (int) spider_param_table_sts_thread_count(); roop_count++) { - if ((error_num = spider_create_sts_threads(&spider_table_sts_threads[roop_count]))) + if ((error_num = spider_create_sts_crd_threads(&spider_table_sts_threads[roop_count], + true))) { goto error_init_table_sts_threads; } @@ -6541,7 +6542,8 @@ int spider_db_init( roop_count < (int) spider_param_table_crd_thread_count(); roop_count++) { - if ((error_num = spider_create_crd_threads(&spider_table_crd_threads[roop_count]))) + if ((error_num = spider_create_sts_crd_threads(&spider_table_crd_threads[roop_count], + false))) { goto error_init_table_crd_threads; } @@ -6581,13 +6583,13 @@ error_init_dbton: error_init_table_crd_threads: for (; roop_count >= 0; roop_count--) { - spider_free_crd_threads(&spider_table_crd_threads[roop_count]); + spider_free_sts_crd_threads(&spider_table_crd_threads[roop_count]); } roop_count = spider_param_table_sts_thread_count() - 1; error_init_table_sts_threads: for (; roop_count >= 0; roop_count--) { - spider_free_sts_threads(&spider_table_sts_threads[roop_count]); + spider_free_sts_crd_threads(&spider_table_sts_threads[roop_count]); } error_alloc_table_sts_crd_threads: spider_free(NULL, spider_table_sts_threads, MYF(0)); @@ -8621,32 +8623,44 @@ void spider_free_spider_object_for_share( DBUG_VOID_RETURN; } -int spider_create_sts_threads( - SPIDER_THREAD *spider_thread +int spider_create_sts_crd_threads( + SPIDER_THREAD *spider_thread, + bool is_sts ) { int error_num; - DBUG_ENTER("spider_create_sts_threads"); - if (mysql_mutex_init(spd_key_mutex_bg_stss, + DBUG_ENTER("spider_create_sts_crd_threads"); + PSI_mutex_key mutex_bg= is_sts ? spd_key_mutex_bg_stss : + spd_key_mutex_bg_crds; + PSI_cond_key cond_bg= is_sts ? spd_key_cond_bg_stss : + spd_key_cond_bg_crds; + PSI_cond_key cond_bg_syncs= is_sts ? spd_key_cond_bg_sts_syncs : + spd_key_cond_bg_crd_syncs; + if (mysql_mutex_init(mutex_bg, &spider_thread->mutex, MY_MUTEX_INIT_FAST)) { error_num = HA_ERR_OUT_OF_MEM; goto error_mutex_init; } - if (mysql_cond_init(spd_key_cond_bg_stss, + if (mysql_cond_init(cond_bg, &spider_thread->cond, NULL)) { error_num = HA_ERR_OUT_OF_MEM; goto error_cond_init; } - if (mysql_cond_init(spd_key_cond_bg_sts_syncs, + if (mysql_cond_init(cond_bg_syncs, &spider_thread->sync_cond, NULL)) { error_num = HA_ERR_OUT_OF_MEM; goto error_sync_cond_init; } - if (mysql_thread_create(spd_key_thd_bg_stss, &spider_thread->thread, - &spider_pt_attr, spider_table_bg_sts_action, (void *) spider_thread) - ) + error_num = is_sts ? 
+ mysql_thread_create(spd_key_thd_bg_stss, &spider_thread->thread, + &spider_pt_attr, spider_table_bg_sts_action, + (void *) spider_thread) : + mysql_thread_create(spd_key_thd_bg_crds, &spider_thread->thread, + &spider_pt_attr, spider_table_bg_crd_action, + (void *) spider_thread); + if (error_num) { error_num = HA_ERR_OUT_OF_MEM; goto error_thread_create; @@ -8663,11 +8677,11 @@ error_mutex_init: DBUG_RETURN(error_num); } -void spider_free_sts_threads( +void spider_free_sts_crd_threads( SPIDER_THREAD *spider_thread ) { bool thread_killed; - DBUG_ENTER("spider_free_sts_threads"); + DBUG_ENTER("spider_free_sts_crd_threads"); pthread_mutex_lock(&spider_thread->mutex); thread_killed = spider_thread->killed; spider_thread->killed = TRUE; @@ -8689,86 +8703,20 @@ void spider_free_sts_threads( DBUG_VOID_RETURN; } -int spider_create_crd_threads( - SPIDER_THREAD *spider_thread -) { - int error_num; - DBUG_ENTER("spider_create_crd_threads"); - if (mysql_mutex_init(spd_key_mutex_bg_crds, - &spider_thread->mutex, MY_MUTEX_INIT_FAST)) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_mutex_init; - } - if (mysql_cond_init(spd_key_cond_bg_crds, - &spider_thread->cond, NULL)) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_cond_init; - } - if (mysql_cond_init(spd_key_cond_bg_crd_syncs, - &spider_thread->sync_cond, NULL)) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_sync_cond_init; - } - if (mysql_thread_create(spd_key_thd_bg_crds, &spider_thread->thread, - &spider_pt_attr, spider_table_bg_crd_action, (void *) spider_thread) - ) - { - error_num = HA_ERR_OUT_OF_MEM; - goto error_thread_create; - } - DBUG_RETURN(0); - -error_thread_create: - pthread_cond_destroy(&spider_thread->sync_cond); -error_sync_cond_init: - pthread_cond_destroy(&spider_thread->cond); -error_cond_init: - pthread_mutex_destroy(&spider_thread->mutex); -error_mutex_init: - DBUG_RETURN(error_num); -} - -void spider_free_crd_threads( - SPIDER_THREAD *spider_thread -) { - bool thread_killed; - DBUG_ENTER("spider_free_crd_threads"); - pthread_mutex_lock(&spider_thread->mutex); - thread_killed = spider_thread->killed; - spider_thread->killed = TRUE; - if (!thread_killed) - { - if (spider_thread->thd_wait) - { - pthread_cond_signal(&spider_thread->cond); - } - pthread_cond_wait(&spider_thread->sync_cond, &spider_thread->mutex); - } - pthread_mutex_unlock(&spider_thread->mutex); - pthread_join(spider_thread->thread, NULL); - pthread_cond_destroy(&spider_thread->sync_cond); - pthread_cond_destroy(&spider_thread->cond); - pthread_mutex_destroy(&spider_thread->mutex); - spider_thread->thd_wait = FALSE; - spider_thread->killed = FALSE; - DBUG_VOID_RETURN; -} - -void *spider_table_bg_sts_action( - void *arg +static void *spider_table_bg_sts_crd_action( + void *arg, + bool is_sts ) { SPIDER_THREAD *thread = (SPIDER_THREAD *) arg; SPIDER_SHARE *share; SPIDER_TRX *trx; int error_num; ha_spider *spider; + TABLE *table; /* only needed for crd */ SPIDER_CONN **conns; THD *thd; my_thread_init(); - DBUG_ENTER("spider_table_bg_sts_action"); + DBUG_ENTER("spider_table_bg_sts_crd_action"); /* init start */ pthread_mutex_lock(&thread->mutex); if (!(thd = spider_create_sys_thd(thread))) @@ -8783,7 +8731,8 @@ void *spider_table_bg_sts_action( #ifdef HAVE_PSI_INTERFACE mysql_thread_set_psi_id(thd->thread_id); #endif - thd_proc_info(thd, "Spider table background statistics action handler"); + thd_proc_info(thd, "Spider table background statistics/cardinality" + " action handler"); if (!(trx = spider_get_trx(NULL, FALSE, &error_num))) { 
spider_destroy_sys_thd(thd); @@ -8799,10 +8748,6 @@ void *spider_table_bg_sts_action( trx->thd = thd; /* init end */ - if (thd->killed) - { - thread->killed = TRUE; - } if (thd->killed) { thread->killed = TRUE; @@ -8810,10 +8755,10 @@ void *spider_table_bg_sts_action( while (TRUE) { - DBUG_PRINT("info",("spider bg sts loop start")); + DBUG_PRINT("info",("spider bg sts/crd loop start")); if (thread->killed) { - DBUG_PRINT("info",("spider bg sts kill start")); + DBUG_PRINT("info",("spider bg sts/crd kill start")); trx->thd = NULL; spider_free_trx(trx, TRUE); spider_destroy_sys_thd(thd); @@ -8827,7 +8772,7 @@ void *spider_table_bg_sts_action( } if (!thread->queue_first) { - DBUG_PRINT("info",("spider bg sts has no job")); + DBUG_PRINT("info",("spider bg sts/crd has no job")); thread->thd_wait = TRUE; pthread_cond_wait(&thread->cond, &thread->mutex); thread->thd_wait = FALSE; @@ -8836,155 +8781,16 @@ void *spider_table_bg_sts_action( continue; } share = (SPIDER_SHARE *) thread->queue_first; - share->sts_working = TRUE; + if (is_sts) + share->sts_working = TRUE; + else + share->crd_working = TRUE; pthread_mutex_unlock(&thread->mutex); - - spider = share->sts_spider; - conns = spider->conns; - if (spider->search_link_idx < 0) - { - spider->wide_handler->trx = trx; - spider_trx_set_link_idx_for_all(spider); - spider->search_link_idx = spider_conn_first_link_idx(thd, - share->link_statuses, share->access_balances, spider->conn_link_idx, - share->link_count, SPIDER_LINK_STATUS_OK); - } - if (spider->search_link_idx >= 0) - { - DBUG_PRINT("info", - ("spider difftime=%f", - difftime(share->bg_sts_try_time, share->sts_get_time))); - DBUG_PRINT("info", - ("spider bg_sts_interval=%f", share->bg_sts_interval)); - if (difftime(share->bg_sts_try_time, share->sts_get_time) >= - share->bg_sts_interval) - { - if (!conns[spider->search_link_idx]) - { - spider_get_conn(share, spider->search_link_idx, - share->conn_keys[spider->search_link_idx], trx, - spider, FALSE, FALSE, &error_num); - if (conns[spider->search_link_idx]) - { - conns[spider->search_link_idx]->error_mode = 0; - } else { - spider->search_link_idx = -1; - } - } - DBUG_PRINT("info", - ("spider search_link_idx=%d", spider->search_link_idx)); - if (spider->search_link_idx >= 0 && conns[spider->search_link_idx]) - { - if (spider_get_sts(share, spider->search_link_idx, - share->bg_sts_try_time, spider, - share->bg_sts_interval, share->bg_sts_mode, - share->bg_sts_sync, - 2, HA_STATUS_CONST | HA_STATUS_VARIABLE)) - { - spider->search_link_idx = -1; - } - } - } - } - memset(spider->need_mons, 0, sizeof(int) * share->link_count); - pthread_mutex_lock(&thread->mutex); - if (thread->queue_first == thread->queue_last) - { - thread->queue_first = NULL; - thread->queue_last = NULL; - } else { - thread->queue_first = share->sts_next; - share->sts_next->sts_prev = NULL; - share->sts_next = NULL; - } - share->sts_working = FALSE; - share->sts_wait = FALSE; - if (thread->first_free_wait) - { - pthread_cond_signal(&thread->sync_cond); - pthread_cond_wait(&thread->cond, &thread->mutex); - if (thd->killed) - thread->killed = TRUE; - } - } -} - -void *spider_table_bg_crd_action( - void *arg -) { - SPIDER_THREAD *thread = (SPIDER_THREAD *) arg; - SPIDER_SHARE *share; - SPIDER_TRX *trx; - int error_num; - ha_spider *spider; - TABLE *table; - SPIDER_CONN **conns; - THD *thd; - my_thread_init(); - DBUG_ENTER("spider_table_bg_crd_action"); - /* init start */ - pthread_mutex_lock(&thread->mutex); - if (!(thd = spider_create_sys_thd(thread))) - { - thread->thd_wait = FALSE; 
- thread->killed = FALSE; - pthread_mutex_unlock(&thread->mutex); - my_thread_end(); - DBUG_RETURN(NULL); - } - SPIDER_set_next_thread_id(thd); -#ifdef HAVE_PSI_INTERFACE - mysql_thread_set_psi_id(thd->thread_id); -#endif - thd_proc_info(thd, "Spider table background cardinality action handler"); - if (!(trx = spider_get_trx(NULL, FALSE, &error_num))) - { - spider_destroy_sys_thd(thd); - thread->thd_wait = FALSE; - thread->killed = FALSE; - pthread_mutex_unlock(&thread->mutex); -#if !defined(MYSQL_DYNAMIC_PLUGIN) || !defined(_WIN32) - set_current_thd(nullptr); -#endif - my_thread_end(); - DBUG_RETURN(NULL); - } - trx->thd = thd; - /* init end */ - - while (TRUE) - { - DBUG_PRINT("info",("spider bg crd loop start")); - if (thread->killed) - { - DBUG_PRINT("info",("spider bg crd kill start")); - trx->thd = NULL; - spider_free_trx(trx, TRUE); - spider_destroy_sys_thd(thd); - pthread_cond_signal(&thread->sync_cond); - pthread_mutex_unlock(&thread->mutex); -#if !defined(MYSQL_DYNAMIC_PLUGIN) || !defined(_WIN32) - set_current_thd(nullptr); -#endif - my_thread_end(); - DBUG_RETURN(NULL); - } - if (!thread->queue_first) - { - DBUG_PRINT("info",("spider bg crd has no job")); - thread->thd_wait = TRUE; - pthread_cond_wait(&thread->cond, &thread->mutex); - thread->thd_wait = FALSE; - if (thd->killed) - thread->killed = TRUE; - continue; - } - share = (SPIDER_SHARE *) thread->queue_first; - share->crd_working = TRUE; - pthread_mutex_unlock(&thread->mutex); - table = &share->table; - spider = share->crd_spider; + if (is_sts) + spider = share->sts_spider; + else + spider = share->crd_spider; conns = spider->conns; if (spider->search_link_idx < 0) { @@ -8996,13 +8802,13 @@ void *spider_table_bg_crd_action( } if (spider->search_link_idx >= 0) { - DBUG_PRINT("info", - ("spider difftime=%f", - difftime(share->bg_crd_try_time, share->crd_get_time))); - DBUG_PRINT("info", - ("spider bg_crd_interval=%f", share->bg_crd_interval)); - if (difftime(share->bg_crd_try_time, share->crd_get_time) >= - share->bg_crd_interval) + double diff_time= is_sts ? + difftime(share->bg_sts_try_time, share->sts_get_time) : + difftime(share->bg_crd_try_time, share->crd_get_time); + double interval= is_sts? share->bg_sts_interval : share->bg_crd_interval; + DBUG_PRINT("info", ("spider difftime=%f", diff_time)); + DBUG_PRINT("info", ("spider bg_sts_interval=%f", interval)); + if (diff_time >= interval) { if (!conns[spider->search_link_idx]) { @@ -9020,18 +8826,27 @@ void *spider_table_bg_crd_action( ("spider search_link_idx=%d", spider->search_link_idx)); if (spider->search_link_idx >= 0 && conns[spider->search_link_idx]) { + int result = is_sts ? 
 #ifdef WITH_PARTITION_STORAGE_ENGINE
-          if (spider_get_crd(share, spider->search_link_idx,
-            share->bg_crd_try_time, spider, table,
-            share->bg_crd_interval, share->bg_crd_mode,
-            share->bg_crd_sync,
-            2))
+            spider_get_sts(share, spider->search_link_idx,
+              share->bg_sts_try_time, spider,
+              share->bg_sts_interval, share->bg_sts_mode,
+              share->bg_sts_sync,
+              2, HA_STATUS_CONST | HA_STATUS_VARIABLE) :
+            spider_get_crd(share, spider->search_link_idx,
+              share->bg_crd_try_time, spider, table,
+              share->bg_crd_interval, share->bg_crd_mode,
+              share->bg_crd_sync, 2);
 #else
-          if (spider_get_crd(share, spider->search_link_idx,
-            share->bg_crd_try_time, spider, table,
-            share->bg_crd_interval, share->bg_crd_mode,
-            2))
+            spider_get_sts(share, spider->search_link_idx,
+              share->bg_sts_try_time, spider,
+              share->bg_sts_interval, share->bg_sts_mode,
+              2, HA_STATUS_CONST | HA_STATUS_VARIABLE) :
+            spider_get_crd(share, spider->search_link_idx,
+              share->bg_crd_try_time, spider, table,
+              share->bg_crd_interval, share->bg_crd_mode, 2);
 #endif
+          if (result)
           {
             spider->search_link_idx = -1;
           }
@@ -9045,12 +8860,29 @@
       thread->queue_first = NULL;
       thread->queue_last = NULL;
     } else {
-      thread->queue_first = share->crd_next;
-      share->crd_next->crd_prev = NULL;
-      share->crd_next = NULL;
+      if (is_sts)
+      {
+        thread->queue_first = share->sts_next;
+        share->sts_next->sts_prev = NULL;
+        share->sts_next = NULL;
+      }
+      else
+      {
+        thread->queue_first = share->crd_next;
+        share->crd_next->crd_prev = NULL;
+        share->crd_next = NULL;
+      }
+    }
+    if (is_sts)
+    {
+      share->sts_working= FALSE;
+      share->sts_wait= FALSE;
+    }
+    else
+    {
+      share->crd_working= FALSE;
+      share->crd_wait= FALSE;
     }
-    share->crd_working = FALSE;
-    share->crd_wait = FALSE;
     if (thread->first_free_wait)
     {
       pthread_cond_signal(&thread->sync_cond);
@@ -9061,6 +8893,18 @@
   }
 }
 
+void *spider_table_bg_sts_action(void *arg)
+{
+  DBUG_ENTER("spider_table_bg_sts_action");
+  DBUG_RETURN(spider_table_bg_sts_crd_action(arg, true));
+}
+
+void *spider_table_bg_crd_action(void *arg)
+{
+  DBUG_ENTER("spider_table_bg_crd_action");
+  DBUG_RETURN(spider_table_bg_sts_crd_action(arg, false));
+}
+
 void spider_table_add_share_to_sts_thread(
   SPIDER_SHARE *share
 ) {
diff --git a/storage/spider/spd_table.h b/storage/spider/spd_table.h
index 721a2894488..6d7c1959ece 100644
--- a/storage/spider/spd_table.h
+++ b/storage/spider/spd_table.h
@@ -472,19 +472,12 @@ void spider_free_spider_object_for_share(
   ha_spider **spider
 );
 
-int spider_create_sts_threads(
-  SPIDER_THREAD *spider_thread
+int spider_create_sts_crd_threads(
+  SPIDER_THREAD *spider_thread,
+  bool is_sts
 );
 
-void spider_free_sts_threads(
-  SPIDER_THREAD *spider_thread
-);
-
-int spider_create_crd_threads(
-  SPIDER_THREAD *spider_thread
-);
-
-void spider_free_crd_threads(
+void spider_free_sts_crd_threads(
   SPIDER_THREAD *spider_thread
 );

From 998e765060ddde57a85b3c7ea63ad9e9fa438449 Mon Sep 17 00:00:00 2001
From: Yuchen Pei
Date: Thu, 10 Jul 2025 16:22:47 +1000
Subject: [PATCH 28/61] MDEV-32907 Spider: do not create gbh if encountering
 Item_aggregate_ref

When spider encounters an Item_aggregate_ref that is valid at gbh
(group by handler) creation, it can become invalid at gbh execution due
to item list substitution*. Therefore we ban Item_aggregate_ref during
spider gbh creation. To that end, we also make sure that str is NULL if
and only if we are in the creation stage, not the execution stage,
which includes removing a check that is redundant when str is not NULL.
*: Note that it is likely the same scenario as in MDEV-25116. --- .../spider/bugfix/r/mdev_32907.result | 21 +++++++++++++++ .../spider/bugfix/t/mdev_32907.test | 26 +++++++++++++++++++ storage/spider/spd_db_conn.cc | 22 +++++++++++++--- storage/spider/spd_db_mysql.cc | 10 ++----- 4 files changed, 67 insertions(+), 12 deletions(-) create mode 100644 storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result create mode 100644 storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test diff --git a/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result b/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result new file mode 100644 index 00000000000..9e8dbebc86c --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/r/mdev_32907.result @@ -0,0 +1,21 @@ +for master_1 +for child2 +for child3 +set spider_same_server_link= 1; +CREATE SERVER srv FOREIGN DATA WRAPPER mysql +OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root'); +create table t2 (c int); +create table t1 (c int) ENGINE=Spider +COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"'; +insert into t1 values (3), (NULL); +explain select nvl(sum(c), 0) from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 2 +select nvl(sum(c), 0) from t1; +nvl(sum(c), 0) +3 +drop table t1, t2; +drop server srv; +for master_1 +for child2 +for child3 diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test new file mode 100644 index 00000000000..50835f4e47d --- /dev/null +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_32907.test @@ -0,0 +1,26 @@ +--disable_query_log +--disable_result_log +--source ../../t/test_init.inc +--enable_result_log +--enable_query_log +set spider_same_server_link= 1; +evalp CREATE SERVER srv FOREIGN DATA WRAPPER mysql +OPTIONS (SOCKET "$MASTER_1_MYSOCK", DATABASE 'test',user 'root'); + +create table t2 (c int); +create table t1 (c int) ENGINE=Spider +COMMENT='WRAPPER "mysql", srv "srv",TABLE "t2"'; + +insert into t1 values (3), (NULL); + +explain select nvl(sum(c), 0) from t1; +select nvl(sum(c), 0) from t1; +drop table t1, t2; + +drop server srv; + +--disable_query_log +--disable_result_log +--source ../../t/test_deinit.inc +--enable_result_log +--enable_query_log diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index 5e2dad0535e..79cafaa8f30 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -7919,11 +7919,21 @@ int spider_db_print_item_type( DBUG_ENTER("spider_db_print_item_type"); DBUG_PRINT("info",("spider COND type=%d", item->type())); - if (item->type() == Item::REF_ITEM && - ((Item_ref*)item)->ref_type() == Item_ref::DIRECT_REF) + if (item->type() == Item::REF_ITEM) { - item= item->real_item(); - DBUG_PRINT("info",("spider new COND type=%d", item->type())); + const auto rtype= ((Item_ref*)item)->ref_type(); + /* + The presence of an Item_aggregate_ref tends to lead to the query + being broken at the execution stage. + */ + if (rtype == Item_ref::AGGREGATE_REF && !str) + DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM); + DBUG_ASSERT(rtype != Item_ref::AGGREGATE_REF); + if (rtype == Item_ref::DIRECT_REF) + { + item= item->real_item(); + DBUG_PRINT("info", ("spider new COND type=%d", item->type())); + } } switch (item->type()) { @@ -8353,6 +8363,10 @@ int spider_db_open_item_ref( } DBUG_RETURN(0); } + /* + TODO: MDEV-25116 is the same case as MDEV-32907 (having an + Item_aggregate_ref). Perhaps the following is redundant. 
+  */
   DBUG_RETURN(ER_SPIDER_COND_SKIP_NUM); // MDEV-25116
 }
 DBUG_RETURN(spider_db_open_item_ident((Item_ident *) item_ref, spider, str,
diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc
index d0c377a5870..7883c63d9f0 100644
--- a/storage/spider/spd_db_mysql.cc
+++ b/storage/spider/spd_db_mysql.cc
@@ -4855,13 +4855,9 @@ int spider_db_mbase_util::open_item_func(
 ) {
   DBUG_ENTER("spider_db_mbase_util::open_item_func");
-  int error = check_item_func(item_func, spider, alias,
-    alias_length, use_fields, fields);
-  if (error)
-    DBUG_RETURN(error);
   if (!str)
-    DBUG_RETURN(0);
-
+    DBUG_RETURN(check_item_func(item_func, spider, alias,
+                                alias_length, use_fields, fields));
   DBUG_RETURN(print_item_func(item_func, spider, str, alias, alias_length,
     use_fields, fields));
 }
@@ -5021,8 +5017,6 @@ int spider_db_mbase_util::print_item_func(
   int use_pushdown_udf, case_when_start, case_when_count;
   bool merge_func = FALSE, case_with_else;
   DBUG_ENTER("spider_db_mbase_util::print_item_func");
-  DBUG_ASSERT(!check_item_func(item_func, spider, alias, alias_length,
-    use_fields, fields));
   DBUG_ASSERT(str);
 
   if (str->reserve(SPIDER_SQL_OPEN_PAREN_LEN))

From ea962ca4957872581f54644f21d435fa2b51f14b Mon Sep 17 00:00:00 2001
From: Yuchen Pei
Date: Mon, 14 Jul 2025 15:45:28 +1000
Subject: [PATCH 29/61] MDEV-30436 [fixup] Add missing check for
 HAVE_PSI_INTERFACE

A fixup of 3e9aa07cce58eb73ec70cd01dc3f72f943275984, with thanks to
Daniel Black.
---
 storage/spider/spd_table.cc | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/storage/spider/spd_table.cc b/storage/spider/spd_table.cc
index 4e85ae545b8..85fd1848507 100644
--- a/storage/spider/spd_table.cc
+++ b/storage/spider/spd_table.cc
@@ -8629,12 +8629,14 @@ int spider_create_sts_crd_threads(
 ) {
   int error_num;
   DBUG_ENTER("spider_create_sts_crd_threads");
+#ifdef HAVE_PSI_INTERFACE
   PSI_mutex_key mutex_bg= is_sts ? spd_key_mutex_bg_stss :
     spd_key_mutex_bg_crds;
   PSI_cond_key cond_bg= is_sts ? spd_key_cond_bg_stss :
     spd_key_cond_bg_crds;
   PSI_cond_key cond_bg_syncs= is_sts ? spd_key_cond_bg_sts_syncs :
     spd_key_cond_bg_crd_syncs;
+#endif
   if (mysql_mutex_init(mutex_bg,
     &spider_thread->mutex, MY_MUTEX_INIT_FAST))
   {

From 499fa24d638b2b62eab626ff6df97278649361c3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?=
Date: Mon, 14 Jul 2025 10:31:48 +0300
Subject: [PATCH 30/61] MDEV-27058 fixup: Fix a bogus assertion

buf_page_get_low(): Do not expect a valid state of
buf_page_t::in_zip_hash for blocks that are not file pages.
This debug assertion had been misplaced in
commit aaef2e1d8c843d1e40b1ce0c5199c3abb3c5da28 (MDEV-27058)
that removed the condition block->page.state() == BUF_BLOCK_FILE_PAGE.
---
 storage/innobase/buf/buf0buf.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc
index 2ea1909036d..1eb8d32c6ac 100644
--- a/storage/innobase/buf/buf0buf.cc
+++ b/storage/innobase/buf/buf0buf.cc
@@ -2730,7 +2730,6 @@ loop:
     transactional_shared_lock_guard<page_hash_latch> g{hash_lock};
     if (buf_pool.is_uncompressed(block) && page_id == block->page.id())
     {
-      ut_ad(!block->page.in_zip_hash);
       state = block->page.state();
       /* Ignore guesses that point to read-fixed blocks.
We can only avoid a race condition by
@@ -2738,6 +2737,7 @@ loop:
       if ((state >= buf_page_t::FREED && state < buf_page_t::READ_FIX) ||
           state >= buf_page_t::WRITE_FIX)
       {
+        ut_ad(!block->page.in_zip_hash);
         state = block->page.fix();
         goto got_block;
       }

From b7b2e009b3fbe4cb41a368486f385e346a1d7494 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?=
Date: Mon, 14 Jul 2025 10:31:56 +0300
Subject: [PATCH 31/61] MDEV-37215 SELECT FOR UPDATE crash in SERIALIZABLE

ha_innobase::store_lock(): Set also trx->will_lock when starting a
transaction at SERIALIZABLE isolation level. This fixes up
commit 7fbbbc983f054a9c9c7f36ea8a9778ff3793a151 (MDEV-36330).
---
 mysql-test/suite/innodb/r/lock_isolation.result | 9 +++++++++
 mysql-test/suite/innodb/t/lock_isolation.test | 10 ++++++++++
 storage/innobase/handler/ha_innodb.cc | 1 +
 3 files changed, 20 insertions(+)

diff --git a/mysql-test/suite/innodb/r/lock_isolation.result b/mysql-test/suite/innodb/r/lock_isolation.result
index 7c24ed01f4c..2044f001ad8 100644
--- a/mysql-test/suite/innodb/r/lock_isolation.result
+++ b/mysql-test/suite/innodb/r/lock_isolation.result
@@ -283,4 +283,13 @@ COMMIT;
 disconnect consistent;
 connection default;
 DROP TABLE t1,t2;
+#
+# MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE
+#
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
+SELECT * FROM t1 FOR UPDATE;
+a
+DROP TABLE t1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
 # End of 10.6 tests
diff --git a/mysql-test/suite/innodb/t/lock_isolation.test b/mysql-test/suite/innodb/t/lock_isolation.test
index 3c5544321c7..7506754cf8a 100644
--- a/mysql-test/suite/innodb/t/lock_isolation.test
+++ b/mysql-test/suite/innodb/t/lock_isolation.test
@@ -295,5 +295,15 @@ COMMIT;
 --connection default
 DROP TABLE t1,t2;
 
+--echo #
+--echo # MDEV-37215 SELECT...FOR UPDATE crash under SERIALIZABLE
+--echo #
+
+SET SESSION TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB;
+SELECT * FROM t1 FOR UPDATE;
+DROP TABLE t1;
+SET SESSION TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+
 --source include/wait_until_count_sessions.inc
 --echo # End of 10.6 tests
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index f7aafbbf9ef..e0c0188ad63 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -16510,6 +16510,7 @@ ha_innobase::store_lock(
 	case ISO_SERIALIZABLE:
 		auto trx_state = trx->state;
 		if (trx_state == TRX_STATE_NOT_STARTED) {
+			trx->will_lock = true;
 			trx_start_if_not_started(trx, false);
 			trx->read_view.open(trx);
 		} else {

From 3bcfc2ed0aed64882868b42885c6b55a98e7c505 Mon Sep 17 00:00:00 2001
From: Thirunarayanan Balathandayuthapani
Date: Mon, 7 Jul 2025 13:14:13 +0530
Subject: [PATCH 32/61] MDEV-22250 InnoDB: Failing assertion: opt_no_lock
 during mariabackup --backup

backup_file_op_fail(): Ignore the FTS internal table if it is being
created in the late phase of backup. mariabackup --prepare should
handle intermediate tables and orphaned FTS internal tables.
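For reference, the auxiliary tables being matched here are named like
the following (the hex ids are illustrative, not from an actual run):

    test/FTS_0000000000000123_CONFIG                    (common table)
    test/FTS_0000000000000123_DELETED                   (common table)
    test/FTS_0000000000000123_00000000000000ab_INDEX_1  (index table)

so the check below parses the name as FTS_<table_id>_<suffix> or
FTS_<table_id>_<index_id>_<suffix>.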
check_if_fts_table(): Determine whether the space name belongs to an
internal FTS table
---
 extra/mariabackup/backup_copy.cc | 2 +-
 extra/mariabackup/backup_copy.h | 1 +
 extra/mariabackup/xtrabackup.cc | 72 ++++++++++++++++++++++++++++++++
 3 files changed, 74 insertions(+), 1 deletion(-)

diff --git a/extra/mariabackup/backup_copy.cc b/extra/mariabackup/backup_copy.cc
index ef3018c5644..d4a90760f65 100644
--- a/extra/mariabackup/backup_copy.cc
+++ b/extra/mariabackup/backup_copy.cc
@@ -635,7 +635,7 @@ ends_with(const char *str, const char *suffix)
 		&& strcmp(str + str_len - suffix_len, suffix) == 0);
 }
 
-static bool starts_with(const char *str, const char *prefix)
+bool starts_with(const char *str, const char *prefix)
 {
 	return strncmp(str, prefix, strlen(prefix)) == 0;
 }
diff --git a/extra/mariabackup/backup_copy.h b/extra/mariabackup/backup_copy.h
index b5aaf3121e9..3d08c59d8c9 100644
--- a/extra/mariabackup/backup_copy.h
+++ b/extra/mariabackup/backup_copy.h
@@ -41,4 +41,5 @@ directory_exists(const char *dir, bool create);
 lsn_t
 get_current_lsn(MYSQL *connection);
 
+bool starts_with(const char *str, const char *prefix);
 #endif
diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index d44c7a6a4d1..a0c26d25e7a 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -113,6 +113,7 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
 #include 
 #include 
 #include "backup_debug.h"
+#include "fts0types.h"
 
 #define MB_CORRUPTED_PAGES_FILE "innodb_corrupted_pages"
 
@@ -124,6 +125,8 @@ int sd_notifyf() { return 0; }
 
 int sys_var_init();
 
+extern const char* fts_common_tables[];
+extern const fts_index_selector_t fts_index_selector[];
 /* === xtrabackup specific options === */
 #define DEFAULT_TARGET_DIR "./xtrabackup_backupfiles/"
 char xtrabackup_real_target_dir[FN_REFLEN] = DEFAULT_TARGET_DIR;
@@ -960,6 +963,61 @@ static void backup_file_op(uint32_t space_id, int type,
 	pthread_mutex_unlock(&backup_mutex);
 }
 
+/** Check whether the spacename belongs to an internal FTS table
+@param space_name space name to be checked
+@return true if it is fts table or false otherwise */
+static bool check_if_fts_table(const char *space_name) {
+	/* There are two types of FTS internal table
+	1) FTS common tables (FTS_<table_id>_<suffix>)
+	2) FTS INDEX auxiliary table (FTS_<table_id>_<index_id>_<suffix>) */
+	const char *table_name_start = strrchr(space_name, '/');
+	if (table_name_start)
+		++table_name_start;
+	else
+		table_name_start = space_name;
+
+	if (!starts_with(table_name_start,"FTS_"))
+		return false;
+
+	const char *table_name_end =
+		table_name_start + strlen(table_name_start) - 1;
+
+	/* Skip FTS_ */
+	const char *table_name_suffix = strchr(table_name_start, '_');
+	if (!table_name_suffix ||
+	    table_name_suffix == table_name_end) {
+		return false;
+	}
+	table_name_suffix++;
+
+	/* Skip <table_id>_ */
+	table_name_suffix = strchr(table_name_suffix, '_');
+	if (!table_name_suffix ||
+	    table_name_end == table_name_suffix) {
+		return false;
+	}
+	table_name_suffix++;
+
+	/* Compare only common tables */
+	for (const char **suffix = fts_common_tables; *suffix; ++suffix) {
+		if (!strcmp(table_name_suffix, *suffix))
+			return true;
+	}
+
+	/* Skip index_id on fts table name */
+	table_name_suffix = strchr(table_name_suffix, '_');
+	if (!table_name_suffix ||
+	    table_name_suffix == table_name_end) {
+		return false;
+	}
+	table_name_suffix++;
+
+	for (size_t i = 0; fts_index_selector[i].suffix; ++i)
+		if (!strcmp(table_name_suffix, fts_index_selector[i].suffix))
+			return true;
+	return false;
+}
+
 /* This callback is called if DDL
operation is detected, @@ -982,6 +1040,20 @@ static void backup_file_op_fail(uint32_t space_id, int type, space_id, int(len), name); fail = !check_if_skip_table( filename_to_spacename(name, len).c_str()); + if (fail && !opt_no_lock && + check_if_fts_table( + filename_to_spacename(name, len).c_str())) { + /* Ignore the FTS internal table because InnoDB does + create intermediate table and their associative FTS + internal table when table is being rebuilt during + prepare phase. Also, backup_set_alter_copy_lock() + downgrades the MDL_BACKUP_DDL before prepare phase + of alter. This leads to the FTS internal table being + created in the late phase of backup. + mariabackup --prepare should be able to handle + this case. */ + fail = false; + } error= "create"; break; case FILE_MODIFY: From e3c5565dfb4c5a738b413978f34194af842825ec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 15 Jul 2025 16:26:16 +0300 Subject: [PATCH 33/61] MDEV-36330 fixup: Only fix innodb_snapsho_isolation=ON ha_innobase::store_lock(): Do not create a read view or start the transaction if innodb_snapshot_isolation=OFF. This should save some resources with the default settings. --- storage/innobase/handler/ha_innodb.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index e0c0188ad63..5cf3e00f88d 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -16509,12 +16509,12 @@ ha_innobase::store_lock( break; case ISO_SERIALIZABLE: auto trx_state = trx->state; - if (trx_state == TRX_STATE_NOT_STARTED) { + if (trx_state != TRX_STATE_NOT_STARTED) { + ut_ad(trx_state == TRX_STATE_ACTIVE); + } else if (trx->snapshot_isolation) { trx->will_lock = true; trx_start_if_not_started(trx, false); trx->read_view.open(trx); - } else { - ut_ad(trx_state == TRX_STATE_ACTIVE); } } } From 024c7e881f1f82446fdb20be9f612c967a529e45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 16 Jul 2025 12:01:59 +0300 Subject: [PATCH 34/61] MDEV-37103 innodb_immediate_scrub_data_uncompressed=ON may break innodb_undo_log_truncate=ON The test innodb.undo_truncate occasionally demonstrates a race condition where scrubbing is writing zeroes to a freed undo page, and innodb_undo_log_truncate=ON truncating the same tablespace. The truncation is an exception to the rule that InnoDB tablespace file sizes can only grow, never shrink. The fields fil_space_t::size and fil_node_t::size are protected by fil_system.mutex, which used to be a highly contended resource. We do not want to revert back to acquiring the mutex in fil_space_t::io() because that would introduce an obvious scalability bottleneck. fil_space_t::flush_freed(): Do not try to scrub pages of the undo tablespace in order to prevent a race condition between io() and undo tablespace truncation. fil_space_t::io(): Prevent a null pointer dereference when reporting an out-of-bounds access to the non-first file of the system or temporary tablespace. Do not invoke set_corrupted() after an out-of-bounds asynchronous read. Note: fil_space_t::flush_freed() may only invoke PUNCH_RANGE on page_compressed tablespaces, never on an undo tablespace. 
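An illustrative interleaving of the race (timing assumed for
exposition, not observed in a trace):

    1. purge frees some undo pages; flush_freed() starts scrubbing
       them by writing zeroes through fil_space_t::io()
    2. innodb_undo_log_truncate=ON truncates the undo tablespace,
       shrinking fil_space_t::size and fil_node_t::size
    3. the pending scrub write now targets an offset beyond the
       shrunken file, and io() reports an out-of-bounds access

Skipping undo tablespaces in flush_freed() removes step 1 from the
picture without reintroducing fil_system.mutex into the io() path.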
---
 storage/innobase/buf/buf0flu.cc | 9 ++++++---
 storage/innobase/fil/fil0fil.cc | 17 ++++++++++++-----
 2 files changed, 18 insertions(+), 8 deletions(-)

diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc
index 71ee98b18be..b0ee82e03e0 100644
--- a/storage/innobase/buf/buf0flu.cc
+++ b/storage/innobase/buf/buf0flu.cc
@@ -985,12 +985,15 @@ MY_ATTRIBUTE((warn_unused_result))
 @return number of pages written or hole-punched */
 uint32_t fil_space_t::flush_freed(bool writable) noexcept
 {
+  mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex);
+  mysql_mutex_assert_not_owner(&buf_pool.mutex);
+
   const bool punch_hole= chain.start->punch_hole == 1;
   if (!punch_hole && !srv_immediate_scrub_data_uncompressed)
     return 0;
-
-  mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex);
-  mysql_mutex_assert_not_owner(&buf_pool.mutex);
+  if (srv_is_undo_tablespace(id))
+    /* innodb_undo_log_truncate=ON can take care of these better */
+    return 0;
 
   for (;;)
   {
diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc
index 63b0c3414c5..8a4310b3de9 100644
--- a/storage/innobase/fil/fil0fil.cc
+++ b/storage/innobase/fil/fil0fil.cc
@@ -2696,23 +2696,30 @@ fil_io_t fil_space_t::io(const IORequest &type, os_offset_t offset, size_t len,
 	while (node->size <= p) {
 		p -= node->size;
-		node = UT_LIST_GET_NEXT(chain, node);
-		if (!node) {
+		if (!UT_LIST_GET_NEXT(chain, node)) {
 fail:
-			if (type.type != IORequest::READ_ASYNC) {
+			switch (type.type) {
+			case IORequest::READ_ASYNC:
+				/* Read-ahead may be requested for
+				non-existing pages. Ignore such
+				requests. */
+				break;
+			default:
 				fil_invalid_page_access_msg(
 					node->name, offset, len,
 					type.is_read());
-			}
 #ifndef DBUG_OFF
 io_error:
 #endif
-			set_corrupted();
+				set_corrupted();
+			}
+
 			err = DB_CORRUPTION;
 			node = nullptr;
 			goto release;
 		}
+		node = UT_LIST_GET_NEXT(chain, node);
 	}
 
 	offset = os_offset_t{p} << srv_page_size_shift;

From dc9bdb42168989e8b93e463d2c0b8638a9b0ce1f Mon Sep 17 00:00:00 2001
From: Rucha Deodhar
Date: Wed, 16 Apr 2025 18:22:05 +0530
Subject: [PATCH 35/61] MDEV-21530: json_extract STILL crashes in
 Item_func_json_extract::read_json

Analysis: The wrong comparison function was picked, resulting in
read_json() being called on the wrong argument.
Fix: Use the correct comparator, compare_e_str_json instead of
compare_e_json_str.
--- mysql-test/main/func_json.result | 8 ++++++++ mysql-test/main/func_json.test | 8 ++++++++ sql/item_cmpfunc.cc | 2 +- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index d8d87b53672..a000cfbeefe 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -1766,5 +1766,13 @@ FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t; data # +# MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json +# +select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}'); +null<=>json_extract('1',json_object(null,'{ }',null,null),'{}') +1 +Warnings: +Warning 4042 Syntax error in JSON path in argument 2 to function 'json_extract' at position 1 +# # End of 10.6 tests # diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index e7e1db78791..da22d719a8d 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -1231,6 +1231,14 @@ SELECT data FROM JSON_TABLE (@data, '$[*]' COLUMNS (data text PATH '$.Data')) AS t; + +--echo # +--echo # MDEV-21530: json_extract STILL crashes in Item_func_json_extract::read_json +--echo # + +select null<=>json_extract('1',json_object(null,'{ }',null,null),'{}'); + + --echo # --echo # End of 10.6 tests --echo # diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 06433c25b80..2135729046f 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -603,7 +603,7 @@ bool Arg_comparator::set_cmp_func_string(THD *thd) else if ((*b)->type() == Item::FUNC_ITEM && ((Item_func *) (*b))->functype() == Item_func::JSON_EXTRACT_FUNC) { - func= is_owner_equal_func() ? &Arg_comparator::compare_e_json_str: + func= is_owner_equal_func() ? &Arg_comparator::compare_e_str_json: &Arg_comparator::compare_str_json; return 0; } From d8c2362912abb60779ef2c69020f83f4e5bd3692 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 10 Jul 2025 18:12:41 +0200 Subject: [PATCH 36/61] cleanup: long unique checks consolidate and unify long unique checks. fix a bug where an update of a long unique blob was ignoring the prefix length --- mysql-test/main/long_unique.result | 1 + mysql-test/main/long_unique.test | 1 + mysql-test/main/long_unique_bugs.result | 1 + mysql-test/main/long_unique_bugs.test | 11 +- sql/handler.cc | 349 ++++++++++-------------- sql/handler.h | 4 +- 6 files changed, 144 insertions(+), 223 deletions(-) diff --git a/mysql-test/main/long_unique.result b/mysql-test/main/long_unique.result index ec68afa8f00..b1b537a29b0 100644 --- a/mysql-test/main/long_unique.result +++ b/mysql-test/main/long_unique.result @@ -1242,6 +1242,7 @@ t1 CREATE TABLE `t1` ( insert into t1 value(concat(repeat('s',3000),'1')); insert into t1 value(concat(repeat('s',3000),'2')); ERROR 23000: Duplicate entry 'sssssssssssssssssssssssssssssssssssssssssssssssssssssssssssss...' 
for key 'a' +update t1 set a= concat(repeat('s',3000),'2'); insert into t1 value(concat(repeat('a',3000),'2')); drop table t1; create table t1(a varchar(4000), b longblob , c varchar(5000), d longblob, diff --git a/mysql-test/main/long_unique.test b/mysql-test/main/long_unique.test index b3c1e8e9efc..e759fa6d4b7 100644 --- a/mysql-test/main/long_unique.test +++ b/mysql-test/main/long_unique.test @@ -404,6 +404,7 @@ show create table t1; insert into t1 value(concat(repeat('s',3000),'1')); --error ER_DUP_ENTRY insert into t1 value(concat(repeat('s',3000),'2')); +update t1 set a= concat(repeat('s',3000),'2'); insert into t1 value(concat(repeat('a',3000),'2')); drop table t1; diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index 3f65fb18014..1c87e028802 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -356,6 +356,7 @@ ERROR 42000: Specified key was too long; max key length is 2300 bytes # create table t1(a int, unique(a) using hash); #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) +insert into t1 values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1); drop table t1; # # MDEV-21804 Assertion `marked_for_read()' failed upon INSERT into table with long unique blob under binlog_row_image=NOBLOB diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index d8d3462c1da..d7ab1c87142 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -344,17 +344,8 @@ CREATE TABLE t1 (a TEXT, UNIQUE(a)) ENGINE=Aria; --echo # MDEV-20001 Potential dangerous regression: INSERT INTO >=100 rows fail for myisam table with HASH indexes --echo # create table t1(a int, unique(a) using hash); ---let $count=150 ---let insert_stmt= insert into t1 values(200) -while ($count) -{ - --let $insert_stmt=$insert_stmt,($count) - --dec $count -} ---disable_query_log --echo #BULK insert > 100 rows (MI_MIN_ROWS_TO_DISABLE_INDEXES) ---eval $insert_stmt ---enable_query_log +insert into t1 
values(200),(150),(149),(148),(147),(146),(145),(144),(143),(142),(141),(140),(139),(138),(137),(136),(135),(134),(133),(132),(131),(130),(129),(128),(127),(126),(125),(124),(123),(122),(121),(120),(119),(118),(117),(116),(115),(114),(113),(112),(111),(110),(109),(108),(107),(106),(105),(104),(103),(102),(101),(100),(99),(98),(97),(96),(95),(94),(93),(92),(91),(90),(89),(88),(87),(86),(85),(84),(83),(82),(81),(80),(79),(78),(77),(76),(75),(74),(73),(72),(71),(70),(69),(68),(67),(66),(65),(64),(63),(62),(61),(60),(59),(58),(57),(56),(55),(54),(53),(52),(51),(50),(49),(48),(47),(46),(45),(44),(43),(42),(41),(40),(39),(38),(37),(36),(35),(34),(33),(32),(31),(30),(29),(28),(27),(26),(25),(24),(23),(22),(21),(20),(19),(18),(17),(16),(15),(14),(13),(12),(11),(10),(9),(8),(7),(6),(5),(4),(3),(2),(1); drop table t1; --echo # diff --git a/sql/handler.cc b/sql/handler.cc index 754b5a7e5f8..cbea7ea5d9a 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -7351,10 +7351,10 @@ int handler::ha_reset() DBUG_RETURN(reset()); } -#ifdef WITH_WSREP static int wsrep_after_row(THD *thd) { DBUG_ENTER("wsrep_after_row"); +#ifdef WITH_WSREP if (thd->internal_transaction()) DBUG_RETURN(0); @@ -7378,9 +7378,32 @@ static int wsrep_after_row(THD *thd) { DBUG_RETURN(ER_LOCK_DEADLOCK); } +#endif /* WITH_WSREP */ DBUG_RETURN(0); } -#endif /* WITH_WSREP */ + + +static bool long_unique_fields_differ(KEY *keyinfo, const uchar *other) +{ + uint key_parts= fields_in_hash_keyinfo(keyinfo); + KEY_PART_INFO *keypart= keyinfo->key_part - key_parts; + my_ptrdiff_t off= other - keypart->field->table->record[0]; + DBUG_ASSERT(off); + do + { + Field *field= keypart->field; + if (field->is_null() != field->is_null(off)) + return true; + else if (f_is_blob(keypart->key_type) && keypart->length) + { + if (field->cmp_prefix(field->ptr, field->ptr + off, keypart->length)) + return true; + } + else if (field->cmp_offset(off)) + return true; + } while (keypart++ < keyinfo->key_part); + return false; +} /** @@ -7389,18 +7412,16 @@ static int wsrep_after_row(THD *thd) int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no) { - int result, error= 0; + int result; KEY *key_info= table->key_info + key_no; - Field *hash_field= key_info->key_part->field; uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL]; - String *blob_storage; DBUG_ENTER("handler::check_duplicate_long_entry_key"); DBUG_ASSERT((key_info->flags & HA_NULL_PART_KEY && key_info->key_length == HA_HASH_KEY_LENGTH_WITH_NULL) || key_info->key_length == HA_HASH_KEY_LENGTH_WITHOUT_NULL); - if (hash_field->is_real_null()) + if (key_info->key_part->field->is_real_null()) DBUG_RETURN(0); key_copy(ptr, new_rec, key_info, key_info->key_length, false); @@ -7408,72 +7429,47 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no) result= lookup_handler->ha_index_init(key_no, 0); if (result) DBUG_RETURN(result); - blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields); + auto blob_storage= (String*)alloca(sizeof(String)*table->s->virtual_not_stored_blob_fields); table->remember_blob_values(blob_storage); store_record(table, file->lookup_buffer); result= lookup_handler->ha_index_read_map(table->record[0], ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT); - if (!result) + if (result) { - bool is_same; - Field * t_field; - Item_func_hash * temp= (Item_func_hash *)hash_field->vcol_info->expr; - Item ** arguments= temp->arguments(); - uint arg_count= temp->argument_count(); - // restore pointers after swap_values in 
TABLE::update_virtual_fields() - for (Field **vf= table->vfield; *vf; vf++) - { - if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG && - bitmap_is_set(table->read_set, (*vf)->field_index)) - ((Field_blob*)*vf)->swap_value_and_read_value(); - } - do - { - my_ptrdiff_t diff= table->file->lookup_buffer - new_rec; - is_same= true; - for (uint j=0; is_same && j < arg_count; j++) - { - DBUG_ASSERT(arguments[j]->type() == Item::FIELD_ITEM || - // this one for left(fld_name,length) - arguments[j]->type() == Item::FUNC_ITEM); - if (arguments[j]->type() == Item::FIELD_ITEM) - { - t_field= static_cast(arguments[j])->field; - if (t_field->cmp_offset(diff)) - is_same= false; - } - else - { - Item_func_left *fnc= static_cast(arguments[j]); - DBUG_ASSERT(!my_strcasecmp(system_charset_info, "left", fnc->func_name())); - DBUG_ASSERT(fnc->arguments()[0]->type() == Item::FIELD_ITEM); - t_field= static_cast(fnc->arguments()[0])->field; - uint length= (uint)fnc->arguments()[1]->val_int(); - if (t_field->cmp_prefix(t_field->ptr, t_field->ptr + diff, length)) - is_same= false; - } - } - } - while (!is_same && - !(result= lookup_handler->ha_index_next_same(table->record[0], - ptr, key_info->key_length))); - if (is_same) - error= HA_ERR_FOUND_DUPP_KEY; - goto exit; + if (result == HA_ERR_KEY_NOT_FOUND) + result= 0; + goto end; } - if (result != HA_ERR_KEY_NOT_FOUND) - error= result; -exit: - if (error == HA_ERR_FOUND_DUPP_KEY) + + // restore pointers after swap_values in TABLE::update_virtual_fields() + for (Field **vf= table->vfield; *vf; vf++) { - table->file->lookup_errkey= key_no; - lookup_handler->position(table->record[0]); - memcpy(table->file->dup_ref, lookup_handler->ref, ref_length); + if (!(*vf)->stored_in_db() && (*vf)->flags & BLOB_FLAG && + bitmap_is_set(table->read_set, (*vf)->field_index)) + ((Field_blob*)*vf)->swap_value_and_read_value(); } + do + { + if (!long_unique_fields_differ(key_info, lookup_buffer)) + { + result= HA_ERR_FOUND_DUPP_KEY; + table->file->lookup_errkey= key_no; + lookup_handler->position(table->record[0]); + memcpy(table->file->dup_ref, lookup_handler->ref, ref_length); + goto end; + } + } + while (!(result= lookup_handler->ha_index_next_same(table->record[0], ptr, + key_info->key_length))); + + if (result == HA_ERR_END_OF_FILE) + result= 0; + +end: restore_record(table, file->lookup_buffer); table->restore_blob_values(blob_storage); lookup_handler->ha_index_end(); - DBUG_RETURN(error); + DBUG_RETURN(result); } void handler::alloc_lookup_buffer() @@ -7485,77 +7481,56 @@ void handler::alloc_lookup_buffer() + table_share->reclength); } -/** @brief - check whether inserted records breaks the - unique constraint on long columns. - @returns 0 if no duplicate else returns error - */ -int handler::check_duplicate_long_entries(const uchar *new_rec) + +int handler::ha_check_inserver_constraints(const uchar *old_data, + const uchar* new_data) { - lookup_errkey= (uint)-1; - for (uint i= 0; i < table->s->keys; i++) + int error= 0; + /* + this != table->file is true in 3 cases: + 1. under copy_partitions() (REORGANIZE PARTITION): that does not + require long unique check as it does not introduce new rows or new index. + 2. under partition's ha_write_row() or ha_update_row(). Constraints + were already checked by ha_partition::ha_write_row(), no need re-check + for each partition. + 3. under ha_mroonga::wrapper_write_row(). Same as 2. 
+ */ + if (this == table->file) { - int result; - if (table->key_info[i].algorithm == HA_KEY_ALG_LONG_HASH && - (result= check_duplicate_long_entry_key(new_rec, i))) - return result; + uint saved_status= table->status; + if (!(error= ha_check_overlaps(old_data, new_data))) + error= ha_check_long_uniques(old_data, new_data); + table->status= saved_status; } - return 0; + return error; } /** @brief - check whether updated records breaks the - unique constraint on long columns. - In the case of update we just need to check the specic key - reason for that is consider case - create table t1(a blob , b blob , x blob , y blob ,unique(a,b) - ,unique(x,y)) - and update statement like this - update t1 set a=23+a; in this case if we try to scan for - whole keys in table then index scan on x_y will return 0 - because data is same so in the case of update we take - key as a parameter in normal insert key should be -1 + check whether inserted records breaks the unique constraint on long columns. @returns 0 if no duplicate else returns error */ -int handler::check_duplicate_long_entries_update(const uchar *new_rec) +int handler::ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec) { - Field *field; - uint key_parts; - KEY *keyinfo; - KEY_PART_INFO *keypart; - /* - Here we are comparing whether new record and old record are same - with respect to fields in hash_str - */ - uint reclength= (uint) (table->record[1] - table->record[0]); - + if (!table->s->long_unique_table) + return 0; + DBUG_ASSERT(inited == NONE || lookup_handler != this); + DBUG_ASSERT(new_rec == table->record[0]); + DBUG_ASSERT(!old_rec || old_rec == table->record[1]); + lookup_errkey= (uint)-1; for (uint i= 0; i < table->s->keys; i++) { - keyinfo= table->key_info + i; + KEY *keyinfo= table->key_info + i; if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH) { - key_parts= fields_in_hash_keyinfo(keyinfo); - keypart= keyinfo->key_part - key_parts; - for (uint j= 0; j < key_parts; j++, keypart++) + if (!old_rec || long_unique_fields_differ(keyinfo, old_rec)) { - int error; - field= keypart->field; - /* - Compare fields if they are different then check for duplicates - cmp_binary_offset cannot differentiate between null and empty string - So also check for that too - */ - if((field->is_null(0) != field->is_null(reclength)) || - field->cmp_offset(reclength)) + if (int res= check_duplicate_long_entry_key(new_rec, i)) { - if((error= check_duplicate_long_entry_key(new_rec, i))) - return error; - /* - break because check_duplicate_long_entries_key will - take care of remaining fields - */ - break; + if (!old_rec && table->next_number_field) + if (int err= update_auto_increment()) + return err; + return res; } } } @@ -7567,8 +7542,7 @@ int handler::check_duplicate_long_entries_update(const uchar *new_rec) int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data) { DBUG_ASSERT(new_data); - if (this != table->file) - return 0; + DBUG_ASSERT(this == table->file); if (!table_share->period.unique_keys) return 0; if (table->versioned() && !table->vers_end_field()->is_max()) @@ -7752,11 +7726,8 @@ int handler::prepare_for_insert(bool do_create) int handler::ha_write_row(const uchar *buf) { int error; - DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || - m_lock_type == F_WRLCK); DBUG_ENTER("handler::ha_write_row"); DEBUG_SYNC_C("ha_write_row_start"); -#ifdef WITH_WSREP DBUG_EXECUTE_IF("wsrep_ha_write_row", { const char act[]= @@ -7765,36 +7736,10 @@ int handler::ha_write_row(const uchar *buf) "WAIT_FOR 
wsrep_ha_write_row_continue"; DBUG_ASSERT(!debug_sync_set_action(ha_thd(), STRING_WITH_LEN(act))); }); -#endif /* WITH_WSREP */ - if ((error= ha_check_overlaps(NULL, buf))) - { - DEBUG_SYNC_C("ha_write_row_end"); - DBUG_RETURN(error); - } + DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); - /* - NOTE: this != table->file is true in 3 cases: - - 1. under copy_partitions() (REORGANIZE PARTITION): that does not - require long unique check as it does not introduce new rows or new index. - 2. under partition's ha_write_row() (INSERT): check_duplicate_long_entries() - was already done by ha_partition::ha_write_row(), no need to check it - again for each single partition. - 3. under ha_mroonga::wrapper_write_row() - */ - - if (table->s->long_unique_table && this == table->file) - { - DBUG_ASSERT(inited == NONE || lookup_handler != this); - if ((error= check_duplicate_long_entries(buf))) - { - if (table->next_number_field && buf == table->record[0]) - if (int err= update_auto_increment()) - error= err; - DEBUG_SYNC_C("ha_write_row_end"); - DBUG_RETURN(error); - } - } + if ((error= ha_check_inserver_constraints(NULL, buf))) + goto err; MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str); mark_trx_read_write(); @@ -7806,23 +7751,21 @@ int handler::ha_write_row(const uchar *buf) dbug_format_row(table, buf, false).c_ptr_safe(), error)); MYSQL_INSERT_ROW_DONE(error); - if (likely(!error)) - { - rows_changed++; - if (row_logging) - { - Log_func *log_func= Write_rows_log_event::binlog_row_logging_function; - error= binlog_log_row(table, 0, buf, log_func); - } + if (error) + goto err; -#ifdef WITH_WSREP - THD *thd= ha_thd(); - if (WSREP_NNULL(thd) && table_share->tmp_table == NO_TMP_TABLE && - ht->flags & HTON_WSREP_REPLICATION && !error) - error= wsrep_after_row(thd); -#endif /* WITH_WSREP */ + rows_changed++; + if (row_logging) + { + Log_func *log_func= Write_rows_log_event::binlog_row_logging_function; + error= binlog_log_row(table, 0, buf, log_func); } + if (WSREP_NNULL(ha_thd()) && table_share->tmp_table == NO_TMP_TABLE && + ht->flags & HTON_WSREP_REPLICATION && !error) + error= wsrep_after_row(ha_thd()); + +err: DEBUG_SYNC_C("ha_write_row_end"); DBUG_RETURN(error); } @@ -7831,30 +7774,15 @@ int handler::ha_write_row(const uchar *buf) int handler::ha_update_row(const uchar *old_data, const uchar *new_data) { int error; - DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || - m_lock_type == F_WRLCK); + DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); /* Some storage engines require that the new record is in record[0] (and the old record is in record[1]). - */ + */ DBUG_ASSERT(new_data == table->record[0]); DBUG_ASSERT(old_data == table->record[1]); - uint saved_status= table->status; - error= ha_check_overlaps(old_data, new_data); - - /* - NOTE: this != table->file is true under partition's ha_update_row(): - check_duplicate_long_entries_update() was already done by - ha_partition::ha_update_row(), no need to check it again for each single - partition. Same applies to ha_mroonga wrapper. 
- */ - - if (!error && table->s->long_unique_table && this == table->file) - error= check_duplicate_long_entries_update(new_data); - table->status= saved_status; - - if (error) + if ((error= ha_check_inserver_constraints(old_data, new_data))) return error; MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str); @@ -7869,35 +7797,35 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data) error)); MYSQL_UPDATE_ROW_DONE(error); - if (likely(!error)) - { - rows_changed++; - if (row_logging) - { - Log_func *log_func= Update_rows_log_event::binlog_row_logging_function; - error= binlog_log_row(table, old_data, new_data, log_func); - } -#ifdef WITH_WSREP - THD *thd= ha_thd(); - if (WSREP_NNULL(thd)) - { - /* for streaming replication, the following wsrep_after_row() - may replicate a fragment, so we have to declare potential PA - unsafe before that */ - if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd)) - { - WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key); - if (thd->wsrep_cs().mark_transaction_pa_unsafe()) - WSREP_DEBUG("session does not have active transaction," - " can not mark as PA unsafe"); - } + if (error) + return error; - if (!error && table_share->tmp_table == NO_TMP_TABLE && - ht->flags & HTON_WSREP_REPLICATION) - error= wsrep_after_row(thd); - } -#endif /* WITH_WSREP */ + rows_changed++; + if (row_logging) + { + Log_func *log_func= Update_rows_log_event::binlog_row_logging_function; + error= binlog_log_row(table, old_data, new_data, log_func); } +#ifdef WITH_WSREP + THD *thd= ha_thd(); + if (WSREP_NNULL(thd)) + { + /* for streaming replication, the following wsrep_after_row() + may replicate a fragment, so we have to declare potential PA + unsafe before that */ + if (table->s->primary_key == MAX_KEY && wsrep_thd_is_local(thd)) + { + WSREP_DEBUG("marking trx as PA unsafe pk %d", table->s->primary_key); + if (thd->wsrep_cs().mark_transaction_pa_unsafe()) + WSREP_DEBUG("session does not have active transaction," + " can not mark as PA unsafe"); + } + + if (!error && table_share->tmp_table == NO_TMP_TABLE && + ht->flags & HTON_WSREP_REPLICATION) + error= wsrep_after_row(thd); + } +#endif /* WITH_WSREP */ return error; } @@ -7932,8 +7860,7 @@ int handler::update_first_row(const uchar *new_data) int handler::ha_delete_row(const uchar *buf) { int error; - DBUG_ASSERT(table_share->tmp_table != NO_TMP_TABLE || - m_lock_type == F_WRLCK); + DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); /* Normally table->record[0] is used, but sometimes table->record[1] is used. 
*/ diff --git a/sql/handler.h b/sql/handler.h index 05541765a0b..4d94e1d5ac3 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -4919,11 +4919,11 @@ private: int create_lookup_handler(); void alloc_lookup_buffer(); - int check_duplicate_long_entries(const uchar *new_rec); - int check_duplicate_long_entries_update(const uchar *new_rec); int check_duplicate_long_entry_key(const uchar *new_rec, uint key_no); /** PRIMARY KEY/UNIQUE WITHOUT OVERLAPS check */ int ha_check_overlaps(const uchar *old_data, const uchar* new_data); + int ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec); + int ha_check_inserver_constraints(const uchar *old_data, const uchar* new_data); protected: /* From 2746c19a9cc1e9775328f00c4ecdec3e1ccaa270 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 11 Jul 2025 11:23:30 +0200 Subject: [PATCH 37/61] MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update --- mysql-test/main/long_unique_bugs.result | 7 +++++++ mysql-test/main/long_unique_bugs.test | 8 ++++++++ sql/handler.cc | 2 +- 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/long_unique_bugs.result b/mysql-test/main/long_unique_bugs.result index 1c87e028802..55cc07ce067 100644 --- a/mysql-test/main/long_unique_bugs.result +++ b/mysql-test/main/long_unique_bugs.result @@ -835,4 +835,11 @@ t1 CREATE TABLE `t1` ( KEY `a_2` (`a`(1000)) ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1; +# +# MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update +# +create table t (a int,b text unique key); +insert into t (a) values (1); +update t set a=2; +drop table t; # End of 10.6 tests diff --git a/mysql-test/main/long_unique_bugs.test b/mysql-test/main/long_unique_bugs.test index d7ab1c87142..69b7e517c46 100644 --- a/mysql-test/main/long_unique_bugs.test +++ b/mysql-test/main/long_unique_bugs.test @@ -774,4 +774,12 @@ alter table t1 add unique(a), add key(a); show create table t1; drop table t1; +--echo # +--echo # MDEV-37203 UBSAN: applying zero offset to null pointer in strings/ctype-uca.inl | my_uca_strnncollsp_onelevel_utf8mb4 | handler::check_duplicate_long_entries_update +--echo # +create table t (a int,b text unique key); +insert into t (a) values (1); +update t set a=2; +drop table t; + --echo # End of 10.6 tests diff --git a/sql/handler.cc b/sql/handler.cc index cbea7ea5d9a..272ab60cffa 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -7392,7 +7392,7 @@ static bool long_unique_fields_differ(KEY *keyinfo, const uchar *other) do { Field *field= keypart->field; - if (field->is_null() != field->is_null(off)) + if (field->is_null() || field->is_null(off)) return true; else if (f_is_blob(keypart->key_type) && keypart->length) { From 9703c90712f33014b9f222c08e272e898937a1bb Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 11 Jul 2025 15:49:53 +0200 Subject: [PATCH 38/61] MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records Server-level UNIQUE constraints (namely, WITHOUT OVERLAPS and USING HASH) only worked with InnoDB in REPEATABLE READ isolation mode, when the constraint was checked first and then the row was inserted or updated. Gap locks prevented race conditions when a concurrent connection could've also checked the constraint and inserted/updated a row at the same time. In READ COMMITTED there are no gap locks. 
To avoid race conditions, we now check the constraint *after* the row operation. This is enabled by the HA_CHECK_UNIQUE_AFTER_WRITE table flag that InnoDB sets in the READ COMMITTED transactions. Checking the constraint after the row operation is more complex. First, the constraint will see the current (inserted/updated) row, and needs to skip it. Second, IGNORE operations become tricky, as we need to revert the insert/update and continue statement execution. write_row() (INSERT IGNORE) is reverted with delete_row(). Conveniently it deletes the current row, that is, the last inserted row. update_row(a,b) (UPDATE IGNORE) is reverted with a reversed update, update_row(b,a). Conveniently, it updates the current row too. Except in InnoDB when the PK is updated - in this case InnoDB internally performs delete+insert, but does not move the cursor, so the "current" row is the deleted one and the reverse update doesn't work. This combination now throws an "unsupported" error and will be fixed in MDEV-37233 --- .../main/long_unique_innodb_debug.result | 254 ++++++++++++++++++ mysql-test/main/long_unique_innodb_debug.test | 239 ++++++++++++++++ mysql-test/suite/period/r/innodb_debug.result | 215 +++++++++++++++ mysql-test/suite/period/t/innodb_debug.test | 208 ++++++++++++++ sql/handler.cc | 90 ++++++- sql/handler.h | 4 +- storage/innobase/handler/ha_innodb.cc | 2 +- 7 files changed, 997 insertions(+), 15 deletions(-) create mode 100644 mysql-test/main/long_unique_innodb_debug.result create mode 100644 mysql-test/main/long_unique_innodb_debug.test create mode 100644 mysql-test/suite/period/r/innodb_debug.result create mode 100644 mysql-test/suite/period/t/innodb_debug.test diff --git a/mysql-test/main/long_unique_innodb_debug.result b/mysql-test/main/long_unique_innodb_debug.result new file mode 100644 index 00000000000..65409309b5f --- /dev/null +++ b/mysql-test/main/long_unique_innodb_debug.result @@ -0,0 +1,254 @@ +# +# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +# +## INSERT +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +# Keep a Read View open to prevent purge +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +# Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert t1 values(15, 'a'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## INSERT, row-level locking proof +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +# Keep a Read View open to prevent purge +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +# Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +# Insert secondary index key ('a', 15) in the GAP between ('a', 10) and 
Supremum +set transaction isolation level read committed; +set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert t1 values(15, 'a'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +# Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set session innodb_lock_wait_timeout= 1; +set transaction isolation level read committed; +insert t1 values(5, 'a'); +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +set debug_sync="now SIGNAL do_insert"; +connection con1; +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set col2='a' where col1=5; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## INSERT IGNORE +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert t1 values(10, 'a'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert ignore t1 values(15, 'a'), (16, 'b'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +Warnings: +Warning 1062 Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 a +16 b +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set col2=chr(92+col1) where col1<=9; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +9 e +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE 
modifying PK +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set col2='a', col1=4 where col1=5; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry 'a' for key 'col2' +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE modifying PK +create table t1 ( +col1 int primary key, +col2 varchar(3000), +unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +col1 col2 +connect con1,localhost,root; +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 42000: UPDATE IGNORE that modifies a primary key of a table with a UNIQUE constraint USING HASH is not currently supported +connection default; +select * from t1; +col1 col2 +commit; +select * from t1; +col1 col2 +5 b +9 d +15 a +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb_debug.test b/mysql-test/main/long_unique_innodb_debug.test new file mode 100644 index 00000000000..bd4ba866576 --- /dev/null +++ b/mysql-test/main/long_unique_innodb_debug.test @@ -0,0 +1,239 @@ +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +--echo # +--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +--echo # + +--echo ## INSERT +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +--echo # Keep a Read View open to prevent purge +start transaction; +select * from t1; +--connect con1,localhost,root +--echo # Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert t1 values(15, 'a') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 
+--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## INSERT, row-level locking proof +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +--echo # Keep a Read View open to prevent purge +start transaction; +select * from t1; + +--connect con1,localhost,root +--echo # Create delete marked secondary index Record ('a', 10) +insert t1 values(10, 'a'); +delete from t1; +--echo # Insert secondary index key ('a', 15) in the GAP between ('a', 10) and Supremum +set transaction isolation level read committed; +set debug_sync="ha_write_row_end SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert t1 values(15, 'a') + +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +--echo # Insert secondary index key ('a', 5) in the GAP between Infimum and ('a', 10) +set session innodb_lock_wait_timeout= 1; +set transaction isolation level read committed; +--error ER_LOCK_WAIT_TIMEOUT +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; + +--connection con1 +--reap + +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set col2='a' where col1=5 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## INSERT IGNORE +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, 'a'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert ignore t1 values(15, 'a'), (16, 'b') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(5, 'a'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set col2=chr(92+col1) where col1<=9 +--connect con2,localhost,root +set 
debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE modifying PK +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set col2='a', col1=4 where col1=5 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE modifying PK +create table t1 ( + col1 int primary key, + col2 varchar(3000), + unique (col2) using hash) engine=innodb; +start transaction; +select * from t1; +--connect con1,localhost,root +insert into t1 values(10, 'a'); +delete from t1; +insert into t1 values( 5, 'b'); +insert into t1 values(15, 'c'); +insert into t1 values( 9, 'd'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set col2=chr(92+col1), col1=col1-1 where col1<=9 +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set col2='a' where col1=15; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_NOT_SUPPORTED_YET +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo # End of 10.6 tests diff --git a/mysql-test/suite/period/r/innodb_debug.result b/mysql-test/suite/period/r/innodb_debug.result new file mode 100644 index 00000000000..fe9adb4d4c3 --- /dev/null +++ b/mysql-test/suite/period/r/innodb_debug.result @@ -0,0 +1,215 @@ +# +# MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +# +## INSERT +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert t1 values(10, date'2010-09-09', date'2010-11-10'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-10-10 2010-11-12 +disconnect con1; +disconnect 
con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'id' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## INSERT IGNORE +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10'); +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +connection con1; +Warnings: +Warning 1062 Duplicate entry '10-2010-11-10-2010-09-09' for key 'id' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-10-10 2010-11-12 +11 2010-09-09 2010-11-10 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE +create table t1 ( +id int, s date, e date, +period for p(s,e), +unique(id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE modifying PK +create table t1 ( +id int, s date, e date, +period for p(s,e), +primary key (id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; 
+insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 23000: Duplicate entry '10-2010-11-10-2010-09-09' for key 'PRIMARY' +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +## UPDATE IGNORE modifying PK +create table t1 ( +id int, s date, e date, +period for p(s,e), +primary key (id, p without overlaps) +) engine=innodb; +start transaction; +select * from t1; +id s e +connect con1,localhost,root; +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +update ignore t1 set e=e + interval 2 month where s=date'2010-09-09'; +connect con2,localhost,root; +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +connection con1; +ERROR 42000: UPDATE IGNORE that modifies a primary key of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported +connection default; +select * from t1; +id s e +commit; +select * from t1; +id s e +10 2010-09-09 2010-09-10 +10 2010-10-10 2010-12-12 +disconnect con1; +disconnect con2; +set debug_sync='RESET'; +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/suite/period/t/innodb_debug.test b/mysql-test/suite/period/t/innodb_debug.test new file mode 100644 index 00000000000..6ae92ab600f --- /dev/null +++ b/mysql-test/suite/period/t/innodb_debug.test @@ -0,0 +1,208 @@ +--source include/have_innodb.inc +--source include/have_debug_sync.inc + +--echo # +--echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records +--echo # + +--echo ## INSERT +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert t1 values(10, date'2010-09-09', date'2010-11-10') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE +create table t1 ( + id 
int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## INSERT IGNORE +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +set transaction isolation level read committed; +set debug_sync="innodb_row_ins_step_enter SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send insert ignore t1 values(10, date'2010-09-09', date'2010-11-10'),(11, date'2010-09-09', date'2010-11-10') +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +insert t1 values(10, date'2010-10-10', date'2010-11-12'); +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE +create table t1 ( + id int, s date, e date, + period for p(s,e), + unique(id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE modifying PK +create table t1 ( + id int, s date, e date, + period for p(s,e), + primary key (id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; 
+--send update t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_DUP_ENTRY +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + +--echo ## UPDATE IGNORE modifying PK +create table t1 ( + id int, s date, e date, + period for p(s,e), + primary key (id, p without overlaps) +) engine=innodb; + +start transaction; +select * from t1; +--connect con1,localhost,root +insert t1 values(10, date'2010-10-10', date'2010-11-11'); +delete from t1; +insert t1 values(10, date'2010-09-09', date'2010-09-10'); +insert t1 values(10, date'2010-12-10', date'2010-12-12'); +set transaction isolation level read committed; +set debug_sync="innodb_row_update_for_mysql_begin SIGNAL checked_duplicate WAIT_FOR do_insert"; +--send update ignore t1 set e=e + interval 2 month where s=date'2010-09-09' +--connect con2,localhost,root +set debug_sync="now WAIT_FOR checked_duplicate"; +set transaction isolation level read committed; +update t1 set s=date'2010-10-10' where e=date'2010-12-12'; +set debug_sync="now SIGNAL do_insert"; +--connection con1 +--error ER_NOT_SUPPORTED_YET +--reap +--connection default +select * from t1; +commit; +select * from t1; +--disconnect con1 +--disconnect con2 +set debug_sync='RESET'; +drop table t1; + + +--echo # End of 10.6 tests + diff --git a/sql/handler.cc b/sql/handler.cc index 272ab60cffa..959fedae959 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -7413,6 +7413,8 @@ static bool long_unique_fields_differ(KEY *keyinfo, const uchar *other) int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no) { int result; + /* Skip just written row in the case of HA_CHECK_UNIQUE_AFTER_WRITE */ + bool lax= (ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) > 0; KEY *key_info= table->key_info + key_no; uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL]; DBUG_ENTER("handler::check_duplicate_long_entry_key"); @@ -7452,6 +7454,11 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no) { if (!long_unique_fields_differ(key_info, lookup_buffer)) { + if (lax) + { + lax= false; + continue; + } result= HA_ERR_FOUND_DUPP_KEY; table->file->lookup_errkey= key_no; lookup_handler->position(table->record[0]); @@ -7527,7 +7534,8 @@ int handler::ha_check_long_uniques(const uchar *old_rec, const uchar *new_rec) { if (int res= check_duplicate_long_entry_key(new_rec, i)) { - if (!old_rec && table->next_number_field) + if (!old_rec && table->next_number_field && + !(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE)) if (int err= update_auto_increment()) return err; return res; @@ -7548,7 +7556,8 @@ int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data) if (table->versioned() && !table->vers_end_field()->is_max()) return 0; - const bool is_update= old_data != NULL; + const bool after_write= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE; + const bool is_update= !after_write && old_data; uchar *record_buffer= lookup_buffer + table_share->max_unique_length + table_share->null_fields; @@ -7603,17 +7612,22 @@ int handler::ha_check_overlaps(const uchar *old_data, const uchar* new_data) key_part_map((1 << (key_parts - 1)) - 1), HA_READ_AFTER_KEY); - if (!error && is_update) + if (!error) { - /* In case of update 
it could happen that the nearest neighbour is - a record we are updating. It means, that there are no overlaps - from this side. - */ - DBUG_ASSERT(lookup_handler != this); - DBUG_ASSERT(ref_length == lookup_handler->ref_length); + if (is_update) + { + /* In case of update it could happen that the nearest neighbour is + a record we are updating. It means, that there are no overlaps + from this side. + */ + DBUG_ASSERT(lookup_handler != this); + DBUG_ASSERT(ref_length == lookup_handler->ref_length); - lookup_handler->position(record_buffer); - if (memcmp(ref, lookup_handler->ref, ref_length) == 0) + lookup_handler->position(record_buffer); + if (memcmp(ref, lookup_handler->ref, ref_length) == 0) + error= lookup_handler->ha_index_next(record_buffer); + } + else if (after_write) error= lookup_handler->ha_index_next(record_buffer); } @@ -7738,7 +7752,8 @@ int handler::ha_write_row(const uchar *buf) }); DBUG_ASSERT(table_share->tmp_table || m_lock_type == F_WRLCK); - if ((error= ha_check_inserver_constraints(NULL, buf))) + if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(NULL, buf))) goto err; MYSQL_INSERT_ROW_START(table_share->db.str, table_share->table_name.str); @@ -7754,6 +7769,25 @@ int handler::ha_write_row(const uchar *buf) if (error) goto err; + if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(NULL, buf))) + { + if (lookup_handler != this) // INSERT IGNORE or REPLACE or ODKU + { + position(buf); + int e= rnd_pos(lookup_buffer, ref); + if (!e) + { + increment_statistics(&SSV::ha_delete_count); + TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, e, + { e= delete_row(buf);}) + } + if (e) + error= e; + } + goto err; + } + rows_changed++; if (row_logging) { @@ -7782,7 +7816,8 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data) DBUG_ASSERT(new_data == table->record[0]); DBUG_ASSERT(old_data == table->record[1]); - if ((error= ha_check_inserver_constraints(old_data, new_data))) + if (!(ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(old_data, new_data))) return error; MYSQL_UPDATE_ROW_START(table_share->db.str, table_share->table_name.str); @@ -7800,6 +7835,35 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data) if (error) return error; + if ((ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) && + (error= ha_check_inserver_constraints(old_data, new_data))) + { + int e= 0; + if (ha_thd()->lex->ignore) + { + /* hack: modifying PK is not supported for now, see MDEV-37233 */ + if (table->s->primary_key != MAX_KEY) + { + KEY *key= table->key_info + table->s->primary_key; + KEY_PART_INFO *kp= key->key_part; + KEY_PART_INFO *end= kp + key->user_defined_key_parts; + for (; kp < end; kp++) + if (bitmap_is_set(table->write_set, kp->fieldnr-1)) + { + my_printf_error(ER_NOT_SUPPORTED_YET, "UPDATE IGNORE that " + "modifies a primary key of a table with a UNIQUE constraint " + "%s is not currently supported", MYF(0), + table->s->long_unique_table ? "USING HASH" : "WITHOUT OVERLAPS"); + return HA_ERR_UNSUPPORTED; + } + } + increment_statistics(&SSV::ha_update_count); + TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, MAX_KEY, e, + { e= update_row(new_data, old_data);}) + } + return e ? e : error; + } + rows_changed++; if (row_logging) { diff --git a/sql/handler.h b/sql/handler.h index 4d94e1d5ac3..6bb1dc65229 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -368,7 +368,9 @@ enum chf_create_flags { /* Implements SELECT ... 
FOR UPDATE SKIP LOCKED */
 #define HA_CAN_SKIP_LOCKED (1ULL << 61)
-#define HA_LAST_TABLE_FLAG HA_CAN_SKIP_LOCKED
+#define HA_CHECK_UNIQUE_AFTER_WRITE (1ULL << 62)
+
+#define HA_LAST_TABLE_FLAG HA_CHECK_UNIQUE_AFTER_WRITE
 
 /* bits in index_flags(index_number) for what you can do with index */
diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc
index 5cf3e00f88d..9a136cbc178 100644
--- a/storage/innobase/handler/ha_innodb.cc
+++ b/storage/innobase/handler/ha_innodb.cc
@@ -5071,7 +5071,7 @@ ha_innobase::table_flags() const
 	   called before prebuilt is inited. */
 	if (thd_tx_isolation(thd) <= ISO_READ_COMMITTED) {
-		return(flags);
+		return(flags | HA_CHECK_UNIQUE_AFTER_WRITE);
 	}
 
 	return(flags | HA_BINLOG_STMT_CAPABLE);

From 626d5bf832cd05cbbfe4a1cc424bf30064639d60 Mon Sep 17 00:00:00 2001
From: Thirunarayanan Balathandayuthapani
Date: Wed, 16 Jul 2025 16:25:53 +0530
Subject: [PATCH 39/61] MDEV-36287 mariabackup ignores tables-file

Problem:
========
- Mariabackup ignores the tables-file option because it fails to
register the new entry in the database hash cells. This issue was
caused by commit 3c312d247ca4509283c92adccaaab808e95c7c6d (MDEV-35190).
xb_register_filter_entry() fails to stop when it encounters the
empty list.

Solution:
=========
xb_register_filter_entry(): If there is no next member to dereference,
return a pointer to the existing element.
---
 extra/mariabackup/xtrabackup.cc             |  2 +-
 mysql-test/suite/mariabackup/partial.result | 12 +++++++++++
 mysql-test/suite/mariabackup/partial.test   | 22 +++++++++++++++++++++
 3 files changed, 35 insertions(+), 1 deletion(-)

diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index a0c26d25e7a..60f81861a35 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -4282,7 +4282,7 @@ xb_register_filter_entry(
 		databases_hash->cell_get(my_crc32c(0, name, p - name))
 			->search(&xb_filter_entry_t::name_hash,
 				 [dbname](xb_filter_entry_t* f)
-				 { return f && !strcmp(f->name, dbname); });
+				 { return !f || !strcmp(f->name, dbname); });
 		if (!*prev) {
 			(*prev = xb_new_filter_entry(dbname))
 				->has_tables = TRUE;
diff --git a/mysql-test/suite/mariabackup/partial.result b/mysql-test/suite/mariabackup/partial.result
index 9ff3a20c01f..9527b278ac9 100644
--- a/mysql-test/suite/mariabackup/partial.result
+++ b/mysql-test/suite/mariabackup/partial.result
@@ -25,3 +25,15 @@ i
 DROP TABLE t1;
 DROP TABLE t2;
 DROP TABLE t21;
+#
+# MDEV-36287 maribackup ignores tables-file option
+#
+CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB;
+CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB;
+INSERT INTO t1 values(1);
+# Only backup table t1 by creating tables_file
+# Backup with --tables-file option
+# table t2 is skipped.
Shows only t1 +t1.frm +t1.new +DROP TABLE t2, t1; diff --git a/mysql-test/suite/mariabackup/partial.test b/mysql-test/suite/mariabackup/partial.test index 85808749b62..bc8186c909f 100644 --- a/mysql-test/suite/mariabackup/partial.test +++ b/mysql-test/suite/mariabackup/partial.test @@ -78,3 +78,25 @@ DROP TABLE t1; DROP TABLE t2; DROP TABLE t21; rmdir $targetdir; + +--echo # +--echo # MDEV-36287 maribackup ignores tables-file option +--echo # +CREATE TABLE t1(f1 INT NOT NULL)ENGINE=InnoDB; +CREATE TABLE t2(f1 INT NOT NULL)ENGINE=InnoDB; +INSERT INTO t1 values(1); +let targetdir=$MYSQLTEST_VARDIR/tmp/backup; +let tables_list=$MYSQLTEST_VARDIR/tmp/tables_list.out; +--echo # Only backup table t1 by creating tables_file +--exec echo "test.t1" > $tables_list + +--echo # Backup with --tables-file option +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --tables-file=$tables_list --target-dir=$targetdir; +--enable_result_log + +--echo # table t2 is skipped. Shows only t1 +list_files $targetdir/test; +DROP TABLE t2, t1; +rmdir $targetdir; +remove_file $tables_list; From bfcd2674a3b20b105393970d3937cdc18f43ce23 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 16 Jul 2025 16:40:17 +0200 Subject: [PATCH 40/61] MDEV-37199 disable --view-protocol creating and dropping views doesn't work very well with transactions --- mysql-test/main/long_unique_innodb_debug.test | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/main/long_unique_innodb_debug.test b/mysql-test/main/long_unique_innodb_debug.test index bd4ba866576..5f750d9cf3c 100644 --- a/mysql-test/main/long_unique_innodb_debug.test +++ b/mysql-test/main/long_unique_innodb_debug.test @@ -5,6 +5,7 @@ --echo # MDEV-37199 UNIQUE KEY USING HASH accepting duplicate records --echo # +--disable_view_protocol --echo ## INSERT create table t1 ( col1 int primary key, @@ -235,5 +236,6 @@ select * from t1; --disconnect con2 set debug_sync='RESET'; drop table t1; +--enable_view_protocol --echo # End of 10.6 tests From 1681b6c330dfd62810030ee1522f28e26b5999fa Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Thu, 17 Jul 2025 15:42:59 +0200 Subject: [PATCH 41/61] MDEV-37257: unstable tests temporarily added to 'disabled' list --- mysql-test/suite/galera/disabled.def | 2 ++ 1 file changed, 2 insertions(+) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 23bf40c409a..438b1e63cfd 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -14,3 +14,5 @@ galera_wan : MDEV-35940 Unallowed state transition: donor -> synced in galera_wa galera_vote_rejoin_ddl : MDEV-35940 Unallowed state transition: donor -> synced in galera_wan MW-329 : MDEV-35951 Complete freeze during MW-329 test galera_vote_rejoin_dml : MDEV-35964 Assertion `ist_seqno >= cc_seqno' failed in galera_vote_rejoin_dml +galera_var_notify_cmd : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted) +galera_var_notify_ssl_ipv6 : MDEV-37257 WSREP: Notification command failed: 1 (Operation not permitted) From 008145b968518f1ccf22558ee15fbc146f1b204b Mon Sep 17 00:00:00 2001 From: Julius Goryavsky Date: Thu, 17 Jul 2025 17:21:02 +0200 Subject: [PATCH 42/61] galera: changes for transition to galera library 26.4.23 --- mysql-test/include/galera_variables_ok.inc | 2 +- mysql-test/suite/wsrep/r/variables.result | 2 -- mysql-test/suite/wsrep/r/variables_debug.result | 3 +++ mysql-test/suite/wsrep/t/variables.test | 2 +- 
mysql-test/suite/wsrep/t/variables_debug.test | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/mysql-test/include/galera_variables_ok.inc b/mysql-test/include/galera_variables_ok.inc index 4d9c71e47d8..9bd31bb32b2 100644 --- a/mysql-test/include/galera_variables_ok.inc +++ b/mysql-test/include/galera_variables_ok.inc @@ -5,7 +5,7 @@ if (!$_galera_variables_delta) { --let $galera_variables_delta=0 } ---let $galera_variables_expected=`SELECT 49 + $galera_variables_delta` +--let $galera_variables_expected=`SELECT 51 + $galera_variables_delta` --let $galera_variables_count=`SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep%'` diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result index c8150a2f0b0..592e5ef1b73 100644 --- a/mysql-test/suite/wsrep/r/variables.result +++ b/mysql-test/suite/wsrep/r/variables.result @@ -108,8 +108,6 @@ WSREP_FORCED_BINLOG_FORMAT WSREP_GTID_DOMAIN_ID WSREP_GTID_MODE WSREP_GTID_SEQ_NO -WSREP_MODE -WSREP_STRICT_DDL WSREP_IGNORE_APPLY_ERRORS WSREP_LOAD_DATA_SPLITTING WSREP_LOG_CONFLICTS diff --git a/mysql-test/suite/wsrep/r/variables_debug.result b/mysql-test/suite/wsrep/r/variables_debug.result index 36f9b4ec256..18a323054d6 100644 --- a/mysql-test/suite/wsrep/r/variables_debug.result +++ b/mysql-test/suite/wsrep/r/variables_debug.result @@ -108,11 +108,13 @@ WSREP_DRUPAL_282555_WORKAROUND WSREP_FORCED_BINLOG_FORMAT WSREP_GTID_DOMAIN_ID WSREP_GTID_MODE +WSREP_GTID_SEQ_NO WSREP_IGNORE_APPLY_ERRORS WSREP_LOAD_DATA_SPLITTING WSREP_LOG_CONFLICTS WSREP_MAX_WS_ROWS WSREP_MAX_WS_SIZE +WSREP_MODE WSREP_MYSQL_REPLICATION_BUNDLE WSREP_NODE_ADDRESS WSREP_NODE_INCOMING_ADDRESS @@ -138,6 +140,7 @@ WSREP_SST_DONOR_REJECTS_QUERIES WSREP_SST_METHOD WSREP_SST_RECEIVE_ADDRESS WSREP_START_POSITION +WSREP_STRICT_DDL WSREP_SYNC_WAIT WSREP_TRX_FRAGMENT_SIZE WSREP_TRX_FRAGMENT_UNIT diff --git a/mysql-test/suite/wsrep/t/variables.test b/mysql-test/suite/wsrep/t/variables.test index 762d783a09c..c82d0ae02c2 100644 --- a/mysql-test/suite/wsrep/t/variables.test +++ b/mysql-test/suite/wsrep/t/variables.test @@ -3,7 +3,7 @@ --source include/have_innodb.inc --source include/galera_no_debug_sync.inc ---let $galera_version=26.4.21 +--let $galera_version=26.4.23 source include/check_galera_version.inc; source include/galera_variables_ok.inc; diff --git a/mysql-test/suite/wsrep/t/variables_debug.test b/mysql-test/suite/wsrep/t/variables_debug.test index e55dbd4fa1f..e50cee28a15 100644 --- a/mysql-test/suite/wsrep/t/variables_debug.test +++ b/mysql-test/suite/wsrep/t/variables_debug.test @@ -5,7 +5,7 @@ --source include/have_debug_sync.inc --source include/galera_have_debug_sync.inc ---let $galera_version=26.4.21 +--let $galera_version=26.4.23 source include/check_galera_version.inc; source include/galera_variables_ok_debug.inc; From 9412cd0e62c3fcdcaed371ff0e5917d5c3300837 Mon Sep 17 00:00:00 2001 From: Raghunandan Bhat Date: Fri, 18 Jul 2025 18:59:25 +0530 Subject: [PATCH 43/61] MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date Problem: When a parameter in a prepared UPDATE statement uses DEFAULT value for a column (e.g., `EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT`), and that column's default is derived from an expression or a function referencing another column (e.g., `FROM_UNIXTIME(a)`), the server fails to correctly mark the dependent column (`a`) for reading. 
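(For illustration only, the marking that the DEFAULT evaluation depends on
can be sketched like so; this is a hypothetical helper, not the server's
actual code, showing only that b's DEFAULT expression reads column a:

  // b TIMESTAMP DEFAULT FROM_UNIXTIME(a): before evaluating the DEFAULT
  // of b, every column it reads (here: a) must be in the read set
  static void mark_default_dependencies(TABLE *table, Field *target)
  {
    if (Virtual_column_info *def= target->default_value)
      def->expr->walk(&Item::register_field_in_read_map, 1, table);
  }

The root cause of the missed marking is described next.)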
This happened because the server failed to associate the `Item_param` (representing the `?` parameter) with its target field (`b`), leaving `Item_param::m_associated_field` uninitialized. This prevented the correct code path for evaluating the `DEFAULT` value, leading to the dependent column not being marked for read. When the column's value was later accessed, an assertion failed, causing a crash. Fix: In the prepare stage of the UPDATE statement, associate each value with the target field, causing the server to take the correct code path (`Item_param::assign_default`) for evaluating DEFAULT values and marking dependent column for reading beforehand. Only tests are included as it is a duplicate of MDEV-36870. --- mysql-test/main/ps.result | 52 +++++++++++++++++++++++++++++++++++++++ mysql-test/main/ps.test | 51 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 103 insertions(+) diff --git a/mysql-test/main/ps.result b/mysql-test/main/ps.result index 775ada34653..691f4ba4a04 100644 --- a/mysql-test/main/ps.result +++ b/mysql-test/main/ps.result @@ -5995,3 +5995,55 @@ DROP VIEW t1; # # End of 10.4 tests # +# +# MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date +# +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); +SELECT * FROM t; +a b +1 2025-07-18 18:37:10 +EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT; +SELECT * FROM t; +a b +1 1970-01-01 09:00:01 +DROP TABLE t; +CREATE TABLE t (a INT, b INT DEFAULT (a+5)); +INSERT INTO t values (1,2), (2,DEFAULT); +EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT; +SELECT * FROM t; +a b +1 2 +2 7 +3 4 +4 9 +EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT; +SELECT * FROM t; +a b +1 6 +2 7 +3 8 +4 9 +DROP TABLE t; +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); +SELECT * FROM t; +a b +1 2025-07-18 18:37:10 +PREPARE s FROM 'UPDATE t SET b=?'; +EXECUTE s USING DEFAULT; +SELECT * FROM t; +a b +1 1970-01-01 09:00:01 +DROP TABLE t; +CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3); +SELECT * FROM t; +a b c +1 2025-07-18 18:37:10 3 +EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT; +SELECT * FROM t; +a b c +1 1970-01-01 09:00:01 6 +DROP TABLE t; +# End of 10.6 tests diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test index ff1e9e9f13e..54666d64012 100644 --- a/mysql-test/main/ps.test +++ b/mysql-test/main/ps.test @@ -5447,3 +5447,54 @@ DROP VIEW t1; --echo # --echo # End of 10.4 tests --echo # + +--echo # +--echo # MDEV-35330: Assertion `marked_for_read()` failed in VSec9::VSec9 | Item_func_from_unixtime::get_date +--echo # + +CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a)); +INSERT INTO t VALUES (1,'2025-07-18 18:37:10'); + +SELECT * FROM t; + +EXECUTE IMMEDIATE 'UPDATE t SET b=?' USING DEFAULT; + +SELECT * FROM t; + +DROP TABLE t; + +CREATE TABLE t (a INT, b INT DEFAULT (a+5)); +INSERT INTO t values (1,2), (2,DEFAULT); +EXECUTE IMMEDIATE 'INSERT INTO t VALUES (3,4), (4,?)' USING DEFAULT; + +SELECT * FROM t; + +EXECUTE IMMEDIATE 'UPDATE t SET b=?' 
USING DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+
+CREATE TABLE t (a INT,b TIMESTAMP DEFAULT FROM_UNIXTIME(a));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10');
+
+SELECT * FROM t;
+
+PREPARE s FROM 'UPDATE t SET b=?';
+EXECUTE s USING DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+
+CREATE TABLE t (a INT, b TIMESTAMP DEFAULT FROM_UNIXTIME(a), c INT DEFAULT (a+5));
+INSERT INTO t VALUES (1,'2025-07-18 18:37:10',3);
+
+SELECT * FROM t;
+
+EXECUTE IMMEDIATE 'UPDATE t SET b=?, c=?' USING DEFAULT, DEFAULT;
+
+SELECT * FROM t;
+
+DROP TABLE t;
+--echo # End of 10.6 tests

From 3a2e1f87a1fa01bfe5ada183ec38412aa96726ce Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sun, 20 Jul 2025 12:06:42 +0200
Subject: [PATCH 44/61] MDEV-37268 ER_NOT_KEYFILE or assertion failure upon
 REPLACE into table with unique hash under READ-COMMITTED

followup for 9703c90712f3 (MDEV-37199 UNIQUE KEY USING HASH accepting
duplicate records)

don't forget to rnd_init()/rnd_end() around rnd_pos()
---
 sql/handler.cc | 21 ++++++++++++---------
 1 file changed, 12 insertions(+), 9 deletions(-)

diff --git a/sql/handler.cc b/sql/handler.cc
index 959fedae959..e58ddff3aa5 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -7774,16 +7774,19 @@ int handler::ha_write_row(const uchar *buf)
   {
     if (lookup_handler != this) // INSERT IGNORE or REPLACE or ODKU
     {
+      int olderror= error;
+      if ((error= rnd_init(0)))
+        goto err;
       position(buf);
-      int e= rnd_pos(lookup_buffer, ref);
-      if (!e)
-      {
-        increment_statistics(&SSV::ha_delete_count);
-        TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, e,
-                      { e= delete_row(buf);})
-      }
-      if (e)
-        error= e;
+      if ((error= rnd_pos(lookup_buffer, ref)))
+        goto err;
+
+      increment_statistics(&SSV::ha_delete_count);
+      TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, error,
+                    { error= delete_row(buf);})
+      rnd_end();
+      if (!error)
+        error= olderror;
     }
     goto err;
   }

From 774039e410d1ff4ff54d810157fd9071c48648be Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sun, 20 Jul 2025 12:33:01 +0200
Subject: [PATCH 45/61] MDEV-37268 ER_DUP_ENTRY upon REPLACE into table with
 unique hash under READ-COMMITTED

followup for 9703c90712f3 (MDEV-37199 UNIQUE KEY USING HASH accepting
duplicate records)

when looking for long unique duplicates and the new row is already
inserted, we cannot simply "skip one conflict"; we must skip exactly the
new row and find a conflict which isn't the new row - otherwise
table->file->dup_ref can be set incorrectly and REPLACE won't work.
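In outline, the corrected scan must behave like this (a condensed
paraphrase of the handler.cc change below; found_hash_match() and
report_duplicate() are illustrative stand-ins, not real functions):

  position(table->record[0]);    // remember the ref of the just-written row
  while (found_hash_match())
  {
    lookup_handler->position(table->record[0]);
    if (skip_self && !memcmp(ref, lookup_handler->ref, ref_length))
    {
      skip_self= false;          // our own row: skip it, exactly once
      continue;
    }
    // any other match is a real duplicate; dup_ref must point at it,
    // so that REPLACE can locate and delete the right row
    memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
    return report_duplicate(key_no);
  }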
---
 mysql-test/main/long_unique_innodb.result | 13 +++++++++++++
 mysql-test/main/long_unique_innodb.test   | 14 ++++++++++++++
 sql/handler.cc                            | 11 +++++++----
 3 files changed, 34 insertions(+), 4 deletions(-)

diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
index 8ed45ce6b10..58267e9c4eb 100644
--- a/mysql-test/main/long_unique_innodb.result
+++ b/mysql-test/main/long_unique_innodb.result
@@ -134,3 +134,16 @@ disconnect con2;
 # MDEV-20131 Assertion `!pk->has_virtual()' failed
 create table t1 (a text, primary key(a(1871))) engine=innodb;
 ERROR 42000: Specified key was too long; max key length is 1536 bytes
+# End of 10.4 tests
+#
+# MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
+#
+create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
+insert t1 values (1,'x');
+set transaction isolation level read committed;
+replace t1 values (2,'x');
+select * from t1;
+id f
+2 x
+drop table t1;
+# End of 10.6 tests
diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test
index dd2d9f94de3..3253bd927eb 100644
--- a/mysql-test/main/long_unique_innodb.test
+++ b/mysql-test/main/long_unique_innodb.test
@@ -143,3 +143,17 @@ disconnect con2;
 
 --error ER_TOO_LONG_KEY
 create table t1 (a text, primary key(a(1871))) engine=innodb;
+
+--echo # End of 10.4 tests
+
+--echo #
+--echo # MDEV-37268 ER_NOT_KEYFILE or assertion failure upon REPLACE into table with unique hash under READ-COMMITTED
+--echo #
+create table t1 (id int not null primary key, f varchar(100), unique(f) using hash) engine=innodb;
+insert t1 values (1,'x');
+set transaction isolation level read committed;
+replace t1 values (2,'x');
+select * from t1;
+drop table t1;
+
+--echo # End of 10.6 tests
diff --git a/sql/handler.cc b/sql/handler.cc
index e58ddff3aa5..1ae75ff94d3 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -7414,7 +7414,7 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
 {
   int result;
   /* Skip just written row in the case of HA_CHECK_UNIQUE_AFTER_WRITE */
-  bool lax= (ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE) > 0;
+  bool skip_self= ha_table_flags() & HA_CHECK_UNIQUE_AFTER_WRITE;
   KEY *key_info= table->key_info + key_no;
   uchar ptr[HA_HASH_KEY_LENGTH_WITH_NULL];
   DBUG_ENTER("handler::check_duplicate_long_entry_key");
@@ -7426,6 +7426,9 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
   if (key_info->key_part->field->is_real_null())
     DBUG_RETURN(0);
 
+  if (skip_self)
+    position(table->record[0]);
+
   key_copy(ptr, new_rec, key_info, key_info->key_length, false);
 
   result= lookup_handler->ha_index_init(key_no, 0);
@@ -7454,14 +7457,14 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
     {
       if (!long_unique_fields_differ(key_info, lookup_buffer))
       {
-        if (lax)
+        lookup_handler->position(table->record[0]);
+        if (skip_self && !memcmp(ref, lookup_handler->ref, ref_length))
         {
-          lax= false;
+          skip_self= false; // cannot happen twice, so let's save a memcpy
          continue;
         }
         result= HA_ERR_FOUND_DUPP_KEY;
         table->file->lookup_errkey= key_no;
-        lookup_handler->position(table->record[0]);
         memcpy(table->file->dup_ref, lookup_handler->ref, ref_length);
         goto end;
       }

From b96b5a6ccf6a2f7679cd3bb880af945da85073dd Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sun, 20 Jul 2025 16:08:51 +0200
Subject: [PATCH 46/61] cleanup: ha_partition::m_rec0

it makes no sense to save
this->table->record[0] in this->m_rec0, just use table->record[0] (also, future-proof, if table->record[0] changes) --- sql/ha_partition.cc | 24 +++++++++++------------- sql/ha_partition.h | 1 - 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 1c796a78416..3c3c51f92eb 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -409,7 +409,6 @@ void ha_partition::init_handler_variables() m_top_entry= NO_CURRENT_PART_ID; m_rec_length= 0; m_last_part= 0; - m_rec0= 0; m_err_rec= NULL; m_curr_key_info[0]= NULL; m_curr_key_info[1]= NULL; @@ -2199,7 +2198,7 @@ int ha_partition::copy_partitions(ulonglong * const copied, goto init_error; while (TRUE) { - if ((result= file->ha_rnd_next(m_rec0))) + if ((result= file->ha_rnd_next(table->record[0]))) { if (result != HA_ERR_END_OF_FILE) goto error; @@ -2225,7 +2224,7 @@ int ha_partition::copy_partitions(ulonglong * const copied, /* Copy record to new handler */ (*copied)++; DBUG_ASSERT(!m_new_file[new_part]->row_logging); - result= m_new_file[new_part]->ha_write_row(m_rec0); + result= m_new_file[new_part]->ha_write_row(table->record[0]); if (result) goto error; } @@ -3803,7 +3802,6 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked) DBUG_RETURN(HA_ERR_INITIALIZATION); } m_start_key.length= 0; - m_rec0= table->record[0]; m_rec_length= table_share->reclength; if (!m_part_ids_sorted_by_num_of_records) { @@ -4719,15 +4717,15 @@ int ha_partition::update_row(const uchar *old_data, const uchar *new_data) */ { Abort_on_warning_instant_set old_abort_on_warning(thd, 0); - error= get_part_for_buf(old_data, m_rec0, m_part_info, &old_part_id); + error= get_part_for_buf(old_data, table->record[0], m_part_info, &old_part_id); } DBUG_ASSERT(!error); DBUG_ASSERT(old_part_id == m_last_part); DBUG_ASSERT(bitmap_is_set(&(m_part_info->read_partitions), old_part_id)); #endif - if (unlikely((error= get_part_for_buf(new_data, m_rec0, m_part_info, - &new_part_id)))) + if (unlikely((error= get_part_for_buf(new_data, table->record[0], + m_part_info, &new_part_id)))) goto exit; if (unlikely(!bitmap_is_set(&(m_part_info->lock_partitions), new_part_id))) { @@ -5555,7 +5553,7 @@ int ha_partition::rnd_pos_by_record(uchar *record) { DBUG_ENTER("ha_partition::rnd_pos_by_record"); - if (unlikely(get_part_for_buf(record, m_rec0, m_part_info, &m_last_part))) + if (unlikely(get_part_for_buf(record, table->record[0], m_part_info, &m_last_part))) DBUG_RETURN(1); int err= m_file[m_last_part]->rnd_pos_by_record(record); @@ -6339,7 +6337,7 @@ int ha_partition::read_range_first(const key_range *start_key, m_start_key.key= NULL; m_index_scan_type= partition_read_range; - error= common_index_read(m_rec0, MY_TEST(start_key)); + error= common_index_read(table->record[0], MY_TEST(start_key)); DBUG_RETURN(error); } @@ -10338,7 +10336,7 @@ void ha_partition::print_error(int error, myf errflag) str.append('('); str.append_ulonglong(m_last_part); str.append(STRING_WITH_LEN(" != ")); - if (get_part_for_buf(m_err_rec, m_rec0, m_part_info, &part_id)) + if (get_part_for_buf(m_err_rec, table->record[0], m_part_info, &part_id)) str.append('?'); else str.append_ulonglong(part_id); @@ -11323,7 +11321,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair) while (true) { - if ((result= m_file[read_part_id]->ha_rnd_next(m_rec0))) + if ((result= m_file[read_part_id]->ha_rnd_next(table->record[0]))) { if (result != HA_ERR_END_OF_FILE) break; @@ -11369,7 +11367,7 @@ int ha_partition::check_misplaced_rows(uint 
read_part_id, bool do_repair)
      Insert row into correct partition. Notice that there are no commit
      for every N row, so the repair will be one large transaction!
    */
-    if ((result= m_file[correct_part_id]->ha_write_row(m_rec0)))
+    if ((result= m_file[correct_part_id]->ha_write_row(table->record[0])))
    {
      /*
        We have failed to insert a row, it might have been a duplicate!
@@ -11413,7 +11411,7 @@ int ha_partition::check_misplaced_rows(uint read_part_id, bool do_repair)
      }

      /* Delete row from wrong partition. */
-      if ((result= m_file[read_part_id]->ha_delete_row(m_rec0)))
+      if ((result= m_file[read_part_id]->ha_delete_row(table->record[0])))
      {
        if (m_file[correct_part_id]->has_transactions_and_rollback())
          break;
diff --git a/sql/ha_partition.h b/sql/ha_partition.h
index 0bad10b91bf..d03428bd625 100644
--- a/sql/ha_partition.h
+++ b/sql/ha_partition.h
@@ -322,7 +322,6 @@ private:
    and if clustered pk, [0]= current index, [1]= pk, [2]= NULL
  */
  KEY *m_curr_key_info[3];              // Current index
-  uchar *m_rec0;                        // table->record[0]
  const uchar *m_err_rec;               // record which gave error
  QUEUE m_queue;                        // Prio queue used by sorted read

From 2b11a0e9918ac6b04d821fdbe4192475f516946a Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sun, 20 Jul 2025 13:00:23 +0200
Subject: [PATCH 47/61] MDEV-37268 assert upon UPDATE of partitioned table
 with unique hash under READ-COMMITTED

followup for 9703c90712f3 (MDEV-37199 UNIQUE KEY USING HASH accepting
duplicate records)

maintain the invariant that handler::ha_update_row() is always invoked
as handler::ha_update_row(record[0], record[1])
---
 sql/handler.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/sql/handler.cc b/sql/handler.cc
index 1ae75ff94d3..4ca785d1c32 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -7863,9 +7863,13 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data)
          return HA_ERR_UNSUPPORTED;
        }
    }
+    table->move_fields(table->field, table->record[1], table->record[0]);
+    std::swap(table->record[0], table->record[1]);
    increment_statistics(&SSV::ha_update_count);
    TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, MAX_KEY, e,
                  { e= update_row(new_data, old_data);})
+    table->move_fields(table->field, table->record[1], table->record[0]);
+    std::swap(table->record[0], table->record[1]);
  }
  return e ?
e : error;
}

From 5622f3f5e8ceac675dca0cfe63e6310b343239ac Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sun, 20 Jul 2025 13:04:52 +0200
Subject: [PATCH 48/61] MDEV-37268 HA_ERR_KEY_NOT_FOUND upon UPDATE of
 partitioned table with unique hash under READ-COMMITTED

followup for 9703c90712f3 (MDEV-37199 UNIQUE KEY USING HASH accepting
duplicate records)

ha_partition can return HA_ERR_KEY_NOT_FOUND even in the middle of the
index scan
---
 mysql-test/main/long_unique_innodb.result | 9 +++++++++
 mysql-test/main/long_unique_innodb.test   | 7 +++++++
 sql/handler.cc                            | 8 ++------
 3 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
index 58267e9c4eb..2af8a860115 100644
--- a/mysql-test/main/long_unique_innodb.result
+++ b/mysql-test/main/long_unique_innodb.result
@@ -146,4 +146,13 @@ select * from t1;
 id	f
 2	x
 drop table t1;
+create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
+insert t1 (id) values (1),(2);
+set transaction isolation level read committed;
+update ignore t1 set f = 'x';
+select * from t1;
+id	f
+1	x
+2	NULL
+drop table t1;
 # End of 10.6 tests
diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test
index 3253bd927eb..95bcc5c74fd 100644
--- a/mysql-test/main/long_unique_innodb.test
+++ b/mysql-test/main/long_unique_innodb.test
@@ -1,4 +1,5 @@
 --source include/have_innodb.inc
+--source include/have_partition.inc
 
 #
 # MDEV-371 Unique indexes for blobs
@@ -156,4 +157,10 @@ replace t1 values (2,'x');
 select * from t1;
 drop table t1;
 
+create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9;
+insert t1 (id) values (1),(2);
+set transaction isolation level read committed;
+update ignore t1 set f = 'x';
+select * from t1;
+drop table t1;
+
 --echo # End of 10.6 tests
diff --git a/sql/handler.cc b/sql/handler.cc
index 4ca785d1c32..393e4cc12d5 100644
--- a/sql/handler.cc
+++ b/sql/handler.cc
@@ -7440,11 +7440,7 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
     result= lookup_handler->ha_index_read_map(table->record[0], ptr,
                                               HA_WHOLE_KEY, HA_READ_KEY_EXACT);
     if (result)
-    {
-      if (result == HA_ERR_KEY_NOT_FOUND)
-        result= 0;
       goto end;
-    }
 
     // restore pointers after swap_values in TABLE::update_virtual_fields()
     for (Field **vf= table->vfield; *vf; vf++)
@@ -7472,10 +7468,10 @@ int handler::check_duplicate_long_entry_key(const uchar *new_rec, uint key_no)
     }
   }
   while (!(result= lookup_handler->ha_index_next_same(table->record[0], ptr,
                    key_info->key_length)));
-  if (result == HA_ERR_END_OF_FILE)
+end:
+  if (result == HA_ERR_END_OF_FILE || result == HA_ERR_KEY_NOT_FOUND)
     result= 0;
 
-end:
   restore_record(table, file->lookup_buffer);
   table->restore_blob_values(blob_storage);
   lookup_handler->ha_index_end();

From 18f85c8c681db74b35d3e042a998e4bccb1d6d98 Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Thu, 24 Jul 2025 00:11:33 +0200
Subject: [PATCH 49/61] MDEV-37302 Assertion failure in
 Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to
 insert DEFAULT into non-insertable view

Only do trigger prelocking for tables that are going to be modified
(with a write lock). A table can cause prelocking if its DEFAULT value
is used (because DEFAULT can be NEXTVAL), even if the table itself is
only used for reads.
Don't process triggers for such a table.
---
 mysql-test/suite/sql_sequence/default.result | 10 ++++++++++
 mysql-test/suite/sql_sequence/default.test   | 11 +++++++++++
 sql/sql_base.cc                              |  2 +-
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/mysql-test/suite/sql_sequence/default.result b/mysql-test/suite/sql_sequence/default.result
index fb8fed00a9a..fe9c6af1795 100644
--- a/mysql-test/suite/sql_sequence/default.result
+++ b/mysql-test/suite/sql_sequence/default.result
@@ -303,4 +303,14 @@ execute stmt using default;
 deallocate prepare stmt;
 drop table t1;
 drop sequence s1;
+#
+# MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view
+#
+create table t1 (f int);
+create algorithm=temptable view v1 as select * from t1;
+create trigger tr before update on t1 for each row set @a=1;
+insert v1 values (default);
+ERROR HY000: The target table v1 of the INSERT is not insertable-into
+drop view v1;
+drop table t1;
 # End of 10.6 tests
diff --git a/mysql-test/suite/sql_sequence/default.test b/mysql-test/suite/sql_sequence/default.test
index d7bc978ed9e..5cbfe237cd3 100644
--- a/mysql-test/suite/sql_sequence/default.test
+++ b/mysql-test/suite/sql_sequence/default.test
@@ -229,4 +229,15 @@ deallocate prepare stmt;
 drop table t1;
 drop sequence s1;
 
+--echo #
+--echo # MDEV-37302 Assertion failure in Table_triggers_list::add_tables_and_routines_for_triggers upon attempt to insert DEFAULT into non-insertable view
+--echo #
+create table t1 (f int);
+create algorithm=temptable view v1 as select * from t1;
+create trigger tr before update on t1 for each row set @a=1;
+--error ER_NON_INSERTABLE_TABLE
+insert v1 values (default);
+drop view v1;
+drop table t1;
+
 --echo # End of 10.6 tests
diff --git a/sql/sql_base.cc b/sql/sql_base.cc
index 1a4fa315c3c..501d13f201c 100644
--- a/sql/sql_base.cc
+++ b/sql/sql_base.cc
@@ -4897,7 +4897,7 @@ bool DML_prelocking_strategy::handle_table(THD *thd,
   DBUG_ASSERT(table_list->lock_type >= TL_FIRST_WRITE ||
               thd->lex->default_used);
 
-  if (table_list->trg_event_map)
+  if (table_list->trg_event_map && table_list->lock_type >= TL_FIRST_WRITE)
   {
     if (table->triggers)
     {

From fb2f324f855080ce169e3896c7d375227bcc964f Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Fri, 25 Jul 2025 12:26:50 +0200
Subject: [PATCH 50/61] MDEV-37310 Non-debug failing assertion
 node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under
 READ-COMMITTED

let's disallow UPDATE IGNORE in READ COMMITTED when the table has a
UNIQUE constraint that is USING HASH or is WITHOUT OVERLAPS.

This rarely-used combination should not block a release; it will be
fixed in MDEV-37233
---
 mysql-test/main/long_unique_innodb.result     | 16 ++++++++++-
 mysql-test/main/long_unique_innodb.test       | 13 +++++++++
 .../main/long_unique_innodb_debug.result      |  5 ++--
 mysql-test/main/long_unique_innodb_debug.test |  1 +
 mysql-test/suite/period/r/innodb_debug.result |  3 +-
 mysql-test/suite/period/t/innodb_debug.test   |  1 +
 sql/handler.cc                                | 28 ++++---------------
 7 files changed, 40 insertions(+), 27 deletions(-)

diff --git a/mysql-test/main/long_unique_innodb.result b/mysql-test/main/long_unique_innodb.result
index 2af8a860115..fd2b3daf8e5 100644
--- a/mysql-test/main/long_unique_innodb.result
+++ b/mysql-test/main/long_unique_innodb.result
@@ -150,9 +150,23 @@ create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb
 insert t1 (id) values (1),(2);
 set transaction isolation level read committed;
 update ignore t1 set f
= 'x'; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported select * from t1; id f -1 x +1 NULL 2 NULL drop table t1; +# +# MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED +# +create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2; +insert t1 values (1,'foo'),(2,'foo'); +set transaction isolation level read committed; +update ignore t1 set id = 2; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported +select * from t1; +id f +1 foo +2 foo +drop table t1; # End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb.test b/mysql-test/main/long_unique_innodb.test index 95bcc5c74fd..cbe2d7431fe 100644 --- a/mysql-test/main/long_unique_innodb.test +++ b/mysql-test/main/long_unique_innodb.test @@ -160,7 +160,20 @@ drop table t1; create table t1 (id int, f longtext, primary key (id), unique(f)) engine=innodb partition by key (id) partitions 9; insert t1 (id) values (1),(2); set transaction isolation level read committed; +--error ER_NOT_SUPPORTED_YET update ignore t1 set f = 'x'; select * from t1; drop table t1; + +--echo # +--echo # MDEV-37310 Non-debug failing assertion node->pcur->rel_pos == BTR_PCUR_ON upon violating long unique under READ-COMMITTED +--echo # +create table t1 (id int, f blob, unique(id,f)) engine=innodb partition by key(id) partitions 2; +insert t1 values (1,'foo'),(2,'foo'); +set transaction isolation level read committed; +--error ER_NOT_SUPPORTED_YET +update ignore t1 set id = 2; +select * from t1; +drop table t1; + --echo # End of 10.6 tests diff --git a/mysql-test/main/long_unique_innodb_debug.result b/mysql-test/main/long_unique_innodb_debug.result index 65409309b5f..497f2af39bf 100644 --- a/mysql-test/main/long_unique_innodb_debug.result +++ b/mysql-test/main/long_unique_innodb_debug.result @@ -166,6 +166,7 @@ set transaction isolation level read committed; update t1 set col2='a' where col1=15; set debug_sync="now SIGNAL do_insert"; connection con1; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported connection default; select * from t1; col1 col2 @@ -173,7 +174,7 @@ commit; select * from t1; col1 col2 5 b -9 e +9 d 15 a disconnect con1; disconnect con2; @@ -237,7 +238,7 @@ set transaction isolation level read committed; update t1 set col2='a' where col1=15; set debug_sync="now SIGNAL do_insert"; connection con1; -ERROR 42000: UPDATE IGNORE that modifies a primary key of a table with a UNIQUE constraint USING HASH is not currently supported +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint USING HASH is not currently supported connection default; select * from t1; col1 col2 diff --git a/mysql-test/main/long_unique_innodb_debug.test b/mysql-test/main/long_unique_innodb_debug.test index 5f750d9cf3c..d1a0673b54a 100644 --- a/mysql-test/main/long_unique_innodb_debug.test +++ b/mysql-test/main/long_unique_innodb_debug.test @@ -162,6 +162,7 @@ set transaction isolation level read committed; update t1 set col2='a' where col1=15; set debug_sync="now SIGNAL do_insert"; --connection con1 +--error ER_NOT_SUPPORTED_YET --reap --connection default select * from t1; diff --git a/mysql-test/suite/period/r/innodb_debug.result b/mysql-test/suite/period/r/innodb_debug.result index 
fe9adb4d4c3..eafc2230fdb 100644 --- a/mysql-test/suite/period/r/innodb_debug.result +++ b/mysql-test/suite/period/r/innodb_debug.result @@ -128,6 +128,7 @@ set transaction isolation level read committed; update t1 set s=date'2010-10-10' where e=date'2010-12-12'; set debug_sync="now SIGNAL do_insert"; connection con1; +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported connection default; select * from t1; id s e @@ -199,7 +200,7 @@ set transaction isolation level read committed; update t1 set s=date'2010-10-10' where e=date'2010-12-12'; set debug_sync="now SIGNAL do_insert"; connection con1; -ERROR 42000: UPDATE IGNORE that modifies a primary key of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported +ERROR 42000: UPDATE IGNORE in READ COMMITTED isolation mode of a table with a UNIQUE constraint WITHOUT OVERLAPS is not currently supported connection default; select * from t1; id s e diff --git a/mysql-test/suite/period/t/innodb_debug.test b/mysql-test/suite/period/t/innodb_debug.test index 6ae92ab600f..46ff13cb5fb 100644 --- a/mysql-test/suite/period/t/innodb_debug.test +++ b/mysql-test/suite/period/t/innodb_debug.test @@ -125,6 +125,7 @@ set transaction isolation level read committed; update t1 set s=date'2010-10-10' where e=date'2010-12-12'; set debug_sync="now SIGNAL do_insert"; --connection con1 +--error ER_NOT_SUPPORTED_YET --reap --connection default select * from t1; diff --git a/sql/handler.cc b/sql/handler.cc index 393e4cc12d5..09f438bc204 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -7843,29 +7843,11 @@ int handler::ha_update_row(const uchar *old_data, const uchar *new_data) int e= 0; if (ha_thd()->lex->ignore) { - /* hack: modifying PK is not supported for now, see MDEV-37233 */ - if (table->s->primary_key != MAX_KEY) - { - KEY *key= table->key_info + table->s->primary_key; - KEY_PART_INFO *kp= key->key_part; - KEY_PART_INFO *end= kp + key->user_defined_key_parts; - for (; kp < end; kp++) - if (bitmap_is_set(table->write_set, kp->fieldnr-1)) - { - my_printf_error(ER_NOT_SUPPORTED_YET, "UPDATE IGNORE that " - "modifies a primary key of a table with a UNIQUE constraint " - "%s is not currently supported", MYF(0), - table->s->long_unique_table ? "USING HASH" : "WITHOUT OVERLAPS"); - return HA_ERR_UNSUPPORTED; - } - } - table->move_fields(table->field, table->record[1], table->record[0]); - std::swap(table->record[0], table->record[1]); - increment_statistics(&SSV::ha_update_count); - TABLE_IO_WAIT(tracker, PSI_TABLE_UPDATE_ROW, MAX_KEY, e, - { e= update_row(new_data, old_data);}) - table->move_fields(table->field, table->record[1], table->record[0]); - std::swap(table->record[0], table->record[1]); + my_printf_error(ER_NOT_SUPPORTED_YET, "UPDATE IGNORE in READ " + "COMMITTED isolation mode of a table with a UNIQUE constraint " + "%s is not currently supported", MYF(0), + table->s->long_unique_table ? "USING HASH" : "WITHOUT OVERLAPS"); + return HA_ERR_UNSUPPORTED; } return e ? 
e : error; } From a3c3db76930724f7e9a495a67633cfe8f91d0dc1 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 14 Jul 2025 16:38:24 +0200 Subject: [PATCH 51/61] update WolfSSL to 5.8.0-stable --- extra/wolfssl/wolfssl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extra/wolfssl/wolfssl b/extra/wolfssl/wolfssl index 239b85c8043..b077c81eb63 160000 --- a/extra/wolfssl/wolfssl +++ b/extra/wolfssl/wolfssl @@ -1 +1 @@ -Subproject commit 239b85c80438bf60d9a5b9e0ebe9ff097a760d0d +Subproject commit b077c81eb635392e694ccedbab8b644297ec0285 From 145afe7d7934ece196fdf47e71359af4856a6500 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Mon, 14 Jul 2025 21:58:59 +0200 Subject: [PATCH 52/61] Workaround WolfSSL issue #9004 to fix the build on Windows. --- extra/wolfssl/CMakeLists.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/extra/wolfssl/CMakeLists.txt b/extra/wolfssl/CMakeLists.txt index 38203a07911..f5b5c8bb5e8 100644 --- a/extra/wolfssl/CMakeLists.txt +++ b/extra/wolfssl/CMakeLists.txt @@ -134,6 +134,8 @@ if(MSVC) remove_definitions(-DHAVE_CONFIG_H) target_compile_definitions(wolfssl PRIVATE WOLFSSL_HAVE_MIN WOLFSSL_HAVE_MAX) + # Workaround https://github.com/wolfSSL/wolfssl/issues/9004 + target_compile_definitions(wolfssl PRIVATE WOLFSSL_NO_SOCK SOCKET_INVALID=-1) endif() CONFIGURE_FILE(user_settings.h.in user_settings.h) From a99dfa26d394647e0316881b0eac3faa3a6c6dee Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 14 Jul 2025 17:09:11 +0200 Subject: [PATCH 53/61] HeidiSQL 12.11 --- win/packaging/heidisql.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/win/packaging/heidisql.cmake b/win/packaging/heidisql.cmake index 157e5517594..79755e63371 100644 --- a/win/packaging/heidisql.cmake +++ b/win/packaging/heidisql.cmake @@ -1,4 +1,4 @@ -SET(HEIDISQL_BASE_NAME "HeidiSQL_12.10_32_Portable") +SET(HEIDISQL_BASE_NAME "HeidiSQL_12.11_32_Portable") SET(HEIDISQL_ZIP "${HEIDISQL_BASE_NAME}.zip") SET(HEIDISQL_URL "https://www.heidisql.com/downloads/releases/${HEIDISQL_ZIP}") SET(HEIDISQL_DOWNLOAD_DIR ${THIRD_PARTY_DOWNLOAD_LOCATION}/${HEIDISQL_BASE_NAME}) From a0759bf017df167dca767e43649adf4b3d1321e7 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 16 Jul 2025 12:50:24 +0200 Subject: [PATCH 54/61] Connector/C 3.3.17 --- libmariadb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libmariadb b/libmariadb index 55abb320382..77bdf5a5725 160000 --- a/libmariadb +++ b/libmariadb @@ -1 +1 @@ -Subproject commit 55abb3203826a7b3593f0728d6d077d4e0f19259 +Subproject commit 77bdf5a5725ec13c9067723ee2d3e1c5787e8c71 From b0a2b921cc7af1ca64492864317b0ea42c717e9a Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 14 Jul 2025 17:13:35 +0200 Subject: [PATCH 55/61] ColumnStore 6.4.11-1 --- storage/columnstore/columnstore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/columnstore/columnstore b/storage/columnstore/columnstore index ef0647b74ac..5ba808d542e 160000 --- a/storage/columnstore/columnstore +++ b/storage/columnstore/columnstore @@ -1 +1 @@ -Subproject commit ef0647b74ac41263cfdc25d3d4b5169c9e8df7ec +Subproject commit 5ba808d542e8552a4b3a868fce9e119623f366d7 From 17358074486eeefa95dc955b4fc21bbe1da407d9 Mon Sep 17 00:00:00 2001 From: Justin Jose Date: Thu, 27 Feb 2025 08:44:14 +0530 Subject: [PATCH 56/61] Bug#37117875 Binlog record error when delimiter is set to other symbols Description: ------------ When the delimiter is set to a non-default symbol and the SQL statement contains an 
unquoted semicolon (;) within a MySQL-specific comment, the SQL executes successfully in the source database. However, the binlog record becomes incomplete, leading to a syntax error in the replica database. Analysis: ------------ When the delimiter is set to a non-default symbol and an SQL statement contains an unquoted semicolon within a MySQL-specific comment, the client transmits the entire SQL statement, including the MySQL-specific comment, up to the delimiter to the server. During parsing, the server interprets the semicolon as the end of the command while processing the comment, resulting in the execution of a partial statement. The truncated statement is then recorded in the binary log and propagated to the replica, leading to an error. Fix: ------------ When the delimiter is set to a non-default symbol, treat MySQL-specific comments containing unquoted semicolons as syntax errors and return a parser error. Change-Id: I00d6b4ced89e79a7350c94218bf2527553054aed --- sql/sql_lex.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 7923c137b77..3741184d8cb 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -1,5 +1,5 @@ -/* Copyright (c) 2000, 2019, Oracle and/or its affiliates. - Copyright (c) 2009, 2022, MariaDB Corporation. +/* Copyright (c) 2000, 2025, Oracle and/or its affiliates. + Copyright (c) 2009, 2025, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -2564,6 +2564,8 @@ int Lex_input_stream::lex_one_token(YYSTYPE *yylval, THD *thd) state=MY_LEX_CHAR; break; case MY_LEX_END: + /* Unclosed special comments result in a syntax error */ + if (in_comment == DISCARD_COMMENT) return (ABORT_SYM); next_state= MY_LEX_END; return(0); // We found end of input last time From 5fa5ee3edb219e9a9db55f2667f8fda954b82084 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 24 Jul 2025 15:46:45 +0200 Subject: [PATCH 57/61] Bug#37117875 test case --- .../suite/rpl/r/rpl_conditional_comments.result | 14 ++++++++++++++ .../suite/rpl/t/rpl_conditional_comments.test | 12 ++++++++++++ 2 files changed, 26 insertions(+) diff --git a/mysql-test/suite/rpl/r/rpl_conditional_comments.result b/mysql-test/suite/rpl/r/rpl_conditional_comments.result index 036824d60aa..8bfccb9c6a8 100644 --- a/mysql-test/suite/rpl/r/rpl_conditional_comments.result +++ b/mysql-test/suite/rpl/r/rpl_conditional_comments.result @@ -88,5 +88,19 @@ c1 3 20 connection master; +insert t1 values /*! (100);insert t1 values */ (200) // +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'insert t1 values */ (200)' at line 1 +select * from t1; +c1 +62 +3 +20 +connection slave; +select * from t1; +c1 +62 +3 +20 +connection master; DROP TABLE t1; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_conditional_comments.test b/mysql-test/suite/rpl/t/rpl_conditional_comments.test index 6e4ec8745f4..343ea0d3d13 100644 --- a/mysql-test/suite/rpl/t/rpl_conditional_comments.test +++ b/mysql-test/suite/rpl/t/rpl_conditional_comments.test @@ -80,5 +80,17 @@ sync_slave_with_master; select * from t1; connection master; +# +# Bug#37117875 Binlog record error when delimiter is set to other symbols +# +delimiter //; +--error ER_PARSE_ERROR +insert t1 values /*! 
(100);insert t1 values */ (200) //
+delimiter ;//
+select * from t1;
+sync_slave_with_master;
+select * from t1;
+connection master;
+
 DROP TABLE t1;
 --source include/rpl_end.inc

From 29775c03c147ae72b0a61378d3da729dbad346a2 Mon Sep 17 00:00:00 2001
From: Ayush Gupta
Date: Tue, 20 May 2025 06:53:03 +0200
Subject: [PATCH 58/61] Bug#34422267 - Contribution by Tencent: comment
 mistake in get_best_ror_intersect

Description:
The function get_best_ror_intersect is responsible for selecting the
optimal combination of ROR scans that minimize cost while improving
selectivity. It iteratively adds scans to a selected set (S), ensuring
that each addition results in improved selectivity. If selectivity
improves, the function then evaluates whether the cost is minimized.

The comment contained some inaccuracies:
- Incorrect Selectivity Condition: A missing parenthesis caused the
  condition to be misinterpreted, leading to incorrect logic. The
  function intends to check whether adding a scan improves selectivity
  before including it in the set.
- Loop Condition Issue: In the comment, the loop never reduced R,
  which made it an infinite loop.

Fix:
The function's comment is fixed to address these issues. The set should
include the scan before comparing the selectivity with the initial set,
and thus the selectivity condition in the comment is fixed by properly
enclosing the expression in parentheses to reflect the intended logic.
Ensured that R is properly reduced in each iteration to maintain
correctness.

Change-Id: Ie197af8211a5ef05a5118a33b8b543d354475780
---
 sql/opt_range.cc | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/sql/opt_range.cc b/sql/opt_range.cc
index 0a3d71abe93..3b0b0de1fad 100644
--- a/sql/opt_range.cc
+++ b/sql/opt_range.cc
@@ -7132,21 +7132,24 @@ static bool ror_intersect_add(ROR_INTERSECT_INFO *info,
     order R by (E(#records_matched) * key_record_length).
S= first(R); -- set of scans that will be used for ROR-intersection
-    R= R-first(S);
+    R= R - S;
     min_cost= cost(S);
     min_scan= make_scan(S);
     while (R is not empty)
     {
-      firstR= R - first(R);
-      if (!selectivity(S + firstR < selectivity(S)))
+      firstR= first(R);
+      if (!selectivity(S + firstR) < selectivity(S))
+      {
+        R= R - firstR;
         continue;
-
+      }
       S= S + first(R);
       if (cost(S) < min_cost)
       {
         min_cost= cost(S);
         min_scan= make_scan(S);
       }
+      R= R - firstR; -- Remove the processed scan from R
     }
     return min_scan;
   }

From f49a5beb30e832d3d3a987b12cc18ddd0f7c702f Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Fri, 25 Jul 2025 19:15:09 +0200
Subject: [PATCH 59/61] mariadb-backup: read --tables-file in the text mode
 on Windows
---
 extra/mariabackup/xtrabackup.cc | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc
index 60f81861a35..a65f8c2cf0b 100644
--- a/extra/mariabackup/xtrabackup.cc
+++ b/extra/mariabackup/xtrabackup.cc
@@ -4416,7 +4416,7 @@ xb_load_list_file(
 	FILE*	fp;
 
 	/* read and store the filenames */
-	fp = fopen(filename, "r");
+	fp = fopen(filename, "rt");
 	if (!fp) {
 		die("Can't open %s", filename);

From 633417308f163141a822398fcf8e7f281bcda38b Mon Sep 17 00:00:00 2001
From: Sergei Golubchik
Date: Sat, 26 Jul 2025 10:26:16 +0200
Subject: [PATCH 60/61] MDEV-37312 ASAN errors or assertion failure upon
 attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED

in case of a long unique conflict ha_write_row() used delete_row()
to remove the newly inserted row, and it used rnd_pos() to position
the cursor before deletion. This rnd_pos() was freeing and reallocating
blobs in record[0]. So when the code for FOR PORTION OF did

  store_record(record[2]);
  ha_write_row();
  restore_record(record[2]);

it ended up with blob pointers to freed memory.

Let's use lookup_handler for deletion.
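The failure mode can be sketched outside the server. The standalone C++
program below is illustrative only -- Cursor and Record are invented
stand-ins, not MariaDB classes -- and models why repositioning the same
cursor invalidates a saved shallow copy of a row whose blobs live outside
the record buffer, while doing the extra read through a second handler
leaves the copy intact:

  // A record stores a *pointer* to blob bytes owned by the cursor that
  // read them, so a shallow copy of the record dangles as soon as that
  // cursor frees or reallocates its blob buffer.
  #include <cstring>
  #include <iostream>

  struct Record { const char *blob= nullptr; }; // pointer only, like a BLOB

  struct Cursor                    // stands in for a handler with rnd_pos()
  {
    char *buf= nullptr;
    Record row;
    void read(const char *data)    // each read frees the previous blob buffer
    {
      delete[] buf;
      buf= new char[std::strlen(data) + 1];
      std::strcpy(buf, data);
      row.blob= buf;
    }
    ~Cursor() { delete[] buf; }
  };

  int main()
  {
    Cursor main_cursor;
    main_cursor.read("row-being-updated");
    Record saved= main_cursor.row;       // like store_record(record[2])

    // BAD: repositioning main_cursor would free the buffer saved.blob
    // points to; reading saved.blob afterwards is the use-after-free
    // that ASAN reported:
    //   main_cursor.read("conflicting-row");

    // GOOD: position and delete through a separate handler, so
    // main_cursor's buffers -- and the saved copy -- stay valid:
    Cursor lookup_cursor;
    lookup_cursor.read("conflicting-row");
    std::cout << saved.blob << '\n';     // still prints "row-being-updated"
    return 0;
  }

This also explains why create_lookup_handler() in the diff below now locks
with F_WRLCK: the lookup handler is no longer read-only, it performs the
delete itself.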
--- mysql-test/suite/period/r/long_unique.result | 17 ++++++++++++++++ mysql-test/suite/period/t/long_unique.test | 21 ++++++++++++++++++++ sql/handler.cc | 13 ++++++------ 3 files changed, 44 insertions(+), 7 deletions(-) diff --git a/mysql-test/suite/period/r/long_unique.result b/mysql-test/suite/period/r/long_unique.result index 5c5f4297fb9..fa7817fb562 100644 --- a/mysql-test/suite/period/r/long_unique.result +++ b/mysql-test/suite/period/r/long_unique.result @@ -15,3 +15,20 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01'); DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01'; ERROR 23000: Duplicate entry 'foo' for key 'b' DROP TABLE t1; +# End of 10.5 tests +# +# MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED +# +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb; +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +ERROR 23000: Duplicate entry 'foo' for key 'f' +drop table t1; +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a); +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +ERROR 23000: Duplicate entry 'foo' for key 'f' +drop table t1; +# End of 10.6 tests diff --git a/mysql-test/suite/period/t/long_unique.test b/mysql-test/suite/period/t/long_unique.test index c2dcd3f6c3f..bca2f15ebae 100644 --- a/mysql-test/suite/period/t/long_unique.test +++ b/mysql-test/suite/period/t/long_unique.test @@ -1,3 +1,4 @@ +--source include/have_innodb.inc --source include/have_partition.inc --echo # @@ -21,3 +22,23 @@ INSERT INTO t1 VALUES (1,'foo','2022-01-01', '2025-01-01'); DELETE FROM t1 FOR PORTION OF app FROM '2023-01-01' TO '2024-01-01'; DROP TABLE t1; +--echo # End of 10.5 tests + +--echo # +--echo # MDEV-37312 ASAN errors or assertion failure upon attempt to UPDATE FOR PORTION violating long unique under READ COMMITTED +--echo # +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb; +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +--error ER_DUP_ENTRY +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +drop table t1; + +create table t1 (a int, f text unique, s datetime, e datetime, period for p(s,e)) engine=innodb partition by hash (a); +insert into t1 values (1,'foo','1900-01-01','2000-01-01'),(2,'bar','1900-01-01','2000-01-01'); +set transaction isolation level read committed; +--error ER_DUP_ENTRY +update t1 for portion of p from '1980-01-01' to '1980-01-02' set a = 1; +drop table t1; + +--echo # End of 10.6 tests diff --git a/sql/handler.cc b/sql/handler.cc index 09f438bc204..5d7138490fd 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -3367,7 +3367,7 @@ int handler::create_lookup_handler() if (!(tmp= clone(table->s->normalized_path.str, table->in_use->mem_root))) return 1; lookup_handler= tmp; - return lookup_handler->ha_external_lock(table->in_use, F_RDLCK); + return lookup_handler->ha_external_lock(table->in_use, F_WRLCK); } LEX_CSTRING *handler::engine_name() @@ -7774,16 +7774,16 
@@ int handler::ha_write_row(const uchar *buf) if (lookup_handler != this) // INSERT IGNORE or REPLACE or ODKU { int olderror= error; - if ((error= rnd_init(0))) + if ((error= lookup_handler->rnd_init(0))) goto err; position(buf); - if ((error= rnd_pos(lookup_buffer, ref))) + if ((error= lookup_handler->rnd_pos(lookup_buffer, ref))) goto err; increment_statistics(&SSV::ha_delete_count); TABLE_IO_WAIT(tracker, PSI_TABLE_DELETE_ROW, MAX_KEY, error, - { error= delete_row(buf);}) - rnd_end(); + { error= lookup_handler->delete_row(buf);}) + lookup_handler->rnd_end(); if (!error) error= olderror; } @@ -7916,8 +7916,7 @@ int handler::ha_delete_row(const uchar *buf) /* Normally table->record[0] is used, but sometimes table->record[1] is used. */ - DBUG_ASSERT(buf == table->record[0] || - buf == table->record[1]); + DBUG_ASSERT(buf == table->record[0] || buf == table->record[1]); MYSQL_DELETE_ROW_START(table_share->db.str, table_share->table_name.str); mark_trx_read_write(); From fe8047caf26d20e98ea7f6ec1dce3924e696703f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 28 Jul 2025 15:45:51 +0200 Subject: [PATCH 61/61] MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default don't construct a "default value field" by moving field's ptr/null_ptr. Field can have its null_ptr moved to extra_null_bitmap for BEFORE triggers. Perhaps there can be other reasons for null_ptr and ptr not to be at the right offset to each other. Instead, use pointers from TABLE_SHARE::field, which always point to default values. Except when there's no TABLE_SHARE::field, which can happen for TEMPTABLE views, for example, but these views are not updatable anyway. Add an assert to Field::move_field_offset() to ensure it's only used for appropriately set ptr/null_ptr pairs. --- mysql-test/main/default.result | 14 +++++++++----- mysql-test/main/default.test | 17 ++++++++++++----- sql/field.h | 7 +++++++ sql/item.cc | 9 ++++++++- 4 files changed, 36 insertions(+), 11 deletions(-) diff --git a/mysql-test/main/default.result b/mysql-test/main/default.result index 335f7e7d607..29a05e3ad88 100644 --- a/mysql-test/main/default.result +++ b/mysql-test/main/default.result @@ -3432,10 +3432,8 @@ DEFAULT(a) CASE a WHEN 0 THEN 1 ELSE 2 END NULL 2 DROP TABLE t; DROP VIEW v; -# # End of 10.2 test # -# # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default # record, which can cause crashes when accessing already released # memory. 
@@ -3450,10 +3448,8 @@ length(DEFAULT(h)) 25 INSERT INTO t1 () VALUES (); drop table t1; -# # End of 10.3 test # -# # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize # CREATE TABLE t1 (pk text DEFAULT length(uuid())); @@ -3483,6 +3479,14 @@ column_name column_default has_default is_nullable a NULL 1 YES drop view v1; drop table t1; -# # End of 10.4 test # +# MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default +# +create table t1 (f01 timestamp, f03 timestamp); +insert into t1 () values (); +create trigger tr before insert on t1 for each row set @a=1; +prepare stmt from "update t1 set f03 = ?"; +execute stmt using default; +drop table t1; +# End of 10.6 test diff --git a/mysql-test/main/default.test b/mysql-test/main/default.test index 13f611246c9..5b37f7047fa 100644 --- a/mysql-test/main/default.test +++ b/mysql-test/main/default.test @@ -2137,9 +2137,8 @@ CREATE ALGORITHM=TEMPTABLE VIEW v AS SELECT * FROM t; SELECT DISTINCT DEFAULT(a), CASE a WHEN 0 THEN 1 ELSE 2 END FROM v GROUP BY a WITH ROLLUP; DROP TABLE t; DROP VIEW v; ---echo # + --echo # End of 10.2 test ---echo # --echo # --echo # MDEV-22703 DEFAULT() on a BLOB column can overwrite the default @@ -2157,9 +2156,7 @@ SELECT length(DEFAULT(h)) FROM t1; INSERT INTO t1 () VALUES (); drop table t1; ---echo # --echo # End of 10.3 test ---echo # --echo # --echo # MDEV-26423: MariaDB server crash in Create_tmp_table::finalize @@ -2183,6 +2180,16 @@ select column_name, column_default, column_default is not null as 'has_default', drop view v1; drop table t1; ---echo # --echo # End of 10.4 test + --echo # +--echo # MDEV-37320 ASAN errors in Field::is_null / Item_param::assign_default +--echo # +create table t1 (f01 timestamp, f03 timestamp); +insert into t1 () values (); +create trigger tr before insert on t1 for each row set @a=1; +prepare stmt from "update t1 set f03 = ?"; +execute stmt using default; +drop table t1; + +--echo # End of 10.6 test diff --git a/sql/field.h b/sql/field.h index 59dcd229b52..d68e72c0925 100644 --- a/sql/field.h +++ b/sql/field.h @@ -1537,7 +1537,14 @@ public: { ptr=ADD_TO_PTR(ptr,ptr_diff, uchar*); if (null_ptr) + { null_ptr=ADD_TO_PTR(null_ptr,ptr_diff,uchar*); + if (table) + { + DBUG_ASSERT(null_ptr < ptr); + DBUG_ASSERT(ptr - null_ptr <= (int)table->s->rec_buff_length); + } + } } void get_image(uchar *buff, uint length, CHARSET_INFO *cs) const { get_image(buff, length, ptr, cs); } diff --git a/sql/item.cc b/sql/item.cc index 4a47b268f52..051c13adc3b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -5231,10 +5231,17 @@ static Field *make_default_field(THD *thd, Field *field_arg) def_field->default_value->expr->update_used_tables(); def_field->move_field(newptr + 1, def_field->maybe_null() ? newptr : 0, 1); } - else + else if (field_arg->table && field_arg->table->s->field) + { + Field *def_val= field_arg->table->s->field[field_arg->field_index]; + def_field->move_field(def_val->ptr, def_val->null_ptr, def_val->null_bit); + } + else /* e.g. non-updatable view */ + { def_field->move_field_offset((my_ptrdiff_t) (def_field->table->s->default_values - def_field->table->record[0])); + } return def_field; }
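As a closing note on the last patch: the removed code derived the
default-value location with exactly the pointer arithmetic modeled below.
This standalone C++ sketch is illustrative only (not MariaDB code; Field
here is a bare two-pointer struct, not sql/field.h). It shows why shifting
both ptr and null_ptr by the record0-to-default_values distance only works
while both pointers lie inside record0, and why taking the pointers straight
from the share's field is safe:

  #include <cstddef>
  #include <iostream>

  struct Field
  {
    unsigned char *ptr;       // value bytes
    unsigned char *null_ptr;  // byte holding the null bit
  };

  int main()
  {
    unsigned char record0[8]= {0};
    unsigned char default_values[8]= {0, 42}; // share's default row image
    unsigned char extra_null_bitmap[1]= {0};  // separate buffer (triggers)

    Field f= { record0 + 1, extra_null_bitmap }; // null_ptr left record0

    // BAD (mirrors the removed move_field_offset() call): the shift is
    // computed between the two record buffers, but adding it to null_ptr
    // yields an address inside no buffer we own -- never dereference it.
    std::ptrdiff_t diff= default_values - record0;
    unsigned char *stale_null_ptr= f.null_ptr + diff;
    std::cout << "stale null_ptr: "
              << static_cast<void*>(stale_null_ptr) << '\n';

    // GOOD (mirrors the fix): take both pointers directly from the
    // defaults buffer, as TABLE_SHARE::field[field_index] provides.
    Field def= { default_values + 1, default_values };
    std::cout << "default value: " << int(*def.ptr) << '\n'; // prints 42
    return 0;
  }

The new DBUG_ASSERT in Field::move_field_offset() encodes the same rule:
offset-based moves are only legal while null_ptr still precedes ptr inside
the record buffer.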