From 41dbc32665e6e821c7dd469517c877dbe5a775ee Mon Sep 17 00:00:00 2001 From: Kristian Nielsen Date: Sun, 23 Apr 2023 10:59:32 +0200 Subject: [PATCH 01/76] MDEV-31140: FLUSH BINARY LOGS DELETE_DOMAIN_ID=(D) can errorneously delete active domains Fix the code in rpl_binlog_state::drop_domain(), so that _all_ entries for the domain in the binlog state must match an entry in the initial GTID_LIST, not just one entry match. Signed-off-by: Kristian Nielsen --- .../binlog_flush_binlogs_delete_domain.result | 14 ++++++++--- .../t/binlog_flush_binlogs_delete_domain.test | 13 +++++++++- sql/rpl_gtid.cc | 24 ++++++++++--------- 3 files changed, 36 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result b/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result index fdcfb4bfa01..1c11191802f 100644 --- a/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result +++ b/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result @@ -46,15 +46,23 @@ Warning 1076 The current gtid binlog state is incompatible with a former one mis Warning 1076 The gtid domain being deleted ('1') is not in the current binlog state FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0); ERROR HY000: Could not delete gtid domain. Reason: binlog files may contain gtids from the domain ('1') being deleted. Make sure to first purge those files. +MDEV-31140: Missing error from DELETE_DOMAIN_ID when gtid_binlog_state partially matches GTID_LIST. FLUSH BINARY LOGS; PURGE BINARY LOGS TO 'master-bin.000005'; +SET @@SESSION.gtid_domain_id=8; +SET @@SESSION.server_id=10*8 + 1; +INSERT INTO t SELECT 1+MAX(a) FROM t; +FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0); +ERROR HY000: Could not delete gtid domain. Reason: binlog files may contain gtids from the domain ('8') being deleted. Make sure to first purge those files. 
+FLUSH BINARY LOGS; +PURGE BINARY LOGS TO 'master-bin.000006'; FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0); Warnings: Warning 1076 The gtid domain being deleted ('0') is not in the current binlog state Gtid_list of the current binlog does not contain 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0: -show binlog events in 'master-bin.000006' limit 1,1; +show binlog events in 'master-bin.000007' limit 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000006 # Gtid_list 1 # [] +master-bin.000007 # Gtid_list 1 # [] SET @@SESSION.gtid_domain_id=1;; SET @@SESSION.server_id=1; SET @@SESSION.gtid_seq_no=1; @@ -75,7 +83,7 @@ INSERT INTO t SET a=1; SELECT @gtid_binlog_state_saved "as original state", @@GLOBAL.gtid_binlog_state as "out of order for 11 domain state"; as original state out of order for 11 domain state 1-1-1,1-2-2,11-11-11 1-1-1,1-2-2,11-11-1 -PURGE BINARY LOGS TO 'master-bin.000007'; +PURGE BINARY LOGS TO 'master-bin.000008'; the following command succeeds with warnings FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1); Warnings: diff --git a/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test b/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test index 8311f4bd800..1643ecff72d 100644 --- a/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test +++ b/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test @@ -21,7 +21,6 @@ FLUSH BINARY LOGS DELETE_DOMAIN_ID = (); --echo but with a warning --let $binlog_pre_flush=query_get_value(SHOW MASTER STATUS, Position, 1) FLUSH BINARY LOGS DELETE_DOMAIN_ID = (99); ---let $binlog_start=$binlog_pre_flush --source include/show_binary_logs.inc # Log one event in a specified domain and try to delete the domain @@ -62,6 +61,8 @@ FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1); # expected overrun of the static buffers of underlying dynamic arrays is doing. --let $domain_cnt=17 --let $server_in_domain_cnt=3 +--let $err_domain_id=`SELECT FLOOR($domain_cnt/2)` +--let $err_server_id=`SELECT FLOOR($server_in_domain_cnt/2)` --let $domain_list= --disable_query_log while ($domain_cnt) @@ -86,6 +87,16 @@ while ($domain_cnt) --error ER_BINLOG_CANT_DELETE_GTID_DOMAIN --eval FLUSH BINARY LOGS DELETE_DOMAIN_ID = ($domain_list) +--echo MDEV-31140: Missing error from DELETE_DOMAIN_ID when gtid_binlog_state partially matches GTID_LIST. 
+FLUSH BINARY LOGS; +--let $purge_to_binlog= query_get_value(SHOW MASTER STATUS, File, 1) +--eval PURGE BINARY LOGS TO '$purge_to_binlog' +--eval SET @@SESSION.gtid_domain_id=$err_domain_id +--eval SET @@SESSION.server_id=10*$err_domain_id + $err_server_id +eval INSERT INTO t SELECT 1+MAX(a) FROM t; +--error ER_BINLOG_CANT_DELETE_GTID_DOMAIN +--eval FLUSH BINARY LOGS DELETE_DOMAIN_ID = ($domain_list) + # Now satisfy the safety condtion to purge log files containing $domain list FLUSH BINARY LOGS; --let $purge_to_binlog= query_get_value(SHOW MASTER STATUS, File, 1) diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index c4e5c75b10a..7b67a83b3dd 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -2209,18 +2209,16 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids, /* For each domain_id from ids - when no such domain in binlog state - warn && continue - For each domain.server's last gtid - when not locate the last gtid in glev.list - error out binlog state can't change - otherwise continue + If the domain is already absent from the binlog state + Warn && continue + If any GTID with that domain in binlog state is missing from glev.list + Error out binlog state can't change */ for (ulong i= 0; i < ids->elements; i++) { rpl_binlog_state::element *elem= NULL; uint32 *ptr_domain_id; - bool not_match; + bool all_found; ptr_domain_id= (uint32*) dynamic_array_ptr(ids, i); elem= (rpl_binlog_state::element *) @@ -2235,14 +2233,18 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids, continue; } - for (not_match= true, k= 0; k < elem->hash.records; k++) + all_found= true; + for (k= 0; k < elem->hash.records && all_found; k++) { rpl_gtid *d_gtid= (rpl_gtid *)my_hash_element(&elem->hash, k); - for (ulong l= 0; l < glev->count && not_match; l++) - not_match= !(*d_gtid == glev->list[l]); + bool match_found= false; + for (ulong l= 0; l < glev->count && !match_found; l++) + match_found= match_found || (*d_gtid == glev->list[l]); + if (!match_found) + all_found= false; } - if (not_match) + if (!all_found) { sprintf(errbuf, "binlog files may contain gtids from the domain ('%u') " "being deleted. Make sure to first purge those files", From f288d42cdbc98460e097f771a4ad015667e355fb Mon Sep 17 00:00:00 2001 From: Weijun Huang Date: Fri, 31 Mar 2023 01:18:24 +0200 Subject: [PATCH 02/76] MDEV-29646: sformat('Num [{:20}]', 42) gives incorrect result in view The problem is that sformat does not assign the enough space for the result string. The result string is allocated with the max_length of argument, but the correst max_length should be based on the format string. 
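For illustration, a minimal reproduction sketch based on the MDEV title and the
new test case below (the exact pre-fix output is not quoted here; it is assumed
to be a truncated string):

    CREATE VIEW v1 AS SELECT sformat('Num [{:20}]', 42);
    SELECT * FROM v1;

Before the fix, Item_func_sformat::fix_length_and_dec() summed the arguments'
max_char_length(): roughly 13 characters for the format string 'Num [{:20}]'
plus the integer literal 42, while the formatted result needs 26 characters
because of the {:20} width specifier, so the column created for the view was
too narrow.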
The patch fixes the problem by using MAX_BLOB_WIDTH to assign length --- mysql-test/main/func_sformat.result | 25 ++++++++++++++++++++++++- mysql-test/main/func_sformat.test | 21 +++++++++++++++++++++ sql/item_strfunc.cc | 2 +- 3 files changed, 46 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/func_sformat.result b/mysql-test/main/func_sformat.result index 1809cbad23b..9e8a11677b5 100644 --- a/mysql-test/main/func_sformat.result +++ b/mysql-test/main/func_sformat.result @@ -434,7 +434,7 @@ create table t1 as select sformat(_ucs2 x'003D007B007D003D', _ucs2 x'04420435044 show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `x` varchar(8) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL + `x` longtext CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1; set names latin1; @@ -468,3 +468,26 @@ set names latin1; # # End of 10.7 tests # +# +# Start of 10.8 tests +# +# +# MDEV-29646 sformat('Num [{:20}]', 42) gives incorrect result in view +# +create view v1 as select sformat('Num [{:20}]', 42); +select * from v1; +sformat('Num [{:20}]', 42) +Num [ 42] +drop view v1; +create view v1 as SELECT sformat('Square root of [{:d}] is [{:.20}]', 2, sqrt(2)); +select * from v1; +sformat('Square root of [{:d}] is [{:.20}]', 2, sqrt(2)) +Square root of [2] is [1.4142135623730951455] +drop view v1; +create table t1 (a text, b int, c text); +insert t1 values ('[{} -> {}]', 10, '{}'), ('[{:20} <- {}]', 1, '{:30}'); +select sformat(a,b,c) from t1; +sformat(a,b,c) +[10 -> {}] +[ 1 <- {:30}] +drop table t1; diff --git a/mysql-test/main/func_sformat.test b/mysql-test/main/func_sformat.test index cb7e7c8f1c1..65e4b639179 100644 --- a/mysql-test/main/func_sformat.test +++ b/mysql-test/main/func_sformat.test @@ -253,3 +253,24 @@ set names latin1; echo #; echo # End of 10.7 tests; echo #; + +echo #; +echo # Start of 10.8 tests; +echo #; + +echo #; +echo # MDEV-29646 sformat('Num [{:20}]', 42) gives incorrect result in view; +echo #; + +create view v1 as select sformat('Num [{:20}]', 42); +select * from v1; +drop view v1; + +create view v1 as SELECT sformat('Square root of [{:d}] is [{:.20}]', 2, sqrt(2)); +select * from v1; +drop view v1; + +create table t1 (a text, b int, c text); +insert t1 values ('[{} -> {}]', 10, '{}'), ('[{:20} <- {}]', 1, '{:30}'); +select sformat(a,b,c) from t1; +drop table t1; diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 643c3d4cf3e..c6f00db6563 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1348,13 +1348,13 @@ bool Item_func_sformat::fix_length_and_dec() for (uint i=0 ; i < arg_count ; i++) { - char_length+= args[i]->max_char_length(); if (args[i]->result_type() == STRING_RESULT && Type_std_attributes::agg_item_set_converter(c, func_name_cstring(), args+i, 1, flags, 1)) return TRUE; } + char_length= MAX_BLOB_WIDTH; fix_char_length_ulonglong(char_length); return FALSE; } From 7cbb45d1d4b1a5e2d07b1c17c49a5ca797df8991 Mon Sep 17 00:00:00 2001 From: Tuukka Pasanen Date: Wed, 29 Mar 2023 12:28:51 +0300 Subject: [PATCH 03/76] MDEV-30952: Reformat Debian post and pre scripts There is several misindentation inside Debian post and pre installation scripts. False indentation with space as indent space should be 2 and indentation with tabs. 
--- debian/mariadb-common.postinst | 2 +- debian/mariadb-common.postrm | 2 +- debian/mariadb-server-10.6.postinst | 62 ++++++++++++++-------------- debian/mariadb-server-10.6.postrm | 44 ++++++++++---------- debian/mariadb-server-10.6.preinst | 64 ++++++++++++++--------------- debian/mariadb-server-10.6.prerm | 6 +-- 6 files changed, 90 insertions(+), 90 deletions(-) diff --git a/debian/mariadb-common.postinst b/debian/mariadb-common.postinst index 12f65bd3d92..53d131a3929 100644 --- a/debian/mariadb-common.postinst +++ b/debian/mariadb-common.postinst @@ -35,7 +35,7 @@ case "$1" in then update-alternatives --install /etc/mysql/my.cnf my.cnf "/etc/mysql/mariadb.cnf" 500 || true fi - ;; + ;; esac #DEBHELPER# diff --git a/debian/mariadb-common.postrm b/debian/mariadb-common.postrm index d0bfa266b7d..2548733a1b9 100644 --- a/debian/mariadb-common.postrm +++ b/debian/mariadb-common.postrm @@ -10,7 +10,7 @@ case "$1" in then /usr/share/mysql-common/configure-symlinks remove mariadb "/etc/mysql/mariadb.cnf" fi - ;; + ;; esac #DEBHELPER# diff --git a/debian/mariadb-server-10.6.postinst b/debian/mariadb-server-10.6.postinst index a259a50ac55..1a83f38dd25 100644 --- a/debian/mariadb-server-10.6.postinst +++ b/debian/mariadb-server-10.6.postinst @@ -100,7 +100,7 @@ this all away. EOF fi fi - rmdir $mysql_upgradedir 2>/dev/null || true + rmdir $mysql_upgradedir 2>/dev/null || true done @@ -113,9 +113,9 @@ EOF # perform mariadb-upgrade, (MDEV-22678). To keep the impact minimal, we # skip innodb and set key-buffer-size to 0 as it isn't reused. if [ -f "$mysql_datadir"/auto.cnf ] && [ -f "$mysql_datadir"/mysql/user.MYD ] && - [ ! lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null ] && [ ! -f "$mysql_datadir"/undo_001 ]; then - echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | - mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null + [ ! lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null ] && [ ! -f "$mysql_datadir"/undo_001 ]; then + echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | + mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null fi # Ensure the existence and right permissions for the database and @@ -171,8 +171,8 @@ EOF # Debian: can safely run on upgrades with existing databases set +e bash /usr/bin/mariadb-install-db --rpm --cross-bootstrap --user=mysql \ - --disable-log-bin --skip-test-db 2>&1 | \ - $ERR_LOGGER + --disable-log-bin --skip-test-db 2>&1 | \ + $ERR_LOGGER set -e # On new installations root user can connect via unix_socket. @@ -188,21 +188,21 @@ EOF install -o 0 -g 0 -m 0755 -d $mysql_cfgdir fi if [ ! -e "$dc" ]; then - cat /dev/null > $dc - echo "# THIS FILE IS OBSOLETE. STOP USING IT IF POSSIBLE." >>$dc - echo "# This file exists only for backwards compatibility for" >>$dc - echo "# tools that run '--defaults-file=/etc/mysql/debian.cnf'" >>$dc - echo "# and have root level access to the local filesystem." >>$dc - echo "# With those permissions one can run 'mariadb' directly" >>$dc - echo "# anyway thanks to unix socket authentication and hence" >>$dc - echo "# this file is useless. See package README for more info." >>$dc - echo "[client]" >>$dc - echo "host = localhost" >>$dc - echo "user = root" >>$dc - echo "[mysql_upgrade]" >>$dc - echo "host = localhost" >>$dc - echo "user = root" >>$dc - echo "# THIS FILE WILL BE REMOVED IN A FUTURE DEBIAN RELEASE." >>$dc + cat /dev/null > $dc + echo "# THIS FILE IS OBSOLETE. 
STOP USING IT IF POSSIBLE." >>$dc + echo "# This file exists only for backwards compatibility for" >>$dc + echo "# tools that run '--defaults-file=/etc/mysql/debian.cnf'" >>$dc + echo "# and have root level access to the local filesystem." >>$dc + echo "# With those permissions one can run 'mariadb' directly" >>$dc + echo "# anyway thanks to unix socket authentication and hence" >>$dc + echo "# this file is useless. See package README for more info." >>$dc + echo "[client]" >>$dc + echo "host = localhost" >>$dc + echo "user = root" >>$dc + echo "[mysql_upgrade]" >>$dc + echo "host = localhost" >>$dc + echo "user = root" >>$dc + echo "# THIS FILE WILL BE REMOVED IN A FUTURE DEBIAN RELEASE." >>$dc fi # Keep it only root-readable, as it always was chown 0:0 $dc @@ -229,10 +229,10 @@ EOF # will think the service is masked echo "# empty placeholder" > /etc/systemd/system/mariadb.service.d/migrated-from-my.cnf-settings.conf - ;; + ;; abort-upgrade|abort-remove|abort-configure) - ;; + ;; triggered) if [ -d /run/systemd/system ]; then @@ -240,12 +240,12 @@ EOF else invoke-rc.d mariadb restart fi - ;; + ;; *) echo "postinst called with unknown argument '$1'" 1>&2 exit 1 - ;; + ;; esac db_stop # in case invoke fails @@ -263,11 +263,11 @@ fi # Modified dh_systemd_start snippet that's not added automatically if [ -d /run/systemd/system ]; then - systemctl --system daemon-reload >/dev/null || true - deb-systemd-invoke start mariadb.service >/dev/null || true -# Modified dh_installinit snippet to only run with sysvinit + systemctl --system daemon-reload >/dev/null || true + deb-systemd-invoke start mariadb.service >/dev/null || true + # Modified dh_installinit snippet to only run with sysvinit elif [ -x "/etc/init.d/mariadb" ]; then - if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then - invoke-rc.d mariadb start || exit $? - fi + if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then + invoke-rc.d mariadb start || exit $? + fi fi diff --git a/debian/mariadb-server-10.6.postrm b/debian/mariadb-server-10.6.postrm index 94ce91db31d..82fffef4eae 100644 --- a/debian/mariadb-server-10.6.postrm +++ b/debian/mariadb-server-10.6.postrm @@ -15,24 +15,24 @@ MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" # do it himself. No database directories should be removed while the server # is running! Another mariadbd in e.g. a different chroot is fine for us. stop_server() { - # Return immediately if there are no mysqld processes running - # as there is no point in trying to shutdown in that case. - if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi + # Return immediately if there are no mysqld processes running + # as there is no point in trying to shutdown in that case. + if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi - set +e - invoke-rc.d mariadb stop - invoke-rc.d mysql stop # Backwards compatibility - errno=$? - set -e + set +e + invoke-rc.d mariadb stop + invoke-rc.d mysql stop # Backwards compatibility + errno=$? + set -e - # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 -a "$errno" != 100 ]; then - echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 - echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 - echo "Stop it yourself and try again!" 
1>&2 - db_stop - exit 1 - fi + # systemctl could emit exit code 100=no init script (fresh install) + if [ "$errno" != 0 -a "$errno" != 100 ]; then + echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 + echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 + echo "Stop it yourself and try again!" 1>&2 + db_stop + exit 1 + fi } @@ -42,11 +42,11 @@ case "$1" in stop_server sleep 2 fi - ;; + ;; *) echo "postrm called with unknown argument '$1'" 1>&2 exit 1 - ;; + ;; esac # @@ -75,9 +75,9 @@ if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-$MAJOR_VER.flag" ]; then if [ -d /var/lib/mysql ] then find /var/lib/mysql -mindepth 1 \ - -not -path '*/lost+found/*' -not -name 'lost+found' \ - -not -path '*/lost@002bfound/*' -not -name 'lost@002bfound' \ - -delete + -not -path '*/lost+found/*' -not -name 'lost+found' \ + -not -path '*/lost@002bfound/*' -not -name 'lost@002bfound' \ + -delete # "|| true" still needed as rmdir still exits with non-zero if # /var/lib/mysql is a mount point @@ -93,5 +93,5 @@ fi # Modified dh_systemd_start snippet that's not added automatically if [ -d /run/systemd/system ]; then - systemctl --system daemon-reload >/dev/null || true + systemctl --system daemon-reload >/dev/null || true fi diff --git a/debian/mariadb-server-10.6.preinst b/debian/mariadb-server-10.6.preinst index c865173e29e..c203e6f0ade 100644 --- a/debian/mariadb-server-10.6.preinst +++ b/debian/mariadb-server-10.6.preinst @@ -14,7 +14,7 @@ MAJOR_VER="${DPKG_MAINTSCRIPT_PACKAGE#mariadb-server-}" # Just kill the invalid insserv.conf.d directory without fallback if [ -d "/etc/insserv.conf.d/mariadb/" ]; then - rm -rf "/etc/insserv.conf.d/mariadb/" + rm -rf "/etc/insserv.conf.d/mariadb/" fi if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi @@ -28,25 +28,25 @@ mysql_upgradedir=/var/lib/mysql-upgrade # do it himself. No database directories should be removed while the server # is running! Another mariadbd in e.g. a different chroot is fine for us. stop_server() { - # Return immediately if there are no mysqld processes running on a host - # (leave containerized processes with the same name in other namespaces) - # as there is no point in trying to shutdown in that case. - if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi + # Return immediately if there are no mysqld processes running on a host + # (leave containerized processes with the same name in other namespaces) + # as there is no point in trying to shutdown in that case. + if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi - set +e - invoke-rc.d mariadb stop - invoke-rc.d mysql stop # Backwards compatibility - errno=$? - set -e + set +e + invoke-rc.d mariadb stop + invoke-rc.d mysql stop # Backwards compatibility + errno=$? + set -e - # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 -a "$errno" != 100 ]; then - echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 - echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 - echo "Stop it yourself and try again!" 1>&2 - db_stop - exit 1 - fi + # systemctl could emit exit code 100=no init script (fresh install) + if [ "$errno" != 0 -a "$errno" != 100 ]; then + echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 + echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 
1>&2 + echo "Stop it yourself and try again!" 1>&2 + db_stop + exit 1 + fi } ################################ main() ########################## @@ -112,7 +112,7 @@ then fi if dpkg --compare-versions "$found_version" '>>' "$max_upgradeable_version" \ - && dpkg --compare-versions "$found_version" '<<' "10.0" + && dpkg --compare-versions "$found_version" '<<' "10.0" then downgrade_detected=true fi @@ -171,22 +171,22 @@ fi # creating mysql group if he isn't already there if ! getent group mysql >/dev/null; then - # Adding system group: mysql. - addgroup --system mysql >/dev/null + # Adding system group: mysql. + addgroup --system mysql >/dev/null fi # creating mysql user if he isn't already there if ! getent passwd mysql >/dev/null; then - # Adding system user: mysql. - adduser \ - --system \ - --disabled-login \ - --ingroup mysql \ - --no-create-home \ - --home /nonexistent \ - --gecos "MySQL Server" \ - --shell /bin/false \ - mysql >/dev/null + # Adding system user: mysql. + adduser \ + --system \ + --disabled-login \ + --ingroup mysql \ + --no-create-home \ + --home /nonexistent \ + --gecos "MySQL Server" \ + --shell /bin/false \ + mysql >/dev/null fi # end of NIS tolerance zone @@ -205,7 +205,7 @@ done # creating mysql home directory if [ ! -d $mysql_datadir ] && [ ! -L $mysql_datadir ]; then - # Use mkdir option 'Z' to create with correct SELinux context. + # Use mkdir option 'Z' to create with correct SELinux context. mkdir -Z $mysql_datadir fi diff --git a/debian/mariadb-server-10.6.prerm b/debian/mariadb-server-10.6.prerm index 8fd172da9d2..0325a80c9b6 100644 --- a/debian/mariadb-server-10.6.prerm +++ b/debian/mariadb-server-10.6.prerm @@ -5,8 +5,8 @@ set -e # Modified dh_systemd_start snippet that's not added automatically if [ -d /run/systemd/system ]; then - deb-systemd-invoke stop mariadb.service >/dev/null -# Modified dh_installinit snippet to only run with sysvinit + deb-systemd-invoke stop mariadb.service >/dev/null + # Modified dh_installinit snippet to only run with sysvinit elif [ -x "/etc/init.d/mariadb" ]; then - invoke-rc.d mariadb stop || exit $? + invoke-rc.d mariadb stop || exit $? fi From 8febdfa3428c59e93be6a2d3a437706cd0233e3f Mon Sep 17 00:00:00 2001 From: Tuukka Pasanen Date: Wed, 29 Mar 2023 12:42:12 +0300 Subject: [PATCH 04/76] MDEV-30952: Fix shellcheck problems on Debian scripts Commit fixes several ShellCheck found problems in Debian Pre- and Postscripts. Debian script mariadb-server-10.6.postrm contains shellcheck Fixed problems are: https://www.shellcheck.net/wiki/SC2166 -- Prefer [ p ] && [ q ] as [ p -a q... https://www.shellcheck.net/wiki/SC2006 -- Use $(...) notation instead of le... https://www.shellcheck.net/wiki/SC1091 -- Not following: /usr/share/debconf... Debian script mariadb-server-10.6.postinst contains shellcheck Fixed problems are: https://www.shellcheck.net/wiki/SC2129 -- Consider using { cmd1; cmd2; } >>... https://www.shellcheck.net/wiki/SC1091 -- Not following: /usr/share/debconf... https://www.shellcheck.net/wiki/SC1072 -- Expected test to end here (don't ... https://www.shellcheck.net/wiki/SC1073 -- Couldn't parse this test expressi... https://www.shellcheck.net/wiki/SC1009 -- The mentioned syntax error was in... Debian script mariadb-server-10.6.preinst contains shellcheck Fixed problems are: https://www.shellcheck.net/wiki/SC2231 -- Quote expansions in this for loop... https://www.shellcheck.net/wiki/SC2166 -- Prefer [ p ] && [ q ] as [ p -a q... https://www.shellcheck.net/wiki/SC2001 -- See if you can use ${variable//se... 
https://www.shellcheck.net/wiki/SC1091 -- Not following: /usr/share/debconf... https://www.shellcheck.net/wiki/SC1007 -- Remove space after = if trying to... --- debian/mariadb-server-10.6.postinst | 37 ++++++++++++++++------------- debian/mariadb-server-10.6.postrm | 5 ++-- debian/mariadb-server-10.6.preinst | 9 +++---- 3 files changed, 29 insertions(+), 22 deletions(-) diff --git a/debian/mariadb-server-10.6.postinst b/debian/mariadb-server-10.6.postinst index 1a83f38dd25..a994209ce11 100644 --- a/debian/mariadb-server-10.6.postinst +++ b/debian/mariadb-server-10.6.postinst @@ -1,6 +1,7 @@ #!/bin/bash set -e +# shellcheck source=/dev/null . /usr/share/debconf/confmodule # Automatically set version to ease maintenance of this file @@ -112,8 +113,10 @@ EOF # This direct update is needed to enable an authentication mechanism to # perform mariadb-upgrade, (MDEV-22678). To keep the impact minimal, we # skip innodb and set key-buffer-size to 0 as it isn't reused. - if [ -f "$mysql_datadir"/auto.cnf ] && [ -f "$mysql_datadir"/mysql/user.MYD ] && - [ ! lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null ] && [ ! -f "$mysql_datadir"/undo_001 ]; then + lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null + lsof_rtn_code=$? + if [ -f "$mysql_datadir/auto.cnf" ] && [ -f "$mysql_datadir/mysql/user.MYD" ] && + [ ! ${lsof_rtn_code} ] && [ ! -f "$mysql_datadir/undo_001" ]; then echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null fi @@ -189,20 +192,22 @@ EOF fi if [ ! -e "$dc" ]; then cat /dev/null > $dc - echo "# THIS FILE IS OBSOLETE. STOP USING IT IF POSSIBLE." >>$dc - echo "# This file exists only for backwards compatibility for" >>$dc - echo "# tools that run '--defaults-file=/etc/mysql/debian.cnf'" >>$dc - echo "# and have root level access to the local filesystem." >>$dc - echo "# With those permissions one can run 'mariadb' directly" >>$dc - echo "# anyway thanks to unix socket authentication and hence" >>$dc - echo "# this file is useless. See package README for more info." >>$dc - echo "[client]" >>$dc - echo "host = localhost" >>$dc - echo "user = root" >>$dc - echo "[mysql_upgrade]" >>$dc - echo "host = localhost" >>$dc - echo "user = root" >>$dc - echo "# THIS FILE WILL BE REMOVED IN A FUTURE DEBIAN RELEASE." >>$dc + { + echo "# THIS FILE IS OBSOLETE. STOP USING IT IF POSSIBLE."; + echo "# This file exists only for backwards compatibility for"; + echo "# tools that run '--defaults-file=/etc/mysql/debian.cnf'"; + echo "# and have root level access to the local filesystem."; + echo "# With those permissions one can run 'mariadb' directly"; + echo "# anyway thanks to unix socket authentication and hence"; + echo "# this file is useless. See package README for more info."; + echo "[client]"; + echo "host = localhost"; + echo "user = root"; + echo "[mysql_upgrade]"; + echo "host = localhost"; + echo "user = root"; + echo "# THIS FILE WILL BE REMOVED IN A FUTURE DEBIAN RELEASE."; + } >> $dc fi # Keep it only root-readable, as it always was chown 0:0 $dc diff --git a/debian/mariadb-server-10.6.postrm b/debian/mariadb-server-10.6.postrm index 82fffef4eae..0c847f19684 100644 --- a/debian/mariadb-server-10.6.postrm +++ b/debian/mariadb-server-10.6.postrm @@ -1,6 +1,7 @@ #!/bin/bash set -e +# shellcheck source=/dev/null . 
/usr/share/debconf/confmodule # Automatically set version to ease maintenance of this file @@ -26,7 +27,7 @@ stop_server() { set -e # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 -a "$errno" != 100 ]; then + if [ "$errno" != 0 ] && [ "$errno" != 100 ]; then echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 echo "Stop it yourself and try again!" 1>&2 @@ -38,7 +39,7 @@ stop_server() { case "$1" in purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - if [ -n "`$MYADMIN ping 2>/dev/null`" ]; then + if [ -n "$($MYADMIN ping 2>/dev/null)" ]; then stop_server sleep 2 fi diff --git a/debian/mariadb-server-10.6.preinst b/debian/mariadb-server-10.6.preinst index c203e6f0ade..e8f9953da7c 100644 --- a/debian/mariadb-server-10.6.preinst +++ b/debian/mariadb-server-10.6.preinst @@ -7,6 +7,7 @@ # * abort-upgrade # +# shellcheck source=/dev/null . /usr/share/debconf/confmodule # Automatically set version to ease maintenance of this file @@ -40,7 +41,7 @@ stop_server() { set -e # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 -a "$errno" != 100 ]; then + if [ "$errno" != 0 ] && [ "$errno" != 100 ]; then echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 echo "Stop it yourself and try again!" 1>&2 @@ -57,7 +58,7 @@ max_upgradeable_version=5.7 # Check if a flag file is found that indicates a previous MariaDB or MySQL # version was installed. If multiple flags are found, check which one was # the biggest version number. -for flag in $mysql_datadir/debian-*.flag +for flag in "$mysql_datadir"/debian-*.flag do # The for loop leaves $flag as the query string if there are no results, @@ -92,7 +93,7 @@ done # Downgrade is detected if the flag version is bigger than $this_version # (e.g. 10.1 > 10.0) or the flag version is smaller than 10.0 but bigger # than $max_upgradeable_version. -if [ ! -z "$found_version" ] +if [ -n "$found_version" ] then # MySQL 8.0 in Ubuntu has a bug in packaging and the file is name wrongly @@ -134,7 +135,7 @@ fi # Don't abort dpkg if downgrade is detected (as was done previously). # Instead simply move the old datadir and create a new for this_version. -if [ ! -z "$downgrade_detected" ] +if [ -n "$downgrade_detected" ] then db_input critical "mariadb-server-$MAJOR_VER/old_data_directory_saved" || true db_go From 50cdf0b5ea8aa4b2b3f20ed8285debbff655cab9 Mon Sep 17 00:00:00 2001 From: Tuukka Pasanen Date: Thu, 30 Mar 2023 13:58:54 +0300 Subject: [PATCH 05/76] MDEV-30952: Reformat Debian pre- and postscripts if-clauses Debian install scripts if-clauses are not formatted as they should be. This commit formats Debian Pre and Post script if-clauses. 
--- debian/mariadb-server-10.6.postinst | 74 ++++++++++++++++++++--------- debian/mariadb-server-10.6.postrm | 27 ++++++++--- debian/mariadb-server-10.6.preinst | 36 +++++++++----- debian/mariadb-server-10.6.prerm | 6 ++- 4 files changed, 101 insertions(+), 42 deletions(-) diff --git a/debian/mariadb-server-10.6.postinst b/debian/mariadb-server-10.6.postinst index a994209ce11..eb0423c3bc7 100644 --- a/debian/mariadb-server-10.6.postinst +++ b/debian/mariadb-server-10.6.postinst @@ -7,7 +7,12 @@ set -e # Automatically set version to ease maintenance of this file MAJOR_VER="${DPKG_MAINTSCRIPT_PACKAGE#mariadb-server-}" -if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi +if [ -n "$DEBIAN_SCRIPT_DEBUG" ] +then + set -v -x + DEBIAN_SCRIPT_TRACE=1 +fi + ${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 } export PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin @@ -25,7 +30,9 @@ case "$1" in # and because changed configuration options should take effect immediately. # In case the server wasn't running at all it should be ok if the stop # script fails. I can't tell at this point because of the cleaned /run. - set +e; invoke-rc.d mariadb stop; set -e + set +e + invoke-rc.d mariadb stop + set -e # An existing /etc/init.d/mysql might be on the system if there was a # previous MySQL or MariaDB installation, since /etc/init.d files are @@ -65,21 +72,26 @@ case "$1" in # If the following symlink exists, it is a preserved copy the old data dir # created by the preinst script during a upgrade that would have otherwise # been replaced by an empty mysql dir. This should restore it. - for dir in DATADIR LOGDIR; do + for dir in DATADIR LOGDIR + do - if [ "$dir" = "DATADIR" ]; then + if [ "$dir" = "DATADIR" ] + then targetdir=$mysql_datadir else targetdir=$mysql_logdir fi savelink="$mysql_upgradedir/$dir.link" - if [ -L "$savelink" ]; then + if [ -L "$savelink" ] + then # If the targetdir was a symlink before we upgraded it is supposed # to be either still be present or not existing anymore now. - if [ -L "$targetdir" ]; then + if [ -L "$targetdir" ] + then rm "$savelink" - elif [ ! -d "$targetdir" ]; then + elif [ ! -d "$targetdir" ] + then mv "$savelink" "$targetdir" else # this should never even happen, but just in case... @@ -116,16 +128,26 @@ EOF lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null lsof_rtn_code=$? if [ -f "$mysql_datadir/auto.cnf" ] && [ -f "$mysql_datadir/mysql/user.MYD" ] && - [ ! ${lsof_rtn_code} ] && [ ! -f "$mysql_datadir/undo_001" ]; then + [ ! ${lsof_rtn_code} ] && [ ! -f "$mysql_datadir/undo_001" ] + then echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null fi # Ensure the existence and right permissions for the database and # log files. Use mkdir option 'Z' to create with correct SELinux context. - if [ ! -d "$mysql_statedir" ] && [ ! -L "$mysql_statedir" ]; then mkdir -Z "$mysql_statedir"; fi - if [ ! -d "$mysql_datadir" ] && [ ! -L "$mysql_datadir" ]; then mkdir -Z "$mysql_datadir" ; fi - if [ ! -d "$mysql_logdir" ] && [ ! -L "$mysql_logdir" ]; then mkdir -Z "$mysql_logdir" ; fi + if [ ! -d "$mysql_statedir" ] && [ ! -L "$mysql_statedir" ] + then + mkdir -Z "$mysql_statedir" + fi + if [ ! -d "$mysql_datadir" ] && [ ! -L "$mysql_datadir" ] + then + mkdir -Z "$mysql_datadir" + fi + if [ ! -d "$mysql_logdir" ] && [ ! 
-L "$mysql_logdir" ] + then + mkdir -Z "$mysql_logdir" + fi # When creating an ext3 jounal on an already mounted filesystem like e.g. # /var/lib/mysql, you get a .journal file that is not modifiable by chown. # The mysql_statedir must not be writable by the mysql user under any @@ -186,11 +208,13 @@ EOF # --defaults-file option for tools (for the sake of upgrades) # and thus need /etc/mysql/debian.cnf to exist, even if it's empty. # In the long run the goal is to obsolete this file. - dc=$mysql_cfgdir/debian.cnf; - if [ ! -d "$mysql_cfgdir" ]; then + dc="$mysql_cfgdir/debian.cnf" + if [ ! -d "$mysql_cfgdir" ] + then install -o 0 -g 0 -m 0755 -d $mysql_cfgdir fi - if [ ! -e "$dc" ]; then + if [ ! -e "$dc" ] + then cat /dev/null > $dc { echo "# THIS FILE IS OBSOLETE. STOP USING IT IF POSSIBLE."; @@ -220,8 +244,10 @@ EOF # on by default) to work both to disable a default profile, and to keep # any profile installed and maintained by users themselves. profile="/etc/apparmor.d/usr.sbin.mariadbd" - if [ -f "$profile" ] && aa-status --enabled 2>/dev/null; then - if grep -q /usr/sbin/mariadbd "$profile" 2>/dev/null ; then + if [ -f "$profile" ] && aa-status --enabled 2>/dev/null + then + if grep -q /usr/sbin/mariadbd "$profile" 2>/dev/null + then apparmor_parser -r "$profile" || true else echo "/usr/sbin/mariadbd { }" | apparmor_parser --remove 2>/dev/null || true @@ -233,14 +259,14 @@ EOF # Note that file cannot be empty, otherwise systemd version in Ubuntu Bionic # will think the service is masked echo "# empty placeholder" > /etc/systemd/system/mariadb.service.d/migrated-from-my.cnf-settings.conf - ;; abort-upgrade|abort-remove|abort-configure) ;; triggered) - if [ -d /run/systemd/system ]; then + if [ -d /run/systemd/system ] + then systemctl --system daemon-reload else invoke-rc.d mariadb restart @@ -260,19 +286,23 @@ db_stop # in case invoke fails # systemctl. If we upgrade from MySQL mysql.service may be masked, which also # means init.d script is disabled. Unmask mysql service explicitly. # Check first that the command exists, to avoid emitting any warning messages. -if [ -x "$(command -v deb-systemd-helper)" ]; then +if [ -x "$(command -v deb-systemd-helper)" ] +then deb-systemd-helper unmask mysql.service > /dev/null fi #DEBHELPER# # Modified dh_systemd_start snippet that's not added automatically -if [ -d /run/systemd/system ]; then +if [ -d /run/systemd/system ] +then systemctl --system daemon-reload >/dev/null || true deb-systemd-invoke start mariadb.service >/dev/null || true # Modified dh_installinit snippet to only run with sysvinit -elif [ -x "/etc/init.d/mariadb" ]; then - if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then +elif [ -x "/etc/init.d/mariadb" ] +then + if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] + then invoke-rc.d mariadb start || exit $? 
fi fi diff --git a/debian/mariadb-server-10.6.postrm b/debian/mariadb-server-10.6.postrm index 0c847f19684..41ed3ffe979 100644 --- a/debian/mariadb-server-10.6.postrm +++ b/debian/mariadb-server-10.6.postrm @@ -7,7 +7,12 @@ set -e # Automatically set version to ease maintenance of this file MAJOR_VER="${DPKG_MAINTSCRIPT_PACKAGE#mariadb-server-}" -if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi +if [ -n "$DEBIAN_SCRIPT_DEBUG" ] +then + set -v -x + DEBIAN_SCRIPT_TRACE=1 +fi + ${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 } MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" @@ -18,7 +23,10 @@ MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" stop_server() { # Return immediately if there are no mysqld processes running # as there is no point in trying to shutdown in that case. - if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi + if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null + then + return + fi set +e invoke-rc.d mariadb stop @@ -27,7 +35,8 @@ stop_server() { set -e # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 ] && [ "$errno" != 100 ]; then + if [ "$errno" != 0 ] && [ "$errno" != 100 ] + then echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 echo "Stop it yourself and try again!" 1>&2 @@ -39,7 +48,8 @@ stop_server() { case "$1" in purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - if [ -n "$($MYADMIN ping 2>/dev/null)" ]; then + if [ -n "$($MYADMIN ping 2>/dev/null)" ] + then stop_server sleep 2 fi @@ -55,7 +65,8 @@ esac # - Remove the mysql user only after all his owned files are purged. # - Cleanup the initscripts only if this was the last provider of them # -if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-$MAJOR_VER.flag" ]; then +if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-$MAJOR_VER.flag" ] +then # we remove the mysql user only after all his owned files are purged rm -f /var/log/mysql.{log,err}{,.0,.[1234567].gz} rm -rf /var/log/mysql @@ -63,7 +74,8 @@ if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-$MAJOR_VER.flag" ]; then db_input high "mariadb-server-$MAJOR_VER/postrm_remove_databases" || true db_go || true db_get "mariadb-server-$MAJOR_VER/postrm_remove_databases" || true - if [ "$RET" = "true" ]; then + if [ "$RET" = "true" ] + then # never remove the debian.cnf when the databases are still existing # else we ran into big trouble on the next install! 
rm -f /etc/mysql/debian.cnf @@ -93,6 +105,7 @@ fi #DEBHELPER# # Modified dh_systemd_start snippet that's not added automatically -if [ -d /run/systemd/system ]; then +if [ -d /run/systemd/system ] +then systemctl --system daemon-reload >/dev/null || true fi diff --git a/debian/mariadb-server-10.6.preinst b/debian/mariadb-server-10.6.preinst index e8f9953da7c..9a31b5c636e 100644 --- a/debian/mariadb-server-10.6.preinst +++ b/debian/mariadb-server-10.6.preinst @@ -14,11 +14,16 @@ MAJOR_VER="${DPKG_MAINTSCRIPT_PACKAGE#mariadb-server-}" # Just kill the invalid insserv.conf.d directory without fallback -if [ -d "/etc/insserv.conf.d/mariadb/" ]; then +if [ -d "/etc/insserv.conf.d/mariadb/" ] +then rm -rf "/etc/insserv.conf.d/mariadb/" fi -if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi +if [ -n "$DEBIAN_SCRIPT_DEBUG" ] +then + set -v -x + DEBIAN_SCRIPT_TRACE=1 +fi ${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 } export PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin @@ -32,7 +37,10 @@ stop_server() { # Return immediately if there are no mysqld processes running on a host # (leave containerized processes with the same name in other namespaces) # as there is no point in trying to shutdown in that case. - if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi + if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null + then + return + fi set +e invoke-rc.d mariadb stop @@ -41,7 +49,8 @@ stop_server() { set -e # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 ] && [ "$errno" != 100 ]; then + if [ "$errno" != 0 ] && [ "$errno" != 100 ] + then echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 echo "Stop it yourself and try again!" 1>&2 @@ -156,7 +165,8 @@ stop_server # If we use NIS then errors should be tolerated. It's up to the # user to ensure that the mysql user is correctly setup. # Beware that there are two ypwhich one of them needs the 2>/dev/null! -if test -n "$(which ypwhich 2>/dev/null)" && ypwhich >/dev/null 2>&1; then +if test -n "$(which ypwhich 2>/dev/null)" && ypwhich >/dev/null 2>&1 +then set +e fi @@ -171,13 +181,15 @@ fi # # creating mysql group if he isn't already there -if ! getent group mysql >/dev/null; then +if ! getent group mysql >/dev/null +then # Adding system group: mysql. addgroup --system mysql >/dev/null fi # creating mysql user if he isn't already there -if ! getent passwd mysql >/dev/null; then +if ! getent passwd mysql >/dev/null +then # Adding system user: mysql. adduser \ --system \ @@ -195,7 +207,8 @@ set -e # if there's a symlink, let's store where it's pointing, because otherwise # it's going to be lost in some situations -for dir in DATADIR LOGDIR; do +for dir in DATADIR LOGDIR +do checkdir=$(eval echo "$"$dir) if [ -L "$checkdir" ]; then # Use mkdir option 'Z' to create with correct SELinux context. @@ -205,7 +218,8 @@ for dir in DATADIR LOGDIR; do done # creating mysql home directory -if [ ! -d $mysql_datadir ] && [ ! -L $mysql_datadir ]; then +if [ ! -d $mysql_datadir ] && [ ! -L $mysql_datadir ] +then # Use mkdir option 'Z' to create with correct SELinux context. 
mkdir -Z $mysql_datadir fi @@ -213,7 +227,8 @@ fi # As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 # 4096 blocks is then lower than 4 MB df_available_blocks=`LC_ALL=C BLOCKSIZE= df --output=avail "$datadir" | tail -n 1` -if [ "$df_available_blocks" -lt "4096" ]; then +if [ "$df_available_blocks" -lt "4096" ] +then echo "ERROR: There's not enough space in $mysql_datadir/" 1>&2 db_stop exit 1 @@ -232,7 +247,6 @@ find $mysql_datadir -follow -not -group mysql -print0 2>/dev/null \ | xargs -0 --no-run-if-empty chgrp mysql set -e - db_stop #DEBHELPER# diff --git a/debian/mariadb-server-10.6.prerm b/debian/mariadb-server-10.6.prerm index 0325a80c9b6..2701fa5fc36 100644 --- a/debian/mariadb-server-10.6.prerm +++ b/debian/mariadb-server-10.6.prerm @@ -4,9 +4,11 @@ set -e #DEBHELPER# # Modified dh_systemd_start snippet that's not added automatically -if [ -d /run/systemd/system ]; then +if [ -d /run/systemd/system ] +then deb-systemd-invoke stop mariadb.service >/dev/null # Modified dh_installinit snippet to only run with sysvinit -elif [ -x "/etc/init.d/mariadb" ]; then +elif [ -x "/etc/init.d/mariadb" ] +then invoke-rc.d mariadb stop || exit $? fi From de8567559ed6bcd9c74afa86d3003f14bcb67d39 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Tue, 2 May 2023 18:09:34 +1000 Subject: [PATCH 06/76] deb: autobake - add mantic lunar and manic don't need lsb-base any more. Add a consistent style of fall though, and add_lsb_base_depends is called once on the last fall through value. Remove impish (EOL). --- debian/autobake-deb.sh | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index ff1ae1544d5..0454348e1c1 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -115,7 +115,6 @@ case "${LSBNAME}" in # Debian "buster") - add_lsb_base_depends disable_libfmt replace_uring_with_aio if [ ! "$architecture" = amd64 ] @@ -123,11 +122,10 @@ in disable_pmem fi ;& - "bullseye"|"bookworm") - if [[ "${LSBNAME}" == "bullseye" ]] - then - add_lsb_base_depends - fi + "bullseye") + add_lsb_base_depends + ;& + "bookworm") # mariadb-plugin-rocksdb in control is 4 arches covered by the distro rocksdb-tools # so no removal is necessary. if [[ ! "$architecture" =~ amd64|arm64|ppc64el ]] @@ -145,17 +143,17 @@ in ;; # Ubuntu "bionic") - add_lsb_base_depends remove_rocksdb_tools [ "$architecture" != amd64 ] && disable_pmem ;& "focal") - add_lsb_base_depends replace_uring_with_aio disable_libfmt ;& - "impish"|"jammy"|"kinetic"|"lunar") + "jammy"|"kinetic") add_lsb_base_depends + ;& + "lunar"|"mantic") # mariadb-plugin-rocksdb s390x not supported by us (yet) # ubuntu doesn't support mips64el yet, so keep this just # in case something changes. From 2740b657ced466c9f6945d59b78915ca6000e890 Mon Sep 17 00:00:00 2001 From: Tuukka Pasanen Date: Mon, 8 May 2023 11:35:32 +0300 Subject: [PATCH 07/76] MDEV-31216: Make sure that lsof does not fail on install Command lsof can fail on Debian install. Revert logic more like old one to make sure that there is no failing and still does don't boundce on shellcheck. 
--- debian/mariadb-server-10.6.postinst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/debian/mariadb-server-10.6.postinst b/debian/mariadb-server-10.6.postinst index eb0423c3bc7..b87ce856313 100644 --- a/debian/mariadb-server-10.6.postinst +++ b/debian/mariadb-server-10.6.postinst @@ -125,10 +125,10 @@ EOF # This direct update is needed to enable an authentication mechanism to # perform mariadb-upgrade, (MDEV-22678). To keep the impact minimal, we # skip innodb and set key-buffer-size to 0 as it isn't reused. - lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null - lsof_rtn_code=$? - if [ -f "$mysql_datadir/auto.cnf" ] && [ -f "$mysql_datadir/mysql/user.MYD" ] && - [ ! ${lsof_rtn_code} ] && [ ! -f "$mysql_datadir/undo_001" ] + if [ -f "$mysql_datadir/auto.cnf" ] && + [ -f "$mysql_datadir/mysql/user.MYD" ] && + ! lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null && + [ ! -f "$mysql_datadir/undo_001" ] then echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null From 0d8b0493ee2458eebd6110995c46141b7c7978c0 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 10 May 2023 08:43:49 -0400 Subject: [PATCH 08/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 4a712ac1045..2806542e3ac 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=5 -MYSQL_VERSION_PATCH=20 +MYSQL_VERSION_PATCH=21 SERVER_MATURITY=stable From afe44ef2126d74c11407266cf7d0225ac0392e61 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 10 May 2023 08:45:08 -0400 Subject: [PATCH 09/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 017d7bd5ba5..b7bb36d6024 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=6 -MYSQL_VERSION_PATCH=13 +MYSQL_VERSION_PATCH=14 SERVER_MATURITY=stable From 56aa73a3e370291365ee1f0db299961f163a9717 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 10 May 2023 08:46:44 -0400 Subject: [PATCH 10/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d2c4c84b782..47b4b206e8d 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=9 -MYSQL_VERSION_PATCH=6 +MYSQL_VERSION_PATCH=7 SERVER_MATURITY=stable From 07abe20bbbc4a2353a5f2cd5af72540690b49c8f Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 10 May 2023 08:47:29 -0400 Subject: [PATCH 11/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 96a9a52bfd8..2c5ca290588 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=10 -MYSQL_VERSION_PATCH=4 +MYSQL_VERSION_PATCH=5 SERVER_MATURITY=stable From 40a857c9081adcc5946e5bb6cd3c7ce6b9889db7 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 10 May 2023 08:48:11 -0400 Subject: [PATCH 12/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index d26d4fbd86c..cd377cb282f 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=11 -MYSQL_VERSION_PATCH=3 +MYSQL_VERSION_PATCH=4 SERVER_MATURITY=stable From 38ed782f555025bd85047974483d1ac28d4213d3 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 11 May 2023 08:42:28 +0300 Subject: [PATCH 13/76] MDEV-30812 fixup: GCC 12.2.0 -Wmaybe-uninitialized best_access_path(): Simplify the logic for computing fanout. This fixes up commit 4329ec5d3b109cb0bcbee151b5800dc7b19d1945 --- sql/sql_select.cc | 35 ++++++++++++++++++----------------- 1 file changed, 18 insertions(+), 17 deletions(-) diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 31e3e16b397..b00c7de9cbc 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -8531,25 +8531,26 @@ best_access_path(JOIN *join, fanout= hash_join_fanout(join, s, remaining_tables, rnd_records, hj_start_key, &stats_found); join_sel= 1.0; // Don't do the "10% heuristic" - } - if (!stats_found) - { - /* - No OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY or no field statistics - found. - - Take into account if there is non constant constraints used with - earlier tables in the where expression. - If yes, this will set fanout to rnd_records/4. - We estimate that there will be HASH_FANOUT (10%) - hash matches / row. - */ - if (found_constraint && !force_estimate) - rnd_records= use_found_constraint(rnd_records); - fanout= rnd_records; - join_sel= 0.1; + if (stats_found) + goto fanout_computed; } + /* + No OPTIMIZER_SWITCH_HASH_JOIN_CARDINALITY or no field statistics + found. + + Take into account if there is non constant constraints used with + earlier tables in the where expression. + If yes, this will set fanout to rnd_records/4. + We estimate that there will be HASH_FANOUT (10%) + hash matches / row. + */ + if (found_constraint && !force_estimate) + rnd_records= use_found_constraint(rnd_records); + fanout= rnd_records; + join_sel= 0.1; + + fanout_computed: tmp= s->quick ? s->quick->read_time : s->scan_time(); double cmp_time= (s->records - rnd_records)/TIME_FOR_COMPARE; tmp= COST_ADD(tmp, cmp_time); From 7124911a2c08beeb9b6aebb92eb2b6bedfabe413 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 11 May 2023 08:43:00 +0300 Subject: [PATCH 14/76] MDEV-31158: Potential hang with ROW_FORMAT=COMPRESSED tables btr_cur_need_opposite_intention(): Check also page_zip_available() so that we will escalate to exclusive index latch when a non-leaf page may have to be split further due to ROW_FORMAT=COMPRESSED page overflow. 
Tested by: Matthias Leich --- .../suite/innodb_zip/r/page_size.result | 21 +++++++++++++++ mysql-test/suite/innodb_zip/t/page_size.test | 24 +++++++++++++++++ storage/innobase/btr/btr0cur.cc | 27 ++++++++++++------- 3 files changed, 63 insertions(+), 9 deletions(-) diff --git a/mysql-test/suite/innodb_zip/r/page_size.result b/mysql-test/suite/innodb_zip/r/page_size.result index 47effe06884..48b954c945b 100644 --- a/mysql-test/suite/innodb_zip/r/page_size.result +++ b/mysql-test/suite/innodb_zip/r/page_size.result @@ -608,4 +608,25 @@ SET GLOBAL innodb_compression_level=0; INSERT INTO t1 VALUES (''); SET GLOBAL innodb_compression_level= @save_innodb_compression_level; DROP TABLE t1; +# +# MDEV-31158 Assertion ...MTR_MEMO_X_LOCKED in btr_attach_half_pages() +# +SET @save_compression_level=@@GLOBAL.innodb_compression_level; +SET GLOBAL innodb_compression_level=0; +CREATE TEMPORARY TABLE t(a SERIAL, prefix VARBINARY(4), pad INT); +INSERT INTO t(prefix, pad) VALUES +(_binary 0xff,160),('',19),(_binary 0x0001,253),(_binary 0x0b11,169), +(_binary 0x0b010001,23),(_binary 0x0b100001,251),(_binary 0x0d,163), +(_binary 0xb3,254),(_binary 0x96,254),(_binary 0xeb,61), +(_binary 0xf231,253),(_binary 0x1db0,253),(_binary 0x0005,101), +(_binary 0x6370,253),(_binary 0x0b12,112),(_binary 0x0b010002,23), +(_binary 0x0b100002,80),(_binary 0x181984,163),(_binary 0x181926,168), +(_binary 0xe1,176),(_binary 0xe2,187),(_binary 0xe6,254),(_binary 0xbb,51), +(_binary 0x1c,248),(_binary 0x8a,94),(_binary 0x14,254); +CREATE TABLE u(a SMALLINT UNSIGNED PRIMARY KEY AUTO_INCREMENT, +b VARBINARY(255), KEY(b)) ENGINE=InnoDB +KEY_BLOCK_SIZE=1 ROW_FORMAT=COMPRESSED; +INSERT INTO u SELECT a,CONCAT(prefix,REPEAT(chr(0),pad)) FROM t; +DROP TABLE u, t; +SET GLOBAL innodb_compression_level=@save_compression_level; # End of 10.6 tests diff --git a/mysql-test/suite/innodb_zip/t/page_size.test b/mysql-test/suite/innodb_zip/t/page_size.test index 16d65a139cf..3455ef8ed94 100644 --- a/mysql-test/suite/innodb_zip/t/page_size.test +++ b/mysql-test/suite/innodb_zip/t/page_size.test @@ -888,4 +888,28 @@ INSERT INTO t1 VALUES (''); SET GLOBAL innodb_compression_level= @save_innodb_compression_level; DROP TABLE t1; +--echo # +--echo # MDEV-31158 Assertion ...MTR_MEMO_X_LOCKED in btr_attach_half_pages() +--echo # +--source include/have_innodb.inc + +SET @save_compression_level=@@GLOBAL.innodb_compression_level; +SET GLOBAL innodb_compression_level=0; +CREATE TEMPORARY TABLE t(a SERIAL, prefix VARBINARY(4), pad INT); +INSERT INTO t(prefix, pad) VALUES +(_binary 0xff,160),('',19),(_binary 0x0001,253),(_binary 0x0b11,169), +(_binary 0x0b010001,23),(_binary 0x0b100001,251),(_binary 0x0d,163), +(_binary 0xb3,254),(_binary 0x96,254),(_binary 0xeb,61), +(_binary 0xf231,253),(_binary 0x1db0,253),(_binary 0x0005,101), +(_binary 0x6370,253),(_binary 0x0b12,112),(_binary 0x0b010002,23), +(_binary 0x0b100002,80),(_binary 0x181984,163),(_binary 0x181926,168), +(_binary 0xe1,176),(_binary 0xe2,187),(_binary 0xe6,254),(_binary 0xbb,51), +(_binary 0x1c,248),(_binary 0x8a,94),(_binary 0x14,254); +CREATE TABLE u(a SMALLINT UNSIGNED PRIMARY KEY AUTO_INCREMENT, + b VARBINARY(255), KEY(b)) ENGINE=InnoDB + KEY_BLOCK_SIZE=1 ROW_FORMAT=COMPRESSED; +INSERT INTO u SELECT a,CONCAT(prefix,REPEAT(chr(0),pad)) FROM t; +DROP TABLE u, t; +SET GLOBAL innodb_compression_level=@save_compression_level; + --echo # End of 10.6 tests diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 70b0ae4c32c..784e32a11c1 100644 --- 
a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -748,18 +748,24 @@ btr_cur_will_modify_tree( /** Detects whether the modifying record might need a opposite modification to the intention. -@param page page -@param lock_intention lock intention for the tree operation +@param bpage buffer pool page +@param is_clust whether this is a clustered index +@param lock_intention lock intention for the tree operation @param node_ptr_max_size the maximum size of a node pointer @param compress_limit BTR_CUR_PAGE_COMPRESS_LIMIT(index) -@param rec record (current node_ptr) -@return true if tree modification is needed */ -static bool btr_cur_need_opposite_intention(const page_t *page, +@param rec record (current node_ptr) +@return true if tree modification is needed */ +static bool btr_cur_need_opposite_intention(const buf_page_t &bpage, + bool is_clust, btr_intention_t lock_intention, ulint node_ptr_max_size, ulint compress_limit, const rec_t *rec) { + if (UNIV_LIKELY_NULL(bpage.zip.data) && + !page_zip_available(&bpage.zip, is_clust, node_ptr_max_size, 1)) + return true; + const page_t *const page= bpage.frame; if (lock_intention != BTR_INTENTION_INSERT) { /* We compensate also for btr_cur_compress_recommendation() */ @@ -1342,7 +1348,8 @@ release_tree: !btr_block_get(*index(), btr_page_get_next(block->page.frame), RW_X_LATCH, false, mtr, &err)) goto func_exit; - if (btr_cur_need_opposite_intention(block->page.frame, lock_intention, + if (btr_cur_need_opposite_intention(block->page, index()->is_clust(), + lock_intention, node_ptr_max_size, compress_limit, page_cur.rec)) goto need_opposite_intention; @@ -1398,7 +1405,8 @@ release_tree: default: break; case BTR_MODIFY_TREE: - if (btr_cur_need_opposite_intention(block->page.frame, lock_intention, + if (btr_cur_need_opposite_intention(block->page, index()->is_clust(), + lock_intention, node_ptr_max_size, compress_limit, page_cur.rec)) /* If the rec is the first or last in the page for pessimistic @@ -1948,7 +1956,7 @@ index_locked: break; if (!index->lock.have_x() && - btr_cur_need_opposite_intention(block->page.frame, + btr_cur_need_opposite_intention(block->page, index->is_clust(), lock_intention, node_ptr_max_size, compress_limit, page_cur.rec)) @@ -1995,7 +2003,8 @@ index_locked: ut_ad(latch_mode != BTR_MODIFY_TREE || upper_rw_latch == RW_X_LATCH); if (latch_mode != BTR_MODIFY_TREE); - else if (btr_cur_need_opposite_intention(block->page.frame, lock_intention, + else if (btr_cur_need_opposite_intention(block->page, index->is_clust(), + lock_intention, node_ptr_max_size, compress_limit, page_cur.rec)) { From 45a879f6cf1e485e7f4bd870b71cc3c77e65add8 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Wed, 10 May 2023 20:08:33 +0300 Subject: [PATCH 15/76] Fix ./mtr --view-protocol opt_trace Follow the approach taken in the rest of the test. 
--- mysql-test/main/opt_trace.test | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test index 3f1f1fd1204..5238af5c7a0 100644 --- a/mysql-test/main/opt_trace.test +++ b/mysql-test/main/opt_trace.test @@ -756,6 +756,11 @@ select * from from t10 left join t11 on t11.col1=t10.col1 group by grp_id) T on T.grp_id=t1.b; +# Not sure how MDEV-27871 is related but this test uses this reason +# all over the place: +#enable after fix MDEV-27871 +--disable_view_protocol + select json_detailed(json_extract(trace, '$**.check_split_materialized')) as JS from information_schema.optimizer_trace; @@ -768,6 +773,7 @@ select ) as JS from information_schema.optimizer_trace; +--enable_view_protocol drop table t1,t2,t3,t10,t11; set optimizer_trace=DEFAULT; From 279d0120f5769462c3418fb32387c3e3b7f0ce4b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 11 May 2023 13:21:57 +0300 Subject: [PATCH 16/76] MDEV-29967 innodb_read_ahead_threshold (linear read-ahead) does not work buf_read_ahead_linear(): Correct some calculations that were broken in commit b1ab211dee599eabd9a5b886fafa3adea29ae041 (MDEV-15053). Thanks to Daniel Black for providing a test case and initial debugging. Tested by: Matthias Leich --- storage/innobase/buf/buf0rea.cc | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index ff163f74b08..26a84939306 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -582,7 +582,7 @@ buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf) /* We will check that almost all pages in the area have been accessed in the desired order. */ - const bool descending= page_id == low; + const bool descending= page_id != low; if (!descending && page_id != high_1) /* This is not a border page of the area */ @@ -612,7 +612,7 @@ fail: uint32_t{buf_pool.read_ahead_area}); page_id_t new_low= low, new_high_1= high_1; unsigned prev_accessed= 0; - for (page_id_t i= low; i != high_1; ++i) + for (page_id_t i= low; i <= high_1; ++i) { const ulint fold= i.fold(); page_hash_latch *hash_lock= buf_pool.page_hash.lock(fold); @@ -647,12 +647,21 @@ hard_fail: if (prev == FIL_NULL || next == FIL_NULL) goto hard_fail; page_id_t id= page_id; - if (descending && next - 1 == page_id.page_no()) - id.set_page_no(prev); - else if (!descending && prev + 1 == page_id.page_no()) - id.set_page_no(next); + if (descending) + { + if (id == high_1) + ++id; + else if (next - 1 != page_id.page_no()) + goto hard_fail; + else + id.set_page_no(prev); + } else - goto hard_fail; /* Successor or predecessor not in the right order */ + { + if (prev + 1 != page_id.page_no()) + goto hard_fail; + id.set_page_no(next); + } new_low= id - (id.page_no() % buf_read_ahead_area); new_high_1= new_low + (buf_read_ahead_area - 1); @@ -693,7 +702,7 @@ failed: /* If we got this far, read-ahead can be sensible: do it */ count= 0; for (ulint ibuf_mode= ibuf ? 
BUF_READ_IBUF_PAGES_ONLY : BUF_READ_ANY_PAGE; - new_low != new_high_1; ++new_low) + new_low <= new_high_1; ++new_low) { if (ibuf_bitmap_page(new_low, zip_size)) continue; From 7c9f275ee4cd59212a85827626fbca2615d144d5 Mon Sep 17 00:00:00 2001 From: karmengc Date: Thu, 11 May 2023 10:01:35 +0100 Subject: [PATCH 17/76] server.cnf: adjust major version to 10.11 Bump section header to 10.11 at rpm file server.cnf --- support-files/rpm/server.cnf | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/support-files/rpm/server.cnf b/support-files/rpm/server.cnf index bf9ed9c4f46..1db6cd186e8 100644 --- a/support-files/rpm/server.cnf +++ b/support-files/rpm/server.cnf @@ -39,8 +39,8 @@ # you can put MariaDB-only options here [mariadb] -# This group is only read by MariaDB-10.8 servers. +# This group is only read by MariaDB-10.11 servers. # If you use the same .cnf file for MariaDB of different versions, # use this group for options that older servers don't understand -[mariadb-10.8] +[mariadb-10.11] From 477285c8ead70d78e69faf3052401e09a38238c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2023 14:57:14 +0300 Subject: [PATCH 18/76] MDEV-31253 Freed data pages are not always being scrubbed fil_space_t::flush_freed(): Renamed from buf_flush_freed_pages(); this is a backport of aa45850687409ba857eb3a0eb19e7ccc28dc02f7 from 10.6. Invoke log_write_up_to() on last_freed_lsn, instead of avoiding the operation when the log has not yet been written. A more costly alternative would be that log_checkpoint() would invoke this function on every affected tablespace. --- storage/innobase/buf/buf0flu.cc | 58 +++++++++++++++--------------- storage/innobase/buf/buf0rea.cc | 9 ++++- storage/innobase/include/fil0fil.h | 26 ++++++++------ 3 files changed, 54 insertions(+), 39 deletions(-) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 36ea302a403..ce75f95602c 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1042,63 +1042,65 @@ static page_id_t buf_flush_check_neighbors(const fil_space_t &space, return i; } -MY_ATTRIBUTE((nonnull, warn_unused_result)) -/** Write punch-hole or zeroes of the freed ranges when -innodb_immediate_scrub_data_uncompressed from the freed ranges. -@param space tablespace which may contain ranges of freed pages -@param writable whether the tablespace is writable +MY_ATTRIBUTE((warn_unused_result)) +/** Apply freed_ranges to the file. 
+@param writable whether the file is writable @return number of pages written or hole-punched */ -static uint32_t buf_flush_freed_pages(fil_space_t *space, bool writable) +uint32_t fil_space_t::flush_freed(bool writable) { - const bool punch_hole= space->punch_hole; if (!punch_hole && !srv_immediate_scrub_data_uncompressed) return 0; mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); mysql_mutex_assert_not_owner(&buf_pool.mutex); - space->freed_range_mutex.lock(); - if (space->freed_ranges.empty() || - log_sys.get_flushed_lsn() < space->get_last_freed_lsn()) + for (;;) { - space->freed_range_mutex.unlock(); - return 0; + freed_range_mutex.lock(); + if (freed_ranges.empty()) + { + freed_range_mutex.unlock(); + return 0; + } + const lsn_t flush_lsn= last_freed_lsn; + if (log_sys.get_flushed_lsn() >= flush_lsn) + break; + freed_range_mutex.unlock(); + log_write_up_to(flush_lsn, true); } - const unsigned physical_size{space->physical_size()}; + const unsigned physical{physical_size()}; - range_set freed_ranges= std::move(space->freed_ranges); + range_set freed= std::move(freed_ranges); uint32_t written= 0; if (!writable); else if (punch_hole) { - for (const auto &range : freed_ranges) + for (const auto &range : freed) { written+= range.last - range.first + 1; - space->reacquire(); - space->io(IORequest(IORequest::PUNCH_RANGE), - os_offset_t{range.first} * physical_size, - (range.last - range.first + 1) * physical_size, - nullptr); + reacquire(); + io(IORequest(IORequest::PUNCH_RANGE), + os_offset_t{range.first} * physical, + (range.last - range.first + 1) * physical, nullptr); } } else { - for (const auto &range : freed_ranges) + for (const auto &range : freed) { written+= range.last - range.first + 1; for (os_offset_t i= range.first; i <= range.last; i++) { - space->reacquire(); - space->io(IORequest(IORequest::WRITE_ASYNC), - i * physical_size, physical_size, - const_cast(field_ref_zero)); + reacquire(); + io(IORequest(IORequest::WRITE_ASYNC), i * physical, physical, + const_cast(field_ref_zero)); } } } - space->freed_range_mutex.unlock(); + freed_range_mutex.unlock(); return written; } @@ -1225,7 +1227,7 @@ static ulint buf_free_from_unzip_LRU_list_batch(ulint max) static std::pair buf_flush_space(const uint32_t id) { if (fil_space_t *space= fil_space_t::get(id)) - return {space, buf_flush_freed_pages(space, true)}; + return {space, space->flush_freed(true)}; return {nullptr, 0}; } @@ -1617,7 +1619,7 @@ bool buf_flush_list_space(fil_space_t *space, ulint *n_flushed) bool acquired= space->acquire(); { - const uint32_t written{buf_flush_freed_pages(space, acquired)}; + const uint32_t written{space->flush_freed(acquired)}; mysql_mutex_lock(&buf_pool.mutex); if (written) buf_pool.stat.n_pages_written+= written; diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 26a84939306..97bfd6f64ec 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -728,6 +728,13 @@ failed: return count; } +/** @return whether a page has been freed */ +inline bool fil_space_t::is_freed(uint32_t page) +{ + std::lock_guard freed_lock(freed_range_mutex); + return freed_ranges.contains(page); +} + /** Issues read requests for pages which recovery wants to read in. @param[in] space_id tablespace id @param[in] page_nos array of page numbers to read, with the @@ -747,7 +754,7 @@ void buf_read_recv_pages(ulint space_id, const uint32_t* page_nos, ulint n) for (ulint i = 0; i < n; i++) { /* Ignore if the page already present in freed ranges. 
*/ - if (space->freed_ranges.contains(page_nos[i])) { + if (space->is_freed(page_nos[i])) { continue; } diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 385d547a060..10365d167b7 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -415,16 +415,16 @@ public: punch hole */ bool punch_hole; - /** mutex to protect freed ranges */ - std::mutex freed_range_mutex; +private: + /** mutex to protect freed_ranges and last_freed_lsn */ + std::mutex freed_range_mutex; - /** Variables to store freed ranges. This can be used to write - zeroes/punch the hole in files. Protected by freed_mutex */ - range_set freed_ranges; - - /** Stores last page freed lsn. Protected by freed_mutex */ - lsn_t last_freed_lsn; + /** Ranges of freed page numbers; protected by freed_range_mutex */ + range_set freed_ranges; + /** LSN of freeing last page; protected by freed_range_mutex */ + lsn_t last_freed_lsn; +public: ulint magic_n;/*!< FIL_SPACE_MAGIC_N */ /** @return whether doublewrite buffering is needed */ @@ -434,6 +434,14 @@ public: buf_dblwr.is_initialised(); } + /** @return whether a page has been freed */ + inline bool is_freed(uint32_t page); + + /** Apply freed_ranges to the file. + @param writable whether the file is writable + @return number of pages written or hole-punched */ + uint32_t flush_freed(bool writable); + /** Append a file to the chain of files of a space. @param[in] name file name of a file that is not open @param[in] handle file handle, or OS_FILE_CLOSED @@ -589,8 +597,6 @@ public: /** Close all tablespace files at shutdown */ static void close_all(); - /** @return last_freed_lsn */ - lsn_t get_last_freed_lsn() { return last_freed_lsn; } /** Update last_freed_lsn */ void update_last_freed_lsn(lsn_t lsn) { From c9eff1a144ba44846373660a30d342d3f0dc91a5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 12 May 2023 15:04:50 +0300 Subject: [PATCH 19/76] MDEV-31254 InnoDB: Trying to read doublewrite buffer page buf_read_page_low(): Remove an error message and a debug assertion that can be triggered when using innodb_page_size=4k and innodb_file_per_table=0. In that case, buf_read_ahead_linear() may be invoked on page 255, which is one less than the first page of the doublewrite buffer (256). --- storage/innobase/buf/buf0rea.cc | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 97bfd6f64ec..822c424fe46 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -280,9 +280,7 @@ buf_read_page_low( *err = DB_SUCCESS; if (buf_dblwr.is_inside(page_id)) { - ib::error() << "Trying to read doublewrite buffer page " - << page_id; - ut_ad(0); + *err = DB_CORRUPTION; nothing_read: space->release(); return false; From f522b0f2303e5556e3fc98c50cdc7999f6f5834a Mon Sep 17 00:00:00 2001 From: Tuukka Pasanen Date: Wed, 10 May 2023 11:57:48 +0300 Subject: [PATCH 20/76] MDEV-30951: Fix small perlcritic and enable modern Perl Add Modern Perl headers. Perl 5.16 is still fairly old from 2012. Enable UTF-8, warnings and make script 'strict' Small fixes for perlcritic reported problems and some crashes I/O layer ":utf8" used at line 268, column 16. Use ":encoding(UTF-8)" to get strict validation. (Severity: 5) "return" statement with explicit "undef" at line 806, column 4. See page 199 of PBP. (Severity: 5) "return" statement with explicit "undef" at line 6844, column 4. See page 199 of PBP. 
(Severity: 5) "return" statement with explicit "undef" at line 7524, column 4. See page 199 of PBP. (Severity: 5) "return" statement with explicit "undef" at line 7527, column 4. See page 199 of PBP. (Severity: 5) "return" statement with explicit "undef" at line 7599, column 4. See page 199 of PBP. (Severity: 5) "return" statement with explicit "undef" at line 7602, column 4. See page 199 of PBP. (Severity: 5) Expression form of "eval" at line 7784, column 4. See page 161 of PBP. (Severity: 5) Expression form of "eval" at line 7806, column 4. See page 161 of PBP. (Severity: 5) Glob written as <...> at line 8016, column 25. See page 167 of PBP. (Severity: 5) "return" statement followed by "sort" at line 9195, column 60. Behavior is undefined if called in scalar context. (Severity: 5) Expression form of "eval" at line 9846, column 10. See page 161 of PBP. (Severity: 5) --- debian/additions/innotop/innotop | 35 ++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 11 deletions(-) mode change 100644 => 100755 debian/additions/innotop/innotop diff --git a/debian/additions/innotop/innotop b/debian/additions/innotop/innotop old mode 100644 new mode 100755 index 2bc090917fe..8e5a41768eb --- a/debian/additions/innotop/innotop +++ b/debian/additions/innotop/innotop @@ -20,6 +20,9 @@ # Street, Fifth Floor, Boston, MA 02110-1335 USA use strict; +use warnings; +use utf8; +use feature ':5.16'; use warnings FATAL => 'all'; our $VERSION = '1.11.4'; @@ -265,7 +268,7 @@ sub get_dbh { $dbh->do($sql); MKDEBUG && _d('Enabling charset for STDOUT'); if ( $charset eq 'utf8' ) { - binmode(STDOUT, ':utf8') + binmode(STDOUT, ':encoding(UTF-8)') or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR"; } else { @@ -612,6 +615,9 @@ sub ts_to_string { sub parse_innodb_timestamp { my $text = shift; + if ( ! defined $text ) { + return (0, 0, 0, 0, 0, 0); + } my ( $y, $m, $d, $h, $i, $s ) = $text =~ m/^(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)$/; die("Can't get timestamp from $text\n") unless $y; @@ -803,7 +809,8 @@ sub parse_fk_transaction_error { # TODO: write some tests for this sub parse_innodb_record_dump { my ( $dump, $complete, $debug ) = @_; - return undef unless $dump; + # Use bare return as recommend in page 199 of PBP + return unless $dump; my $result = {}; @@ -6769,6 +6776,9 @@ sub set_precision { my ( $num, $precision ) = @_; $num = 0 unless defined $num; $precision = $config{num_digits}->{val} if !defined $precision; + if ( $num eq "" ) { + $num = int(0); + } sprintf("%.${precision}f", $num); } @@ -6777,6 +6787,9 @@ sub set_precision { sub percent { my ( $num ) = @_; $num = 0 unless defined $num; + if ( $num eq "" ) { + $num = int(0); + } my $digits = $config{num_digits}->{val}; return sprintf("%.${digits}f", $num * 100) . ($config{show_percent}->{val} ? '%' : ''); @@ -6841,7 +6854,7 @@ sub make_color_func { push @criteria, "( defined \$set->{$spec->{col}} && \$set->{$spec->{col}} $spec->{op} $val ) { return '$spec->{color}'; }"; } - return undef unless @criteria; + return unless @criteria; my $sub = eval 'sub { my ( $set ) = @_; if ' . join(" elsif ", @criteria) . 
'}'; die if $EVAL_ERROR; return $sub; @@ -7521,10 +7534,10 @@ sub choose_connections { sub do_stmt { my ( $cxn, $stmt_name, @args ) = @_; - return undef if $file; + return if $file; # Test if the cxn should not even be tried - return undef if $dbhs{$cxn} + return if $dbhs{$cxn} && $dbhs{$cxn}->{failed} && ( !$dbhs{$cxn}->{dbh} || !$dbhs{$cxn}->{dbh}->{Active} || $dbhs{$cxn}->{mode} eq $config{mode}->{val} ); @@ -7596,10 +7609,10 @@ sub handle_cxn_error { sub do_query { my ( $cxn, $query ) = @_; - return undef if $file; + return if $file; # Test if the cxn should not even be tried - return undef if $dbhs{$cxn} + return if $dbhs{$cxn} && $dbhs{$cxn}->{failed} && ( !$dbhs{$cxn}->{dbh} || !$dbhs{$cxn}->{dbh}->{Active} || $dbhs{$cxn}->{mode} eq $config{mode}->{val} ); @@ -7781,7 +7794,7 @@ sub compile_select_stmt { sub compile_filter { my ( $text ) = @_; my ( $sub, $err ); - eval "\$sub = sub { my \$set = shift; $text }"; + eval { $sub = sub { my $set = shift; $text } }; if ( $EVAL_ERROR ) { $EVAL_ERROR =~ s/at \(eval.*$//; $sub = sub { return $EVAL_ERROR }; @@ -8013,7 +8026,7 @@ sub load_config_plugins { # First, find a list of all plugins that exist on disk, and get information about them. my $dir = $config{plugin_dir}->{val}; - foreach my $p_file ( <$dir/*.pm> ) { + foreach my $p_file (glob($dir."/*.pm")) { my ($package, $desc); eval { open my $p_in, "<", $p_file or die $OS_ERROR; @@ -9192,7 +9205,7 @@ sub switch_var_set { # edit_stmt_sleep_times {{{3 sub edit_stmt_sleep_times { $clear_screen_sub->(); - my $stmt = prompt_list('Specify a statement', '', sub { return sort keys %stmt_maker_for }); + my $stmt = prompt_list('Specify a statement', '', sub { my @tmparray = sort keys %stmt_maker_for; return @tmparray }); return unless $stmt && exists $stmt_maker_for{$stmt}; $clear_screen_sub->(); my $curr_val = $stmt_sleep_time_for{$stmt} || 0; @@ -9843,7 +9856,7 @@ sub get_slave_status { sub is_func { my ( $word ) = @_; return defined(&$word) - || eval "my \$x= sub { $word }; 1" + || eval { my $x = sub { $word }; 1 } || $EVAL_ERROR !~ m/^Bareword/; } From 3b34454c9dbc11c0090f323fe2b8ce21d12276a2 Mon Sep 17 00:00:00 2001 From: Rucha Deodhar Date: Mon, 3 Apr 2023 13:34:51 +0530 Subject: [PATCH 21/76] MDEV-23187: Assorted assertion failures in json_find_path with certain collations Analysis: When we have negative index, the value in array_counter[] array is going to be -1 at some point ( because in case of negative index in json path, the initial value for a path with negative index is -, and as we move forward in array while parsing it and finding path, this value increments). Since SKIPPED_STEP_MARK, is maximum uint value, it gets compared to some int value in the array and eventually equates to -1 and messes with path. Fix: Make SKIPPED_STEP_MARK maximum of INT32. 
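A rough standalone sketch of the collision (hypothetical C, not taken from the server sources; INT_MAX stands in here for the INT_MAX32 used in the patch):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
      /* Old marker: all bits set, which as a signed int is -1 on
         two's-complement machines -- exactly the value a negative-index
         counter can reach while stepping through a JSON array. */
      int old_mark = (int) ~0;
      /* New marker: a large positive value that a small negative
         counter can never reach. */
      int new_mark = INT_MAX;

      printf("old marker %d collides with -1: %s\n",
             old_mark, old_mark == -1 ? "yes" : "no");
      printf("new marker %d collides with -1: %s\n",
             new_mark, new_mark == -1 ? "yes" : "no");
      return 0;
    }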
--- mysql-test/main/func_json.result | 24 ++++++++++++++++++++++++ mysql-test/main/func_json.test | 26 ++++++++++++++++++++++++++ strings/json_lib.c | 2 +- 3 files changed, 51 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index 8cbc5305405..5af8e658d34 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -2578,5 +2578,29 @@ SELECT JSON_EXTRACT('{ "my-key": 1 }', '$.my-key'); JSON_EXTRACT('{ "my-key": 1 }', '$.my-key') 1 # +# MDEV-23187: Assorted assertion failures in json_find_path with certain collations +# +SET @save_collation_connection= @@collation_connection; +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SET collation_connection='ucs2_bin'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @@collation_connection= @save_collation_connection; +# # End of 10.9 Test # diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 9bf0c9bae05..23a703ca716 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -1772,6 +1772,32 @@ DROP TABLE t1; SELECT JSON_EXTRACT('{ "my-key": 1 }', '$."my-key"'); SELECT JSON_EXTRACT('{ "my-key": 1 }', '$.my-key'); +--echo # +--echo # MDEV-23187: Assorted assertion failures in json_find_path with certain collations +--echo # + + +SET @save_collation_connection= @@collation_connection; + +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SET collation_connection='ucs2_bin'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + + +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + +SET @@collation_connection= @save_collation_connection; + + --echo # --echo # End of 10.9 Test --echo # diff --git a/strings/json_lib.c b/strings/json_lib.c index 903dec978b4..920fb1d4a89 100644 --- a/strings/json_lib.c +++ b/strings/json_lib.c @@ -1324,7 +1324,7 @@ int json_skip_key(json_engine_t *j) } -#define SKIPPED_STEP_MARK ((int) ~0) +#define SKIPPED_STEP_MARK INT_MAX32 /* Current step of the patch matches the JSON construction. From caeff1357939daa708f7e68d51ab5ded2a929b97 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Otto=20Kek=C3=A4l=C3=A4inen?= Date: Mon, 15 May 2023 23:32:30 -0700 Subject: [PATCH 22/76] Remove CODEOWNERS as obsolete The CODEOWNERS was added almost 3 years ago but never saw any adoption. Only one person used it (me) to mark what files I maintain and for which I wish to review commits. 
No other maintainers or code paths were added, so clean it away for clarity. --- .github/CODEOWNERS | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 538007ed62d..00000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ -/debian @ottok - From e0084b9d315f10e3ceb578b65e144d751b208bf1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2023 12:19:26 +0300 Subject: [PATCH 23/76] MDEV-31234 InnoDB does not free UNDO after the fix of MDEV-30671 trx_purge_truncate_history(): Only call trx_purge_truncate_rseg_history() if the rollback segment is safe to process. This will avoid leaking undo log pages that are not yet ready to be processed. This fixes a regression that was introduced in commit 0de3be8cfdfc26f5c236eaefe12d03c7b4af22c8 (MDEV-30671). trx_sys_t::any_active_transactions(): Separately count XA PREPARE transactions. srv_purge_should_exit(): Terminate slow shutdown if the history size does not change and XA PREPARE transactions exist in the system. This will avoid a hang of the test innodb.recovery_shutdown. Tested by: Matthias Leich --- storage/innobase/include/trx0sys.h | 2 +- storage/innobase/srv/srv0srv.cc | 41 ++++++++++++++++++------------ storage/innobase/trx/trx0purge.cc | 10 +++----- storage/innobase/trx/trx0sys.cc | 24 +++++++++++++---- 4 files changed, 48 insertions(+), 29 deletions(-) diff --git a/storage/innobase/include/trx0sys.h b/storage/innobase/include/trx0sys.h index e033a3e1fe4..016ac0b1363 100644 --- a/storage/innobase/include/trx0sys.h +++ b/storage/innobase/include/trx0sys.h @@ -1055,7 +1055,7 @@ public: void close(); /** @return total number of active (non-prepared) transactions */ - ulint any_active_transactions(); + size_t any_active_transactions(size_t *prepared= nullptr); /** diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 57aa4bef9fe..50569f810ea 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1697,7 +1697,7 @@ void srv_master_callback(void*) } /** @return whether purge should exit due to shutdown */ -static bool srv_purge_should_exit() +static bool srv_purge_should_exit(size_t old_history_size) { ut_ad(srv_shutdown_state <= SRV_SHUTDOWN_CLEANUP); @@ -1708,7 +1708,12 @@ static bool srv_purge_should_exit() return true; /* Slow shutdown was requested. 
*/ - if (const size_t history_size= trx_sys.rseg_history_len) + size_t prepared, active= trx_sys.any_active_transactions(&prepared); + const size_t history_size= trx_sys.rseg_history_len; + + if (!history_size); + else if (!active && history_size == old_history_size && prepared); + else { static time_t progress_time; time_t now= time(NULL); @@ -1725,7 +1730,7 @@ static bool srv_purge_should_exit() return false; } - return !trx_sys.any_active_transactions(); + return !active; } /*********************************************************************//** @@ -1845,7 +1850,7 @@ static size_t srv_do_purge(ulint* n_total_purged) *n_total_purged += n_pages_purged; } while (n_pages_purged > 0 && !purge_sys.paused() - && !srv_purge_should_exit()); + && !srv_purge_should_exit(rseg_history_len)); return(rseg_history_len); } @@ -1960,7 +1965,7 @@ static void purge_coordinator_callback_low() } } while ((purge_sys.enabled() && !purge_sys.paused()) || - !srv_purge_should_exit()); + !srv_purge_should_exit(trx_sys.rseg_history_len)); } static void purge_coordinator_callback(void*) @@ -2031,15 +2036,19 @@ ulint srv_get_task_queue_length() /** Shut down the purge threads. */ void srv_purge_shutdown() { - if (purge_sys.enabled()) { - if (!srv_fast_shutdown && !opt_bootstrap) - srv_update_purge_thread_count(innodb_purge_threads_MAX); - while(!srv_purge_should_exit()) { - ut_a(!purge_sys.paused()); - srv_wake_purge_thread_if_not_active(); - purge_coordinator_task.wait(); - } - purge_sys.coordinator_shutdown(); - srv_shutdown_purge_tasks(); - } + if (purge_sys.enabled()) + { + if (!srv_fast_shutdown && !opt_bootstrap) + srv_update_purge_thread_count(innodb_purge_threads_MAX); + size_t history_size= trx_sys.rseg_history_len; + while (!srv_purge_should_exit(history_size)) + { + history_size= trx_sys.rseg_history_len; + ut_a(!purge_sys.paused()); + srv_wake_purge_thread_if_not_active(); + purge_coordinator_task.wait(); + } + purge_sys.coordinator_shutdown(); + srv_shutdown_purge_tasks(); + } } diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index b22a85f4646..97979a3fefe 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -448,12 +448,7 @@ func_exit: prev_hdr_addr.boffset = static_cast(prev_hdr_addr.boffset - TRX_UNDO_HISTORY_NODE); - if (!rseg.trx_ref_count - && rseg.needs_purge <= (purge_sys.head.trx_no - ? 
purge_sys.head.trx_no - : purge_sys.tail.trx_no) - && mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE - + block->frame) + if (mach_read_from_2(TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + block->frame) == TRX_UNDO_TO_PURGE && !mach_read_from_2(block->frame + hdr_addr.boffset + TRX_UNDO_NEXT_LOG)) { @@ -544,7 +539,8 @@ static void trx_purge_truncate_history() ut_ad(rseg->id == i); ut_ad(rseg->is_persistent()); mutex_enter(&rseg->mutex); - trx_purge_truncate_rseg_history(*rseg, head); + if (!rseg->trx_ref_count && rseg->needs_purge <= head.trx_no) + trx_purge_truncate_rseg_history(*rseg, head); mutex_exit(&rseg->mutex); } } diff --git a/storage/innobase/trx/trx0sys.cc b/storage/innobase/trx/trx0sys.cc index bcde969eb41..ab3de55db64 100644 --- a/storage/innobase/trx/trx0sys.cc +++ b/storage/innobase/trx/trx0sys.cc @@ -325,15 +325,29 @@ trx_sys_t::close() } /** @return total number of active (non-prepared) transactions */ -ulint trx_sys_t::any_active_transactions() +size_t trx_sys_t::any_active_transactions(size_t *prepared) { - uint32_t total_trx= 0; + size_t total_trx= 0, prepared_trx= 0; - trx_sys.trx_list.for_each([&total_trx](const trx_t &trx) { - if (trx.state == TRX_STATE_COMMITTED_IN_MEMORY || - (trx.state == TRX_STATE_ACTIVE && trx.id)) + trx_sys.trx_list.for_each([&](const trx_t &trx) { + switch (trx.state) { + case TRX_STATE_NOT_STARTED: + break; + case TRX_STATE_ACTIVE: + if (!trx.id) + break; + /* fall through */ + case TRX_STATE_COMMITTED_IN_MEMORY: total_trx++; + break; + case TRX_STATE_PREPARED: + case TRX_STATE_PREPARED_RECOVERED: + prepared_trx++; + } }); + if (prepared) + *prepared= prepared_trx; + return total_trx; } From e5933b99d5dd25acceecafe5bdfefe016176f68a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2023 12:25:30 +0300 Subject: [PATCH 24/76] MDEV-31234 related cleanup trx_purge_free_segment(), trx_purge_truncate_rseg_history(): Replace some unreachable code with debug assertions. A buffer-fix does prevent pages from being evicted from the buffer pool; see buf_page_t::can_relocate(). 
Tested by: Matthias Leich --- storage/innobase/trx/trx0purge.cc | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 19504110c21..6541ede447e 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -378,7 +378,7 @@ static void trx_purge_free_segment(buf_block_t *block, mtr_t &mtr) block->page.frame, &mtr)) { block->fix(); - const page_id_t id{block->page.id()}; + ut_d(const page_id_t id{block->page.id()}); mtr.commit(); /* NOTE: If the server is killed after the log that was produced up to this point was written, and before the log from the mtr.commit() @@ -390,16 +390,8 @@ static void trx_purge_free_segment(buf_block_t *block, mtr_t &mtr) log_free_check(); mtr.start(); block->page.lock.x_lock(); - if (UNIV_UNLIKELY(block->page.id() != id)) - { - block->unfix(); - block->page.lock.x_unlock(); - block= buf_page_get_gen(id, 0, RW_X_LATCH, nullptr, BUF_GET, &mtr); - if (!block) - return; - } - else - mtr.memo_push(block, MTR_MEMO_PAGE_X_MODIFY); + ut_ad(block->page.id() == id); + mtr.memo_push(block, MTR_MEMO_PAGE_X_MODIFY); } while (!fseg_free_step(TRX_UNDO_SEG_HDR + TRX_UNDO_FSEG_HEADER + @@ -421,7 +413,6 @@ trx_purge_truncate_rseg_history(trx_rseg_t& rseg, mtr.start(); dberr_t err; -reget: buf_block_t *rseg_hdr= rseg.get(&mtr, &err); if (!rseg_hdr) { @@ -520,12 +511,7 @@ loop: log_free_check(); mtr.start(); rseg_hdr->page.lock.x_lock(); - if (UNIV_UNLIKELY(rseg_hdr->page.id() != rseg.page_id())) - { - rseg_hdr->unfix(); - rseg_hdr->page.lock.x_unlock(); - goto reget; - } + ut_ad(rseg_hdr->page.id() == rseg.page_id()); mtr.memo_push(rseg_hdr, MTR_MEMO_PAGE_X_MODIFY); goto loop; From 542278479261aa9e5124d0095fb4f77ba5f3fa63 Mon Sep 17 00:00:00 2001 From: Vlad Lesin Date: Fri, 12 May 2023 17:20:03 +0300 Subject: [PATCH 25/76] MDEV-31256 fil_node_open_file() releases fil_system.mutex allowing other thread to open its file node There is room between mutex_exit(&fil_system.mutex) and mutex_enter(&fil_system.mutex) calls in fil_node_open_file(). During this room another thread can open the node, and ut_ad(!node->is_open()) assertion in fil_node_open_file_low() can fail. The fix is not to open node if it was already opened by another thread. --- storage/innobase/fil/fil0fil.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index fd2404a009a..58261d27d8e 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -458,7 +458,9 @@ static bool fil_node_open_file(fil_node_t *node) } } - return fil_node_open_file_low(node); + /* The node can be opened beween releasing and acquiring fil_system.mutex + in the above code */ + return node->is_open() || fil_node_open_file_low(node); } /** Close the file handle. */ From 2f9e264781f702b8da1ed418ac9f4f5e8f8aa843 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2023 15:15:38 +0300 Subject: [PATCH 26/76] MDEV-29911 InnoDB recovery and mariadb-backup --prepare fail to report detailed progress The progress reporting of InnoDB crash recovery was rather intermittent. Nothing was reported during the single-threaded log record parsing, which could consume minutes when parsing a large log. During log application, there only was progress reporting in background threads that would be invoked on data page read completion. 
The progress reporting here will be detailed like this:

InnoDB: Starting crash recovery from checkpoint LSN=503549688
InnoDB: Parsed redo log up to LSN=1990840177; to recover: 124806 pages
InnoDB: Parsed redo log up to LSN=2729777071; to recover: 186123 pages
InnoDB: Parsed redo log up to LSN=3488599173; to recover: 248397 pages
InnoDB: Parsed redo log up to LSN=4177856618; to recover: 306469 pages
InnoDB: Multi-batch recovery needed at LSN 4189599815
InnoDB: End of log at LSN=4483551634
InnoDB: To recover: LSN 4189599815/4483551634; 307490 pages
InnoDB: To recover: LSN 4189599815/4483551634; 197159 pages
InnoDB: To recover: LSN 4189599815/4483551634; 67623 pages
InnoDB: Parsed redo log up to LSN=4353924218; to recover: 102083 pages
...
InnoDB: log sequence number 4483551634
...

The previous messages "Starting a batch to recover" or "Starting a final batch to recover" will be replaced by "To recover: ... pages" messages. If a batch lasts longer than 15 seconds, progress will be reported every 15 seconds, showing the number of remaining pages. For a non-final batch, the "To recover:" message includes two end LSNs: that of the batch, and that of the recovered log. This is the primary measure of progress. The batch will end once the number of pages to recover reaches 0.

If recovery is possible in a single batch, the output will look like this, with a shorter "To recover:" message that counts only the remaining pages:

InnoDB: Starting crash recovery from checkpoint LSN=503549688
InnoDB: Parsed redo log up to LSN=1998701027; to recover: 125560 pages
InnoDB: Parsed redo log up to LSN=2734136874; to recover: 186446 pages
InnoDB: Parsed redo log up to LSN=3499505504; to recover: 249378 pages
InnoDB: Parsed redo log up to LSN=4183247844; to recover: 306964 pages
InnoDB: End of log at LSN=4483551634
...
InnoDB: To recover: 331797 pages
...
InnoDB: log sequence number 4483551634
...

We will also speed up recovery by improving the memory management and implementing multi-threaded recovery of data pages that will not need to be read into the buffer pool ("fake read"). Log application in the "fake read" threads will be protected by an atomic being_recovered field and an exclusive buf_page_t::latch.

Recovery will reserve for data pages two thirds of the buffer pool, or 256 pages, whichever is smaller. Previously, we could only use at most one third of the buffer pool for buffered log records. This would typically mean that with large buffer pools, recovery unnecessarily consisted of multiple batches.

If recovery runs out of memory, it will "roll back" or "rewind" the current mini-transaction. The recv_sys.lsn and recv_sys.pages will correspond to the "out of memory LSN", at the end of the previous complete mini-transaction.

If recovery runs out of memory while executing the final recovery batch, we can simply invoke recv_sys.apply(false) to make room and resume parsing.

If recovery runs out of memory before the final batch, we will scan the redo log to the end (recv_sys.scanned_lsn) and check for any missing or inconsistent files. If recv_init_crash_recovery_spaces() does not report any potentially missing tablespaces, we can make use of the already stored recv_sys.pages and only rewind to the "out of memory LSN". Else, we must keep parsing and invoking recv_validate_tablespace() until an error has been found or everything has been resolved, and ultimately rewind to the checkpoint LSN.
recv_sys_t::pages_it: A cached iterator to recv_sys.pages.

recv_sys_t::parse_mtr(): Remove an ATTRIBUTE_NOINLINE that would prevent tail call optimization in recv_sys_t::parse_pmem().

recv_sys_t::parse(), recv_sys_t::parse_mtr(), recv_sys_t::parse_pmem(): Add a template parameter. Redo log record parsing (store=false) is better specialized from store=true (with bool if_exists) so that we can avoid some conditional branches in frequently invoked low-level code.

recv_sys_t::is_memory_exhausted(): Remove. The special parse() status GOT_OOM will report an out-of-memory situation at the low level.

recv_sys_t::rewind(), page_recv_t::recs_t::rewind(): Remove all log starting with a specific LSN.

recv_scan_log(): Separate some code for only parsing, not storing log. In rewound_lsn, remember the LSN at which last_phase=false recovery ran out of memory. This is where the next call to recv_scan_log() will resume storing the log. This replaces recv_sys.last_stored_lsn.

recv_sys_t::parse(): Evaluate the template parameter store in a few more cases, to allow dead code to be eliminated at compile time.

recv_sys_t::scanned_lsn: The end of the log found by recv_scan_log(). The special value 1 means that recv_sys has been initialized but no log has been parsed.

IORequest::write_complete(), IORequest::read_complete(): Replaces fil_aio_callback().

read_io_callback(), write_io_callback(): Replaces io_callback().

IORequest::fake_read_complete(), fake_io_callback(), os_fake_read(): Process a "fake read" request for concurrent recovery.

recv_sys_t::apply_batch(): Choose a number of successive pages for a recovery batch.

recv_sys_t::erase(recv_sys_t::map::iterator): Remove log records for a page whose recovery is not in progress. Log application threads will not invoke this; they will only set being_recovered=-1 to indicate that the entry is no longer needed.

recv_sys_t::garbage_collect(): Remove all being_recovered=-1 entries.

recv_sys_t::wait_for_pool(): Wait for some space to become available in the buffer pool.

mlog_init_t::mark_ibuf_exist(): Avoid calls to recv_sys::recover_low() via ibuf_page_exists() and buf_page_get_low(). Such calls would lead to double locking of recv_sys.mutex, which depending on implementation could cause a deadlock. We will use lower-level calls to look up index pages.

buf_LRU_block_remove_hashed(): Disable consistency checks for freed ROW_FORMAT=COMPRESSED pages. Their contents could be uninitialized garbage. This fixes an occasional failure of the test innodb.innodb_bulk_create_index_debug.
Tested by: Matthias Leich --- extra/mariabackup/xtrabackup.cc | 4 +- storage/innobase/buf/buf0dblwr.cc | 2 +- storage/innobase/buf/buf0flu.cc | 1 + storage/innobase/buf/buf0lru.cc | 9 +- storage/innobase/buf/buf0rea.cc | 85 +- storage/innobase/fil/fil0fil.cc | 70 +- storage/innobase/include/buf0buf.h | 3 +- storage/innobase/include/buf0rea.h | 11 +- storage/innobase/include/log0recv.h | 209 ++-- storage/innobase/include/os0file.h | 9 + storage/innobase/log/log0recv.cc | 1766 ++++++++++++++++----------- storage/innobase/os/os0file.cc | 90 +- 12 files changed, 1332 insertions(+), 927 deletions(-) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 6f42a9be05a..180616f37e9 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -3146,7 +3146,7 @@ static bool xtrabackup_copy_logfile() if (log_sys.buf[recv_sys.offset] <= 1) break; - if (recv_sys.parse_mtr(STORE_NO) == recv_sys_t::OK) + if (recv_sys.parse_mtr(false) == recv_sys_t::OK) { do { @@ -3156,7 +3156,7 @@ static bool xtrabackup_copy_logfile() sequence_offset)); *seq= 1; } - while ((r= recv_sys.parse_mtr(STORE_NO)) == recv_sys_t::OK); + while ((r= recv_sys.parse_mtr(false)) == recv_sys_t::OK); if (ds_write(dst_log_file, log_sys.buf + start_offset, recv_sys.offset - start_offset)) diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 1260145ed1c..510ad02256d 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -372,7 +372,7 @@ void buf_dblwr_t::recover() const uint32_t space_id= page_get_space_id(page); const page_id_t page_id(space_id, page_no); - if (recv_sys.lsn < lsn) + if (recv_sys.scanned_lsn < lsn) { ib::info() << "Ignoring a doublewrite copy of page " << page_id << " with future log sequence number " << lsn; diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 5b8ebde55da..91c8de3191b 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2591,6 +2591,7 @@ ATTRIBUTE_COLD void buf_flush_page_cleaner_init() /** Flush the buffer pool on shutdown. */ ATTRIBUTE_COLD void buf_flush_buffer_pool() { + ut_ad(!os_aio_pending_reads()); ut_ad(!buf_page_cleaner_is_active); ut_ad(!buf_flush_sync_lsn); diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 724aa641f12..8a25e9c5266 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1093,7 +1093,11 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, ut_a(!zip || !bpage->oldest_modification()); ut_ad(bpage->zip_size()); - + /* Skip consistency checks if the page was freed. + In recovery, we could get a sole FREE_PAGE record + and nothing else, for a ROW_FORMAT=COMPRESSED page. + Its contents would be garbage. */ + if (!bpage->is_freed()) switch (fil_page_get_type(page)) { case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: @@ -1224,6 +1228,7 @@ void buf_pool_t::corrupted_evict(buf_page_t *bpage, uint32_t state) buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(id.fold()); page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); + recv_sys.free_corrupted_page(id); mysql_mutex_lock(&mutex); hash_lock.lock(); @@ -1248,8 +1253,6 @@ void buf_pool_t::corrupted_evict(buf_page_t *bpage, uint32_t state) buf_LRU_block_free_hashed_page(reinterpret_cast(bpage)); mysql_mutex_unlock(&mutex); - - recv_sys.free_corrupted_page(id); } /** Update buf_pool.LRU_old_ratio. 
diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index b2278039951..cf76a9bd93a 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -655,60 +655,35 @@ failed: return count; } -/** @return whether a page has been freed */ -inline bool fil_space_t::is_freed(uint32_t page) +/** Schedule a page for recovery. +@param space tablespace +@param page_id page identifier +@param recs log records +@param init page initialization, or nullptr if the page needs to be read */ +void buf_read_recover(fil_space_t *space, const page_id_t page_id, + page_recv_t &recs, recv_init *init) { - std::lock_guard freed_lock(freed_range_mutex); - return freed_ranges.contains(page); -} - -/** Issues read requests for pages which recovery wants to read in. -@param space_id tablespace identifier -@param page_nos page numbers to read, in ascending order */ -void buf_read_recv_pages(uint32_t space_id, st_::span page_nos) -{ - fil_space_t* space = fil_space_t::get(space_id); - - if (!space) { - /* The tablespace is missing or unreadable: do nothing */ - return; - } - - const ulint zip_size = space->zip_size(); - - for (ulint i = 0; i < page_nos.size(); i++) { - - /* Ignore if the page already present in freed ranges. */ - if (space->is_freed(page_nos[i])) { - continue; - } - - const page_id_t cur_page_id(space_id, page_nos[i]); - - ulint limit = 0; - for (ulint j = 0; j < buf_pool.n_chunks; j++) { - limit += buf_pool.chunks[j].size / 2; - } - - if (os_aio_pending_reads() >= limit) { - os_aio_wait_until_no_pending_reads(false); - } - - space->reacquire(); - switch (buf_read_page_low(space, false, BUF_READ_ANY_PAGE, - cur_page_id, zip_size, true)) { - case DB_SUCCESS: case DB_SUCCESS_LOCKED_REC: - break; - default: - sql_print_error("InnoDB: Recovery failed to read page " - UINT32PF " from %s", - cur_page_id.page_no(), - space->chain.start->name); - } - } - - - DBUG_PRINT("ib_buf", ("recovery read (%zu pages) for %s", - page_nos.size(), space->chain.start->name)); - space->release(); + ut_ad(space->id == page_id.space()); + space->reacquire(); + const ulint zip_size= space->zip_size(); + + if (init) + { + if (buf_page_t *bpage= buf_page_init_for_read(BUF_READ_ANY_PAGE, page_id, + zip_size, true)) + { + ut_ad(bpage->in_file()); + os_fake_read(IORequest{bpage, (buf_tmp_buffer_t*) &recs, + UT_LIST_GET_FIRST(space->chain), + IORequest::READ_ASYNC}, ptrdiff_t(init)); + } + } + else if (dberr_t err= buf_read_page_low(space, false, BUF_READ_ANY_PAGE, + page_id, zip_size, true)) + { + if (err != DB_SUCCESS_LOCKED_REC) + sql_print_error("InnoDB: Recovery failed to read page " + UINT32PF " from %s", + page_id.page_no(), space->chain.start->name); + } } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index be313140225..e4b352a05aa 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2775,53 +2775,55 @@ func_exit: #include -/** Callback for AIO completion */ -void fil_aio_callback(const IORequest &request) +void IORequest::write_complete() const { ut_ad(fil_validate_skip()); - ut_ad(request.node); + ut_ad(node); + ut_ad(is_write()); - if (!request.bpage) + if (!bpage) { ut_ad(!srv_read_only_mode); - if (request.type == IORequest::DBLWR_BATCH) - buf_dblwr.flush_buffered_writes_completed(request); + if (type == IORequest::DBLWR_BATCH) + buf_dblwr.flush_buffered_writes_completed(*this); else - ut_ad(request.type == IORequest::WRITE_ASYNC); -write_completed: - request.node->complete_write(); - } - else if 
(request.is_write()) - { - buf_page_write_complete(request); - goto write_completed; + ut_ad(type == IORequest::WRITE_ASYNC); } else + buf_page_write_complete(*this); + + node->complete_write(); + node->space->release(); +} + +void IORequest::read_complete() const +{ + ut_ad(fil_validate_skip()); + ut_ad(node); + ut_ad(is_read()); + ut_ad(bpage); + + /* IMPORTANT: since i/o handling for reads will read also the insert + buffer in fil_system.sys_space, we have to be very careful not to + introduce deadlocks. We never close fil_system.sys_space data files + and never issue asynchronous reads of change buffer pages. */ + const page_id_t id(bpage->id()); + + if (dberr_t err= bpage->read_complete(*node)) { - ut_ad(request.is_read()); - - /* IMPORTANT: since i/o handling for reads will read also the insert - buffer in fil_system.sys_space, we have to be very careful not to - introduce deadlocks. We never close fil_system.sys_space data - files and never issue asynchronous reads of change buffer pages. */ - const page_id_t id(request.bpage->id()); - - if (dberr_t err= request.bpage->read_complete(*request.node)) + if (recv_recovery_is_on() && !srv_force_recovery) { - if (recv_recovery_is_on() && !srv_force_recovery) - { - mysql_mutex_lock(&recv_sys.mutex); - recv_sys.set_corrupt_fs(); - mysql_mutex_unlock(&recv_sys.mutex); - } - - if (err != DB_FAIL) - ib::error() << "Failed to read page " << id.page_no() - << " from file '" << request.node->name << "': " << err; + mysql_mutex_lock(&recv_sys.mutex); + recv_sys.set_corrupt_fs(); + mysql_mutex_unlock(&recv_sys.mutex); } + + if (err != DB_FAIL) + ib::error() << "Failed to read page " << id.page_no() + << " from file '" << node->name << "': " << err; } - request.node->space->release(); + node->space->release(); } /** Flush to disk the writes in file spaces of the given type diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 4295c3ba342..6d3ec65b1d3 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -75,8 +75,7 @@ struct buf_pool_info_t ulint flush_list_len; /*!< Length of buf_pool.flush_list */ ulint n_pend_unzip; /*!< buf_pool.n_pend_unzip, pages pending decompress */ - ulint n_pend_reads; /*!< buf_pool.n_pend_reads, pages - pending read */ + ulint n_pend_reads; /*!< os_aio_pending_reads() */ ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */ ulint n_pending_flush_list; /*!< Pages pending flush in FLUSH LIST */ diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h index 4ec8938c689..3dd085dda5c 100644 --- a/storage/innobase/include/buf0rea.h +++ b/storage/innobase/include/buf0rea.h @@ -102,10 +102,13 @@ which could result in a deadlock if the OS does not support asynchronous io. ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf); -/** Issue read requests for pages that need to be recovered. -@param space_id tablespace identifier -@param page_nos page numbers to read, in ascending order */ -void buf_read_recv_pages(uint32_t space_id, st_::span page_nos); +/** Schedule a page for recovery. 
+@param space tablespace +@param page_id page identifier +@param recs log records +@param init page initialization, or nullptr if the page needs to be read */ +void buf_read_recover(fil_space_t *space, const page_id_t page_id, + page_recv_t &recs, recv_init *init); /** @name Modes used in read-ahead @{ */ /** read only pages belonging to the insert buffer tree */ diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index e787d81e8c2..e642b501409 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -38,9 +38,9 @@ Created 9/20/1997 Heikki Tuuri #define recv_recovery_is_on() UNIV_UNLIKELY(recv_sys.recovery_on) ATTRIBUTE_COLD MY_ATTRIBUTE((nonnull, warn_unused_result)) -/** Apply any buffered redo log to a page that was just read from a data file. -@param[in,out] space tablespace -@param[in,out] bpage buffer pool page +/** Apply any buffered redo log to a page. +@param space tablespace +@param bpage buffer pool page @return whether the page was recovered correctly */ bool recv_recover_page(fil_space_t* space, buf_page_t* bpage); @@ -49,17 +49,6 @@ of first system tablespace page @return error code or DB_SUCCESS */ dberr_t recv_recovery_from_checkpoint_start(); -/** Whether to store redo log records in recv_sys.pages */ -enum store_t { - /** Do not store redo log records. */ - STORE_NO, - /** Store redo log records. */ - STORE_YES, - /** Store redo log records if the tablespace exists. */ - STORE_IF_EXISTS -}; - - /** Report an operation to create, delete, or rename a file during backup. @param[in] space_id tablespace identifier @param[in] type file operation redo log type @@ -125,21 +114,15 @@ struct recv_dblwr_t list pages; }; -/** the recovery state and buffered records for a page */ +/** recv_sys.pages entry; protected by recv_sys.mutex */ struct page_recv_t { - /** Recovery state; protected by recv_sys.mutex */ - enum - { - /** not yet processed */ - RECV_NOT_PROCESSED, - /** not processed; the page will be reinitialized */ - RECV_WILL_NOT_READ, - /** page is being read */ - RECV_BEING_READ, - /** log records are being applied on the page */ - RECV_BEING_PROCESSED - } state= RECV_NOT_PROCESSED; + /** Recovery status: 0=not in progress, 1=log is being applied, + -1=log has been applied and the entry may be erased. + Transitions from 1 to -1 are NOT protected by recv_sys.mutex. */ + Atomic_relaxed being_processed{0}; + /** Whether reading the page will be skipped */ + bool skip_read= false; /** Latest written byte offset when applying the log records. @see mtr_t::m_last_offset */ uint16_t last_offset= 1; @@ -162,6 +145,9 @@ struct page_recv_t head= recs; tail= recs; } + /** Remove the last records for the page + @param start_lsn start of the removed log */ + ATTRIBUTE_COLD void rewind(lsn_t start_lsn); /** @return the last log snippet */ const log_rec_t* last() const { return tail; } @@ -180,8 +166,8 @@ struct page_recv_t iterator begin() { return head; } iterator end() { return NULL; } bool empty() const { ut_ad(!head == !tail); return !head; } - /** Clear and free the records; @see recv_sys_t::alloc() */ - inline void clear(); + /** Clear and free the records; @see recv_sys_t::add() */ + void clear(); } log; /** Trim old log records for a page. @@ -190,21 +176,27 @@ struct page_recv_t inline bool trim(lsn_t start_lsn); /** Ignore any earlier redo log records for this page. 
*/ inline void will_not_read(); - /** @return whether the log records for the page are being processed */ - bool is_being_processed() const { return state == RECV_BEING_PROCESSED; } +}; + +/** A page initialization operation that was parsed from the redo log */ +struct recv_init +{ + /** log sequence number of the page initialization */ + lsn_t lsn; + /** Whether btr_page_create() avoided a read of the page. + At the end of the last recovery batch, mark_ibuf_exist() + will mark pages for which this flag is set. */ + bool created; }; /** Recovery system data structure */ struct recv_sys_t { - /** mutex protecting apply_log_recs and page_recv_t::state */ - mysql_mutex_t mutex; + using init= recv_init; + + /** mutex protecting this as well as some of page_recv_t */ + alignas(CPU_LEVEL1_DCACHE_LINESIZE) mysql_mutex_t mutex; private: - /** condition variable for - !apply_batch_on || pages.empty() || found_corrupt_log || found_corrupt_fs */ - pthread_cond_t cond; - /** whether recv_apply_hashed_log_recs() is running */ - bool apply_batch_on; /** set when finding a corrupt log block or record, or there is a log parsing buffer overflow */ bool found_corrupt_log; @@ -226,6 +218,8 @@ public: size_t offset; /** log sequence number of the first non-parsed record */ lsn_t lsn; + /** log sequence number of the last parsed mini-transaction */ + lsn_t scanned_lsn; /** log sequence number at the end of the FILE_CHECKPOINT record, or 0 */ lsn_t file_checkpoint; /** the time when progress was last reported */ @@ -238,6 +232,9 @@ public: map pages; private: + /** iterator to pages, used by parse() */ + map::iterator pages_it; + /** Process a record that indicates that a tablespace size is being shrunk. @param page_id first page that is not in the file @param lsn log sequence number of the shrink operation */ @@ -257,30 +254,42 @@ public: /** The contents of the doublewrite buffer */ recv_dblwr_t dblwr; - /** Last added LSN to pages, before switching to STORE_NO */ - lsn_t last_stored_lsn= 0; - inline void read(os_offset_t offset, span buf); inline size_t files_size(); void close_files() { files.clear(); files.shrink_to_fit(); } + /** Advance pages_it if it matches the iterator */ + void pages_it_invalidate(const map::iterator &p) + { + mysql_mutex_assert_owner(&mutex); + if (pages_it == p) + pages_it++; + } + /** Invalidate pages_it if it points to the given tablespace */ + void pages_it_invalidate(uint32_t space_id) + { + mysql_mutex_assert_owner(&mutex); + if (pages_it != pages.end() && pages_it->first.space() == space_id) + pages_it= pages.end(); + } + private: /** Attempt to initialize a page based on redo log records. - @param page_id page identifier - @param p iterator pointing to page_id + @param p iterator @param mtr mini-transaction @param b pre-allocated buffer pool block + @param init page initialization @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ - inline buf_block_t *recover_low(const page_id_t page_id, map::iterator &p, - mtr_t &mtr, buf_block_t *b); + inline buf_block_t *recover_low(const map::iterator &p, mtr_t &mtr, + buf_block_t *b, init &init); /** Attempt to initialize a page based on redo log records. 
@param page_id page identifier @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ - buf_block_t *recover_low(const page_id_t page_id); + ATTRIBUTE_COLD buf_block_t *recover_low(const page_id_t page_id); /** All found log files (multiple ones are possible if we are upgrading from before MariaDB Server 10.5.1) */ @@ -289,10 +298,27 @@ private: /** Base node of the redo block list. List elements are linked via buf_block_t::unzip_LRU. */ UT_LIST_BASE_NODE_T(buf_block_t) blocks; + + /** Allocate a block from the buffer pool for recv_sys.pages */ + ATTRIBUTE_COLD buf_block_t *add_block(); + + /** Wait for buffer pool to become available. + @param pages number of buffer pool pages needed */ + ATTRIBUTE_COLD void wait_for_pool(size_t pages); + + /** Free log for processed pages. */ + void garbage_collect(); + + /** Apply a recovery batch. + @param space_id current tablespace identifier + @param space current tablespace + @param free_block spare buffer block + @param last_batch whether it is possible to write more redo log + @return whether the caller must provide a new free_block */ + bool apply_batch(uint32_t space_id, fil_space_t *&space, + buf_block_t *&free_block, bool last_batch); + public: - /** Check whether the number of read redo log blocks exceeds the maximum. - @return whether the memory is exhausted */ - inline bool is_memory_exhausted(); /** Apply buffered log to persistent data pages. @param last_batch whether it is possible to write more redo log */ void apply(bool last_batch); @@ -310,7 +336,7 @@ public: /** Clean up after create() */ void close(); - bool is_initialised() const { return last_stored_lsn != 0; } + bool is_initialised() const { return scanned_lsn != 0; } /** Find the latest checkpoint. @return error code or DB_SUCCESS */ @@ -321,60 +347,76 @@ public: @param start_lsn start LSN of the mini-transaction @param lsn @see mtr_t::commit_lsn() @param l redo log snippet - @param len length of l, in bytes */ - inline void add(map::iterator it, lsn_t start_lsn, lsn_t lsn, - const byte *l, size_t len); + @param len length of l, in bytes + @return whether we ran out of memory */ + bool add(map::iterator it, lsn_t start_lsn, lsn_t lsn, + const byte *l, size_t len); - enum parse_mtr_result { OK, PREMATURE_EOF, GOT_EOF }; + /** Parsing result */ + enum parse_mtr_result { + /** a record was successfully parsed */ + OK, + /** the log ended prematurely (need to read more) */ + PREMATURE_EOF, + /** the end of the log was reached */ + GOT_EOF, + /** parse(l, false) ran out of memory */ + GOT_OOM + }; private: /** Parse and register one log_t::FORMAT_10_8 mini-transaction. - @param store whether to store the records - @param l log data source */ + @tparam store whether to store the records + @param l log data source + @param if_exists if store: whether to check if the tablespace exists */ + template + inline parse_mtr_result parse(source &l, bool if_exists) noexcept; + + /** Rewind a mini-transaction when parse() runs out of memory. + @param l log data source + @param begin start of the mini-transaction */ template - inline parse_mtr_result parse(store_t store, source& l) noexcept; + ATTRIBUTE_COLD void rewind(source &l, source &begin) noexcept; + + /** Report progress in terms of LSN or pages remaining */ + ATTRIBUTE_COLD void report_progress() const; public: /** Parse and register one log_t::FORMAT_10_8 mini-transaction, handling log_sys.is_pmem() buffer wrap-around. 
- @param store whether to store the records */ - static parse_mtr_result parse_mtr(store_t store) noexcept; + @tparam store whether to store the records + @param if_exists if store: whether to check if the tablespace exists */ + template + static parse_mtr_result parse_mtr(bool if_exists) noexcept; /** Parse and register one log_t::FORMAT_10_8 mini-transaction, handling log_sys.is_pmem() buffer wrap-around. - @param store whether to store the records */ - static parse_mtr_result parse_pmem(store_t store) noexcept + @tparam store whether to store the records + @param if_exists if store: whether to check if the tablespace exists */ + template + static parse_mtr_result parse_pmem(bool if_exists) noexcept #ifdef HAVE_PMEM ; #else - { return parse_mtr(store); } + { return parse_mtr(if_exists); } #endif + /** Erase log records for a page. */ + void erase(map::iterator p); + /** Clear a fully processed set of stored redo log records. */ - inline void clear(); + void clear(); /** Determine whether redo log recovery progress should be reported. @param time the current time @return whether progress should be reported (the last report was at least 15 seconds ago) */ - bool report(time_t time) - { - if (time - progress_time < 15) - return false; - - progress_time= time; - return true; - } + bool report(time_t time); /** The alloc() memory alignment, in bytes */ static constexpr size_t ALIGNMENT= sizeof(size_t); - /** Allocate memory for log_rec_t - @param len allocation size, in bytes - @return pointer to len bytes of memory (never NULL) */ - inline void *alloc(size_t len); - /** Free a redo log snippet. - @param data buffer returned by alloc() */ + @param data buffer allocated in add() */ inline void free(const void *data); /** Remove records for a corrupted page. @@ -386,8 +428,6 @@ public: ATTRIBUTE_COLD void set_corrupt_fs(); /** Flag log file corruption during recovery. */ ATTRIBUTE_COLD void set_corrupt_log(); - /** Possibly finish a recovery batch. */ - inline void maybe_finish_batch(); /** @return whether data file corruption was found */ bool is_corrupt_fs() const { return UNIV_UNLIKELY(found_corrupt_fs); } @@ -405,13 +445,14 @@ public: } /** Try to recover a tablespace that was not readable earlier - @param p iterator, initially pointing to page_id_t{space_id,0}; - the records will be freed and the iterator advanced + @param p iterator @param name tablespace file name @param free_block spare buffer block - @return whether recovery failed */ - bool recover_deferred(map::iterator &p, const std::string &name, - buf_block_t *&free_block); + @return recovered tablespace + @retval nullptr if recovery failed */ + fil_space_t *recover_deferred(const map::iterator &p, + const std::string &name, + buf_block_t *&free_block); }; /** The recovery system */ diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 13f9d3de3f8..54f7ceeb4c0 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -212,6 +212,10 @@ public: bool is_LRU() const { return (type & (WRITE_LRU ^ WRITE_ASYNC)) != 0; } bool is_async() const { return (type & (READ_SYNC ^ READ_ASYNC)) != 0; } + void write_complete() const; + void read_complete() const; + void fake_read_complete(os_offset_t offset) const; + /** If requested, free storage space associated with a section of the file. @param off byte offset from the start (SEEK_SET) @param len size of the hole in bytes @@ -1040,6 +1044,11 @@ int os_aio_init(); Frees the asynchronous io system. 
*/ void os_aio_free(); +/** Submit a fake read request during crash recovery. +@param type fake read request +@param offset additional context */ +void os_fake_read(const IORequest &type, os_offset_t offset); + /** Request a read or write. @param type I/O request @param buf buffer diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 37a496725fc..4619786ee8d 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -738,7 +738,7 @@ static struct { retry: log_sys.latch.wr_unlock(); - bool fail= false; + fil_space_t *space= fil_system.sys_space; buf_block_t *free_block= buf_LRU_get_free_block(false); log_sys.latch.wr_lock(SRW_LOCK_CALL); mysql_mutex_lock(&recv_sys.mutex); @@ -755,11 +755,12 @@ retry: there were no buffered records. Either way, we must create a dummy tablespace with the latest known name, for dict_drop_index_tree(). */ + recv_sys.pages_it_invalidate(space_id); while (p != recv_sys.pages.end() && p->first.space() == space_id) { + ut_ad(!p->second.being_processed); recv_sys_t::map::iterator r= p++; - r->second.log.clear(); - recv_sys.pages.erase(r); + recv_sys.erase(r); } recv_spaces_t::iterator it{recv_spaces.find(space_id)}; if (it != recv_spaces.end()) @@ -782,11 +783,14 @@ retry: } } else - fail= recv_sys.recover_deferred(p, d->second.file_name, free_block); + space= recv_sys.recover_deferred(p, d->second.file_name, free_block); processed: - defers.erase(d++); - if (fail) + auto e= d++; + defers.erase(e); + if (!space) break; + if (space != fil_system.sys_space) + space->release(); if (free_block) continue; mysql_mutex_unlock(&recv_sys.mutex); @@ -797,7 +801,7 @@ processed: mysql_mutex_unlock(&recv_sys.mutex); if (free_block) buf_pool.free_block(free_block); - return fail; + return !space; } /** Create tablespace metadata for a data file that was initially @@ -905,28 +909,191 @@ free_space: } deferred_spaces; +/** Report an operation to create, delete, or rename a file during backup. +@param[in] space_id tablespace identifier +@param[in] type redo log type +@param[in] name file name (not NUL-terminated) +@param[in] len length of name, in bytes +@param[in] new_name new file name (NULL if not rename) +@param[in] new_len length of new_name, in bytes (0 if NULL) */ +void (*log_file_op)(uint32_t space_id, int type, + const byte* name, ulint len, + const byte* new_name, ulint new_len); + +void (*undo_space_trunc)(uint32_t space_id); + +void (*first_page_init)(uint32_t space_id); + +/** Information about initializing page contents during redo log processing. +FIXME: Rely on recv_sys.pages! */ +class mlog_init_t +{ + using map= std::map, + ut_allocator>>; + /** Map of page initialization operations. + FIXME: Merge this to recv_sys.pages! */ + map inits; + + /** Iterator to the last add() or will_avoid_read(), for speeding up + will_avoid_read(). */ + map::iterator i; +public: + /** Constructor */ + mlog_init_t() : i(inits.end()) {} + + /** Record that a page will be initialized by the redo log. 
+ @param page_id page identifier + @param lsn log sequence number + @return whether the state was changed */ + bool add(const page_id_t page_id, lsn_t lsn) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + const recv_init init = { lsn, false }; + std::pair p= + inits.insert(map::value_type(page_id, init)); + ut_ad(!p.first->second.created); + if (p.second) return true; + if (p.first->second.lsn >= lsn) return false; + p.first->second = init; + i = p.first; + return true; + } + + /** Get the last stored lsn of the page id and its respective + init/load operation. + @param page_id page identifier + @return the latest page initialization; + not valid after releasing recv_sys.mutex. */ + recv_init &last(page_id_t page_id) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + return inits.find(page_id)->second; + } + + /** Determine if a page will be initialized or freed after a time. + @param page_id page identifier + @param lsn log sequence number + @return whether page_id will be freed or initialized after lsn */ + bool will_avoid_read(page_id_t page_id, lsn_t lsn) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + if (i != inits.end() && i->first == page_id) + return i->second.lsn > lsn; + i = inits.lower_bound(page_id); + return i != inits.end() && i->first == page_id && i->second.lsn > lsn; + } + + /** At the end of each recovery batch, reset the 'created' flags. */ + void reset() + { + mysql_mutex_assert_owner(&recv_sys.mutex); + ut_ad(recv_no_ibuf_operations); + for (map::value_type &i : inits) + i.second.created= false; + } + + /** During the last recovery batch, mark whether there exist + buffered changes for the pages that were initialized + by buf_page_create() and still reside in the buffer pool. */ + void mark_ibuf_exist() + { + mysql_mutex_assert_owner(&recv_sys.mutex); + + for (const map::value_type &i : inits) + if (i.second.created) + { + auto &chain= buf_pool.page_hash.cell_get(i.first.fold()); + page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); + + hash_lock.lock_shared(); + buf_block_t *block= reinterpret_cast + (buf_pool.page_hash.get(i.first, chain)); + bool got_latch= block && block->page.lock.x_lock_try(); + hash_lock.unlock_shared(); + + if (!block) + continue; + + uint32_t state; + + if (!got_latch) + { + mysql_mutex_lock(&buf_pool.mutex); + block= reinterpret_cast + (buf_pool.page_hash.get(i.first, chain)); + if (!block) + { + mysql_mutex_unlock(&buf_pool.mutex); + continue; + } + + state= block->page.fix(); + mysql_mutex_unlock(&buf_pool.mutex); + if (state < buf_page_t::UNFIXED) + { + block->page.unfix(); + continue; + } + block->page.lock.x_lock(); + state= block->page.unfix(); + ut_ad(state < buf_page_t::READ_FIX); + if (state >= buf_page_t::UNFIXED && block->page.id() == i.first) + goto check_ibuf; + } + else + { + state= block->page.state(); + ut_ad(state >= buf_page_t::FREED); + ut_ad(state < buf_page_t::READ_FIX); + + if (state >= buf_page_t::UNFIXED) + { + check_ibuf: + mysql_mutex_unlock(&recv_sys.mutex); + if (ibuf_page_exists(block->page.id(), block->zip_size())) + block->page.set_ibuf_exist(); + mysql_mutex_lock(&recv_sys.mutex); + } + } + + block->page.lock.x_unlock(); + } + } + + /** Clear the data structure */ + void clear() { inits.clear(); i = inits.end(); } +}; + +static mlog_init_t mlog_init; + /** Try to recover a tablespace that was not readable earlier -@param p iterator, initially pointing to page_id_t{space_id,0}; - the records will be freed and the iterator advanced +@param p iterator to the page @param name tablespace file name 
@param free_block spare buffer block -@return whether recovery failed */ -bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, - const std::string &name, - buf_block_t *&free_block) +@return recovered tablespace +@retval nullptr if recovery failed */ +fil_space_t *recv_sys_t::recover_deferred(const recv_sys_t::map::iterator &p, + const std::string &name, + buf_block_t *&free_block) { mysql_mutex_assert_owner(&mutex); - const page_id_t first{p->first}; - ut_ad(first.space()); + ut_ad(p->first.space()); - recv_spaces_t::iterator it{recv_spaces.find(first.space())}; + recv_spaces_t::iterator it{recv_spaces.find(p->first.space())}; ut_ad(it != recv_spaces.end()); - if (!first.page_no() && p->second.state == page_recv_t::RECV_WILL_NOT_READ) + if (!p->first.page_no() && p->second.skip_read) { mtr_t mtr; - buf_block_t *block= recover_low(first, p, mtr, free_block); + ut_ad(!p->second.being_processed); + p->second.being_processed= 1; + init &init= mlog_init.last(p->first); + mysql_mutex_unlock(&mutex); + buf_block_t *block= recover_low(p, mtr, free_block, init); + mysql_mutex_lock(&mutex); + p->second.being_processed= -1; ut_ad(block == free_block || block == reinterpret_cast(-1)); free_block= nullptr; if (UNIV_UNLIKELY(!block || block == reinterpret_cast(-1))) @@ -939,10 +1106,7 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, const uint32_t page_no= mach_read_from_4(page + FIL_PAGE_OFFSET); const uint32_t size= fsp_header_get_field(page, FSP_SIZE); - ut_ad(it != recv_spaces.end()); - - if (page_id_t{space_id, page_no} == first && size >= 4 && - it != recv_spaces.end() && + if (page_id_t{space_id, page_no} == p->first && size >= 4 && fil_space_t::is_valid_flags(flags, space_id) && fil_space_t::logical_size(flags) == srv_page_size) { @@ -996,10 +1160,10 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, } size_set: node->deferred= false; - space->release(); it->second.space= space; block->page.lock.x_unlock(); - return false; + p->second.being_processed= -1; + return space; } release_and_fail: @@ -1007,179 +1171,34 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, } fail: - ib::error() << "Cannot apply log to " << first + ib::error() << "Cannot apply log to " << p->first << " of corrupted file '" << name << "'"; - return true; + return nullptr; } -/** Report an operation to create, delete, or rename a file during backup. -@param[in] space_id tablespace identifier -@param[in] type redo log type -@param[in] name file name (not NUL-terminated) -@param[in] len length of name, in bytes -@param[in] new_name new file name (NULL if not rename) -@param[in] new_len length of new_name, in bytes (0 if NULL) */ -void (*log_file_op)(uint32_t space_id, int type, - const byte* name, ulint len, - const byte* new_name, ulint new_len); - -void (*undo_space_trunc)(uint32_t space_id); - -void (*first_page_init)(uint32_t space_id); - -/** Information about initializing page contents during redo log processing. -FIXME: Rely on recv_sys.pages! */ -class mlog_init_t -{ -public: - /** A page initialization operation that was parsed from - the redo log */ - struct init { - /** log sequence number of the page initialization */ - lsn_t lsn; - /** Whether btr_page_create() avoided a read of the page. - - At the end of the last recovery batch, mark_ibuf_exist() - will mark pages for which this flag is set. */ - bool created; - }; - -private: - typedef std::map, - ut_allocator > > - map; - /** Map of page initialization operations. 
- FIXME: Merge this to recv_sys.pages! */ - map inits; -public: - /** Record that a page will be initialized by the redo log. - @param[in] page_id page identifier - @param[in] lsn log sequence number - @return whether the state was changed */ - bool add(const page_id_t page_id, lsn_t lsn) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - const init init = { lsn, false }; - std::pair p = inits.insert( - map::value_type(page_id, init)); - ut_ad(!p.first->second.created); - if (p.second) return true; - if (p.first->second.lsn >= init.lsn) return false; - p.first->second = init; - return true; - } - - /** Get the last stored lsn of the page id and its respective - init/load operation. - @param[in] page_id page id - @param[in,out] init initialize log or load log - @return the latest page initialization; - not valid after releasing recv_sys.mutex. */ - init& last(page_id_t page_id) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - return inits.find(page_id)->second; - } - - /** Determine if a page will be initialized or freed after a time. - @param page_id page identifier - @param lsn log sequence number - @return whether page_id will be freed or initialized after lsn */ - bool will_avoid_read(page_id_t page_id, lsn_t lsn) const - { - mysql_mutex_assert_owner(&recv_sys.mutex); - auto i= inits.find(page_id); - return i != inits.end() && i->second.lsn > lsn; - } - - /** At the end of each recovery batch, reset the 'created' flags. */ - void reset() - { - mysql_mutex_assert_owner(&recv_sys.mutex); - ut_ad(recv_no_ibuf_operations); - for (map::value_type& i : inits) { - i.second.created = false; - } - } - - /** On the last recovery batch, mark whether there exist - buffered changes for the pages that were initialized - by buf_page_create() and still reside in the buffer pool. - @param[in,out] mtr dummy mini-transaction */ - void mark_ibuf_exist(mtr_t& mtr) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - mtr.start(); - - for (const map::value_type& i : inits) { - if (!i.second.created) { - continue; - } - if (buf_block_t* block = buf_page_get_low( - i.first, 0, RW_X_LATCH, nullptr, - BUF_GET_IF_IN_POOL, - &mtr, nullptr, false)) { - if (UNIV_LIKELY_NULL(block->page.zip.data)) { - switch (fil_page_get_type( - block->page.zip.data)) { - case FIL_PAGE_INDEX: - case FIL_PAGE_RTREE: - if (page_zip_decompress( - &block->page.zip, - block->page.frame, - true)) { - break; - } - ib::error() << "corrupted " - << block->page.id(); - } - } - if (recv_no_ibuf_operations) { - mtr.commit(); - mtr.start(); - continue; - } - mysql_mutex_unlock(&recv_sys.mutex); - if (ibuf_page_exists(block->page.id(), - block->zip_size())) { - block->page.set_ibuf_exist(); - } - mtr.commit(); - mtr.start(); - mysql_mutex_lock(&recv_sys.mutex); - } - } - - mtr.commit(); - clear(); - } - - /** Clear the data structure */ - void clear() { inits.clear(); } -}; - -static mlog_init_t mlog_init; - /** Process a record that indicates that a tablespace is being shrunk in size. 
@param page_id first page identifier that is not in the file @param lsn log sequence number of the shrink operation */ inline void recv_sys_t::trim(const page_id_t page_id, lsn_t lsn) { - DBUG_ENTER("recv_sys_t::trim"); - DBUG_LOG("ib_log", - "discarding log beyond end of tablespace " - << page_id << " before LSN " << lsn); - mysql_mutex_assert_owner(&mutex); - for (recv_sys_t::map::iterator p = pages.lower_bound(page_id); - p != pages.end() && p->first.space() == page_id.space();) { - recv_sys_t::map::iterator r = p++; - if (r->second.trim(lsn)) { - pages.erase(r); - } - } - DBUG_VOID_RETURN; + DBUG_ENTER("recv_sys_t::trim"); + DBUG_LOG("ib_log", "discarding log beyond end of tablespace " + << page_id << " before LSN " << lsn); + mysql_mutex_assert_owner(&mutex); + if (pages_it != pages.end() && pages_it->first.space() == page_id.space()) + pages_it= pages.end(); + for (recv_sys_t::map::iterator p = pages.lower_bound(page_id); + p != pages.end() && p->first.space() == page_id.space();) + { + recv_sys_t::map::iterator r = p++; + if (r->second.trim(lsn)) + { + ut_ad(!r->second.being_processed); + pages.erase(r); + } + } + DBUG_VOID_RETURN; } inline void recv_sys_t::read(os_offset_t total_offset, span buf) @@ -1202,15 +1221,10 @@ inline size_t recv_sys_t::files_size() @param[in] space_id the tablespace ID @param[in] ftype FILE_MODIFY, FILE_DELETE, or FILE_RENAME @param[in] lsn lsn of the redo log -@param[in] store whether the redo log has to be stored */ +@param[in] if_exists whether to check if the tablespace exists */ static void fil_name_process(const char *name, ulint len, uint32_t space_id, - mfile_type_t ftype, lsn_t lsn, store_t store) + mfile_type_t ftype, lsn_t lsn, bool if_exists) { - if (srv_operation == SRV_OPERATION_BACKUP - || srv_operation == SRV_OPERATION_BACKUP_NO_DEFER) { - return; - } - ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -1321,7 +1335,7 @@ same_space: case FIL_LOAD_DEFER: /** Skip the deferred spaces when lsn is already processed */ - if (store != store_t::STORE_IF_EXISTS) { + if (!if_exists) { deferred_spaces.add( space_id, fname.name.c_str(), lsn); } @@ -1364,9 +1378,8 @@ void recv_sys_t::close() deferred_spaces.clear(); ut_d(mysql_mutex_unlock(&mutex)); - last_stored_lsn= 0; + scanned_lsn= 0; mysql_mutex_destroy(&mutex); - pthread_cond_destroy(&cond); } recv_spaces.clear(); @@ -1381,34 +1394,34 @@ void recv_sys_t::create() ut_ad(this == &recv_sys); ut_ad(!is_initialised()); mysql_mutex_init(recv_sys_mutex_key, &mutex, nullptr); - pthread_cond_init(&cond, nullptr); apply_log_recs = false; - apply_batch_on = false; len = 0; offset = 0; lsn = 0; + scanned_lsn = 1; found_corrupt_log = false; found_corrupt_fs = false; file_checkpoint = 0; progress_time = time(NULL); + ut_ad(pages.empty()); + pages_it = pages.end(); recv_max_page_lsn = 0; memset(truncated_undo_spaces, 0, sizeof truncated_undo_spaces); - last_stored_lsn = 1; UT_LIST_INIT(blocks, &buf_block_t::unzip_LRU); } /** Clear a fully processed set of stored redo log records. 
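/* Editor's illustrative sketch, not part of the patch: the hunks above keep a
cached iterator (recv_sys.pages_it) into recv_sys.pages and reset it in trim()
and create() before map entries can disappear, as pages_it_invalidate() does.
Below is a minimal, self-contained analogue of that iterator-cache pattern;
the type and member names here are hypothetical. */
#include <cstdint>
#include <map>
#include <string>

struct page_log_cache
{
  std::map<uint64_t, std::string> pages;
  /* cached position of the most recent lookup; pages.end() if unknown */
  std::map<uint64_t, std::string>::iterator pages_it= pages.end();

  std::string *find(uint64_t id)
  {
    /* reuse the cached iterator when it already points to id */
    if (pages_it == pages.end() || pages_it->first != id)
      pages_it= pages.find(id);
    return pages_it == pages.end() ? nullptr : &pages_it->second;
  }

  void erase(uint64_t id)
  {
    auto p= pages.find(id);
    if (p == pages.end())
      return;
    /* only the erased iterator is invalidated by std::map::erase();
       advance the cache past it, mirroring pages_it_invalidate(p) */
    if (pages_it == p)
      ++pages_it;
    pages.erase(p);
  }
};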
*/ -inline void recv_sys_t::clear() +void recv_sys_t::clear() { mysql_mutex_assert_owner(&mutex); apply_log_recs= false; - apply_batch_on= false; ut_ad(!after_apply || found_corrupt_fs || !UT_LIST_GET_LAST(blocks)); pages.clear(); + pages_it= pages.end(); for (buf_block_t *block= UT_LIST_GET_LAST(blocks); block; ) { @@ -1419,8 +1432,6 @@ inline void recv_sys_t::clear() buf_block_free(block); block= prev_block; } - - pthread_cond_broadcast(&cond); } /** Free most recovery data structures. */ @@ -1432,52 +1443,14 @@ void recv_sys_t::debug_free() recovery_on= false; pages.clear(); + pages_it= pages.end(); mysql_mutex_unlock(&mutex); } -inline void *recv_sys_t::alloc(size_t len) -{ - mysql_mutex_assert_owner(&mutex); - ut_ad(len); - ut_ad(len <= srv_page_size); - - buf_block_t *block= UT_LIST_GET_FIRST(blocks); - if (UNIV_UNLIKELY(!block)) - { -create_block: - block= buf_block_alloc(); - block->page.access_time= 1U << 16 | - ut_calc_align(static_cast(len), ALIGNMENT); - static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); - UT_LIST_ADD_FIRST(blocks, block); - MEM_MAKE_ADDRESSABLE(block->page.frame, len); - MEM_NOACCESS(block->page.frame + len, srv_page_size - len); - return my_assume_aligned(block->page.frame); - } - - size_t free_offset= static_cast(block->page.access_time); - ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); - if (UNIV_UNLIKELY(!free_offset)) - { - ut_ad(srv_page_size == 65536); - goto create_block; - } - ut_ad(free_offset <= srv_page_size); - free_offset+= len; - - if (free_offset > srv_page_size) - goto create_block; - - block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | - ut_calc_align(static_cast(free_offset), ALIGNMENT); - MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - len, len); - return my_assume_aligned(block->page.frame + free_offset - len); -} - /** Free a redo log snippet. 
-@param data buffer returned by alloc() */ +@param data buffer allocated in add() */ inline void recv_sys_t::free(const void *data) { ut_ad(!ut_align_offset(data, ALIGNMENT)); @@ -1502,8 +1475,11 @@ inline void recv_sys_t::free(const void *data) ut_ad(block->page.state() == buf_page_t::MEMORY); ut_ad(static_cast(block->page.access_time - 1) < srv_page_size); - ut_ad(block->page.access_time >= 1U << 16); - if (!((block->page.access_time -= 1U << 16) >> 16)) + unsigned a= block->page.access_time; + ut_ad(a >= 1U << 16); + a-= 1U << 16; + block->page.access_time= a; + if (!(a >> 16)) { UT_LIST_REMOVE(blocks, block); MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size); @@ -1689,6 +1665,9 @@ dberr_t recv_sys_t::find_checkpoint() bool wrong_size= false; byte *buf; + ut_ad(pages.empty()); + pages_it= pages.end(); + if (files.empty()) { file_checkpoint= 0; @@ -1965,7 +1944,31 @@ inline bool page_recv_t::trim(lsn_t start_lsn) } -inline void page_recv_t::recs_t::clear() +void page_recv_t::recs_t::rewind(lsn_t start_lsn) +{ + mysql_mutex_assert_owner(&recv_sys.mutex); + log_phys_t *trim= static_cast(head); + ut_ad(trim); + while (log_phys_t *next= static_cast(trim->next)) + { + ut_ad(trim->start_lsn < start_lsn); + if (next->start_lsn == start_lsn) + break; + trim= next; + } + tail= trim; + log_rec_t *l= tail->next; + tail->next= nullptr; + while (l) + { + log_rec_t *next= l->next; + recv_sys.free(l); + l= next; + } +} + + +void page_recv_t::recs_t::clear() { mysql_mutex_assert_owner(&recv_sys.mutex); for (const log_rec_t *l= head; l; ) @@ -1977,33 +1980,99 @@ inline void page_recv_t::recs_t::clear() head= tail= nullptr; } - /** Ignore any earlier redo log records for this page. */ inline void page_recv_t::will_not_read() { - ut_ad(state == RECV_NOT_PROCESSED || state == RECV_WILL_NOT_READ); - state= RECV_WILL_NOT_READ; + ut_ad(!being_processed); + skip_read= true; log.clear(); } +void recv_sys_t::erase(map::iterator p) +{ + ut_ad(p->second.being_processed <= 0); + p->second.log.clear(); + pages.erase(p); +} + +/** Free log for processed pages. */ +void recv_sys_t::garbage_collect() +{ + mysql_mutex_assert_owner(&mutex); + + if (pages_it != pages.end() && pages_it->second.being_processed < 0) + pages_it= pages.end(); + + for (map::iterator p= pages.begin(); p != pages.end(); ) + { + if (p->second.being_processed < 0) + { + map::iterator r= p++; + erase(r); + } + else + p++; + } +} + +/** Allocate a block from the buffer pool for recv_sys.pages */ +ATTRIBUTE_COLD buf_block_t *recv_sys_t::add_block() +{ + for (bool freed= false;;) + { + const auto rs= UT_LIST_GET_LEN(blocks) * 2; + mysql_mutex_lock(&buf_pool.mutex); + const auto bs= + UT_LIST_GET_LEN(buf_pool.free) + UT_LIST_GET_LEN(buf_pool.LRU); + if (UNIV_LIKELY(bs > BUF_LRU_MIN_LEN || rs < bs)) + { + buf_block_t *block= buf_LRU_get_free_block(true); + mysql_mutex_unlock(&buf_pool.mutex); + return block; + } + /* out of memory: redo log occupies more than 1/3 of buf_pool + and there are fewer than BUF_LRU_MIN_LEN pages left */ + mysql_mutex_unlock(&buf_pool.mutex); + if (freed) + return nullptr; + freed= true; + garbage_collect(); + } +} + +/** Wait for buffer pool to become available. 
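/* Editor's illustrative sketch, not part of the patch: the list surgery that
page_recv_t::recs_t::rewind() performs above, shown on a plain singly linked
list. Records from the given start LSN onwards are unlinked and freed, and
the preceding node becomes the new tail. Names are hypothetical, and unlike
the real code this sketch uses delete instead of recv_sys.free(). */
#include <cstdint>

struct rec
{
  uint64_t start_lsn;
  rec *next= nullptr;
};

struct rec_list
{
  rec *head= nullptr, *tail= nullptr;

  /* Discard every record whose start_lsn >= start_lsn, assuming the list is
  ordered by start_lsn and the first record precedes start_lsn. */
  void rewind(uint64_t start_lsn)
  {
    if (!head)
      return;
    rec *trim= head;
    while (rec *next= trim->next)
    {
      if (next->start_lsn >= start_lsn)
        break;
      trim= next;
    }
    tail= trim;
    rec *l= trim->next;
    trim->next= nullptr;
    while (l)
    {
      rec *next= l->next;
      delete l;
      l= next;
    }
  }
};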
*/ +ATTRIBUTE_COLD void recv_sys_t::wait_for_pool(size_t pages) +{ + mysql_mutex_unlock(&mutex); + os_aio_wait_until_no_pending_reads(false); + mysql_mutex_lock(&mutex); + garbage_collect(); + mysql_mutex_lock(&buf_pool.mutex); + bool need_more= UT_LIST_GET_LEN(buf_pool.free) < pages; + mysql_mutex_unlock(&buf_pool.mutex); + if (need_more) + buf_flush_sync_batch(lsn); +} /** Register a redo log snippet for a page. @param it page iterator @param start_lsn start LSN of the mini-transaction @param lsn @see mtr_t::commit_lsn() @param l redo log snippet -@param len length of l, in bytes */ -inline void recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, - const byte *l, size_t len) +@param len length of l, in bytes +@return whether we ran out of memory */ +ATTRIBUTE_NOINLINE +bool recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, + const byte *l, size_t len) { mysql_mutex_assert_owner(&mutex); - page_id_t page_id = it->first; page_recv_t &recs= it->second; + buf_block_t *block; switch (*l & 0x70) { case FREE_PAGE: case INIT_PAGE: recs.will_not_read(); - mlog_init.add(page_id, start_lsn); /* FIXME: remove this! */ + mlog_init.add(it->first, start_lsn); /* FIXME: remove this! */ /* fall through */ default: log_phys_t *tail= static_cast(recs.log.last()); @@ -2012,7 +2081,7 @@ inline void recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, if (tail->start_lsn != start_lsn) break; ut_ad(tail->lsn == lsn); - buf_block_t *block= UT_LIST_GET_LAST(blocks); + block= UT_LIST_GET_LAST(blocks); ut_ad(block); const size_t used= static_cast(block->page.access_time - 1) + 1; ut_ad(used >= ALIGNMENT); @@ -2025,7 +2094,7 @@ append: MEM_MAKE_ADDRESSABLE(end + 1, len); /* Append to the preceding record for the page */ tail->append(l, len); - return; + return false; } if (end <= &block->page.frame[used - ALIGNMENT] || &block->page.frame[used] >= end) @@ -2039,8 +2108,49 @@ append: ut_calc_align(static_cast(new_used), ALIGNMENT); goto append; } - recs.log.append(new (alloc(log_phys_t::alloc_size(len))) + + const size_t size{log_phys_t::alloc_size(len)}; + ut_ad(size <= srv_page_size); + void *buf; + block= UT_LIST_GET_FIRST(blocks); + if (UNIV_UNLIKELY(!block)) + { + create_block: + block= add_block(); + if (UNIV_UNLIKELY(!block)) + return true; + block->page.access_time= 1U << 16 | + ut_calc_align(static_cast(size), ALIGNMENT); + static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); + UT_LIST_ADD_FIRST(blocks, block); + MEM_MAKE_ADDRESSABLE(block->page.frame, size); + MEM_NOACCESS(block->page.frame + size, srv_page_size - size); + buf= block->page.frame; + } + else + { + size_t free_offset= static_cast(block->page.access_time); + ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); + if (UNIV_UNLIKELY(!free_offset)) + { + ut_ad(srv_page_size == 65536); + goto create_block; + } + ut_ad(free_offset <= srv_page_size); + free_offset+= size; + + if (free_offset > srv_page_size) + goto create_block; + + block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | + ut_calc_align(static_cast(free_offset), ALIGNMENT); + MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - size, size); + buf= block->page.frame + free_offset - size; + } + + recs.log.append(new (my_assume_aligned(buf)) log_phys_t{start_lsn, lsn, l, len}); + return false; } /** Store/remove the freed pages in fil_name_t of recv_spaces. @@ -2304,13 +2414,84 @@ struct recv_ring : public recv_buf }; #endif -/** Parse and register one log_t::FORMAT_10_8 mini-transaction. 
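/* Editor's illustrative sketch, not part of the patch: recv_sys_t::add()
above carves log snippets out of page-sized blocks and packs two 16-bit
fields into one word (allocation count in the high half, aligned end-of-used
offset in the low half), so that free() can drop a block once its count
reaches zero. Plain malloc() stands in for buf_block_t; all names below are
hypothetical and the 64 KiB offset wrap handled by the real code is omitted. */
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <list>

static constexpr size_t BLOCK_SIZE= 4096, ALIGNMENT= sizeof(size_t);

struct arena
{
  struct block { std::byte *frame; uint32_t header; }; /* count:16 | used:16 */
  std::list<block> blocks;

  void *alloc(size_t len)
  {
    len= (len + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
    if (!blocks.empty())
    {
      block &b= blocks.front();
      const size_t used= b.header & 0xffff;
      if (used + len <= BLOCK_SIZE)
      {
        b.header= ((b.header >> 16) + 1) << 16 | uint32_t(used + len);
        return b.frame + used;
      }
    }
    block b{static_cast<std::byte*>(std::malloc(BLOCK_SIZE)),
            1U << 16 | uint32_t(len)};
    blocks.push_front(b);
    return b.frame;
  }

  void free(const void *data)
  {
    const std::byte *d= static_cast<const std::byte*>(data);
    for (auto it= blocks.begin(); it != blocks.end(); ++it)
      if (d >= it->frame && d < it->frame + BLOCK_SIZE)
      {
        it->header-= 1U << 16;          /* one allocation fewer */
        if (!(it->header >> 16))
        {
          std::free(it->frame);         /* last snippet gone: drop the block */
          blocks.erase(it);
        }
        return;
      }
  }
};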
-@param store whether to store the records -@param l log data source */ template -inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) +void recv_sys_t::rewind(source &l, source &begin) noexcept +{ + ut_ad(srv_operation != SRV_OPERATION_BACKUP); + mysql_mutex_assert_owner(&mutex); + + const source end= l; + uint32_t rlen; + for (l= begin; !(l == end); l+= rlen) + { + const source recs{l}; + ++l; + const byte b= *recs; + + ut_ad(b > 1); + ut_ad(UNIV_LIKELY((b & 0x70) != RESERVED) || srv_force_recovery); + + rlen= b & 0xf; + if (!rlen) + { + const uint32_t lenlen= mlog_decode_varint_length(*l); + const uint32_t addlen= mlog_decode_varint(l); + ut_ad(addlen != MLOG_DECODE_ERROR); + rlen= addlen + 15 - lenlen; + l+= lenlen; + } + ut_ad(!l.is_eof(rlen)); + if (b & 0x80) + continue; + + uint32_t idlen= mlog_decode_varint_length(*l); + if (UNIV_UNLIKELY(idlen > 5 || idlen >= rlen)) + continue; + const uint32_t space_id= mlog_decode_varint(l); + if (UNIV_UNLIKELY(space_id == MLOG_DECODE_ERROR)) + continue; + l+= idlen; + rlen-= idlen; + idlen= mlog_decode_varint_length(*l); + if (UNIV_UNLIKELY(idlen > 5 || idlen > rlen)) + continue; + const uint32_t page_no= mlog_decode_varint(l); + if (UNIV_UNLIKELY(page_no == MLOG_DECODE_ERROR)) + continue; + const page_id_t id{space_id, page_no}; + if (pages_it == pages.end() || pages_it->first != id) + { + pages_it= pages.find(id); + if (pages_it == pages.end()) + continue; + } + + ut_ad(!pages_it->second.being_processed); + const log_phys_t *head= + static_cast(*pages_it->second.log.begin()); + if (!head || head->start_lsn == lsn) + { + erase(pages_it); + pages_it= pages.end(); + } + else + pages_it->second.log.rewind(lsn); + } + + l= begin; + pages_it= pages.end(); +} + +/** Parse and register one log_t::FORMAT_10_8 mini-transaction. +@tparam store whether to store the records +@param l log data source +@param if_exists if store: whether to check if the tablespace exists */ +template +inline +recv_sys_t::parse_mtr_result recv_sys_t::parse(source &l, bool if_exists) noexcept { + restart: #ifndef SUX_LOCK_GENERIC ut_ad(log_sys.latch.is_write_locked() || srv_operation == SRV_OPERATION_BACKUP || @@ -2319,12 +2500,15 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) mysql_mutex_assert_owner(&mutex); ut_ad(log_sys.next_checkpoint_lsn); ut_ad(log_sys.is_latest()); + ut_ad(store || !if_exists); + ut_ad(store || + srv_operation != SRV_OPERATION_BACKUP || + srv_operation != SRV_OPERATION_BACKUP_NO_DEFER); alignas(8) byte iv[MY_AES_BLOCK_SIZE]; byte *decrypt_buf= static_cast(alloca(srv_page_size)); const lsn_t start_lsn{lsn}; - map::iterator cached_pages_it{pages.end()}; /* Check that the entire mini-transaction is included within the buffer */ if (l.is_eof(0)) @@ -2333,7 +2517,7 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) if (*l <= 1) return GOT_EOF; /* We should never write an empty mini-transaction. 
*/ - const source begin{l}; + source begin{l}; uint32_t rlen; for (uint32_t total_len= 0; !l.is_eof(); l+= rlen, total_len+= rlen) { @@ -2433,7 +2617,6 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) sql_print_error("InnoDB: Unknown log record at LSN " LSN_PF, lsn); corrupted: found_corrupt_log= true; - pthread_cond_broadcast(&cond); return GOT_EOF; } @@ -2510,13 +2693,13 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) mach_write_to_4(iv + 12, page_no); got_page_op= !(b & 0x80); if (!got_page_op); - else if (srv_operation == SRV_OPERATION_BACKUP) + else if (!store && srv_operation == SRV_OPERATION_BACKUP) { if (page_no == 0 && first_page_init && (b & 0x10)) first_page_init(space_id); continue; } - else if (file_checkpoint && !is_predefined_tablespace(space_id)) + else if (store && file_checkpoint && !is_predefined_tablespace(space_id)) { recv_spaces_t::iterator i= recv_spaces.lower_bound(space_id); if (i != recv_spaces.end() && i->first == space_id); @@ -2585,7 +2768,7 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) trim({space_id, 0}, lsn); truncated_undo_spaces[space_id - srv_undo_space_id_start]= { lsn, page_no }; - if (undo_space_trunc) + if (!store && undo_space_trunc) undo_space_trunc(space_id); #endif last_offset= 1; /* the next record must not be same_page */ @@ -2626,7 +2809,7 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) { if (UNIV_UNLIKELY(rlen + last_offset > srv_page_size)) goto record_corrupted; - if (UNIV_UNLIKELY(!page_no) && file_checkpoint) + if (store && UNIV_UNLIKELY(!page_no) && file_checkpoint) { const bool has_size= last_offset <= FSP_HEADER_OFFSET + FSP_SIZE && last_offset + rlen >= FSP_HEADER_OFFSET + FSP_SIZE + 4; @@ -2705,38 +2888,57 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) ut_ad(modified.emplace(id).second || (b & 0x70) != INIT_PAGE); } #endif - const bool is_init= (b & 0x70) <= INIT_PAGE; - switch (store) { - case STORE_IF_EXISTS: - if (fil_space_t *space= fil_space_t::get(space_id)) + if (store) + { + if (if_exists) { - const auto size= space->get_size(); - space->release(); - if (!size) + if (fil_space_t *space= fil_space_t::get(space_id)) + { + const auto size= space->get_size(); + space->release(); + if (!size) + continue; + } + else if (!deferred_spaces.find(space_id)) continue; } - else if (!deferred_spaces.find(space_id)) - continue; - /* fall through */ - case STORE_YES: if (!mlog_init.will_avoid_read(id, start_lsn)) { - if (cached_pages_it == pages.end() || - cached_pages_it->first != id) - cached_pages_it= pages.emplace(id, page_recv_t{}).first; - add(cached_pages_it, start_lsn, lsn, - l.get_buf(cl, recs, decrypt_buf), l - recs + rlen); + if (pages_it == pages.end() || pages_it->first != id) + pages_it= pages.emplace(id, page_recv_t{}).first; + if (UNIV_UNLIKELY(add(pages_it, start_lsn, lsn, + l.get_buf(cl, recs, decrypt_buf), + l - recs + rlen))) + { + lsn= start_lsn; + log_sys.set_recovered_lsn(start_lsn); + l+= rlen; + offset= begin.ptr - log_sys.buf; + rewind(l, begin); + if (if_exists) + { + apply(false); + if (is_corrupt_fs()) + return GOT_EOF; + goto restart; + } + sql_print_information("InnoDB: Multi-batch recovery needed at LSN " + LSN_PF, lsn); + return GOT_OOM; + } } - continue; - case STORE_NO: - if (!is_init) - continue; + } + else if ((b & 0x70) <= INIT_PAGE) + { mlog_init.add(id, start_lsn); - map::iterator i= pages.find(id); - if (i == pages.end()) - 
continue; - i->second.log.clear(); - pages.erase(i); + if (pages_it == pages.end() || pages_it->first != id) + { + pages_it= pages.find(id); + if (pages_it == pages.end()) + continue; + } + map::iterator r= pages_it++; + erase(r); } } else if (rlen) @@ -2749,6 +2951,11 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) if (rlen < UNIV_PAGE_SIZE_MAX && !l.is_zero(rlen)) continue; } + else if (store) + { + ut_ad(file_checkpoint); + continue; + } else if (const lsn_t c= l.read8()) { if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) @@ -2830,21 +3037,27 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) if (UNIV_UNLIKELY(!recv_needed_recovery && srv_read_only_mode)) continue; + if (!store && + (srv_operation == SRV_OPERATION_BACKUP || + srv_operation == SRV_OPERATION_BACKUP_NO_DEFER)) + { + if ((b & 0xf0) < FILE_CHECKPOINT && log_file_op) + log_file_op(space_id, b & 0xf0, + reinterpret_cast(fn), + static_cast(fnend - fn), + reinterpret_cast(fn2), + fn2 ? static_cast(fn2end - fn2) : 0); + continue; + } + fil_name_process(fn, fnend - fn, space_id, (b & 0xf0) == FILE_DELETE ? FILE_DELETE : FILE_MODIFY, - start_lsn, store); - - if ((b & 0xf0) < FILE_CHECKPOINT && log_file_op) - log_file_op(space_id, b & 0xf0, - reinterpret_cast(fn), - static_cast(fnend - fn), - reinterpret_cast(fn2), - fn2 ? static_cast(fn2end - fn2) : 0); + start_lsn, if_exists); if (fn2) { fil_name_process(fn2, fn2end - fn2, space_id, - FILE_RENAME, start_lsn, store); + FILE_RENAME, start_lsn, if_exists); if (file_checkpoint) { const size_t len= fn2end - fn2; @@ -2868,18 +3081,23 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) return OK; } -ATTRIBUTE_NOINLINE -recv_sys_t::parse_mtr_result recv_sys_t::parse_mtr(store_t store) noexcept +template +recv_sys_t::parse_mtr_result recv_sys_t::parse_mtr(bool if_exists) noexcept { recv_buf s{&log_sys.buf[recv_sys.offset]}; - return recv_sys.parse(store, s); + return recv_sys.parse(s, if_exists); } +/** for mariadb-backup; @see xtrabackup_copy_logfile() */ +template +recv_sys_t::parse_mtr_result recv_sys_t::parse_mtr(bool) noexcept; + #ifdef HAVE_PMEM -recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(store_t store) noexcept +template +recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(bool if_exists) noexcept { - recv_sys_t::parse_mtr_result r{parse_mtr(store)}; - if (r != PREMATURE_EOF || !log_sys.is_pmem()) + recv_sys_t::parse_mtr_result r{parse_mtr(if_exists)}; + if (UNIV_LIKELY(r != PREMATURE_EOF) || !log_sys.is_pmem()) return r; ut_ad(recv_sys.len == log_sys.file_size); ut_ad(recv_sys.offset >= log_sys.START_OFFSET); @@ -2888,7 +3106,7 @@ recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(store_t store) noexcept {recv_sys.offset == recv_sys.len ? &log_sys.buf[log_sys.START_OFFSET] : &log_sys.buf[recv_sys.offset]}; - return recv_sys.parse(store, s); + return recv_sys.parse(s, if_exists); } #endif @@ -2896,23 +3114,22 @@ recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(store_t store) noexcept lsn of a log record. 
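/* Editor's illustrative sketch, not part of the patch: the parse functions
above turn the former run-time store_t argument into a compile-time bool
template parameter, with an explicit instantiation of the non-storing variant
kept for mariadb-backup. This minimal analogue shows that dispatch shape;
parse_one() and its records are hypothetical stand-ins. */
#include <cstdio>

enum parse_result { OK, EOF_REACHED };

template<bool store>
parse_result parse_one(const char *rec)
{
  if (!*rec)
    return EOF_REACHED;
  if constexpr (store)
    std::printf("storing record %s\n", rec);   /* buffer it for applying */
  else
    std::printf("scanning record %s\n", rec);  /* e.g. backup file tracking */
  return OK;
}

/* explicit instantiation of the non-storing variant, analogous to the
   "for mariadb-backup" instantiation above */
template parse_result parse_one<false>(const char *);

int main()
{
  parse_one<true>("INIT_PAGE");
  parse_one<false>("FILE_MODIFY");
}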
@param[in,out] block buffer pool page @param[in,out] mtr mini-transaction -@param[in,out] p recovery address +@param[in,out] recs log records to apply @param[in,out] space tablespace, or NULL if not looked up yet @param[in,out] init page initialization operation, or NULL @return the recovered page @retval nullptr on failure */ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, - const recv_sys_t::map::iterator &p, - fil_space_t *space= nullptr, - mlog_init_t::init *init= nullptr) + page_recv_t &recs, + fil_space_t *space, + recv_init *init) { - mysql_mutex_assert_owner(&recv_sys.mutex); + mysql_mutex_assert_not_owner(&recv_sys.mutex); ut_ad(recv_sys.apply_log_recs); ut_ad(recv_needed_recovery); ut_ad(!init || init->created); ut_ad(!init || init->lsn); - ut_ad(block->page.id() == p->first); - ut_ad(!p->second.is_being_processed()); + ut_ad(recs.being_processed == 1); ut_ad(!space || space->id == block->page.id().space()); ut_ad(log_sys.is_latest()); @@ -2924,10 +3141,6 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, block->page.id().space(), block->page.id().page_no())); - p->second.state = page_recv_t::RECV_BEING_PROCESSED; - - mysql_mutex_unlock(&recv_sys.mutex); - byte *frame = UNIV_LIKELY_NULL(block->page.zip.data) ? block->page.zip.data : block->page.frame; @@ -2941,7 +3154,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, bool skipped_after_init = false; - for (const log_rec_t* recv : p->second.log) { + for (const log_rec_t* recv : recs.log) { const log_phys_t* l = static_cast(recv); ut_ad(l->lsn); ut_ad(end_lsn <= l->lsn); @@ -2999,8 +3212,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, block->page.id().space(), block->page.id().page_no())); - log_phys_t::apply_status a= l->apply(*block, - p->second.last_offset); + log_phys_t::apply_status a= l->apply(*block, recs.last_offset); switch (a) { case log_phys_t::APPLIED_NO: @@ -3123,26 +3335,11 @@ set_start_lsn: mtr.commit(); done: - time_t now = time(NULL); - - mysql_mutex_lock(&recv_sys.mutex); - + /* FIXME: do this in page read, protected with recv_sys.mutex! */ if (recv_max_page_lsn < page_lsn) { recv_max_page_lsn = page_lsn; } - ut_ad(!block || p->second.is_being_processed()); - ut_ad(!block || !recv_sys.pages.empty()); - - if (recv_sys.report(now)) { - const size_t n = recv_sys.pages.size(); - sql_print_information("InnoDB: To recover: %zu pages from log", - n); - service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, - "To recover: %zu pages" - " from log", n); - } - return block; } @@ -3156,146 +3353,347 @@ ATTRIBUTE_COLD void recv_sys_t::free_corrupted_page(page_id_t page_id) mysql_mutex_lock(&mutex); map::iterator p= pages.find(page_id); - if (p != pages.end()) + if (p == pages.end()) { - p->second.log.clear(); - pages.erase(p); - if (!srv_force_recovery) - { - set_corrupt_fs(); - ib::error() << "Unable to apply log to corrupted page " << page_id - << "; set innodb_force_recovery to ignore"; - } - else - ib::warn() << "Discarding log for corrupted page " << page_id; + mysql_mutex_unlock(&mutex); + return; } - if (pages.empty()) - pthread_cond_broadcast(&cond); + p->second.being_processed= -1; + if (!srv_force_recovery) + set_corrupt_fs(); mysql_mutex_unlock(&mutex); -} -/** Possibly finish a recovery batch. 
*/ -inline void recv_sys_t::maybe_finish_batch() -{ - mysql_mutex_assert_owner(&mutex); - ut_ad(recovery_on); - if (!apply_batch_on || pages.empty() || is_corrupt_log() || is_corrupt_fs()) - pthread_cond_broadcast(&cond); + ib::error_or_warn(!srv_force_recovery) + << "Unable to apply log to corrupted page " << page_id; } ATTRIBUTE_COLD void recv_sys_t::set_corrupt_log() { mysql_mutex_lock(&mutex); found_corrupt_log= true; - pthread_cond_broadcast(&cond); mysql_mutex_unlock(&mutex); } ATTRIBUTE_COLD void recv_sys_t::set_corrupt_fs() { mysql_mutex_assert_owner(&mutex); + if (!srv_force_recovery) + sql_print_information("InnoDB: Set innodb_force_recovery=1" + " to ignore corrupted pages."); found_corrupt_fs= true; - pthread_cond_broadcast(&cond); } -/** Apply any buffered redo log to a page that was just read from a data file. -@param[in,out] space tablespace -@param[in,out] bpage buffer pool page +/** Apply any buffered redo log to a page. +@param space tablespace +@param bpage buffer pool page @return whether the page was recovered correctly */ bool recv_recover_page(fil_space_t* space, buf_page_t* bpage) { - mtr_t mtr; - mtr.start(); - mtr.set_log_mode(MTR_LOG_NO_REDO); + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); - ut_ad(bpage->frame); - /* Move the ownership of the x-latch on the page to - this OS thread, so that we can acquire a second - x-latch on it. This is needed for the operations to - the page to pass the debug checks. */ - bpage->lock.claim_ownership(); - bpage->lock.x_lock_recursive(); - bpage->fix_on_recovery(); - mtr.memo_push(reinterpret_cast(bpage), - MTR_MEMO_PAGE_X_FIX); + ut_ad(bpage->frame); + /* Move the ownership of the x-latch on the page to this OS thread, + so that we can acquire a second x-latch on it. This is needed for + the operations to the page to pass the debug checks. */ + bpage->lock.claim_ownership(); + bpage->lock.x_lock_recursive(); + bpage->fix_on_recovery(); + mtr.memo_push(reinterpret_cast(bpage), MTR_MEMO_PAGE_X_FIX); - buf_block_t* success = reinterpret_cast(bpage); + buf_block_t *success= reinterpret_cast(bpage); - mysql_mutex_lock(&recv_sys.mutex); - if (recv_sys.apply_log_recs) { - recv_sys_t::map::iterator p = recv_sys.pages.find(bpage->id()); - if (p != recv_sys.pages.end() - && !p->second.is_being_processed()) { - success = recv_recover_page(success, mtr, p, space); - if (UNIV_LIKELY(!!success)) { - p->second.log.clear(); - recv_sys.pages.erase(p); - } - recv_sys.maybe_finish_batch(); - goto func_exit; - } - } - - mtr.commit(); -func_exit: - mysql_mutex_unlock(&recv_sys.mutex); - ut_ad(mtr.has_committed()); - return success; -} - -/** Read pages for which log needs to be applied. 
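/* Editor's illustrative sketch, not part of the patch: the three-state
being_processed protocol visible above (0 = buffered, 1 = being applied,
-1 = applied or discarded) and the sweep that recv_sys_t::garbage_collect()
performs. A plain std::map without any mutex stands in for recv_sys.pages;
all names are hypothetical. */
#include <cstdint>
#include <iterator>
#include <map>
#include <vector>

struct page_recs
{
  std::vector<int> log;    /* stand-in for the redo snippet list */
  int being_processed= 0;  /* 0: buffered, 1: in progress, -1: done */
};

struct recovery
{
  std::map<uint64_t, page_recs> pages;

  bool start(uint64_t id)
  {
    auto p= pages.find(id);
    if (p == pages.end() || p->second.being_processed)
      return false;        /* nothing buffered, or already claimed */
    p->second.being_processed= 1;
    return true;
  }

  /* id must have been start()ed */
  void done(uint64_t id) { pages.find(id)->second.being_processed= -1; }

  /* drop the log of pages that were applied, like garbage_collect() */
  void garbage_collect()
  {
    for (auto p= pages.begin(); p != pages.end(); )
      p= p->second.being_processed < 0 ? pages.erase(p) : std::next(p);
  }
};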
-@param page_id first page identifier to read -@param i iterator to recv_sys.pages */ -TRANSACTIONAL_TARGET -static void recv_read_in_area(page_id_t page_id, recv_sys_t::map::iterator i) -{ - uint32_t page_nos[32]; - ut_ad(page_id == i->first); - page_id.set_page_no(ut_2pow_round(page_id.page_no(), 32U)); - const page_id_t up_limit{page_id + 31}; - uint32_t* p= page_nos; - - for (; i != recv_sys.pages.end() && i->first <= up_limit; i++) + mysql_mutex_lock(&recv_sys.mutex); + if (recv_sys.apply_log_recs) { - if (i->second.state == page_recv_t::RECV_NOT_PROCESSED) + const page_id_t id{bpage->id()}; + recv_sys_t::map::iterator p= recv_sys.pages.find(id); + if (p == recv_sys.pages.end()); + else if (p->second.being_processed < 0) { - i->second.state= page_recv_t::RECV_BEING_READ; - *p++= i->first.page_no(); + recv_sys.pages_it_invalidate(p); + recv_sys.erase(p); + } + else + { + p->second.being_processed= 1; + recv_sys_t::init *init= nullptr; + if (p->second.skip_read) + (init= &mlog_init.last(id))->created= true; + mysql_mutex_unlock(&recv_sys.mutex); + success= recv_recover_page(success, mtr, p->second, space, init); + p->second.being_processed= -1; + goto func_exit; } } - if (p != page_nos) + mysql_mutex_unlock(&recv_sys.mutex); + mtr.commit(); +func_exit: + ut_ad(mtr.has_committed()); + return success; +} + +void IORequest::fake_read_complete(os_offset_t offset) const +{ + ut_ad(node); + ut_ad(is_read()); + ut_ad(bpage); + ut_ad(bpage->frame); + ut_ad(recv_recovery_is_on()); + ut_ad(offset); + + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); + + ut_ad(bpage->frame); + /* Move the ownership of the x-latch on the page to this OS thread, + so that we can acquire a second x-latch on it. This is needed for + the operations to the page to pass the debug checks. */ + bpage->lock.claim_ownership(); + bpage->lock.x_lock_recursive(); + bpage->fix_on_recovery(); + mtr.memo_push(reinterpret_cast(bpage), MTR_MEMO_PAGE_X_FIX); + + page_recv_t &recs= *reinterpret_cast(slot); + ut_ad(recs.being_processed == 1); + recv_init &init= *reinterpret_cast(offset); + ut_ad(init.lsn > 1); + init.created= true; + + if (recv_recover_page(reinterpret_cast(bpage), + mtr, recs, node->space, &init)) { - mysql_mutex_unlock(&recv_sys.mutex); - buf_read_recv_pages(page_id.space(), {page_nos, p}); - mysql_mutex_lock(&recv_sys.mutex); + ut_ad(bpage->oldest_modification() || bpage->is_freed()); + bpage->lock.x_unlock(true); + } + recs.being_processed= -1; + ut_ad(mtr.has_committed()); + + node->space->release(); +} + +/** @return whether a page has been freed */ +inline bool fil_space_t::is_freed(uint32_t page) +{ + std::lock_guard freed_lock(freed_range_mutex); + return freed_ranges.contains(page); +} + +bool recv_sys_t::report(time_t time) +{ + if (time - progress_time < 15) + return false; + progress_time= time; + return true; +} + +ATTRIBUTE_COLD +void recv_sys_t::report_progress() const +{ + mysql_mutex_assert_owner(&mutex); + const size_t n{pages.size()}; + if (recv_sys.scanned_lsn == recv_sys.lsn) + { + sql_print_information("InnoDB: To recover: %zu pages", n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "To recover: %zu pages", n); + } + else + { + sql_print_information("InnoDB: To recover: LSN " LSN_PF + "/" LSN_PF "; %zu pages", + recv_sys.lsn, recv_sys.scanned_lsn, n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "To recover: LSN " LSN_PF + "/" LSN_PF "; %zu pages", + recv_sys.lsn, recv_sys.scanned_lsn, n); } } +/** Apply a recovery batch. 
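/* Editor's illustrative sketch, not part of the patch: the 15-second
throttle that recv_sys_t::report() applies before report_progress() prints
how much work remains. The reporter type and its messages are hypothetical. */
#include <cstdio>
#include <ctime>

struct progress_reporter
{
  time_t last= 0;

  /* @return whether at least 15 seconds passed since the last report */
  bool report(time_t now)
  {
    if (now - last < 15)
      return false;
    last= now;
    return true;
  }
};

int main()
{
  progress_reporter r;
  for (unsigned pages_left= 1000; pages_left; pages_left-= 100)
    if (r.report(time(nullptr)))
      std::printf("To recover: %u pages\n", pages_left);
}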
+@param space_id current tablespace identifier +@param space current tablespace +@param free_block spare buffer block +@param last_batch whether it is possible to write more redo log +@return whether the caller must provide a new free_block */ +bool recv_sys_t::apply_batch(uint32_t space_id, fil_space_t *&space, + buf_block_t *&free_block, bool last_batch) +{ + mysql_mutex_assert_owner(&mutex); + ut_ad(pages_it != pages.end()); + ut_ad(!pages_it->second.log.empty()); + + mysql_mutex_lock(&buf_pool.mutex); + size_t n= 0, max_n= std::min(BUF_LRU_MIN_LEN, + UT_LIST_GET_LEN(buf_pool.LRU) + + UT_LIST_GET_LEN(buf_pool.free)); + mysql_mutex_unlock(&buf_pool.mutex); + + map::iterator begin= pages.end(); + page_id_t begin_id{~0ULL}; + + while (pages_it != pages.end() && n < max_n) + { + ut_ad(!buf_dblwr.is_inside(pages_it->first)); + if (!pages_it->second.being_processed) + { + if (space_id != pages_it->first.space()) + { + space_id= pages_it->first.space(); + if (space) + space->release(); + space= fil_space_t::get(space_id); + if (!space) + { + auto d= deferred_spaces.defers.find(space_id); + if (d == deferred_spaces.defers.end() || d->second.deleted) + /* For deleted files we preserve the deferred_spaces entry */; + else if (!free_block) + return true; + else + { + space= recover_deferred(pages_it, d->second.file_name, free_block); + deferred_spaces.defers.erase(d); + if (!space && !srv_force_recovery) + { + set_corrupt_fs(); + return false; + } + } + } + } + if (!space || space->is_freed(pages_it->first.page_no())) + pages_it->second.being_processed= -1; + else if (!n++) + { + begin= pages_it; + begin_id= pages_it->first; + } + } + pages_it++; + } + + if (!last_batch) + log_sys.latch.wr_unlock(); + + pages_it= begin; + + if (report(time(nullptr))) + report_progress(); + + if (!n) + goto wait; + + mysql_mutex_lock(&buf_pool.mutex); + + if (UNIV_UNLIKELY(UT_LIST_GET_LEN(buf_pool.free) < n)) + { + mysql_mutex_unlock(&buf_pool.mutex); + wait: + wait_for_pool(n); + if (n); + else if (!last_batch) + goto unlock_relock; + else + goto get_last; + pages_it= pages.lower_bound(begin_id); + ut_ad(pages_it != pages.end()); + } + else + mysql_mutex_unlock(&buf_pool.mutex); + + while (pages_it != pages.end()) + { + ut_ad(!buf_dblwr.is_inside(pages_it->first)); + if (!pages_it->second.being_processed) + { + const page_id_t id{pages_it->first}; + + if (space_id != id.space()) + { + space_id= id.space(); + if (space) + space->release(); + space= fil_space_t::get(space_id); + } + if (!space) + { + const auto it= deferred_spaces.defers.find(space_id); + if (it != deferred_spaces.defers.end() && !it->second.deleted) + /* The records must be processed after recover_deferred(). */ + goto next; + goto space_not_found; + } + else if (space->is_freed(id.page_no())) + { + space_not_found: + pages_it->second.being_processed= -1; + goto next; + } + else + { + page_recv_t &recs= pages_it->second; + ut_ad(!recs.log.empty()); + recs.being_processed= 1; + init *init= recs.skip_read ? 
&mlog_init.last(id) : nullptr; + mysql_mutex_unlock(&mutex); + buf_read_recover(space, id, recs, init); + } + + if (!--n) + { + if (last_batch) + goto relock_last; + goto relock; + } + mysql_mutex_lock(&mutex); + pages_it= pages.lower_bound(id); + } + else + next: + pages_it++; + } + + if (!last_batch) + { + unlock_relock: + mysql_mutex_unlock(&mutex); + relock: + log_sys.latch.wr_lock(SRW_LOCK_CALL); + relock_last: + mysql_mutex_lock(&mutex); + get_last: + pages_it= pages.lower_bound(begin_id); + } + + return false; +} + /** Attempt to initialize a page based on redo log records. -@param page_id page identifier -@param p iterator pointing to page_id +@param p iterator @param mtr mini-transaction @param b pre-allocated buffer pool block +@param init page initialization @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ -inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id, - map::iterator &p, mtr_t &mtr, - buf_block_t *b) +inline buf_block_t *recv_sys_t::recover_low(const map::iterator &p, mtr_t &mtr, + buf_block_t *b, init &init) { - mysql_mutex_assert_owner(&mutex); - ut_ad(p->first == page_id); + mysql_mutex_assert_not_owner(&mutex); page_recv_t &recs= p->second; - ut_ad(recs.state == page_recv_t::RECV_WILL_NOT_READ); + ut_ad(recs.skip_read); + ut_ad(recs.being_processed == 1); buf_block_t* block= nullptr; - mlog_init_t::init &i= mlog_init.last(page_id); const lsn_t end_lsn= recs.log.last()->lsn; - if (end_lsn < i.lsn) - DBUG_LOG("ib_log", "skip log for page " << page_id - << " LSN " << end_lsn << " < " << i.lsn); - fil_space_t *space= fil_space_t::get(page_id.space()); + if (end_lsn < init.lsn) + DBUG_LOG("ib_log", "skip log for page " << p->first + << " LSN " << end_lsn << " < " << init.lsn); + fil_space_t *space= fil_space_t::get(p->first.space()); mtr.start(); mtr.set_log_mode(MTR_LOG_NO_REDO); @@ -3304,82 +3702,77 @@ inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id, if (!space) { - if (page_id.page_no() != 0) + if (p->first.page_no() != 0) { nothing_recoverable: mtr.commit(); return nullptr; } - auto it= recv_spaces.find(page_id.space()); + auto it= recv_spaces.find(p->first.space()); ut_ad(it != recv_spaces.end()); uint32_t flags= it->second.flags; zip_size= fil_space_t::zip_size(flags); - block= buf_page_create_deferred(page_id.space(), zip_size, &mtr, b); + block= buf_page_create_deferred(p->first.space(), zip_size, &mtr, b); ut_ad(block == b); block->page.lock.x_lock_recursive(); } else { - block= buf_page_create(space, page_id.page_no(), zip_size, &mtr, b); + block= buf_page_create(space, p->first.page_no(), zip_size, &mtr, b); if (UNIV_UNLIKELY(block != b)) { /* The page happened to exist in the buffer pool, or it was just being read in. Before the exclusive page latch was acquired by buf_page_create(), all changes to the page must have been applied. 
*/ - ut_ad(pages.find(page_id) == pages.end()); + ut_d(mysql_mutex_lock(&mutex)); + ut_ad(pages.find(p->first) == pages.end()); + ut_d(mysql_mutex_unlock(&mutex)); space->release(); goto nothing_recoverable; } } - ut_ad(&recs == &pages.find(page_id)->second); - i.created= true; - map::iterator r= p++; - block= recv_recover_page(block, mtr, r, space, &i); + ut_d(mysql_mutex_lock(&mutex)); + ut_ad(&recs == &pages.find(p->first)->second); + ut_d(mysql_mutex_unlock(&mutex)); + init.created= true; + block= recv_recover_page(block, mtr, recs, space, &init); ut_ad(mtr.has_committed()); - if (block) - { - recs.log.clear(); - pages.erase(r); - } - else - block= reinterpret_cast(-1); - - if (pages.empty()) - pthread_cond_signal(&cond); - if (space) space->release(); - return block; + return block ? block : reinterpret_cast(-1); } /** Attempt to initialize a page based on redo log records. @param page_id page identifier @return recovered block @retval nullptr if the page cannot be initialized based on log records */ -buf_block_t *recv_sys_t::recover_low(const page_id_t page_id) +ATTRIBUTE_COLD buf_block_t *recv_sys_t::recover_low(const page_id_t page_id) { - buf_block_t *free_block= buf_LRU_get_free_block(false); - buf_block_t *block= nullptr; - mysql_mutex_lock(&mutex); map::iterator p= pages.find(page_id); - if (p != pages.end() && p->second.state == page_recv_t::RECV_WILL_NOT_READ) + if (p != pages.end() && !p->second.being_processed && p->second.skip_read) { + p->second.being_processed= 1; + init &init= mlog_init.last(page_id); + mysql_mutex_unlock(&mutex); + buf_block_t *free_block= buf_LRU_get_free_block(false); mtr_t mtr; - block= recover_low(page_id, p, mtr, free_block); + buf_block_t *block= recover_low(p, mtr, free_block, init); + p->second.being_processed= -1; ut_ad(!block || block == reinterpret_cast(-1) || block == free_block); + if (UNIV_UNLIKELY(!block)) + buf_pool.free_block(free_block); + return block; } mysql_mutex_unlock(&mutex); - if (UNIV_UNLIKELY(!block)) - buf_pool.free_block(free_block); - return block; + return nullptr; } inline fil_space_t *fil_system_t::find(const char *path) const @@ -3427,45 +3820,18 @@ void recv_sys_t::apply(bool last_batch) mysql_mutex_assert_owner(&mutex); - timespec abstime; - - while (apply_batch_on) - { - if (is_corrupt_log()) - return; - if (last_batch) - my_cond_wait(&cond, &mutex.m_mutex); - else - { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif - log_sys.latch.wr_unlock(); - set_timespec_nsec(abstime, 500000000ULL); /* 0.5s */ - my_cond_timedwait(&cond, &mutex.m_mutex, &abstime); - mysql_mutex_unlock(&mutex); - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - } - } - - recv_no_ibuf_operations = !last_batch || - srv_operation == SRV_OPERATION_RESTORE || - srv_operation == SRV_OPERATION_RESTORE_EXPORT; - - mtr_t mtr; + garbage_collect(); if (!pages.empty()) { - const char *msg= last_batch - ? 
"Starting final batch to recover" - : "Starting a batch to recover"; - const size_t n= pages.size(); - sql_print_information("InnoDB: %s %zu pages from redo log.", msg, n); - sd_notifyf(0, "STATUS=%s %zu pages from redo log", msg, n); + recv_no_ibuf_operations = !last_batch || + srv_operation == SRV_OPERATION_RESTORE || + srv_operation == SRV_OPERATION_RESTORE_EXPORT; + ut_ad(!last_batch || lsn == scanned_lsn); + progress_time= time(nullptr); + report_progress(); apply_log_recs= true; - apply_batch_on= true; for (auto id= srv_undo_tablespaces_open; id--;) { @@ -3491,132 +3857,71 @@ void recv_sys_t::apply(bool last_batch) fil_system.extend_to_recv_size(); - /* We must release log_sys.latch and recv_sys.mutex before - invoking buf_LRU_get_free_block(). Allocating a block may initiate - a redo log write and therefore acquire log_sys.latch. To avoid - deadlocks, log_sys.latch must not be acquired while holding - recv_sys.mutex. */ - mysql_mutex_unlock(&mutex); - if (!last_batch) - log_sys.latch.wr_unlock(); + fil_space_t *space= nullptr; + uint32_t space_id= ~0; + buf_block_t *free_block= nullptr; - buf_block_t *free_block= buf_LRU_get_free_block(false); - - if (!last_batch) - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - - for (map::iterator p= pages.begin(); p != pages.end(); ) + for (pages_it= pages.begin(); pages_it != pages.end(); + pages_it= pages.begin()) { - const page_id_t page_id= p->first; - ut_ad(!p->second.log.empty()); - - const uint32_t space_id= page_id.space(); - auto d= deferred_spaces.defers.find(space_id); - if (d != deferred_spaces.defers.end()) + if (!free_block) { - if (d->second.deleted) - { - /* For deleted files we must preserve the entry in deferred_spaces */ -erase_for_space: - while (p != pages.end() && p->first.space() == space_id) - { - map::iterator r= p++; - r->second.log.clear(); - pages.erase(r); - } - } - else if (recover_deferred(p, d->second.file_name, free_block)) - { - if (!srv_force_recovery) - set_corrupt_fs(); - deferred_spaces.defers.erase(d); - goto erase_for_space; - } - else - deferred_spaces.defers.erase(d); - if (!free_block) - goto next_free_block; - p= pages.lower_bound(page_id); - continue; + if (!last_batch) + log_sys.latch.wr_unlock(); + wait_for_pool(1); + pages_it= pages.begin(); + mysql_mutex_unlock(&mutex); + /* We must release log_sys.latch and recv_sys.mutex before + invoking buf_LRU_get_free_block(). Allocating a block may initiate + a redo log write and therefore acquire log_sys.latch. To avoid + deadlocks, log_sys.latch must not be acquired while holding + recv_sys.mutex. */ + free_block= buf_LRU_get_free_block(false); + if (!last_batch) + log_sys.latch.wr_lock(SRW_LOCK_CALL); + mysql_mutex_lock(&mutex); + pages_it= pages.begin(); } - switch (p->second.state) { - case page_recv_t::RECV_BEING_READ: - case page_recv_t::RECV_BEING_PROCESSED: - p++; - continue; - case page_recv_t::RECV_WILL_NOT_READ: - if (UNIV_LIKELY(!!recover_low(page_id, p, mtr, free_block))) - { -next_free_block: - mysql_mutex_unlock(&mutex); - if (!last_batch) - log_sys.latch.wr_unlock(); - free_block= buf_LRU_get_free_block(false); - if (!last_batch) - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - break; - } - ut_ad(p == pages.end() || p->first > page_id); - continue; - case page_recv_t::RECV_NOT_PROCESSED: - recv_read_in_area(page_id, p); - } - p= pages.lower_bound(page_id); - /* Ensure that progress will be made. 
*/ - ut_ad(p == pages.end() || p->first > page_id || - p->second.state >= page_recv_t::RECV_BEING_READ); - } - - buf_pool.free_block(free_block); - - /* Wait until all the pages have been processed */ - for (;;) - { - const bool empty= pages.empty(); - if (empty && !os_aio_pending_reads()) - break; - - if (!is_corrupt_fs() && !is_corrupt_log()) + while (pages_it != pages.end()) { - if (last_batch) + if (is_corrupt_fs() || is_corrupt_log()) { - if (!empty) - my_cond_wait(&cond, &mutex.m_mutex); - else + if (space) + space->release(); + if (free_block) { mysql_mutex_unlock(&mutex); - os_aio_wait_until_no_pending_reads(false); + mysql_mutex_lock(&buf_pool.mutex); + buf_LRU_block_free_non_file_page(free_block); + mysql_mutex_unlock(&buf_pool.mutex); mysql_mutex_lock(&mutex); - ut_ad(pages.empty()); } + return; } - else - { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif - log_sys.latch.wr_unlock(); - set_timespec_nsec(abstime, 500000000ULL); /* 0.5s */ - my_cond_timedwait(&cond, &mutex.m_mutex, &abstime); - mysql_mutex_unlock(&mutex); - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - } - continue; + if (apply_batch(space_id, space, free_block, last_batch)) + break; } - if (is_corrupt_fs() && !srv_force_recovery) - sql_print_information("InnoDB: Set innodb_force_recovery=1" - " to ignore corrupted pages."); - return; + } + + if (space) + space->release(); + + if (free_block) + { + mysql_mutex_lock(&buf_pool.mutex); + buf_LRU_block_free_non_file_page(free_block); + mysql_mutex_unlock(&buf_pool.mutex); } } if (last_batch) - /* We skipped this in buf_page_create(). */ - mlog_init.mark_ibuf_exist(mtr); + { + if (!recv_no_ibuf_operations) + /* We skipped this in buf_page_create(). */ + mlog_init.mark_ibuf_exist(); + mlog_init.clear(); + } else { mlog_init.reset(); @@ -3625,21 +3930,22 @@ next_free_block: mysql_mutex_unlock(&mutex); - if (last_batch && srv_operation != SRV_OPERATION_RESTORE && - srv_operation != SRV_OPERATION_RESTORE_EXPORT) - /* Instead of flushing, last_batch sorts the buf_pool.flush_list - in ascending order of buf_page_t::oldest_modification. */ - log_sort_flush_list(); - else - buf_flush_sync_batch(lsn); - if (!last_batch) { + buf_flush_sync_batch(lsn); buf_pool_invalidate(); log_sys.latch.wr_lock(SRW_LOCK_CALL); } + else if (srv_operation == SRV_OPERATION_RESTORE || + srv_operation == SRV_OPERATION_RESTORE_EXPORT) + buf_flush_sync_batch(lsn); + else + /* Instead of flushing, last_batch sorts the buf_pool.flush_list + in ascending order of buf_page_t::oldest_modification. */ + log_sort_flush_list(); + #ifdef HAVE_PMEM - else if (log_sys.is_pmem()) + if (last_batch && log_sys.is_pmem()) mprotect(log_sys.buf, len, PROT_READ | PROT_WRITE); #endif @@ -3649,35 +3955,24 @@ next_free_block: clear(); } -/** Check whether the number of read redo log blocks exceeds the maximum. -@return whether the memory is exhausted */ -inline bool recv_sys_t::is_memory_exhausted() -{ - if (UT_LIST_GET_LEN(blocks) * 3 < buf_pool.get_n_pages()) - return false; - DBUG_PRINT("ib_log",("Ran out of memory and last stored lsn " LSN_PF - " last stored offset %zu\n", lsn, offset)); - return true; -} - /** Scan log_t::FORMAT_10_8 log store records to the parsing buffer. 
@param last_phase whether changes can be applied to the tablespaces @return whether rescan is needed (not everything was stored) */ static bool recv_scan_log(bool last_phase) { DBUG_ENTER("recv_scan_log"); - DBUG_ASSERT(!last_phase || recv_sys.file_checkpoint); ut_ad(log_sys.is_latest()); const size_t block_size_1{log_sys.get_block_size() - 1}; mysql_mutex_lock(&recv_sys.mutex); - recv_sys.clear(); ut_d(recv_sys.after_apply= last_phase); - ut_ad(!last_phase || recv_sys.file_checkpoint); + if (!last_phase) + recv_sys.clear(); + else + ut_ad(recv_sys.file_checkpoint); - store_t store= last_phase - ? STORE_IF_EXISTS : recv_sys.file_checkpoint ? STORE_YES : STORE_NO; + bool store{recv_sys.file_checkpoint != 0}; size_t buf_size= log_sys.buf_size; #ifdef HAVE_PMEM if (log_sys.is_pmem()) @@ -3694,6 +3989,7 @@ static bool recv_scan_log(bool last_phase) recv_sys.len= 0; } + lsn_t rewound_lsn= 0; for (ut_d(lsn_t source_offset= 0);;) { #ifndef SUX_LOCK_GENERIC @@ -3741,27 +4037,29 @@ static bool recv_scan_log(bool last_phase) if (UNIV_UNLIKELY(!recv_needed_recovery)) { - ut_ad(store == (recv_sys.file_checkpoint ? STORE_YES : STORE_NO)); + ut_ad(!last_phase); ut_ad(recv_sys.lsn >= log_sys.next_checkpoint_lsn); - for (;;) + if (!store) { - const byte& b{log_sys.buf[recv_sys.offset]}; - r= recv_sys.parse_pmem(store); - if (r == recv_sys_t::OK) + ut_ad(!recv_sys.file_checkpoint); + for (;;) { - if (store == STORE_NO && - (b == FILE_CHECKPOINT + 2 + 8 || (b & 0xf0) == FILE_MODIFY)) - continue; - } - else if (r == recv_sys_t::PREMATURE_EOF) - goto read_more; - else if (store != STORE_NO) - break; + const byte& b{log_sys.buf[recv_sys.offset]}; + r= recv_sys.parse_pmem(false); + switch (r) { + case recv_sys_t::PREMATURE_EOF: + goto read_more; + default: + ut_ad(r == recv_sys_t::GOT_EOF); + break; + case recv_sys_t::OK: + if (b == FILE_CHECKPOINT + 2 + 8 || (b & 0xf0) == FILE_MODIFY) + continue; + } - if (store == STORE_NO) - { const lsn_t end{recv_sys.file_checkpoint}; + ut_ad(!end || end == recv_sys.lsn); mysql_mutex_unlock(&recv_sys.mutex); if (!end) @@ -3771,45 +4069,73 @@ static bool recv_scan_log(bool last_phase) ") at " LSN_PF, log_sys.next_checkpoint_lsn, recv_sys.lsn); } - else - ut_ad(end == recv_sys.lsn); DBUG_RETURN(true); } - - recv_needed_recovery= true; - if (srv_read_only_mode) - { - mysql_mutex_unlock(&recv_sys.mutex); - DBUG_RETURN(false); + } + else + { + ut_ad(recv_sys.file_checkpoint != 0); + switch ((r= recv_sys.parse_pmem(false))) { + case recv_sys_t::PREMATURE_EOF: + goto read_more; + case recv_sys_t::GOT_EOF: + break; + default: + ut_ad(r == recv_sys_t::OK); + recv_needed_recovery= true; + if (srv_read_only_mode) + { + mysql_mutex_unlock(&recv_sys.mutex); + DBUG_RETURN(false); + } + sql_print_information("InnoDB: Starting crash recovery from" + " checkpoint LSN=" LSN_PF, + log_sys.next_checkpoint_lsn); } - sql_print_information("InnoDB: Starting crash recovery from" - " checkpoint LSN=" LSN_PF, - log_sys.next_checkpoint_lsn); - break; } } - while ((r= recv_sys.parse_pmem(store)) == recv_sys_t::OK) + if (!store) + skip_the_rest: + while ((r= recv_sys.parse_pmem(false)) == recv_sys_t::OK); + else { - if (store != STORE_NO && recv_sys.is_memory_exhausted()) + uint16_t count= 0; + while ((r= recv_sys.parse_pmem(last_phase)) == recv_sys_t::OK) + if (!++count && recv_sys.report(time(nullptr))) + { + const size_t n= recv_sys.pages.size(); + sql_print_information("InnoDB: Parsed redo log up to LSN=" LSN_PF + "; to recover: %zu pages", recv_sys.lsn, n); + 
service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "Parsed redo log up to LSN=" LSN_PF + "; to recover: %zu pages", + recv_sys.lsn, n); + } + if (r == recv_sys_t::GOT_OOM) { - ut_ad(last_phase == (store == STORE_IF_EXISTS)); - if (store == STORE_YES) - { - store= STORE_NO; - recv_sys.last_stored_lsn= recv_sys.lsn; - } - else - { - ut_ad(store == STORE_IF_EXISTS); - recv_sys.apply(false); - } + ut_ad(!last_phase); + rewound_lsn= recv_sys.lsn; + store= false; + if (recv_sys.scanned_lsn <= 1) + goto skip_the_rest; + ut_ad(recv_sys.file_checkpoint); + goto func_exit; } } if (r != recv_sys_t::PREMATURE_EOF) { ut_ad(r == recv_sys_t::GOT_EOF); + got_eof: + ut_ad(recv_sys.is_initialised()); + if (recv_sys.scanned_lsn > 1) + { + ut_ad(recv_sys.scanned_lsn == recv_sys.lsn); + break; + } + recv_sys.scanned_lsn= recv_sys.lsn; + sql_print_information("InnoDB: End of log at LSN=" LSN_PF, recv_sys.lsn); break; } @@ -3822,7 +4148,7 @@ static bool recv_scan_log(bool last_phase) break; if (recv_sys.offset < log_sys.get_block_size()) - break; + goto got_eof; if (recv_sys.offset > buf_size / 4 || (recv_sys.offset > block_size_1 && @@ -3835,21 +4161,21 @@ static bool recv_scan_log(bool last_phase) } } - const bool corrupt= recv_sys.is_corrupt_log() || recv_sys.is_corrupt_fs(); - recv_sys.maybe_finish_batch(); if (last_phase) + { + ut_ad(!rewound_lsn); + ut_ad(recv_sys.lsn >= recv_sys.file_checkpoint); log_sys.set_recovered_lsn(recv_sys.lsn); + } + else if (rewound_lsn) + { + ut_ad(!store); + ut_ad(recv_sys.file_checkpoint); + recv_sys.lsn= rewound_lsn; + } +func_exit: mysql_mutex_unlock(&recv_sys.mutex); - - if (corrupt) - DBUG_RETURN(false); - - DBUG_PRINT("ib_log", - ("%s " LSN_PF " completed", last_phase ? "rescan" : "scan", - recv_sys.lsn)); - ut_ad(!last_phase || recv_sys.lsn >= recv_sys.file_checkpoint); - - DBUG_RETURN(store == STORE_NO); + DBUG_RETURN(!store); } /** Report a missing tablespace for which page-redo log exists. @@ -3945,8 +4271,8 @@ next: /* fall through */ case file_name_t::DELETED: recv_sys_t::map::iterator r = p++; - r->second.log.clear(); - recv_sys.pages.erase(r); + recv_sys.pages_it_invalidate(r); + recv_sys.erase(r); continue; } ut_ad(0); @@ -3970,8 +4296,6 @@ func_exit: continue; } - missing_tablespace = true; - if (srv_force_recovery) { sql_print_warning("InnoDB: Tablespace " UINT32PF " was not found at %.*s," @@ -3991,14 +4315,11 @@ func_exit: rs.first, int(rs.second.name.size()), rs.second.name.data()); + } else { + missing_tablespace = true; } } - if (!rescan || srv_force_recovery > 0) { - missing_tablespace = false; - } - - err = DB_SUCCESS; goto func_exit; } @@ -4232,35 +4553,41 @@ read_only_recovery: goto early_exit; } - /* If there is any missing tablespace and rescan is needed - then there is a possiblity that hash table will not contain - all space ids redo logs. Rescan the remaining unstored - redo logs for the validation of missing tablespace. */ - ut_ad(rescan || !missing_tablespace); + if (missing_tablespace) { + ut_ad(rescan); + /* If any tablespaces seem to be missing, + validate the remaining log records. */ - while (missing_tablespace) { - recv_sys.lsn = recv_sys.last_stored_lsn; - DBUG_PRINT("ib_log", ("Rescan of redo log to validate " - "the missing tablespace. 
Scan " - "from last stored LSN " LSN_PF, - recv_sys.lsn)); - rescan = recv_scan_log(false); - ut_ad(!recv_sys.is_corrupt_fs()); + do { + rescan = recv_scan_log(false); + ut_ad(!recv_sys.is_corrupt_fs()); - missing_tablespace = false; + if (recv_sys.is_corrupt_log()) { + goto err_exit; + } - if (recv_sys.is_corrupt_log()) { - goto err_exit; - } + missing_tablespace = false; - err = recv_validate_tablespace( - rescan, missing_tablespace); + err = recv_validate_tablespace( + rescan, missing_tablespace); - if (err != DB_SUCCESS) { - goto early_exit; - } + if (err != DB_SUCCESS) { + goto early_exit; + } + } while (missing_tablespace); rescan = true; + /* Because in the loop above we overwrote the + initially stored recv_sys.pages, we must + restart parsing the log from the very beginning. */ + + /* FIXME: Use a separate loop for checking for + tablespaces (not individual pages), while retaining + the initial recv_sys.pages. */ + mysql_mutex_lock(&recv_sys.mutex); + recv_sys.clear(); + recv_sys.lsn = log_sys.next_checkpoint_lsn; + mysql_mutex_unlock(&recv_sys.mutex); } if (srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { @@ -4271,8 +4598,7 @@ read_only_recovery: ut_ad(srv_force_recovery <= SRV_FORCE_NO_UNDO_LOG_SCAN); if (rescan) { - recv_sys.lsn = log_sys.next_checkpoint_lsn; - rescan = recv_scan_log(true); + recv_scan_log(true); if ((recv_sys.is_corrupt_log() && !srv_force_recovery) || recv_sys.is_corrupt_fs()) { diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index aafa4361b0b..217cf153b59 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -3411,15 +3411,12 @@ os_file_get_status( return(ret); } - -extern void fil_aio_callback(const IORequest &request); - -static void io_callback(tpool::aiocb *cb) +static void io_callback_errorcheck(const tpool::aiocb *cb) { - const IORequest &request= *static_cast - (static_cast(cb->m_userdata)); if (cb->m_err != DB_SUCCESS) { + const IORequest &request= *static_cast + (static_cast(cb->m_userdata)); ib::fatal() << "IO Error: " << cb->m_err << " during " << (request.is_async() ? "async " : "sync ") << (request.is_LRU() ? 
"lru " : "") << @@ -3427,19 +3424,36 @@ static void io_callback(tpool::aiocb *cb) " of " << cb->m_len << " bytes, for file " << cb->m_fh << ", returned " << cb->m_ret_len; } - /* Return cb back to cache*/ - if (cb->m_opcode == tpool::aio_opcode::AIO_PREAD) - { - ut_ad(read_slots->contains(cb)); - fil_aio_callback(request); - read_slots->release(cb); - } - else - { - ut_ad(write_slots->contains(cb)); - fil_aio_callback(request); - write_slots->release(cb); - } +} + +static void fake_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast(c); + ut_ad(read_slots->contains(cb)); + static_cast(static_cast(cb->m_userdata))-> + fake_read_complete(cb->m_offset); + read_slots->release(cb); +} + +static void read_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast(c); + ut_ad(cb->m_opcode == tpool::aio_opcode::AIO_PREAD); + io_callback_errorcheck(cb); + ut_ad(read_slots->contains(cb)); + static_cast + (static_cast(cb->m_userdata))->read_complete(); + read_slots->release(cb); +} + +static void write_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast(c); + ut_ad(cb->m_opcode == tpool::aio_opcode::AIO_PWRITE); + ut_ad(write_slots->contains(cb)); + static_cast + (static_cast(cb->m_userdata))->write_complete(); + write_slots->release(cb); } #ifdef LINUX_NATIVE_AIO @@ -3684,6 +3698,28 @@ void os_aio_wait_until_no_pending_reads(bool declare) tpool::tpool_wait_end(); } +/** Submit a fake read request during crash recovery. +@param type fake read request +@param offset additional context */ +void os_fake_read(const IORequest &type, os_offset_t offset) +{ + tpool::aiocb *cb= read_slots->acquire(); + + cb->m_group= read_slots->get_task_group(); + cb->m_fh= type.node->handle.m_file; + cb->m_buffer= nullptr; + cb->m_len= 0; + cb->m_offset= offset; + cb->m_opcode= tpool::aio_opcode::AIO_PREAD; + new (cb->m_userdata) IORequest{type}; + cb->m_internal_task.m_func= fake_io_callback; + cb->m_internal_task.m_arg= cb; + cb->m_internal_task.m_group= cb->m_group; + + srv_thread_pool->submit_task(&cb->m_internal_task); +} + + /** Request a read or write. @param type I/O request @param buf buffer @@ -3729,23 +3765,32 @@ func_exit: return err; } + io_slots* slots; + tpool::callback_func callback; + tpool::aio_opcode opcode; + if (type.is_read()) { ++os_n_file_reads; + slots = read_slots; + callback = read_io_callback; + opcode = tpool::aio_opcode::AIO_PREAD; } else { ++os_n_file_writes; + slots = write_slots; + callback = write_io_callback; + opcode = tpool::aio_opcode::AIO_PWRITE; } compile_time_assert(sizeof(IORequest) <= tpool::MAX_AIO_USERDATA_LEN); - io_slots* slots= type.is_read() ? read_slots : write_slots; tpool::aiocb* cb = slots->acquire(); cb->m_buffer = buf; - cb->m_callback = (tpool::callback_func)io_callback; + cb->m_callback = callback; cb->m_group = slots->get_task_group(); cb->m_fh = type.node->handle.m_file; cb->m_len = (int)n; cb->m_offset = offset; - cb->m_opcode = type.is_read() ? tpool::aio_opcode::AIO_PREAD : tpool::aio_opcode::AIO_PWRITE; + cb->m_opcode = opcode; new (cb->m_userdata) IORequest{type}; if (srv_thread_pool->submit_io(cb)) { @@ -3753,6 +3798,7 @@ func_exit: os_file_handle_error(type.node->name, type.is_read() ? 
"aio read" : "aio write"); err = DB_IO_ERROR; + type.node->space->release(); } goto func_exit; From f2c17cc9d9bcd634887846d3064bcb71243f9cc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2023 15:20:07 +0300 Subject: [PATCH 27/76] MDEV-29911 InnoDB recovery and mariadb-backup --prepare fail to report detailed progress This is a 10.6 port of commit 2f9e264781f702b8da1ed418ac9f4f5e8f8aa843 from MariaDB Server 10.9 that is missing some optimization due to a more complex redo log format and recovery logic (which was simplified in commit 685d958e38b825ad9829be311f26729cccf37c46). The progress reporting of InnoDB crash recovery was rather intermittent. Nothing was reported during the single-threaded log record parsing, which could consume minutes when parsing a large log. During log application, there only was progress reporting in background threads that would be invoked on data page read completion. The progress reporting here will be detailed like this: InnoDB: Starting crash recovery from checkpoint LSN=628599973,5653727799 InnoDB: Read redo log up to LSN=1963895808 InnoDB: Multi-batch recovery needed at LSN 2534560930 InnoDB: Read redo log up to LSN=3312233472 InnoDB: Read redo log up to LSN=1599646720 InnoDB: Read redo log up to LSN=2160831488 InnoDB: To recover: LSN 2806789376/2806819840; 195082 pages InnoDB: To recover: LSN 2806789376/2806819840; 63507 pages InnoDB: Read redo log up to LSN=3195776000 InnoDB: Read redo log up to LSN=3687099392 InnoDB: Read redo log up to LSN=4165315584 InnoDB: To recover: LSN 4374395699/4374440960; 241454 pages InnoDB: To recover: LSN 4374395699/4374440960; 123701 pages InnoDB: Read redo log up to LSN=4508724224 InnoDB: Read redo log up to LSN=5094550528 InnoDB: To recover: 205230 pages The previous messages "Starting a batch to recover" or "Starting a final batch to recover" will be replaced by "To recover: ... pages" messages. If a batch lasts longer than 15 seconds, then there will be progress reports every 15 seconds, showing the number of remaining pages. For the non-final batch, the "To recover:" message includes two end LSN: that of the batch, and of the recovered log. This is the primary measure of progress. The batch will end once the number of pages to recover reaches 0. If recovery is possible in a single batch, the output will look like this, with a shorter "To recover:" message that counts only the remaining pages: InnoDB: Starting crash recovery from checkpoint LSN=628599973,5653727799 InnoDB: Read redo log up to LSN=1984539648 InnoDB: Read redo log up to LSN=2710875136 InnoDB: Read redo log up to LSN=3358895104 InnoDB: Read redo log up to LSN=3965299712 InnoDB: Read redo log up to LSN=4557417472 InnoDB: Read redo log up to LSN=5219527680 InnoDB: To recover: 450915 pages We will also speed up recovery by improving the memory management and implementing multi-threaded recovery of data pages that will not need to be read into the buffer pool ("fake read"). Log application in the "fake read" threads will be protected by an atomic being_recovered field and exclusive buf_page_t::lock. Recovery will reserve for data pages two thirds of the buffer pool, or 256 pages, whichever is smaller. Previously, we could only use at most one third of the buffer pool for buffered log records. This would typically mean that with large buffer pools, recovery unnecessary consisted of multiple batches. If recovery runs out of memory, it will "roll back" or "rewind" the current mini-transaction. 
The recv_sys.recovered_lsn and recv_sys.pages will correspond to the "out of memory LSN", at the end of the previous complete mini-transaction. If recovery runs out of memory while executing the final recovery batch, we can simply invoke recv_sys.apply(false) to make room, and resume parsing. If recovery runs out of memory before the final batch, we will scan the redo log to the end and check for any missing or inconsistent files. In this version of the patch, we will throw away any previously buffered recv_sys.pages and rescan the log from the checkpoint onwards. recv_sys_t::pages_it: A cached iterator to recv_sys.pages. recv_sys_t::is_memory_exhausted(): Remove. We will have out-of-memory handling deep inside recv_sys_t::parse(). recv_sys_t::rewind(), page_recv_t::recs_t::rewind(): Remove all log starting with a specific LSN. IORequest::write_complete(), IORequest::read_complete(): Replaces fil_aio_callback(). read_io_callback(), write_io_callback(): Replaces io_callback(). IORequest::fake_read_complete(), fake_io_callback(), os_fake_read(): Process a "fake read" request for concurrent recovery. recv_sys_t::apply_batch(): Choose a number of successive pages for a recovery batch. recv_sys_t::erase(recv_sys_t::map::iterator): Remove log records for a page whose recovery is not in progress. Log application threads will not invoke this; they will only set being_recovered=-1 to indicate that the entry is no longer needed. recv_sys_t::garbage_collect(): Remove all being_recovered=-1 entries. recv_sys_t::wait_for_pool(): Wait for some space to become available in the buffer pool. mlog_init_t::mark_ibuf_exist(): Avoid calls to recv_sys::recover_low() via ibuf_page_exists() and buf_page_get_low(). Such calls would lead to double locking of recv_sys.mutex, which depending on implementation could cause a deadlock. We will use lower-level calls to look up index pages. buf_LRU_block_remove_hashed(): Disable consistency checks for freed ROW_FORMAT=COMPRESSED pages. Their contents could be uninitialized garbage. This fixes an occasional failure of the test innodb.innodb_bulk_create_index_debug. Tested by: Matthias Leich --- storage/innobase/buf/buf0flu.cc | 1 + storage/innobase/buf/buf0lru.cc | 9 +- storage/innobase/buf/buf0rea.cc | 86 +- storage/innobase/fil/fil0fil.cc | 70 +- storage/innobase/include/buf0buf.h | 3 +- storage/innobase/include/buf0rea.h | 13 +- storage/innobase/include/log0recv.h | 161 +-- storage/innobase/include/os0file.h | 9 + storage/innobase/log/log0recv.cc | 1507 ++++++++++++++++----------- storage/innobase/os/os0file.cc | 90 +- 10 files changed, 1135 insertions(+), 814 deletions(-) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 68dbaee5e7d..67c79702ec8 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2483,6 +2483,7 @@ ATTRIBUTE_COLD void buf_flush_page_cleaner_init() /** Flush the buffer pool on shutdown. */ ATTRIBUTE_COLD void buf_flush_buffer_pool() { + ut_ad(!os_aio_pending_reads()); ut_ad(!buf_page_cleaner_is_active); ut_ad(!buf_flush_sync_lsn); diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index feb15fc226c..6cc4f1c3987 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1095,7 +1095,11 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, ut_a(!zip || !bpage->oldest_modification()); ut_ad(bpage->zip_size()); - + /* Skip consistency checks if the page was freed. 
+ In recovery, we could get a sole FREE_PAGE record + and nothing else, for a ROW_FORMAT=COMPRESSED page. + Its contents would be garbage. */ + if (!bpage->is_freed()) switch (fil_page_get_type(page)) { case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: @@ -1226,6 +1230,7 @@ void buf_pool_t::corrupted_evict(buf_page_t *bpage, uint32_t state) buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(id.fold()); page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); + recv_sys.free_corrupted_page(id); mysql_mutex_lock(&mutex); hash_lock.lock(); @@ -1250,8 +1255,6 @@ void buf_pool_t::corrupted_evict(buf_page_t *bpage, uint32_t state) buf_LRU_block_free_hashed_page(reinterpret_cast(bpage)); mysql_mutex_unlock(&mutex); - - recv_sys.free_corrupted_page(id); } /** Update buf_pool.LRU_old_ratio. diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index c2ab50cc674..cf76a9bd93a 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -655,61 +655,35 @@ failed: return count; } -/** @return whether a page has been freed */ -inline bool fil_space_t::is_freed(uint32_t page) +/** Schedule a page for recovery. +@param space tablespace +@param page_id page identifier +@param recs log records +@param init page initialization, or nullptr if the page needs to be read */ +void buf_read_recover(fil_space_t *space, const page_id_t page_id, + page_recv_t &recs, recv_init *init) { - std::lock_guard freed_lock(freed_range_mutex); - return freed_ranges.contains(page); -} - -/** Issues read requests for pages which recovery wants to read in. -@param[in] space_id tablespace id -@param[in] page_nos array of page numbers to read, with the -highest page number the last in the array -@param[in] n number of page numbers in the array */ -void buf_read_recv_pages(ulint space_id, const uint32_t* page_nos, ulint n) -{ - fil_space_t* space = fil_space_t::get(space_id); - - if (!space) { - /* The tablespace is missing or unreadable: do nothing */ - return; - } - - const ulint zip_size = space->zip_size(); - - for (ulint i = 0; i < n; i++) { - - /* Ignore if the page already present in freed ranges. 
*/ - if (space->is_freed(page_nos[i])) { - continue; - } - - const page_id_t cur_page_id(space_id, page_nos[i]); - - ulint limit = 0; - for (ulint j = 0; j < buf_pool.n_chunks; j++) { - limit += buf_pool.chunks[j].size / 2; - } - - if (os_aio_pending_reads() >= limit) { - os_aio_wait_until_no_pending_reads(false); - } - - space->reacquire(); - switch (buf_read_page_low(space, false, BUF_READ_ANY_PAGE, - cur_page_id, zip_size, true)) { - case DB_SUCCESS: case DB_SUCCESS_LOCKED_REC: - break; - default: - sql_print_error("InnoDB: Recovery failed to read page " - UINT32PF " from %s", - cur_page_id.page_no(), - space->chain.start->name); - } - } - - DBUG_PRINT("ib_buf", ("recovery read (%u pages) for %s", n, - space->chain.start->name)); - space->release(); + ut_ad(space->id == page_id.space()); + space->reacquire(); + const ulint zip_size= space->zip_size(); + + if (init) + { + if (buf_page_t *bpage= buf_page_init_for_read(BUF_READ_ANY_PAGE, page_id, + zip_size, true)) + { + ut_ad(bpage->in_file()); + os_fake_read(IORequest{bpage, (buf_tmp_buffer_t*) &recs, + UT_LIST_GET_FIRST(space->chain), + IORequest::READ_ASYNC}, ptrdiff_t(init)); + } + } + else if (dberr_t err= buf_read_page_low(space, false, BUF_READ_ANY_PAGE, + page_id, zip_size, true)) + { + if (err != DB_SUCCESS_LOCKED_REC) + sql_print_error("InnoDB: Recovery failed to read page " + UINT32PF " from %s", + page_id.page_no(), space->chain.start->name); + } } diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 23f0cf75f39..71d96bf2a71 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2823,53 +2823,55 @@ func_exit: #include -/** Callback for AIO completion */ -void fil_aio_callback(const IORequest &request) +void IORequest::write_complete() const { ut_ad(fil_validate_skip()); - ut_ad(request.node); + ut_ad(node); + ut_ad(is_write()); - if (!request.bpage) + if (!bpage) { ut_ad(!srv_read_only_mode); - if (request.type == IORequest::DBLWR_BATCH) - buf_dblwr.flush_buffered_writes_completed(request); + if (type == IORequest::DBLWR_BATCH) + buf_dblwr.flush_buffered_writes_completed(*this); else - ut_ad(request.type == IORequest::WRITE_ASYNC); -write_completed: - request.node->complete_write(); - } - else if (request.is_write()) - { - buf_page_write_complete(request); - goto write_completed; + ut_ad(type == IORequest::WRITE_ASYNC); } else + buf_page_write_complete(*this); + + node->complete_write(); + node->space->release(); +} + +void IORequest::read_complete() const +{ + ut_ad(fil_validate_skip()); + ut_ad(node); + ut_ad(is_read()); + ut_ad(bpage); + + /* IMPORTANT: since i/o handling for reads will read also the insert + buffer in fil_system.sys_space, we have to be very careful not to + introduce deadlocks. We never close fil_system.sys_space data files + and never issue asynchronous reads of change buffer pages. */ + const page_id_t id(bpage->id()); + + if (dberr_t err= bpage->read_complete(*node)) { - ut_ad(request.is_read()); - - /* IMPORTANT: since i/o handling for reads will read also the insert - buffer in fil_system.sys_space, we have to be very careful not to - introduce deadlocks. We never close fil_system.sys_space data - files and never issue asynchronous reads of change buffer pages. 
*/ - const page_id_t id(request.bpage->id()); - - if (dberr_t err= request.bpage->read_complete(*request.node)) + if (recv_recovery_is_on() && !srv_force_recovery) { - if (recv_recovery_is_on() && !srv_force_recovery) - { - mysql_mutex_lock(&recv_sys.mutex); - recv_sys.set_corrupt_fs(); - mysql_mutex_unlock(&recv_sys.mutex); - } - - if (err != DB_FAIL) - ib::error() << "Failed to read page " << id.page_no() - << " from file '" << request.node->name << "': " << err; + mysql_mutex_lock(&recv_sys.mutex); + recv_sys.set_corrupt_fs(); + mysql_mutex_unlock(&recv_sys.mutex); } + + if (err != DB_FAIL) + ib::error() << "Failed to read page " << id.page_no() + << " from file '" << node->name << "': " << err; } - request.node->space->release(); + node->space->release(); } /** Flush to disk the writes in file spaces of the given type diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 2b4732a64a0..d2b0112da7c 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -75,8 +75,7 @@ struct buf_pool_info_t ulint flush_list_len; /*!< Length of buf_pool.flush_list */ ulint n_pend_unzip; /*!< buf_pool.n_pend_unzip, pages pending decompress */ - ulint n_pend_reads; /*!< buf_pool.n_pend_reads, pages - pending read */ + ulint n_pend_reads; /*!< os_aio_pending_reads() */ ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */ ulint n_pending_flush_list; /*!< Pages pending flush in FLUSH LIST */ diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h index d898c5efc63..3dd085dda5c 100644 --- a/storage/innobase/include/buf0rea.h +++ b/storage/innobase/include/buf0rea.h @@ -102,12 +102,13 @@ which could result in a deadlock if the OS does not support asynchronous io. ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size, bool ibuf); -/** Issues read requests for pages which recovery wants to read in. -@param[in] space_id tablespace id -@param[in] page_nos array of page numbers to read, with the -highest page number the last in the array -@param[in] n number of page numbers in the array */ -void buf_read_recv_pages(ulint space_id, const uint32_t* page_nos, ulint n); +/** Schedule a page for recovery. +@param space tablespace +@param page_id page identifier +@param recs log records +@param init page initialization, or nullptr if the page needs to be read */ +void buf_read_recover(fil_space_t *space, const page_id_t page_id, + page_recv_t &recs, recv_init *init); /** @name Modes used in read-ahead @{ */ /** read only pages belonging to the insert buffer tree */ diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 5e8dc1c0160..2e4a4bfa794 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -45,9 +45,9 @@ recv_find_max_checkpoint(ulint* max_field) MY_ATTRIBUTE((nonnull, warn_unused_result)); ATTRIBUTE_COLD MY_ATTRIBUTE((nonnull, warn_unused_result)) -/** Apply any buffered redo log to a page that was just read from a data file. -@param[in,out] space tablespace -@param[in,out] bpage buffer pool page +/** Apply any buffered redo log to a page. 
+@param space tablespace +@param bpage buffer pool page @return whether the page was recovered correctly */ bool recv_recover_page(fil_space_t* space, buf_page_t* bpage); @@ -146,21 +146,15 @@ struct recv_dblwr_t list pages; }; -/** the recovery state and buffered records for a page */ +/** recv_sys.pages entry; protected by recv_sys.mutex */ struct page_recv_t { - /** Recovery state; protected by recv_sys.mutex */ - enum - { - /** not yet processed */ - RECV_NOT_PROCESSED, - /** not processed; the page will be reinitialized */ - RECV_WILL_NOT_READ, - /** page is being read */ - RECV_BEING_READ, - /** log records are being applied on the page */ - RECV_BEING_PROCESSED - } state= RECV_NOT_PROCESSED; + /** Recovery status: 0=not in progress, 1=log is being applied, + -1=log has been applied and the entry may be erased. + Transitions from 1 to -1 are NOT protected by recv_sys.mutex. */ + Atomic_relaxed being_processed{0}; + /** Whether reading the page will be skipped */ + bool skip_read= false; /** Latest written byte offset when applying the log records. @see mtr_t::m_last_offset */ uint16_t last_offset= 1; @@ -183,6 +177,9 @@ struct page_recv_t head= recs; tail= recs; } + /** Remove the last records for the page + @param start_lsn start of the removed log */ + ATTRIBUTE_COLD void rewind(lsn_t start_lsn); /** @return the last log snippet */ const log_rec_t* last() const { return tail; } @@ -201,8 +198,8 @@ struct page_recv_t iterator begin() { return head; } iterator end() { return NULL; } bool empty() const { ut_ad(!head == !tail); return !head; } - /** Clear and free the records; @see recv_sys_t::alloc() */ - inline void clear(); + /** Clear and free the records; @see recv_sys_t::add() */ + void clear(); } log; /** Trim old log records for a page. @@ -211,21 +208,27 @@ struct page_recv_t inline bool trim(lsn_t start_lsn); /** Ignore any earlier redo log records for this page. */ inline void will_not_read(); - /** @return whether the log records for the page are being processed */ - bool is_being_processed() const { return state == RECV_BEING_PROCESSED; } +}; + +/** A page initialization operation that was parsed from the redo log */ +struct recv_init +{ + /** log sequence number of the page initialization */ + lsn_t lsn; + /** Whether btr_page_create() avoided a read of the page. + At the end of the last recovery batch, mark_ibuf_exist() + will mark pages for which this flag is set. */ + bool created; }; /** Recovery system data structure */ struct recv_sys_t { - /** mutex protecting apply_log_recs and page_recv_t::state */ - mysql_mutex_t mutex; + using init= recv_init; + + /** mutex protecting this as well as some of page_recv_t */ + alignas(CPU_LEVEL1_DCACHE_LINESIZE) mysql_mutex_t mutex; private: - /** condition variable for - !apply_batch_on || pages.empty() || found_corrupt_log || found_corrupt_fs */ - pthread_cond_t cond; - /** whether recv_apply_hashed_log_recs() is running */ - bool apply_batch_on; /** set when finding a corrupt log block or record, or there is a log parsing buffer overflow */ bool found_corrupt_log; @@ -270,6 +273,9 @@ public: map pages; private: + /** iterator to pages, used by parse() */ + map::iterator pages_it; + /** Process a record that indicates that a tablespace size is being shrunk. 
@param page_id first page that is not in the file @param lsn log sequence number of the shrink operation */ @@ -296,23 +302,38 @@ public: inline size_t files_size(); void close_files() { files.clear(); files.shrink_to_fit(); } + /** Advance pages_it if it matches the iterator */ + void pages_it_invalidate(const map::iterator &p) + { + mysql_mutex_assert_owner(&mutex); + if (pages_it == p) + pages_it++; + } + /** Invalidate pages_it if it points to the given tablespace */ + void pages_it_invalidate(uint32_t space_id) + { + mysql_mutex_assert_owner(&mutex); + if (pages_it != pages.end() && pages_it->first.space() == space_id) + pages_it= pages.end(); + } + private: /** Attempt to initialize a page based on redo log records. - @param page_id page identifier - @param p iterator pointing to page_id + @param p iterator @param mtr mini-transaction @param b pre-allocated buffer pool block + @param init page initialization @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ - inline buf_block_t *recover_low(const page_id_t page_id, map::iterator &p, - mtr_t &mtr, buf_block_t *b); + inline buf_block_t *recover_low(const map::iterator &p, mtr_t &mtr, + buf_block_t *b, init &init); /** Attempt to initialize a page based on redo log records. @param page_id page identifier @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ - buf_block_t *recover_low(const page_id_t page_id); + ATTRIBUTE_COLD buf_block_t *recover_low(const page_id_t page_id); /** All found log files (multiple ones are possible if we are upgrading from before MariaDB Server 10.5.1) */ @@ -323,12 +344,27 @@ private: /** Base node of the redo block list. List elements are linked via buf_block_t::unzip_LRU. */ UT_LIST_BASE_NODE_T(buf_block_t) blocks; + + /** Allocate a block from the buffer pool for recv_sys.pages */ + ATTRIBUTE_COLD buf_block_t *add_block(); + + /** Wait for buffer pool to become available. + @param pages number of buffer pool pages needed */ + ATTRIBUTE_COLD void wait_for_pool(size_t pages); + + /** Free log for processed pages. */ + void garbage_collect(); + + /** Apply a recovery batch. + @param space_id current tablespace identifier + @param space current tablespace + @param free_block spare buffer block + @param last_batch whether it is possible to write more redo log + @return whether the caller must provide a new free_block */ + bool apply_batch(uint32_t space_id, fil_space_t *&space, + buf_block_t *&free_block, bool last_batch); + public: - /** Check whether the number of read redo log blocks exceeds the maximum. - Store last_stored_lsn if the recovery is not in the last phase. - @param[in,out] store whether to store page operations - @return whether the memory is exhausted */ - inline bool is_memory_exhausted(store_t *store); /** Apply buffered log to persistent data pages. 
@param last_batch whether it is possible to write more redo log */ void apply(bool last_batch); @@ -353,9 +389,10 @@ public: @param start_lsn start LSN of the mini-transaction @param lsn @see mtr_t::commit_lsn() @param l redo log snippet @see log_t::FORMAT_10_5 - @param len length of l, in bytes */ - inline void add(map::iterator it, lsn_t start_lsn, lsn_t lsn, - const byte *l, size_t len); + @param len length of l, in bytes + @return whether we ran out of memory */ + bool add(map::iterator it, lsn_t start_lsn, lsn_t lsn, + const byte *l, size_t len); /** Parse and register one mini-transaction in log_t::FORMAT_10_5. @param checkpoint_lsn the log sequence number of the latest checkpoint @@ -365,32 +402,31 @@ public: or corruption was noticed */ bool parse(lsn_t checkpoint_lsn, store_t *store, bool apply); - /** Clear a fully processed set of stored redo log records. */ - inline void clear(); + /** Erase log records for a page. */ + void erase(map::iterator p); + /** Clear a fully processed set of stored redo log records. */ + void clear(); + +private: + /** Rewind a mini-transaction when parse() runs out of memory. + @param end current position of the mini-transaction + @param begin start of the mini-transaction */ + ATTRIBUTE_COLD void rewind(const byte *end, const byte *begin) noexcept; + /** Report progress in terms of LSN or pages remaining */ + ATTRIBUTE_COLD void report_progress() const; +public: /** Determine whether redo log recovery progress should be reported. @param time the current time @return whether progress should be reported (the last report was at least 15 seconds ago) */ - bool report(time_t time) - { - if (time - progress_time < 15) - return false; - - progress_time= time; - return true; - } + bool report(time_t time); /** The alloc() memory alignment, in bytes */ static constexpr size_t ALIGNMENT= sizeof(size_t); - /** Allocate memory for log_rec_t - @param len allocation size, in bytes - @return pointer to len bytes of memory (never NULL) */ - inline void *alloc(size_t len); - /** Free a redo log snippet. - @param data buffer returned by alloc() */ + @param data buffer allocated in add() */ inline void free(const void *data); /** Remove records for a corrupted page. @@ -402,8 +438,6 @@ public: ATTRIBUTE_COLD void set_corrupt_fs(); /** Flag log file corruption during recovery. */ ATTRIBUTE_COLD void set_corrupt_log(); - /** Possibly finish a recovery batch. 
*/ - inline void maybe_finish_batch(); /** @return whether data file corruption was found */ bool is_corrupt_fs() const { return UNIV_UNLIKELY(found_corrupt_fs); } @@ -421,13 +455,14 @@ public: } /** Try to recover a tablespace that was not readable earlier - @param p iterator, initially pointing to page_id_t{space_id,0}; - the records will be freed and the iterator advanced + @param p iterator @param name tablespace file name @param free_block spare buffer block - @return whether recovery failed */ - bool recover_deferred(map::iterator &p, const std::string &name, - buf_block_t *&free_block); + @return recovered tablespace + @retval nullptr if recovery failed */ + fil_space_t *recover_deferred(const map::iterator &p, + const std::string &name, + buf_block_t *&free_block); }; /** The recovery system */ diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 7ac0579cc07..af3e7975e76 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -221,6 +221,10 @@ public: bool is_LRU() const { return (type & (WRITE_LRU ^ WRITE_ASYNC)) != 0; } bool is_async() const { return (type & (READ_SYNC ^ READ_ASYNC)) != 0; } + void write_complete() const; + void read_complete() const; + void fake_read_complete(os_offset_t offset) const; + /** If requested, free storage space associated with a section of the file. @param off byte offset from the start (SEEK_SET) @param len size of the hole in bytes @@ -1050,6 +1054,11 @@ int os_aio_init(); Frees the asynchronous io system. */ void os_aio_free(); +/** Submit a fake read request during crash recovery. +@param type fake read request +@param offset additional context */ +void os_fake_read(const IORequest &type, os_offset_t offset); + /** Request a read or write. @param type I/O request @param buf buffer diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 78ba8b70a49..1d80345a5e0 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -748,7 +748,7 @@ static struct { retry: mysql_mutex_unlock(&log_sys.mutex); - bool fail= false; + fil_space_t *space= fil_system.sys_space; buf_block_t *free_block= buf_LRU_get_free_block(false); mysql_mutex_lock(&log_sys.mutex); mysql_mutex_lock(&recv_sys.mutex); @@ -765,11 +765,12 @@ retry: there were no buffered records. Either way, we must create a dummy tablespace with the latest known name, for dict_drop_index_tree(). */ + recv_sys.pages_it_invalidate(space_id); while (p != recv_sys.pages.end() && p->first.space() == space_id) { + ut_ad(!p->second.being_processed); recv_sys_t::map::iterator r= p++; - r->second.log.clear(); - recv_sys.pages.erase(r); + recv_sys.erase(r); } recv_spaces_t::iterator it{recv_spaces.find(space_id)}; if (it != recv_spaces.end()) @@ -792,11 +793,14 @@ retry: } } else - fail= recv_sys.recover_deferred(p, d->second.file_name, free_block); + space= recv_sys.recover_deferred(p, d->second.file_name, free_block); processed: - defers.erase(d++); - if (fail) + auto e= d++; + defers.erase(e); + if (!space) break; + if (space != fil_system.sys_space) + space->release(); if (free_block) continue; mysql_mutex_unlock(&recv_sys.mutex); @@ -807,7 +811,7 @@ processed: mysql_mutex_unlock(&recv_sys.mutex); if (free_block) buf_pool.free_block(free_block); - return fail; + return !space; } /** Create tablespace metadata for a data file that was initially @@ -927,28 +931,191 @@ free_space: } deferred_spaces; +/** Report an operation to create, delete, or rename a file during backup. 
+@param[in] space_id tablespace identifier +@param[in] type redo log type +@param[in] name file name (not NUL-terminated) +@param[in] len length of name, in bytes +@param[in] new_name new file name (NULL if not rename) +@param[in] new_len length of new_name, in bytes (0 if NULL) */ +void (*log_file_op)(ulint space_id, int type, + const byte* name, ulint len, + const byte* new_name, ulint new_len); + +void (*undo_space_trunc)(uint32_t space_id); + +void (*first_page_init)(ulint space_id); + +/** Information about initializing page contents during redo log processing. +FIXME: Rely on recv_sys.pages! */ +class mlog_init_t +{ + using map= std::map, + ut_allocator>>; + /** Map of page initialization operations. + FIXME: Merge this to recv_sys.pages! */ + map inits; + + /** Iterator to the last add() or will_avoid_read(), for speeding up + will_avoid_read(). */ + map::iterator i; +public: + /** Constructor */ + mlog_init_t() : i(inits.end()) {} + + /** Record that a page will be initialized by the redo log. + @param page_id page identifier + @param lsn log sequence number + @return whether the state was changed */ + bool add(const page_id_t page_id, lsn_t lsn) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + const recv_init init = { lsn, false }; + std::pair p= + inits.insert(map::value_type(page_id, init)); + ut_ad(!p.first->second.created); + if (p.second) return true; + if (p.first->second.lsn >= lsn) return false; + p.first->second = init; + i = p.first; + return true; + } + + /** Get the last stored lsn of the page id and its respective + init/load operation. + @param page_id page identifier + @return the latest page initialization; + not valid after releasing recv_sys.mutex. */ + recv_init &last(page_id_t page_id) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + return inits.find(page_id)->second; + } + + /** Determine if a page will be initialized or freed after a time. + @param page_id page identifier + @param lsn log sequence number + @return whether page_id will be freed or initialized after lsn */ + bool will_avoid_read(page_id_t page_id, lsn_t lsn) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + if (i != inits.end() && i->first == page_id) + return i->second.lsn > lsn; + i = inits.lower_bound(page_id); + return i != inits.end() && i->first == page_id && i->second.lsn > lsn; + } + + /** At the end of each recovery batch, reset the 'created' flags. */ + void reset() + { + mysql_mutex_assert_owner(&recv_sys.mutex); + ut_ad(recv_no_ibuf_operations); + for (map::value_type &i : inits) + i.second.created= false; + } + + /** During the last recovery batch, mark whether there exist + buffered changes for the pages that were initialized + by buf_page_create() and still reside in the buffer pool. 
*/ + void mark_ibuf_exist() + { + mysql_mutex_assert_owner(&recv_sys.mutex); + + for (const map::value_type &i : inits) + if (i.second.created) + { + auto &chain= buf_pool.page_hash.cell_get(i.first.fold()); + page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); + + hash_lock.lock_shared(); + buf_block_t *block= reinterpret_cast + (buf_pool.page_hash.get(i.first, chain)); + bool got_latch= block && block->page.lock.x_lock_try(); + hash_lock.unlock_shared(); + + if (!block) + continue; + + uint32_t state; + + if (!got_latch) + { + mysql_mutex_lock(&buf_pool.mutex); + block= reinterpret_cast + (buf_pool.page_hash.get(i.first, chain)); + if (!block) + { + mysql_mutex_unlock(&buf_pool.mutex); + continue; + } + + state= block->page.fix(); + mysql_mutex_unlock(&buf_pool.mutex); + if (state < buf_page_t::UNFIXED) + { + block->page.unfix(); + continue; + } + block->page.lock.x_lock(); + state= block->page.unfix(); + ut_ad(state < buf_page_t::READ_FIX); + if (state >= buf_page_t::UNFIXED && block->page.id() == i.first) + goto check_ibuf; + } + else + { + state= block->page.state(); + ut_ad(state >= buf_page_t::FREED); + ut_ad(state < buf_page_t::READ_FIX); + + if (state >= buf_page_t::UNFIXED) + { + check_ibuf: + mysql_mutex_unlock(&recv_sys.mutex); + if (ibuf_page_exists(block->page.id(), block->zip_size())) + block->page.set_ibuf_exist(); + mysql_mutex_lock(&recv_sys.mutex); + } + } + + block->page.lock.x_unlock(); + } + } + + /** Clear the data structure */ + void clear() { inits.clear(); i = inits.end(); } +}; + +static mlog_init_t mlog_init; + /** Try to recover a tablespace that was not readable earlier -@param p iterator, initially pointing to page_id_t{space_id,0}; - the records will be freed and the iterator advanced +@param p iterator to the page @param name tablespace file name @param free_block spare buffer block -@return whether recovery failed */ -bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, - const std::string &name, - buf_block_t *&free_block) +@return recovered tablespace +@retval nullptr if recovery failed */ +fil_space_t *recv_sys_t::recover_deferred(const recv_sys_t::map::iterator &p, + const std::string &name, + buf_block_t *&free_block) { mysql_mutex_assert_owner(&mutex); - const page_id_t first{p->first}; - ut_ad(first.space()); + ut_ad(p->first.space()); - recv_spaces_t::iterator it{recv_spaces.find(first.space())}; + recv_spaces_t::iterator it{recv_spaces.find(p->first.space())}; ut_ad(it != recv_spaces.end()); - if (!first.page_no() && p->second.state == page_recv_t::RECV_WILL_NOT_READ) + if (!p->first.page_no() && p->second.skip_read) { mtr_t mtr; - buf_block_t *block= recover_low(first, p, mtr, free_block); + ut_ad(!p->second.being_processed); + p->second.being_processed= 1; + init &init= mlog_init.last(p->first); + mysql_mutex_unlock(&mutex); + buf_block_t *block= recover_low(p, mtr, free_block, init); + mysql_mutex_lock(&mutex); + p->second.being_processed= -1; ut_ad(block == free_block || block == reinterpret_cast(-1)); free_block= nullptr; if (UNIV_UNLIKELY(!block || block == reinterpret_cast(-1))) @@ -961,10 +1128,7 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, const uint32_t page_no= mach_read_from_4(page + FIL_PAGE_OFFSET); const uint32_t size= fsp_header_get_field(page, FSP_SIZE); - ut_ad(it != recv_spaces.end()); - - if (page_id_t{space_id, page_no} == first && size >= 4 && - it != recv_spaces.end() && + if (page_id_t{space_id, page_no} == p->first && size >= 4 && fil_space_t::is_valid_flags(flags, space_id) && 
fil_space_t::logical_size(flags) == srv_page_size) { @@ -1018,10 +1182,10 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, } size_set: node->deferred= false; - space->release(); it->second.space= space; block->page.lock.x_unlock(); - return false; + p->second.being_processed= -1; + return space; } release_and_fail: @@ -1029,179 +1193,34 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, } fail: - ib::error() << "Cannot apply log to " << first + ib::error() << "Cannot apply log to " << p->first << " of corrupted file '" << name << "'"; - return true; + return nullptr; } -/** Report an operation to create, delete, or rename a file during backup. -@param[in] space_id tablespace identifier -@param[in] type redo log type -@param[in] name file name (not NUL-terminated) -@param[in] len length of name, in bytes -@param[in] new_name new file name (NULL if not rename) -@param[in] new_len length of new_name, in bytes (0 if NULL) */ -void (*log_file_op)(ulint space_id, int type, - const byte* name, ulint len, - const byte* new_name, ulint new_len); - -void (*undo_space_trunc)(uint32_t space_id); - -void (*first_page_init)(ulint space_id); - -/** Information about initializing page contents during redo log processing. -FIXME: Rely on recv_sys.pages! */ -class mlog_init_t -{ -public: - /** A page initialization operation that was parsed from - the redo log */ - struct init { - /** log sequence number of the page initialization */ - lsn_t lsn; - /** Whether btr_page_create() avoided a read of the page. - - At the end of the last recovery batch, mark_ibuf_exist() - will mark pages for which this flag is set. */ - bool created; - }; - -private: - typedef std::map, - ut_allocator > > - map; - /** Map of page initialization operations. - FIXME: Merge this to recv_sys.pages! */ - map inits; -public: - /** Record that a page will be initialized by the redo log. - @param[in] page_id page identifier - @param[in] lsn log sequence number - @return whether the state was changed */ - bool add(const page_id_t page_id, lsn_t lsn) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - const init init = { lsn, false }; - std::pair p = inits.insert( - map::value_type(page_id, init)); - ut_ad(!p.first->second.created); - if (p.second) return true; - if (p.first->second.lsn >= init.lsn) return false; - p.first->second = init; - return true; - } - - /** Get the last stored lsn of the page id and its respective - init/load operation. - @param[in] page_id page id - @param[in,out] init initialize log or load log - @return the latest page initialization; - not valid after releasing recv_sys.mutex. */ - init& last(page_id_t page_id) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - return inits.find(page_id)->second; - } - - /** Determine if a page will be initialized or freed after a time. - @param page_id page identifier - @param lsn log sequence number - @return whether page_id will be freed or initialized after lsn */ - bool will_avoid_read(page_id_t page_id, lsn_t lsn) const - { - mysql_mutex_assert_owner(&recv_sys.mutex); - auto i= inits.find(page_id); - return i != inits.end() && i->second.lsn > lsn; - } - - /** At the end of each recovery batch, reset the 'created' flags. 
*/ - void reset() - { - mysql_mutex_assert_owner(&recv_sys.mutex); - ut_ad(recv_no_ibuf_operations); - for (map::value_type& i : inits) { - i.second.created = false; - } - } - - /** On the last recovery batch, mark whether there exist - buffered changes for the pages that were initialized - by buf_page_create() and still reside in the buffer pool. - @param[in,out] mtr dummy mini-transaction */ - void mark_ibuf_exist(mtr_t& mtr) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - mtr.start(); - - for (const map::value_type& i : inits) { - if (!i.second.created) { - continue; - } - if (buf_block_t* block = buf_page_get_low( - i.first, 0, RW_X_LATCH, nullptr, - BUF_GET_IF_IN_POOL, - &mtr, nullptr, false)) { - if (UNIV_LIKELY_NULL(block->page.zip.data)) { - switch (fil_page_get_type( - block->page.zip.data)) { - case FIL_PAGE_INDEX: - case FIL_PAGE_RTREE: - if (page_zip_decompress( - &block->page.zip, - block->page.frame, - true)) { - break; - } - ib::error() << "corrupted " - << block->page.id(); - } - } - if (recv_no_ibuf_operations) { - mtr.commit(); - mtr.start(); - continue; - } - mysql_mutex_unlock(&recv_sys.mutex); - if (ibuf_page_exists(block->page.id(), - block->zip_size())) { - block->page.set_ibuf_exist(); - } - mtr.commit(); - mtr.start(); - mysql_mutex_lock(&recv_sys.mutex); - } - } - - mtr.commit(); - clear(); - } - - /** Clear the data structure */ - void clear() { inits.clear(); } -}; - -static mlog_init_t mlog_init; - /** Process a record that indicates that a tablespace is being shrunk in size. @param page_id first page identifier that is not in the file @param lsn log sequence number of the shrink operation */ inline void recv_sys_t::trim(const page_id_t page_id, lsn_t lsn) { - DBUG_ENTER("recv_sys_t::trim"); - DBUG_LOG("ib_log", - "discarding log beyond end of tablespace " - << page_id << " before LSN " << lsn); - mysql_mutex_assert_owner(&mutex); - for (recv_sys_t::map::iterator p = pages.lower_bound(page_id); - p != pages.end() && p->first.space() == page_id.space();) { - recv_sys_t::map::iterator r = p++; - if (r->second.trim(lsn)) { - pages.erase(r); - } - } - DBUG_VOID_RETURN; + DBUG_ENTER("recv_sys_t::trim"); + DBUG_LOG("ib_log", "discarding log beyond end of tablespace " + << page_id << " before LSN " << lsn); + mysql_mutex_assert_owner(&mutex); + if (pages_it != pages.end() && pages_it->first.space() == page_id.space()) + pages_it= pages.end(); + for (recv_sys_t::map::iterator p = pages.lower_bound(page_id); + p != pages.end() && p->first.space() == page_id.space();) + { + recv_sys_t::map::iterator r = p++; + if (r->second.trim(lsn)) + { + ut_ad(!r->second.being_processed); + pages.erase(r); + } + } + DBUG_VOID_RETURN; } void recv_sys_t::open_log_files_if_needed() @@ -1400,7 +1419,6 @@ void recv_sys_t::close() last_stored_lsn= 0; mysql_mutex_destroy(&mutex); - pthread_cond_destroy(&cond); } recv_spaces.clear(); @@ -1415,10 +1433,8 @@ void recv_sys_t::create() ut_ad(this == &recv_sys); ut_ad(!is_initialised()); mysql_mutex_init(recv_sys_mutex_key, &mutex, nullptr); - pthread_cond_init(&cond, nullptr); apply_log_recs = false; - apply_batch_on = false; buf = static_cast(ut_malloc_dontdump(RECV_PARSING_BUF_SIZE, PSI_INSTRUMENT_ME)); @@ -1433,6 +1449,8 @@ void recv_sys_t::create() mlog_checkpoint_lsn = 0; progress_time = time(NULL); + ut_ad(pages.empty()); + pages_it = pages.end(); recv_max_page_lsn = 0; memset(truncated_undo_spaces, 0, sizeof truncated_undo_spaces); @@ -1441,13 +1459,13 @@ void recv_sys_t::create() } /** Clear a fully processed set of stored redo log 
records. */ -inline void recv_sys_t::clear() +void recv_sys_t::clear() { mysql_mutex_assert_owner(&mutex); apply_log_recs= false; - apply_batch_on= false; ut_ad(!after_apply || found_corrupt_fs || !UT_LIST_GET_LAST(blocks)); pages.clear(); + pages_it= pages.end(); for (buf_block_t *block= UT_LIST_GET_LAST(blocks); block; ) { @@ -1458,8 +1476,6 @@ inline void recv_sys_t::clear() buf_block_free(block); block= prev_block; } - - pthread_cond_broadcast(&cond); } /** Free most recovery data structures. */ @@ -1471,6 +1487,7 @@ void recv_sys_t::debug_free() recovery_on= false; pages.clear(); + pages_it= pages.end(); ut_free_dodump(buf, RECV_PARSING_BUF_SIZE); buf= nullptr; @@ -1478,48 +1495,9 @@ void recv_sys_t::debug_free() mysql_mutex_unlock(&mutex); } -inline void *recv_sys_t::alloc(size_t len) -{ - mysql_mutex_assert_owner(&mutex); - ut_ad(len); - ut_ad(len <= srv_page_size); - - buf_block_t *block= UT_LIST_GET_FIRST(blocks); - if (UNIV_UNLIKELY(!block)) - { -create_block: - block= buf_block_alloc(); - block->page.access_time= 1U << 16 | - ut_calc_align(static_cast(len), ALIGNMENT); - static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); - UT_LIST_ADD_FIRST(blocks, block); - MEM_MAKE_ADDRESSABLE(block->page.frame, len); - MEM_NOACCESS(block->page.frame + len, srv_page_size - len); - return my_assume_aligned(block->page.frame); - } - - size_t free_offset= static_cast(block->page.access_time); - ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); - if (UNIV_UNLIKELY(!free_offset)) - { - ut_ad(srv_page_size == 65536); - goto create_block; - } - ut_ad(free_offset <= srv_page_size); - free_offset+= len; - - if (free_offset > srv_page_size) - goto create_block; - - block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | - ut_calc_align(static_cast(free_offset), ALIGNMENT); - MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - len, len); - return my_assume_aligned(block->page.frame + free_offset - len); -} - /** Free a redo log snippet. -@param data buffer returned by alloc() */ +@param data buffer allocated in add() */ inline void recv_sys_t::free(const void *data) { ut_ad(!ut_align_offset(data, ALIGNMENT)); @@ -1544,8 +1522,11 @@ inline void recv_sys_t::free(const void *data) ut_ad(block->page.state() == buf_page_t::MEMORY); ut_ad(static_cast(block->page.access_time - 1) < srv_page_size); - ut_ad(block->page.access_time >= 1U << 16); - if (!((block->page.access_time -= 1U << 16) >> 16)) + unsigned a= block->page.access_time; + ut_ad(a >= 1U << 16); + a-= 1U << 16; + block->page.access_time= a; + if (!(a >> 16)) { UT_LIST_REMOVE(blocks, block); MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size); @@ -2109,7 +2090,31 @@ inline bool page_recv_t::trim(lsn_t start_lsn) } -inline void page_recv_t::recs_t::clear() +void page_recv_t::recs_t::rewind(lsn_t start_lsn) +{ + mysql_mutex_assert_owner(&recv_sys.mutex); + log_phys_t *trim= static_cast(head); + ut_ad(trim); + while (log_phys_t *next= static_cast(trim->next)) + { + ut_ad(trim->start_lsn < start_lsn); + if (next->start_lsn == start_lsn) + break; + trim= next; + } + tail= trim; + log_rec_t *l= tail->next; + tail->next= nullptr; + while (l) + { + log_rec_t *next= l->next; + recv_sys.free(l); + l= next; + } +} + + +void page_recv_t::recs_t::clear() { mysql_mutex_assert_owner(&recv_sys.mutex); for (const log_rec_t *l= head; l; ) @@ -2121,33 +2126,99 @@ inline void page_recv_t::recs_t::clear() head= tail= nullptr; } - /** Ignore any earlier redo log records for this page. 
*/ inline void page_recv_t::will_not_read() { - ut_ad(state == RECV_NOT_PROCESSED || state == RECV_WILL_NOT_READ); - state= RECV_WILL_NOT_READ; + ut_ad(!being_processed); + skip_read= true; log.clear(); } +void recv_sys_t::erase(map::iterator p) +{ + ut_ad(p->second.being_processed <= 0); + p->second.log.clear(); + pages.erase(p); +} + +/** Free log for processed pages. */ +void recv_sys_t::garbage_collect() +{ + mysql_mutex_assert_owner(&mutex); + + if (pages_it != pages.end() && pages_it->second.being_processed < 0) + pages_it= pages.end(); + + for (map::iterator p= pages.begin(); p != pages.end(); ) + { + if (p->second.being_processed < 0) + { + map::iterator r= p++; + erase(r); + } + else + p++; + } +} + +/** Allocate a block from the buffer pool for recv_sys.pages */ +ATTRIBUTE_COLD buf_block_t *recv_sys_t::add_block() +{ + for (bool freed= false;;) + { + const auto rs= UT_LIST_GET_LEN(blocks) * 2; + mysql_mutex_lock(&buf_pool.mutex); + const auto bs= + UT_LIST_GET_LEN(buf_pool.free) + UT_LIST_GET_LEN(buf_pool.LRU); + if (UNIV_LIKELY(bs > BUF_LRU_MIN_LEN || rs < bs)) + { + buf_block_t *block= buf_LRU_get_free_block(true); + mysql_mutex_unlock(&buf_pool.mutex); + return block; + } + /* out of memory: redo log occupies more than 1/3 of buf_pool + and there are fewer than BUF_LRU_MIN_LEN pages left */ + mysql_mutex_unlock(&buf_pool.mutex); + if (freed) + return nullptr; + freed= true; + garbage_collect(); + } +} + +/** Wait for buffer pool to become available. */ +ATTRIBUTE_COLD void recv_sys_t::wait_for_pool(size_t pages) +{ + mysql_mutex_unlock(&mutex); + os_aio_wait_until_no_pending_reads(false); + mysql_mutex_lock(&mutex); + garbage_collect(); + mysql_mutex_lock(&buf_pool.mutex); + bool need_more= UT_LIST_GET_LEN(buf_pool.free) < pages; + mysql_mutex_unlock(&buf_pool.mutex); + if (need_more) + buf_flush_sync_batch(recovered_lsn); +} /** Register a redo log snippet for a page. @param it page iterator @param start_lsn start LSN of the mini-transaction @param lsn @see mtr_t::commit_lsn() -@param recs redo log snippet @see log_t::FORMAT_10_5 -@param len length of l, in bytes */ -inline void recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, - const byte *l, size_t len) +@param l redo log snippet +@param len length of l, in bytes +@return whether we ran out of memory */ +ATTRIBUTE_NOINLINE +bool recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, + const byte *l, size_t len) { mysql_mutex_assert_owner(&mutex); - page_id_t page_id = it->first; page_recv_t &recs= it->second; + buf_block_t *block; switch (*l & 0x70) { case FREE_PAGE: case INIT_PAGE: recs.will_not_read(); - mlog_init.add(page_id, start_lsn); /* FIXME: remove this! */ + mlog_init.add(it->first, start_lsn); /* FIXME: remove this! 
*/ /* fall through */ default: log_phys_t *tail= static_cast(recs.log.last()); @@ -2156,7 +2227,7 @@ inline void recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, if (tail->start_lsn != start_lsn) break; ut_ad(tail->lsn == lsn); - buf_block_t *block= UT_LIST_GET_LAST(blocks); + block= UT_LIST_GET_LAST(blocks); ut_ad(block); const size_t used= static_cast(block->page.access_time - 1) + 1; ut_ad(used >= ALIGNMENT); @@ -2169,7 +2240,7 @@ append: MEM_MAKE_ADDRESSABLE(end + 1, len); /* Append to the preceding record for the page */ tail->append(l, len); - return; + return false; } if (end <= &block->page.frame[used - ALIGNMENT] || &block->page.frame[used] >= end) @@ -2183,8 +2254,49 @@ append: ut_calc_align(static_cast(new_used), ALIGNMENT); goto append; } - recs.log.append(new (alloc(log_phys_t::alloc_size(len))) - log_phys_t(start_lsn, lsn, l, len)); + + const size_t size{log_phys_t::alloc_size(len)}; + ut_ad(size <= srv_page_size); + void *buf; + block= UT_LIST_GET_FIRST(blocks); + if (UNIV_UNLIKELY(!block)) + { + create_block: + block= add_block(); + if (UNIV_UNLIKELY(!block)) + return true; + block->page.access_time= 1U << 16 | + ut_calc_align(static_cast(size), ALIGNMENT); + static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); + UT_LIST_ADD_FIRST(blocks, block); + MEM_MAKE_ADDRESSABLE(block->page.frame, size); + MEM_NOACCESS(block->page.frame + size, srv_page_size - size); + buf= block->page.frame; + } + else + { + size_t free_offset= static_cast(block->page.access_time); + ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); + if (UNIV_UNLIKELY(!free_offset)) + { + ut_ad(srv_page_size == 65536); + goto create_block; + } + ut_ad(free_offset <= srv_page_size); + free_offset+= size; + + if (free_offset > srv_page_size) + goto create_block; + + block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | + ut_calc_align(static_cast(free_offset), ALIGNMENT); + MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - size, size); + buf= block->page.frame + free_offset - size; + } + + recs.log.append(new (my_assume_aligned(buf)) + log_phys_t{start_lsn, lsn, l, len}); + return false; } /** Store/remove the freed pages in fil_name_t of recv_spaces. 
@@ -2220,6 +2332,70 @@ static void store_freed_or_init_rec(page_id_t page_id, bool freed) } } +ATTRIBUTE_COLD +void recv_sys_t::rewind(const byte *end, const byte *begin) noexcept +{ + ut_ad(srv_operation != SRV_OPERATION_BACKUP); + mysql_mutex_assert_owner(&mutex); + + uint32_t rlen; + for (const byte *l= begin; !(l == end); l+= rlen) + { + const byte b= *l++; + ut_ad(UNIV_LIKELY((b & 0x70) != RESERVED) || srv_force_recovery); + + rlen= b & 0xf; + if (!rlen) + { + if (!b) + continue; + const uint32_t lenlen= mlog_decode_varint_length(*l); + const uint32_t addlen= mlog_decode_varint(l); + ut_ad(addlen != MLOG_DECODE_ERROR); + rlen= addlen + 15 - lenlen; + l+= lenlen; + } + ut_ad(l + rlen <= end); + if (b & 0x80) + continue; + + uint32_t idlen= mlog_decode_varint_length(*l); + if (UNIV_UNLIKELY(idlen > 5 || idlen >= rlen)) + continue; + const uint32_t space_id= mlog_decode_varint(l); + if (UNIV_UNLIKELY(space_id == MLOG_DECODE_ERROR)) + continue; + l+= idlen; + rlen-= idlen; + idlen= mlog_decode_varint_length(*l); + if (UNIV_UNLIKELY(idlen > 5 || idlen > rlen)) + continue; + const uint32_t page_no= mlog_decode_varint(l); + if (UNIV_UNLIKELY(page_no == MLOG_DECODE_ERROR)) + continue; + const page_id_t id{space_id, page_no}; + if (pages_it == pages.end() || pages_it->first != id) + { + pages_it= pages.find(id); + if (pages_it == pages.end()) + continue; + } + + ut_ad(!pages_it->second.being_processed); + const log_phys_t *head= + static_cast(*pages_it->second.log.begin()); + if (!head || head->start_lsn == recovered_lsn) + { + erase(pages_it); + pages_it= pages.end(); + } + else + pages_it->second.log.rewind(recovered_lsn); + } + + pages_it= pages.end(); +} + /** Parse and register one mini-transaction in log_t::FORMAT_10_5. @param checkpoint_lsn the log sequence number of the latest checkpoint @param store whether to store the records @@ -2228,17 +2404,16 @@ static void store_freed_or_init_rec(page_id_t page_id, bool freed) or corruption was noticed */ bool recv_sys_t::parse(lsn_t checkpoint_lsn, store_t *store, bool apply) { + restart: mysql_mutex_assert_owner(&log_sys.mutex); mysql_mutex_assert_owner(&mutex); ut_ad(parse_start_lsn); ut_ad(log_sys.is_physical()); - bool last_phase= (*store == STORE_IF_EXISTS); const byte *const end= buf + len; loop: const byte *const log= buf + recovered_offset; const lsn_t start_lsn= recovered_lsn; - map::iterator cached_pages_it = pages.end(); /* Check that the entire mini-transaction is included within the buffer */ const byte *l; @@ -2554,7 +2729,6 @@ same_page: ut_ad(modified.emplace(id).second || (b & 0x70) != INIT_PAGE); } #endif - const bool is_init= (b & 0x70) <= INIT_PAGE; switch (*store) { case STORE_IF_EXISTS: if (fil_space_t *space= fil_space_t::get(space_id)) @@ -2568,23 +2742,48 @@ same_page: continue; /* fall through */ case STORE_YES: - if (!mlog_init.will_avoid_read(id, start_lsn)) + if (mlog_init.will_avoid_read(id, start_lsn)) + continue; + if (pages_it == pages.end() || pages_it->first != id) + pages_it= pages.emplace(id, page_recv_t{}).first; + if (UNIV_UNLIKELY(add(pages_it, start_lsn, end_lsn, recs, + l - recs + rlen))) { - if (cached_pages_it == pages.end() || cached_pages_it->first != id) - cached_pages_it= pages.emplace(id, page_recv_t()).first; - add(cached_pages_it, start_lsn, end_lsn, recs, - static_cast(l + rlen - recs)); + recovered_lsn= start_lsn; + recovered_offset= log - buf; + rewind(l + rlen, log); + if (*store == STORE_IF_EXISTS) + { + log_sys.set_lsn(recovered_lsn); + log_sys.set_flushed_lsn(recovered_lsn); + 
mysql_mutex_unlock(&mutex); + this->apply(false); + mysql_mutex_lock(&mutex); + if (is_corrupt_fs()) + return true; + } + else + { + last_stored_lsn= recovered_lsn; + sql_print_information("InnoDB: Multi-batch recovery needed at LSN " + LSN_PF, recovered_lsn); + *store= STORE_NO; + } + goto restart; } continue; case STORE_NO: - if (!is_init) + if ((b & 0x70) > INIT_PAGE) continue; mlog_init.add(id, start_lsn); - map::iterator i= pages.find(id); - if (i == pages.end()) - continue; - i->second.log.clear(); - pages.erase(i); + if (pages_it == pages.end() || pages_it->first != id) + { + pages_it= pages.find(id); + if (pages_it == pages.end()) + continue; + } + map::iterator r= pages_it++; + erase(r); } } else if (rlen) @@ -2706,8 +2905,6 @@ same_page: ut_ad(l == el); recovered_offset= l - buf; recovered_lsn= end_lsn; - if (is_memory_exhausted(store) && last_phase) - return false; goto loop; } @@ -2715,23 +2912,22 @@ same_page: lsn of a log record. @param[in,out] block buffer pool page @param[in,out] mtr mini-transaction -@param[in,out] p recovery address +@param[in,out] recs log records to apply @param[in,out] space tablespace, or NULL if not looked up yet @param[in,out] init page initialization operation, or NULL @return the recovered page @retval nullptr on failure */ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, - const recv_sys_t::map::iterator &p, - fil_space_t *space= nullptr, - mlog_init_t::init *init= nullptr) + page_recv_t &recs, + fil_space_t *space, + recv_init *init) { - mysql_mutex_assert_owner(&recv_sys.mutex); + mysql_mutex_assert_not_owner(&recv_sys.mutex); ut_ad(recv_sys.apply_log_recs); ut_ad(recv_needed_recovery); ut_ad(!init || init->created); ut_ad(!init || init->lsn); - ut_ad(block->page.id() == p->first); - ut_ad(!p->second.is_being_processed()); + ut_ad(recs.being_processed == 1); ut_ad(!space || space->id == block->page.id().space()); ut_ad(log_sys.is_physical()); @@ -2743,10 +2939,6 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, block->page.id().space(), block->page.id().page_no())); - p->second.state = page_recv_t::RECV_BEING_PROCESSED; - - mysql_mutex_unlock(&recv_sys.mutex); - byte *frame = UNIV_LIKELY_NULL(block->page.zip.data) ? block->page.zip.data : block->page.frame; @@ -2760,7 +2952,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, bool skipped_after_init = false; - for (const log_rec_t* recv : p->second.log) { + for (const log_rec_t* recv : recs.log) { const log_phys_t* l = static_cast(recv); ut_ad(l->lsn); ut_ad(end_lsn <= l->lsn); @@ -2817,8 +3009,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, block->page.id().space(), block->page.id().page_no())); - log_phys_t::apply_status a= l->apply(*block, - p->second.last_offset); + log_phys_t::apply_status a= l->apply(*block, recs.last_offset); switch (a) { case log_phys_t::APPLIED_NO: @@ -2937,24 +3128,11 @@ set_start_lsn: mtr.commit(); done: - time_t now = time(NULL); - - mysql_mutex_lock(&recv_sys.mutex); - + /* FIXME: do this in page read, protected with recv_sys.mutex! 
*/ if (recv_max_page_lsn < page_lsn) { recv_max_page_lsn = page_lsn; } - ut_ad(!block || p->second.is_being_processed()); - ut_ad(!block || !recv_sys.pages.empty()); - - if (recv_sys.report(now)) { - const ulint n = recv_sys.pages.size(); - ib::info() << "To recover: " << n << " pages from log"; - service_manager_extend_timeout( - INNODB_EXTEND_TIMEOUT_INTERVAL, "To recover: " ULINTPF " pages from log", n); - } - return block; } @@ -2968,146 +3146,350 @@ ATTRIBUTE_COLD void recv_sys_t::free_corrupted_page(page_id_t page_id) mysql_mutex_lock(&mutex); map::iterator p= pages.find(page_id); - if (p != pages.end()) + if (p == pages.end()) { - p->second.log.clear(); - pages.erase(p); - if (!srv_force_recovery) - { - set_corrupt_fs(); - ib::error() << "Unable to apply log to corrupted page " << page_id - << "; set innodb_force_recovery to ignore"; - } - else - ib::warn() << "Discarding log for corrupted page " << page_id; + mysql_mutex_unlock(&mutex); + return; } - if (pages.empty()) - pthread_cond_broadcast(&cond); + p->second.being_processed= -1; + if (!srv_force_recovery) + set_corrupt_fs(); mysql_mutex_unlock(&mutex); -} -/** Possibly finish a recovery batch. */ -inline void recv_sys_t::maybe_finish_batch() -{ - mysql_mutex_assert_owner(&mutex); - ut_ad(recovery_on); - if (!apply_batch_on || pages.empty() || is_corrupt_log() || is_corrupt_fs()) - pthread_cond_broadcast(&cond); + ib::error_or_warn(!srv_force_recovery) + << "Unable to apply log to corrupted page " << page_id; } ATTRIBUTE_COLD void recv_sys_t::set_corrupt_log() { mysql_mutex_lock(&mutex); found_corrupt_log= true; - pthread_cond_broadcast(&cond); mysql_mutex_unlock(&mutex); } ATTRIBUTE_COLD void recv_sys_t::set_corrupt_fs() { mysql_mutex_assert_owner(&mutex); + if (!srv_force_recovery) + sql_print_information("InnoDB: Set innodb_force_recovery=1" + " to ignore corrupted pages."); found_corrupt_fs= true; - pthread_cond_broadcast(&cond); } -/** Apply any buffered redo log to a page that was just read from a data file. -@param[in,out] space tablespace -@param[in,out] bpage buffer pool page +/** Apply any buffered redo log to a page. +@param space tablespace +@param bpage buffer pool page @return whether the page was recovered correctly */ bool recv_recover_page(fil_space_t* space, buf_page_t* bpage) { - mtr_t mtr; - mtr.start(); - mtr.set_log_mode(MTR_LOG_NO_REDO); + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); - ut_ad(bpage->frame); - /* Move the ownership of the x-latch on the page to - this OS thread, so that we can acquire a second - x-latch on it. This is needed for the operations to - the page to pass the debug checks. */ - bpage->lock.claim_ownership(); - bpage->lock.x_lock_recursive(); - bpage->fix_on_recovery(); - mtr.memo_push(reinterpret_cast(bpage), - MTR_MEMO_PAGE_X_FIX); + ut_ad(bpage->frame); + /* Move the ownership of the x-latch on the page to this OS thread, + so that we can acquire a second x-latch on it. This is needed for + the operations to the page to pass the debug checks. 
*/ + bpage->lock.claim_ownership(); + bpage->lock.x_lock_recursive(); + bpage->fix_on_recovery(); + mtr.memo_push(reinterpret_cast(bpage), MTR_MEMO_PAGE_X_FIX); - buf_block_t* success = reinterpret_cast(bpage); + buf_block_t *success= reinterpret_cast(bpage); - mysql_mutex_lock(&recv_sys.mutex); - if (recv_sys.apply_log_recs) { - recv_sys_t::map::iterator p = recv_sys.pages.find(bpage->id()); - if (p != recv_sys.pages.end() - && !p->second.is_being_processed()) { - success = recv_recover_page(success, mtr, p, space); - if (UNIV_LIKELY(!!success)) { - p->second.log.clear(); - recv_sys.pages.erase(p); - } - recv_sys.maybe_finish_batch(); - goto func_exit; - } - } - - mtr.commit(); -func_exit: - mysql_mutex_unlock(&recv_sys.mutex); - ut_ad(mtr.has_committed()); - return success; -} - -/** Read pages for which log needs to be applied. -@param page_id first page identifier to read -@param i iterator to recv_sys.pages */ -TRANSACTIONAL_TARGET -static void recv_read_in_area(page_id_t page_id, recv_sys_t::map::iterator i) -{ - uint32_t page_nos[32]; - ut_ad(page_id == i->first); - page_id.set_page_no(ut_2pow_round(page_id.page_no(), 32U)); - const page_id_t up_limit{page_id + 31}; - uint32_t* p= page_nos; - - for (; i != recv_sys.pages.end() && i->first <= up_limit; i++) + mysql_mutex_lock(&recv_sys.mutex); + if (recv_sys.apply_log_recs) { - if (i->second.state == page_recv_t::RECV_NOT_PROCESSED) + const page_id_t id{bpage->id()}; + recv_sys_t::map::iterator p= recv_sys.pages.find(id); + if (p == recv_sys.pages.end()); + else if (p->second.being_processed < 0) { - i->second.state= page_recv_t::RECV_BEING_READ; - *p++= i->first.page_no(); + recv_sys.pages_it_invalidate(p); + recv_sys.erase(p); + } + else + { + p->second.being_processed= 1; + recv_sys_t::init *init= nullptr; + if (p->second.skip_read) + (init= &mlog_init.last(id))->created= true; + mysql_mutex_unlock(&recv_sys.mutex); + success= recv_recover_page(success, mtr, p->second, space, init); + p->second.being_processed= -1; + goto func_exit; } } - if (p != page_nos) + mysql_mutex_unlock(&recv_sys.mutex); + mtr.commit(); +func_exit: + ut_ad(mtr.has_committed()); + return success; +} + +void IORequest::fake_read_complete(os_offset_t offset) const +{ + ut_ad(node); + ut_ad(is_read()); + ut_ad(bpage); + ut_ad(bpage->frame); + ut_ad(recv_recovery_is_on()); + ut_ad(offset); + + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); + + ut_ad(bpage->frame); + /* Move the ownership of the x-latch on the page to this OS thread, + so that we can acquire a second x-latch on it. This is needed for + the operations to the page to pass the debug checks. 
*/ + bpage->lock.claim_ownership(); + bpage->lock.x_lock_recursive(); + bpage->fix_on_recovery(); + mtr.memo_push(reinterpret_cast(bpage), MTR_MEMO_PAGE_X_FIX); + + page_recv_t &recs= *reinterpret_cast(slot); + ut_ad(recs.being_processed == 1); + recv_init &init= *reinterpret_cast(offset); + ut_ad(init.lsn > 1); + init.created= true; + + if (recv_recover_page(reinterpret_cast(bpage), + mtr, recs, node->space, &init)) { - mysql_mutex_unlock(&recv_sys.mutex); - buf_read_recv_pages(page_id.space(), page_nos, ulint(p - page_nos)); - mysql_mutex_lock(&recv_sys.mutex); + ut_ad(bpage->oldest_modification() || bpage->is_freed()); + bpage->lock.x_unlock(true); + } + recs.being_processed= -1; + ut_ad(mtr.has_committed()); + + node->space->release(); +} + +/** @return whether a page has been freed */ +inline bool fil_space_t::is_freed(uint32_t page) +{ + std::lock_guard freed_lock(freed_range_mutex); + return freed_ranges.contains(page); +} + +bool recv_sys_t::report(time_t time) +{ + if (time - progress_time < 15) + return false; + progress_time= time; + return true; +} + +ATTRIBUTE_COLD +void recv_sys_t::report_progress() const +{ + mysql_mutex_assert_owner(&mutex); + const size_t n{pages.size()}; + if (recv_sys.scanned_lsn == recv_sys.recovered_lsn) + { + sql_print_information("InnoDB: To recover: %zu pages", n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "To recover: %zu pages", n); + } + else + { + sql_print_information("InnoDB: To recover: LSN " LSN_PF + "/" LSN_PF "; %zu pages", + recv_sys.recovered_lsn, recv_sys.scanned_lsn, n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "To recover: LSN " LSN_PF + "/" LSN_PF "; %zu pages", + recv_sys.recovered_lsn, + recv_sys.scanned_lsn, n); } } +/** Apply a recovery batch. 
+@param space_id current tablespace identifier +@param space current tablespace +@param free_block spare buffer block +@param last_batch whether it is possible to write more redo log +@return whether the caller must provide a new free_block */ +bool recv_sys_t::apply_batch(uint32_t space_id, fil_space_t *&space, + buf_block_t *&free_block, bool last_batch) +{ + mysql_mutex_assert_owner(&mutex); + ut_ad(pages_it != pages.end()); + ut_ad(!pages_it->second.log.empty()); + + mysql_mutex_lock(&buf_pool.mutex); + size_t n= 0, max_n= std::min(BUF_LRU_MIN_LEN, + UT_LIST_GET_LEN(buf_pool.LRU) + + UT_LIST_GET_LEN(buf_pool.free)); + mysql_mutex_unlock(&buf_pool.mutex); + + map::iterator begin= pages.end(); + page_id_t begin_id{~0ULL}; + + while (pages_it != pages.end() && n < max_n) + { + ut_ad(!buf_dblwr.is_inside(pages_it->first)); + if (!pages_it->second.being_processed) + { + if (space_id != pages_it->first.space()) + { + space_id= pages_it->first.space(); + if (space) + space->release(); + space= fil_space_t::get(space_id); + if (!space) + { + auto d= deferred_spaces.defers.find(space_id); + if (d == deferred_spaces.defers.end() || d->second.deleted) + /* For deleted files we preserve the deferred_spaces entry */; + else if (!free_block) + return true; + else + { + space= recover_deferred(pages_it, d->second.file_name, free_block); + deferred_spaces.defers.erase(d); + if (!space && !srv_force_recovery) + { + set_corrupt_fs(); + return false; + } + } + } + } + if (!space || space->is_freed(pages_it->first.page_no())) + pages_it->second.being_processed= -1; + else if (!n++) + { + begin= pages_it; + begin_id= pages_it->first; + } + } + pages_it++; + } + + if (!last_batch) + mysql_mutex_unlock(&log_sys.mutex); + + mysql_mutex_assert_not_owner(&log_sys.mutex); + + pages_it= begin; + + if (report(time(nullptr))) + report_progress(); + + if (!n) + goto wait; + + mysql_mutex_lock(&buf_pool.mutex); + + if (UNIV_UNLIKELY(UT_LIST_GET_LEN(buf_pool.free) < n)) + { + mysql_mutex_unlock(&buf_pool.mutex); + wait: + wait_for_pool(n); + if (n); + else if (!last_batch) + goto unlock_relock; + else + goto get_last; + pages_it= pages.lower_bound(begin_id); + ut_ad(pages_it != pages.end()); + } + else + mysql_mutex_unlock(&buf_pool.mutex); + + while (pages_it != pages.end()) + { + ut_ad(!buf_dblwr.is_inside(pages_it->first)); + if (!pages_it->second.being_processed) + { + const page_id_t id{pages_it->first}; + + if (space_id != id.space()) + { + space_id= id.space(); + if (space) + space->release(); + space= fil_space_t::get(space_id); + } + if (!space) + { + const auto it= deferred_spaces.defers.find(space_id); + if (it != deferred_spaces.defers.end() && !it->second.deleted) + /* The records must be processed after recover_deferred(). */ + goto next; + goto space_not_found; + } + else if (space->is_freed(id.page_no())) + { + space_not_found: + pages_it->second.being_processed= -1; + goto next; + } + else + { + page_recv_t &recs= pages_it->second; + ut_ad(!recs.log.empty()); + recs.being_processed= 1; + init *init= recs.skip_read ? 
&mlog_init.last(id) : nullptr; + mysql_mutex_unlock(&mutex); + buf_read_recover(space, id, recs, init); + } + + if (!--n) + { + if (last_batch) + goto relock_last; + goto relock; + } + mysql_mutex_lock(&mutex); + pages_it= pages.lower_bound(id); + } + else + next: + pages_it++; + } + + if (!last_batch) + { + unlock_relock: + mysql_mutex_unlock(&mutex); + relock: + mysql_mutex_lock(&log_sys.mutex); + relock_last: + mysql_mutex_lock(&mutex); + get_last: + pages_it= pages.lower_bound(begin_id); + } + + return false; +} + /** Attempt to initialize a page based on redo log records. -@param page_id page identifier -@param p iterator pointing to page_id +@param p iterator @param mtr mini-transaction @param b pre-allocated buffer pool block +@param init page initialization @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ -inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id, - map::iterator &p, mtr_t &mtr, - buf_block_t *b) +inline buf_block_t *recv_sys_t::recover_low(const map::iterator &p, mtr_t &mtr, + buf_block_t *b, init &init) { - mysql_mutex_assert_owner(&mutex); - ut_ad(p->first == page_id); + mysql_mutex_assert_not_owner(&mutex); page_recv_t &recs= p->second; - ut_ad(recs.state == page_recv_t::RECV_WILL_NOT_READ); + ut_ad(recs.skip_read); + ut_ad(recs.being_processed == 1); buf_block_t* block= nullptr; - mlog_init_t::init &i= mlog_init.last(page_id); - const lsn_t end_lsn = recs.log.last()->lsn; - if (end_lsn < i.lsn) - DBUG_LOG("ib_log", "skip log for page " << page_id - << " LSN " << end_lsn << " < " << i.lsn); - fil_space_t *space= fil_space_t::get(page_id.space()); + const lsn_t end_lsn= recs.log.last()->lsn; + if (end_lsn < init.lsn) + DBUG_LOG("ib_log", "skip log for page " << p->first + << " LSN " << end_lsn << " < " << init.lsn); + fil_space_t *space= fil_space_t::get(p->first.space()); mtr.start(); mtr.set_log_mode(MTR_LOG_NO_REDO); @@ -3116,82 +3498,77 @@ inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id, if (!space) { - if (page_id.page_no() != 0) + if (p->first.page_no() != 0) { nothing_recoverable: mtr.commit(); return nullptr; } - auto it= recv_spaces.find(page_id.space()); + auto it= recv_spaces.find(p->first.space()); ut_ad(it != recv_spaces.end()); uint32_t flags= it->second.flags; zip_size= fil_space_t::zip_size(flags); - block= buf_page_create_deferred(page_id.space(), zip_size, &mtr, b); + block= buf_page_create_deferred(p->first.space(), zip_size, &mtr, b); ut_ad(block == b); block->page.lock.x_lock_recursive(); } else { - block= buf_page_create(space, page_id.page_no(), zip_size, &mtr, b); + block= buf_page_create(space, p->first.page_no(), zip_size, &mtr, b); if (UNIV_UNLIKELY(block != b)) { /* The page happened to exist in the buffer pool, or it was just being read in. Before the exclusive page latch was acquired by buf_page_create(), all changes to the page must have been applied. 
*/ - ut_ad(pages.find(page_id) == pages.end()); + ut_d(mysql_mutex_lock(&mutex)); + ut_ad(pages.find(p->first) == pages.end()); + ut_d(mysql_mutex_unlock(&mutex)); space->release(); goto nothing_recoverable; } } - ut_ad(&recs == &pages.find(page_id)->second); - i.created= true; - map::iterator r= p++; - block= recv_recover_page(block, mtr, r, space, &i); + ut_d(mysql_mutex_lock(&mutex)); + ut_ad(&recs == &pages.find(p->first)->second); + ut_d(mysql_mutex_unlock(&mutex)); + init.created= true; + block= recv_recover_page(block, mtr, recs, space, &init); ut_ad(mtr.has_committed()); - if (block) - { - recs.log.clear(); - pages.erase(r); - } - else - block= reinterpret_cast(-1); - - if (pages.empty()) - pthread_cond_signal(&cond); - if (space) space->release(); - return block; + return block ? block : reinterpret_cast(-1); } /** Attempt to initialize a page based on redo log records. @param page_id page identifier @return recovered block @retval nullptr if the page cannot be initialized based on log records */ -buf_block_t *recv_sys_t::recover_low(const page_id_t page_id) +ATTRIBUTE_COLD buf_block_t *recv_sys_t::recover_low(const page_id_t page_id) { - buf_block_t *free_block= buf_LRU_get_free_block(false); - buf_block_t *block= nullptr; - mysql_mutex_lock(&mutex); map::iterator p= pages.find(page_id); - if (p != pages.end() && p->second.state == page_recv_t::RECV_WILL_NOT_READ) + if (p != pages.end() && !p->second.being_processed && p->second.skip_read) { + p->second.being_processed= 1; + init &init= mlog_init.last(page_id); + mysql_mutex_unlock(&mutex); + buf_block_t *free_block= buf_LRU_get_free_block(false); mtr_t mtr; - block= recover_low(page_id, p, mtr, free_block); + buf_block_t *block= recover_low(p, mtr, free_block, init); + p->second.being_processed= -1; ut_ad(!block || block == reinterpret_cast(-1) || block == free_block); + if (UNIV_UNLIKELY(!block)) + buf_pool.free_block(free_block); + return block; } mysql_mutex_unlock(&mutex); - if (UNIV_UNLIKELY(!block)) - buf_pool.free_block(free_block); - return block; + return nullptr; } inline fil_space_t *fil_system_t::find(const char *path) const @@ -3242,46 +3619,18 @@ void recv_sys_t::apply(bool last_batch) #endif /* SAFE_MUTEX */ mysql_mutex_lock(&mutex); - timespec abstime; - - while (apply_batch_on) - { - if (is_corrupt_log()) - { - mysql_mutex_unlock(&mutex); - return; - } - if (last_batch) - { - mysql_mutex_assert_not_owner(&log_sys.mutex); - my_cond_wait(&cond, &mutex.m_mutex); - } - else - { - mysql_mutex_unlock(&mutex); - set_timespec_nsec(abstime, 500000000ULL); /* 0.5s */ - my_cond_timedwait(&cond, &log_sys.mutex.m_mutex, &abstime); - mysql_mutex_lock(&mutex); - } - } - - recv_no_ibuf_operations = !last_batch || - srv_operation == SRV_OPERATION_RESTORE || - srv_operation == SRV_OPERATION_RESTORE_EXPORT; - - mtr_t mtr; + garbage_collect(); if (!pages.empty()) { - const char *msg= last_batch - ? 
"Starting final batch to recover " - : "Starting a batch to recover "; - const ulint n= pages.size(); - ib::info() << msg << n << " pages from redo log."; - sd_notifyf(0, "STATUS=%s" ULINTPF " pages from redo log", msg, n); + recv_no_ibuf_operations = !last_batch || + srv_operation == SRV_OPERATION_RESTORE || + srv_operation == SRV_OPERATION_RESTORE_EXPORT; + ut_ad(!last_batch || recovered_lsn == scanned_lsn); + progress_time= time(nullptr); + report_progress(); apply_log_recs= true; - apply_batch_on= true; for (auto id= srv_undo_tablespaces_open; id--;) { @@ -3307,130 +3656,70 @@ void recv_sys_t::apply(bool last_batch) fil_system.extend_to_recv_size(); - /* We must release log_sys.mutex and recv_sys.mutex before - invoking buf_LRU_get_free_block(). Allocating a block may initiate - a redo log write and therefore acquire log_sys.mutex. To avoid - deadlocks, log_sys.mutex must not be acquired while holding - recv_sys.mutex. */ - mysql_mutex_unlock(&mutex); - if (!last_batch) - mysql_mutex_unlock(&log_sys.mutex); + fil_space_t *space= nullptr; + uint32_t space_id= ~0; + buf_block_t *free_block= nullptr; - mysql_mutex_assert_not_owner(&log_sys.mutex); - buf_block_t *free_block= buf_LRU_get_free_block(false); - - if (!last_batch) - mysql_mutex_lock(&log_sys.mutex); - mysql_mutex_lock(&mutex); - - for (map::iterator p= pages.begin(); p != pages.end(); ) + for (pages_it= pages.begin(); pages_it != pages.end(); + pages_it= pages.begin()) { - const page_id_t page_id= p->first; - ut_ad(!p->second.log.empty()); - - const uint32_t space_id= page_id.space(); - auto d= deferred_spaces.defers.find(space_id); - if (d != deferred_spaces.defers.end()) + if (!free_block) { - if (d->second.deleted) - { - /* For deleted files we must preserve the entry in deferred_spaces */ -erase_for_space: - while (p != pages.end() && p->first.space() == space_id) - { - map::iterator r= p++; - r->second.log.clear(); - pages.erase(r); - } - } - else if (recover_deferred(p, d->second.file_name, free_block)) - { - if (!srv_force_recovery) - set_corrupt_fs(); - deferred_spaces.defers.erase(d); - goto erase_for_space; - } - else - deferred_spaces.defers.erase(d); - if (!free_block) - goto next_free_block; - p= pages.lower_bound(page_id); - continue; + if (!last_batch) + mysql_mutex_unlock(&log_sys.mutex); + wait_for_pool(1); + pages_it= pages.begin(); + mysql_mutex_unlock(&mutex); + /* We must release log_sys.mutex and recv_sys.mutex before + invoking buf_LRU_get_free_block(). Allocating a block may initiate + a redo log write and therefore acquire log_sys.mutex. To avoid + deadlocks, log_sys.mutex must not be acquired while holding + recv_sys.mutex. 
*/ + free_block= buf_LRU_get_free_block(false); + if (!last_batch) + mysql_mutex_lock(&log_sys.mutex); + mysql_mutex_lock(&mutex); + pages_it= pages.begin(); } - switch (p->second.state) { - case page_recv_t::RECV_BEING_READ: - case page_recv_t::RECV_BEING_PROCESSED: - p++; - continue; - case page_recv_t::RECV_WILL_NOT_READ: - if (UNIV_LIKELY(!!recover_low(page_id, p, mtr, free_block))) + while (pages_it != pages.end()) + { + if (is_corrupt_fs() || is_corrupt_log()) { -next_free_block: + if (space) + space->release(); mysql_mutex_unlock(&mutex); - if (!last_batch) - mysql_mutex_unlock(&log_sys.mutex); - mysql_mutex_assert_not_owner(&log_sys.mutex); - free_block= buf_LRU_get_free_block(false); - if (!last_batch) - mysql_mutex_lock(&log_sys.mutex); - mysql_mutex_lock(&mutex); - break; + if (free_block) + { + mysql_mutex_lock(&buf_pool.mutex); + buf_LRU_block_free_non_file_page(free_block); + mysql_mutex_unlock(&buf_pool.mutex); + } + return; } - ut_ad(p == pages.end() || p->first > page_id); - continue; - case page_recv_t::RECV_NOT_PROCESSED: - recv_read_in_area(page_id, p); + if (apply_batch(space_id, space, free_block, last_batch)) + break; } - p= pages.lower_bound(page_id); - /* Ensure that progress will be made. */ - ut_ad(p == pages.end() || p->first > page_id || - p->second.state >= page_recv_t::RECV_BEING_READ); } - buf_pool.free_block(free_block); + if (space) + space->release(); - /* Wait until all the pages have been processed */ - for (;;) + if (free_block) { - const bool empty= pages.empty(); - if (empty && !os_aio_pending_reads()) - break; - - if (!is_corrupt_fs() && !is_corrupt_log()) - { - if (last_batch) - { - mysql_mutex_assert_not_owner(&log_sys.mutex); - if (!empty) - my_cond_wait(&cond, &mutex.m_mutex); - else - { - mysql_mutex_unlock(&mutex); - os_aio_wait_until_no_pending_reads(false); - mysql_mutex_lock(&mutex); - ut_ad(pages.empty()); - } - } - else - { - mysql_mutex_unlock(&mutex); - set_timespec_nsec(abstime, 500000000ULL); /* 0.5s */ - my_cond_timedwait(&cond, &log_sys.mutex.m_mutex, &abstime); - mysql_mutex_lock(&mutex); - } - continue; - } - if (is_corrupt_fs() && !srv_force_recovery) - ib::info() << "Set innodb_force_recovery=1 to ignore corrupted pages."; - mysql_mutex_unlock(&mutex); - return; + mysql_mutex_lock(&buf_pool.mutex); + buf_LRU_block_free_non_file_page(free_block); + mysql_mutex_unlock(&buf_pool.mutex); } } if (last_batch) - /* We skipped this in buf_page_create(). */ - mlog_init.mark_ibuf_exist(mtr); + { + if (!recv_no_ibuf_operations) + /* We skipped this in buf_page_create(). */ + mlog_init.mark_ibuf_exist(); + mlog_init.clear(); + } else { mlog_init.reset(); @@ -3440,21 +3729,19 @@ next_free_block: mysql_mutex_assert_not_owner(&log_sys.mutex); mysql_mutex_unlock(&mutex); - if (last_batch && srv_operation != SRV_OPERATION_RESTORE && - srv_operation != SRV_OPERATION_RESTORE_EXPORT) - log_sort_flush_list(); - else - { - /* Instead of flushing, last_batch could sort the buf_pool.flush_list - in ascending order of buf_page_t::oldest_modification. 
*/ - buf_flush_sync_batch(recovered_lsn); - } - if (!last_batch) { + buf_flush_sync_batch(recovered_lsn); buf_pool_invalidate(); mysql_mutex_lock(&log_sys.mutex); } + else if (srv_operation == SRV_OPERATION_RESTORE || + srv_operation == SRV_OPERATION_RESTORE_EXPORT) + buf_flush_sync_batch(recovered_lsn); + else + /* Instead of flushing, last_batch could sort the buf_pool.flush_list + in ascending order of buf_page_t::oldest_modification() */ + log_sort_flush_list(); mysql_mutex_lock(&mutex); @@ -3463,24 +3750,6 @@ next_free_block: mysql_mutex_unlock(&mutex); } -/** Check whether the number of read redo log blocks exceeds the maximum. -Store last_stored_lsn if the recovery is not in the last phase. -@param[in,out] store whether to store page operations -@return whether the memory is exhausted */ -inline bool recv_sys_t::is_memory_exhausted(store_t *store) -{ - if (*store == STORE_NO || - UT_LIST_GET_LEN(blocks) * 3 < buf_pool.get_n_pages()) - return false; - if (*store == STORE_YES) - last_stored_lsn= recovered_lsn; - *store= STORE_NO; - DBUG_PRINT("ib_log",("Ran out of memory and last stored lsn " LSN_PF - " last stored offset " ULINTPF "\n", - recovered_lsn, recovered_offset)); - return true; -} - /** Adds data from a new log block to the parsing buffer of recv_sys if recv_sys.parse_start_lsn is non-zero. @param[in] log_block log block to add @@ -3588,7 +3857,7 @@ static bool recv_scan_log_recs( bool more_data = false; bool apply = recv_sys.mlog_checkpoint_lsn != 0; ulint recv_parsing_buf_size = RECV_PARSING_BUF_SIZE; - const bool last_phase = (*store == STORE_IF_EXISTS); + const store_t old_store = *store; ut_ad(start_lsn % OS_FILE_LOG_BLOCK_SIZE == 0); ut_ad(end_lsn % OS_FILE_LOG_BLOCK_SIZE == 0); ut_ad(end_lsn >= start_lsn + OS_FILE_LOG_BLOCK_SIZE); @@ -3710,8 +3979,8 @@ static bool recv_scan_log_recs( } /* During last phase of scanning, there can be redo logs - left in recv_sys.buf to parse & store it in recv_sys.heap */ - if (last_phase + left in recv_sys.buf to parse & store it in recv_sys.pages */ + if (old_store == STORE_IF_EXISTS && recv_sys.recovered_lsn < recv_sys.scanned_lsn) { more_data = true; } @@ -3732,33 +4001,21 @@ static bool recv_scan_log_recs( if (more_data && !recv_sys.is_corrupt_log()) { /* Try to parse more log records */ if (recv_sys.parse(checkpoint_lsn, store, apply)) { + finished = true; ut_ad(recv_sys.is_corrupt_log() || recv_sys.is_corrupt_fs() || recv_sys.mlog_checkpoint_lsn == recv_sys.recovered_lsn); - finished = true; - goto func_exit; - } - - recv_sys.is_memory_exhausted(store); - - if (recv_sys.recovered_offset > recv_parsing_buf_size / 4 - || (recv_sys.recovered_offset - && recv_sys.len - >= recv_parsing_buf_size - RECV_SCAN_SIZE)) { + } else if (recv_sys.recovered_offset + > recv_parsing_buf_size / 4 + || (recv_sys.recovered_offset + && recv_sys.len + >= recv_parsing_buf_size - RECV_SCAN_SIZE)) { /* Move parsing buffer data to the buffer start */ recv_sys_justify_left_parsing_buf(); } - - /* Need to re-parse the redo log which're stored - in recv_sys.buf */ - if (last_phase && *store == STORE_NO) { - finished = false; - } } -func_exit: - recv_sys.maybe_finish_batch(); mysql_mutex_unlock(&recv_sys.mutex); return(finished); } @@ -3802,13 +4059,6 @@ recv_group_scan_log_recs( ut_d(recv_sys.after_apply = last_phase); do { - if (last_phase && store == STORE_NO) { - store = STORE_IF_EXISTS; - recv_sys.apply(false); - /* Rescan the redo logs from last stored lsn */ - end_lsn = recv_sys.recovered_lsn; - } - start_lsn = ut_uint64_align_down(end_lsn, 
OS_FILE_LOG_BLOCK_SIZE); end_lsn = start_lsn; @@ -3913,8 +4163,8 @@ next: /* fall through */ case file_name_t::DELETED: recv_sys_t::map::iterator r = p++; - r->second.log.clear(); - recv_sys.pages.erase(r); + recv_sys.pages_it_invalidate(r); + recv_sys.erase(r); continue; } ut_ad(0); @@ -3938,8 +4188,6 @@ func_exit: continue; } - missing_tablespace = true; - if (srv_force_recovery > 0) { ib::warn() << "Tablespace " << rs.first <<" was not found at " << rs.second.name @@ -3954,14 +4202,11 @@ func_exit: << " was not found at '" << rs.second.name << "', but there" <<" were no modifications either."; + } else { + missing_tablespace = true; } } - if (!rescan || srv_force_recovery > 0) { - missing_tablespace = false; - } - - err = DB_SUCCESS; goto func_exit; } @@ -4344,6 +4589,12 @@ completed: return(DB_ERROR); } + ut_ad(contiguous_lsn <= recv_sys.recovered_lsn); + ut_ad(recv_sys.scanned_lsn == recv_sys.scanned_lsn); + + log_sys.set_lsn(recv_sys.recovered_lsn); + log_sys.set_flushed_lsn(recv_sys.recovered_lsn); + /* In case of multi-batch recovery, redo log for the last batch is not applied yet. */ diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 3b81dc7ee07..f8fb9270230 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -3431,15 +3431,12 @@ os_file_get_status( return(ret); } - -extern void fil_aio_callback(const IORequest &request); - -static void io_callback(tpool::aiocb *cb) +static void io_callback_errorcheck(const tpool::aiocb *cb) { - const IORequest &request= *static_cast - (static_cast(cb->m_userdata)); if (cb->m_err != DB_SUCCESS) { + const IORequest &request= *static_cast + (static_cast(cb->m_userdata)); ib::fatal() << "IO Error: " << cb->m_err << " during " << (request.is_async() ? "async " : "sync ") << (request.is_LRU() ? "lru " : "") << @@ -3447,19 +3444,36 @@ static void io_callback(tpool::aiocb *cb) " of " << cb->m_len << " bytes, for file " << cb->m_fh << ", returned " << cb->m_ret_len; } - /* Return cb back to cache*/ - if (cb->m_opcode == tpool::aio_opcode::AIO_PREAD) - { - ut_ad(read_slots->contains(cb)); - fil_aio_callback(request); - read_slots->release(cb); - } - else - { - ut_ad(write_slots->contains(cb)); - fil_aio_callback(request); - write_slots->release(cb); - } +} + +static void fake_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast(c); + ut_ad(read_slots->contains(cb)); + static_cast(static_cast(cb->m_userdata))-> + fake_read_complete(cb->m_offset); + read_slots->release(cb); +} + +static void read_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast(c); + ut_ad(cb->m_opcode == tpool::aio_opcode::AIO_PREAD); + io_callback_errorcheck(cb); + ut_ad(read_slots->contains(cb)); + static_cast + (static_cast(cb->m_userdata))->read_complete(); + read_slots->release(cb); +} + +static void write_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast(c); + ut_ad(cb->m_opcode == tpool::aio_opcode::AIO_PWRITE); + ut_ad(write_slots->contains(cb)); + static_cast + (static_cast(cb->m_userdata))->write_complete(); + write_slots->release(cb); } #ifdef LINUX_NATIVE_AIO @@ -3704,6 +3718,28 @@ void os_aio_wait_until_no_pending_reads(bool declare) tpool::tpool_wait_end(); } +/** Submit a fake read request during crash recovery. 
+@param type fake read request +@param offset additional context */ +void os_fake_read(const IORequest &type, os_offset_t offset) +{ + tpool::aiocb *cb= read_slots->acquire(); + + cb->m_group= read_slots->get_task_group(); + cb->m_fh= type.node->handle.m_file; + cb->m_buffer= nullptr; + cb->m_len= 0; + cb->m_offset= offset; + cb->m_opcode= tpool::aio_opcode::AIO_PREAD; + new (cb->m_userdata) IORequest{type}; + cb->m_internal_task.m_func= fake_io_callback; + cb->m_internal_task.m_arg= cb; + cb->m_internal_task.m_group= cb->m_group; + + srv_thread_pool->submit_task(&cb->m_internal_task); +} + + /** Request a read or write. @param type I/O request @param buf buffer @@ -3748,23 +3784,32 @@ func_exit: return err; } + io_slots* slots; + tpool::callback_func callback; + tpool::aio_opcode opcode; + if (type.is_read()) { ++os_n_file_reads; + slots = read_slots; + callback = read_io_callback; + opcode = tpool::aio_opcode::AIO_PREAD; } else { ++os_n_file_writes; + slots = write_slots; + callback = write_io_callback; + opcode = tpool::aio_opcode::AIO_PWRITE; } compile_time_assert(sizeof(IORequest) <= tpool::MAX_AIO_USERDATA_LEN); - io_slots* slots= type.is_read() ? read_slots : write_slots; tpool::aiocb* cb = slots->acquire(); cb->m_buffer = buf; - cb->m_callback = (tpool::callback_func)io_callback; + cb->m_callback = callback; cb->m_group = slots->get_task_group(); cb->m_fh = type.node->handle.m_file; cb->m_len = (int)n; cb->m_offset = offset; - cb->m_opcode = type.is_read() ? tpool::aio_opcode::AIO_PREAD : tpool::aio_opcode::AIO_PWRITE; + cb->m_opcode = opcode; new (cb->m_userdata) IORequest{type}; ut_a(reinterpret_cast(cb->m_buffer) % OS_FILE_LOG_BLOCK_SIZE @@ -3777,6 +3822,7 @@ func_exit: os_file_handle_error(type.node->name, type.is_read() ? "aio read" : "aio write"); err = DB_IO_ERROR; + type.node->space->release(); } goto func_exit; From df524dc06f7a92ebeb737755e8bd56c790fcf002 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2023 15:29:26 +0300 Subject: [PATCH 28/76] MDEV-31308 InnoDB monitor trx_rseg_history_len was accidentally disabled by default innodb_counter_info[]: Revert a change that was accidentally made in commit 204e7225dce32130ac2c96f469611d2cb421241e --- mysql-test/suite/innodb/r/monitor.result | 138 ++++++++++++----------- mysql-test/suite/innodb/t/monitor.test | 4 +- storage/innobase/srv/srv0mon.cc | 2 +- 3 files changed, 74 insertions(+), 70 deletions(-) diff --git a/mysql-test/suite/innodb/r/monitor.result b/mysql-test/suite/innodb/r/monitor.result index c874a84d26b..22e6fbea1e3 100644 --- a/mysql-test/suite/innodb/r/monitor.result +++ b/mysql-test/suite/innodb/r/monitor.result @@ -1,10 +1,9 @@ -set global innodb_monitor_disable = All; select name, if(enabled,'enabled','disabled') status from information_schema.innodb_metrics; name status metadata_table_handles_opened disabled -lock_deadlocks disabled -lock_timeouts disabled +lock_deadlocks enabled +lock_timeouts enabled lock_rec_lock_waits disabled lock_table_lock_waits disabled lock_rec_lock_requests disabled @@ -14,32 +13,32 @@ lock_rec_locks disabled lock_table_lock_created disabled lock_table_lock_removed disabled lock_table_locks disabled -lock_row_lock_current_waits disabled -lock_row_lock_time disabled -lock_row_lock_time_max disabled -lock_row_lock_waits disabled -lock_row_lock_time_avg disabled -buffer_pool_size disabled -buffer_pool_reads disabled -buffer_pool_read_requests disabled -buffer_pool_write_requests disabled -buffer_pool_wait_free disabled -buffer_pool_read_ahead 
disabled -buffer_pool_read_ahead_evicted disabled -buffer_pool_pages_total disabled -buffer_pool_pages_misc disabled -buffer_pool_pages_data disabled -buffer_pool_bytes_data disabled -buffer_pool_pages_dirty disabled -buffer_pool_bytes_dirty disabled -buffer_pool_pages_free disabled -buffer_pages_created disabled -buffer_pages_written disabled -buffer_pages_read disabled -buffer_index_sec_rec_cluster_reads disabled -buffer_index_sec_rec_cluster_reads_avoided disabled -buffer_data_reads disabled -buffer_data_written disabled +lock_row_lock_current_waits enabled +lock_row_lock_time enabled +lock_row_lock_time_max enabled +lock_row_lock_waits enabled +lock_row_lock_time_avg enabled +buffer_pool_size enabled +buffer_pool_reads enabled +buffer_pool_read_requests enabled +buffer_pool_write_requests enabled +buffer_pool_wait_free enabled +buffer_pool_read_ahead enabled +buffer_pool_read_ahead_evicted enabled +buffer_pool_pages_total enabled +buffer_pool_pages_misc enabled +buffer_pool_pages_data enabled +buffer_pool_bytes_data enabled +buffer_pool_pages_dirty enabled +buffer_pool_bytes_dirty enabled +buffer_pool_pages_free enabled +buffer_pages_created enabled +buffer_pages_written enabled +buffer_pages_read enabled +buffer_index_sec_rec_cluster_reads enabled +buffer_index_sec_rec_cluster_reads_avoided enabled +buffer_data_reads enabled +buffer_data_written enabled buffer_flush_batch_scanned disabled buffer_flush_batch_num_scan disabled buffer_flush_batch_scanned_per_call disabled @@ -72,8 +71,8 @@ buffer_flush_background_pages disabled buffer_LRU_batch_scanned disabled buffer_LRU_batch_num_scan disabled buffer_LRU_batch_scanned_per_call disabled -buffer_LRU_batch_flush_total_pages disabled -buffer_LRU_batch_evict_total_pages disabled +buffer_LRU_batch_flush_total_pages enabled +buffer_LRU_batch_evict_total_pages enabled buffer_LRU_single_flush_failure_count disabled buffer_LRU_get_free_search disabled buffer_LRU_search_scanned disabled @@ -114,24 +113,24 @@ buffer_page_written_blob disabled buffer_page_written_zblob disabled buffer_page_written_zblob2 disabled buffer_page_written_other disabled -os_data_reads disabled -os_data_writes disabled -os_data_fsyncs disabled -os_pending_reads disabled -os_pending_writes disabled -os_log_bytes_written disabled -os_log_fsyncs disabled -os_log_pending_fsyncs disabled -os_log_pending_writes disabled +os_data_reads enabled +os_data_writes enabled +os_data_fsyncs enabled +os_pending_reads enabled +os_pending_writes enabled +os_log_bytes_written enabled +os_log_fsyncs enabled +os_log_pending_fsyncs enabled +os_log_pending_writes enabled trx_rw_commits disabled trx_ro_commits disabled trx_nl_ro_commits disabled trx_commits_insert_update disabled trx_rollbacks disabled trx_rollbacks_savepoint disabled -trx_rseg_history_len disabled +trx_rseg_history_len enabled trx_undo_slots_used disabled -trx_undo_slots_cached disabled +trx_undo_slots_cached enabled trx_rseg_current_size disabled purge_del_mark_records disabled purge_upd_exist_or_extern_records disabled @@ -150,10 +149,10 @@ log_max_modified_age_async disabled log_pending_log_flushes disabled log_pending_checkpoint_writes disabled log_num_log_io disabled -log_waits disabled -log_write_requests disabled -log_writes disabled -log_padded disabled +log_waits enabled +log_write_requests enabled +log_writes enabled +log_padded enabled compress_pages_compressed disabled compress_pages_decompressed disabled compression_pad_increments disabled @@ -171,42 +170,42 @@ index_page_merge_successful disabled 
index_page_reorg_attempts disabled index_page_reorg_successful disabled index_page_discards disabled -adaptive_hash_searches disabled -adaptive_hash_searches_btree disabled +adaptive_hash_searches enabled +adaptive_hash_searches_btree enabled adaptive_hash_pages_added disabled adaptive_hash_pages_removed disabled adaptive_hash_rows_added disabled adaptive_hash_rows_removed disabled adaptive_hash_rows_deleted_no_hash_entry disabled adaptive_hash_rows_updated disabled -file_num_open_files disabled -ibuf_merges_insert disabled -ibuf_merges_delete_mark disabled -ibuf_merges_delete disabled -ibuf_merges_discard_insert disabled -ibuf_merges_discard_delete_mark disabled -ibuf_merges_discard_delete disabled -ibuf_merges disabled -ibuf_size disabled +file_num_open_files enabled +ibuf_merges_insert enabled +ibuf_merges_delete_mark enabled +ibuf_merges_delete enabled +ibuf_merges_discard_insert enabled +ibuf_merges_discard_delete_mark enabled +ibuf_merges_discard_delete enabled +ibuf_merges enabled +ibuf_size enabled innodb_master_thread_sleeps disabled -innodb_activity_count disabled +innodb_activity_count enabled innodb_master_active_loops disabled innodb_master_idle_loops disabled innodb_log_flush_usec disabled innodb_dict_lru_usec disabled innodb_dict_lru_count_active disabled innodb_dict_lru_count_idle disabled -innodb_dblwr_writes disabled -innodb_dblwr_pages_written disabled -innodb_page_size disabled +innodb_dblwr_writes enabled +innodb_dblwr_pages_written enabled +innodb_page_size enabled dml_reads disabled -dml_inserts disabled -dml_deletes disabled -dml_updates disabled -dml_system_reads disabled -dml_system_inserts disabled -dml_system_deletes disabled -dml_system_updates disabled +dml_inserts enabled +dml_deletes enabled +dml_updates enabled +dml_system_reads enabled +dml_system_inserts enabled +dml_system_deletes enabled +dml_system_updates enabled ddl_background_drop_indexes disabled ddl_online_create_index disabled ddl_pending_alter_table disabled @@ -216,6 +215,9 @@ icp_attempts disabled icp_no_match disabled icp_out_of_range disabled icp_match disabled +set global innodb_monitor_disable = All; +select name from information_schema.innodb_metrics where enabled; +name set global innodb_monitor_enable = all; select name from information_schema.innodb_metrics where not enabled; name diff --git a/mysql-test/suite/innodb/t/monitor.test b/mysql-test/suite/innodb/t/monitor.test index d6fa3f2fbc9..65a93e5a97a 100644 --- a/mysql-test/suite/innodb/t/monitor.test +++ b/mysql-test/suite/innodb/t/monitor.test @@ -5,12 +5,14 @@ # sys_vars.innodb_monitor_enable_basic --source include/have_innodb.inc -set global innodb_monitor_disable = All; # Test turn on/off the monitor counter with "all" option # By default, they will be off. 
select name, if(enabled,'enabled','disabled') status from information_schema.innodb_metrics; +set global innodb_monitor_disable = All; +select name from information_schema.innodb_metrics where enabled; + # Turn on all monitor counters set global innodb_monitor_enable = all; diff --git a/storage/innobase/srv/srv0mon.cc b/storage/innobase/srv/srv0mon.cc index 971f4f330c8..3065ab19462 100644 --- a/storage/innobase/srv/srv0mon.cc +++ b/storage/innobase/srv/srv0mon.cc @@ -704,7 +704,7 @@ static monitor_info_t innodb_counter_info[] = {"trx_rseg_history_len", "transaction", "Length of the TRX_RSEG_HISTORY list", static_cast( - MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT), + MONITOR_EXISTING | MONITOR_DISPLAY_CURRENT | MONITOR_DEFAULT_ON), MONITOR_DEFAULT_START, MONITOR_RSEG_HISTORY_LEN}, {"trx_undo_slots_used", "transaction", "Number of undo slots used", From d2420669bd07a29276e14f52a71bab0e7d5d8587 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 19 May 2023 15:38:48 +0300 Subject: [PATCH 29/76] MDEV-31309 Innodb_buffer_pool_read_requests is not updated correctly srv_export_innodb_status(): Update export_vars.innodb_buffer_pool_read_requests as it was done before commit a55b951e6082a4ce9a1f2ed5ee176ea7dbbaf1f2 (MDEV-26827). If innodb_status_variables[] pointed to a sharded variable, it would only access the first shard. --- storage/innobase/handler/ha_innodb.cc | 3 ++- storage/innobase/include/srv0srv.h | 2 ++ storage/innobase/srv/srv0srv.cc | 3 +++ 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 0b117c02e29..2937ca40752 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -946,7 +946,8 @@ static SHOW_VAR innodb_status_variables[]= { {"buffer_pool_read_ahead", &buf_pool.stat.n_ra_pages_read, SHOW_SIZE_T}, {"buffer_pool_read_ahead_evicted", &buf_pool.stat.n_ra_pages_evicted, SHOW_SIZE_T}, - {"buffer_pool_read_requests", &buf_pool.stat.n_page_gets, SHOW_SIZE_T}, + {"buffer_pool_read_requests", + &export_vars.innodb_buffer_pool_read_requests, SHOW_SIZE_T}, {"buffer_pool_reads", &buf_pool.stat.n_pages_read, SHOW_SIZE_T}, {"buffer_pool_wait_free", &buf_pool.stat.LRU_waits, SHOW_SIZE_T}, {"buffer_pool_write_requests", diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 96cfe886c02..c4c854f6b9c 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -674,6 +674,8 @@ struct export_var_t{ #ifdef UNIV_DEBUG ulint innodb_buffer_pool_pages_latched; /*!< Latched pages */ #endif /* UNIV_DEBUG */ + /** buf_pool.stat.n_page_gets (a sharded counter) */ + ulint innodb_buffer_pool_read_requests; ulint innodb_buffer_pool_write_requests;/*!< srv_stats.buf_pool_write_requests */ ulint innodb_checkpoint_age; ulint innodb_checkpoint_max_age; diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index af2845085ad..41ef2bccdc4 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1011,6 +1011,9 @@ srv_export_innodb_status(void) export_vars.innodb_data_written = srv_stats.data_written + (dblwr << srv_page_size_shift); + export_vars.innodb_buffer_pool_read_requests + = buf_pool.stat.n_page_gets; + export_vars.innodb_buffer_pool_write_requests = srv_stats.buf_pool_write_requests; From 03d4fd3214bae64856255f4de1eb533b05f88e8d Mon Sep 17 00:00:00 2001 From: Robin Newhouse Date: Fri, 27 Jan 2023 23:10:47 +0000 Subject: [PATCH 
30/76] Backport GitLab CI to 10.5 Add .gitlab-ci.yml file to earliest supported branch to enable automated building and testing for all MariaDB major branches. Note to mergers: GitLab CI is available for branches >= 10.6. This commit includes a GitLab CI file identical to that in branches >= 10.6, except for the MARIADB_MAJOR_VERSION variable which should reflect the branch version. A modified CI will be included in branches 10.4 with PR !2418. Also changed is the `allow_failure: true` for the MSAN build, which should be merged up to later branches. All new code of the whole pull request, including one or several files that are either new files or modified ones, are contributed under the BSD-new license. I am contributing on behalf of my employer Amazon Web Services, Inc. --- .gitlab-ci.yml | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 39dae0facb8..0a9113a36c7 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -39,10 +39,10 @@ default: # submodules (a commit in this repo does not affect their builds anyway) and # many components that are otherwise slow to build. variables: - CMAKE_FLAGS: "-DPLUGIN_COLUMNSTORE=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_S3=NO -DPLUGIN_MROONGA=NO -DPLUGIN_CONNECT=NO -DPLUGIN_MROONGA=NO -DPLUGIN_TOKUDB=NO -DPLUGIN_PERFSCHEMA=NO -DWITH_WSREP=OFF" + CMAKE_FLAGS: "-DWITH_SSL=system -DPLUGIN_COLUMNSTORE=NO -DPLUGIN_ROCKSDB=NO -DPLUGIN_S3=NO -DPLUGIN_MROONGA=NO -DPLUGIN_CONNECT=NO -DPLUGIN_MROONGA=NO -DPLUGIN_TOKUDB=NO -DPLUGIN_PERFSCHEMA=NO -DWITH_WSREP=OFF" # Major version dictates which branches share the same ccache. E.g. 10.6-abc # and 10.6-xyz will have the same cache. - MARIADB_MAJOR_VERSION: "10.6" + MARIADB_MAJOR_VERSION: "10.5" # NOTE! Currently ccache is only used on the Centos8 build. As each job has # sufficiently different environments they are unable to benefit from each # other's ccaches. As each build generates about 1 GB of ccache, having @@ -82,13 +82,13 @@ fedora: GIT_STRATEGY: fetch GIT_SUBMODULE_STRATEGY: normal script: - - yum install -y yum-utils rpm-build openssl-devel graphviz clang gnutls-devel + - yum install -y yum-utils rpm-build openssl-devel graphviz # Accelerate builds with unsafe disk access, as we can afford to loose the entire build anyway - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm # This repository does not have any .spec files, so install dependencies based on Fedora spec file - yum-builddep -y mariadb-server - mkdir builddir; cd builddir - - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=bundled .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log - cmake --graphviz=../dependencies.dot .. 
&& dot -Tpng -o ../dependencies.png ../dependencies.dot - eatmydata make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just @@ -113,13 +113,13 @@ fedora-ninja: GIT_STRATEGY: fetch GIT_SUBMODULE_STRATEGY: normal script: - - yum install -y yum-utils rpm-build openssl-devel graphviz ninja-build gnutls-devel + - yum install -y yum-utils rpm-build openssl-devel graphviz ninja-build # Accelerate builds with unsafe disk access, as we can afford to loose the entire build anyway - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm # This repository does not have any .spec files, so install dependencies based on Fedora spec file - yum-builddep -y mariadb-server - mkdir builddir; cd builddir - - cmake -DRPM=generic $CMAKE_FLAGS -DWITH_SSL=bundled -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON -G Ninja .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake -DRPM=generic $CMAKE_FLAGS -DCMAKE_BUILD_WITH_INSTALL_RPATH=ON -G Ninja .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log - ninja -t graph > ../dependencies.dot && dot -Tpng -o ../dependencies.png ../dependencies.dot - eatmydata ninja package -j 2 --verbose 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log # @TODO: Unlike other builds, the Ninja builds using Gitlab.com runners don't get stuck, but they do get @@ -144,7 +144,7 @@ fedora-clang: GIT_STRATEGY: fetch GIT_SUBMODULE_STRATEGY: normal script: - - yum install -y yum-utils rpm-build openssl-devel graphviz clang gnutls-devel + - yum install -y yum-utils rpm-build openssl-devel graphviz clang # Accelerate builds with unsafe disk access, as we can afford to loose the entire build anyway - yum install -y https://github.com/stewartsmith/libeatmydata/releases/download/v129/libeatmydata-129-1.fc33.x86_64.rpm # This repository does not have any .spec files, so install dependencies based on Fedora spec file @@ -156,7 +156,7 @@ fedora-clang: - export CC_FOR_BUILD=${CC_FOR_BUILD:-clang} - export CFLAGS='-Wno-unused-command-line-argument' - export CXXFLAGS='-Wno-unused-command-line-argument' - - cmake -DRPM=generic $CMAKE_FLAGS -DWITH_SSL=bundled .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake -DRPM=generic $CMAKE_FLAGS .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log - cmake --graphviz=../dependencies.dot .. && dot -Tpng -o ../dependencies.png ../dependencies.dot - eatmydata make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just @@ -181,7 +181,7 @@ fedora-sanitizer: GIT_STRATEGY: fetch GIT_SUBMODULE_STRATEGY: normal script: - - yum install -y yum-utils rpm-build openssl-devel clang gnutls-devel + - yum install -y yum-utils rpm-build openssl-devel clang - yum install -y libasan libtsan libubsan # This repository does not have any .spec files, so install dependencies based on Fedora spec file - yum-builddep -y mariadb-server @@ -192,7 +192,7 @@ fedora-sanitizer: - export CC_FOR_BUILD=${CC_FOR_BUILD:-clang} - export CFLAGS='-Wno-unused-command-line-argument' - export CXXFLAGS='-Wno-unused-command-line-argument' - - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=bundled $SANITIZER .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS $SANITIZER .. 
2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log # @TODO: the build will fail consistently at 24% when trying to make using eatmydata - make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log - *rpm_listfiles @@ -233,7 +233,7 @@ centos8: # This repository does not have any .spec files, so install dependencies based on CentOS spec file - yum-builddep -y mariadb-server - mkdir builddir; cd builddir - - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=system .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log - eatmydata make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just # get stuck when running multi-proc and out of memory, see https://jira.mariadb.org/browse/MDEV-25968 @@ -266,7 +266,7 @@ centos7: # ..with a few extra ones, as CentOS 7 is very old and these are added in newer MariaDB releases - yum install -y yum-utils rpm-build gcc gcc-c++ bison libxml2-devel libevent-devel openssl-devel pcre2-devel - mkdir builddir; cd builddir - - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS -DWITH_SSL=system .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log + - cmake -DRPM=$CI_JOB_NAME $CMAKE_FLAGS .. 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log - make package -j 2 2>&1 | tee -a ../build-$CI_JOB_NAME-$CI_COMMIT_REF_SLUG.log # @TODO: Don't use -j without the limit of 2 on Gitlab.com as builds just # get stuck when running multi-proc and out of memory, see https://jira.mariadb.org/browse/MDEV-25968 @@ -331,6 +331,7 @@ mysql-test-run-asan: needs: - "fedora-sanitizer: [-DWITH_ASAN=YES]" <<: *mysql-test-run-def + allow_failure: true artifacts: when: always # Also show results when tests fail reports: From 3f59bbeeaec751e9aabdc544324546f3c8326f0f Mon Sep 17 00:00:00 2001 From: Teemu Ollakka Date: Mon, 17 Apr 2023 16:04:01 +0300 Subject: [PATCH 31/76] MDEV-29293 MariaDB stuck on starting commit state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The problem seems to be a deadlock between KILL command execution and BF abort issued by an applier, where: * KILL has locked victim's LOCK_thd_kill and LOCK_thd_data. * Applier has innodb side global lock mutex and victim trx mutex. * KILL is calling innobase_kill_query, and is blocked by innodb global lock mutex. * Applier is in wsrep_innobase_kill_one_trx and is blocked by victim's LOCK_thd_kill. The fix in this commit removes the TOI replication of KILL command and makes KILL execution a less intrusive operation. Aborting the victim happens now by using awake_no_mutex() and ha_abort_transaction(). If the KILL happens when the transaction is committing, the KILL operation is postponed to happen after the statement has completed in order to avoid KILL interrupting commit processing. Notable changes in this commit: * wsrep client connection's error state may remain sticky after client connection is closed. This error message will then pop up for the next client session issuing its first SQL statement. This problem arose with test galera.galera_bf_kill. The fix is to reset the wsrep client error state before a THD is reused for the next connection. * Release THD locks in wsrep_abort_transaction when locking innodb mutexes. This guarantees the same locking order as with applier BF aborting.
* BF abort from MDL was changed to do BF abort on server/wsrep-lib side first, and only then do the BF abort on InnoDB side. This removes the need to call back from InnoDB for BF aborts which originate from MDL and simplifies the locking. * Removed wsrep_thd_set_wsrep_aborter() from service_wsrep.h. The manipulation of the wsrep_aborter can be done solely on server side. Moreover, it is now debug only variable and could be excluded from optimized builds. * Remove LOCK_thd_kill from wsrep_thd_LOCK/UNLOCK to allow more fine grained locking for SR BF abort which may require locking of victim LOCK_thd_kill. Added explicit call for wsrep_thd_kill_LOCK/UNLOCK where appropriate. * Wsrep-lib was updated to version which allows external locking for BF abort calls. Changes to MTR tests: * Disable galera_bf_abort_group_commit. This test is going to be removed (MDEV-30855). * Record galera_gcache_recover_manytrx as result file was incomplete. Trivial change. * Make galera_create_table_as_select more deterministic: Wait until CTAS execution has reached MDL wait for multi-master conflict case. Expected error from multi-master conflict is ER_QUERY_INTERRUPTED. This is because CTAS does not yet have open wsrep transaction when it is waiting for MDL, query gets interrupted instead of BF aborted. This should be addressed in separate task. * A new test galera_kill_group_commit to verify correct behavior when KILL is executed while the transaction is committing. Co-authored-by: Seppo Jaakola Co-authored-by: Jan Lindström Signed-off-by: Julius Goryavsky --- include/mysql/service_wsrep.h | 9 +- mysql-test/suite/galera/disabled.def | 2 + mysql-test/suite/galera/r/MDEV-29293.result | 21 ++ .../r/galera_create_table_as_select.result | 1 + .../r/galera_gcache_recover_manytrx.result | 3 - .../galera/r/galera_kill_group_commit.result | 27 +++ mysql-test/suite/galera/t/MDEV-29293.test | 41 ++++ .../t/galera_create_table_as_select.test | 6 +- .../galera/t/galera_kill_group_commit.cnf | 5 + .../galera/t/galera_kill_group_commit.test | 69 ++++++ sql/handler.cc | 7 + sql/service_wsrep.cc | 39 +--- sql/sql_class.cc | 27 ++- sql/sql_class.h | 9 +- sql/sql_parse.cc | 58 +---- sql/sql_plugin_services.inl | 2 +- sql/wsrep_dummy.cc | 7 +- sql/wsrep_high_priority_service.cc | 1 + sql/wsrep_mysqld.cc | 45 ++-- sql/wsrep_server_service.cc | 8 +- sql/wsrep_server_service.h | 3 +- sql/wsrep_thd.cc | 214 +++++++++++++----- sql/wsrep_thd.h | 36 ++- sql/wsrep_trans_observer.h | 23 +- storage/innobase/handler/ha_innodb.cc | 190 +++++++++------- wsrep-lib | 2 +- 26 files changed, 592 insertions(+), 263 deletions(-) create mode 100644 mysql-test/suite/galera/r/MDEV-29293.result create mode 100644 mysql-test/suite/galera/r/galera_kill_group_commit.result create mode 100644 mysql-test/suite/galera/t/MDEV-29293.test create mode 100644 mysql-test/suite/galera/t/galera_kill_group_commit.cnf create mode 100644 mysql-test/suite/galera/t/galera_kill_group_commit.test diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h index 42b758c03f3..f3588da4b46 100644 --- a/include/mysql/service_wsrep.h +++ b/include/mysql/service_wsrep.h @@ -57,6 +57,7 @@ extern struct wsrep_service_st { my_bool (*wsrep_on_func)(const MYSQL_THD thd); bool (*wsrep_prepare_key_for_innodb_func)(MYSQL_THD thd, const unsigned char*, size_t, const unsigned char*, size_t, struct wsrep_buf*, size_t*); void (*wsrep_thd_LOCK_func)(const MYSQL_THD thd); + int (*wsrep_thd_TRYLOCK_func)(const MYSQL_THD thd); void (*wsrep_thd_UNLOCK_func)(const MYSQL_THD thd); const 
char * (*wsrep_thd_query_func)(const MYSQL_THD thd); int (*wsrep_thd_retry_counter_func)(const MYSQL_THD thd); @@ -86,7 +87,6 @@ extern struct wsrep_service_st { ulong (*wsrep_OSU_method_get_func)(const MYSQL_THD thd); my_bool (*wsrep_thd_has_ignored_error_func)(const MYSQL_THD thd); void (*wsrep_thd_set_ignored_error_func)(MYSQL_THD thd, my_bool val); - bool (*wsrep_thd_set_wsrep_aborter_func)(MYSQL_THD bf_thd, MYSQL_THD thd); void (*wsrep_report_bf_lock_wait_func)(const MYSQL_THD thd, unsigned long long trx_id); void (*wsrep_thd_kill_LOCK_func)(const MYSQL_THD thd); @@ -108,6 +108,7 @@ extern struct wsrep_service_st { #define wsrep_on(thd) (thd) && WSREP_ON && wsrep_service->wsrep_on_func(thd) #define wsrep_prepare_key_for_innodb(A,B,C,D,E,F,G) wsrep_service->wsrep_prepare_key_for_innodb_func(A,B,C,D,E,F,G) #define wsrep_thd_LOCK(T) wsrep_service->wsrep_thd_LOCK_func(T) +#define wsrep_thd_TRYLOCK(T) wsrep_service->wsrep_thd_TRYLOCK_func(T) #define wsrep_thd_UNLOCK(T) wsrep_service->wsrep_thd_UNLOCK_func(T) #define wsrep_thd_kill_LOCK(T) wsrep_service->wsrep_thd_kill_LOCK_func(T) #define wsrep_thd_kill_UNLOCK(T) wsrep_service->wsrep_thd_kill_UNLOCK_func(T) @@ -136,7 +137,6 @@ extern struct wsrep_service_st { #define wsrep_OSU_method_get(T) wsrep_service->wsrep_OSU_method_get_func(T) #define wsrep_thd_has_ignored_error(T) wsrep_service->wsrep_thd_has_ignored_error_func(T) #define wsrep_thd_set_ignored_error(T,V) wsrep_service->wsrep_thd_set_ignored_error_func(T,V) -#define wsrep_thd_set_wsrep_aborter(T) wsrep_service->wsrep_thd_set_wsrep_aborter_func(T1, T2) #define wsrep_report_bf_lock_wait(T,I) wsrep_service->wsrep_report_bf_lock_wait(T,I) #define wsrep_thd_set_PA_unsafe(T) wsrep_service->wsrep_thd_set_PA_unsafe_func(T) #else @@ -170,6 +170,8 @@ void wsrep_set_data_home_dir(const char *data_dir); extern "C" my_bool wsrep_on(const MYSQL_THD thd); /* Lock thd wsrep lock */ extern "C" void wsrep_thd_LOCK(const MYSQL_THD thd); +/* Try thd wsrep lock. Return non-zero if lock could not be taken. 
*/ +extern "C" int wsrep_thd_TRYLOCK(const MYSQL_THD thd); /* Unlock thd wsrep lock */ extern "C" void wsrep_thd_UNLOCK(const MYSQL_THD thd); @@ -192,8 +194,6 @@ extern "C" my_bool wsrep_thd_is_local(const MYSQL_THD thd); /* Return true if thd is in high priority mode */ /* todo: rename to is_high_priority() */ extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd); -/* set wsrep_aborter for the target THD */ -extern "C" bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd); /* Return true if thd is in TOI mode */ extern "C" my_bool wsrep_thd_is_toi(const MYSQL_THD thd); /* Return true if thd is in replicating TOI mode */ @@ -237,7 +237,6 @@ extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd); extern "C" ulong wsrep_OSU_method_get(const MYSQL_THD thd); extern "C" my_bool wsrep_thd_has_ignored_error(const MYSQL_THD thd); extern "C" void wsrep_thd_set_ignored_error(MYSQL_THD thd, my_bool val); -extern "C" bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd); extern "C" void wsrep_report_bf_lock_wait(const THD *thd, unsigned long long trx_id); /* declare parallel applying unsafety for the THD */ diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d15f2d271ca..0c98133ab5f 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -22,3 +22,5 @@ MDEV-26575 : MDEV-29878 Galera test failure on MDEV-26575 galera_bf_abort_shutdown : MDEV-29918 Assertion failure on galera_bf_abort_shutdown galera_wan : [ERROR] WSREP: /home/buildbot/buildbot/build/gcs/src/gcs_state_msg.cpp:gcs_state_msg_get_quorum():947: Failed to establish quorum. galera_var_ignore_apply_errors : 28: "Server did not transition to READY state" +MDEV-27713 : test is using get_lock(), which is now rejected in cluster +galera_bf_abort_group_commit : MDEV-30855 PR to remove the test exists diff --git a/mysql-test/suite/galera/r/MDEV-29293.result b/mysql-test/suite/galera/r/MDEV-29293.result new file mode 100644 index 00000000000..70c0cc84a31 --- /dev/null +++ b/mysql-test/suite/galera/r/MDEV-29293.result @@ -0,0 +1,21 @@ +connection node_2; +connection node_1; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1; +set wsrep_sync_wait = 0; +CREATE TABLE t1(a int not null primary key auto_increment, b int) engine=InnoDB; +INSERT INTO t1 VALUES (1,2); +connection node_1a; +BEGIN; +UPDATE t1 SET b=3 WHERE a=1; +connection node_1; +set debug_sync='wsrep_kill_before_awake_no_mutex SIGNAL before_kill WAIT_FOR continue'; +connection node_1b; +set debug_sync= 'now WAIT_FOR before_kill'; +connection node_2; +UPDATE t1 SET b=7 WHERE a=1; +connection node_1b; +set debug_sync= 'now SIGNAL continue'; +connection node_1; +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/galera/r/galera_create_table_as_select.result b/mysql-test/suite/galera/r/galera_create_table_as_select.result index 6f65ee99f0a..beda5f30fe2 100644 --- a/mysql-test/suite/galera/r/galera_create_table_as_select.result +++ b/mysql-test/suite/galera/r/galera_create_table_as_select.result @@ -82,6 +82,7 @@ connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; LOCK TABLE t2 WRITE; connection node_1; CREATE TABLE t1 AS SELECT * FROM t2;; +connection node_1a; connection node_2; SELECT COUNT(*) = 5 FROM t2; COUNT(*) = 5 diff --git a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result index 
8495bfde2f9..9b1e8105c1c 100644 --- a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result +++ b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result @@ -134,6 +134,3 @@ connection node_1; call mtr.add_suppression("Error in Log_event::read_log_event():.*"); CALL mtr.add_suppression("conflict state 7 after post commit"); CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); -connection node_2; -call mtr.add_suppression("Error in Log_event::read_log_event():.*"); -CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); diff --git a/mysql-test/suite/galera/r/galera_kill_group_commit.result b/mysql-test/suite/galera/r/galera_kill_group_commit.result new file mode 100644 index 00000000000..bb59ce1486f --- /dev/null +++ b/mysql-test/suite/galera/r/galera_kill_group_commit.result @@ -0,0 +1,27 @@ +connection node_2; +connection node_1; +connect node_1_kill, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1; +SET SESSION wsrep_sync_wait = 0; +connect node_1_follower, 127.0.0.1, root, , test, $NODE_MYPORT_1; +SET SESSION wsrep_sync_wait = 0; +connection node_1; +CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; +SET SESSION DEBUG_SYNC = "commit_before_enqueue SIGNAL leader_before_enqueue_reached WAIT_FOR leader_before_enqueue_continue"; +INSERT INTO t1 VALUES (1); +connection node_1_ctrl; +SET DEBUG_SYNC = "now WAIT_FOR leader_before_enqueue_reached"; +connection node_1_follower; +INSERT INTO t1 VALUES (2);; +connection node_1_ctrl; +connection node_1_kill; +# Execute KILL QUERY for group commit follower +SET DEBUG_SYNC = "now SIGNAL leader_before_enqueue_continue"; +connection node_1_follower; +connection node_1; +SELECT * FROM t1; +f1 +1 +2 +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MDEV-29293.test b/mysql-test/suite/galera/t/MDEV-29293.test new file mode 100644 index 00000000000..dacbf714c06 --- /dev/null +++ b/mysql-test/suite/galera/t/MDEV-29293.test @@ -0,0 +1,41 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_have_debug_sync.inc + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 +set wsrep_sync_wait = 0; + +CREATE TABLE t1(a int not null primary key auto_increment, b int) engine=InnoDB; +INSERT INTO t1 VALUES (1,2); + +--connection node_1a +--let $victim_id = `SELECT CONNECTION_ID()` +BEGIN; +UPDATE t1 SET b=3 WHERE a=1; + +--connection node_1 +set debug_sync='wsrep_kill_before_awake_no_mutex SIGNAL before_kill WAIT_FOR continue'; +--disable_query_log +--disable_result_log +--send_eval KILL CONNECTION $victim_id +--enable_result_log +--enable_query_log + +--connection node_1b +set debug_sync= 'now WAIT_FOR before_kill'; + +--connection node_2 +UPDATE t1 SET b=7 WHERE a=1; + +--connection node_1b +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE User = 'system user' AND State LIKE 'Update_rows_log_event%'; +--source include/wait_condition.inc +set debug_sync= 'now SIGNAL continue'; + +--connection node_1 +--reap +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; + diff --git a/mysql-test/suite/galera/t/galera_create_table_as_select.test b/mysql-test/suite/galera/t/galera_create_table_as_select.test index a6c1f657280..cfee63e5e27 100644 --- a/mysql-test/suite/galera/t/galera_create_table_as_select.test +++ b/mysql-test/suite/galera/t/galera_create_table_as_select.test @@ -113,6 
+113,10 @@ LOCK TABLE t2 WRITE; --connection node_1 --send CREATE TABLE t1 AS SELECT * FROM t2; +--connection node_1a +--let $wait_condition = SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE STATE LIKE 'Waiting for table metadata lock%' +--source include/wait_condition.inc + --connection node_2 SELECT COUNT(*) = 5 FROM t2; CREATE TABLE t1 AS SELECT * FROM t2; @@ -121,7 +125,7 @@ CREATE TABLE t1 AS SELECT * FROM t2; UNLOCK TABLES; --connection node_1 ---error ER_TABLE_EXISTS_ERROR,ER_LOCK_DEADLOCK +--error ER_TABLE_EXISTS_ERROR,ER_QUERY_INTERRUPTED --reap DROP TABLE t1, t2; diff --git a/mysql-test/suite/galera/t/galera_kill_group_commit.cnf b/mysql-test/suite/galera/t/galera_kill_group_commit.cnf new file mode 100644 index 00000000000..60f4f776409 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_kill_group_commit.cnf @@ -0,0 +1,5 @@ +!include ../galera_2nodes.cnf + +[mysqld] +log-bin +log-slave-updates diff --git a/mysql-test/suite/galera/t/galera_kill_group_commit.test b/mysql-test/suite/galera/t/galera_kill_group_commit.test new file mode 100644 index 00000000000..4b84f2d90ef --- /dev/null +++ b/mysql-test/suite/galera/t/galera_kill_group_commit.test @@ -0,0 +1,69 @@ +# +# Verify that transaction which has reached group commit queue +# cannot be killed. If the kill succeeds, assertion for +# wsrep transaction state will fail. +# +# If the bug is present, i.e. wsrep transaction gets killed during +# group commit wait, this test is enough to reproduce the crash +# most of the time. +# + +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_cluster.inc + +# Connection for KILL commands +--connect node_1_kill, 127.0.0.1, root, , test, $NODE_MYPORT_1 +# Connection for sync point control +--connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1 +SET SESSION wsrep_sync_wait = 0; +# Connection for group commit follower +--connect node_1_follower, 127.0.0.1, root, , test, $NODE_MYPORT_1 +# Need to disable sync wait to reach commit queue when leader +# is blocked. +SET SESSION wsrep_sync_wait = 0; +--let $follower_id = `SELECT CONNECTION_ID()` + +--connection node_1 +CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; + +SET SESSION DEBUG_SYNC = "commit_before_enqueue SIGNAL leader_before_enqueue_reached WAIT_FOR leader_before_enqueue_continue"; +--send INSERT INTO t1 VALUES (1) + +--connection node_1_ctrl +SET DEBUG_SYNC = "now WAIT_FOR leader_before_enqueue_reached"; + +--connection node_1_follower +# SET SESSION DEBUG_SYNC = "group_commit_waiting_for_prior SIGNAL follower_waiting_for_prior_reached WAIT_FOR follower_waiting_for_prior_continue"; +--send INSERT INTO t1 VALUES (2); + +--connection node_1_ctrl +# TODO: Is it possible to use sync points to enforce group commit to happen? +# The leader will hold commit monitor in commit_before_enqueue sync point, +# which prevents the follower to reach the group commit wait state. +# We now sleep and expect the follower to reach group commit, but this +# may cause false negatives. +--sleep 1 + +--connection node_1_kill +--echo # Execute KILL QUERY for group commit follower +--disable_query_log +--disable_result_log +# Because it is currently impossible to verify that the +# follower has reached group commit queue, the KILL may +# sometimes return success. 
+--error 0,ER_KILL_DENIED_ERROR +--eval KILL QUERY $follower_id +--enable_result_log +--enable_query_log + +SET DEBUG_SYNC = "now SIGNAL leader_before_enqueue_continue"; +--connection node_1_follower +--reap + +--connection node_1 +--reap +SELECT * FROM t1; + +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/sql/handler.cc b/sql/handler.cc index eaaf4664c07..7f591b8456c 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -7599,6 +7599,9 @@ Compare_keys handler::compare_key_parts(const Field &old_field, concurrent accesses. And it's an overkill to take LOCK_plugin and iterate the whole installed_htons[] array every time. + @note Object victim_thd is not guaranteed to exist after this + function returns. + @param bf_thd brute force THD asking for the abort @param victim_thd victim THD to be aborted @@ -7612,6 +7615,8 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal) if (!WSREP(bf_thd) && !(bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU && wsrep_thd_is_toi(bf_thd))) { + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); + mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); DBUG_RETURN(0); } @@ -7623,6 +7628,8 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal) else { WSREP_WARN("Cannot abort InnoDB transaction"); + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); + mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); } DBUG_RETURN(0); diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc index 2d8eff2bd4b..ccce076d8a9 100644 --- a/sql/service_wsrep.cc +++ b/sql/service_wsrep.cc @@ -29,14 +29,17 @@ extern "C" my_bool wsrep_on(const THD *thd) extern "C" void wsrep_thd_LOCK(const THD *thd) { - mysql_mutex_lock(&thd->LOCK_thd_kill); mysql_mutex_lock(&thd->LOCK_thd_data); } +extern "C" int wsrep_thd_TRYLOCK(const THD *thd) +{ + return mysql_mutex_trylock(&thd->LOCK_thd_data); +} + extern "C" void wsrep_thd_UNLOCK(const THD *thd) { mysql_mutex_unlock(&thd->LOCK_thd_data); - mysql_mutex_unlock(&thd->LOCK_thd_kill); } extern "C" void wsrep_thd_kill_LOCK(const THD *thd) @@ -248,21 +251,12 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, if ((ret || !wsrep_on(victim_thd)) && signal) { - if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) - { - WSREP_DEBUG("victim is killed already by %llu, skipping awake", - victim_thd->wsrep_aborter); - wsrep_thd_UNLOCK(victim_thd); - return false; - } - - victim_thd->wsrep_aborter= bf_thd->thread_id; victim_thd->awake_no_mutex(KILL_QUERY_HARD); } else - WSREP_DEBUG("wsrep_thd_bf_abort skipped awake for %llu", thd_get_thread_id(victim_thd)); + WSREP_DEBUG("wsrep_thd_bf_abort skipped awake for %llu", + thd_get_thread_id(victim_thd)); - wsrep_thd_UNLOCK(victim_thd); return ret; } @@ -385,25 +379,6 @@ extern "C" ulong wsrep_OSU_method_get(const MYSQL_THD thd) return(global_system_variables.wsrep_OSU_method); } -extern "C" bool wsrep_thd_set_wsrep_aborter(THD *bf_thd, THD *victim_thd) -{ - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - if (!bf_thd) - { - victim_thd->wsrep_aborter= 0; - WSREP_DEBUG("wsrep_thd_set_wsrep_aborter resetting wsrep_aborter"); - return false; - } - if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) - { - return true; - } - victim_thd->wsrep_aborter= bf_thd->thread_id; - WSREP_DEBUG("wsrep_thd_set_wsrep_aborter setting wsrep_aborter %u", - victim_thd->wsrep_aborter); - return false; -} - extern "C" void wsrep_report_bf_lock_wait(const THD *thd, unsigned long long trx_id) { diff --git a/sql/sql_class.cc 
b/sql/sql_class.cc index 95a777c75cf..b22b766c409 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1305,6 +1305,11 @@ void THD::init() wsrep_affected_rows = 0; m_wsrep_next_trx_id = WSREP_UNDEFINED_TRX_ID; wsrep_aborter = 0; + wsrep_abort_by_kill = NOT_KILLED; + wsrep_abort_by_kill_err = 0; +#ifndef DBUG_OFF + wsrep_killed_state = 0; +#endif /* DBUG_OFF */ wsrep_desynced_backup_stage= false; #endif /* WITH_WSREP */ @@ -1656,6 +1661,13 @@ void THD::reset_for_reuse() #endif #ifdef WITH_WSREP wsrep_free_status(this); + wsrep_cs().reset_error(); + wsrep_aborter= 0; + wsrep_abort_by_kill= NOT_KILLED; + wsrep_abort_by_kill_err= 0; +#ifndef DBUG_OFF + wsrep_killed_state= 0; +#endif /* DBUG_OFF */ #endif /* WITH_WSREP */ } @@ -1911,7 +1923,9 @@ void THD::awake_no_mutex(killed_state state_to_set) } /* Interrupt target waiting inside a storage engine. */ - if (state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this)) + if (state_to_set != NOT_KILLED && + IF_WSREP(!wsrep_is_bf_aborted(this) && wsrep_abort_by_kill == NOT_KILLED, + true)) ha_kill_query(this, thd_kill_level(this)); abort_current_cond_wait(false); @@ -2153,6 +2167,17 @@ void THD::reset_killed() mysql_mutex_unlock(&LOCK_thd_kill); } #ifdef WITH_WSREP + if (WSREP_NNULL(this)) + { + if (wsrep_abort_by_kill != NOT_KILLED) + { + mysql_mutex_assert_not_owner(&LOCK_thd_kill); + mysql_mutex_lock(&LOCK_thd_kill); + wsrep_abort_by_kill= NOT_KILLED; + wsrep_abort_by_kill_err= 0; + mysql_mutex_unlock(&LOCK_thd_kill); + } + } mysql_mutex_assert_not_owner(&LOCK_thd_data); mysql_mutex_lock(&LOCK_thd_data); wsrep_aborter= 0; diff --git a/sql/sql_class.h b/sql/sql_class.h index 68a69762354..c373c0f6a43 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5147,7 +5147,14 @@ public: bool wsrep_ignore_table; /* thread who has started kill for this THD protected by LOCK_thd_data*/ my_thread_id wsrep_aborter; - + /* Kill signal used, if thread was killed by manual KILL. Protected by + LOCK_thd_kill. */ + std::atomic wsrep_abort_by_kill; + /* */ + struct err_info* wsrep_abort_by_kill_err; +#ifndef DBUG_OFF + int wsrep_killed_state; +#endif /* DBUG_OFF */ /* true if BF abort is observed in do_command() right after reading client's packet, and if the client has sent PS execute command. */ bool wsrep_delayed_BF_abort; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 4c7313265e8..59f21247445 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -7945,7 +7945,7 @@ static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, thd->wsrep_retry_counter < thd->variables.wsrep_retry_autocommit) { #ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", + DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", { const char act[]= "now " @@ -9309,21 +9309,15 @@ kill_one_thread(THD *thd, my_thread_id id, killed_state kill_signal, killed_type thd->security_ctx->user_matches(tmp->security_ctx)) #endif /* WITH_WSREP */ { + { #ifdef WITH_WSREP - DEBUG_SYNC(thd, "before_awake_no_mutex"); - if (tmp->wsrep_aborter && tmp->wsrep_aborter != thd->thread_id) - { - /* victim is in hit list already, bail out */ - WSREP_DEBUG("victim %lld has wsrep aborter: %lu, skipping awake()", - id, tmp->wsrep_aborter); - error= 0; - } - else + if (WSREP(tmp)) + { + /* Object tmp is not guaranteed to exist after wsrep_kill_thd() + returns, so do early return from this function. 
*/ + DBUG_RETURN(wsrep_kill_thd(thd, tmp, kill_signal)); + } #endif /* WITH_WSREP */ - { - WSREP_DEBUG("kill_one_thread victim: %lld wsrep_aborter %lu" - " by signal %d", - id, tmp->wsrep_aborter, kill_signal); tmp->awake_no_mutex(kill_signal); error= 0; } @@ -9448,18 +9442,6 @@ static void sql_kill(THD *thd, my_thread_id id, killed_state state, killed_type type) { uint error; -#ifdef WITH_WSREP - if (WSREP(thd)) - { - WSREP_DEBUG("sql_kill called"); - if (thd->wsrep_applier) - { - WSREP_DEBUG("KILL in applying, bailing out here"); - return; - } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) - } -#endif /* WITH_WSREP */ if (likely(!(error= kill_one_thread(thd, id, state, type)))) { if (!thd->killed) @@ -9469,13 +9451,6 @@ void sql_kill(THD *thd, my_thread_id id, killed_state state, killed_type type) } else my_error(error, MYF(0), id); -#ifdef WITH_WSREP - return; - wsrep_error_label: - error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR : - ER_KILL_DENIED_ERROR); - my_error(error, MYF(0), (long long) id); -#endif /* WITH_WSREP */ } @@ -9484,18 +9459,6 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) { uint error; ha_rows rows; -#ifdef WITH_WSREP - if (WSREP(thd)) - { - WSREP_DEBUG("sql_kill_user called"); - if (thd->wsrep_applier) - { - WSREP_DEBUG("KILL in applying, bailing out here"); - return; - } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) - } -#endif /* WITH_WSREP */ switch (error= kill_threads_for_user(thd, user, state, &rows)) { case 0: @@ -9511,11 +9474,6 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) default: my_error(error, MYF(0)); } -#ifdef WITH_WSREP - return; - wsrep_error_label: - my_error(ER_CANNOT_USER, MYF(0), user ? user->user.str : "NULL"); -#endif /* WITH_WSREP */ } diff --git a/sql/sql_plugin_services.inl b/sql/sql_plugin_services.inl index c6f07158003..8863f581afb 100644 --- a/sql/sql_plugin_services.inl +++ b/sql/sql_plugin_services.inl @@ -151,6 +151,7 @@ static struct wsrep_service_st wsrep_handler = { wsrep_on, wsrep_prepare_key_for_innodb, wsrep_thd_LOCK, + wsrep_thd_TRYLOCK, wsrep_thd_UNLOCK, wsrep_thd_query, wsrep_thd_retry_counter, @@ -177,7 +178,6 @@ static struct wsrep_service_st wsrep_handler = { wsrep_OSU_method_get, wsrep_thd_has_ignored_error, wsrep_thd_set_ignored_error, - wsrep_thd_set_wsrep_aborter, wsrep_report_bf_lock_wait, wsrep_thd_kill_LOCK, wsrep_thd_kill_UNLOCK, diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc index ac14fc4597a..a67da77c472 100644 --- a/sql/wsrep_dummy.cc +++ b/sql/wsrep_dummy.cc @@ -56,6 +56,11 @@ my_bool wsrep_on(const THD *) void wsrep_thd_LOCK(const THD *) { } +int wsrep_thd_TRYLOCK(const THD *) +{ + return 0; +} + void wsrep_thd_UNLOCK(const THD *) { } @@ -148,8 +153,6 @@ void wsrep_thd_set_ignored_error(THD*, my_bool) { } ulong wsrep_OSU_method_get(const THD*) { return 0;} -bool wsrep_thd_set_wsrep_aborter(THD*, THD*) -{ return 0;} void wsrep_report_bf_lock_wait(const THD*, unsigned long long) diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 53ef20f3e78..0a2fa273723 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -510,6 +510,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ m_thd->wait_for_prior_commit(); } + WSREP_DEBUG("checkpointing dummy write set %lld", ws_meta.seqno().get()); wsrep_set_SE_checkpoint(ws_meta.gtid(), wsrep_gtid_server.gtid()); if (!WSREP_EMULATE_BINLOG(m_thd)) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 
db4a2a2e7b9..bf28f7fd39a 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2249,11 +2249,6 @@ static int wsrep_TOI_event_buf(THD* thd, uchar** buf, size_t* buf_len) case SQLCOM_DROP_TABLE: err= wsrep_drop_table_query(thd, buf, buf_len); break; - case SQLCOM_KILL: - WSREP_DEBUG("KILL as TOI: %s", thd->query()); - err= wsrep_to_buf_helper(thd, thd->query(), thd->query_length(), - buf, buf_len); - break; case SQLCOM_CREATE_ROLE: if (sp_process_definer(thd)) { @@ -2675,8 +2670,15 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, /* Here we will call wsrep_abort_transaction so we should hold THD::LOCK_thd_data to protect victim from concurrent usage - and THD::LOCK_thd_kill to protect from disconnect or delete. */ - wsrep_thd_LOCK(granted_thd); + and THD::LOCK_thd_kill to protect from disconnect or delete. + + Note that all calls to wsrep_abort_thd() and ha_abort_transaction() + unlock LOCK_thd_kill for granted_thd, so granted_thd must not be + accessed after any of those calls. Moreover all other if branches + must release those locks. + */ + mysql_mutex_lock(&granted_thd->LOCK_thd_kill); + mysql_mutex_lock(&granted_thd->LOCK_thd_data); if (wsrep_thd_is_toi(granted_thd) || wsrep_thd_is_applying(granted_thd)) @@ -2685,22 +2687,22 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for SR in aborting state"); ticket->wsrep_report(wsrep_debug); - wsrep_thd_UNLOCK(granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); } else if (wsrep_thd_is_SR(granted_thd) && !wsrep_thd_is_SR(request_thd)) { WSREP_MDL_LOG(INFO, "MDL conflict, DDL vs SR", schema, schema_len, request_thd, granted_thd); wsrep_abort_thd(request_thd, granted_thd, 1); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { WSREP_MDL_LOG(INFO, "MDL BF-BF conflict", schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(true); - wsrep_thd_UNLOCK(granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); unireg_abort(1); } } @@ -2709,7 +2711,8 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for FLUSH"); ticket->wsrep_report(wsrep_debug); - wsrep_thd_UNLOCK(granted_thd); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); } else if (request_thd->lex->sql_command == SQLCOM_DROP_TABLE) { @@ -2717,8 +2720,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, wsrep_thd_transaction_state_str(granted_thd)); ticket->wsrep_report(wsrep_debug); wsrep_abort_thd(request_thd, granted_thd, 1); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { @@ -2728,8 +2729,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, if (granted_thd->wsrep_trx().active()) { wsrep_abort_thd(request_thd, granted_thd, true); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { @@ -2739,15 +2738,16 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, */ if (wsrep_thd_is_BF(request_thd, FALSE)) { + granted_thd->awake_no_mutex(KILL_QUERY_HARD); ha_abort_transaction(request_thd, granted_thd, TRUE); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&granted_thd->LOCK_thd_kill); } else { 
WSREP_MDL_LOG(INFO, "MDL unknown BF-BF conflict", schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(true); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); unireg_abort(1); } } @@ -2763,17 +2763,22 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, static bool abort_replicated(THD *thd) { bool ret_code= false; + wsrep_thd_kill_LOCK(thd); wsrep_thd_LOCK(thd); if (thd->wsrep_trx().state() == wsrep::transaction::s_committing) { WSREP_DEBUG("aborting replicated trx: %llu", (ulonglong)(thd->real_id)); - (void)wsrep_abort_thd(thd, thd, TRUE); + wsrep_abort_thd(thd, thd, TRUE); ret_code= true; } else + { + /* wsrep_abort_thd() above releases LOCK_thd_data and LOCK_thd_kill, so + must do it here too. */ wsrep_thd_UNLOCK(thd); - + wsrep_thd_kill_UNLOCK(thd); + } return ret_code; } diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc index ac7226b9948..a1b96a60672 100644 --- a/sql/wsrep_server_service.cc +++ b/sql/wsrep_server_service.cc @@ -143,9 +143,13 @@ void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_se wsrep_delete_threadvars(); } -void Wsrep_server_service::background_rollback(wsrep::client_state& client_state) +void Wsrep_server_service::background_rollback( + wsrep::unique_lock &lock WSREP_UNUSED, + wsrep::client_state &client_state) { - Wsrep_client_state& cs= static_cast(client_state); + DBUG_ASSERT(lock.owns_lock()); + Wsrep_client_state &cs= static_cast(client_state); + mysql_mutex_assert_owner(&cs.thd()->LOCK_thd_data); wsrep_fire_rollbacker(cs.thd()); } diff --git a/sql/wsrep_server_service.h b/sql/wsrep_server_service.h index 168e98206e3..0fc48402024 100644 --- a/sql/wsrep_server_service.h +++ b/sql/wsrep_server_service.h @@ -46,7 +46,8 @@ public: void release_high_priority_service(wsrep::high_priority_service*); - void background_rollback(wsrep::client_state&); + void background_rollback(wsrep::unique_lock &, + wsrep::client_state &); void bootstrap(); void log_message(enum wsrep::log::level, const char*); diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index ccb32fb13af..d05ddcbae16 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -307,50 +307,9 @@ void wsrep_fire_rollbacker(THD *thd) } } - -int wsrep_abort_thd(THD *bf_thd, - THD *victim_thd, - my_bool signal) +static bool wsrep_bf_abort_low(THD *bf_thd, THD *victim_thd) { - DBUG_ENTER("wsrep_abort_thd"); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); - - /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) - might not be true. 
- */ - if ((WSREP(bf_thd) || - ((WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) && - wsrep_thd_is_toi(bf_thd))) && - !wsrep_thd_is_aborting(victim_thd)) - { - WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", - (long long)bf_thd->real_id, (long long)victim_thd->real_id); - ha_abort_transaction(bf_thd, victim_thd, signal); - } - else - { - WSREP_DEBUG("wsrep_abort_thd not effective: bf %llu victim %llu " - "wsrep %d wsrep_on %d RSU %d TOI %d aborting %d", - (long long)bf_thd->real_id, (long long)victim_thd->real_id, - WSREP_NNULL(bf_thd), WSREP_ON, - bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU, - wsrep_thd_is_toi(bf_thd), - wsrep_thd_is_aborting(victim_thd)); - wsrep_thd_UNLOCK(victim_thd); - } - - DBUG_RETURN(1); -} - -bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) -{ - WSREP_LOG_THD(bf_thd, "BF aborter before"); - WSREP_LOG_THD(victim_thd, "victim before"); - - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); #ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("sync.wsrep_bf_abort", @@ -364,6 +323,88 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) };); #endif + wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno()); + bool ret; + + { + /* Adopt the lock, it is being held by the caller. */ + Wsrep_mutex wsm{&victim_thd->LOCK_thd_data}; + wsrep::unique_lock lock{wsm, std::adopt_lock}; + + if (wsrep_thd_is_toi(bf_thd)) + { + ret= victim_thd->wsrep_cs().total_order_bf_abort(lock, bf_seqno); + } + else + { + DBUG_ASSERT(WSREP(victim_thd) ? victim_thd->wsrep_trx().active() : 1); + ret= victim_thd->wsrep_cs().bf_abort(lock, bf_seqno); + } + if (ret) + { + /* BF abort should be allowed only once by wsrep-lib.*/ + DBUG_ASSERT(victim_thd->wsrep_aborter == 0); + victim_thd->wsrep_aborter= bf_thd->thread_id; + wsrep_bf_aborts_counter++; + } + lock.release(); /* No unlock at the end of the scope. */ + } + + /* Sanity check for wsrep-lib calls to return with LOCK_thd_data held. */ + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + + return ret; +} + + +void wsrep_abort_thd(THD *bf_thd, + THD *victim_thd, + my_bool signal) +{ + DBUG_ENTER("wsrep_abort_thd"); + + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + + /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) + might not be true. 
+ */ + if ((WSREP(bf_thd) + || ((WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) + && wsrep_thd_is_toi(bf_thd)) + || bf_thd->lex->sql_command == SQLCOM_KILL) + && !wsrep_thd_is_aborting(victim_thd) && + wsrep_bf_abort_low(bf_thd, victim_thd) && + !victim_thd->wsrep_cs().is_rollbacker_active()) + { + WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", + (long long)bf_thd->real_id, (long long)victim_thd->real_id); + victim_thd->awake_no_mutex(KILL_QUERY_HARD); + ha_abort_transaction(bf_thd, victim_thd, signal); + } + else + { + WSREP_DEBUG("wsrep_abort_thd not effective: bf %llu victim %llu " + "wsrep %d wsrep_on %d RSU %d TOI %d aborting %d", + (long long)bf_thd->real_id, (long long)victim_thd->real_id, + WSREP_NNULL(bf_thd), WSREP_ON, + bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU, + wsrep_thd_is_toi(bf_thd), + wsrep_thd_is_aborting(victim_thd)); + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); + mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); + } + + DBUG_VOID_RETURN; +} + +bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) +{ + WSREP_LOG_THD(bf_thd, "BF aborter before"); + WSREP_LOG_THD(victim_thd, "victim before"); + + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + if (WSREP(victim_thd) && !victim_thd->wsrep_trx().active()) { WSREP_DEBUG("wsrep_bf_abort, BF abort for non active transaction"); @@ -385,32 +426,81 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) mysql_mutex_lock(&victim_thd->LOCK_thd_data); } - bool ret; - wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno()); + return wsrep_bf_abort_low(bf_thd, victim_thd); +} - if (wsrep_thd_is_toi(bf_thd)) +uint wsrep_kill_thd(THD *thd, THD *victim_thd, killed_state kill_signal) +{ + DBUG_ENTER("wsrep_kill_thd"); + DBUG_ASSERT(WSREP(victim_thd)); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + using trans= wsrep::transaction; + auto trx_state= victim_thd->wsrep_trx().state(); +#ifndef DBUG_OFF + victim_thd->wsrep_killed_state= trx_state; +#endif /* DBUG_OFF */ + /* + Already killed or in commit codepath. Mark the victim as killed, + the killed status will be restored in wsrep_after_commit() and + will be processed after the commit is over. In case of multiple + KILLs happened on commit codepath, the last one will be effective. + */ + if (victim_thd->wsrep_abort_by_kill || + trx_state == trans::s_preparing || + trx_state == trans::s_committing || + trx_state == trans::s_ordered_commit) { - /* Here we enter wsrep-lib were LOCK_thd_data will be acquired, - thus we need to release it. However, we can still hold - LOCK_thd_kill to protect from disconnect or delete. */ + victim_thd->wsrep_abort_by_kill= kill_signal; mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - ret= victim_thd->wsrep_cs().total_order_bf_abort(bf_seqno); - mysql_mutex_lock(&victim_thd->LOCK_thd_data); + mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); + DBUG_RETURN(0); } - else + /* + Mark killed victim_thd with kill_signal so that awake_no_mutex does + not dive into storage engine. We use ha_abort_transaction() + to do the storage engine part for wsrep THDs. + */ + DEBUG_SYNC(thd, "wsrep_kill_before_awake_no_mutex"); + victim_thd->wsrep_abort_by_kill= kill_signal; + victim_thd->awake_no_mutex(kill_signal); + /* ha_abort_transaction() releases tmp->LOCK_thd_kill, so tmp + is not safe to access anymore. 
*/ + ha_abort_transaction(thd, victim_thd, 1); + DBUG_RETURN(0); +} + +void wsrep_backup_kill_for_commit(THD *thd) +{ + DBUG_ASSERT(WSREP(thd)); + mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + DBUG_ASSERT(thd->killed != NOT_KILLED); + mysql_mutex_lock(&thd->LOCK_thd_data); + /* If the transaction will roll back, keep the killed state. + For must replay, the replay will happen in different THD context + which is high priority and cannot be killed. The owning thread will + pick the killed state in after statement processing. */ + if (thd->wsrep_trx().state() != wsrep::transaction::s_cert_failed && + thd->wsrep_trx().state() != wsrep::transaction::s_must_abort && + thd->wsrep_trx().state() != wsrep::transaction::s_aborting && + thd->wsrep_trx().state() != wsrep::transaction::s_must_replay) { - /* Test: mysql-wsrep-features#165. Here we enter wsrep-lib - were LOCK_thd_data will be acquired and later LOCK_thd_kill - thus we need to release them. */ - wsrep_thd_UNLOCK(victim_thd); - ret= victim_thd->wsrep_cs().bf_abort(bf_seqno); - wsrep_thd_LOCK(victim_thd); + thd->wsrep_abort_by_kill= thd->killed; + thd->wsrep_abort_by_kill_err= thd->killed_err; + thd->killed= NOT_KILLED; + thd->killed_err= 0; } - if (ret) - { - wsrep_bf_aborts_counter++; - } - return ret; + mysql_mutex_unlock(&thd->LOCK_thd_data); +} + +void wsrep_restore_kill_after_commit(THD *thd) +{ + DBUG_ASSERT(WSREP(thd)); + mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + thd->killed= thd->wsrep_abort_by_kill; + thd->killed_err= thd->wsrep_abort_by_kill_err; + thd->wsrep_abort_by_kill= NOT_KILLED; + thd->wsrep_abort_by_kill_err= 0; } int wsrep_create_threadvars() diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h index 0ce612d6097..6f5a70a30a4 100644 --- a/sql/wsrep_thd.h +++ b/sql/wsrep_thd.h @@ -88,10 +88,44 @@ bool wsrep_create_appliers(long threads, bool mutex_protected=false); void wsrep_create_rollbacker(); bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd); -int wsrep_abort_thd(THD *bf_thd, +/* + Abort transaction for victim_thd. This function is called from + MDL BF abort codepath. + + @note This thread unlocks victim_thd->LOCK_thd_kill, so accessing + victim_thd after the function returns is not safe anymore. +*/ +void wsrep_abort_thd(THD *bf_thd, THD *victim_thd, my_bool signal) __attribute__((nonnull(1,2))); +/** + Kill wsrep connection with kill_signal. Object thd is not + guaranteed to exist anymore when this function returns. + + Asserts that the caller holds victim_thd->LOCK_thd_kill, + victim_thd->LOCK_thd_data. + + Releases victim_thd->LOCK_thd_kill, victim_thd->LOCK_thd_data. + + @param thd THD object for connection that executes the KILL. + @param victim_thd THD object for connection to be killed. + @param kill_signal Kill signal. + + @return Zero if the kill was successful, otherwise non-zero error code. + */ +uint wsrep_kill_thd(THD *thd, THD *victim_thd, killed_state kill_signal); + +/* + Backup kill status for commit. + */ +void wsrep_backup_kill_for_commit(THD *); + +/* + Restore KILL status after commit. + */ +void wsrep_restore_kill_after_commit(THD *); + /* Helper methods to deal with thread local storage. 
The purpose of these methods is to hide the details of thread diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 8f998244ee6..40fbf80cfb3 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -256,6 +256,11 @@ static inline int wsrep_before_prepare(THD* thd, bool all) thd->wsrep_trx().ws_meta().gtid(), wsrep_gtid_server.gtid()); } + + mysql_mutex_lock(&thd->LOCK_thd_kill); + if (thd->killed) wsrep_backup_kill_for_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + DBUG_RETURN(ret); } @@ -323,6 +328,11 @@ static inline int wsrep_before_commit(THD* thd, bool all) wsrep_gtid_server.gtid()); wsrep_register_for_group_commit(thd); } + + mysql_mutex_lock(&thd->LOCK_thd_kill); + if (thd->killed) wsrep_backup_kill_for_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + DBUG_RETURN(ret); } @@ -341,7 +351,8 @@ static inline int wsrep_before_commit(THD* thd, bool all) static inline int wsrep_ordered_commit(THD* thd, bool all) { DBUG_ENTER("wsrep_ordered_commit"); - WSREP_DEBUG("wsrep_ordered_commit: %d", wsrep_is_real(thd, all)); + WSREP_DEBUG("wsrep_ordered_commit: %d %lld", wsrep_is_real(thd, all), + (long long) wsrep_thd_trx_seqno(thd)); DBUG_ASSERT(wsrep_run_commit_hook(thd, all)); DBUG_RETURN(thd->wsrep_cs().ordered_commit()); } @@ -449,10 +460,18 @@ int wsrep_after_statement(THD* thd) wsrep::to_c_string(thd->wsrep_cs().state()), wsrep::to_c_string(thd->wsrep_cs().mode()), wsrep::to_c_string(thd->wsrep_cs().transaction().state())); - DBUG_RETURN((thd->wsrep_cs().state() != wsrep::client_state::s_none && + int ret= ((thd->wsrep_cs().state() != wsrep::client_state::s_none && thd->wsrep_cs().mode() == Wsrep_client_state::m_local) && !thd->internal_transaction() ? thd->wsrep_cs().after_statement() : 0); + + if (wsrep_is_active(thd)) + { + mysql_mutex_lock(&thd->LOCK_thd_kill); + wsrep_restore_kill_after_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + } + DBUG_RETURN(ret); } static inline void wsrep_after_apply(THD* thd) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index f7dd18e0e36..98385e3b5dd 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -18709,50 +18709,6 @@ static struct st_mysql_storage_engine innobase_storage_engine= #ifdef WITH_WSREP -static -void -wsrep_kill_victim( - MYSQL_THD const bf_thd, - MYSQL_THD thd, - trx_t* victim_trx, - my_bool signal) -{ - DBUG_ENTER("wsrep_kill_victim"); - - /* Mark transaction as a victim for Galera abort */ - victim_trx->lock.was_chosen_as_wsrep_victim= true; - if (wsrep_thd_set_wsrep_aborter(bf_thd, thd)) - { - WSREP_DEBUG("innodb kill transaction skipped due to wsrep_aborter set"); - wsrep_thd_UNLOCK(thd); - DBUG_VOID_RETURN; - } - - if (wsrep_thd_bf_abort(bf_thd, thd, signal)) - { - lock_t* wait_lock= victim_trx->lock.wait_lock; - if (wait_lock) - { - DBUG_ASSERT(victim_trx->is_wsrep()); - WSREP_DEBUG("victim has wait flag: %lu", thd_get_thread_id(thd)); - victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; - lock_cancel_waiting_and_release(wait_lock); - } - } - else - { - wsrep_thd_LOCK(thd); - victim_trx->lock.was_chosen_as_wsrep_victim= false; - wsrep_thd_set_wsrep_aborter(NULL, thd); - wsrep_thd_UNLOCK(thd); - - WSREP_DEBUG("wsrep_thd_bf_abort has failed, victim %lu will survive", - thd_get_thread_id(thd)); - } - - DBUG_VOID_RETURN; -} - /** This function is used to kill one transaction. 
This transaction was open on this node (not-yet-committed), and a @@ -18799,10 +18755,45 @@ wsrep_innobase_kill_one_trx( DBUG_VOID_RETURN; } - /* Here we need to lock THD::LOCK_thd_data to protect from - concurrent usage or disconnect or delete. */ + /* Grab reference to victim_trx before releasing the mutex, this will + prevent victim to release locks or commit while the mutex is + unlocked. The state may change to TRX_STATE_COMMITTED_IN_MEMORY. + See skip_lock_inheritance_n_ref in trx0trx.h. */ + const trx_id_t victim_trx_id= victim_trx->id; +retry_lock: + victim_trx->reference(); + trx_mutex_exit(victim_trx); + DEBUG_SYNC(bf_thd, "wsrep_before_BF_victim_lock"); - wsrep_thd_LOCK(thd); + wsrep_thd_kill_LOCK(thd); + /* + There is now a cycle + + trx reference + -> LOCK_commit_order + -> LOCK_thd_data + -> trx reference + + which may prevent the transaction committing because reference was grabbed + above. Try to lock LOCK_thd_data, and if not successul, enter the + trx mutex again to release the reference and try again. + */ + if (wsrep_thd_TRYLOCK(thd)) + { + wsrep_thd_kill_UNLOCK(thd); + trx_mutex_enter(victim_trx); + victim_trx->release_reference(); + if (victim_trx_id != victim_trx->id || + victim_trx->state == TRX_STATE_COMMITTED_IN_MEMORY || + victim_trx->state == TRX_STATE_NOT_STARTED) + { + WSREP_DEBUG("wsrep_innobase_kill_one_trx: Victim committed in memory"); + DBUG_VOID_RETURN; + } + goto retry_lock; + } + + DEBUG_SYNC(bf_thd, "wsrep_after_BF_victim_lock"); WSREP_LOG_CONFLICT(bf_thd, thd, TRUE); @@ -18833,7 +18824,31 @@ wsrep_innobase_kill_one_trx( wsrep_thd_transaction_state_str(thd), wsrep_thd_query(thd)); - wsrep_kill_victim(bf_thd, thd, victim_trx, signal); + const bool success= wsrep_thd_bf_abort(bf_thd, thd, signal); + + wsrep_thd_UNLOCK(thd); + wsrep_thd_kill_UNLOCK(thd); + trx_mutex_enter(victim_trx); + + if (success && victim_trx->state == TRX_STATE_ACTIVE) + { + lock_t* wait_lock= victim_trx->lock.wait_lock; + if (wait_lock) + { + victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; + DBUG_ASSERT(victim_trx->is_wsrep()); + WSREP_DEBUG("victim has wait flag: %lu", thd_get_thread_id(thd)); + lock_cancel_waiting_and_release(wait_lock); + } + } + else + { + victim_trx->lock.was_chosen_as_wsrep_victim= false; + WSREP_DEBUG("wsrep_thd_bf_abort has failed, victim %lu will survive", + thd_get_thread_id(thd)); + } + victim_trx->release_reference(); + DBUG_VOID_RETURN; } @@ -18854,42 +18869,61 @@ wsrep_abort_transaction( THD *victim_thd, my_bool signal) { - /* Note that victim thd is protected with - THD::LOCK_thd_data and THD::LOCK_thd_kill here. */ + /* Unlock LOCK_thd_kill and LOCK_thd_data temporarily to grab mutexes + in the right order: + lock_sys.mutex + LOCK_thd_kill + LOCK_thd_data + trx.mutex + */ trx_t* victim_trx= thd_to_trx(victim_thd); - trx_t* bf_trx= thd_to_trx(bf_thd); - WSREP_DEBUG("wsrep_abort_transaction: BF:" - " thread %ld client_state %s client_mode %s" - " trans_state %s query %s trx " TRX_ID_FMT, - thd_get_thread_id(bf_thd), - wsrep_thd_client_state_str(bf_thd), - wsrep_thd_client_mode_str(bf_thd), - wsrep_thd_transaction_state_str(bf_thd), - wsrep_thd_query(bf_thd), - bf_trx ? bf_trx->id : 0); + trx_id_t victim_trx_id= victim_trx ? victim_trx->id : 0; + wsrep_thd_UNLOCK(victim_thd); + wsrep_thd_kill_UNLOCK(victim_thd); + /* After this point must use find_thread_by_id() if victim_thd + is needed again. 
*/ - WSREP_DEBUG("wsrep_abort_transaction: victim:" - " thread %ld client_state %s client_mode %s" - " trans_state %s query %s trx " TRX_ID_FMT, - thd_get_thread_id(victim_thd), - wsrep_thd_client_state_str(victim_thd), - wsrep_thd_client_mode_str(victim_thd), - wsrep_thd_transaction_state_str(victim_thd), - wsrep_thd_query(victim_thd), - victim_trx ? victim_trx->id : 0); - - if (victim_trx) + /* Victim didn't have active RW transaction. Note that tere is a possible + race when the victim transaction is just starting write operation + as is still read only. This however will be resolved eventually since + all the possible blocking transactions are also BF aborted, + and the victim will find that it was BF aborted on server level after + the write operation in InnoDB completes. */ + if (!victim_trx_id) { - lock_mutex_enter(); - trx_mutex_enter(victim_trx); - wsrep_kill_victim(bf_thd, victim_thd, victim_trx, signal); +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF( + "sync.wsrep_abort_transaction_read_only", + {const char act[]= + "now " + "SIGNAL sync.wsrep_abort_transaction_read_only_reached " + "WAIT_FOR signal.wsrep_abort_transaction_read_only"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, STRING_WITH_LEN(act))); + };); +#endif /* ENABLED_DEBUG_SYNC*/ + return; + } + lock_mutex_enter(); + + /* Check if victim trx still exists. */ + /* Note based on comment on trx0sys.h only ACTIVE or PREPARED trx + objects may participate in hash. However, transaction may get committed + before this method returns. */ + if(!(victim_trx= trx_sys.find(nullptr, victim_trx_id, true))) { + WSREP_DEBUG("wsrep_abort_transaction: Victim trx does not exist anymore"); lock_mutex_exit(); - trx_mutex_exit(victim_trx); + return; } - else - { - wsrep_thd_bf_abort(bf_thd, victim_thd, signal); + trx_mutex_enter(victim_trx); + + if (victim_trx->state == TRX_STATE_ACTIVE && victim_trx->lock.wait_lock) { + victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; + lock_cancel_waiting_and_release(victim_trx->lock.wait_lock); } + + trx_mutex_exit(victim_trx); + victim_trx->release_reference(); + lock_mutex_exit(); } static diff --git a/wsrep-lib b/wsrep-lib index 4951c383577..e238c0d240c 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit 4951c38357737d568b554402bc5b6abe88a38fe1 +Subproject commit e238c0d240c2557229b0523a4a032f3cf8b41639 From f307160218f8f9ed2528ffc685f49c4e2ae050b3 Mon Sep 17 00:00:00 2001 From: Teemu Ollakka Date: Wed, 19 Apr 2023 16:51:55 +0300 Subject: [PATCH 32/76] MDEV-29293 MariaDB stuck on starting commit state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This commit contains a merge from 10.5-MDEV-29293-squash into 10.6. Although the bug MDEV-29293 was not reproducible with 10.6, the fix contains several improvements for wsrep KILL query and BF abort handling, and addresses the following issues: * MDEV-30307 KILL command issued inside a transaction is problematic for galera replication: This commit will remove KILL TOI replication, so Galera side transaction context is not lost during KILL. * MDEV-21075 KILL QUERY maintains nodes data consistency but breaks GTID sequence: This is fixed as well as KILL does not use TOI, and thus does not change GTID state. * MDEV-30372 Assertion in wsrep-lib state: This was caused by BF abort or KILL when local transaction was in the middle of group commit. This commit disables THD::killed handling during commit, so the problem is avoided. 
* MDEV-30963 Assertion failure !lock.was_chosen_as_deadlock_victim in trx0trx.h:1065: The assertion happened when the victim was BF aborted via MDL while it was committing. This commit changes MDL BF aborts so that transactions which are committing cannot be BF aborted via MDL. The RQG grammar attached in the issue could not reproduce the crash anymore. Original commit message from 10.5 fix: MDEV-29293 MariaDB stuck on starting commit state The problem seems to be a deadlock between KILL command execution and BF abort issued by an applier, where: * KILL has locked victim's LOCK_thd_kill and LOCK_thd_data. * Applier has innodb side global lock mutex and victim trx mutex. * KILL is calling innobase_kill_query, and is blocked by innodb global lock mutex. * Applier is in wsrep_innobase_kill_one_trx and is blocked by victim's LOCK_thd_kill. The fix in this commit removes the TOI replication of KILL command and makes KILL execution less intrusive operation. Aborting the victim happens now by using awake_no_mutex() and ha_abort_transaction(). If the KILL happens when the transaction is committing, the KILL operation is postponed to happen after the statement has completed in order to avoid KILL to interrupt commit processing. Notable changes in this commit: * wsrep client connections's error state may remain sticky after client connection is closed. This error message will then pop up for the next client session issuing first SQL statement. This problem raised with test galera.galera_bf_kill. The fix is to reset wsrep client error state, before a THD is reused for next connetion. * Release THD locks in wsrep_abort_transaction when locking innodb mutexes. This guarantees same locking order as with applier BF aborting. * BF abort from MDL was changed to do BF abort on server/wsrep-lib side first, and only then do the BF abort on InnoDB side. This removes the need to call back from InnoDB for BF aborts which originate from MDL and simplifies the locking. * Removed wsrep_thd_set_wsrep_aborter() from service_wsrep.h. The manipulation of the wsrep_aborter can be done solely on server side. Moreover, it is now debug only variable and could be excluded from optimized builds. * Remove LOCK_thd_kill from wsrep_thd_LOCK/UNLOCK to allow more fine grained locking for SR BF abort which may require locking of victim LOCK_thd_kill. Added explicit call for wsrep_thd_kill_LOCK/UNLOCK where appropriate. * Wsrep-lib was updated to version which allows external locking for BF abort calls. Changes to MTR tests: * Disable galera_bf_abort_group_commit. This test is going to be removed (MDEV-30855). * Make galera_var_retry_autocommit result more readable by echoing cases and expectations into result. Only one expected result for reap to verify that server returns expected status for query. * Record galera_gcache_recover_manytrx as result file was incomplete. Trivial change. * Make galera_create_table_as_select more deterministic: Wait until CTAS execution has reached MDL wait for multi-master conflict case. Expected error from multi-master conflict is ER_QUERY_INTERRUPTED. This is because CTAS does not yet have open wsrep transaction when it is waiting for MDL, query gets interrupted instead of BF aborted. This should be addressed in separate task. * A new test galera_bf_abort_registering to check that registering trx gets BF aborted through MDL. * A new test galera_kill_group_commit to verify correct behavior when KILL is executed while the transaction is committing. 
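To illustrate the deferred-KILL handling described above, here is a minimal standalone C++ sketch (illustrative only, not code from this patch) of the idea behind postponing a KILL that arrives while the victim is committing. The names SketchThd, backup_kill_for_commit, restore_kill_after_commit and kill_thd are made up for the example and do not match the server's actual types or signatures.

#include <cassert>
#include <iostream>
#include <mutex>

enum kill_state { NOT_KILLED = 0, KILL_QUERY = 1, KILL_CONNECTION = 2 };

struct SketchThd {
  std::mutex lock_thd_kill;              // stand-in for THD::LOCK_thd_kill
  kill_state killed = NOT_KILLED;        // pending kill signal
  kill_state deferred_kill = NOT_KILLED; // kill postponed past commit
  bool committing = false;
};

void backup_kill_for_commit(SketchThd &thd) {
  std::lock_guard<std::mutex> g(thd.lock_thd_kill);
  if (thd.killed != NOT_KILLED) {        // stash a KILL that already arrived
    thd.deferred_kill = thd.killed;
    thd.killed = NOT_KILLED;
  }
  thd.committing = true;
}

void restore_kill_after_commit(SketchThd &thd) {
  std::lock_guard<std::mutex> g(thd.lock_thd_kill);
  thd.committing = false;
  if (thd.deferred_kill != NOT_KILLED) { // re-apply the stashed KILL
    thd.killed = thd.deferred_kill;
    thd.deferred_kill = NOT_KILLED;
  }
}

void kill_thd(SketchThd &victim, kill_state sig) {
  std::lock_guard<std::mutex> g(victim.lock_thd_kill);
  if (victim.committing)
    victim.deferred_kill = sig; // commit in progress: record, do not interrupt
  else
    victim.killed = sig;        // otherwise the kill takes effect immediately
}

int main() {
  SketchThd thd;
  backup_kill_for_commit(thd);      // victim enters the commit path
  kill_thd(thd, KILL_QUERY);        // KILL arrives mid-commit: deferred
  assert(thd.killed == NOT_KILLED); // commit is not interrupted
  restore_kill_after_commit(thd);   // statement finished
  assert(thd.killed == KILL_QUERY); // kill becomes visible afterwards
  std::cout << "KILL deferred until after commit\n";
  return 0;
}

In the patch itself the equivalent bookkeeping is done under LOCK_thd_kill by wsrep_backup_kill_for_commit(), called from wsrep_before_prepare()/wsrep_before_commit(), and undone by wsrep_restore_kill_after_commit() in wsrep_after_statement(), as seen in the wsrep_trans_observer.h and wsrep_thd.cc hunks elsewhere in this series.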
Co-authored-by: Seppo Jaakola Co-authored-by: Jan Lindström Signed-off-by: Julius Goryavsky --- include/mysql/service_wsrep.h | 9 +- mysql-test/suite/galera/disabled.def | 2 + mysql-test/suite/galera/r/MDEV-29293.result | 21 ++ .../r/galera_create_table_as_select.result | 1 + .../r/galera_gcache_recover_manytrx.result | 3 - .../galera/r/galera_kill_group_commit.result | 27 +++ .../r/galera_var_retry_autocommit.result | 5 +- mysql-test/suite/galera/t/MDEV-29293.test | 41 ++++ .../t/galera_create_table_as_select.test | 6 +- .../galera/t/galera_kill_group_commit.cnf | 5 + .../galera/t/galera_kill_group_commit.test | 69 ++++++ .../galera/t/galera_var_retry_autocommit.test | 1 + sql/handler.h | 7 +- sql/service_wsrep.cc | 39 +--- sql/sql_class.cc | 27 ++- sql/sql_class.h | 9 +- sql/sql_parse.cc | 59 +---- sql/sql_plugin_services.inl | 2 +- sql/wsrep_dummy.cc | 7 +- sql/wsrep_high_priority_service.cc | 1 + sql/wsrep_mysqld.cc | 34 ++- sql/wsrep_server_service.cc | 8 +- sql/wsrep_server_service.h | 3 +- sql/wsrep_thd.cc | 204 +++++++++++++----- sql/wsrep_thd.h | 31 ++- sql/wsrep_trans_observer.h | 23 +- storage/innobase/handler/ha_innodb.cc | 138 ++++++------ storage/innobase/include/lock0lock.h | 4 + storage/innobase/lock/lock0lock.cc | 21 +- wsrep-lib | 2 +- 30 files changed, 567 insertions(+), 242 deletions(-) create mode 100644 mysql-test/suite/galera/r/MDEV-29293.result create mode 100644 mysql-test/suite/galera/r/galera_kill_group_commit.result create mode 100644 mysql-test/suite/galera/t/MDEV-29293.test create mode 100644 mysql-test/suite/galera/t/galera_kill_group_commit.cnf create mode 100644 mysql-test/suite/galera/t/galera_kill_group_commit.test diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h index 8541b348ae4..8add709362e 100644 --- a/include/mysql/service_wsrep.h +++ b/include/mysql/service_wsrep.h @@ -57,6 +57,7 @@ extern struct wsrep_service_st { my_bool (*wsrep_on_func)(const MYSQL_THD thd); bool (*wsrep_prepare_key_for_innodb_func)(MYSQL_THD thd, const unsigned char*, size_t, const unsigned char*, size_t, struct wsrep_buf*, size_t*); void (*wsrep_thd_LOCK_func)(const MYSQL_THD thd); + int (*wsrep_thd_TRYLOCK_func)(const MYSQL_THD thd); void (*wsrep_thd_UNLOCK_func)(const MYSQL_THD thd); const char * (*wsrep_thd_query_func)(const MYSQL_THD thd); int (*wsrep_thd_retry_counter_func)(const MYSQL_THD thd); @@ -89,7 +90,6 @@ extern struct wsrep_service_st { ulong (*wsrep_OSU_method_get_func)(const MYSQL_THD thd); my_bool (*wsrep_thd_has_ignored_error_func)(const MYSQL_THD thd); void (*wsrep_thd_set_ignored_error_func)(MYSQL_THD thd, my_bool val); - bool (*wsrep_thd_set_wsrep_aborter_func)(MYSQL_THD bf_thd, MYSQL_THD thd); void (*wsrep_report_bf_lock_wait_func)(const MYSQL_THD thd, unsigned long long trx_id); void (*wsrep_thd_kill_LOCK_func)(const MYSQL_THD thd); @@ -111,6 +111,7 @@ extern struct wsrep_service_st { #define wsrep_on(thd) (thd) && WSREP_ON && wsrep_service->wsrep_on_func(thd) #define wsrep_prepare_key_for_innodb(A,B,C,D,E,F,G) wsrep_service->wsrep_prepare_key_for_innodb_func(A,B,C,D,E,F,G) #define wsrep_thd_LOCK(T) wsrep_service->wsrep_thd_LOCK_func(T) +#define wsrep_thd_TRYLOCK(T) wsrep_service->wsrep_thd_TRYLOCK_func(T) #define wsrep_thd_UNLOCK(T) wsrep_service->wsrep_thd_UNLOCK_func(T) #define wsrep_thd_kill_LOCK(T) wsrep_service->wsrep_thd_kill_LOCK_func(T) #define wsrep_thd_kill_UNLOCK(T) wsrep_service->wsrep_thd_kill_UNLOCK_func(T) @@ -141,7 +142,6 @@ extern struct wsrep_service_st { #define wsrep_OSU_method_get(T) 
wsrep_service->wsrep_OSU_method_get_func(T) #define wsrep_thd_has_ignored_error(T) wsrep_service->wsrep_thd_has_ignored_error_func(T) #define wsrep_thd_set_ignored_error(T,V) wsrep_service->wsrep_thd_set_ignored_error_func(T,V) -#define wsrep_thd_set_wsrep_aborter(T) wsrep_service->wsrep_thd_set_wsrep_aborter_func(T1, T2) #define wsrep_report_bf_lock_wait(T,I) wsrep_service->wsrep_report_bf_lock_wait(T,I) #define wsrep_thd_set_PA_unsafe(T) wsrep_service->wsrep_thd_set_PA_unsafe_func(T) #else @@ -175,6 +175,8 @@ void wsrep_set_data_home_dir(const char *data_dir); extern "C" my_bool wsrep_on(const MYSQL_THD thd); /* Lock thd wsrep lock */ extern "C" void wsrep_thd_LOCK(const MYSQL_THD thd); +/* Try thd wsrep lock. Return non-zero if lock could not be taken. */ +extern "C" int wsrep_thd_TRYLOCK(const MYSQL_THD thd); /* Unlock thd wsrep lock */ extern "C" void wsrep_thd_UNLOCK(const MYSQL_THD thd); @@ -197,8 +199,6 @@ extern "C" my_bool wsrep_thd_is_local(const MYSQL_THD thd); /* Return true if thd is in high priority mode */ /* todo: rename to is_high_priority() */ extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd); -/* set wsrep_aborter for the target THD */ -extern "C" bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd); /* Return true if thd is in TOI mode */ extern "C" my_bool wsrep_thd_is_toi(const MYSQL_THD thd); /* Return true if thd is in replicating TOI mode */ @@ -249,7 +249,6 @@ extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd); extern "C" ulong wsrep_OSU_method_get(const MYSQL_THD thd); extern "C" my_bool wsrep_thd_has_ignored_error(const MYSQL_THD thd); extern "C" void wsrep_thd_set_ignored_error(MYSQL_THD thd, my_bool val); -extern "C" bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd); extern "C" void wsrep_report_bf_lock_wait(const THD *thd, unsigned long long trx_id); /* declare parallel applying unsafety for the THD */ diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 2d68598c03b..af1f6cd8861 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -26,3 +26,5 @@ galera_var_ignore_apply_errors : 28: "Server did not transition to READY state" galera_bf_kill_debug : timeout after 900 seconds galera_ssl_upgrade : [Warning] Failed to load slave replication state from table mysql.gtid_slave_pos: 130: Incorrect file format 'gtid_slave_pos' galera_insert_bulk : MDEV-30536 no expected deadlock in galera_insert_bulk test +MDEV-27713 : test is using get_lock(), which is now rejected in cluster +galera_bf_abort_group_commit : MDEV-30855 PR to remove the test exists diff --git a/mysql-test/suite/galera/r/MDEV-29293.result b/mysql-test/suite/galera/r/MDEV-29293.result new file mode 100644 index 00000000000..70c0cc84a31 --- /dev/null +++ b/mysql-test/suite/galera/r/MDEV-29293.result @@ -0,0 +1,21 @@ +connection node_2; +connection node_1; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1; +set wsrep_sync_wait = 0; +CREATE TABLE t1(a int not null primary key auto_increment, b int) engine=InnoDB; +INSERT INTO t1 VALUES (1,2); +connection node_1a; +BEGIN; +UPDATE t1 SET b=3 WHERE a=1; +connection node_1; +set debug_sync='wsrep_kill_before_awake_no_mutex SIGNAL before_kill WAIT_FOR continue'; +connection node_1b; +set debug_sync= 'now WAIT_FOR before_kill'; +connection node_2; +UPDATE t1 SET b=7 WHERE a=1; +connection node_1b; +set debug_sync= 'now SIGNAL continue'; +connection 
node_1; +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/galera/r/galera_create_table_as_select.result b/mysql-test/suite/galera/r/galera_create_table_as_select.result index 6f65ee99f0a..beda5f30fe2 100644 --- a/mysql-test/suite/galera/r/galera_create_table_as_select.result +++ b/mysql-test/suite/galera/r/galera_create_table_as_select.result @@ -82,6 +82,7 @@ connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; LOCK TABLE t2 WRITE; connection node_1; CREATE TABLE t1 AS SELECT * FROM t2;; +connection node_1a; connection node_2; SELECT COUNT(*) = 5 FROM t2; COUNT(*) = 5 diff --git a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result index 5caf22b39ca..5718807b5c4 100644 --- a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result +++ b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result @@ -134,6 +134,3 @@ connection node_1; call mtr.add_suppression("Error in Log_event::read_log_event():.*"); CALL mtr.add_suppression("conflict state 7 after post commit"); CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); -connection node_2; -call mtr.add_suppression("Error in Log_event::read_log_event():.*"); -CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); diff --git a/mysql-test/suite/galera/r/galera_kill_group_commit.result b/mysql-test/suite/galera/r/galera_kill_group_commit.result new file mode 100644 index 00000000000..bb59ce1486f --- /dev/null +++ b/mysql-test/suite/galera/r/galera_kill_group_commit.result @@ -0,0 +1,27 @@ +connection node_2; +connection node_1; +connect node_1_kill, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1; +SET SESSION wsrep_sync_wait = 0; +connect node_1_follower, 127.0.0.1, root, , test, $NODE_MYPORT_1; +SET SESSION wsrep_sync_wait = 0; +connection node_1; +CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; +SET SESSION DEBUG_SYNC = "commit_before_enqueue SIGNAL leader_before_enqueue_reached WAIT_FOR leader_before_enqueue_continue"; +INSERT INTO t1 VALUES (1); +connection node_1_ctrl; +SET DEBUG_SYNC = "now WAIT_FOR leader_before_enqueue_reached"; +connection node_1_follower; +INSERT INTO t1 VALUES (2);; +connection node_1_ctrl; +connection node_1_kill; +# Execute KILL QUERY for group commit follower +SET DEBUG_SYNC = "now SIGNAL leader_before_enqueue_continue"; +connection node_1_follower; +connection node_1; +SELECT * FROM t1; +f1 +1 +2 +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result index 50667b0a4fa..eee740b6036 100644 --- a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result +++ b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result @@ -36,7 +36,10 @@ SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue'; connection node_1; SELECT COUNT(*) FROM t1; COUNT(*) -1 +connection node_1; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 SET DEBUG_SYNC = 'RESET'; SET GLOBAL debug_dbug = NULL; DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MDEV-29293.test b/mysql-test/suite/galera/t/MDEV-29293.test new file mode 100644 index 00000000000..dacbf714c06 --- /dev/null +++ b/mysql-test/suite/galera/t/MDEV-29293.test @@ -0,0 +1,41 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_have_debug_sync.inc + +--connect node_1a, 127.0.0.1, root, , 
test, $NODE_MYPORT_1 +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 +set wsrep_sync_wait = 0; + +CREATE TABLE t1(a int not null primary key auto_increment, b int) engine=InnoDB; +INSERT INTO t1 VALUES (1,2); + +--connection node_1a +--let $victim_id = `SELECT CONNECTION_ID()` +BEGIN; +UPDATE t1 SET b=3 WHERE a=1; + +--connection node_1 +set debug_sync='wsrep_kill_before_awake_no_mutex SIGNAL before_kill WAIT_FOR continue'; +--disable_query_log +--disable_result_log +--send_eval KILL CONNECTION $victim_id +--enable_result_log +--enable_query_log + +--connection node_1b +set debug_sync= 'now WAIT_FOR before_kill'; + +--connection node_2 +UPDATE t1 SET b=7 WHERE a=1; + +--connection node_1b +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE User = 'system user' AND State LIKE 'Update_rows_log_event%'; +--source include/wait_condition.inc +set debug_sync= 'now SIGNAL continue'; + +--connection node_1 +--reap +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; + diff --git a/mysql-test/suite/galera/t/galera_create_table_as_select.test b/mysql-test/suite/galera/t/galera_create_table_as_select.test index a6c1f657280..cfee63e5e27 100644 --- a/mysql-test/suite/galera/t/galera_create_table_as_select.test +++ b/mysql-test/suite/galera/t/galera_create_table_as_select.test @@ -113,6 +113,10 @@ LOCK TABLE t2 WRITE; --connection node_1 --send CREATE TABLE t1 AS SELECT * FROM t2; +--connection node_1a +--let $wait_condition = SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE STATE LIKE 'Waiting for table metadata lock%' +--source include/wait_condition.inc + --connection node_2 SELECT COUNT(*) = 5 FROM t2; CREATE TABLE t1 AS SELECT * FROM t2; @@ -121,7 +125,7 @@ CREATE TABLE t1 AS SELECT * FROM t2; UNLOCK TABLES; --connection node_1 ---error ER_TABLE_EXISTS_ERROR,ER_LOCK_DEADLOCK +--error ER_TABLE_EXISTS_ERROR,ER_QUERY_INTERRUPTED --reap DROP TABLE t1, t2; diff --git a/mysql-test/suite/galera/t/galera_kill_group_commit.cnf b/mysql-test/suite/galera/t/galera_kill_group_commit.cnf new file mode 100644 index 00000000000..60f4f776409 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_kill_group_commit.cnf @@ -0,0 +1,5 @@ +!include ../galera_2nodes.cnf + +[mysqld] +log-bin +log-slave-updates diff --git a/mysql-test/suite/galera/t/galera_kill_group_commit.test b/mysql-test/suite/galera/t/galera_kill_group_commit.test new file mode 100644 index 00000000000..4b84f2d90ef --- /dev/null +++ b/mysql-test/suite/galera/t/galera_kill_group_commit.test @@ -0,0 +1,69 @@ +# +# Verify that transaction which has reached group commit queue +# cannot be killed. If the kill succeeds, assertion for +# wsrep transaction state will fail. +# +# If the bug is present, i.e. wsrep transaction gets killed during +# group commit wait, this test is enough to reproduce the crash +# most of the time. +# + +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_cluster.inc + +# Connection for KILL commands +--connect node_1_kill, 127.0.0.1, root, , test, $NODE_MYPORT_1 +# Connection for sync point control +--connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1 +SET SESSION wsrep_sync_wait = 0; +# Connection for group commit follower +--connect node_1_follower, 127.0.0.1, root, , test, $NODE_MYPORT_1 +# Need to disable sync wait to reach commit queue when leader +# is blocked. 
+SET SESSION wsrep_sync_wait = 0; +--let $follower_id = `SELECT CONNECTION_ID()` + +--connection node_1 +CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; + +SET SESSION DEBUG_SYNC = "commit_before_enqueue SIGNAL leader_before_enqueue_reached WAIT_FOR leader_before_enqueue_continue"; +--send INSERT INTO t1 VALUES (1) + +--connection node_1_ctrl +SET DEBUG_SYNC = "now WAIT_FOR leader_before_enqueue_reached"; + +--connection node_1_follower +# SET SESSION DEBUG_SYNC = "group_commit_waiting_for_prior SIGNAL follower_waiting_for_prior_reached WAIT_FOR follower_waiting_for_prior_continue"; +--send INSERT INTO t1 VALUES (2); + +--connection node_1_ctrl +# TODO: Is it possible to use sync points to enforce group commit to happen? +# The leader will hold commit monitor in commit_before_enqueue sync point, +# which prevents the follower to reach the group commit wait state. +# We now sleep and expect the follower to reach group commit, but this +# may cause false negatives. +--sleep 1 + +--connection node_1_kill +--echo # Execute KILL QUERY for group commit follower +--disable_query_log +--disable_result_log +# Because it is currently impossible to verify that the +# follower has reached group commit queue, the KILL may +# sometimes return success. +--error 0,ER_KILL_DENIED_ERROR +--eval KILL QUERY $follower_id +--enable_result_log +--enable_query_log + +SET DEBUG_SYNC = "now SIGNAL leader_before_enqueue_continue"; +--connection node_1_follower +--reap + +--connection node_1 +--reap +SELECT * FROM t1; + +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test index c58eba1410e..8009fe88c65 100644 --- a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test +++ b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test @@ -64,6 +64,7 @@ SELECT COUNT(*) FROM t1; SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue'; --connection node_1 +--error 0,ER_LOCK_DEADLOCK --reap SELECT COUNT(*) FROM t1; diff --git a/sql/handler.h b/sql/handler.h index e3d968808ee..ca118813656 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -45,6 +45,7 @@ #include "sql_sequence.h" #include "mem_root_array.h" #include // pair +#include /* __attribute__ */ class Alter_info; class Virtual_column_info; @@ -1530,9 +1531,9 @@ struct handlerton const char *query, uint query_length, const char *db, const char *table_name); - void (*abort_transaction)(handlerton *hton, THD *bf_thd, - THD *victim_thd, my_bool signal); - int (*set_checkpoint)(handlerton *hton, const XID* xid); + void (*abort_transaction)(handlerton *hton, THD *bf_thd, THD *victim_thd, + my_bool signal) __attribute__((nonnull)); + int (*set_checkpoint)(handlerton *hton, const XID *xid); int (*get_checkpoint)(handlerton *hton, XID* xid); /** Check if the version of the table matches the version in the .frm diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc index dd12149ff48..e1a4a25b27a 100644 --- a/sql/service_wsrep.cc +++ b/sql/service_wsrep.cc @@ -32,6 +32,11 @@ extern "C" void wsrep_thd_LOCK(const THD *thd) mysql_mutex_lock(&thd->LOCK_thd_data); } +extern "C" int wsrep_thd_TRYLOCK(const THD *thd) +{ + return mysql_mutex_trylock(&thd->LOCK_thd_data); +} + extern "C" void wsrep_thd_UNLOCK(const THD *thd) { mysql_mutex_unlock(&thd->LOCK_thd_data); @@ -196,6 +201,7 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, /* Note: do not store/reset globals before wsrep_bf_abort() call to avoid losing BF thd context. 
*/ + mysql_mutex_lock(&victim_thd->LOCK_thd_data); if (!(bf_thd && bf_thd != victim_thd)) { DEBUG_SYNC(victim_thd, "wsrep_before_SR_rollback"); @@ -208,6 +214,7 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, { wsrep_thd_self_abort(victim_thd); } + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); if (bf_thd) { wsrep_store_threadvars(bf_thd); @@ -218,7 +225,7 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, my_bool signal) { mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); - mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); my_bool ret= wsrep_bf_abort(bf_thd, victim_thd); /* Send awake signal if victim was BF aborted or does not @@ -227,19 +234,8 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, */ if ((ret || !wsrep_on(victim_thd)) && signal) { - mysql_mutex_lock(&victim_thd->LOCK_thd_data); - - if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) - { - WSREP_DEBUG("victim is killed already by %llu, skipping awake", - victim_thd->wsrep_aborter); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - return false; - } - victim_thd->wsrep_aborter= bf_thd->thread_id; victim_thd->awake_no_mutex(KILL_QUERY_HARD); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); } else { WSREP_DEBUG("wsrep_thd_bf_abort skipped awake, signal %d", signal); } @@ -368,25 +364,6 @@ extern "C" ulong wsrep_OSU_method_get(const MYSQL_THD thd) return(global_system_variables.wsrep_OSU_method); } -extern "C" bool wsrep_thd_set_wsrep_aborter(THD *bf_thd, THD *victim_thd) -{ - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - if (!bf_thd) - { - victim_thd->wsrep_aborter= 0; - WSREP_DEBUG("wsrep_thd_set_wsrep_aborter resetting wsrep_aborter"); - return false; - } - if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) - { - return true; - } - victim_thd->wsrep_aborter= bf_thd->thread_id; - WSREP_DEBUG("wsrep_thd_set_wsrep_aborter setting wsrep_aborter %u", - victim_thd->wsrep_aborter); - return false; -} - extern "C" void wsrep_report_bf_lock_wait(const THD *thd, unsigned long long trx_id) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 56d0d1682cb..a49271125cc 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1310,6 +1310,11 @@ void THD::init() wsrep_affected_rows = 0; m_wsrep_next_trx_id = WSREP_UNDEFINED_TRX_ID; wsrep_aborter = 0; + wsrep_abort_by_kill = NOT_KILLED; + wsrep_abort_by_kill_err = 0; +#ifndef DBUG_OFF + wsrep_killed_state = 0; +#endif /* DBUG_OFF */ wsrep_desynced_backup_stage= false; #endif /* WITH_WSREP */ @@ -1661,6 +1666,13 @@ void THD::reset_for_reuse() #endif #ifdef WITH_WSREP wsrep_free_status(this); + wsrep_cs().reset_error(); + wsrep_aborter= 0; + wsrep_abort_by_kill= NOT_KILLED; + wsrep_abort_by_kill_err= 0; +#ifndef DBUG_OFF + wsrep_killed_state= 0; +#endif /* DBUG_OFF */ #endif /* WITH_WSREP */ } @@ -1917,7 +1929,9 @@ void THD::awake_no_mutex(killed_state state_to_set) } /* Interrupt target waiting inside a storage engine. 
*/ - if (state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this)) + if (state_to_set != NOT_KILLED && + IF_WSREP(!wsrep_is_bf_aborted(this) && wsrep_abort_by_kill == NOT_KILLED, + true)) ha_kill_query(this, thd_kill_level(this)); abort_current_cond_wait(false); @@ -2144,6 +2158,17 @@ void THD::reset_killed() mysql_mutex_unlock(&LOCK_thd_kill); } #ifdef WITH_WSREP + if (WSREP_NNULL(this)) + { + if (wsrep_abort_by_kill != NOT_KILLED) + { + mysql_mutex_assert_not_owner(&LOCK_thd_kill); + mysql_mutex_lock(&LOCK_thd_kill); + wsrep_abort_by_kill= NOT_KILLED; + wsrep_abort_by_kill_err= 0; + mysql_mutex_unlock(&LOCK_thd_kill); + } + } mysql_mutex_assert_not_owner(&LOCK_thd_data); mysql_mutex_lock(&LOCK_thd_data); wsrep_aborter= 0; diff --git a/sql/sql_class.h b/sql/sql_class.h index 6265d8060ce..2bca2c993fc 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5402,7 +5402,14 @@ public: bool wsrep_ignore_table; /* thread who has started kill for this THD protected by LOCK_thd_data*/ my_thread_id wsrep_aborter; - + /* Kill signal used, if thread was killed by manual KILL. Protected by + LOCK_thd_kill. */ + std::atomic wsrep_abort_by_kill; + /* */ + struct err_info* wsrep_abort_by_kill_err; +#ifndef DBUG_OFF + int wsrep_killed_state; +#endif /* DBUG_OFF */ /* true if BF abort is observed in do_command() right after reading client's packet, and if the client has sent PS execute command. */ bool wsrep_delayed_BF_abort; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index f2f622e78c8..1b9504abe07 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -7886,7 +7886,7 @@ static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, thd->wsrep_retry_counter < thd->variables.wsrep_retry_autocommit) { #ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", + DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", { const char act[]= "now " @@ -9248,23 +9248,20 @@ kill_one_thread(THD *thd, my_thread_id id, killed_state kill_signal, killed_type thd->security_ctx->user_matches(tmp->security_ctx)) #endif /* WITH_WSREP */ { + { #ifdef WITH_WSREP - DEBUG_SYNC(thd, "before_awake_no_mutex"); - if (tmp->wsrep_aborter && tmp->wsrep_aborter != thd->thread_id) - { - /* victim is in hit list already, bail out */ - WSREP_DEBUG("victim %lld has wsrep aborter: %lu, skipping awake()", - id, tmp->wsrep_aborter); - error= 0; - } - else + if (WSREP(tmp)) + { + error = wsrep_kill_thd(thd, tmp, kill_signal); + } + else + { #endif /* WITH_WSREP */ - { - WSREP_DEBUG("kill_one_thread victim: %lld wsrep_aborter %lu" - " by signal %d", - id, tmp->wsrep_aborter, kill_signal); tmp->awake_no_mutex(kill_signal); error= 0; +#ifdef WITH_WSREP + } +#endif /* WITH_WSREP */ } } else @@ -9387,18 +9384,6 @@ static void sql_kill(THD *thd, my_thread_id id, killed_state state, killed_type type) { uint error; -#ifdef WITH_WSREP - if (WSREP(thd)) - { - WSREP_DEBUG("sql_kill called"); - if (thd->wsrep_applier) - { - WSREP_DEBUG("KILL in applying, bailing out here"); - return; - } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) - } -#endif /* WITH_WSREP */ if (likely(!(error= kill_one_thread(thd, id, state, type)))) { if (!thd->killed) @@ -9408,11 +9393,6 @@ void sql_kill(THD *thd, my_thread_id id, killed_state state, killed_type type) } else my_error(error, MYF(0), id); -#ifdef WITH_WSREP - return; - wsrep_error_label: - my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id); -#endif /* WITH_WSREP */ } @@ -9421,18 +9401,6 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) { uint error; ha_rows rows; 
-#ifdef WITH_WSREP - if (WSREP(thd)) - { - WSREP_DEBUG("sql_kill_user called"); - if (thd->wsrep_applier) - { - WSREP_DEBUG("KILL in applying, bailing out here"); - return; - } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) - } -#endif /* WITH_WSREP */ switch (error= kill_threads_for_user(thd, user, state, &rows)) { case 0: @@ -9448,11 +9416,6 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) default: my_error(error, MYF(0)); } -#ifdef WITH_WSREP - return; - wsrep_error_label: - my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id); -#endif /* WITH_WSREP */ } diff --git a/sql/sql_plugin_services.inl b/sql/sql_plugin_services.inl index 86b2fb69b22..7bd38368f3d 100644 --- a/sql/sql_plugin_services.inl +++ b/sql/sql_plugin_services.inl @@ -151,6 +151,7 @@ static struct wsrep_service_st wsrep_handler = { wsrep_on, wsrep_prepare_key_for_innodb, wsrep_thd_LOCK, + wsrep_thd_TRYLOCK, wsrep_thd_UNLOCK, wsrep_thd_query, wsrep_thd_retry_counter, @@ -179,7 +180,6 @@ static struct wsrep_service_st wsrep_handler = { wsrep_OSU_method_get, wsrep_thd_has_ignored_error, wsrep_thd_set_ignored_error, - wsrep_thd_set_wsrep_aborter, wsrep_report_bf_lock_wait, wsrep_thd_kill_LOCK, wsrep_thd_kill_UNLOCK, diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc index 9bfaf9285f3..e1508884075 100644 --- a/sql/wsrep_dummy.cc +++ b/sql/wsrep_dummy.cc @@ -56,6 +56,11 @@ my_bool wsrep_on(const THD *) void wsrep_thd_LOCK(const THD *) { } +int wsrep_thd_TRYLOCK(const THD *) +{ + return 0; +} + void wsrep_thd_UNLOCK(const THD *) { } @@ -154,8 +159,6 @@ void wsrep_thd_set_ignored_error(THD*, my_bool) { } ulong wsrep_OSU_method_get(const THD*) { return 0;} -bool wsrep_thd_set_wsrep_aborter(THD*, THD*) -{ return 0;} void wsrep_report_bf_lock_wait(const THD*, unsigned long long) diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 53ef20f3e78..0a2fa273723 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -510,6 +510,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ m_thd->wait_for_prior_commit(); } + WSREP_DEBUG("checkpointing dummy write set %lld", ws_meta.seqno().get()); wsrep_set_SE_checkpoint(ws_meta.gtid(), wsrep_gtid_server.gtid()); if (!WSREP_EMULATE_BINLOG(m_thd)) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index e0db00a3de3..50c85aa7173 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -51,6 +51,7 @@ #include "log_event.h" #include "sql_connect.h" #include "thread_cache.h" +#include "debug_sync.h" #include @@ -3037,7 +3038,22 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, request_thd, granted_thd); ticket->wsrep_report(wsrep_debug); + DEBUG_SYNC(request_thd, "before_wsrep_thd_abort"); + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", { + const char act[]= "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(request_thd, STRING_WITH_LEN(act))); + };); + + /* Here we will call wsrep_abort_transaction so we should hold + THD::LOCK_thd_data to protect victim from concurrent usage + and THD::LOCK_thd_kill to protect from disconnect or delete. 
+ + */ + mysql_mutex_lock(&granted_thd->LOCK_thd_kill); mysql_mutex_lock(&granted_thd->LOCK_thd_data); + if (wsrep_thd_is_toi(granted_thd) || wsrep_thd_is_applying(granted_thd)) { @@ -3045,13 +3061,11 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for SR in aborting state"); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); } else if (wsrep_thd_is_SR(granted_thd) && !wsrep_thd_is_SR(request_thd)) { WSREP_MDL_LOG(INFO, "MDL conflict, DDL vs SR", schema, schema_len, request_thd, granted_thd); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); } else @@ -3060,6 +3074,7 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, request_thd, granted_thd); ticket->wsrep_report(true); mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); unireg_abort(1); } } @@ -3068,7 +3083,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, { WSREP_DEBUG("BF thread waiting for FLUSH"); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); if (granted_thd->current_backup_stage != BACKUP_FINISHED && wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP)) { @@ -3080,7 +3094,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, WSREP_DEBUG("DROP caused BF abort, conf %s", wsrep_thd_transaction_state_str(granted_thd)); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); } else @@ -3090,7 +3103,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, ticket->wsrep_report(wsrep_debug); if (granted_thd->wsrep_trx().active()) { - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); } else @@ -3099,9 +3111,9 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, Granted_thd is likely executing with wsrep_on=0. If the requesting thd is BF, BF abort and wait. 
*/ - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); if (wsrep_thd_is_BF(request_thd, FALSE)) { + granted_thd->awake_no_mutex(KILL_QUERY_HARD); ha_abort_transaction(request_thd, granted_thd, TRUE); } else @@ -3109,10 +3121,14 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, WSREP_MDL_LOG(INFO, "MDL unknown BF-BF conflict", schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(true); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); unireg_abort(1); } } } + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); } else { @@ -3124,13 +3140,17 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, static bool abort_replicated(THD *thd) { bool ret_code= false; + mysql_mutex_lock(&thd->LOCK_thd_kill); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_trx().state() == wsrep::transaction::s_committing) { WSREP_DEBUG("aborting replicated trx: %llu", (ulonglong)(thd->real_id)); - (void)wsrep_abort_thd(thd, thd, TRUE); + wsrep_abort_thd(thd, thd, TRUE); ret_code= true; } + mysql_mutex_unlock(&thd->LOCK_thd_data); + mysql_mutex_unlock(&thd->LOCK_thd_kill); return ret_code; } diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc index 9be6af71c56..52a0a9753c1 100644 --- a/sql/wsrep_server_service.cc +++ b/sql/wsrep_server_service.cc @@ -148,9 +148,13 @@ void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_se wsrep_delete_threadvars(); } -void Wsrep_server_service::background_rollback(wsrep::client_state& client_state) +void Wsrep_server_service::background_rollback( + wsrep::unique_lock &lock WSREP_UNUSED, + wsrep::client_state &client_state) { - Wsrep_client_state& cs= static_cast(client_state); + DBUG_ASSERT(lock.owns_lock()); + Wsrep_client_state &cs= static_cast(client_state); + mysql_mutex_assert_owner(&cs.thd()->LOCK_thd_data); wsrep_fire_rollbacker(cs.thd()); } diff --git a/sql/wsrep_server_service.h b/sql/wsrep_server_service.h index 168e98206e3..0fc48402024 100644 --- a/sql/wsrep_server_service.h +++ b/sql/wsrep_server_service.h @@ -46,7 +46,8 @@ public: void release_high_priority_service(wsrep::high_priority_service*); - void background_rollback(wsrep::client_state&); + void background_rollback(wsrep::unique_lock &, + wsrep::client_state &); void bootstrap(); void log_message(enum wsrep::log::level, const char*); diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 420a25dd2ae..682e64859b4 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -307,48 +307,9 @@ void wsrep_fire_rollbacker(THD *thd) } } - -int wsrep_abort_thd(THD *bf_thd, - THD *victim_thd, - my_bool signal) +static bool wsrep_bf_abort_low(THD *bf_thd, THD *victim_thd) { - DBUG_ENTER("wsrep_abort_thd"); - - mysql_mutex_lock(&victim_thd->LOCK_thd_data); - - /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) - might not be true. 
- */ - if ((WSREP_NNULL(bf_thd) || - ((WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) && - wsrep_thd_is_toi(bf_thd))) && - !wsrep_thd_is_aborting(victim_thd)) - { - WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", - (long long)bf_thd->real_id, (long long)victim_thd->real_id); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - ha_abort_transaction(bf_thd, victim_thd, signal); - DBUG_RETURN(1); - } - else - { - WSREP_DEBUG("wsrep_abort_thd not effective: bf %llu victim %llu " - "wsrep %d wsrep_on %d RSU %d TOI %d aborting %d", - (long long)bf_thd->real_id, (long long)victim_thd->real_id, - WSREP_NNULL(bf_thd), WSREP_ON, - bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU, - wsrep_thd_is_toi(bf_thd), - wsrep_thd_is_aborting(victim_thd)); - } - - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - DBUG_RETURN(1); -} - -bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) -{ - WSREP_LOG_THD(bf_thd, "BF aborter before"); - WSREP_LOG_THD(victim_thd, "victim before"); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); #ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("sync.wsrep_bf_abort", @@ -362,6 +323,85 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) };); #endif + wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno()); + bool ret; + + { + /* Adopt the lock, it is being held by the caller. */ + Wsrep_mutex wsm{&victim_thd->LOCK_thd_data}; + wsrep::unique_lock lock{wsm, std::adopt_lock}; + + if (wsrep_thd_is_toi(bf_thd)) + { + ret= victim_thd->wsrep_cs().total_order_bf_abort(lock, bf_seqno); + } + else + { + DBUG_ASSERT(WSREP(victim_thd) ? victim_thd->wsrep_trx().active() : 1); + ret= victim_thd->wsrep_cs().bf_abort(lock, bf_seqno); + } + if (ret) + { + /* BF abort should be allowed only once by wsrep-lib.*/ + DBUG_ASSERT(victim_thd->wsrep_aborter == 0); + victim_thd->wsrep_aborter= bf_thd->thread_id; + wsrep_bf_aborts_counter++; + } + lock.release(); /* No unlock at the end of the scope. */ + } + + /* Sanity check for wsrep-lib calls to return with LOCK_thd_data held. */ + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + + return ret; +} + +void wsrep_abort_thd(THD *bf_thd, + THD *victim_thd, + my_bool signal) +{ + DBUG_ENTER("wsrep_abort_thd"); + + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + + /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) + might not be true. 
+ */ + if ((WSREP(bf_thd) + || ((WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) + && wsrep_thd_is_toi(bf_thd)) + || bf_thd->lex->sql_command == SQLCOM_KILL) + && !wsrep_thd_is_aborting(victim_thd) && + wsrep_bf_abort_low(bf_thd, victim_thd) && + !victim_thd->wsrep_cs().is_rollbacker_active()) + { + WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", + (long long)bf_thd->real_id, (long long)victim_thd->real_id); + victim_thd->awake_no_mutex(KILL_QUERY_HARD); + ha_abort_transaction(bf_thd, victim_thd, signal); + } + else + { + WSREP_DEBUG("wsrep_abort_thd not effective: bf %llu victim %llu " + "wsrep %d wsrep_on %d RSU %d TOI %d aborting %d", + (long long)bf_thd->real_id, (long long)victim_thd->real_id, + WSREP_NNULL(bf_thd), WSREP_ON, + bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU, + wsrep_thd_is_toi(bf_thd), + wsrep_thd_is_aborting(victim_thd)); + } + + DBUG_VOID_RETURN; +} + +bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) +{ + WSREP_LOG_THD(bf_thd, "BF aborter before"); + WSREP_LOG_THD(victim_thd, "victim before"); + + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + if (WSREP(victim_thd) && !victim_thd->wsrep_trx().active()) { WSREP_DEBUG("wsrep_bf_abort, BF abort for non active transaction." @@ -384,30 +424,84 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP)) { WSREP_DEBUG("killing connection for non wsrep session"); - mysql_mutex_lock(&victim_thd->LOCK_thd_data); victim_thd->awake_no_mutex(KILL_CONNECTION); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); } return false; } - bool ret; - wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno()); + return wsrep_bf_abort_low(bf_thd, victim_thd); +} - if (wsrep_thd_is_toi(bf_thd)) +uint wsrep_kill_thd(THD *thd, THD *victim_thd, killed_state kill_signal) +{ + DBUG_ENTER("wsrep_kill_thd"); + DBUG_ASSERT(WSREP(victim_thd)); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + using trans= wsrep::transaction; + auto trx_state= victim_thd->wsrep_trx().state(); +#ifndef DBUG_OFF + victim_thd->wsrep_killed_state= trx_state; +#endif /* DBUG_OFF */ + /* + Already killed or in commit codepath. Mark the victim as killed, + the killed status will be restored in wsrep_after_commit() and + will be processed after the commit is over. In case of multiple + KILLs happened on commit codepath, the last one will be effective. + */ + if (victim_thd->wsrep_abort_by_kill || + trx_state == trans::s_preparing || + trx_state == trans::s_committing || + trx_state == trans::s_ordered_commit) { - ret= victim_thd->wsrep_cs().total_order_bf_abort(bf_seqno); + victim_thd->wsrep_abort_by_kill= kill_signal; + DBUG_RETURN(0); } - else + /* + Mark killed victim_thd with kill_signal so that awake_no_mutex does + not dive into storage engine. We use ha_abort_transaction() + to do the storage engine part for wsrep THDs. + */ + DEBUG_SYNC(thd, "wsrep_kill_before_awake_no_mutex"); + victim_thd->wsrep_abort_by_kill= kill_signal; + victim_thd->awake_no_mutex(kill_signal); + /* ha_abort_transaction() releases tmp->LOCK_thd_kill, so tmp + is not safe to access anymore. */ + ha_abort_transaction(thd, victim_thd, 1); + DBUG_RETURN(0); +} + +void wsrep_backup_kill_for_commit(THD *thd) +{ + DBUG_ASSERT(WSREP(thd)); + mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + DBUG_ASSERT(thd->killed != NOT_KILLED); + mysql_mutex_lock(&thd->LOCK_thd_data); + /* If the transaction will roll back, keep the killed state. 
+ For must replay, the replay will happen in different THD context + which is high priority and cannot be killed. The owning thread will + pick the killed state in after statement processing. */ + if (thd->wsrep_trx().state() != wsrep::transaction::s_cert_failed && + thd->wsrep_trx().state() != wsrep::transaction::s_must_abort && + thd->wsrep_trx().state() != wsrep::transaction::s_aborting && + thd->wsrep_trx().state() != wsrep::transaction::s_must_replay) { - DBUG_ASSERT(WSREP(victim_thd) ? victim_thd->wsrep_trx().active() : 1); - ret= victim_thd->wsrep_cs().bf_abort(bf_seqno); + thd->wsrep_abort_by_kill= thd->killed; + thd->wsrep_abort_by_kill_err= thd->killed_err; + thd->killed= NOT_KILLED; + thd->killed_err= 0; } - if (ret) - { - wsrep_bf_aborts_counter++; - } - return ret; + mysql_mutex_unlock(&thd->LOCK_thd_data); +} + +void wsrep_restore_kill_after_commit(THD *thd) +{ + DBUG_ASSERT(WSREP(thd)); + mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + thd->killed= thd->wsrep_abort_by_kill; + thd->killed_err= thd->wsrep_abort_by_kill_err; + thd->wsrep_abort_by_kill= NOT_KILLED; + thd->wsrep_abort_by_kill_err= 0; } int wsrep_create_threadvars() diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h index 0ce612d6097..f3790887bf5 100644 --- a/sql/wsrep_thd.h +++ b/sql/wsrep_thd.h @@ -88,10 +88,39 @@ bool wsrep_create_appliers(long threads, bool mutex_protected=false); void wsrep_create_rollbacker(); bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd); -int wsrep_abort_thd(THD *bf_thd, +/* + Abort transaction for victim_thd. This function is called from + MDL BF abort codepath. +*/ +void wsrep_abort_thd(THD *bf_thd, THD *victim_thd, my_bool signal) __attribute__((nonnull(1,2))); +/** + Kill wsrep connection with kill_signal. Object thd is not + guaranteed to exist anymore when this function returns. + + Asserts that the caller holds victim_thd->LOCK_thd_kill, + victim_thd->LOCK_thd_data. + + @param thd THD object for connection that executes the KILL. + @param victim_thd THD object for connection to be killed. + @param kill_signal Kill signal. + + @return Zero if the kill was successful, otherwise non-zero error code. + */ +uint wsrep_kill_thd(THD *thd, THD *victim_thd, killed_state kill_signal); + +/* + Backup kill status for commit. + */ +void wsrep_backup_kill_for_commit(THD *); + +/* + Restore KILL status after commit. + */ +void wsrep_restore_kill_after_commit(THD *); + /* Helper methods to deal with thread local storage. 
The purpose of these methods is to hide the details of thread diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 8f998244ee6..40fbf80cfb3 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -256,6 +256,11 @@ static inline int wsrep_before_prepare(THD* thd, bool all) thd->wsrep_trx().ws_meta().gtid(), wsrep_gtid_server.gtid()); } + + mysql_mutex_lock(&thd->LOCK_thd_kill); + if (thd->killed) wsrep_backup_kill_for_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + DBUG_RETURN(ret); } @@ -323,6 +328,11 @@ static inline int wsrep_before_commit(THD* thd, bool all) wsrep_gtid_server.gtid()); wsrep_register_for_group_commit(thd); } + + mysql_mutex_lock(&thd->LOCK_thd_kill); + if (thd->killed) wsrep_backup_kill_for_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + DBUG_RETURN(ret); } @@ -341,7 +351,8 @@ static inline int wsrep_before_commit(THD* thd, bool all) static inline int wsrep_ordered_commit(THD* thd, bool all) { DBUG_ENTER("wsrep_ordered_commit"); - WSREP_DEBUG("wsrep_ordered_commit: %d", wsrep_is_real(thd, all)); + WSREP_DEBUG("wsrep_ordered_commit: %d %lld", wsrep_is_real(thd, all), + (long long) wsrep_thd_trx_seqno(thd)); DBUG_ASSERT(wsrep_run_commit_hook(thd, all)); DBUG_RETURN(thd->wsrep_cs().ordered_commit()); } @@ -449,10 +460,18 @@ int wsrep_after_statement(THD* thd) wsrep::to_c_string(thd->wsrep_cs().state()), wsrep::to_c_string(thd->wsrep_cs().mode()), wsrep::to_c_string(thd->wsrep_cs().transaction().state())); - DBUG_RETURN((thd->wsrep_cs().state() != wsrep::client_state::s_none && + int ret= ((thd->wsrep_cs().state() != wsrep::client_state::s_none && thd->wsrep_cs().mode() == Wsrep_client_state::m_local) && !thd->internal_transaction() ? thd->wsrep_cs().after_statement() : 0); + + if (wsrep_is_active(thd)) + { + mysql_mutex_lock(&thd->LOCK_thd_kill); + wsrep_restore_kill_after_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + } + DBUG_RETURN(ret); } static inline void wsrep_after_apply(THD* thd) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 2937ca40752..0a56c2b691c 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1987,8 +1987,9 @@ static void innodb_disable_internal_writes(bool disable) sst_enable_innodb_writes(); } -static void wsrep_abort_transaction(handlerton*, THD *, THD *, my_bool); -static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid); +static void wsrep_abort_transaction(handlerton *, THD *, THD *, my_bool) + __attribute__((nonnull)); +static int innobase_wsrep_set_checkpoint(handlerton *hton, const XID *xid); static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid); #endif /* WITH_WSREP */ @@ -18672,36 +18673,45 @@ void lock_wait_wsrep_kill(trx_t *bf_trx, ulong thd_id, trx_id_t trx_id) wsrep_thd_client_mode_str(vthd), wsrep_thd_transaction_state_str(vthd), wsrep_thd_query(vthd)); - /* Mark transaction as a victim for Galera abort */ - vtrx->lock.set_wsrep_victim(); - if (!wsrep_thd_set_wsrep_aborter(bf_thd, vthd)) - aborting= true; - else - WSREP_DEBUG("kill transaction skipped due to wsrep_aborter set"); + aborting= true; } } mysql_mutex_unlock(&lock_sys.wait_mutex); vtrx->mutex_unlock(); } - wsrep_thd_UNLOCK(vthd); - if (aborting) + + DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + if (aborting && wsrep_thd_bf_abort(bf_thd, vthd, true)) { + /* Need to grab mutexes again to ensure that the trx is still in + right state. 
*/ + lock_sys.wr_lock(SRW_LOCK_CALL); + mysql_mutex_lock(&lock_sys.wait_mutex); + vtrx->mutex_lock(); + /* if victim is waiting for some other lock, we have to cancel that waiting */ - lock_sys.cancel_lock_wait_for_trx(vtrx); - - DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); - if (!wsrep_thd_bf_abort(bf_thd, vthd, true)) + if (vtrx->id == trx_id) { - wsrep_thd_LOCK(vthd); - wsrep_thd_set_wsrep_aborter(NULL, vthd); - wsrep_thd_UNLOCK(vthd); - - WSREP_DEBUG("wsrep_thd_bf_abort has failed, victim %lu will survive", - thd_get_thread_id(vthd)); + switch (vtrx->state) { + default: + break; + case TRX_STATE_ACTIVE: + case TRX_STATE_PREPARED: + lock_sys.cancel_lock_wait_for_wsrep_bf_abort(vtrx); + } } + lock_sys.wr_unlock(); + mysql_mutex_unlock(&lock_sys.wait_mutex); + vtrx->mutex_unlock(); } + else + { + WSREP_DEBUG("wsrep_thd_bf_abort has failed, victim %lu will survive", + thd_get_thread_id(vthd)); + } + wsrep_thd_UNLOCK(vthd); wsrep_thd_kill_UNLOCK(vthd); } } @@ -18709,68 +18719,50 @@ void lock_wait_wsrep_kill(trx_t *bf_trx, ulong thd_id, trx_id_t trx_id) /** This function forces the victim transaction to abort. Aborting the transaction does NOT end it, it still has to be rolled back. + The caller must lock LOCK_thd_kill and LOCK_thd_data. + @param bf_thd brute force THD asking for the abort @param victim_thd victim THD to be aborted - - @return 0 victim was aborted - @return -1 victim thread was aborted (no transaction) */ -static -void -wsrep_abort_transaction( - handlerton*, - THD *bf_thd, - THD *victim_thd, - my_bool signal) +static void wsrep_abort_transaction(handlerton *, THD *bf_thd, THD *victim_thd, + my_bool signal) { - DBUG_ENTER("wsrep_abort_transaction"); - ut_ad(bf_thd); - ut_ad(victim_thd); + DBUG_ENTER("wsrep_abort_transaction"); + ut_ad(bf_thd); + ut_ad(victim_thd); - wsrep_thd_kill_LOCK(victim_thd); - wsrep_thd_LOCK(victim_thd); - trx_t* victim_trx= thd_to_trx(victim_thd); - wsrep_thd_UNLOCK(victim_thd); + trx_t *victim_trx= thd_to_trx(victim_thd); - WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %s", - wsrep_thd_query(bf_thd), - wsrep_thd_query(victim_thd), - wsrep_thd_transaction_state_str(victim_thd)); + WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %s", + wsrep_thd_query(bf_thd), wsrep_thd_query(victim_thd), + wsrep_thd_transaction_state_str(victim_thd)); - if (victim_trx) { - victim_trx->lock.set_wsrep_victim(); + if (!victim_trx) + { + WSREP_DEBUG("abort transaction: victim did not exist"); + DBUG_VOID_RETURN; + } - wsrep_thd_LOCK(victim_thd); - bool aborting= !wsrep_thd_set_wsrep_aborter(bf_thd, victim_thd); - wsrep_thd_UNLOCK(victim_thd); - if (aborting) { - DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); - DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", - { - const char act[]= - "now " - "SIGNAL sync.before_wsrep_thd_abort_reached " - "WAIT_FOR signal.before_wsrep_thd_abort"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); - wsrep_thd_bf_abort(bf_thd, victim_thd, signal); - } - } else { - DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", - { - const char act[]= - "now " - "SIGNAL sync.before_wsrep_thd_abort_reached " - "WAIT_FOR signal.before_wsrep_thd_abort"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); - wsrep_thd_bf_abort(bf_thd, victim_thd, signal); - } + lock_sys.wr_lock(SRW_LOCK_CALL); + mysql_mutex_lock(&lock_sys.wait_mutex); + victim_trx->mutex_lock(); - wsrep_thd_kill_UNLOCK(victim_thd); - DBUG_VOID_RETURN; + switch (victim_trx->state) { + default: + break; + case 
TRX_STATE_ACTIVE: + case TRX_STATE_PREPARED: + /* Cancel lock wait if the victim is waiting for a lock in InnoDB. + The transaction which is blocked somewhere else (e.g. waiting + for next command or MDL) has been interrupted by THD::awake_no_mutex() + on server level before calling this function. */ + lock_sys.cancel_lock_wait_for_wsrep_bf_abort(victim_trx); + } + lock_sys.wr_unlock(); + mysql_mutex_unlock(&lock_sys.wait_mutex); + victim_trx->mutex_unlock(); + + DBUG_VOID_RETURN; } static diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 16acd031177..e8299bb1189 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -955,6 +955,10 @@ public: /** Cancel possible lock waiting for a transaction */ static void cancel_lock_wait_for_trx(trx_t *trx); +#ifdef WITH_WSREP + /** Cancel lock waiting for a wsrep BF abort. */ + static void cancel_lock_wait_for_wsrep_bf_abort(trx_t *trx); +#endif /* WITH_WSREP */ }; /** The lock system */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 3c7c3d348af..08547f169f3 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -5732,13 +5732,14 @@ static void lock_release_autoinc_locks(trx_t *trx) } /** Cancel a waiting lock request and release possibly waiting transactions */ -template +template void lock_cancel_waiting_and_release(lock_t *lock) { lock_sys.assert_locked(*lock); mysql_mutex_assert_owner(&lock_sys.wait_mutex); trx_t *trx= lock->trx; - trx->mutex_lock(); + if (inner_trx_lock) + trx->mutex_lock(); ut_d(const auto trx_state= trx->state); ut_ad(trx_state == TRX_STATE_COMMITTED_IN_MEMORY || trx_state == TRX_STATE_ACTIVE); @@ -5762,7 +5763,8 @@ void lock_cancel_waiting_and_release(lock_t *lock) lock_wait_end(trx); - trx->mutex_unlock(); + if (inner_trx_lock) + trx->mutex_unlock(); } void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx) @@ -5779,6 +5781,19 @@ void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx) mysql_mutex_unlock(&lock_sys.wait_mutex); } +#ifdef WITH_WSREP +void lock_sys_t::cancel_lock_wait_for_wsrep_bf_abort(trx_t *trx) +{ + lock_sys.assert_locked(); + mysql_mutex_assert_owner(&lock_sys.wait_mutex); + ut_ad(trx->mutex_is_owner()); + ut_ad(trx->state == TRX_STATE_ACTIVE || trx->state == TRX_STATE_PREPARED); + trx->lock.set_wsrep_victim(); + if (lock_t *lock= trx->lock.wait_lock) + lock_cancel_waiting_and_release(lock); +} +#endif /* WITH_WSREP */ + /** Cancel a waiting lock request. @tparam check_victim whether to check for DB_DEADLOCK @param trx active transaction diff --git a/wsrep-lib b/wsrep-lib index 4951c383577..e238c0d240c 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit 4951c38357737d568b554402bc5b6abe88a38fe1 +Subproject commit e238c0d240c2557229b0523a4a032f3cf8b41639 From c0adb05b30e75196a424e1fc11854c9451127120 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 15 May 2023 09:07:43 +1000 Subject: [PATCH 33/76] MDEV-31268: Fedora mariadb-connector-c-config conflicts with MariaDB's MariaDB-common The previous fix in MDEV-24629 had a version end of life date. Thanks @pgnd on Zulip for noticing. 
--- cmake/cpack_rpm.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/cpack_rpm.cmake b/cmake/cpack_rpm.cmake index 41177f8a908..7b1f0b0ff87 100644 --- a/cmake/cpack_rpm.cmake +++ b/cmake/cpack_rpm.cmake @@ -289,7 +289,7 @@ ELSEIF(RPM MATCHES "sles") ENDIF() # MDEV-24629, we need it outside of ELSIFs -IF(RPM MATCHES "fedora3[234]") +IF(RPM MATCHES "fedora") ALTERNATIVE_NAME("common" "mariadb-connector-c-config" ${MARIADB_CONNECTOR_C_VERSION}-1) ENDIF() From a5ce335ac919cc0735b6eb703262c7cffb2af578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 22 May 2023 17:10:25 +0300 Subject: [PATCH 34/76] MDEV-29593 fixup: Avoid a leak if rseg.undo_cached is corrupted trx_purge_truncate_rseg_history(): Avoid a leak similar to the one that was fixed in MDEV-31324, in case a supposedly cached undo log page is not found in the rseg.undo_cached list. --- storage/innobase/trx/trx0purge.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 6541ede447e..e8d81ff49cd 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -486,9 +486,9 @@ loop: if (undo->hdr_page_no == hdr_addr.page) goto found_cached; ut_ad("inconsistent undo logs" == 0); - break; - found_cached: - UT_LIST_REMOVE(rseg.undo_cached, undo); + if (false) + found_cached: + UT_LIST_REMOVE(rseg.undo_cached, undo); static_assert(FIL_NULL == 0xffffffff, ""); if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT + rseg_hdr->page.frame))) From 16258677b37011b543fd68a41eae4abd45f64f68 Mon Sep 17 00:00:00 2001 From: Monty Date: Fri, 31 Mar 2023 19:35:04 +0300 Subject: [PATCH 35/76] MDEV-6768 Wrong result with aggregate with join with no result set When a query does implicit grouping and join operation produces an empty result set, a NULL-complemented row combination is generated. However, constant table fields still show non-NULL values. What happens in the is that end_send_group() is called with a const row but without any rows matching the WHERE clause. This last part is shown by 'join->first_record' not being set. This causes item->no_rows_in_result() to be called for all items to reset all sum functions to their initial state. However fields are not set to NULL. The used fix is to produce NULL-complemented records for constant tables as well. Also, reset the constant table's records back in case we're in a subquery which may get re-executed. An alternative fix would have item->no_rows_in_result() also work with Item_field objects. There is some other issues with the code: - join->no_rows_in_result_called is used but never set. - Tables that are used with group functions are not properly marked as maybe_null, which is required if the table rows should be regarded as null-complemented (not existing). - The code that tries to detect if mixed_implicit_grouping should be set didn't take into account all usage of fields and sum functions. - Item_func::restore_to_before_no_rows_in_result() called the wrong function. - join->clear() does not use a table_map argument to clear_tables(), which caused it to ignore constant tables. - unclear_tables() does not correctly restore status to what is was before clear_tables(). Main bug fix was to always use a table_map argument to clear_tables() and always use join->clear() and clear_tables() together with unclear_tables(). 
Other fixes: - Fixed Item_func::restore_to_before_no_rows_in_result() - Set 'join->no_rows_in_result_called' when no_rows_in_result_set() is called. - Removed not used argument from setup_end_select_func(). - More code comments - Ensure that end_send_group() modifies the same fields as are in the result set. - Changed return_zero_rows() to use pointers instead of references, similar to the rest of the code. Reviewer: Sergei Petrunia --- mysql-test/main/group_min_max.result | 110 +++++++++++ mysql-test/main/group_min_max.test | 110 +++++++++++ mysql-test/main/type_timestamp.result | 2 + mysql-test/main/type_timestamp.test | 1 + sql/item_func.h | 2 +- sql/sql_select.cc | 264 ++++++++++++++++---------- sql/sql_select.h | 5 +- sql/table.h | 10 +- 8 files changed, 397 insertions(+), 107 deletions(-) diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result index a87a79fbc56..712466c8afb 100644 --- a/mysql-test/main/group_min_max.result +++ b/mysql-test/main/group_min_max.result @@ -4095,5 +4095,115 @@ MIN(pk) a 5 10 DROP TABLE t1; # +# MDEV-6768 Wrong result with agregate with join with no resultset +# +create table t1 +( +PARENT_ID int(10) unsigned NOT NULL AUTO_INCREMENT, +PARENT_FIELD VARCHAR(10), +PRIMARY KEY (PARENT_ID) +) engine=innodb; +create table t2 +( +CHILD_ID INT NOT NULL AUTO_INCREMENT, +PARENT_ID INT NOT NULL, +CHILD_FIELD varchar(10), +PRIMARY KEY (CHILD_ID) +)engine=innodb; +INSERT INTO t1 (PARENT_FIELD) +SELECT 'AAAA'; +INSERT INTO t2 (PARENT_ID, CHILD_FIELD) +SELECT 1, 'BBBB'; +explain select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 Using index +1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where +select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +PARENT_ID min(CHILD_FIELD) +NULL NULL +select +1, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +1 min(CHILD_FIELD) +1 NULL +select +IFNULL(t1.PARENT_ID,1), +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +IFNULL(t1.PARENT_ID,1) min(CHILD_FIELD) +1 NULL +# Check that things works with MyISAM (which has different explain) +alter table t1 engine=myisam; +alter table t2 engine=myisam; +explain select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +PARENT_ID min(CHILD_FIELD) +NULL NULL +drop table t1,t2; +# Check that things works if sub queries are re-executed +create table t1 (a int primary key, b int); +create table t2 (a int primary key, b int); +create table t3 (a int primary key, b int); +insert into t1 values (1,1),(2,2),(3,3); +insert into t2 values (1,1),(2,2),(3,3); +insert into t3 values (1,1),(3,3); +explain +select *, +(select +CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', +'min_t3_b:', IFNULL(min(t3.b), 
't3b-null')) +from t2,t3 +where t2.a=1 and t1.b = t3.a) as s1 +from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 +2 DEPENDENT SUBQUERY t2 const PRIMARY PRIMARY 4 const 1 Using index +2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 +select *, +(select +CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', +'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) +from t2,t3 +where t2.a=1 and t1.b = t3.a) as s1 +from t1; +a b s1 +1 1 t2:1;min_t3_b:1 +2 2 t2:t2a-null;min_t3_b:t3b-null +3 3 t2:1;min_t3_b:3 +drop table t1,t2,t3; +# # End of 10.5 tests # diff --git a/mysql-test/main/group_min_max.test b/mysql-test/main/group_min_max.test index 5f7981b8b30..1fc2be6231a 100644 --- a/mysql-test/main/group_min_max.test +++ b/mysql-test/main/group_min_max.test @@ -1748,6 +1748,116 @@ SELECT MIN(pk), a FROM t1 WHERE pk <> 1 GROUP BY a; DROP TABLE t1; +--echo # +--echo # MDEV-6768 Wrong result with agregate with join with no resultset +--echo # + +create table t1 +( + PARENT_ID int(10) unsigned NOT NULL AUTO_INCREMENT, + PARENT_FIELD VARCHAR(10), + PRIMARY KEY (PARENT_ID) +) engine=innodb; + +create table t2 +( + CHILD_ID INT NOT NULL AUTO_INCREMENT, + PARENT_ID INT NOT NULL, + CHILD_FIELD varchar(10), + PRIMARY KEY (CHILD_ID) +)engine=innodb; + +INSERT INTO t1 (PARENT_FIELD) +SELECT 'AAAA'; + +INSERT INTO t2 (PARENT_ID, CHILD_FIELD) +SELECT 1, 'BBBB'; + +explain select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + 1, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + IFNULL(t1.PARENT_ID,1), + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + + +--echo # Check that things works with MyISAM (which has different explain) + +alter table t1 engine=myisam; +alter table t2 engine=myisam; + +explain select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +drop table t1,t2; + +--echo # Check that things works if sub queries are re-executed + +create table t1 (a int primary key, b int); +create table t2 (a int primary key, b int); +create table t3 (a int primary key, b int); + +insert into t1 values (1,1),(2,2),(3,3); +insert into t2 values (1,1),(2,2),(3,3); +insert into t3 values (1,1),(3,3); + +explain +select *, + (select + CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', + 'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) + from t2,t3 + where t2.a=1 and t1.b = t3.a) as s1 +from t1; + +select *, + (select + CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', + 'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) + from t2,t3 + where t2.a=1 and t1.b = t3.a) as s1 +from t1; + +drop table t1,t2,t3; + --echo # --echo # End of 10.5 tests --echo # diff --git a/mysql-test/main/type_timestamp.result b/mysql-test/main/type_timestamp.result index f23c66e10a5..c78619fe585 100644 --- a/mysql-test/main/type_timestamp.result +++ b/mysql-test/main/type_timestamp.result @@ 
-1230,6 +1230,8 @@ SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT 'a' UNION SELECT 'r'); c1 Warnings: Warning 1292 Truncated incorrect datetime value: 'r' +SELECT * FROM t1 HAVING MIN(t1.c1) > 0; +c1 DROP TABLE t1; CREATE TABLE t1 (c1 timestamp); INSERT INTO t1 VALUES ('2010-01-01 00:00:00'); diff --git a/mysql-test/main/type_timestamp.test b/mysql-test/main/type_timestamp.test index a0f1f2c3d78..84b7e1cf49c 100644 --- a/mysql-test/main/type_timestamp.test +++ b/mysql-test/main/type_timestamp.test @@ -810,6 +810,7 @@ DROP TABLE t1; CREATE TABLE t1 (c1 timestamp); SELECT MIN(t1.c1) AS k1 FROM t1 HAVING (k1 >= ALL(SELECT 'a' UNION SELECT 'r')); SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT 'a' UNION SELECT 'r'); +SELECT * FROM t1 HAVING MIN(t1.c1) > 0; DROP TABLE t1; CREATE TABLE t1 (c1 timestamp); diff --git a/sql/item_func.h b/sql/item_func.h index 0af90fab24b..1a10b6e1d29 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -380,7 +380,7 @@ public: { for (uint i= 0; i < arg_count; i++) { - args[i]->no_rows_in_result(); + args[i]->restore_to_before_no_rows_in_result(); } } void convert_const_compared_to_int_field(THD *thd); diff --git a/sql/sql_select.cc b/sql/sql_select.cc index b6fee1c6f85..2f1cc62fe39 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -141,10 +141,10 @@ static void update_depend_map_for_order(JOIN *join, ORDER *order); static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond, bool change_list, bool *simple_order); static int return_zero_rows(JOIN *join, select_result *res, - List &tables, - List &fields, bool send_row, + List *tables, + List *fields, bool send_row, ulonglong select_options, const char *info, - Item *having, List &all_fields); + Item *having, List *all_fields); static COND *build_equal_items(JOIN *join, COND *cond, COND_EQUAL *inherited, List *join_list, @@ -1165,11 +1165,40 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) DBUG_RETURN(0); } + /***************************************************************************** Check fields, find best join, do the select and output fields. mysql_select assumes that all tables are already opened *****************************************************************************/ +/* + Check if we have a field reference. If yes, we have to use + mixed_implicit_grouping. +*/ + +static bool check_list_for_field(List *items) +{ + List_iterator_fast select_it(*items); + Item *select_el; + + while ((select_el= select_it++)) + { + if (select_el->with_field) + return true; + } + return false; +} + +static bool check_list_for_field(ORDER *order) +{ + for (; order; order= order->next) + { + if (order->item[0]->with_field) + return true; + } + return false; +} + /** Prepare of whole select (including sub queries in future). @@ -1248,53 +1277,45 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, DBUG_RETURN(-1); /* - TRUE if the SELECT list mixes elements with and without grouping, - and there is no GROUP BY clause. Mixing non-aggregated fields with - aggregate functions in the SELECT list is a MySQL extenstion that - is allowed only if the ONLY_FULL_GROUP_BY sql mode is not set. + mixed_implicit_grouping will be set to TRUE if the SELECT list + mixes elements with and without grouping, and there is no GROUP BY + clause. + Mixing non-aggregated fields with aggregate functions in the + SELECT list or HAVING is a MySQL extension that is allowed only if + the ONLY_FULL_GROUP_BY sql mode is not set. 
*/ mixed_implicit_grouping= false; if ((~thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) && select_lex->with_sum_func && !group_list) { - List_iterator_fast select_it(fields_list); - Item *select_el; /* Element of the SELECT clause, can be an expression. */ - bool found_field_elem= false; - bool found_sum_func_elem= false; - - while ((select_el= select_it++)) + if (check_list_for_field(&fields_list) || + check_list_for_field(order)) { - if (select_el->with_sum_func()) - found_sum_func_elem= true; - if (select_el->with_field) - found_field_elem= true; - if (found_sum_func_elem && found_field_elem) + TABLE_LIST *tbl; + List_iterator_fast li(select_lex->leaf_tables); + + mixed_implicit_grouping= true; // mark for future + + while ((tbl= li++)) { - mixed_implicit_grouping= true; - break; + /* + If the query uses implicit grouping where the select list + contains both aggregate functions and non-aggregate fields, + any non-aggregated field may produce a NULL value. Set all + fields of each table as nullable before semantic analysis to + take into account this change of nullability. + + Note: this loop doesn't touch tables inside merged + semi-joins, because subquery-to-semijoin conversion has not + been done yet. This is intended. + */ + if (tbl->table) + tbl->table->maybe_null= 1; } } } - table_count= select_lex->leaf_tables.elements; - TABLE_LIST *tbl; - List_iterator_fast li(select_lex->leaf_tables); - while ((tbl= li++)) - { - /* - If the query uses implicit grouping where the select list contains both - aggregate functions and non-aggregate fields, any non-aggregated field - may produce a NULL value. Set all fields of each table as nullable before - semantic analysis to take into account this change of nullability. - - Note: this loop doesn't touch tables inside merged semi-joins, because - subquery-to-semijoin conversion has not been done yet. This is intended. - */ - if (mixed_implicit_grouping && tbl->table) - tbl->table->maybe_null= 1; - } - uint real_og_num= og_num; if (skip_order_by && select_lex != select_lex->master_unit()->global_parameters()) @@ -3838,7 +3859,7 @@ bool JOIN::make_aggr_tables_info() set_items_ref_array(items0); if (join_tab) join_tab[exec_join_tab_cnt() + aggr_tables - 1].next_select= - setup_end_select_func(this, NULL); + setup_end_select_func(this); group= has_group_by; DBUG_RETURN(false); @@ -4216,13 +4237,7 @@ JOIN::reinit() } } - /* Reset of sum functions */ - if (sum_funcs) - { - Item_sum *func, **func_ptr= sum_funcs; - while ((func= *(func_ptr++))) - func->clear(); - } + clear_sum_funcs(); if (no_rows_in_result_called) { @@ -4507,12 +4522,12 @@ void JOIN::exec_inner() } else { - (void) return_zero_rows(this, result, select_lex->leaf_tables, - *columns_list, + (void) return_zero_rows(this, result, &select_lex->leaf_tables, + columns_list, send_row_on_empty_set(), select_options, zero_result_cause, - having ? having : tmp_having, all_fields); + having ? having : tmp_having, &all_fields); DBUG_VOID_RETURN; } } @@ -14768,10 +14783,36 @@ ORDER *simple_remove_const(ORDER *order, COND *where) } +/* + Set all fields in the table to have a null value + + @param tables Table list +*/ + +static void make_tables_null_complemented(List *tables) +{ + List_iterator ti(*tables); + TABLE_LIST *table; + while ((table= ti++)) + { + /* + Don't touch semi-join materialization tables, as the a join_free() + call may have freed them (and HAVING clause can't have references to + them anyway). 
+ */ + if (!table->is_jtbm()) + { + TABLE *tbl= table->table; + mark_as_null_row(tbl); // Set fields to NULL + } + } +} + + static int -return_zero_rows(JOIN *join, select_result *result, List &tables, - List &fields, bool send_row, ulonglong select_options, - const char *info, Item *having, List &all_fields) +return_zero_rows(JOIN *join, select_result *result, List *tables, + List *fields, bool send_row, ulonglong select_options, + const char *info, Item *having, List *all_fields) { DBUG_ENTER("return_zero_rows"); @@ -14787,24 +14828,15 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, Set all tables to have NULL row. This is needed as we will be evaluating HAVING condition. */ - List_iterator ti(tables); - TABLE_LIST *table; - while ((table= ti++)) - { - /* - Don't touch semi-join materialization tables, as the above join_free() - call has freed them (and HAVING clause can't have references to them - anyway). - */ - if (!table->is_jtbm()) - mark_as_null_row(table->table); // All fields are NULL - } - List_iterator_fast it(all_fields); + make_tables_null_complemented(tables); + + List_iterator_fast it(*all_fields); Item *item; /* Inform all items (especially aggregating) to calculate HAVING correctly, also we will need it for sending results. */ + join->no_rows_in_result_called= 1; while ((item= it++)) item->no_rows_in_result(); if (having && having->val_int() == 0) @@ -14818,12 +14850,12 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, join->thd->limit_found_rows= 0; } - if (!(result->send_result_set_metadata(fields, + if (!(result->send_result_set_metadata(*fields, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))) { bool send_error= FALSE; if (send_row) - send_error= result->send_data_with_check(fields, join->unit, 0) > 0; + send_error= result->send_data_with_check(*fields, join->unit, 0) > 0; if (likely(!send_error)) result->send_eof(); // Should be safe } @@ -14839,49 +14871,42 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, } /** - used only in JOIN::clear (always) and in do_select() - (if there where no matching rows) + Reset table rows to contain a null-complement row (all fields are null) + + Used only in JOIN::clear() and in do_select() if there where no matching rows. @param join JOIN - @param cleared_tables If not null, clear also const tables and mark all - cleared tables in the map. cleared_tables is only - set when called from do_select() when there is a - group function and there where no matching rows. + @param cleared_tables Used to mark all cleared tables in the map. Needed for + unclear_tables() to know which tables to restore to + their original state. */ static void clear_tables(JOIN *join, table_map *cleared_tables) { - /* - must clear only the non-const tables as const tables are not re-calculated. 
- */ + DBUG_ASSERT(cleared_tables); for (uint i= 0 ; i < join->table_count ; i++) { TABLE *table= join->table[i]; if (table->null_row) continue; // Nothing more to do - if (!(table->map & join->const_table_map) || cleared_tables) + (*cleared_tables)|= (((table_map) 1) << i); + if (table->s->null_bytes) { - if (cleared_tables) - { - (*cleared_tables)|= (((table_map) 1) << i); - if (table->s->null_bytes) - { - /* - Remember null bits for the record so that we can restore the - original const record in unclear_tables() - */ - memcpy(table->record[1], table->null_flags, table->s->null_bytes); - } - } - mark_as_null_row(table); // All fields are NULL + /* + Remember null bits for the record so that we can restore the + original const record in unclear_tables() + */ + memcpy(table->record[1], table->null_flags, table->s->null_bytes); } + mark_as_null_row(table); // All fields are NULL } } /** Reverse null marking for tables and restore null bits. + This return the tables to the state of before clear_tables(). We have to do this because the tables may be re-used in a sub query and the subquery will assume that the const tables contains the original @@ -20519,9 +20544,9 @@ void set_postjoin_aggr_write_func(JOIN_TAB *tab) end_select function to use. This function can't fail. */ -Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab) +Next_select_func setup_end_select_func(JOIN *join) { - TMP_TABLE_PARAM *tmp_tbl= tab ? tab->tmp_table_param : &join->tmp_table_param; + TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param; /* Choose method for presenting result to user. Use end_send_group @@ -20591,7 +20616,7 @@ do_select(JOIN *join, Procedure *procedure) join->duplicate_rows= join->send_records=0; if (join->only_const_tables() && !join->need_tmp) { - Next_select_func end_select= setup_end_select_func(join, NULL); + Next_select_func end_select= setup_end_select_func(join); /* HAVING will be checked after processing aggregate functions, @@ -21077,6 +21102,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) } } + /* Restore state if mark_as_null_row() have been called */ if (join_tab->last_inner) { JOIN_TAB *last_inner_tab= join_tab->last_inner; @@ -22461,11 +22487,18 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { int idx= -1; enum_nested_loop_state ok_code= NESTED_LOOP_OK; + /* + join_tab can be 0 in the case all tables are const tables and we did not + need a temporary table to store the result. + In this case we use the original given fields, which is stored in + join->fields. + */ List *fields= join_tab ? 
(join_tab-1)->fields : join->fields; DBUG_ENTER("end_send_group"); if (!join->items3.is_null() && !join->set_group_rpa) { + /* Move ref_pointer_array to points to items3 */ join->set_group_rpa= true; join->set_items_ref_array(join->items3); } @@ -22473,10 +22506,12 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!join->first_record || end_of_records || (idx=test_if_group_changed(join->group_fields)) >= 0) { + if (!join->group_sent && (join->first_record || (end_of_records && !join->group && !join->group_optimized_away))) { + table_map cleared_tables= (table_map) 0; if (join->procedure) join->procedure->end_group(); if (idx < (int) join->send_group_parts) @@ -22499,11 +22534,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (!join->first_record) { - List_iterator_fast it(*join->fields); - Item *item; /* No matching rows for group function */ - join->clear(); + List_iterator_fast it(*fields); + Item *item; + join->no_rows_in_result_called= 1; + + join->clear(&cleared_tables); while ((item= it++)) item->no_rows_in_result(); } @@ -22531,7 +22568,14 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (join->rollup_send_data((uint) (idx+1))) error= 1; } - } + if (join->no_rows_in_result_called) + { + /* Restore null tables to original state */ + join->no_rows_in_result_called= 0; + if (cleared_tables) + unclear_tables(join, &cleared_tables); + } + } if (unlikely(error > 0)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (end_of_records) @@ -22835,6 +22879,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->first_record || (end_of_records && !join->group)) { + table_map cleared_tables= (table_map) 0; if (join->procedure) join->procedure->end_group(); int send_group_parts= join->send_group_parts; @@ -22843,7 +22888,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!join->first_record) { /* No matching rows for group function */ - join->clear(); + join->clear(&cleared_tables); } copy_sum_funcs(join->sum_funcs, join->sum_funcs_end[send_group_parts]); @@ -22866,6 +22911,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_ERROR); } } + if (cleared_tables) + unclear_tables(join, &cleared_tables); if (end_of_records) goto end; } @@ -26965,11 +27012,8 @@ int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, TABL (end_send_group/end_write_group) */ -void JOIN::clear() +void inline JOIN::clear_sum_funcs() { - clear_tables(this, 0); - copy_fields(&tmp_table_param); - if (sum_funcs) { Item_sum *func, **func_ptr= sum_funcs; @@ -26979,6 +27023,22 @@ void JOIN::clear() } +/* + Prepare for returning 'empty row' when there is no matching row. + + - Mark all tables with mark_as_null_row() + - Make a copy of of all simple SELECT items + - Reset all sum functions to NULL or 0. 
+*/ + +void JOIN::clear(table_map *cleared_tables) +{ + clear_tables(this, cleared_tables); + copy_fields(&tmp_table_param); + clear_sum_funcs(); +} + + /** Print an EXPLAIN line with all NULLs and given message in the 'Extra' column diff --git a/sql/sql_select.h b/sql/sql_select.h index 4eafffd385f..be50925034c 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -227,7 +227,7 @@ enum sj_strategy_enum typedef enum_nested_loop_state (*Next_select_func)(JOIN *, struct st_join_table *, bool); -Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab); +Next_select_func setup_end_select_func(JOIN *join); int rr_sequential(READ_RECORD *info); int rr_sequential_and_unpack(READ_RECORD *info); Item *remove_pushed_top_conjuncts(THD *thd, Item *cond); @@ -1767,7 +1767,8 @@ public: void join_free(); /** Cleanup this JOIN, possibly for reuse */ void cleanup(bool full); - void clear(); + void clear(table_map *cleared_tables); + void inline clear_sum_funcs(); bool send_row_on_empty_set() { return (do_send_rows && implicit_grouping && !group_optimized_away && diff --git a/sql/table.h b/sql/table.h index d8756deb43c..f28e1268fad 100644 --- a/sql/table.h +++ b/sql/table.h @@ -3306,10 +3306,16 @@ inline void mark_as_null_row(TABLE *table) bfill(table->null_flags,table->s->null_bytes,255); } +/* + Restore table to state before mark_as_null_row() call. + This assumes that the caller has restored table->null_flags, + as is done in unclear_tables(). +*/ + inline void unmark_as_null_row(TABLE *table) { - table->null_row=0; - table->status= STATUS_NO_RECORD; + table->null_row= 0; + table->status&= ~STATUS_NULL_ROW; } bool is_simple_order(ORDER *order);
From b0c285bb06f85650055fb28f03c8dec3338414dd Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 22 May 2023 11:27:00 +0300 Subject: [PATCH 36/76] Remove warning of not freed memory if mysqld aborts Fixes warning when doing: ./sql/mariadbd --socket=/tmp/xxxx/ddd --- sql/mysqld.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3163ff36c9a..843f9b5cbae 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1714,6 +1714,11 @@ static void close_connections(void) (void) unlink(mysqld_unix_port); } } + /* + The following is needed to the threads stuck in + setup_connection_thread_globals() + to continue. + */ listen_sockets.free_memory(); mysql_mutex_unlock(&LOCK_start_thread); @@ -1999,6 +2004,7 @@ static void clean_up(bool print_message) end_ssl(); #ifndef EMBEDDED_LIBRARY vio_end(); + listen_sockets.free_memory(); #endif /*!EMBEDDED_LIBRARY*/ #if defined(ENABLED_DEBUG_SYNC) /* End the debug sync facility. See debug_sync.cc. */
From 6a0314063d892370ea56d36bd87050f76ea54e30 Mon Sep 17 00:00:00 2001 From: Monty Date: Tue, 18 Apr 2023 11:21:06 +0300 Subject: [PATCH 37/76] Make install.db read only in mtr This ensures that no mtr test can change install.db after its initial creation, as changing it while another thread is copying it will lead to failures in at least InnoDB and Aria recovery.
Fixed spider/bugfix.mdev_30370 that was wrongly used install.db --- mysql-test/lib/My/File/Path.pm | 31 ++++++++++++++++++- mysql-test/lib/My/SafeProcess/Base.pm | 2 +- mysql-test/mysql-test-run.pl | 7 +++-- .../spider/bugfix/t/mdev_30370.test | 5 ++- 4 files changed, 40 insertions(+), 5 deletions(-) diff --git a/mysql-test/lib/My/File/Path.pm b/mysql-test/lib/My/File/Path.pm index d60027c909e..fd3cf6dd61c 100644 --- a/mysql-test/lib/My/File/Path.pm +++ b/mysql-test/lib/My/File/Path.pm @@ -34,7 +34,7 @@ use strict; use Exporter; use base "Exporter"; -our @EXPORT= qw /rmtree mkpath copytree/; +our @EXPORT= qw /rmtree mkpath copytree make_readonly/; use File::Find; use File::Copy; @@ -184,6 +184,10 @@ sub copytree { # Only copy plain files next unless -f "$from_dir/$_"; copy("$from_dir/$_", "$to_dir/$_"); + if (!$use_umask) + { + chmod(0666, "$to_dir/$_"); + } } closedir(DIR); @@ -193,4 +197,29 @@ sub copytree { } } + +sub make_readonly { + my ($dir) = @_; + + die "Usage: make_readonly(])" + unless @_ == 1; + + opendir(DIR, "$dir") + or croak("Can't find $dir$!"); + for(readdir(DIR)) { + + next if "$_" eq "." or "$_" eq ".."; + + if ( -d "$dir/$_" ) + { + make_readonly("$dir/$_"); + next; + } + + # Only copy plain files + next unless -f "$dir/$_"; + chmod 0444, "$dir/$_"; + } + closedir(DIR); +} 1; diff --git a/mysql-test/lib/My/SafeProcess/Base.pm b/mysql-test/lib/My/SafeProcess/Base.pm index 818e6e34e11..1cd01cb0ca9 100644 --- a/mysql-test/lib/My/SafeProcess/Base.pm +++ b/mysql-test/lib/My/SafeProcess/Base.pm @@ -40,7 +40,7 @@ our @EXPORT= qw(create_process); # Retry a couple of times if fork returns EAGAIN # sub _safe_fork { - my $retries= 5; + my $retries= 100; my $pid; FORK: diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index da5e33428a9..0513bc209ad 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -401,8 +401,11 @@ sub main { my $tests= collect_test_cases($opt_reorder, $opt_suites, \@opt_cases, \@opt_skip_test_list); mark_time_used('collect'); - mysql_install_db(default_mysqld(), "$opt_vardir/install.db") unless using_extern(); - + if (!using_extern()) + { + mysql_install_db(default_mysqld(), "$opt_vardir/install.db"); + make_readonly("$opt_vardir/install.db"); + } if ($opt_dry_run) { for (@$tests) { diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test index 788ea2323f7..99e56ab062a 100644 --- a/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test @@ -2,4 +2,7 @@ --echo # MDEV-30370 mariadbd hangs when running with --wsrep-recover and --plugin-load-add=ha_spider.so --echo # ---exec $MYSQLD_BOOTSTRAP_CMD --wsrep-recover --plugin-load-add=ha_spider.so +let $MYSQLD_DATADIR=$MYSQLTEST_VARDIR/mdev_30370; +--mkdir $MYSQLD_DATADIR +--exec $MYSQLD_BOOTSTRAP_CMD --wsrep-recover --plugin-load-add=ha_spider.so --datadir=$MYSQLD_DATADIR +--rmdir $MYSQLD_DATADIR From c7e04af8bc4b3c8a390e362bc9b24716b6edb16a Mon Sep 17 00:00:00 2001 From: Monty Date: Tue, 18 Apr 2023 14:56:07 +0300 Subject: [PATCH 38/76] Update main.selectivity test and results --- mysql-test/main/selectivity.result | 35 +++++++++++++++++------ mysql-test/main/selectivity.test | 12 ++++---- mysql-test/main/selectivity_innodb.result | 35 +++++++++++++++++------ 3 files changed, 59 insertions(+), 23 deletions(-) diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result index 5a7ec7799a0..2bde5027d0e 100644 
--- a/mysql-test/main/selectivity.result +++ b/mysql-test/main/selectivity.result @@ -1824,7 +1824,6 @@ test.t1 analyze status Table is already up to date test.t2 analyze status Engine-independent statistics collected test.t2 analyze status Table is already up to date set optimizer_switch='exists_to_in=off'; -set optimizer_use_condition_selectivity=2; SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1849,18 +1848,39 @@ id a 17 17 18 18 19 19 -explain SELECT * FROM t1 +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where -2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter -EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 -1 SIMPLE B ref a a 5 const 1 +1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter +set @query="EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65"; +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1870,7 +1890,6 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (10%) Using where; Using rowid filter set optimizer_switch= @save_optimizer_switch; -set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; # # MDEV-21495: Conditional jump or move depends on uninitialised value in sel_arg_range_seq_next diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test index df3850d74b7..9f21bea442a 100644 --- a/mysql-test/main/selectivity.test +++ b/mysql-test/main/selectivity.test @@ -1236,13 +1236,10 @@ set optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity; 
drop table t1,t2,t3; - --echo # --echo # MDEV-20519: Query plan regression with optimizer_use_condition_selectivity=4 --echo # - - create table t1 (id int, a int, PRIMARY KEY(id), key(a)); insert into t1 select seq,seq from seq_1_to_100; @@ -1252,7 +1249,6 @@ insert into t2 select seq,seq,seq from seq_1_to_100; analyze table t1,t2 persistent for all; set optimizer_switch='exists_to_in=off'; -set optimizer_use_condition_selectivity=2; let $query= SELECT * FROM t1 WHERE @@ -1260,14 +1256,16 @@ let $query= SELECT * FROM t1 WHERE A.a=t1.a AND t2.b < 20); eval $query; -eval explain $query; +eval set statement optimizer_use_condition_selectivity=2 for explain $query; +eval set statement optimizer_use_condition_selectivity=4 for explain $query; -EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; +set @query="EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65"; +eval set statement optimizer_use_condition_selectivity=2 for explain $query; +eval set statement optimizer_use_condition_selectivity=4 for explain $query; eval explain $query; set optimizer_switch= @save_optimizer_switch; -set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; --echo # diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result index f9c623cd4b3..ed7da42e37f 100644 --- a/mysql-test/main/selectivity_innodb.result +++ b/mysql-test/main/selectivity_innodb.result @@ -1834,7 +1834,6 @@ test.t1 analyze status OK test.t2 analyze status Engine-independent statistics collected test.t2 analyze status OK set optimizer_switch='exists_to_in=off'; -set optimizer_use_condition_selectivity=2; SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1859,18 +1858,39 @@ id a 17 17 18 18 19 19 -explain SELECT * FROM t1 +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index -2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index -2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter -EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 -1 SIMPLE B ref a a 5 const 1 Using index +1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter +set @query="EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65"; +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 
DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1880,7 +1900,6 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index 2 DEPENDENT SUBQUERY t2 ref|filter a,b a|b 5|5 test.A.id 1 (19%) Using where; Using rowid filter set optimizer_switch= @save_optimizer_switch; -set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; # # MDEV-21495: Conditional jump or move depends on uninitialised value in sel_arg_range_seq_next From cd37e494220fa556592d8f56666a72876ee63505 Mon Sep 17 00:00:00 2001 From: Monty Date: Thu, 20 Apr 2023 14:12:48 +0300 Subject: [PATCH 39/76] MDEV-31083 ASAN use-after-poison in myrg_attach_children The reason for ASAN report was that the MERGE and MYISAM file had different key definitions, which is not allowed. Fixed by ensuring that the MERGE code is not copying more key stats than what is in the MyISAM file. Other things: - Give an error if different MyISAM files has different number of key parts. --- include/myisammrg.h | 1 + mysql-test/main/merge.result | 12 ++++++++++++ mysql-test/main/merge.test | 12 ++++++++++++ storage/myisam/mi_open.c | 1 + storage/myisam/myisamdef.h | 2 +- storage/myisammrg/myrg_open.c | 16 ++++++++++------ 6 files changed, 37 insertions(+), 7 deletions(-) diff --git a/include/myisammrg.h b/include/myisammrg.h index 1d7efbe74d6..b3bca218a44 100644 --- a/include/myisammrg.h +++ b/include/myisammrg.h @@ -71,6 +71,7 @@ typedef struct st_myrg_info ulong cache_size; uint merge_insert_method; uint tables,options,reclength,keys; + uint key_parts; my_bool cache_in_use; /* If MERGE children attached to parent. See top comment in ha_myisammrg.cc */ my_bool children_attached; diff --git a/mysql-test/main/merge.result b/mysql-test/main/merge.result index 3761ca2c150..6722dd38b9c 100644 --- a/mysql-test/main/merge.result +++ b/mysql-test/main/merge.result @@ -3919,3 +3919,15 @@ ERROR HY000: Unable to open underlying table which is differently defined or of DROP TRIGGER trg1; DROP TABLE t1; DROP TABLE m1; +# +# MDEV-31083 ASAN use-after-poison in myrg_attach_children +# +CREATE TABLE t1 (f TEXT, FULLTEXT (f)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('bar'); +CREATE TABLE mrg (f TEXT) ENGINE=MERGE, UNION(t1); +SELECT * FROM mrg; +f +foo +bar +DROP TABLE mrg, t1; +End of 10.5 tests diff --git a/mysql-test/main/merge.test b/mysql-test/main/merge.test index 888b41b24bd..ba2ce29ade2 100644 --- a/mysql-test/main/merge.test +++ b/mysql-test/main/merge.test @@ -2919,3 +2919,15 @@ set global default_storage_engine=@save_default_storage_engine; # Check that all connections opened by test cases in this file are really # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc + +--echo # +--echo # MDEV-31083 ASAN use-after-poison in myrg_attach_children +--echo # + +CREATE TABLE t1 (f TEXT, FULLTEXT (f)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('bar'); +CREATE TABLE mrg (f TEXT) ENGINE=MERGE, UNION(t1); +SELECT * FROM mrg; +DROP TABLE mrg, t1; + +--echo End of 10.5 tests diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index 3db424ea997..f47ae48ad0d 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -518,6 +518,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) share->kfile=kfile; share->this_process=(ulong) getpid(); share->last_process= share->state.process; + share->base.base_key_parts= base_key_parts; share->base.key_parts=key_parts; share->base.all_key_parts=key_parts+unique_key_parts; if (!(share->last_version=share->state.version)) diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index c90d989c975..f84ad6fa184 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -132,7 +132,7 @@ typedef struct st_mi_base_info uint extra_alloc_bytes; uint extra_alloc_procent; /* The following are from the header */ - uint key_parts, all_key_parts; + uint key_parts, all_key_parts, base_key_parts; } MI_BASE_INFO; diff --git a/storage/myisammrg/myrg_open.c b/storage/myisammrg/myrg_open.c index d9ea4b754f2..4a983684394 100644 --- a/storage/myisammrg/myrg_open.c +++ b/storage/myisammrg/myrg_open.c @@ -432,17 +432,20 @@ int myrg_attach_children(MYRG_INFO *m_info, int handle_locking, first_child= FALSE; m_info->reclength= myisam->s->base.reclength; min_keys= myisam->s->base.keys; - key_parts= myisam->s->base.key_parts; + key_parts= myisam->s->base.base_key_parts; if (*need_compat_check && m_info->rec_per_key_part) { my_free(m_info->rec_per_key_part); m_info->rec_per_key_part= NULL; } - if (!m_info->rec_per_key_part) + if (!m_info->rec_per_key_part || m_info->key_parts != key_parts) { - if(!(m_info->rec_per_key_part= (ulong*) - my_malloc(rg_key_memory_MYRG_INFO, - key_parts * sizeof(long), MYF(MY_WME)))) + m_info->key_parts= key_parts; + /* The +1 is because by my_realloc() don't allow zero length */ + if (!(m_info->rec_per_key_part= (ulong*) + my_realloc(rg_key_memory_MYRG_INFO, m_info->rec_per_key_part, + key_parts * sizeof(long) +1, + MYF(MY_WME | MY_ALLOW_ZERO_PTR | MY_FREE_ON_ERROR)))) goto err; /* purecov: inspected */ errpos= 1; } @@ -457,7 +460,8 @@ int myrg_attach_children(MYRG_INFO *m_info, int handle_locking, myisam->open_flag|= HA_OPEN_MERGE_TABLE; /* Check table definition match. */ - if (m_info->reclength != myisam->s->base.reclength) + if (m_info->reclength != myisam->s->base.reclength || + key_parts != myisam->s->base.base_key_parts) { DBUG_PRINT("error", ("definition mismatch table: '%s' repair: %d", myisam->filename, From 92d2ceac73aa175a01f520fd4b7a31ed338c1ef5 Mon Sep 17 00:00:00 2001 From: Monty Date: Mon, 22 May 2023 18:58:45 +0300 Subject: [PATCH 40/76] MDEV-28285 Unexpected result when combining DISTINCT, subselect and LIMIT The problem was that when JOIN_TAB::remove_duplicates() noticed there can only be one possible row in the output, it adjusted limits but didn't take into account any possible offset. Fixed by not adjusting limit offset when setting one-row-limit. 
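As an illustration (the query below is the one exercised by the regression test added in this patch): a query that can produce at most one distinct row must still honor a non-zero OFFSET, i.e. it has to return an empty result rather than that single row:

select distinct a from t1
where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2))
limit 10,10;

With the fix, send_first_row() keeps the offset and only caps the limit at one row past it, so the statement above returns no rows while LIMIT 0,1 still returns the single matching row.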
--- mysql-test/main/distinct.result | 25 +++++++++++++++++++++++++ mysql-test/main/distinct.test | 21 +++++++++++++++++++++ sql/sql_limit.h | 9 +++++++++ sql/sql_select.cc | 2 +- 4 files changed, 56 insertions(+), 1 deletion(-) diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index fa9f0259a0f..ac693421ba2 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -1157,3 +1157,28 @@ explain select * from t1 limit 0 offset 10; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Zero limit drop table t1, t2; +# +# MDEV-28285 Unexpected result when combining DISTINCT, subselect +# and LIMIT +# +create table t1 (a int primary key); +create table t2 (a int primary key, b int not null); +insert into t1 select seq from seq_1_to_10; +insert into t2 select seq,seq from seq_1_to_10; +select distinct a from t1 where t1.a=1 and t1.a in (select a from t2 where t2.b in (1,2)); +a +1 +explain select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index; Using temporary +1 PRIMARY eq_ref distinct_key distinct_key 8 func 1 +2 MATERIALIZED t2 ALL NULL NULL NULL NULL 10 Using where +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +a +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 0,1; +a +1 +drop table t1,t2; +# +# end of 10.5 tests +# diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test index 893e2dcc9a7..9aa3b2921aa 100644 --- a/mysql-test/main/distinct.test +++ b/mysql-test/main/distinct.test @@ -892,3 +892,24 @@ explain select * from t1 limit 0; explain select * from t1 limit 0 offset 10; drop table t1, t2; + +--echo # +--echo # MDEV-28285 Unexpected result when combining DISTINCT, subselect +--echo # and LIMIT +--echo # + +create table t1 (a int primary key); +create table t2 (a int primary key, b int not null); + +insert into t1 select seq from seq_1_to_10; +insert into t2 select seq,seq from seq_1_to_10; + +select distinct a from t1 where t1.a=1 and t1.a in (select a from t2 where t2.b in (1,2)); +explain select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 0,1; +drop table t1,t2; + +--echo # +--echo # end of 10.5 tests +--echo # diff --git a/sql/sql_limit.h b/sql/sql_limit.h index 19c1ce57e99..a85e8118175 100644 --- a/sql/sql_limit.h +++ b/sql/sql_limit.h @@ -52,6 +52,15 @@ class Select_limit_counters select_limit_cnt= 1; } + /* Send the first row, still honoring offset_limit_cnt */ + void send_first_row() + { + /* Guard against overflow */ + if ((select_limit_cnt= offset_limit_cnt +1 ) == 0) + select_limit_cnt= offset_limit_cnt; + // with_ties= false; Remove // on merge to 10.6 + } + bool is_unlimited() { return select_limit_cnt == HA_POS_ERROR; } bool is_unrestricted() diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 2f1cc62fe39..5af2a9e6896 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -24640,7 +24640,7 @@ JOIN_TAB::remove_duplicates() !(join->select_options & OPTION_FOUND_ROWS)) { // only const items with no 
OPTION_FOUND_ROWS - join->unit->lim.set_single_row(); // Only send first row + join->unit->lim.send_first_row(); // Only send first row my_free(sortorder); DBUG_RETURN(false); }
From a7adfd4c52307876d68ad3386cefd3757ee66e92 Mon Sep 17 00:00:00 2001 From: Monty Date: Tue, 23 May 2023 10:02:33 +0300 Subject: [PATCH 41/76] Optimized version of safe_strcpy() Note: We should replace most cases of safe_strcpy() with strmake() to avoid the unnecessary zerofill. --- include/m_string.h | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/include/m_string.h b/include/m_string.h index 1db86d1b197..722de2855ca 100644 --- a/include/m_string.h +++ b/include/m_string.h @@ -249,14 +249,15 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str, */ static inline int safe_strcpy(char *dst, size_t dst_size, const char *src) { - memset(dst, '\0', dst_size); - strncpy(dst, src, dst_size - 1); - /* - If the first condition is true, we are guaranteed to have src length - >= (dst_size - 1), hence safe to access src[dst_size - 1]. - */ - if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0') - return 1; /* Truncation of src. */ + DBUG_ASSERT(dst_size > 0); + /* Note, strncpy will zerofill end of dst if src shorter than dst_size */ + strncpy(dst, src, dst_size); + if (dst[dst_size-1]) + { + /* Ensure string is zero terminated */ + dst[dst_size-1]= 0; + return 1; + } return 0; }
From 9c35f9c9c160c729a235336101d1cfe384a15d41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 23 May 2023 12:20:27 +0300 Subject: [PATCH 42/76] MDEV-31234 fixup: Allow innodb_undo_log_truncate=ON after upgrade trx_purge_truncate_history(): Relax a condition that would prevent undo log truncation if the undo log tablespaces were "contaminated" by the bug that commit e0084b9d315f10e3ceb578b65e144d751b208bf1 fixed. That is, trx_purge_truncate_rseg_history() would have invoked flst_remove() on TRX_RSEG_HISTORY but not reduced TRX_RSEG_HISTORY_SIZE. To avoid any regression with normal operation, we implement this fixup during slow shutdown only. The condition on the history list being empty is necessary: without it, in the test innodb.undo_truncate_recover there may be far fewer than the expected 90,000 calls to row_purge() before the truncation. That is, we would truncate the undo tablespace before actually having processed all undo log records in it. To truncate such "contaminated" or "bloated" undo log tablespaces (when using innodb_undo_tablespaces=2 or more) you can execute the following SQL: BEGIN;INSERT mysql.innodb_table_stats VALUES('','',DEFAULT,0,0,0);ROLLBACK; SET GLOBAL innodb_undo_log_truncate=ON, innodb_fast_shutdown=0; SHUTDOWN; The first line creates a dummy InnoDB transaction, to ensure that there will be some history to be purged during shutdown and that the undo tablespaces will be truncated.
--- storage/innobase/trx/trx0purge.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 97979a3fefe..37c73486c8a 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -619,7 +619,8 @@ static void trx_purge_truncate_history() ut_ad(rseg->curr_size > cached); - if (rseg->curr_size > cached + 1) + if (rseg->curr_size > cached + 1 && + (srv_fast_shutdown || srv_undo_sources || trx_sys.rseg_history_len)) goto not_free; mutex_exit(&rseg->mutex); From c5cf94b2dcebc55b23d28a9b14c3c5b10b16f541 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 24 May 2023 08:25:26 +0300 Subject: [PATCH 43/76] MDEV-31234 fixup: Free some UNDO pages earlier trx_purge_truncate_rseg_history(): Add a parameter to specify if the entire rollback segment is safe to be freed. If not, we may still be able to invoke trx_undo_truncate_start() and free some pages. --- storage/innobase/trx/trx0purge.cc | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 97979a3fefe..c5ec42ecfce 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -399,12 +399,14 @@ void trx_purge_free_segment(mtr_t &mtr, trx_rseg_t* rseg, fil_addr_t hdr_addr) /** Remove unnecessary history data from a rollback segment. @param[in,out] rseg rollback segment -@param[in] limit truncate anything before this */ +@param[in] limit truncate anything before this +@param[in] all whether everything can be truncated */ static void trx_purge_truncate_rseg_history( trx_rseg_t& rseg, - const purge_sys_t::iterator& limit) + const purge_sys_t::iterator& limit, + bool all) { fil_addr_t hdr_addr; fil_addr_t prev_hdr_addr; @@ -443,6 +445,10 @@ func_exit: goto func_exit; } + if (!all) { + goto func_exit; + } + prev_hdr_addr = flst_get_prev_addr(block->frame + hdr_addr.boffset + TRX_UNDO_HISTORY_NODE); prev_hdr_addr.boffset = static_cast(prev_hdr_addr.boffset @@ -539,8 +545,9 @@ static void trx_purge_truncate_history() ut_ad(rseg->id == i); ut_ad(rseg->is_persistent()); mutex_enter(&rseg->mutex); - if (!rseg->trx_ref_count && rseg->needs_purge <= head.trx_no) - trx_purge_truncate_rseg_history(*rseg, head); + trx_purge_truncate_rseg_history(*rseg, head, + !rseg->trx_ref_count && + rseg->needs_purge <= head.trx_no); mutex_exit(&rseg->mutex); } } From 7737f15f8740ab994d704200a58bf62aa2f3c875 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Wed, 24 May 2023 14:34:57 +0530 Subject: [PATCH 44/76] MDEV-31333 fsp_free_page() fails to move the extent from FSP_FREE_FRAG to FSP_FREE list - This issue was caused by commit 0b47c126e31cddda1e94588799599e138400bcf8. In fsp_free_page(), InnoDB should set XDES_FREE_BIT of the page before moving the extent from FSP_FREE_FRAG to FSP_FREE list. 
--- storage/innobase/fsp/fsp0fsp.cc | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index 514083d35cc..8618c7e2e0e 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -1286,23 +1286,20 @@ static dberr_t fsp_free_page(fil_space_t *space, page_no_t offset, mtr_t *mtr) + header->page.frame, frag_n_used - 1); } + mtr->free(*space, static_cast(offset)); + xdes_set_free(*xdes, descr, offset % FSP_EXTENT_SIZE, mtr); + ut_ad(err == DB_SUCCESS); + if (!xdes_get_n_used(descr)) { /* The extent has become free: move it to another list */ err = flst_remove(header, FSP_HEADER_OFFSET + FSP_FREE_FRAG, xdes, xoffset, mtr); - if (UNIV_UNLIKELY(err != DB_SUCCESS)) { - return err; - } - err = fsp_free_extent(space, offset, mtr); - if (UNIV_UNLIKELY(err != DB_SUCCESS)) { - return err; + if (err == DB_SUCCESS) { + err = fsp_free_extent(space, offset, mtr); } } - mtr->free(*space, static_cast(offset)); - xdes_set_free(*xdes, descr, offset % FSP_EXTENT_SIZE, mtr); - - return DB_SUCCESS; + return err; } /** @return Number of segment inodes which fit on a single page */ From e9fe39d56686277f6e503be65f04dec58486e952 Mon Sep 17 00:00:00 2001 From: Monty Date: Tue, 23 May 2023 15:32:50 +0300 Subject: [PATCH 45/76] MDEV-7389 Request: log warnings into SQL_ERROR_LOG Changes: - Audit_null records and displays warning count - sql_error_log prints warnings Reviewer: Alexey Botchkov --- include/mysql/plugin_audit.h | 1 + mysql-test/suite/plugins/r/audit_null.result | 2 ++ .../suite/plugins/r/server_audit.result | 22 +++++++++++++++++++ .../suite/plugins/r/sql_error_log.result | 9 ++++++++ mysql-test/suite/plugins/t/server_audit.test | 6 +++++ mysql-test/suite/plugins/t/sql_error_log.test | 11 +++++++--- plugin/audit_null/audit_null.c | 6 +++++ plugin/sql_errlog/sql_errlog.c | 15 ++++++++----- sql/sql_class.cc | 18 +++++++-------- 9 files changed, 71 insertions(+), 19 deletions(-) diff --git a/include/mysql/plugin_audit.h b/include/mysql/plugin_audit.h index e99c01376e6..bfa6621b007 100644 --- a/include/mysql/plugin_audit.h +++ b/include/mysql/plugin_audit.h @@ -48,6 +48,7 @@ extern "C" { #define MYSQL_AUDIT_GENERAL_ERROR 1 #define MYSQL_AUDIT_GENERAL_RESULT 2 #define MYSQL_AUDIT_GENERAL_STATUS 3 +#define MYSQL_AUDIT_GENERAL_WARNING 4 struct mysql_event_general { diff --git a/mysql-test/suite/plugins/r/audit_null.result b/mysql-test/suite/plugins/r/audit_null.result index ada85b661ee..45fe05d999b 100644 --- a/mysql-test/suite/plugins/r/audit_null.result +++ b/mysql-test/suite/plugins/r/audit_null.result @@ -14,6 +14,7 @@ Audit_null_called 9 Audit_null_general_error 1 Audit_null_general_log 3 Audit_null_general_result 2 +Audit_null_general_warning 1 create procedure au1(x char(16)) select concat("test1", x); call au1("-12"); concat("test1", x) @@ -24,6 +25,7 @@ Audit_null_called 22 Audit_null_general_error 1 Audit_null_general_log 7 Audit_null_general_result 5 +Audit_null_general_warning 1 create table t1 (a int); insert t1 values (1), (2); select * from t1; diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result index 212f27fdf84..75cefc34074 100644 --- a/mysql-test/suite/plugins/r/server_audit.result +++ b/mysql-test/suite/plugins/r/server_audit.result @@ -268,6 +268,13 @@ drop database sa_db; select 
length('01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'); length('0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456 2750 +CREATE TABLE test.t1 (a char(4)); +set sql_mode=""; +insert into test.t1 value("12345"); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +set sql_mode=default; +drop table test.t1; set global server_audit_file_path='.'; show status like 'server_audit_current_log'; Variable_name Value @@ -505,6 +512,21 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proc, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,event, TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop database sa_db',0 
TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'select length(\'012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567',0 +TIME,HOSTNAME,root,localhost,ID,ID,CREATE,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE TABLE test.t1 (a char(4))',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set sql_mode=""',0 +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats, +TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats, +TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into test.t1 value("12345")',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SHOW WARNINGS',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set sql_mode=default',0 +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,table_stats, +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,column_stats, +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,index_stats, +TIME,HOSTNAME,root,localhost,ID,ID,DROP,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop table test.t1',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_file_path=\'.\'',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_file_path=\'.\'',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show status like \'server_audit_current_log\'',0 diff --git a/mysql-test/suite/plugins/r/sql_error_log.result b/mysql-test/suite/plugins/r/sql_error_log.result index 98dfe0374fd..34c711a1e8d 100644 --- a/mysql-test/suite/plugins/r/sql_error_log.result +++ b/mysql-test/suite/plugins/r/sql_error_log.result @@ -44,6 +44,13 @@ END| CALL e1(); ERROR 42S02: Table 'test.non_exists' doesn't exist DROP PROCEDURE e1; +CREATE TABLE t1 (a char(4)); +set sql_mode=""; +insert into t1 value("12345"); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +set sql_mode=default; +drop table t1; uninstall plugin SQL_ERROR_LOG; Warnings: Warning 1620 Plugin is busy and will be uninstalled on shutdown @@ -55,3 +62,5 @@ MYSQL_ERRNO = 1000, MESSAGE_TEXT = 'new message' TIME HOSTNAME ERROR 1366: Incorrect integer value: 'aa' for column `test`.`t1`.`id` at row 1 : insert into t1 values ('aa') TIME HOSTNAME ERROR 1146: Table 'test.non_exists' doesn't exist : INSERT INTO test.non_exists VALUES (0,0,0) /* e1 */ +TIME HOSTNAME WARNING 1265: Data truncated for column 'a' at row 1 : insert into t1 value("12345") +TIME HOSTNAME WARNING 1620: Plugin is busy and will be uninstalled on shutdown : uninstall plugin SQL_ERROR_LOG diff --git a/mysql-test/suite/plugins/t/server_audit.test 
b/mysql-test/suite/plugins/t/server_audit.test index 1beeaff7538..675b82522ca 100644 --- a/mysql-test/suite/plugins/t/server_audit.test +++ b/mysql-test/suite/plugins/t/server_audit.test @@ -218,6 +218,12 @@ drop database sa_db; select length('01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'); +CREATE TABLE test.t1 (a char(4)); +set sql_mode=""; +insert into test.t1 value("12345"); +set sql_mode=default; +drop table test.t1; + set global server_audit_file_path='.'; --replace_regex /\.[\\\/]/HOME_DIR\// show status like 'server_audit_current_log'; diff --git a/mysql-test/suite/plugins/t/sql_error_log.test b/mysql-test/suite/plugins/t/sql_error_log.test index 6c83e9655ce..03d494031a2 100644 --- a/mysql-test/suite/plugins/t/sql_error_log.test +++ b/mysql-test/suite/plugins/t/sql_error_log.test @@ -1,4 +1,3 @@ - --source 
include/not_embedded.inc if (!$SQL_ERRLOG_SO) { @@ -66,10 +65,16 @@ DELIMITER ;| CALL e1(); DROP PROCEDURE e1; +CREATE TABLE t1 (a char(4)); +set sql_mode=""; +insert into t1 value("12345"); +set sql_mode=default; +drop table t1; + uninstall plugin SQL_ERROR_LOG; let $MYSQLD_DATADIR= `SELECT @@datadir`; # replace the timestamp and the hostname with constant values ---replace_regex /[1-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] [ 0-9][0-9]:[0-9][0-9]:[0-9][0-9] [^E]*/TIME HOSTNAME / +--replace_regex /[1-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] [ 0-9][0-9]:[0-9][0-9]:[0-9][0-9] [^EW]*/TIME HOSTNAME / cat_file $MYSQLD_DATADIR/sql_errors.log; - +remove_file $MYSQLD_DATADIR/sql_errors.log; diff --git a/plugin/audit_null/audit_null.c b/plugin/audit_null/audit_null.c index 6e084c59b77..8f308b7377c 100644 --- a/plugin/audit_null/audit_null.c +++ b/plugin/audit_null/audit_null.c @@ -30,6 +30,7 @@ static volatile int ncalls; /* for SHOW STATUS, see below */ static volatile int ncalls_general_log; static volatile int ncalls_general_error; +static volatile int ncalls_general_warning; static volatile int ncalls_general_result; FILE *f; @@ -53,6 +54,7 @@ static int audit_null_plugin_init(void *arg __attribute__((unused))) ncalls= 0; ncalls_general_log= 0; ncalls_general_error= 0; + ncalls_general_warning= 0; ncalls_general_result= 0; f = fopen("audit_null_tables.log", "w"); @@ -113,6 +115,9 @@ static void audit_null_notify(MYSQL_THD thd __attribute__((unused)), case MYSQL_AUDIT_GENERAL_ERROR: ncalls_general_error++; break; + case MYSQL_AUDIT_GENERAL_WARNING: + ncalls_general_warning++; + break; case MYSQL_AUDIT_GENERAL_RESULT: ncalls_general_result++; break; @@ -179,6 +184,7 @@ static struct st_mysql_show_var simple_status[]= { "general_error", (char *) &ncalls_general_error, SHOW_INT }, { "general_log", (char *) &ncalls_general_log, SHOW_INT }, { "general_result", (char *) &ncalls_general_result, SHOW_INT }, + { "general_warning", (char *) &ncalls_general_error, SHOW_INT }, { 0, 0, 0} }; diff --git a/plugin/sql_errlog/sql_errlog.c b/plugin/sql_errlog/sql_errlog.c index e0ebd6b7737..1454d4bd4dc 100644 --- a/plugin/sql_errlog/sql_errlog.c +++ b/plugin/sql_errlog/sql_errlog.c @@ -84,8 +84,11 @@ static void log_sql_errors(MYSQL_THD thd __attribute__((unused)), const struct mysql_event_general *event = (const struct mysql_event_general*)ev; if (rate && - event->event_subclass == MYSQL_AUDIT_GENERAL_ERROR) + (event->event_subclass == MYSQL_AUDIT_GENERAL_ERROR || + event->event_subclass == MYSQL_AUDIT_GENERAL_WARNING)) { + const char *type= (event->event_subclass == MYSQL_AUDIT_GENERAL_ERROR ? 
+ "ERROR" : "WARNING"); if (++count >= rate) { struct tm t; @@ -94,11 +97,11 @@ static void log_sql_errors(MYSQL_THD thd __attribute__((unused)), count = 0; (void) localtime_r(&event_time, &t); logger_printf(logfile, "%04d-%02d-%02d %2d:%02d:%02d " - "%s ERROR %d: %s : %s\n", - t.tm_year + 1900, t.tm_mon + 1, - t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, - event->general_user, event->general_error_code, - event->general_command, event->general_query); + "%s %s %d: %s : %s\n", + t.tm_year + 1900, t.tm_mon + 1, + t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, + event->general_user, type, event->general_error_code, + event->general_command, event->general_query); } } } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index e0c6c15a3e2..74f9e80df7e 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1082,19 +1082,13 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) goto ret; switch (level) { - case Sql_condition::WARN_LEVEL_NOTE: case Sql_condition::WARN_LEVEL_WARN: + mysql_audit_general(this, MYSQL_AUDIT_GENERAL_WARNING, sql_errno, msg); + /* fall through */ + case Sql_condition::WARN_LEVEL_NOTE: got_warning= 1; break; case Sql_condition::WARN_LEVEL_ERROR: - break; - case Sql_condition::WARN_LEVEL_END: - /* Impossible */ - break; - } - - if (level == Sql_condition::WARN_LEVEL_ERROR) - { mysql_audit_general(this, MYSQL_AUDIT_GENERAL_ERROR, sql_errno, msg); is_slave_error= 1; // needed to catch query errors during replication @@ -1103,7 +1097,7 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) /* With wsrep we allow converting BF abort error to warning if errors are ignored. - */ + */ if (!is_fatal_error && no_errors && (wsrep_trx().bf_aborted() || wsrep_retry_counter)) { @@ -1118,6 +1112,10 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) da->set_error_status(sql_errno, msg, sqlstate, *cond, raised); } } + break; + case Sql_condition::WARN_LEVEL_END: + /* Impossible */ + break; } query_cache_abort(this, &query_cache_tls); From d77d9e1f6fb4333567a4701837806b1b8f5f5740 Mon Sep 17 00:00:00 2001 From: Monty Date: Wed, 24 May 2023 13:35:22 +0300 Subject: [PATCH 46/76] MENT-1703 Repeatable crash during backup after processing very large ibdata1 The crash happened in filename_to_spacename() when using it on a filename that is not in the format of "./database/table.ibd". According to Marko, it is possible the function is called with the path to an undo file, which would cause a crash. This patch fixes this by, instead of crashing with unexpected filenames, returning them 'as such', except for changing all '\' to '/'. 
--- extra/mariabackup/xtrabackup.cc | 61 ++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 20 deletions(-) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index cccf5abb01b..52174388b87 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -851,28 +851,49 @@ void mdl_lock_all() // Convert non-null terminated filename to space name +// Note that in 10.6 the filename may be an undo file name static std::string filename_to_spacename(const void *filename, size_t len) { - // null- terminate filename - char *f = (char *)malloc(len + 1); - ut_a(f); - memcpy(f, filename, len); - f[len] = 0; - for (size_t i = 0; i < len; i++) - if (f[i] == '\\') - f[i] = '/'; - char *p = strrchr(f, '.'); - ut_a(p); - *p = 0; - char *table = strrchr(f, '/'); - ut_a(table); - *table = 0; - char *db = strrchr(f, '/'); - ut_a(db); - *table = '/'; - std::string s(db+1); - free(f); - return s; + char f[FN_REFLEN]; + char *p= 0, *table, *db; + DBUG_ASSERT(len = FN_REFLEN); + + strmake(f, (const char*) filename, len); + +#ifdef _WIN32 + for (size_t i = 0; i < len; i++) + { + if (f[i] == '\\') + f[i] = '/'; + } +#endif + + /* Remove extension, if exists */ + if (!(p= strrchr(f, '.'))) + goto err; + *p= 0; + + /* Find table name */ + if (!(table= strrchr(f, '/'))) + goto err; + *table = 0; + + /* Find database name */ + db= strrchr(f, '/'); + *table = '/'; + if (!db) + goto err; + { + std::string s(db+1); + return s; + } + +err: + /* Not a database/table. Return original (converted) name */ + if (p) + *p= '.'; // Restore removed extension + std::string s(f); + return s; } /** Report an operation to create, delete, or rename a file during backup. From 9b3084b7be6ac63193da72a40f1ed65af0a5f340 Mon Sep 17 00:00:00 2001 From: Monty Date: Wed, 24 May 2023 17:32:19 +0300 Subject: [PATCH 47/76] Fixed typo in xtrabackup.c --- extra/mariabackup/xtrabackup.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 52174388b87..83e3806eea1 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -856,7 +856,7 @@ static std::string filename_to_spacename(const void *filename, size_t len) { char f[FN_REFLEN]; char *p= 0, *table, *db; - DBUG_ASSERT(len = FN_REFLEN); + DBUG_ASSERT(len < FN_REFLEN); strmake(f, (const char*) filename, len); From 9edb1a5ce3d56ffc4ce7be1e698d55fecac0fe09 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Wed, 24 May 2023 13:17:47 +0400 Subject: [PATCH 48/76] MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" Problem: Field_timestampf implementations differ in MySQL and MariaDB: - MariaDB sets the UNSIGNED_FLAG in Field::flags - MySQL does not The reference table structures (defined in table_stats_schema and index_stats_schema) expected the last_update column to have the DATA_UNSIGNED flag, because MariaDB's Field_timestampf has the UNSIGNED_FLAG. It worked fine on pure MariaDB installations. However, if a MariaDB server starts over a MySQL-5.7 data directory during a migration, the last_update column does not have DATA_UNSIGNED flag, because MySQL's Field_timestampf does not have the UNSIGNED_FLAG. This made InnoDB (after the migration from MySQL) complain into the server error log about the unexpected data type. 
The actual fix is done in storage/innobase/dict/dict0stats.cc: It removes DATA_UNSIGNED from the prtype_mask member of the reference columns, so now it does not require the underlying columns to have this flag. The rest of the fix is needed for MTR tests. The new data type plugin TYPE_MYSQL_TIMESTAMP implements a slightly modified version of Field_timestampf, which removes the unsigned flag, so it works like MySQL's Field_timestampf. The MTR test ALTERs the data type of the columns table_stats_schema.last_update and index_stats_schema.last_update from TIMESTAMP to TYPE_MYSQL_TIMESTAMP, then makes InnoDB verify the structure of the two statistics tables by creating and populating an InnoDB table t1. Without the fix made storage/innobase/dict/dict0stats.cc, MTR complains about unexpected warnings in the server error log: [ERROR] InnoDB: Column last_update in table mysql.innodb_table_stats is ... [ERROR] InnoDB: Column last_update in table mysql.innodb_index_stats is ... With the fix made storage/innobase/dict/dict0stats.cc these warnings go away. --- plugin/type_mysql_timestamp/CMakeLists.txt | 17 ++ .../mysql-test/type_mysql_timestamp/suite.opt | 1 + .../mysql-test/type_mysql_timestamp/suite.pm | 10 ++ .../type_mysql_timestamp.result | 45 +++++ .../type_mysql_timestamp.test | 31 ++++ .../type_mysql_timestamp_stat_tables.result | 73 ++++++++ .../type_mysql_timestamp_stat_tables.test | 24 +++ plugin/type_mysql_timestamp/plugin.cc | 161 ++++++++++++++++++ sql/field.h | 2 +- sql/sql_type.h | 5 +- storage/innobase/dict/dict0stats.cc | 18 +- 11 files changed, 382 insertions(+), 5 deletions(-) create mode 100644 plugin/type_mysql_timestamp/CMakeLists.txt create mode 100644 plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt create mode 100644 plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm create mode 100644 plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result create mode 100644 plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test create mode 100644 plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result create mode 100644 plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test create mode 100644 plugin/type_mysql_timestamp/plugin.cc diff --git a/plugin/type_mysql_timestamp/CMakeLists.txt b/plugin/type_mysql_timestamp/CMakeLists.txt new file mode 100644 index 00000000000..ca7bf1e7704 --- /dev/null +++ b/plugin/type_mysql_timestamp/CMakeLists.txt @@ -0,0 +1,17 @@ +# Copyright (c) 2019, MariaDB corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +MYSQL_ADD_PLUGIN(type_mysql_timestamp plugin.cc RECOMPILE_FOR_EMBEDDED + MODULE_ONLY COMPONENT Test) diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt new file mode 100644 index 00000000000..e9e2a99b589 --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt @@ -0,0 +1 @@ +--plugin-load-add=$TYPE_MYSQL_TIMESTAMP_SO diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm new file mode 100644 index 00000000000..cbb8f1b097f --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm @@ -0,0 +1,10 @@ +package My::Suite::Type_test; + +@ISA = qw(My::Suite); + +return "No TYPE_TEST plugin" unless $ENV{TYPE_MYSQL_TIMESTAMP_SO}; + +sub is_default { 1 } + +bless { }; + diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result new file mode 100644 index 00000000000..009b3a7c47c --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result @@ -0,0 +1,45 @@ +# +# MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +# +SELECT +PLUGIN_NAME, +PLUGIN_VERSION, +PLUGIN_STATUS, +PLUGIN_TYPE, +PLUGIN_AUTHOR, +PLUGIN_DESCRIPTION, +PLUGIN_LICENSE, +PLUGIN_MATURITY, +PLUGIN_AUTH_VERSION +FROM INFORMATION_SCHEMA.PLUGINS +WHERE PLUGIN_TYPE='DATA TYPE' + AND PLUGIN_NAME LIKE 'type_mysql_timestamp'; +PLUGIN_NAME type_mysql_timestamp +PLUGIN_VERSION 1.0 +PLUGIN_STATUS ACTIVE +PLUGIN_TYPE DATA TYPE +PLUGIN_AUTHOR MariaDB Corporation +PLUGIN_DESCRIPTION Data type TYPE_MYSQL_TIMESTAMP +PLUGIN_LICENSE GPL +PLUGIN_MATURITY Experimental +PLUGIN_AUTH_VERSION 1.0 +CREATE TABLE t1 (a TYPE_MYSQL_TIMESTAMP); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` type_mysql_timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp() +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE TABLE t1 (a TIMESTAMP); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp() +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +ALTER TABLE t1 MODIFY a TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` type_mysql_timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp() +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test new file mode 100644 index 00000000000..a7aaa5a3e4c --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test @@ -0,0 +1,31 @@ +--source include/have_innodb.inc + +--echo # +--echo # MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column 
last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +--echo # + +--vertical_results +SELECT + PLUGIN_NAME, + PLUGIN_VERSION, + PLUGIN_STATUS, + PLUGIN_TYPE, + PLUGIN_AUTHOR, + PLUGIN_DESCRIPTION, + PLUGIN_LICENSE, + PLUGIN_MATURITY, + PLUGIN_AUTH_VERSION +FROM INFORMATION_SCHEMA.PLUGINS + WHERE PLUGIN_TYPE='DATA TYPE' + AND PLUGIN_NAME LIKE 'type_mysql_timestamp'; +--horizontal_results + +CREATE TABLE t1 (a TYPE_MYSQL_TIMESTAMP); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a TIMESTAMP); +SHOW CREATE TABLE t1; +ALTER TABLE t1 MODIFY a TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE t1; +DROP TABLE t1; diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result new file mode 100644 index 00000000000..e48f29c9a26 --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result @@ -0,0 +1,73 @@ +# +# MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +# +SET @@global.innodb_stats_persistent=0; +SHOW CREATE TABLE mysql.innodb_table_stats; +Table Create Table +innodb_table_stats CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_table_stats; +Table Create Table +innodb_table_stats CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `last_update` type_mysql_timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_index_stats; +Table Create Table +innodb_index_stats CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `index_name` varchar(64) NOT NULL, + `last_update` type_mysql_timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +SET @@global.innodb_stats_persistent=1; +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (10); +DROP TABLE t1; +SET @@global.innodb_stats_persistent=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +SHOW CREATE TABLE 
mysql.innodb_table_stats; +Table Create Table +innodb_table_stats CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_index_stats; +Table Create Table +innodb_index_stats CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `index_name` varchar(64) NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +SET @@global.innodb_stats_persistent=1; diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test new file mode 100644 index 00000000000..d22c94c6f82 --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test @@ -0,0 +1,24 @@ +--source include/have_innodb.inc + +--echo # +--echo # MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +--echo # + +SET @@global.innodb_stats_persistent=0; +SHOW CREATE TABLE mysql.innodb_table_stats; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_table_stats; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_index_stats; +SET @@global.innodb_stats_persistent=1; + +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (10); +DROP TABLE t1; + +SET @@global.innodb_stats_persistent=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_table_stats; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; +SHOW CREATE TABLE mysql.innodb_index_stats; +SET @@global.innodb_stats_persistent=1; diff --git a/plugin/type_mysql_timestamp/plugin.cc b/plugin/type_mysql_timestamp/plugin.cc new file mode 100644 index 00000000000..f361ab6c0eb --- /dev/null +++ b/plugin/type_mysql_timestamp/plugin.cc @@ -0,0 +1,161 @@ +/* + Copyright (c) 2023, MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include +#include +#include +#include "sql_type.h" + + +class Type_collection_local: public Type_collection +{ +protected: + const Type_handler *aggregate_common(const Type_handler *h1, + const Type_handler *h2) const + { + if (h1 == h2) + return h1; + return NULL; + } +public: + const Type_handler *handler_by_name(const LEX_CSTRING &name) const override + { + return NULL; + } + + const Type_handler *aggregate_for_result(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } + + const Type_handler *aggregate_for_comparison(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } + + const Type_handler *aggregate_for_min_max(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } + + const Type_handler *aggregate_for_num_op(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } +}; + + +static Type_collection_local type_collection_local; + + +/* + A more MySQL compatible Field: + it does not set the UNSIGNED_FLAG. + This is how MySQL's Field_timestampf works. +*/ +class Field_mysql_timestampf :public Field_timestampf +{ +public: + Field_mysql_timestampf(const LEX_CSTRING &name, + const Record_addr &addr, + enum utype unireg_check_arg, + TABLE_SHARE *share, decimal_digits_t dec_arg) + :Field_timestampf(addr.ptr(), addr.null_ptr(), addr.null_bit(), + unireg_check_arg, &name, share, dec_arg) + { + flags&= ~UNSIGNED_FLAG; // MySQL compatibility + } + void sql_type(String &str) const override + { + sql_type_opt_dec_comment(str, + Field_mysql_timestampf::type_handler()->name(), + dec, type_version_mysql56()); + } + const Type_handler *type_handler() const override; +}; + + +class Type_handler_mysql_timestamp2: public Type_handler_timestamp2 +{ +public: + const Type_collection *type_collection() const override + { + return &type_collection_local; + } + Field *make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *root, + const LEX_CSTRING *name, + const Record_addr &rec, const Bit_addr &bit, + const Column_definition_attributes *attr, + uint32 flags) const override + { + return new (root) + Field_mysql_timestampf(*name, rec, attr->unireg_check, share, + attr->temporal_dec(MAX_DATETIME_WIDTH)); + } + void Column_definition_implicit_upgrade(Column_definition *c) const override + { + /* + Suppress the automatic upgrade depending on opt_mysql56_temporal_format, + derived from Type_handler_timestamp_common. 
+ */ + } +}; + + +static Type_handler_mysql_timestamp2 type_handler_mysql_timestamp2; + + +const Type_handler *Field_mysql_timestampf::type_handler() const +{ + return &type_handler_mysql_timestamp2; +} + + +static struct st_mariadb_data_type plugin_descriptor_type_mysql_timestamp= +{ + MariaDB_DATA_TYPE_INTERFACE_VERSION, + &type_handler_mysql_timestamp2 +}; + + + +/*************************************************************************/ + +maria_declare_plugin(type_mysql_timestamp) +{ + MariaDB_DATA_TYPE_PLUGIN, // the plugin type (see include/mysql/plugin.h) + &plugin_descriptor_type_mysql_timestamp, // pointer to type-specific plugin descriptor + "type_mysql_timestamp", // plugin name + "MariaDB Corporation", // plugin author + "Data type TYPE_MYSQL_TIMESTAMP", // the plugin description + PLUGIN_LICENSE_GPL, // the plugin license (see include/mysql/plugin.h) + 0, // Pointer to plugin initialization function + 0, // Pointer to plugin deinitialization function + 0x0100, // Numeric version 0xAABB means AA.BB version + NULL, // Status variables + NULL, // System variables + "1.0", // String version representation + MariaDB_PLUGIN_MATURITY_EXPERIMENTAL // Maturity(see include/mysql/plugin.h)*/ +} +maria_declare_plugin_end; diff --git a/sql/field.h b/sql/field.h index b146ded321d..fd7336afd2c 100644 --- a/sql/field.h +++ b/sql/field.h @@ -3336,7 +3336,7 @@ public: /** TIMESTAMP(0..6) - MySQL56 version */ -class Field_timestampf final :public Field_timestamp_with_dec { +class Field_timestampf :public Field_timestamp_with_dec { void store_TIMEVAL(const timeval &tv) override; public: Field_timestampf(uchar *ptr_arg, diff --git a/sql/sql_type.h b/sql/sql_type.h index b082449639b..479f924a727 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -7593,8 +7593,9 @@ extern Named_type_handler type_handler_time; extern Named_type_handler type_handler_time2; extern Named_type_handler type_handler_datetime; extern Named_type_handler type_handler_datetime2; -extern Named_type_handler type_handler_timestamp; -extern Named_type_handler type_handler_timestamp2; + +extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_timestamp; +extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_timestamp2; extern Type_handler_interval_DDhhmmssff type_handler_interval_DDhhmmssff; diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 7bdccd899b8..b5291ccbd87 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -204,7 +204,17 @@ static const dict_table_schema_t table_stats_schema = { {"database_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, {"table_name", DATA_VARMYSQL, DATA_NOT_NULL, 597}, - {"last_update", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 4}, + /* + Don't check the DATA_UNSIGNED flag in last_update. + It presents if the server is running in a pure MariaDB installation, + because MariaDB's Field_timestampf::flags has UNSIGNED_FLAG. + But DATA_UNSIGNED misses when the server starts on a MySQL-5.7 directory + (during a migration), because MySQL's Field_timestampf::flags does not + have UNSIGNED_FLAG. + This is fine not to check DATA_UNSIGNED, because Field_timestampf + in both MariaDB and MySQL support only non-negative time_t values. 
+ */ + {"last_update", DATA_INT, DATA_NOT_NULL, 4}, {"n_rows", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, {"clustered_index_size", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, {"sum_of_other_index_sizes", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, @@ -218,7 +228,11 @@ static const dict_table_schema_t index_stats_schema = {"database_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, {"table_name", DATA_VARMYSQL, DATA_NOT_NULL, 597}, {"index_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, - {"last_update", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 4}, + /* + Don't check the DATA_UNSIGNED flag in last_update. + See comments about last_update in table_stats_schema above. + */ + {"last_update", DATA_INT, DATA_NOT_NULL, 4}, {"stat_name", DATA_VARMYSQL, DATA_NOT_NULL, 64*3}, {"stat_value", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, {"sample_size", DATA_INT, DATA_UNSIGNED, 8}, From 03a9366c73263b62a847372ca42fc8a65ae4bc54 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Fri, 26 May 2023 16:04:02 +0400 Subject: [PATCH 49/76] Extra tests for MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" Adding tests demonstrating that columns: - mysql.innodb_table_stats.last_update - mysql.innodb_index_stats.last_update contain sane values close to NOW() rathar than a garbage. Tests cover these three underlying TIMESTAMP data formats: - MariaDB Field_timestamp0 - UINT4 based Like in a MariaDB native installation running with mysql56_temporal_format=0 - MariaDB Field_timestampf - BINARY(4) based, with UNSIGNED_FLAG Like in a MariaDB native installation running with mysql56_temporal_format=1 - MySQL-alike Field_timestampf - BINARY(4) based, without UNSIGNED_FLAG Like with a MariaDB server running over a MySQL-5.6 directory (e.g. during a migragion). 
--- mysql-test/suite/innodb/r/stat_tables.result | 56 +++++++++++++++++++ mysql-test/suite/innodb/t/stat_tables.test | 53 ++++++++++++++++++ .../type_mysql_timestamp_stat_tables.result | 35 ++++++++++++ .../type_mysql_timestamp_stat_tables.test | 38 +++++++++++++ 4 files changed, 182 insertions(+) diff --git a/mysql-test/suite/innodb/r/stat_tables.result b/mysql-test/suite/innodb/r/stat_tables.result index c1ce6fc8fce..d91b854d347 100644 --- a/mysql-test/suite/innodb/r/stat_tables.result +++ b/mysql-test/suite/innodb/r/stat_tables.result @@ -26,4 +26,60 @@ UPDATE mysql.innodb_table_stats SET last_update=NULL WHERE table_name='t1'; XA END 'test'; XA ROLLBACK 'test'; DROP TABLE t1; +# +# MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +# +# +# Testing a non-default format: Field_timestamp0 - UINT4 based +# +SET @@global.mysql56_temporal_format=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp /* mariadb-5.3 */ NO current_timestamp() on update current_timestamp() +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp /* mariadb-5.3 */ NO current_timestamp() on update current_timestamp() +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +DROP TABLE t1; +# +# Now as the table t1 is dropped, expect no statistics +# +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +database_name table_name last_update n_rows clustered_index_size sum_of_other_index_sizes +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +database_name table_name index_name last_update stat_name stat_value sample_size stat_description +# +# Testing with the default format: Field_timestampf - BINARY(4) based with the UNSIGNED_FLAG +# +SET @@global.mysql56_temporal_format=1; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp NO current_timestamp() on update current_timestamp() +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp NO current_timestamp() on update current_timestamp() +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +DROP TABLE t1; # End of 10.6 tests diff --git 
a/mysql-test/suite/innodb/t/stat_tables.test b/mysql-test/suite/innodb/t/stat_tables.test index dd18c265e99..359fb1e00ec 100644 --- a/mysql-test/suite/innodb/t/stat_tables.test +++ b/mysql-test/suite/innodb/t/stat_tables.test @@ -28,4 +28,57 @@ XA END 'test'; XA ROLLBACK 'test'; DROP TABLE t1; +--echo # +--echo # MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +--echo # + +# The following tests demonstrate that these columns: +# - innodb_table_stats.last_update +# - innodb_index_stats.last_update +# have sane values close to NOW(), rather than any garbage, +# with all TIMESTAMP formats. + +--echo # +--echo # Testing a non-default format: Field_timestamp0 - UINT4 based +--echo # + +SET @@global.mysql56_temporal_format=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; + +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +DROP TABLE t1; + +--echo # +--echo # Now as the table t1 is dropped, expect no statistics +--echo # + +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; + +--echo # +--echo # Testing with the default format: Field_timestampf - BINARY(4) based with the UNSIGNED_FLAG +--echo # + +SET @@global.mysql56_temporal_format=1; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +DROP TABLE t1; + + --echo # End of 10.6 tests diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result index e48f29c9a26..381daff5497 100644 --- a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result @@ -71,3 +71,38 @@ innodb_index_stats CREATE TABLE `innodb_index_stats` ( PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 SET @@global.innodb_stats_persistent=1; +# +# Testing MySQL-5.6-alike Field_timestampf: BINARY(4) based, without UNSIGNED_FLAG +# +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update 
TYPE_MYSQL_TIMESTAMP; +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update type_mysql_timestamp NO current_timestamp() on update current_timestamp() +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update type_mysql_timestamp NO current_timestamp() on update current_timestamp() +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +DROP TABLE t1; +# +# Now as the table t1 is dropped, expect no statistics +# +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +database_name table_name last_update n_rows clustered_index_size sum_of_other_index_sizes +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +database_name table_name index_name last_update stat_name stat_value sample_size stat_description +# +# Restore the structure of the tables +# +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test index d22c94c6f82..22d8ed4e97e 100644 --- a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test @@ -22,3 +22,41 @@ SHOW CREATE TABLE mysql.innodb_table_stats; ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; SHOW CREATE TABLE mysql.innodb_index_stats; SET @@global.innodb_stats_persistent=1; + + +# The following test demonstrate that these columns: +# - innodb_table_stats.last_update +# - innodb_index_stats.last_update +# have sane values close to NOW(), rather than any garbage, +# with MySQL-alike Field_timestampf + +--echo # +--echo # Testing MySQL-5.6-alike Field_timestampf: BINARY(4) based, without UNSIGNED_FLAG +--echo # + +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP; +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +DROP TABLE t1; + +--echo # +--echo # Now as the table t1 is dropped, expect no statistics +--echo # + +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; + +--echo # +--echo # Restore the structure of the tables +--echo # + +ALTER TABLE mysql.innodb_table_stats MODIFY last_update 
TIMESTAMP; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP; From db8765500eaa83464106c7d68be2c647e034fef2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 26 May 2023 16:16:10 +0300 Subject: [PATCH 50/76] MDEV-31343 Another server hang with innodb_undo_log_truncate=ON trx_purge_truncate_history(): While waiting for a write-fixed block to become available, simply wait for an exclusive latch on it. Also, simplify the iteration: first check for oldest_modification>2 (to ignore clean pages or pages belonging to the temporary tablespace) and then compare the tablespace identifier. Before releasing buf_pool.flush_list_mutex we will buffer-fix the block of interest. In that way, buf_page_t::can_relocate() will not hold on the block and it must remain in the buffer pool until we have acquired an exclusive latch on it. If the block is still dirty, we will register it with the tablespace truncation mini-transaction; else, we will simply release the latch and buffer-fix and move to the next block. This also reverts commit c4d79399895827c592d12b7be4b7ef21443d3a0f because that fix should no longer be necessary; the wait for an exclusive block latch should allow buf_pool_t::release_freed_page() on the same block to proceed. Tested by: Axel Schwenke, Matthias Leich --- storage/innobase/trx/trx0purge.cc | 42 ++++++++++++++++--------------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 239dcfb6438..3cb5e9574fb 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -685,6 +685,7 @@ not_free: mtr_t mtr; mtr.start(); mtr.x_lock_space(&space); + const auto space_id= space.id; /* Lock all modified pages of the tablespace. @@ -694,8 +695,8 @@ not_free: mini-transaction commit and the server was killed, then discarding the to-be-trimmed pages without flushing would break crash recovery. */ + rescan: mysql_mutex_lock(&buf_pool.flush_list_mutex); - for (buf_page_t *bpage= UT_LIST_GET_LAST(buf_pool.flush_list); bpage; ) { ut_ad(bpage->oldest_modification()); @@ -703,46 +704,47 @@ not_free: buf_page_t *prev= UT_LIST_GET_PREV(list, bpage); - if (bpage->id().space() == space.id && - bpage->oldest_modification() != 1) + if (bpage->oldest_modification() > 2 && bpage->id().space() == space_id) { ut_ad(bpage->frame); - auto block= reinterpret_cast(bpage); - if (!bpage->lock.x_lock_try()) + bpage->fix(); { - rescan: - /* Let buf_pool_t::release_freed_page() proceed. */ + /* Try to acquire an exclusive latch while the cache line is + fresh after fix(). */ + const bool got_lock{bpage->lock.x_lock_try()}; + buf_pool.flush_hp.set(prev); mysql_mutex_unlock(&buf_pool.flush_list_mutex); - mysql_mutex_lock(&buf_pool.mutex); - mysql_mutex_lock(&buf_pool.flush_list_mutex); - mysql_mutex_unlock(&buf_pool.mutex); - bpage= UT_LIST_GET_LAST(buf_pool.flush_list); - continue; + if (!got_lock) + bpage->lock.x_lock(); } - buf_pool.flush_hp.set(prev); - mysql_mutex_unlock(&buf_pool.flush_list_mutex); #ifdef BTR_CUR_HASH_ADAPT - ut_ad(!block->index); /* There is no AHI on undo tablespaces. */ + /* There is no AHI on undo tablespaces. 
*/ + ut_ad(!reinterpret_cast(bpage)->index); #endif - bpage->fix(); ut_ad(!bpage->is_io_fixed()); - mysql_mutex_lock(&buf_pool.flush_list_mutex); + ut_ad(bpage->id().space() == space_id); - if (bpage->oldest_modification() > 1) + if (bpage->oldest_modification() > 2) { + mtr.memo_push(reinterpret_cast(bpage), + MTR_MEMO_PAGE_X_FIX); + mysql_mutex_lock(&buf_pool.flush_list_mutex); + ut_ad(bpage->oldest_modification() > 2); bpage->reset_oldest_modification(); - mtr.memo_push(block, MTR_MEMO_PAGE_X_FIX); } else { bpage->unfix(); bpage->lock.x_unlock(); + mysql_mutex_lock(&buf_pool.flush_list_mutex); } if (prev != buf_pool.flush_hp.get()) - /* Rescan, because we may have lost the position. */ + { + mysql_mutex_unlock(&buf_pool.flush_list_mutex); goto rescan; + } } bpage= prev; From e38c075aa0fd7527698430431f8629b01accf980 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 26 May 2023 16:39:46 +0300 Subject: [PATCH 51/76] MDEV-31346 trx_purge_add_undo_to_history() is not optimal trx_undo_set_state_at_finish(): Merge to its only caller, trx_purge_add_undo_to_history(). trx_purge_add_undo_to_history(): Evaluate the condition related to TRX_UNDO_STATE only once. Tested by: Matthias Leich --- storage/innobase/include/trx0undo.h | 8 -- storage/innobase/trx/trx0purge.cc | 204 ++++++++++++++-------------- storage/innobase/trx/trx0undo.cc | 31 ----- 3 files changed, 99 insertions(+), 144 deletions(-) diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index 670fe00c25b..c1435930551 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -216,14 +216,6 @@ buf_block_t* trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo, mtr_t *mtr, dberr_t *err) MY_ATTRIBUTE((nonnull, warn_unused_result)); -/******************************************************************//** -Sets the state of the undo log segment at a transaction finish. -@return undo log segment header page, x-latched */ -buf_block_t* -trx_undo_set_state_at_finish( -/*=========================*/ - trx_undo_t* undo, /*!< in: undo log memory copy */ - mtr_t* mtr); /*!< in: mtr */ /** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK. @param[in,out] trx transaction diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 3cb5e9574fb..7ce6d4ea61d 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -246,127 +246,121 @@ Remove the undo log segment from the rseg slot if it is too big for reuse. void trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) { - DBUG_PRINT("trx", ("commit(" TRX_ID_FMT "," TRX_ID_FMT ")", - trx->id, trx_id_t{trx->rw_trx_hash_element->no})); - ut_ad(undo == trx->rsegs.m_redo.undo); - trx_rseg_t* rseg = trx->rsegs.m_redo.rseg; - ut_ad(undo->rseg == rseg); - buf_block_t* rseg_header = rseg->get(mtr, nullptr); - /* We are in transaction commit; we cannot return an error. If the - database is corrupted, it is better to crash it than to - intentionally violate ACID by committing something that is known to - be corrupted. 
*/ - ut_ad(rseg_header); - buf_block_t* undo_page = trx_undo_set_state_at_finish( - undo, mtr); - trx_ulogf_t* undo_header = undo_page->page.frame - + undo->hdr_offset; + DBUG_PRINT("trx", ("commit(" TRX_ID_FMT "," TRX_ID_FMT ")", + trx->id, trx_id_t{trx->rw_trx_hash_element->no})); + ut_ad(undo->id < TRX_RSEG_N_SLOTS); + ut_ad(undo == trx->rsegs.m_redo.undo); + trx_rseg_t *rseg= trx->rsegs.m_redo.rseg; + ut_ad(undo->rseg == rseg); + buf_block_t *rseg_header= rseg->get(mtr, nullptr); + /* We are in transaction commit; we cannot return an error. If the + database is corrupted, it is better to crash it than to + intentionally violate ACID by committing something that is known to + be corrupted. */ + ut_ad(rseg_header); + buf_block_t *undo_page= + buf_page_get(page_id_t(rseg->space->id, undo->hdr_page_no), 0, + RW_X_LATCH, mtr); + /* This function is invoked during transaction commit, which is not + allowed to fail. If we get a corrupted undo header, we will crash here. */ + ut_a(undo_page); + trx_ulogf_t *undo_header= undo_page->page.frame + undo->hdr_offset; - ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1); - ut_ad(rseg->needs_purge > trx->id); + ut_ad(mach_read_from_2(undo_header + TRX_UNDO_NEEDS_PURGE) <= 1); + ut_ad(rseg->needs_purge > trx->id); - if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT - + rseg_header->page.frame))) { - /* This database must have been upgraded from - before MariaDB 10.3.5. */ - trx_rseg_format_upgrade(rseg_header, mtr); - } + if (rseg->last_page_no == FIL_NULL) + { + rseg->last_page_no= undo->hdr_page_no; + rseg->set_last_commit(undo->hdr_offset, trx->rw_trx_hash_element->no); + } - if (undo->state != TRX_UNDO_CACHED) { - /* The undo log segment will not be reused */ - ut_a(undo->id < TRX_RSEG_N_SLOTS); - static_assert(FIL_NULL == 0xffffffff, ""); - mtr->memset(rseg_header, - TRX_RSEG + TRX_RSEG_UNDO_SLOTS - + undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff); + rseg->history_size++; - uint32_t hist_size = mach_read_from_4( - TRX_RSEG_HISTORY_SIZE + TRX_RSEG - + rseg_header->page.frame); + if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT + + rseg_header->page.frame))) + /* This database must have been upgraded from before MariaDB 10.3.5. 
*/ + trx_rseg_format_upgrade(rseg_header, mtr); - ut_ad(undo->size == flst_get_len(TRX_UNDO_SEG_HDR - + TRX_UNDO_PAGE_LIST - + undo_page->page.frame)); + uint16_t undo_state; - mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_HISTORY_SIZE - + rseg_header->page.frame, - hist_size + undo->size); - mtr->write<8>(*rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID - + rseg_header->page.frame, - trx_sys.get_max_trx_id()); - } + if (undo->size == 1 && + TRX_UNDO_PAGE_REUSE_LIMIT > + mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_page->page.frame)) + { + undo->state= undo_state= TRX_UNDO_CACHED; + UT_LIST_ADD_FIRST(rseg->undo_cached, undo); + } + else + { + ut_ad(undo->size == flst_get_len(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + + undo_page->page.frame)); + /* The undo log segment will not be reused */ + static_assert(FIL_NULL == 0xffffffff, ""); + mtr->memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS + + undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff); + uint32_t hist_size= mach_read_from_4(TRX_RSEG_HISTORY_SIZE + TRX_RSEG + + rseg_header->page.frame); + mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_HISTORY_SIZE + + rseg_header->page.frame, hist_size + undo->size); + mtr->write<8>(*rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID + + rseg_header->page.frame, trx_sys.get_max_trx_id()); + ut_free(undo); + undo_state= TRX_UNDO_TO_PURGE; + } - /* After the purge thread has been given permission to exit, - we may roll back transactions (trx->undo_no==0) - in THD::cleanup() invoked from unlink_thd() in fast shutdown, - or in trx_rollback_recovered() in slow shutdown. + undo= nullptr; - Before any transaction-generating background threads or the - purge have been started, we can - start transactions in row_merge_drop_temp_indexes(), - and roll back recovered transactions. + /* After the purge thread has been given permission to exit, + we may roll back transactions (trx->undo_no==0) + in THD::cleanup() invoked from unlink_thd() in fast shutdown, + or in trx_rollback_recovered() in slow shutdown. - Arbitrary user transactions may be executed when all the undo log - related background processes (including purge) are disabled due to - innodb_force_recovery=2 or innodb_force_recovery=3. - DROP TABLE may be executed at any innodb_force_recovery level. + Before any transaction-generating background threads or the purge + have been started, we can start transactions in + row_merge_drop_temp_indexes(), and roll back recovered transactions. - During fast shutdown, we may also continue to execute - user transactions. */ - ut_ad(srv_undo_sources - || trx->undo_no == 0 - || (!purge_sys.enabled() - && (srv_is_being_started - || trx_rollback_is_active - || srv_force_recovery >= SRV_FORCE_NO_BACKGROUND)) - || srv_fast_shutdown); + Arbitrary user transactions may be executed when all the undo log + related background processes (including purge) are disabled due to + innodb_force_recovery=2 or innodb_force_recovery=3. DROP TABLE may + be executed at any innodb_force_recovery level. -#ifdef WITH_WSREP - if (wsrep_is_wsrep_xid(&trx->xid)) { - trx_rseg_update_wsrep_checkpoint(rseg_header, &trx->xid, mtr); - } + During fast shutdown, we may also continue to execute user + transactions. 
*/ + ut_ad(srv_undo_sources || trx->undo_no == 0 || + (!purge_sys.enabled() && + (srv_is_being_started || + trx_rollback_is_active || + srv_force_recovery >= SRV_FORCE_NO_BACKGROUND)) || + srv_fast_shutdown); + +#ifdef WITH_WSREP + if (wsrep_is_wsrep_xid(&trx->xid)) + trx_rseg_update_wsrep_checkpoint(rseg_header, &trx->xid, mtr); #endif - if (trx->mysql_log_file_name && *trx->mysql_log_file_name) { - /* Update the latest MySQL binlog name and offset info - in rollback segment header if MySQL binlogging is on - or the database server is a MySQL replication save. */ - trx_rseg_update_binlog_offset(rseg_header, trx, mtr); - } + if (trx->mysql_log_file_name && *trx->mysql_log_file_name) + /* Update the latest binlog name and offset if log_bin=ON or this + is a replica. */ + trx_rseg_update_binlog_offset(rseg_header, trx, mtr); - /* Add the log as the first in the history list */ + /* Add the log as the first in the history list */ - /* We are in transaction commit; we cannot return an error - when detecting corruption. It is better to crash the server - than to intentionally violate ACID by committing something - that is known to be corrupted. */ - ut_a(flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY, undo_page, - static_cast(undo->hdr_offset - + TRX_UNDO_HISTORY_NODE), - mtr) == DB_SUCCESS); + /* We are in transaction commit; we cannot return an error + when detecting corruption. It is better to crash the server + than to intentionally violate ACID by committing something + that is known to be corrupted. */ + ut_a(flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY, undo_page, + uint16_t(page_offset(undo_header) + + TRX_UNDO_HISTORY_NODE), mtr) == DB_SUCCESS); - mtr->write<8,mtr_t::MAYBE_NOP>(*undo_page, - undo_header + TRX_UNDO_TRX_NO, - trx->rw_trx_hash_element->no); - mtr->write<2,mtr_t::MAYBE_NOP>(*undo_page, undo_header - + TRX_UNDO_NEEDS_PURGE, 1U); - - if (rseg->last_page_no == FIL_NULL) { - rseg->last_page_no = undo->hdr_page_no; - rseg->set_last_commit(undo->hdr_offset, - trx->rw_trx_hash_element->no); - } - - rseg->history_size++; - - if (undo->state == TRX_UNDO_CACHED) { - UT_LIST_ADD_FIRST(rseg->undo_cached, undo); - } else { - ut_ad(undo->state == TRX_UNDO_TO_PURGE); - ut_free(undo); - } - - undo = NULL; + mtr->write<2>(*undo_page, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + + undo_page->page.frame, undo_state); + mtr->write<8,mtr_t::MAYBE_NOP>(*undo_page, undo_header + TRX_UNDO_TRX_NO, + trx->rw_trx_hash_element->no); + mtr->write<2,mtr_t::MAYBE_NOP>(*undo_page, undo_header + + TRX_UNDO_NEEDS_PURGE, 1U); } /** Free an undo log segment. diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index 20434d9fb9c..4811d2380aa 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -1463,37 +1463,6 @@ template buf_block_t* trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo, mtr_t *mtr, dberr_t *err); -/******************************************************************//** -Sets the state of the undo log segment at a transaction finish. 
-@return undo log segment header page, x-latched */ -buf_block_t* -trx_undo_set_state_at_finish( -/*=========================*/ - trx_undo_t* undo, /*!< in: undo log memory copy */ - mtr_t* mtr) /*!< in: mtr */ -{ - ut_ad(undo->id < TRX_RSEG_N_SLOTS); - ut_ad(undo->rseg->is_persistent()); - - buf_block_t *block= - buf_page_get(page_id_t(undo->rseg->space->id, undo->hdr_page_no), 0, - RW_X_LATCH, mtr); - /* This function is invoked during transaction commit, which is not - allowed to fail. If we get a corrupted undo header, we will crash here. */ - ut_a(block); - const uint16_t state = undo->size == 1 && - TRX_UNDO_PAGE_REUSE_LIMIT > - mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + - block->page.frame) - ? TRX_UNDO_CACHED - : TRX_UNDO_TO_PURGE; - - undo->state= state; - mtr->write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + block->page.frame, - state); - return block; -} - /** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK. @param[in,out] trx transaction @param[in,out] undo undo log From 7b72fc0a57ec594f70837bde470048a0d55383d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 26 May 2023 16:40:02 +0300 Subject: [PATCH 52/76] MDEV-22739 !cursor->index->is_committed() in row0ins.cc row_ins_sec_index_entry_by_modify(): When noticing a corrupted secondary index on which CREATE INDEX is not in progress, return DB_CORRUPTION instead of intentionally crashing the server. Tested by: Matthias Leich --- storage/innobase/row/row0ins.cc | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index bd998094f42..3188c8c27ce 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -214,14 +214,14 @@ row_ins_sec_index_entry_by_modify( made to the clustered index, and completed the secondary index creation before we got here. In this case, the change would already be there. The CREATE - INDEX should be waiting for a MySQL meta-data lock - upgrade at least until this INSERT or UPDATE - returns. After that point, set_committed(true) - would be invoked in commit_inplace_alter_table(). */ + INDEX should be in wait_while_table_is_used() at least + until this INSERT or UPDATE returns. After that point, + set_committed(true) would be invoked in + commit_inplace_alter_table(). */ ut_a(update->n_fields == 0); - ut_a(!cursor->index()->is_committed()); ut_ad(!dict_index_is_online_ddl(cursor->index())); - return(DB_SUCCESS); + return cursor->index()->is_committed() + ? DB_CORRUPTION : DB_SUCCESS; } if (mode == BTR_MODIFY_LEAF) { From ce547cfc0564bf54e73bcc5171a2212d6650eb00 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 26 May 2023 16:40:07 +0300 Subject: [PATCH 53/76] MDEV-31350: Hang in innodb.recovery_memory buf_flush_page_cleaner(): Whenever buf_pool.ran_out(), invoke buf_pool.get_oldest_modification(0) so that all clean blocks will be removed from buf_pool.flush_list and buf_flush_LRU_list_batch() will be able to evict some pages. This fixes a regression that was likely caused by commit a55b951e6082a4ce9a1f2ed5ee176ea7dbbaf1f2 (MDEV-26827). 
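A minimal standalone sketch of that idea, using invented names (block, flush_list, drop_clean_and_get_oldest) rather than the actual buf_pool interface: asking for the oldest modification has the useful side effect of detaching blocks that are already clean, which is what later lets the LRU batch evict something.

    #include <cstdint>
    #include <list>

    // Invented stand-in for a buffer block; oldest_modification == 0 plays the
    // role of "already written back, i.e. clean".
    struct block { uint64_t oldest_modification; };

    // Sketch of the side effect relied upon: drop clean blocks from the flush
    // list while computing the oldest dirty LSN, so eviction can make progress.
    uint64_t drop_clean_and_get_oldest(std::list<block*>& flush_list)
    {
      uint64_t oldest = 0;
      for (auto it = flush_list.begin(); it != flush_list.end(); )
      {
        if ((*it)->oldest_modification == 0)
          it = flush_list.erase(it);        // clean: no longer needs flushing
        else
        {
          if (!oldest || (*it)->oldest_modification < oldest)
            oldest = (*it)->oldest_modification;
          ++it;
        }
      }
      return oldest;
    }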
--- storage/innobase/buf/buf0flu.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 67c79702ec8..9eeecbd1302 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2330,6 +2330,7 @@ static void buf_flush_page_cleaner() else if (buf_pool.ran_out()) { buf_pool.page_cleaner_set_idle(false); + buf_pool.get_oldest_modification(0); mysql_mutex_unlock(&buf_pool.flush_list_mutex); n= srv_max_io_capacity; mysql_mutex_lock(&buf_pool.mutex); From ea66df2f454a09f8182b0951723ef63dcee2332a Mon Sep 17 00:00:00 2001 From: Otto Kekalainen Date: Sat, 27 May 2023 22:42:59 -0700 Subject: [PATCH 54/76] Deb: Fix blocksize check to use $mysql_datadir/$datadir correctly In commit f99a8918 this line was changed to not use awk, and new version copied both to init file and preinst file but overlooking that they use different variable names. Also fix minor syntax issues to make Shellcheck happy. All new code of the whole pull request, including one or several files that are either new files or modified ones, are contributed under the BSD-new license. I am contributing on behalf of my employer Amazon Web Services, Inc. --- debian/mariadb-server-10.6.mariadb.init | 2 +- debian/mariadb-server-10.6.preinst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/debian/mariadb-server-10.6.mariadb.init b/debian/mariadb-server-10.6.mariadb.init index b2b7142084f..f68fbc99bc2 100644 --- a/debian/mariadb-server-10.6.mariadb.init +++ b/debian/mariadb-server-10.6.mariadb.init @@ -86,7 +86,7 @@ sanity_checks() { datadir=`mariadbd_get_param datadir` # As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 # 4096 blocks is then lower than 4 MB - df_available_blocks=`LC_ALL=C BLOCKSIZE= df --output=avail "$datadir" | tail -n 1` + df_available_blocks="$(LC_ALL=C BLOCKSIZE='' df --output=avail "$datadir" | tail -n 1)" if [ "$df_available_blocks" -lt "4096" ]; then log_failure_msg "$0: ERROR: The partition with $datadir is too full!" echo "ERROR: The partition with $datadir is too full!" | $ERR_LOGGER diff --git a/debian/mariadb-server-10.6.preinst b/debian/mariadb-server-10.6.preinst index 9a31b5c636e..2513e3ab021 100644 --- a/debian/mariadb-server-10.6.preinst +++ b/debian/mariadb-server-10.6.preinst @@ -226,7 +226,7 @@ fi # As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 # 4096 blocks is then lower than 4 MB -df_available_blocks=`LC_ALL=C BLOCKSIZE= df --output=avail "$datadir" | tail -n 1` +df_available_blocks="$(LC_ALL=C BLOCKSIZE='' df --output=avail "$mysql_datadir" | tail -n 1)" if [ "$df_available_blocks" -lt "4096" ] then echo "ERROR: There's not enough space in $mysql_datadir/" 1>&2 From a6c0a2769663850c055e41a83b0cca9dd9ab89a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 30 May 2023 17:21:49 +0300 Subject: [PATCH 55/76] MDEV-31362 recv_sys_t::apply(bool): Assertion `!last_batch || recovered_lsn == scanned_lsn' failed recv_sys_t::apply(): Remove a bogus debug assertion that had been added in commit f2c17cc9d9bcd634887846d3064bcb71243f9cc0 (MDEV-29911). It is perfectly normal that when the server was killed in the middle of writing multiple redo log blocks, the recovery would end such that recv_sys.scanned_lsn will point to the end of the last complete 512-byte log block, but recv_sys.recovered_lsn will be less than that. Also, correct the function comment of recv_sys_t::parse(). 
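To make the relationship concrete, a small self-contained example (the numbers are made up; log_block mirrors the 512-byte redo block size mentioned above): parsing stops at the end of the last complete record, while the scan position still advances to the end of the last complete block containing it.

    #include <cassert>
    #include <cstdint>

    int main()
    {
      const uint64_t log_block = 512;
      // Suppose the last complete record ends 100 bytes into the fourth block.
      uint64_t recovered_lsn = 3 * log_block + 100;
      // The scan consumed that whole 512-byte block before reaching the
      // incomplete one that the killed server left behind.
      uint64_t scanned_lsn = (recovered_lsn / log_block + 1) * log_block;
      // This inequality is what actually holds; the removed assertion
      // demanded equality.
      assert(recovered_lsn <= scanned_lsn);
      return 0;
    }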
--- storage/innobase/include/log0recv.h | 2 +- storage/innobase/log/log0recv.cc | 5 ++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 2e4a4bfa794..79e1f862cf5 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -394,7 +394,7 @@ public: bool add(map::iterator it, lsn_t start_lsn, lsn_t lsn, const byte *l, size_t len); - /** Parse and register one mini-transaction in log_t::FORMAT_10_5. + /** Parse and register mini-transactions in log_t::FORMAT_10_5. @param checkpoint_lsn the log sequence number of the latest checkpoint @param store whether to store the records @param apply whether to apply file-level log records diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 1d80345a5e0..2764cc79829 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2396,7 +2396,7 @@ void recv_sys_t::rewind(const byte *end, const byte *begin) noexcept pages_it= pages.end(); } -/** Parse and register one mini-transaction in log_t::FORMAT_10_5. +/** Parse and register mini-transactions in log_t::FORMAT_10_5. @param checkpoint_lsn the log sequence number of the latest checkpoint @param store whether to store the records @param apply whether to apply file-level log records @@ -2404,7 +2404,7 @@ void recv_sys_t::rewind(const byte *end, const byte *begin) noexcept or corruption was noticed */ bool recv_sys_t::parse(lsn_t checkpoint_lsn, store_t *store, bool apply) { - restart: +restart: mysql_mutex_assert_owner(&log_sys.mutex); mysql_mutex_assert_owner(&mutex); ut_ad(parse_start_lsn); @@ -3626,7 +3626,6 @@ void recv_sys_t::apply(bool last_batch) recv_no_ibuf_operations = !last_batch || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT; - ut_ad(!last_batch || recovered_lsn == scanned_lsn); progress_time= time(nullptr); report_progress(); From 30fb72ca6e27742f20003c208985eb711e12c70a Mon Sep 17 00:00:00 2001 From: Tuukka Pasanen Date: Tue, 30 May 2023 09:15:11 +0300 Subject: [PATCH 56/76] MDEV-31331: Fix cut'n'paste variable name in Debian pre-inst script There is unwanted cut'n'paste variable name in Debian pre-inst script which causes: df: '': No such file or directory /var/lib/dpkg/tmp.ci/preinst: line 215: [: : integer expression expected Rename variable to correct one and make check that that directory or symlink really exists. If it does not then fail with error and message. --- debian/mariadb-server-10.6.preinst | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/debian/mariadb-server-10.6.preinst b/debian/mariadb-server-10.6.preinst index 2513e3ab021..0b3babd3ca8 100644 --- a/debian/mariadb-server-10.6.preinst +++ b/debian/mariadb-server-10.6.preinst @@ -224,14 +224,23 @@ then mkdir -Z $mysql_datadir fi -# As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 -# 4096 blocks is then lower than 4 MB -df_available_blocks="$(LC_ALL=C BLOCKSIZE='' df --output=avail "$mysql_datadir" | tail -n 1)" -if [ "$df_available_blocks" -lt "4096" ] +# Check if MariaDB datadir is available if not fails. +# There should be symlink or directory available or something will fail. 
+if [ -d "$mysql_datadir" ] || [ -L "$mysql_datadir" ] then - echo "ERROR: There's not enough space in $mysql_datadir/" 1>&2 - db_stop - exit 1 + # As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 + # 4096 blocks is then lower than 4 MB + df_available_blocks="$(LC_ALL=C BLOCKSIZE='' df --output=avail "$mysql_datadir" | tail -n 1)" + if [ "$df_available_blocks" -lt "4096" ] + then + echo "ERROR: There's not enough space in $mysql_datadir/" 1>&2 + db_stop + exit 1 + fi +else + echo "ERROR: There's no directory or symlink available: $mysql_datadir/" 1>&2 + db_stop + exit 1 fi # Since the home directory was created before putting the user into From eb20e7c9008b343b2441e9587149ac58de1f221e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 31 May 2023 15:20:54 +0300 Subject: [PATCH 57/76] MDEV-31353 InnoDB recovery hangs after reporting corruption recv_recover_page(): Remove some code which was added in commit 0b47c126e31cddda1e94588799599e138400bcf8 with no good reason and which would cause a hang after a corrupted page was reported during crash recovery. Tested by: Matthias Leich --- storage/innobase/log/log0recv.cc | 3 --- 1 file changed, 3 deletions(-) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 2764cc79829..44680e9e485 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -3076,9 +3076,6 @@ set_start_lsn: || recv_sys.is_corrupt_log()) && !srv_force_recovery) { if (init) { init->created = false; - if (space || block->page.id().page_no()) { - block->page.lock.x_lock_recursive(); - } } mtr.discard_modifications(); From e3b06156c6ecd5d3fd4376ee025df1ab45311a6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 31 May 2023 15:25:07 +0300 Subject: [PATCH 58/76] MDEV-31347 fil_ibd_create() may hijack the file handle of an old file fil_ibd_create(): Hold fil_system.mutex until fil_node_t::find_metadata() has completed, so that node->handle cannot be closed by a concurrent thread. This race condition was introduced in commit 10dd290b4b8b8b235c8cf42e100f0a4415629e79 (MDEV-17380). Tested by: Matthias Leich --- storage/innobase/fil/fil0fil.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 7bd813b0c0d..e08d8a25171 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -2001,8 +2001,8 @@ err_exit: FIL_TYPE_TABLESPACE, crypt_data, mode, true)) { fil_node_t* node = space->add(path, file, size, false, true); - mysql_mutex_unlock(&fil_system.mutex); IF_WIN(node->find_metadata(), node->find_metadata(file, true)); + mysql_mutex_unlock(&fil_system.mutex); mtr.start(); mtr.set_named_space(space); ut_a(fsp_header_init(space, size, &mtr) == DB_SUCCESS); From 5919f7b675835b5a4eec7f921ed024a7ab2de58c Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Wed, 31 May 2023 19:07:41 +0530 Subject: [PATCH 59/76] MDEV-31264 Purge trying to access freed secondary index page - InnoDB purge tries to access aborted secondary index and access the freed secondary index root page. 
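The guard added below boils down to one rule; as a rough standalone sketch with invented names (online_status_stub, index_stub, purge_may_touch), purge must skip any secondary index whose online creation was aborted, since its pages may already have been freed.

    // Invented enum mirroring the ordering relied upon by the check
    // "online_status > ONLINE_INDEX_CREATION" in the diff below.
    enum online_status_stub { STUB_COMPLETE, STUB_CREATION,
                              STUB_ABORTED, STUB_ABORTED_DROPPED };

    struct index_stub { online_status_stub status; };

    // Purge may only touch committed indexes or ones still being built.
    bool purge_may_touch(const index_stub& idx)
    {
      return idx.status <= STUB_CREATION;
    }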
--- .../suite/parts/r/partition_purge.result | 26 +++++++++++++ mysql-test/suite/parts/t/partition_purge.opt | 1 + mysql-test/suite/parts/t/partition_purge.test | 37 +++++++++++++++++++ storage/innobase/row/row0purge.cc | 7 ++++ 4 files changed, 71 insertions(+) create mode 100644 mysql-test/suite/parts/r/partition_purge.result create mode 100644 mysql-test/suite/parts/t/partition_purge.opt create mode 100644 mysql-test/suite/parts/t/partition_purge.test diff --git a/mysql-test/suite/parts/r/partition_purge.result b/mysql-test/suite/parts/r/partition_purge.result new file mode 100644 index 00000000000..072b141cd8d --- /dev/null +++ b/mysql-test/suite/parts/r/partition_purge.result @@ -0,0 +1,26 @@ +CREATE TABLE t1(f1 INT, f2 INT, INDEX(f1))ENGINE=InnoDB +PARTITION BY LIST(f1) ( +PARTITION p1 VALUES in (1, 2, 3), +PARTITION p2 VALUES in (4, 5, 6)); +INSERT INTO t1 VALUES(1, 1), (1, 1), (6, 1); +connect con1,localhost,root,,,; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connect con2,localhost,root,,,; +SET DEBUG_SYNC="innodb_rollback_inplace_alter_table SIGNAL default_resume WAIT_FOR alter_resume"; +ALTER TABLE t1 ADD UNIQUE INDEX(f1); +connection default; +set DEBUG_SYNC="now WAIT_FOR default_resume"; +SET DEBUG_SYNC="innodb_row_update_for_mysql_begin SIGNAL alter_resume WAIT_FOR alter_finish"; +DELETE FROM t1; +connection con2; +ERROR 23000: Duplicate entry '1' for key 'f1_2' +SET DEBUG_SYNC="now SIGNAL alter_finish"; +connection default; +connection con1; +commit; +connection default; +disconnect con1; +disconnect con2; +InnoDB 0 transactions not purged +drop table t1; +SET DEBUG_SYNC=reset; diff --git a/mysql-test/suite/parts/t/partition_purge.opt b/mysql-test/suite/parts/t/partition_purge.opt new file mode 100644 index 00000000000..a39e5228c9d --- /dev/null +++ b/mysql-test/suite/parts/t/partition_purge.opt @@ -0,0 +1 @@ +--innodb_purge_threads=1 diff --git a/mysql-test/suite/parts/t/partition_purge.test b/mysql-test/suite/parts/t/partition_purge.test new file mode 100644 index 00000000000..2df81b0eb77 --- /dev/null +++ b/mysql-test/suite/parts/t/partition_purge.test @@ -0,0 +1,37 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +CREATE TABLE t1(f1 INT, f2 INT, INDEX(f1))ENGINE=InnoDB + PARTITION BY LIST(f1) ( + PARTITION p1 VALUES in (1, 2, 3), + PARTITION p2 VALUES in (4, 5, 6)); +INSERT INTO t1 VALUES(1, 1), (1, 1), (6, 1); +connect(con1,localhost,root,,,); +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +connect(con2,localhost,root,,,); +SET DEBUG_SYNC="innodb_rollback_inplace_alter_table SIGNAL default_resume WAIT_FOR alter_resume"; +send ALTER TABLE t1 ADD UNIQUE INDEX(f1); + +connection default; +set DEBUG_SYNC="now WAIT_FOR default_resume"; +SET DEBUG_SYNC="innodb_row_update_for_mysql_begin SIGNAL alter_resume WAIT_FOR alter_finish"; +send DELETE FROM t1; + +connection con2; +--error ER_DUP_ENTRY +reap; +SET DEBUG_SYNC="now SIGNAL alter_finish"; + +connection default; +reap; +connection con1; +commit; +connection default; +disconnect con1; +disconnect con2; +--source ../../innodb/include/wait_all_purged.inc +drop table t1; +SET DEBUG_SYNC=reset; diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index f4db252b069..92403fa739d 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -615,6 +615,8 @@ row_purge_del_mark( const auto type= node->index->type; if (type & (DICT_FTS | DICT_CORRUPT)) continue; + if 
(node->index->online_status > ONLINE_INDEX_CREATION) + continue; if (UNIV_UNLIKELY(DICT_VIRTUAL & type) && !node->index->is_committed() && node->index->has_new_v_col()) continue; @@ -767,6 +769,11 @@ row_purge_upd_exist_or_extern_func( continue; } + if (node->index->online_status + > ONLINE_INDEX_CREATION) { + continue; + } + if (row_upd_changes_ord_field_binary(node->index, node->update, thr, NULL, NULL)) { /* Build the older version of the index entry */ From 3aea77edeb472753da3c8a57f39fa70c7f960847 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 1 Jun 2023 09:41:17 +0300 Subject: [PATCH 60/76] MDEV-31347 fil_ibd_create() may hijack the file handle of an old file fil_space_t::add(): If a file handle was passed, invoke fil_node_t::find_metadata() before releasing fil_system.mutex. The call was moved from fil_ibd_create(). This is a 10.5 version of commit e3b06156c6ecd5d3fd4376ee025df1ab45311a6d from 10.6. --- storage/innobase/fil/fil0fil.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 58261d27d8e..c75144413ac 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -350,6 +350,7 @@ fil_node_t* fil_space_t::add(const char* name, pfs_os_file_t handle, this->size += size; UT_LIST_ADD_LAST(chain, node); if (node->is_open()) { + node->find_metadata(node->handle); n_pending.fetch_and(~CLOSING, std::memory_order_relaxed); if (++fil_system.n_open >= srv_max_n_open_files) { reacquire(); @@ -2433,7 +2434,6 @@ err_exit: mtr.log_file_op(FILE_CREATE, space_id, node->name); mtr.commit(); - node->find_metadata(file); *err = DB_SUCCESS; return space; } From bb9da13baf5e5a4a435408fc05fd46253a00ea69 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 1 Jun 2023 12:11:18 +0300 Subject: [PATCH 61/76] MDEV-31373 innodb_undo_log_truncate=ON recovery results in a corrupted undo log recv_sys_t::apply(): When applying an undo log truncation operation, invoke os_file_truncate() on space->recv_size, which must not be less than the original truncated file size. Alternatively, as pointed out by Thirunarayanan Balathandayuthapani, we could assign space->size = t.pages, so that fil_system_t::extend_to_recv_size() would extend the file back to space->recv_size. 
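As a hedged sketch of the sizing rule only (undo_truncate_target is an invented helper, not a server function): the replayed truncation must not leave the file smaller than the size that later log records expect, which is what space->recv_size tracks, so the larger of the two values is used.

    #include <algorithm>
    #include <cstdint>

    // Byte offset to truncate to. recv_size_pages should already be at least
    // truncated_pages; taking the maximum keeps the sketch obviously safe.
    uint64_t undo_truncate_target(uint64_t truncated_pages,
                                  uint64_t recv_size_pages,
                                  uint32_t page_size)
    {
      return std::max(truncated_pages, recv_size_pages) * uint64_t(page_size);
    }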
--- storage/innobase/log/log0recv.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 86e7f43015c..16022c8dd0e 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -2690,10 +2690,12 @@ void recv_sys_t::apply(bool last_batch) if (fil_space_t *space = fil_space_get(id + srv_undo_space_id_start)) { ut_ad(UT_LIST_GET_LEN(space->chain) == 1); + ut_ad(space->recv_size >= t.pages); fil_node_t *file= UT_LIST_GET_FIRST(space->chain); ut_ad(file->is_open()); os_file_truncate(file->name, file->handle, - os_offset_t{t.pages} << srv_page_size_shift, true); + os_offset_t{space->recv_size} << + srv_page_size_shift, true); } } } From 8a86df37ef024c85d8a049b237fd331ac09c2683 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 2 Jun 2023 10:44:34 +0300 Subject: [PATCH 62/76] MDEV-31088 Server freeze due to innodb_change_buffering A 3-thread deadlock has been frequently observed when using innodb_change_buffering!=none and innodb_file_per_table=0: (1) ibuf_merge_or_delete_for_page() holding an exclusive latch on the block and waiting for an exclusive tablespace latch in fseg_page_is_allocated() (2) btr_free_but_not_root() in fseg_free_step() waiting for an exclusive tablespace latch (3) fsp_alloc_free_page() holding the exclusive tablespace latch and waiting for a latch on the block, which it is reallocating for something else While this was reproduced using innodb_file_per_table=0, this hang should be theoretically possible in .ibd files as well, when the recovery or cleanup of a failed DROP INDEX or ADD INDEX is executing concurrently with something that involves page allocation. ibuf_merge_or_delete_for_page(): Avoid invoking fseg_page_is_allocated() when block==nullptr. The call was redundant in this case, and it could cause deadlocks due to latching order violation. ibuf_read_merge_pages(): Acquire an exclusive tablespace latch before invoking buf_page_get_gen(), which may cause fseg_page_is_allocated() to be invoked in ibuf_merge_or_delete_for_page(). Note: This will not fix all latching order violations in this area! Deadlocks involving ibuf_merge_or_delete_for_page(block!=nullptr) are still possible if the caller is not acquiring an exclusive tablespace latch upfront. This would be the case in any read operation that involves a change buffer merge, such as SELECT, CHECK TABLE, or any DML operation that cannot be buffered in the change buffer. --- storage/innobase/ibuf/ibuf0ibuf.cc | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/storage/innobase/ibuf/ibuf0ibuf.cc b/storage/innobase/ibuf/ibuf0ibuf.cc index a176f5d1bea..d1d6176720a 100644 --- a/storage/innobase/ibuf/ibuf0ibuf.cc +++ b/storage/innobase/ibuf/ibuf0ibuf.cc @@ -2363,6 +2363,7 @@ tablespace_deleted: } const ulint zip_size = s->zip_size(), size = s->size; + s->x_lock(); s->release(); mtr_t mtr; @@ -2380,13 +2381,17 @@ tablespace_deleted: || !page_is_leaf(block->page.frame); mtr.commit(); if (err == DB_TABLESPACE_DELETED) { + s->x_unlock(); goto tablespace_deleted; } if (!remove) { + s->x_unlock(); continue; } } + s->x_unlock(); + if (srv_shutdown_state == SRV_SHUTDOWN_NONE || srv_fast_shutdown) { continue; @@ -2415,7 +2420,7 @@ tablespace_deleted: /* Prevent an infinite loop, by removing entries from the change buffer in the case the bitmap bits were wrongly clear even though buffered changes exist. 
*/ - ibuf_delete_recs(page_id_t(space_ids[i], page_nos[i])); + ibuf_delete_recs(page_id_t(space_id, page_nos[i])); } } @@ -4193,25 +4198,26 @@ dberr_t ibuf_merge_or_delete_for_page(buf_block_t *block, ibuf_mtr_commit(&mtr); - if (bitmap_bits - && DB_SUCCESS + if (!bitmap_bits) { + done: + /* No changes are buffered for this page. */ + space->release(); + return DB_SUCCESS; + } + + if (!block + || DB_SUCCESS == fseg_page_is_allocated(space, page_id.page_no())) { ibuf_mtr_start(&mtr); mtr.set_named_space(space); ibuf_reset_bitmap(block, page_id, zip_size, &mtr); ibuf_mtr_commit(&mtr); - bitmap_bits = 0; if (!block || btr_page_get_index_id(block->page.frame) != DICT_IBUF_ID_MIN + IBUF_SPACE_ID) { ibuf_delete_recs(page_id); } - } - - if (!bitmap_bits) { - /* No changes are buffered for this page. */ - space->release(); - return DB_SUCCESS; + goto done; } } From f569e06e03a7efa6050258a8d167de0aaa4e124c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 2 Jun 2023 11:06:09 +0300 Subject: [PATCH 63/76] MDEV-31385 Change buffer stale entries leads to corruption while reusing page buf_page_free(): If buffered changes existed for the page, drop them. Co-developed with Thirunarayanan Balathandayuthapani --- storage/innobase/buf/buf0buf.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 693826917c9..90886173b1b 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -2151,6 +2151,8 @@ void buf_page_free(fil_space_t *space, uint32_t page, mtr_t *mtr) } block->page.lock.x_lock(); + if (block->page.is_ibuf_exist()) + ibuf_merge_or_delete_for_page(nullptr, page_id, block->page.zip_size()); #ifdef BTR_CUR_HASH_ADAPT if (block->index) btr_search_drop_page_hash_index(block, false); From a0e7bd735b5bc1fd458766c2005d5ac349682d9f Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 1 Jun 2023 14:06:06 +0300 Subject: [PATCH 64/76] MDEV-31380: Assertion `s->table->opt_range_condition_rows <= s->found_records' failed LooseScan code set opt_range_condition_rows to be the MIN(loose_scan_plan->records, table->records) totally ignoring possible quick range selects. If there was a quick select $QUICK on another index with $QUICK->records < loose_scan_plan->records this would create a situation where opt_range_condition_rows > $QUICK->records which causes an assert in 10.6+ and potentially wrong query plan choice in 10.5. Fixed by making opt_range_condition_rows to be the minimum #rows of any quick select. 
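The shape of the fix, reduced to a sketch (account_range_plan is an invented name; the real code uses set_if_smaller on opt_range_condition_rows): the estimate is only ever lowered, so no single plan can push it above the row count of a cheaper quick select.

    #include <algorithm>
    #include <cstdint>

    // Fold one candidate plan's row estimate into the running minimum instead
    // of overwriting the previous value.
    void account_range_plan(uint64_t& opt_range_condition_rows,
                            uint64_t plan_records)
    {
      opt_range_condition_rows = std::min(opt_range_condition_rows,
                                          plan_records);
    }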
Approved-by: Monty --- mysql-test/main/group_min_max.result | 24 ++++++++++++++++++++++++ mysql-test/main/group_min_max.test | 25 +++++++++++++++++++++++++ sql/opt_range.cc | 4 ++-- 3 files changed, 51 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result index 712466c8afb..0df9abc7a25 100644 --- a/mysql-test/main/group_min_max.result +++ b/mysql-test/main/group_min_max.result @@ -4204,6 +4204,30 @@ a b s1 2 2 t2:t2a-null;min_t3_b:t3b-null 3 3 t2:1;min_t3_b:3 drop table t1,t2,t3; +# +# MDEV-31380: Assertion `s->table->opt_range_condition_rows <= s->found_records' failed +# (assertion in 10.6+, DBL_MAX costs in 10.5) +# +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY(a), KEY(b)) ENGINE=InnoDB; +INSERT INTO t1 SELECT seq, seq FROM seq_1_to_100; +SET +@tmp=@@optimizer_use_condition_selectivity, +optimizer_use_condition_selectivity = 1, +@tmp2=@@optimizer_trace, +optimizer_trace=1; +SELECT DISTINCT * FROM t1 WHERE a IN (1, 2); +a b +1 1 +2 2 +select +CAST(json_value(json_extract(trace, '$**.chosen_access_method.cost'), '$[0]') +as DOUBLE) < 1.0e100 +from information_schema.optimizer_trace; +CAST(json_value(json_extract(trace, '$**.chosen_access_method.cost'), '$[0]') +as DOUBLE) < 1.0e100 +1 +set optimizer_use_condition_selectivity = @tmp, optimizer_trace=@tmp2; +drop table t1; # # End of 10.5 tests # diff --git a/mysql-test/main/group_min_max.test b/mysql-test/main/group_min_max.test index 1fc2be6231a..482235571db 100644 --- a/mysql-test/main/group_min_max.test +++ b/mysql-test/main/group_min_max.test @@ -6,6 +6,7 @@ --source include/no_valgrind_without_big.inc --source include/default_optimizer_switch.inc --source include/have_innodb.inc +--source include/have_sequence.inc # # TODO: # Add queries with: @@ -1858,6 +1859,30 @@ from t1; drop table t1,t2,t3; +--echo # +--echo # MDEV-31380: Assertion `s->table->opt_range_condition_rows <= s->found_records' failed +--echo # (assertion in 10.6+, DBL_MAX costs in 10.5) +--echo # + +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY(a), KEY(b)) ENGINE=InnoDB; +INSERT INTO t1 SELECT seq, seq FROM seq_1_to_100; + +SET + @tmp=@@optimizer_use_condition_selectivity, + optimizer_use_condition_selectivity = 1, + @tmp2=@@optimizer_trace, + optimizer_trace=1; + +SELECT DISTINCT * FROM t1 WHERE a IN (1, 2); + +select + CAST(json_value(json_extract(trace, '$**.chosen_access_method.cost'), '$[0]') + as DOUBLE) < 1.0e100 +from information_schema.optimizer_trace; + +set optimizer_use_condition_selectivity = @tmp, optimizer_trace=@tmp2; +drop table t1; + --echo # --echo # End of 10.5 tests --echo # diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7b6f373eea4..905d5aa5ef9 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3030,8 +3030,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use, restore_nonrange_trees(¶m, tree, backup_keys); if ((group_trp= get_best_group_min_max(¶m, tree, read_time))) { - param.table->opt_range_condition_rows= MY_MIN(group_trp->records, - head->stat_records()); + set_if_smaller(param.table->opt_range_condition_rows, + group_trp->records); Json_writer_object grp_summary(thd, "best_group_range_summary"); if (unlikely(thd->trace_started())) From 91367e82f10fde14ad2eb42928626cabcc0d49cd Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:10:48 -0400 Subject: [PATCH 65/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 2806542e3ac..e1357a38a48 100644 --- a/VERSION +++ 
b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=5 -MYSQL_VERSION_PATCH=21 +MYSQL_VERSION_PATCH=22 SERVER_MATURITY=stable From a77939ec8ce65b88e2d256b54ac6d72d255a245b Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:12:43 -0400 Subject: [PATCH 66/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index b7bb36d6024..44b9653f38a 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=6 -MYSQL_VERSION_PATCH=14 +MYSQL_VERSION_PATCH=15 SERVER_MATURITY=stable From 7b3538051c25c6107d0a58c0d542cfa3031dc955 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:13:53 -0400 Subject: [PATCH 67/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 47b4b206e8d..a84254dffc7 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=9 -MYSQL_VERSION_PATCH=7 +MYSQL_VERSION_PATCH=8 SERVER_MATURITY=stable From 68b52ae31276206eb36e469bbe526f84b712060f Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:15:16 -0400 Subject: [PATCH 68/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 2c5ca290588..281e3f49dcf 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=10 -MYSQL_VERSION_PATCH=5 +MYSQL_VERSION_PATCH=6 SERVER_MATURITY=stable From a2aaf26aaaf2257c1eed9af54b26ee0c816110d2 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:16:38 -0400 Subject: [PATCH 69/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index cd377cb282f..edb88571b8d 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=11 -MYSQL_VERSION_PATCH=4 +MYSQL_VERSION_PATCH=5 SERVER_MATURITY=stable From d6335d42324b0263ba650b72b957ff8c679cc581 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:15:16 -0400 Subject: [PATCH 70/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 2c5ca290588..281e3f49dcf 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=10 -MYSQL_VERSION_PATCH=5 +MYSQL_VERSION_PATCH=6 SERVER_MATURITY=stable From 5b0fe277832b8ae1729051f7df8a6d19574f43e9 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Wed, 7 Jun 2023 08:16:38 -0400 Subject: [PATCH 71/76] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index cd377cb282f..edb88571b8d 100644 --- a/VERSION +++ b/VERSION @@ -1,4 +1,4 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=11 -MYSQL_VERSION_PATCH=4 +MYSQL_VERSION_PATCH=5 SERVER_MATURITY=stable From 3e40f9a7f3bbe82d96c8acccbb017deebfa00647 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2023 09:17:52 +0300 Subject: [PATCH 72/76] MDEV-31355 innodb_undo_log_truncate=ON fails to wait for purge of enough transaction history purge_sys_t::sees(): Wrapper for view.sees(). trx_purge_truncate_history(): Invoke purge_sys.sees() instead of comparing to head.trx_no, to determine if undo pages can be safely freed. The test innodb.cursor-restore-locking was adjusted by Vladislav Lesin, as was the the debug instrumentation in row_purge_del_mark(). 
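A rough standalone sketch of the visibility test (view_stub and may_free_history are invented; it is assumed here that the view's sees() amounts to a comparison against its low limit number): undo data may only be freed once the purge view sees the transaction number recorded as still needing purge.

    #include <cstdint>

    // Simplified read view: everything strictly below the low limit is visible.
    struct view_stub
    {
      uint64_t low_limit_no;
      bool sees(uint64_t trx_no) const { return trx_no < low_limit_no; }
    };

    // Mirrors the new condition: consult the purge view instead of comparing
    // the rollback segment's pending purge against head.trx_no.
    bool may_free_history(const view_stub& purge_view, uint64_t rseg_needs_purge)
    {
      return purge_view.sees(rseg_needs_purge);
    }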
Reviewed by: Vladislav Lesin --- .../gcol/r/innodb_virtual_debug_purge.result | 2 +- .../gcol/t/innodb_virtual_debug_purge.test | 2 ++ .../innodb/r/cursor-restore-locking.result | 13 ++++--- mysql-test/suite/innodb/r/dml_purge.result | 2 +- .../r/instant_alter_debug,dynamic.rdiff | 6 ---- .../suite/innodb/r/instant_alter_debug.result | 3 +- .../suite/innodb/r/instant_alter_purge.result | 3 +- .../innodb/t/cursor-restore-locking.test | 35 ++++++++++--------- mysql-test/suite/innodb/t/dml_purge.test | 2 +- .../suite/innodb/t/instant_alter_debug.test | 3 ++ .../suite/innodb/t/instant_alter_purge.test | 4 +-- storage/innobase/include/trx0purge.h | 12 +++++++ storage/innobase/row/row0purge.cc | 14 +++++++- storage/innobase/trx/trx0purge.cc | 4 +-- 14 files changed, 68 insertions(+), 37 deletions(-) delete mode 100644 mysql-test/suite/innodb/r/instant_alter_debug,dynamic.rdiff diff --git a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result index 4a204532630..2820aa7cdbb 100644 --- a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result +++ b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result @@ -76,7 +76,7 @@ SET DEBUG_SYNC= 'now WAIT_FOR uncommitted'; # enable purge COMMIT; # wait for purge to process the deleted records. -InnoDB 0 transactions not purged +InnoDB 1 transactions not purged SET DEBUG_SYNC= 'now SIGNAL purged'; connection default; /* connection default */ ALTER TABLE t1 ADD COLUMN c INT GENERATED ALWAYS AS(a+b), ADD INDEX idx (c), ALGORITHM=INPLACE, LOCK=SHARED; diff --git a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test index ca60ed84a98..c8f0cc4c414 100644 --- a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test +++ b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test @@ -111,7 +111,9 @@ SET DEBUG_SYNC= 'now WAIT_FOR uncommitted'; COMMIT; --echo # wait for purge to process the deleted records. 
+let $wait_all_purged = 1; --source ../../innodb/include/wait_all_purged.inc +let $wait_all_purged = 0; SET DEBUG_SYNC= 'now SIGNAL purged'; diff --git a/mysql-test/suite/innodb/r/cursor-restore-locking.result b/mysql-test/suite/innodb/r/cursor-restore-locking.result index fc56f0935fa..a792babe5cd 100644 --- a/mysql-test/suite/innodb/r/cursor-restore-locking.result +++ b/mysql-test/suite/innodb/r/cursor-restore-locking.result @@ -1,31 +1,34 @@ SET @save_freq=@@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; -CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB; +CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB, STATS_PERSISTENT=0; InnoDB 0 transactions not purged connect prevent_purge,localhost,root,,; start transaction with consistent snapshot; connect con_del_1,localhost,root,,; INSERT INTO t VALUES (20,20); SET DEBUG_SYNC = 'innodb_row_search_for_mysql_exit SIGNAL first_del_row_search_mvcc_finished WAIT_FOR first_del_cont'; -DELETE FROM t WHERE b = 20; +DELETE FROM t WHERE b = 20 # trx_1; connect con_ins_1,localhost,root,,; SET DEBUG_SYNC = 'now WAIT_FOR first_del_row_search_mvcc_finished'; SET DEBUG_SYNC = 'lock_wait_suspend_thread_enter SIGNAL first_ins_locked'; SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL first_ins_row_inserted WAIT_FOR first_ins_cont'; -INSERT INTO t VALUES(10, 20); +INSERT INTO t VALUES(10, 20) # trx_2; connect con_del_2,localhost,root,,; SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_locked'; SET DEBUG_SYNC = 'lock_wait_suspend_thread_enter SIGNAL second_del_locked'; -DELETE FROM t WHERE b = 20; +DELETE FROM t WHERE b = 20 # trx_3; connection default; SET DEBUG_SYNC = 'now WAIT_FOR second_del_locked'; +SET @saved_dbug = @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point"; SET DEBUG_SYNC = 'now SIGNAL first_del_cont'; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_row_inserted'; connection con_del_1; connection default; disconnect prevent_purge; -InnoDB 0 transactions not purged +SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished'; +SET @@GLOBAL.debug_dbug = @saved_dbug; SET DEBUG_SYNC = 'now SIGNAL first_ins_cont'; connection con_del_2; connection con_ins_1; diff --git a/mysql-test/suite/innodb/r/dml_purge.result b/mysql-test/suite/innodb/r/dml_purge.result index 38273d571c0..75a5f0fec6c 100644 --- a/mysql-test/suite/innodb/r/dml_purge.result +++ b/mysql-test/suite/innodb/r/dml_purge.result @@ -19,7 +19,7 @@ BEGIN; UPDATE t1 SET b=4 WHERE a=3; disconnect prevent_purge; connection default; -InnoDB 0 transactions not purged +SET GLOBAL innodb_max_purge_lag_wait=1; connection con1; ROLLBACK; disconnect con1; diff --git a/mysql-test/suite/innodb/r/instant_alter_debug,dynamic.rdiff b/mysql-test/suite/innodb/r/instant_alter_debug,dynamic.rdiff deleted file mode 100644 index 379514edad9..00000000000 --- a/mysql-test/suite/innodb/r/instant_alter_debug,dynamic.rdiff +++ /dev/null @@ -1,6 +0,0 @@ -@@ -470,4 +470,4 @@ - FROM information_schema.global_status - WHERE variable_name = 'innodb_instant_alter_column'; - instants --33 -+32 diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result index 82230573c44..39846a0e329 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug.result +++ b/mysql-test/suite/innodb/r/instant_alter_debug.result @@ -391,11 +391,12 @@ connection stop_purge; COMMIT; disconnect stop_purge; connection default; 
-InnoDB 0 transactions not purged +InnoDB 1 transactions not purged SET DEBUG_SYNC='now SIGNAL s2'; connection dml; disconnect dml; connection default; +InnoDB 0 transactions not purged SET DEBUG_SYNC=RESET; DROP TABLE t1; # End of 10.3 tests diff --git a/mysql-test/suite/innodb/r/instant_alter_purge.result b/mysql-test/suite/innodb/r/instant_alter_purge.result index 1179ff62ecc..261356bad12 100644 --- a/mysql-test/suite/innodb/r/instant_alter_purge.result +++ b/mysql-test/suite/innodb/r/instant_alter_purge.result @@ -1,5 +1,6 @@ SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; +InnoDB 0 transactions not purged # # MDEV-17793 Crash in purge after instant DROP and emptying the table # @@ -16,7 +17,7 @@ COMMIT; START TRANSACTION WITH CONSISTENT SNAPSHOT; connection default; ALTER TABLE t1 ADD COLUMN extra TINYINT UNSIGNED NOT NULL DEFAULT 42; -InnoDB 1 transactions not purged +SET GLOBAL innodb_max_purge_lag_wait=1; ALTER TABLE t1 DROP extra; disconnect prevent_purge; InnoDB 0 transactions not purged diff --git a/mysql-test/suite/innodb/t/cursor-restore-locking.test b/mysql-test/suite/innodb/t/cursor-restore-locking.test index 0f083f9295b..815542c32db 100644 --- a/mysql-test/suite/innodb/t/cursor-restore-locking.test +++ b/mysql-test/suite/innodb/t/cursor-restore-locking.test @@ -5,7 +5,7 @@ source include/have_debug_sync.inc; SET @save_freq=@@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; -CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB; +CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB, STATS_PERSISTENT=0; --source include/wait_all_purged.inc --connect(prevent_purge,localhost,root,,) @@ -14,20 +14,20 @@ start transaction with consistent snapshot; --connect(con_del_1,localhost,root,,) INSERT INTO t VALUES (20,20); SET DEBUG_SYNC = 'innodb_row_search_for_mysql_exit SIGNAL first_del_row_search_mvcc_finished WAIT_FOR first_del_cont'; ---send DELETE FROM t WHERE b = 20 +--send DELETE FROM t WHERE b = 20 # trx_1 --connect(con_ins_1,localhost,root,,) SET DEBUG_SYNC = 'now WAIT_FOR first_del_row_search_mvcc_finished'; # It's supposed the following INSERT will be suspended just after # lock_wait_suspend_thread_enter syncpoint, and will be awaken -# after the previous DELETE commits. ib_after_row_insert will be executed -# after the INSERT is woken up. The previous DELETE will wait for +# after trx_1 DELETE commits. ib_after_row_insert will be executed +# after the trx_2 INSERT is woken up. The trx_1 DELETE will wait for # first_del_cont signal before commit, and this signal will be sent later. # So it's safe to use two signals in a row here, it's guaranted the first # signal will be received before the second signal is sent. 
SET DEBUG_SYNC = 'lock_wait_suspend_thread_enter SIGNAL first_ins_locked'; SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL first_ins_row_inserted WAIT_FOR first_ins_cont'; ---send INSERT INTO t VALUES(10, 20) +--send INSERT INTO t VALUES(10, 20) # trx_2 --connect(con_del_2,localhost,root,,) # After MDEV-30225 is fixed, the following DELETE creates next-key lock for @@ -36,24 +36,26 @@ SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL first_ins_row_inserted WAIT_FOR fir SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_locked'; SET DEBUG_SYNC = 'lock_wait_suspend_thread_enter SIGNAL second_del_locked'; -############################################################################### -# This DELETE is locked by the previous DELETE, after that DELETE is -# committed, it will still be locked by the next INSERT on delete-marked -# heap_no 2 record. After that INSERT inserted the record with heap_no 3, -# and after heap_no 2 record is purged, this DELETE will be unlocked and +############################################################################## +# trx_3 DELETE is locked by trx_1 DELETE, after trx_1 DELETE is +# committed, it will still be locked by trx_2 INSERT on delete-marked +# heap_no 2 record. After trx_2 INSERT inserted the record with heap_no 3, +# and after heap_no 2 record is purged, trx_3 DELETE will be unlocked and # must restore persistent cursor position at heap_no 3 record, as it has the # same secondary key value as former heap_no 2 record. Then it must be blocked -# by the previous INSERT, and after the INSERT is committed, it must -# delete the record, inserted by the previous INSERT, and the last INSERT(see +# by trx_2 INSERT, and after trx_2 INSERT is committed, it must +# delete the record, inserted by trx_2 INSERT, and trx_4 INSERT(see # below) must be finished without error. But instead this DELETE restores # persistent cursor position to supremum, as a result, it does not delete the -# record, inserted by the previous INSERT, and the last INSERT is finished with +# record, inserted by trx_2 INSERT, and trx_4 INSERT is finished with # duplicate key check error. ############################################################################### ---send DELETE FROM t WHERE b = 20 +--send DELETE FROM t WHERE b = 20 # trx_3 --connection default SET DEBUG_SYNC = 'now WAIT_FOR second_del_locked'; +SET @saved_dbug = @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point"; SET DEBUG_SYNC = 'now SIGNAL first_del_cont'; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_row_inserted'; --connection con_del_1 @@ -61,7 +63,8 @@ SET DEBUG_SYNC = 'now WAIT_FOR first_ins_row_inserted'; --connection default --disconnect prevent_purge ---source include/wait_all_purged.inc +SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished'; +SET @@GLOBAL.debug_dbug = @saved_dbug; SET DEBUG_SYNC = 'now SIGNAL first_ins_cont'; --connection con_del_2 @@ -74,7 +77,7 @@ SET DEBUG_SYNC = 'now SIGNAL first_ins_cont'; ############################################################################### # Duplicate key error is expected if the bug is not fixed. 
############################################################################### -INSERT INTO t VALUES(30, 20); +INSERT INTO t VALUES(30, 20); # trx_4 --disconnect con_ins_1 --disconnect con_del_1 diff --git a/mysql-test/suite/innodb/t/dml_purge.test b/mysql-test/suite/innodb/t/dml_purge.test index 7034939aa4e..c13ff22572b 100644 --- a/mysql-test/suite/innodb/t/dml_purge.test +++ b/mysql-test/suite/innodb/t/dml_purge.test @@ -32,7 +32,7 @@ UPDATE t1 SET b=4 WHERE a=3; --connection default # Initiate a full purge, which should reset the DB_TRX_ID except for a=3. ---source include/wait_all_purged.inc +SET GLOBAL innodb_max_purge_lag_wait=1; # Initiate a ROLLBACK of the update, which should reset the DB_TRX_ID for a=3. --connection con1 ROLLBACK; diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test index f102185c27f..c49ab758f24 100644 --- a/mysql-test/suite/innodb/t/instant_alter_debug.test +++ b/mysql-test/suite/innodb/t/instant_alter_debug.test @@ -451,7 +451,9 @@ COMMIT; disconnect stop_purge; connection default; +let $wait_all_purged = 1; --source include/wait_all_purged.inc +let $wait_all_purged = 0; SET DEBUG_SYNC='now SIGNAL s2'; connection dml; @@ -459,6 +461,7 @@ reap; disconnect dml; connection default; +--source include/wait_all_purged.inc SET DEBUG_SYNC=RESET; DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/instant_alter_purge.test b/mysql-test/suite/innodb/t/instant_alter_purge.test index 9ccf3347d7b..88a56141a1f 100644 --- a/mysql-test/suite/innodb/t/instant_alter_purge.test +++ b/mysql-test/suite/innodb/t/instant_alter_purge.test @@ -6,6 +6,7 @@ if ($have_debug) { SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; +--source include/wait_all_purged.inc --echo # --echo # MDEV-17793 Crash in purge after instant DROP and emptying the table @@ -27,8 +28,7 @@ START TRANSACTION WITH CONSISTENT SNAPSHOT; connection default; ALTER TABLE t1 ADD COLUMN extra TINYINT UNSIGNED NOT NULL DEFAULT 42; -let $wait_all_purged= 1; ---source include/wait_all_purged.inc +SET GLOBAL innodb_max_purge_lag_wait=1; ALTER TABLE t1 DROP extra; disconnect prevent_purge; let $wait_all_purged= 0; diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h index 14cf6a2958b..b8d349af2b0 100644 --- a/storage/innobase/include/trx0purge.h +++ b/storage/innobase/include/trx0purge.h @@ -253,6 +253,18 @@ public: #endif return view.low_limit_no(); } + /** A wrapper around ReadView::sees(). */ + trx_id_t sees(trx_id_t id) const + { + /* This function may only be called by purge_coordinator_callback(). + + The purge coordinator task may call this without holding any latch, + because it is the only thread that may modify purge_sys.view. + + Any other threads that access purge_sys.view must hold purge_sys.latch, + typically via purge_sys_t::view_guard. */ + return view.sees(id); + } /** A wrapper around trx_sys_t::clone_oldest_view(). 
*/ void clone_oldest_view() { diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index 74bbc61df52..6942f5b7af8 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -46,6 +46,7 @@ Created 3/14/1997 Heikki Tuuri #include "handler.h" #include "ha_innodb.h" #include "fil0fil.h" +#include "debug_sync.h" /************************************************************************* IMPORTANT NOTE: Any operation that generates redo MUST check that there @@ -646,7 +647,18 @@ row_purge_del_mark( mem_heap_free(heap); - return(row_purge_remove_clust_if_poss(node)); + bool result = row_purge_remove_clust_if_poss(node); + +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF( + "enable_row_purge_del_mark_exit_sync_point", + debug_sync_set_action( + current_thd, + STRING_WITH_LEN( + "now SIGNAL row_purge_del_mark_finished"));); +#endif + + return result; } /** Reset DB_TRX_ID, DB_ROLL_PTR of a clustered index record diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 2f21a4de1e6..7140b2c8816 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -547,7 +547,7 @@ static void trx_purge_truncate_history() mutex_enter(&rseg->mutex); trx_purge_truncate_rseg_history(*rseg, head, !rseg->trx_ref_count && - rseg->needs_purge <= head.trx_no); + purge_sys.sees(rseg->needs_purge)); mutex_exit(&rseg->mutex); } } @@ -604,7 +604,7 @@ static void trx_purge_truncate_history() transactions to finish and to be purged. */ rseg->skip_allocation = true; - if (rseg->trx_ref_count || rseg->needs_purge > head.trx_no) + if (rseg->trx_ref_count || !purge_sys.sees(rseg->needs_purge)) { not_free: mutex_exit(&rseg->mutex); From c25b4967244b1ed7fefbc11ba7e069f5d56daed2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2023 09:18:21 +0300 Subject: [PATCH 73/76] MDEV-31382 SET GLOBAL innodb_undo_log_truncate=ON has no effect on logically empty undo logs innodb_undo_log_truncate_update(): A callback function. If SET GLOBAL innodb_undo_log_truncate=ON, invoke srv_wake_purge_thread_if_not_active(). srv_wake_purge_thread_if_not_active(): If innodb_undo_log_truncate=ON, always wake up the purge subsystem. srv_do_purge(): If the history is empty, invoke trx_purge_truncate_history() in order to free undo log pages. trx_purge_truncate_history(): If head.trx_no==0, consider the cached undo logs to be free. trx_purge(): Remove the parameter "bool truncate" and let the caller invoke trx_purge_truncate_history() directly. 
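A minimal sketch of the new behaviour (purge_stub and set_undo_log_truncate are invented names, not the plugin API): flipping the option ON wakes the purge coordinator even when the history list is empty, so logically empty undo logs can be truncated.

    #include <cstdint>

    struct purge_stub
    {
      bool enabled;
      bool paused;
      uint64_t history_len;
      bool woken = false;
      void wake() { woken = true; }
    };

    // Sketch of the sysvar update hook plus the relaxed wake-up condition:
    // wake if there is history to purge, or if truncation was just enabled.
    void set_undo_log_truncate(bool& option, bool new_value, purge_stub& purge)
    {
      option = new_value;
      if (purge.enabled && !purge.paused && (option || purge.history_len))
        purge.wake();
    }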
Reviewed by: Vladislav Lesin --- storage/innobase/handler/ha_innodb.cc | 9 ++++++- storage/innobase/include/trx0purge.h | 11 ++++++-- storage/innobase/srv/srv0srv.cc | 37 ++++++++++++++------------- storage/innobase/trx/trx0purge.cc | 18 +++++-------- 4 files changed, 43 insertions(+), 32 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 98385e3b5dd..20200515060 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -19684,10 +19684,17 @@ static MYSQL_SYSVAR_ULONG(purge_rseg_truncate_frequency, " purge rollback segment(s) on every Nth iteration of purge invocation", NULL, NULL, 128, 1, 128, 0); +static void innodb_undo_log_truncate_update(THD *thd, struct st_mysql_sys_var*, + void*, const void *save) +{ + if ((srv_undo_log_truncate= *static_cast(save))) + srv_wake_purge_thread_if_not_active(); +} + static MYSQL_SYSVAR_BOOL(undo_log_truncate, srv_undo_log_truncate, PLUGIN_VAR_OPCMDARG, "Enable or Disable Truncate of UNDO tablespace.", - NULL, NULL, FALSE); + NULL, innodb_undo_log_truncate_update, FALSE); static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h index b8d349af2b0..b693f784ffb 100644 --- a/storage/innobase/include/trx0purge.h +++ b/storage/innobase/include/trx0purge.h @@ -43,12 +43,19 @@ Remove the undo log segment from the rseg slot if it is too big for reuse. @param[in,out] mtr mini-transaction */ void trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr); + +/** +Remove unnecessary history data from rollback segments. NOTE that when this +function is called, the caller (purge_coordinator_callback) +must not have any latches on undo log pages! +*/ +void trx_purge_truncate_history(); + /** Run a purge batch. @param n_tasks number of purge tasks to submit to the queue -@param truncate whether to truncate the history at the end of the batch @return number of undo log pages handled in the batch */ -ulint trx_purge(ulint n_tasks, bool truncate); +ulint trx_purge(ulint n_tasks); /** Rollback segements from a given transaction with trx-no scheduled for purge. */ diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 50569f810ea..c3e1f6084a8 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1346,17 +1346,14 @@ static tpool::waitable_task purge_coordinator_task static tpool::timer *purge_coordinator_timer; /** Wake up the purge threads if there is work to do. 
*/ -void -srv_wake_purge_thread_if_not_active() +void srv_wake_purge_thread_if_not_active() { - ut_ad(!srv_read_only_mode); + ut_ad(!srv_read_only_mode); - if (purge_sys.enabled() && !purge_sys.paused() - && trx_sys.rseg_history_len) { - if(++purge_state.m_running == 1) { - srv_thread_pool->submit_task(&purge_coordinator_task); - } - } + if (purge_sys.enabled() && !purge_sys.paused() && + (srv_undo_log_truncate || trx_sys.rseg_history_len) && + ++purge_state.m_running == 1) + srv_thread_pool->submit_task(&purge_coordinator_task); } /** @return whether the purge tasks are active */ @@ -1811,8 +1808,8 @@ static size_t srv_do_purge(ulint* n_total_purged) n_threads = n_use_threads = srv_n_purge_threads; srv_purge_thread_count_changed = 0; } else if (trx_sys.rseg_history_len > rseg_history_len - || (srv_max_purge_lag > 0 - && rseg_history_len > srv_max_purge_lag)) { + || (srv_max_purge_lag > 0 + && rseg_history_len > srv_max_purge_lag)) { /* History length is now longer than what it was when we took the last snapshot. Use more threads. */ @@ -1838,15 +1835,19 @@ static size_t srv_do_purge(ulint* n_total_purged) /* Take a snapshot of the history list before purge. */ if (!(rseg_history_len = trx_sys.rseg_history_len)) { - break; + n_pages_purged = 0; + goto truncate; } - n_pages_purged = trx_purge( - n_use_threads, - !(++count % srv_purge_rseg_truncate_frequency) - || purge_sys.truncate.current - || (srv_shutdown_state != SRV_SHUTDOWN_NONE - && srv_fast_shutdown == 0)); + n_pages_purged = trx_purge(n_use_threads); + + if (!(++count % srv_purge_rseg_truncate_frequency) + || purge_sys.truncate.current + || (srv_shutdown_state != SRV_SHUTDOWN_NONE + && srv_fast_shutdown == 0)) { +truncate: + trx_purge_truncate_history(); + } *n_total_purged += n_pages_purged; } while (n_pages_purged > 0 && !purge_sys.paused() diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index 7140b2c8816..c6adaf5f2bf 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -522,10 +522,11 @@ __attribute__((optimize(0))) # endif #endif /** -Removes unnecessary history data from rollback segments. NOTE that when this -function is called, the caller must not have any latches on undo log pages! +Remove unnecessary history data from rollback segments. NOTE that when this +function is called, the caller (purge_coordinator_callback) +must not have any latches on undo log pages! */ -static void trx_purge_truncate_history() +void trx_purge_truncate_history() { ut_ad(purge_sys.head <= purge_sys.tail); purge_sys_t::iterator &head= purge_sys.head.trx_no @@ -618,7 +619,7 @@ static void trx_purge_truncate_history() for (const trx_undo_t *undo= UT_LIST_GET_FIRST(rseg->undo_cached); undo; undo= UT_LIST_GET_NEXT(undo_list, undo)) { - if (head.trx_no < undo->trx_id) + if (head.trx_no && head.trx_no < undo->trx_id) goto not_free; else cached+= undo->size; @@ -731,7 +732,7 @@ static void trx_purge_truncate_history() ut_ad(rseg->id == i); ut_ad(rseg->is_persistent()); ut_ad(!rseg->trx_ref_count); - ut_ad(rseg->needs_purge <= head.trx_no); + ut_ad(!head.trx_no || rseg->needs_purge <= head.trx_no); ut_d(const auto old_page= rseg->page_no); buf_block_t *rblock= trx_rseg_header_create(&space, i, @@ -1235,9 +1236,8 @@ static void trx_purge_wait_for_workers_to_complete() /** Run a purge batch. 
@param n_tasks number of purge tasks to submit to the queue -@param truncate whether to truncate the history at the end of the batch @return number of undo log pages handled in the batch */ -ulint trx_purge(ulint n_tasks, bool truncate) +ulint trx_purge(ulint n_tasks) { que_thr_t* thr = NULL; ulint n_pages_handled; @@ -1271,10 +1271,6 @@ ulint trx_purge(ulint n_tasks, bool truncate) trx_purge_wait_for_workers_to_complete(); - if (truncate) { - trx_purge_truncate_history(); - } - MONITOR_INC_VALUE(MONITOR_PURGE_INVOKED, 1); MONITOR_INC_VALUE(MONITOR_PURGE_N_PAGE_HANDLED, n_pages_handled); From 6882eeabb07d6322870d7d313d65dbdc027f102a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2023 09:23:01 +0300 Subject: [PATCH 74/76] MDEV-30483 fixup: Declare the test plugin for Debian --- debian/mariadb-test.install | 1 + 1 file changed, 1 insertion(+) diff --git a/debian/mariadb-test.install b/debian/mariadb-test.install index e587a3bf73d..ca463bdbca9 100644 --- a/debian/mariadb-test.install +++ b/debian/mariadb-test.install @@ -22,6 +22,7 @@ usr/lib/mysql/plugin/qa_auth_interface.so usr/lib/mysql/plugin/qa_auth_server.so usr/lib/mysql/plugin/test_sql_service.so usr/lib/mysql/plugin/test_versioning.so +usr/lib/mysql/plugin/type_mysql_timestamp.so usr/share/man/man1/mariadb-client-test-embedded.1 usr/share/man/man1/mariadb-client-test.1 usr/share/man/man1/mariadb-test-embedded.1 From 21031b24fc6d10921edbe2f57a212bf48dc02969 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2023 09:38:03 +0300 Subject: [PATCH 75/76] Suppress an occasional buffer pool warning --- mysql-test/suite/innodb/t/purge_secondary.test | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index 1a0d178f66a..a7f75f56b53 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -1,6 +1,10 @@ --source include/have_innodb.inc --source include/have_sequence.inc +--disable_query_log +call mtr.add_suppression("InnoDB: Difficult to find free blocks in the buffer pool"); +--enable_query_log + # Ensure that the history list length will actually be decremented by purge. SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; From d3eefbaa55edb585e4fbf8f09ad4141c3be900e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 8 Jun 2023 10:40:48 +0300 Subject: [PATCH 76/76] MDEV-31355 fixup: Adjust one more test The test gcol.gcol_purge would reliably hang on 10.6 on a Microsoft Windows builder without this adjustment. A similar adjustment was applied in commit 3e40f9a7f3bbe82d96c8acccbb017deebfa00647 to the tests innodb.dml_purge and innodb.instant_alter_purge. 
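For contrast, a rough sketch of the two wait conditions involved (invented helpers; it is assumed that innodb_max_purge_lag_wait waits until the history length has dropped to the given value or below): the old wait_all_purged.inc waited for a completely empty history list, while the new wait only requires the backlog to shrink to at most the given value.

    #include <cstdint>

    // Old condition used by wait_all_purged.inc: nothing may remain.
    bool wait_all_purged_done(uint64_t history_len)
    { return history_len == 0; }

    // Approximate condition behind SET GLOBAL innodb_max_purge_lag_wait=1.
    bool purge_lag_wait_done(uint64_t history_len, uint64_t max_lag)
    { return history_len <= max_lag; }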
--- mysql-test/suite/gcol/r/gcol_purge.result | 2 +- mysql-test/suite/gcol/t/gcol_purge.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/gcol/r/gcol_purge.result b/mysql-test/suite/gcol/r/gcol_purge.result index 11063c7cd6f..a130485f219 100644 --- a/mysql-test/suite/gcol/r/gcol_purge.result +++ b/mysql-test/suite/gcol/r/gcol_purge.result @@ -16,7 +16,7 @@ INSERT INTO t1 (f1, f2) VALUES(1,2); set global debug_dbug="+d,ib_purge_virtual_index_callback"; connection con1; COMMIT; -InnoDB 0 transactions not purged +SET GLOBAL innodb_max_purge_lag_wait=1; connection con2; commit; disconnect con1; diff --git a/mysql-test/suite/gcol/t/gcol_purge.test b/mysql-test/suite/gcol/t/gcol_purge.test index ecfd89f4469..8fff375cdc2 100644 --- a/mysql-test/suite/gcol/t/gcol_purge.test +++ b/mysql-test/suite/gcol/t/gcol_purge.test @@ -23,7 +23,7 @@ set global debug_dbug="+d,ib_purge_virtual_index_callback"; connection con1; COMMIT; ---source ../innodb/include/wait_all_purged.inc +SET GLOBAL innodb_max_purge_lag_wait=1; connection con2; commit;