diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS deleted file mode 100644 index 538007ed62d..00000000000 --- a/.github/CODEOWNERS +++ /dev/null @@ -1,2 +0,0 @@ -/debian @ottok - diff --git a/cmake/cpack_rpm.cmake b/cmake/cpack_rpm.cmake index 434ae1bb495..32490328d75 100644 --- a/cmake/cpack_rpm.cmake +++ b/cmake/cpack_rpm.cmake @@ -316,7 +316,7 @@ ELSEIF(RPM MATCHES "sles") ENDIF() # MDEV-24629, we need it outside of ELSIFs -IF(RPM MATCHES "fedora3[234]") +IF(RPM MATCHES "fedora") ALTERNATIVE_NAME("common" "mariadb-connector-c-config" ${MARIADB_CONNECTOR_C_VERSION}-1) ENDIF() diff --git a/debian/additions/innotop/innotop b/debian/additions/innotop/innotop old mode 100644 new mode 100755 index d47b122f29c..e88de561157 --- a/debian/additions/innotop/innotop +++ b/debian/additions/innotop/innotop @@ -20,6 +20,9 @@ # Street, Fifth Floor, Boston, MA 02110-1335 USA use strict; +use warnings; +use utf8; +use feature ':5.16'; use warnings FATAL => 'all'; our $VERSION = '1.11.4'; @@ -265,7 +268,7 @@ sub get_dbh { $dbh->do($sql); MKDEBUG && _d('Enabling charset for STDOUT'); if ( $charset eq 'utf8' ) { - binmode(STDOUT, ':utf8') + binmode(STDOUT, ':encoding(UTF-8)') or die "Can't binmode(STDOUT, ':utf8'): $OS_ERROR"; } else { @@ -612,6 +615,9 @@ sub ts_to_string { sub parse_innodb_timestamp { my $text = shift; + if ( ! defined $text ) { + return (0, 0, 0, 0, 0, 0); + } my ( $y, $m, $d, $h, $i, $s ) = $text =~ m/^(\d\d)(\d\d)(\d\d) +(\d+):(\d+):(\d+)$/; die("Can't get timestamp from $text\n") unless $y; @@ -803,7 +809,8 @@ sub parse_fk_transaction_error { # TODO: write some tests for this sub parse_innodb_record_dump { my ( $dump, $complete, $debug ) = @_; - return undef unless $dump; + # Use bare return as recommend in page 199 of PBP + return unless $dump; my $result = {}; @@ -6769,6 +6776,9 @@ sub set_precision { my ( $num, $precision ) = @_; $num = 0 unless defined $num; $precision = $config{num_digits}->{val} if !defined $precision; + if ( $num eq "" ) { + $num = int(0); + } sprintf("%.${precision}f", $num); } @@ -6777,6 +6787,9 @@ sub set_precision { sub percent { my ( $num ) = @_; $num = 0 unless defined $num; + if ( $num eq "" ) { + $num = int(0); + } my $digits = $config{num_digits}->{val}; return sprintf("%.${digits}f", $num * 100) . ($config{show_percent}->{val} ? '%' : ''); @@ -6841,7 +6854,7 @@ sub make_color_func { push @criteria, "( defined \$set->{$spec->{col}} && \$set->{$spec->{col}} $spec->{op} $val ) { return '$spec->{color}'; }"; } - return undef unless @criteria; + return unless @criteria; my $sub = eval 'sub { my ( $set ) = @_; if ' . join(" elsif ", @criteria) . 
'}'; die if $EVAL_ERROR; return $sub; @@ -7521,10 +7534,10 @@ sub choose_connections { sub do_stmt { my ( $cxn, $stmt_name, @args ) = @_; - return undef if $file; + return if $file; # Test if the cxn should not even be tried - return undef if $dbhs{$cxn} + return if $dbhs{$cxn} && $dbhs{$cxn}->{failed} && ( !$dbhs{$cxn}->{dbh} || !$dbhs{$cxn}->{dbh}->{Active} || $dbhs{$cxn}->{mode} eq $config{mode}->{val} ); @@ -7596,10 +7609,10 @@ sub handle_cxn_error { sub do_query { my ( $cxn, $query ) = @_; - return undef if $file; + return if $file; # Test if the cxn should not even be tried - return undef if $dbhs{$cxn} + return if $dbhs{$cxn} && $dbhs{$cxn}->{failed} && ( !$dbhs{$cxn}->{dbh} || !$dbhs{$cxn}->{dbh}->{Active} || $dbhs{$cxn}->{mode} eq $config{mode}->{val} ); @@ -7781,7 +7794,7 @@ sub compile_select_stmt { sub compile_filter { my ( $text ) = @_; my ( $sub, $err ); - eval "\$sub = sub { my \$set = shift; $text }"; + eval { $sub = sub { my $set = shift; $text } }; if ( $EVAL_ERROR ) { $EVAL_ERROR =~ s/at \(eval.*$//; $sub = sub { return $EVAL_ERROR }; @@ -8013,7 +8026,7 @@ sub load_config_plugins { # First, find a list of all plugins that exist on disk, and get information about them. my $dir = $config{plugin_dir}->{val}; - foreach my $p_file ( <$dir/*.pm> ) { + foreach my $p_file (glob($dir."/*.pm")) { my ($package, $desc); eval { open my $p_in, "<", $p_file or die $OS_ERROR; @@ -9192,7 +9205,7 @@ sub switch_var_set { # edit_stmt_sleep_times {{{3 sub edit_stmt_sleep_times { $clear_screen_sub->(); - my $stmt = prompt_list('Specify a statement', '', sub { return sort keys %stmt_maker_for }); + my $stmt = prompt_list('Specify a statement', '', sub { my @tmparray = sort keys %stmt_maker_for; return @tmparray }); return unless $stmt && exists $stmt_maker_for{$stmt}; $clear_screen_sub->(); my $curr_val = $stmt_sleep_time_for{$stmt} || 0; @@ -9843,7 +9856,7 @@ sub get_slave_status { sub is_func { my ( $word ) = @_; return defined(&$word) - || eval "my \$x= sub { $word }; 1" + || eval { my $x = sub { $word }; 1 } || $EVAL_ERROR !~ m/^Bareword/; } diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index 4fb69be0947..9c658b6df49 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -115,7 +115,6 @@ case "${LSBNAME}" in # Debian "buster") - add_lsb_base_depends disable_libfmt replace_uring_with_aio if [ ! "$architecture" = amd64 ] @@ -123,11 +122,10 @@ in disable_pmem fi ;& - "bullseye"|"bookworm") - if [[ "${LSBNAME}" == "bullseye" ]] - then - add_lsb_base_depends - fi + "bullseye") + add_lsb_base_depends + ;& + "bookworm") # mariadb-plugin-rocksdb in control is 4 arches covered by the distro rocksdb-tools # so no removal is necessary. if [[ ! "$architecture" =~ amd64|arm64|ppc64el ]] @@ -145,17 +143,17 @@ in ;; # Ubuntu "bionic") - add_lsb_base_depends remove_rocksdb_tools [ "$architecture" != amd64 ] && disable_pmem ;& "focal") - add_lsb_base_depends replace_uring_with_aio disable_libfmt ;& - "impish"|"jammy"|"kinetic"|"lunar") + "jammy"|"kinetic") add_lsb_base_depends + ;& + "lunar"|"mantic") # mariadb-plugin-rocksdb s390x not supported by us (yet) # ubuntu doesn't support mips64el yet, so keep this just # in case something changes. 
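Editor's note (not part of the patch): the debian/autobake-deb.sh hunk above relies on bash's ';&' case terminator, which falls through into the body of the next arm without re-testing its pattern. That is why add_lsb_base_depends could be dropped from the "buster" arm and given a dedicated "bullseye" arm that "buster" still reaches, while "bookworm" (and "lunar"/"mantic" on the Ubuntu side) no longer runs it. A minimal sketch of the fall-through behaviour follows; the codename value and echo messages are hypothetical, only the ';&' mechanics mirror the script.

  #!/bin/bash
  # Sketch only: demonstrates ';&' fall-through as used in autobake-deb.sh.
  codename=buster
  case "$codename" in
    "buster")
      echo "buster-only tweaks"            # e.g. disable_libfmt, replace_uring_with_aio
      ;&                                   # fall through: also execute the next arm
    "bullseye")
      echo "buster and bullseye tweaks"    # e.g. add_lsb_base_depends
      ;&                                   # keep falling through
    "bookworm")
      echo "tweaks shared by buster, bullseye and bookworm"
      ;;                                   # ';;' stops the chain here
  esac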
diff --git a/debian/mariadb-common.postinst b/debian/mariadb-common.postinst index 12f65bd3d92..53d131a3929 100644 --- a/debian/mariadb-common.postinst +++ b/debian/mariadb-common.postinst @@ -35,7 +35,7 @@ case "$1" in then update-alternatives --install /etc/mysql/my.cnf my.cnf "/etc/mysql/mariadb.cnf" 500 || true fi - ;; + ;; esac #DEBHELPER# diff --git a/debian/mariadb-common.postrm b/debian/mariadb-common.postrm index d0bfa266b7d..2548733a1b9 100644 --- a/debian/mariadb-common.postrm +++ b/debian/mariadb-common.postrm @@ -10,7 +10,7 @@ case "$1" in then /usr/share/mysql-common/configure-symlinks remove mariadb "/etc/mysql/mariadb.cnf" fi - ;; + ;; esac #DEBHELPER# diff --git a/debian/mariadb-server.postinst b/debian/mariadb-server.postinst index a5ab6e8e1dc..8486a5be46d 100644 --- a/debian/mariadb-server.postinst +++ b/debian/mariadb-server.postinst @@ -1,9 +1,15 @@ #!/bin/bash set -e +# shellcheck source=/dev/null . /usr/share/debconf/confmodule -if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi +if [ -n "$DEBIAN_SCRIPT_DEBUG" ] +then + set -v -x + DEBIAN_SCRIPT_TRACE=1 +fi + ${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 } export PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin @@ -21,7 +27,9 @@ case "$1" in # and because changed configuration options should take effect immediately. # In case the server wasn't running at all it should be ok if the stop # script fails. I can't tell at this point because of the cleaned /run. - set +e; invoke-rc.d mariadb stop; set -e + set +e + invoke-rc.d mariadb stop + set -e # An existing /etc/init.d/mysql might be on the system if there was a # previous MySQL or MariaDB installation, since /etc/init.d files are @@ -61,21 +69,26 @@ case "$1" in # If the following symlink exists, it is a preserved copy the old data dir # created by the preinst script during a upgrade that would have otherwise # been replaced by an empty mysql dir. This should restore it. - for dir in DATADIR LOGDIR; do + for dir in DATADIR LOGDIR + do - if [ "$dir" = "DATADIR" ]; then + if [ "$dir" = "DATADIR" ] + then targetdir=$mysql_datadir else targetdir=$mysql_logdir fi savelink="$mysql_upgradedir/$dir.link" - if [ -L "$savelink" ]; then + if [ -L "$savelink" ] + then # If the targetdir was a symlink before we upgraded it is supposed # to be either still be present or not existing anymore now. - if [ -L "$targetdir" ]; then + if [ -L "$targetdir" ] + then rm "$savelink" - elif [ ! -d "$targetdir" ]; then + elif [ ! -d "$targetdir" ] + then mv "$savelink" "$targetdir" else # this should never even happen, but just in case... @@ -97,7 +110,7 @@ this all away. EOF fi fi - rmdir $mysql_upgradedir 2>/dev/null || true + rmdir $mysql_upgradedir 2>/dev/null || true done @@ -109,17 +122,29 @@ EOF # This direct update is needed to enable an authentication mechanism to # perform mariadb-upgrade, (MDEV-22678). To keep the impact minimal, we # skip innodb and set key-buffer-size to 0 as it isn't reused. - if [ -f "$mysql_datadir"/auto.cnf ] && [ -f "$mysql_datadir"/mysql/user.MYD ] && - [ ! lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null ] && [ ! -f "$mysql_datadir"/undo_001 ]; then - echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | - mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null + if [ -f "$mysql_datadir/auto.cnf" ] && + [ -f "$mysql_datadir/mysql/user.MYD" ] && + ! lsof -nt "$mysql_datadir"/mysql/user.MYD > /dev/null && + [ ! 
-f "$mysql_datadir/undo_001" ] + then + echo "UPDATE mysql.user SET plugin='unix_socket' WHERE plugin='auth_socket';" | + mariadbd --skip-innodb --key_buffer_size=0 --default-storage-engine=MyISAM --bootstrap 2> /dev/null fi # Ensure the existence and right permissions for the database and # log files. Use mkdir option 'Z' to create with correct SELinux context. - if [ ! -d "$mysql_statedir" ] && [ ! -L "$mysql_statedir" ]; then mkdir -Z "$mysql_statedir"; fi - if [ ! -d "$mysql_datadir" ] && [ ! -L "$mysql_datadir" ]; then mkdir -Z "$mysql_datadir" ; fi - if [ ! -d "$mysql_logdir" ] && [ ! -L "$mysql_logdir" ]; then mkdir -Z "$mysql_logdir" ; fi + if [ ! -d "$mysql_statedir" ] && [ ! -L "$mysql_statedir" ] + then + mkdir -Z "$mysql_statedir" + fi + if [ ! -d "$mysql_datadir" ] && [ ! -L "$mysql_datadir" ] + then + mkdir -Z "$mysql_datadir" + fi + if [ ! -d "$mysql_logdir" ] && [ ! -L "$mysql_logdir" ] + then + mkdir -Z "$mysql_logdir" + fi # When creating an ext3 jounal on an already mounted filesystem like e.g. # /var/lib/mysql, you get a .journal file that is not modifiable by chown. # The mysql_statedir must not be writable by the mysql user under any @@ -168,8 +193,8 @@ EOF # Debian: can safely run on upgrades with existing databases set +e bash /usr/bin/mariadb-install-db --rpm --cross-bootstrap --user=mysql \ - --disable-log-bin --skip-test-db 2>&1 | \ - $ERR_LOGGER + --disable-log-bin --skip-test-db 2>&1 | \ + $ERR_LOGGER set -e # On new installations root user can connect via unix_socket. @@ -180,26 +205,30 @@ EOF # --defaults-file option for tools (for the sake of upgrades) # and thus need /etc/mysql/debian.cnf to exist, even if it's empty. # In the long run the goal is to obsolete this file. - dc=$mysql_cfgdir/debian.cnf; - if [ ! -d "$mysql_cfgdir" ]; then + dc="$mysql_cfgdir/debian.cnf" + if [ ! -d "$mysql_cfgdir" ] + then install -o 0 -g 0 -m 0755 -d $mysql_cfgdir fi - if [ ! -e "$dc" ]; then - cat /dev/null > $dc - echo "# THIS FILE IS OBSOLETE. STOP USING IT IF POSSIBLE." >>$dc - echo "# This file exists only for backwards compatibility for" >>$dc - echo "# tools that run '--defaults-file=/etc/mysql/debian.cnf'" >>$dc - echo "# and have root level access to the local filesystem." >>$dc - echo "# With those permissions one can run 'mariadb' directly" >>$dc - echo "# anyway thanks to unix socket authentication and hence" >>$dc - echo "# this file is useless. See package README for more info." >>$dc - echo "[client]" >>$dc - echo "host = localhost" >>$dc - echo "user = root" >>$dc - echo "[mysql_upgrade]" >>$dc - echo "host = localhost" >>$dc - echo "user = root" >>$dc - echo "# THIS FILE WILL BE REMOVED IN A FUTURE DEBIAN RELEASE." >>$dc + if [ ! -e "$dc" ] + then + cat /dev/null > $dc + { + echo "# THIS FILE IS OBSOLETE. STOP USING IT IF POSSIBLE."; + echo "# This file exists only for backwards compatibility for"; + echo "# tools that run '--defaults-file=/etc/mysql/debian.cnf'"; + echo "# and have root level access to the local filesystem."; + echo "# With those permissions one can run 'mariadb' directly"; + echo "# anyway thanks to unix socket authentication and hence"; + echo "# this file is useless. 
See package README for more info."; + echo "[client]"; + echo "host = localhost"; + echo "user = root"; + echo "[mysql_upgrade]"; + echo "host = localhost"; + echo "user = root"; + echo "# THIS FILE WILL BE REMOVED IN A FUTURE DEBIAN RELEASE."; + } >> $dc fi # Keep it only root-readable, as it always was chown 0:0 $dc @@ -212,8 +241,10 @@ EOF # on by default) to work both to disable a default profile, and to keep # any profile installed and maintained by users themselves. profile="/etc/apparmor.d/usr.sbin.mariadbd" - if [ -f "$profile" ] && aa-status --enabled 2>/dev/null; then - if grep -q /usr/sbin/mariadbd "$profile" 2>/dev/null ; then + if [ -f "$profile" ] && aa-status --enabled 2>/dev/null + then + if grep -q /usr/sbin/mariadbd "$profile" 2>/dev/null + then apparmor_parser -r "$profile" || true else echo "/usr/sbin/mariadbd { }" | apparmor_parser --remove 2>/dev/null || true @@ -232,24 +263,24 @@ EOF # Note that file cannot be empty, otherwise systemd version in Ubuntu Bionic # will think the service is masked echo "# empty placeholder" > /etc/systemd/system/mariadb.service.d/migrated-from-my.cnf-settings.conf - - ;; + ;; abort-upgrade|abort-remove|abort-configure) - ;; + ;; triggered) - if [ -d /run/systemd/system ]; then + if [ -d /run/systemd/system ] + then systemctl --system daemon-reload else invoke-rc.d mariadb restart fi - ;; + ;; *) echo "postinst called with unknown argument '$1'" 1>&2 exit 1 - ;; + ;; esac db_stop # in case invoke fails @@ -259,19 +290,23 @@ db_stop # in case invoke fails # systemctl. If we upgrade from MySQL mysql.service may be masked, which also # means init.d script is disabled. Unmask mysql service explicitly. # Check first that the command exists, to avoid emitting any warning messages. -if [ -x "$(command -v deb-systemd-helper)" ]; then +if [ -x "$(command -v deb-systemd-helper)" ] +then deb-systemd-helper unmask mysql.service > /dev/null fi #DEBHELPER# # Modified dh_systemd_start snippet that's not added automatically -if [ -d /run/systemd/system ]; then - systemctl --system daemon-reload >/dev/null || true - deb-systemd-invoke start mariadb.service >/dev/null || true -# Modified dh_installinit snippet to only run with sysvinit -elif [ -x "/etc/init.d/mariadb" ]; then - if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ]; then - invoke-rc.d mariadb start || exit $? - fi +if [ -d /run/systemd/system ] +then + systemctl --system daemon-reload >/dev/null || true + deb-systemd-invoke start mariadb.service >/dev/null || true + # Modified dh_installinit snippet to only run with sysvinit +elif [ -x "/etc/init.d/mariadb" ] +then + if [ "$1" = "configure" ] || [ "$1" = "abort-upgrade" ] + then + invoke-rc.d mariadb start || exit $? + fi fi diff --git a/debian/mariadb-server.postrm b/debian/mariadb-server.postrm index 035f10bbb8a..4b62a8910fa 100644 --- a/debian/mariadb-server.postrm +++ b/debian/mariadb-server.postrm @@ -1,9 +1,15 @@ #!/bin/bash set -e +# shellcheck source=/dev/null . /usr/share/debconf/confmodule -if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi +if [ -n "$DEBIAN_SCRIPT_DEBUG" ] +then + set -v -x + DEBIAN_SCRIPT_TRACE=1 +fi + ${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 } MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" @@ -12,38 +18,43 @@ MYADMIN="/usr/bin/mysqladmin --defaults-file=/etc/mysql/debian.cnf" # do it himself. No database directories should be removed while the server # is running! Another mariadbd in e.g. a different chroot is fine for us. 
stop_server() { - # Return immediately if there are no mysqld processes running - # as there is no point in trying to shutdown in that case. - if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi + # Return immediately if there are no mysqld processes running + # as there is no point in trying to shutdown in that case. + if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null + then + return + fi - set +e - invoke-rc.d mariadb stop - invoke-rc.d mysql stop # Backwards compatibility - errno=$? - set -e + set +e + invoke-rc.d mariadb stop + invoke-rc.d mysql stop # Backwards compatibility + errno=$? + set -e - # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 -a "$errno" != 100 ]; then - echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 - echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 - echo "Stop it yourself and try again!" 1>&2 - db_stop - exit 1 - fi + # systemctl could emit exit code 100=no init script (fresh install) + if [ "$errno" != 0 ] && [ "$errno" != 100 ] + then + echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 + echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 + echo "Stop it yourself and try again!" 1>&2 + db_stop + exit 1 + fi } case "$1" in purge|remove|upgrade|failed-upgrade|abort-install|abort-upgrade|disappear) - if [ -n "`$MYADMIN ping 2>/dev/null`" ]; then + if [ -n "$($MYADMIN ping 2>/dev/null)" ] + then stop_server sleep 2 fi - ;; + ;; *) echo "postrm called with unknown argument '$1'" 1>&2 exit 1 - ;; + ;; esac # @@ -51,7 +62,8 @@ esac # - Remove the mysql user only after all his owned files are purged. # - Cleanup the initscripts only if this was the last provider of them # -if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-__MARIADB_MAJOR_VER__.flag" ]; then +if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-__MARIADB_MAJOR_VER__.flag" ] +then # we remove the mysql user only after all his owned files are purged rm -f /var/log/mysql.{log,err}{,.0,.[1234567].gz} rm -rf /var/log/mysql @@ -59,7 +71,8 @@ if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-__MARIADB_MAJOR_VER__.flag" db_input high "mariadb-server/postrm_remove_databases" || true db_go || true db_get "mariadb-server/postrm_remove_databases" || true - if [ "$RET" = "true" ]; then + if [ "$RET" = "true" ] + then # never remove the debian.cnf when the databases are still existing # else we ran into big trouble on the next install! 
rm -f /etc/mysql/debian.cnf @@ -72,9 +85,9 @@ if [ "$1" = "purge" ] && [ -f "/var/lib/mysql/debian-__MARIADB_MAJOR_VER__.flag" if [ -d /var/lib/mysql ] then find /var/lib/mysql -mindepth 1 \ - -not -path '*/lost+found/*' -not -name 'lost+found' \ - -not -path '*/lost@002bfound/*' -not -name 'lost@002bfound' \ - -delete + -not -path '*/lost+found/*' -not -name 'lost+found' \ + -not -path '*/lost@002bfound/*' -not -name 'lost@002bfound' \ + -delete # "|| true" still needed as rmdir still exits with non-zero if # /var/lib/mysql is a mount point @@ -89,6 +102,7 @@ fi #DEBHELPER# # Modified dh_systemd_start snippet that's not added automatically -if [ -d /run/systemd/system ]; then - systemctl --system daemon-reload >/dev/null || true +if [ -d /run/systemd/system ] +then + systemctl --system daemon-reload >/dev/null || true fi diff --git a/debian/mariadb-server.preinst b/debian/mariadb-server.preinst index 46659344218..eb0b825ca28 100644 --- a/debian/mariadb-server.preinst +++ b/debian/mariadb-server.preinst @@ -7,14 +7,20 @@ # * abort-upgrade # +# shellcheck source=/dev/null . /usr/share/debconf/confmodule # Just kill the invalid insserv.conf.d directory without fallback -if [ -d "/etc/insserv.conf.d/mariadb/" ]; then - rm -rf "/etc/insserv.conf.d/mariadb/" +if [ -d "/etc/insserv.conf.d/mariadb/" ] +then + rm -rf "/etc/insserv.conf.d/mariadb/" fi -if [ -n "$DEBIAN_SCRIPT_DEBUG" ]; then set -v -x; DEBIAN_SCRIPT_TRACE=1; fi +if [ -n "$DEBIAN_SCRIPT_DEBUG" ] +then + set -v -x + DEBIAN_SCRIPT_TRACE=1 +fi ${DEBIAN_SCRIPT_TRACE:+ echo "#42#DEBUG# RUNNING $0 $*" 1>&2 } export PATH=$PATH:/sbin:/usr/sbin:/bin:/usr/bin @@ -25,25 +31,29 @@ mysql_upgradedir=/var/lib/mysql-upgrade # do it himself. No database directories should be removed while the server # is running! Another mariadbd in e.g. a different chroot is fine for us. stop_server() { - # Return immediately if there are no mysqld processes running on a host - # (leave containerized processes with the same name in other namespaces) - # as there is no point in trying to shutdown in that case. - if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null; then return; fi + # Return immediately if there are no mysqld processes running on a host + # (leave containerized processes with the same name in other namespaces) + # as there is no point in trying to shutdown in that case. + if ! pgrep -x --nslist pid --ns $$ "mysqld|mariadbd" > /dev/null + then + return + fi - set +e - invoke-rc.d mariadb stop - invoke-rc.d mysql stop # Backwards compatibility - errno=$? - set -e + set +e + invoke-rc.d mariadb stop + invoke-rc.d mysql stop # Backwards compatibility + errno=$? + set -e - # systemctl could emit exit code 100=no init script (fresh install) - if [ "$errno" != 0 -a "$errno" != 100 ]; then - echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 - echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 - echo "Stop it yourself and try again!" 1>&2 - db_stop - exit 1 - fi + # systemctl could emit exit code 100=no init script (fresh install) + if [ "$errno" != 0 ] && [ "$errno" != 100 ] + then + echo "Attempt to stop MariaDB/MySQL server returned exitcode $errno" 1>&2 + echo "There is a MariaDB/MySQL server running, but we failed in our attempts to stop it." 1>&2 + echo "Stop it yourself and try again!" 
1>&2 + db_stop + exit 1 + fi } ################################ main() ########################## @@ -56,7 +66,7 @@ max_upgradeable_version=5.7 # Check if a flag file is found that indicates a previous MariaDB or MySQL # version was installed. If multiple flags are found, check which one was # the biggest version number. -for flag in $mysql_datadir/debian-*.flag +for flag in "$mysql_datadir"/debian-*.flag do # The for loop leaves $flag as the query string if there are no results, @@ -91,7 +101,7 @@ done # Downgrade is detected if the flag version is bigger than $this_version # (e.g. 10.1 > 10.0) or the flag version is smaller than 10.0 but bigger # than $max_upgradeable_version. -if [ ! -z "$found_version" ] +if [ -n "$found_version" ] then # MySQL 8.0 in Ubuntu has a bug in packaging and the file is name wrongly @@ -111,7 +121,7 @@ then fi if dpkg --compare-versions "$found_version" '>>' "$max_upgradeable_version" \ - && dpkg --compare-versions "$found_version" '<<' "10.0" + && dpkg --compare-versions "$found_version" '<<' "10.0" then downgrade_detected=true fi @@ -133,7 +143,7 @@ fi # Don't abort dpkg if downgrade is detected (as was done previously). # Instead simply move the old datadir and create a new for this_version. -if [ ! -z "$downgrade_detected" ] +if [ -n "$downgrade_detected" ] then db_input critical "mariadb-server/old_data_directory_saved" || true db_go @@ -154,7 +164,8 @@ stop_server # If we use NIS then errors should be tolerated. It's up to the # user to ensure that the mysql user is correctly setup. # Beware that there are two ypwhich one of them needs the 2>/dev/null! -if test -n "$(which ypwhich 2>/dev/null)" && ypwhich >/dev/null 2>&1; then +if test -n "$(which ypwhich 2>/dev/null)" && ypwhich >/dev/null 2>&1 +then set +e fi @@ -169,23 +180,25 @@ fi # # creating mysql group if he isn't already there -if ! getent group mysql >/dev/null; then - # Adding system group: mysql. - addgroup --system mysql >/dev/null +if ! getent group mysql >/dev/null +then + # Adding system group: mysql. + addgroup --system mysql >/dev/null fi # creating mysql user if he isn't already there -if ! getent passwd mysql >/dev/null; then - # Adding system user: mysql. - adduser \ - --system \ - --disabled-login \ - --ingroup mysql \ - --no-create-home \ - --home /nonexistent \ - --gecos "MySQL Server" \ - --shell /bin/false \ - mysql >/dev/null +if ! getent passwd mysql >/dev/null +then + # Adding system user: mysql. + adduser \ + --system \ + --disabled-login \ + --ingroup mysql \ + --no-create-home \ + --home /nonexistent \ + --gecos "MySQL Server" \ + --shell /bin/false \ + mysql >/dev/null fi # end of NIS tolerance zone @@ -193,7 +206,8 @@ set -e # if there's a symlink, let's store where it's pointing, because otherwise # it's going to be lost in some situations -for dir in DATADIR LOGDIR; do +for dir in DATADIR LOGDIR +do checkdir=$(eval echo "$"$dir) if [ -L "$checkdir" ]; then # Use mkdir option 'Z' to create with correct SELinux context. @@ -203,18 +217,29 @@ for dir in DATADIR LOGDIR; do done # creating mysql home directory -if [ ! -d $mysql_datadir ] && [ ! -L $mysql_datadir ]; then - # Use mkdir option 'Z' to create with correct SELinux context. +if [ ! -d $mysql_datadir ] && [ ! -L $mysql_datadir ] +then + # Use mkdir option 'Z' to create with correct SELinux context. 
mkdir -Z $mysql_datadir fi -# As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 -# 4096 blocks is then lower than 4 MB -df_available_blocks="$(LC_ALL=C BLOCKSIZE='' df --output=avail "$mysql_datadir" | tail -n 1)" -if [ "$df_available_blocks" -lt "4096" ]; then - echo "ERROR: There's not enough space in $mysql_datadir/" 1>&2 - db_stop - exit 1 +# Check if MariaDB datadir is available if not fails. +# There should be symlink or directory available or something will fail. +if [ -d "$mysql_datadir" ] || [ -L "$mysql_datadir" ] +then + # As preset blocksize of GNU df is 1024 then available bytes is $df_available_blocks * 1024 + # 4096 blocks is then lower than 4 MB + df_available_blocks="$(LC_ALL=C BLOCKSIZE='' df --output=avail "$mysql_datadir" | tail -n 1)" + if [ "$df_available_blocks" -lt "4096" ] + then + echo "ERROR: There's not enough space in $mysql_datadir/" 1>&2 + db_stop + exit 1 + fi +else + echo "ERROR: There's no directory or symlink available: $mysql_datadir/" 1>&2 + db_stop + exit 1 fi # Since the home directory was created before putting the user into @@ -230,7 +255,6 @@ find $mysql_datadir -follow -not -group mysql -print0 2>/dev/null \ | xargs -0 --no-run-if-empty chgrp mysql set -e - db_stop #DEBHELPER# diff --git a/debian/mariadb-server.prerm b/debian/mariadb-server.prerm index 8fd172da9d2..2701fa5fc36 100644 --- a/debian/mariadb-server.prerm +++ b/debian/mariadb-server.prerm @@ -4,9 +4,11 @@ set -e #DEBHELPER# # Modified dh_systemd_start snippet that's not added automatically -if [ -d /run/systemd/system ]; then - deb-systemd-invoke stop mariadb.service >/dev/null -# Modified dh_installinit snippet to only run with sysvinit -elif [ -x "/etc/init.d/mariadb" ]; then - invoke-rc.d mariadb stop || exit $? +if [ -d /run/systemd/system ] +then + deb-systemd-invoke stop mariadb.service >/dev/null + # Modified dh_installinit snippet to only run with sysvinit +elif [ -x "/etc/init.d/mariadb" ] +then + invoke-rc.d mariadb stop || exit $? fi diff --git a/debian/mariadb-test.install b/debian/mariadb-test.install index 5dda38d665c..b9d99885374 100644 --- a/debian/mariadb-test.install +++ b/debian/mariadb-test.install @@ -26,6 +26,7 @@ usr/lib/mysql/plugin/qa_auth_interface.so usr/lib/mysql/plugin/qa_auth_server.so usr/lib/mysql/plugin/test_sql_service.so usr/lib/mysql/plugin/test_versioning.so +usr/lib/mysql/plugin/type_mysql_timestamp.so usr/share/man/man1/mariadb-client-test-embedded.1 usr/share/man/man1/mariadb-client-test.1 usr/share/man/man1/mariadb-test-embedded.1 diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 1ca4c5fad7e..c480dae29c4 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -849,27 +849,49 @@ void mdl_lock_all() // Convert non-null terminated filename to space name +// Note that in 10.6 the filename may be an undo file name static std::string filename_to_spacename(const void *filename, size_t len) { - // null- terminate filename - char *f = (char *)malloc(len + 1); - ut_a(f); - memcpy(f, filename, len); - f[len] = 0; - for (size_t i = 0; i < len; i++) - if (f[i] == '\\') - f[i] = '/'; - char *p = strrchr(f, '.'); - ut_a(p); - *p = 0; - char *table = strrchr(f, '/'); - ut_a(table); - *table = 0; - char *db = strrchr(f, '/'); - *table = '/'; - std::string s(db ? 
db+1 : f); - free(f); - return s; + char f[FN_REFLEN]; + char *p= 0, *table, *db; + DBUG_ASSERT(len < FN_REFLEN); + + strmake(f, (const char*) filename, len); + +#ifdef _WIN32 + for (size_t i = 0; i < len; i++) + { + if (f[i] == '\\') + f[i] = '/'; + } +#endif + + /* Remove extension, if exists */ + if (!(p= strrchr(f, '.'))) + goto err; + *p= 0; + + /* Find table name */ + if (!(table= strrchr(f, '/'))) + goto err; + *table = 0; + + /* Find database name */ + db= strrchr(f, '/'); + *table = '/'; + if (!db) + goto err; + { + std::string s(db+1); + return s; + } + +err: + /* Not a database/table. Return original (converted) name */ + if (p) + *p= '.'; // Restore removed extension + std::string s(f); + return s; } /** Report an operation to create, delete, or rename a file during backup. @@ -3137,7 +3159,7 @@ static bool xtrabackup_copy_logfile() if (log_sys.buf[recv_sys.offset] <= 1) break; - if (recv_sys.parse_mtr(STORE_NO) == recv_sys_t::OK) + if (recv_sys.parse_mtr(false) == recv_sys_t::OK) { do { @@ -3147,7 +3169,7 @@ static bool xtrabackup_copy_logfile() sequence_offset)); *seq= 1; } - while ((r= recv_sys.parse_mtr(STORE_NO)) == recv_sys_t::OK); + while ((r= recv_sys.parse_mtr(false)) == recv_sys_t::OK); if (ds_write(dst_log_file, log_sys.buf + start_offset, recv_sys.offset - start_offset)) diff --git a/include/m_string.h b/include/m_string.h index 046dc39d13a..6a645b20a7f 100644 --- a/include/m_string.h +++ b/include/m_string.h @@ -249,14 +249,15 @@ static inline void lex_string_set3(LEX_CSTRING *lex_str, const char *c_str, */ static inline int safe_strcpy(char *dst, size_t dst_size, const char *src) { - memset(dst, '\0', dst_size); - strncpy(dst, src, dst_size - 1); - /* - If the first condition is true, we are guaranteed to have src length - >= (dst_size - 1), hence safe to access src[dst_size - 1]. - */ - if (dst[dst_size - 2] != '\0' && src[dst_size - 1] != '\0') - return 1; /* Truncation of src. */ + DBUG_ASSERT(dst_size > 0); + /* Note, strncpy will zerofill end of dst if src shorter than dst_size */ + strncpy(dst, src, dst_size); + if (dst[dst_size-1]) + { + /* Ensure string is zero terminated */ + dst[dst_size-1]= 0; + return 1; + } return 0; } diff --git a/include/myisammrg.h b/include/myisammrg.h index 1d7efbe74d6..b3bca218a44 100644 --- a/include/myisammrg.h +++ b/include/myisammrg.h @@ -71,6 +71,7 @@ typedef struct st_myrg_info ulong cache_size; uint merge_insert_method; uint tables,options,reclength,keys; + uint key_parts; my_bool cache_in_use; /* If MERGE children attached to parent. 
See top comment in ha_myisammrg.cc */ my_bool children_attached; diff --git a/include/mysql/plugin_audit.h b/include/mysql/plugin_audit.h index e99c01376e6..bfa6621b007 100644 --- a/include/mysql/plugin_audit.h +++ b/include/mysql/plugin_audit.h @@ -48,6 +48,7 @@ extern "C" { #define MYSQL_AUDIT_GENERAL_ERROR 1 #define MYSQL_AUDIT_GENERAL_RESULT 2 #define MYSQL_AUDIT_GENERAL_STATUS 3 +#define MYSQL_AUDIT_GENERAL_WARNING 4 struct mysql_event_general { diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h index 8541b348ae4..8add709362e 100644 --- a/include/mysql/service_wsrep.h +++ b/include/mysql/service_wsrep.h @@ -57,6 +57,7 @@ extern struct wsrep_service_st { my_bool (*wsrep_on_func)(const MYSQL_THD thd); bool (*wsrep_prepare_key_for_innodb_func)(MYSQL_THD thd, const unsigned char*, size_t, const unsigned char*, size_t, struct wsrep_buf*, size_t*); void (*wsrep_thd_LOCK_func)(const MYSQL_THD thd); + int (*wsrep_thd_TRYLOCK_func)(const MYSQL_THD thd); void (*wsrep_thd_UNLOCK_func)(const MYSQL_THD thd); const char * (*wsrep_thd_query_func)(const MYSQL_THD thd); int (*wsrep_thd_retry_counter_func)(const MYSQL_THD thd); @@ -89,7 +90,6 @@ extern struct wsrep_service_st { ulong (*wsrep_OSU_method_get_func)(const MYSQL_THD thd); my_bool (*wsrep_thd_has_ignored_error_func)(const MYSQL_THD thd); void (*wsrep_thd_set_ignored_error_func)(MYSQL_THD thd, my_bool val); - bool (*wsrep_thd_set_wsrep_aborter_func)(MYSQL_THD bf_thd, MYSQL_THD thd); void (*wsrep_report_bf_lock_wait_func)(const MYSQL_THD thd, unsigned long long trx_id); void (*wsrep_thd_kill_LOCK_func)(const MYSQL_THD thd); @@ -111,6 +111,7 @@ extern struct wsrep_service_st { #define wsrep_on(thd) (thd) && WSREP_ON && wsrep_service->wsrep_on_func(thd) #define wsrep_prepare_key_for_innodb(A,B,C,D,E,F,G) wsrep_service->wsrep_prepare_key_for_innodb_func(A,B,C,D,E,F,G) #define wsrep_thd_LOCK(T) wsrep_service->wsrep_thd_LOCK_func(T) +#define wsrep_thd_TRYLOCK(T) wsrep_service->wsrep_thd_TRYLOCK_func(T) #define wsrep_thd_UNLOCK(T) wsrep_service->wsrep_thd_UNLOCK_func(T) #define wsrep_thd_kill_LOCK(T) wsrep_service->wsrep_thd_kill_LOCK_func(T) #define wsrep_thd_kill_UNLOCK(T) wsrep_service->wsrep_thd_kill_UNLOCK_func(T) @@ -141,7 +142,6 @@ extern struct wsrep_service_st { #define wsrep_OSU_method_get(T) wsrep_service->wsrep_OSU_method_get_func(T) #define wsrep_thd_has_ignored_error(T) wsrep_service->wsrep_thd_has_ignored_error_func(T) #define wsrep_thd_set_ignored_error(T,V) wsrep_service->wsrep_thd_set_ignored_error_func(T,V) -#define wsrep_thd_set_wsrep_aborter(T) wsrep_service->wsrep_thd_set_wsrep_aborter_func(T1, T2) #define wsrep_report_bf_lock_wait(T,I) wsrep_service->wsrep_report_bf_lock_wait(T,I) #define wsrep_thd_set_PA_unsafe(T) wsrep_service->wsrep_thd_set_PA_unsafe_func(T) #else @@ -175,6 +175,8 @@ void wsrep_set_data_home_dir(const char *data_dir); extern "C" my_bool wsrep_on(const MYSQL_THD thd); /* Lock thd wsrep lock */ extern "C" void wsrep_thd_LOCK(const MYSQL_THD thd); +/* Try thd wsrep lock. Return non-zero if lock could not be taken. 
*/ +extern "C" int wsrep_thd_TRYLOCK(const MYSQL_THD thd); /* Unlock thd wsrep lock */ extern "C" void wsrep_thd_UNLOCK(const MYSQL_THD thd); @@ -197,8 +199,6 @@ extern "C" my_bool wsrep_thd_is_local(const MYSQL_THD thd); /* Return true if thd is in high priority mode */ /* todo: rename to is_high_priority() */ extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd); -/* set wsrep_aborter for the target THD */ -extern "C" bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd); /* Return true if thd is in TOI mode */ extern "C" my_bool wsrep_thd_is_toi(const MYSQL_THD thd); /* Return true if thd is in replicating TOI mode */ @@ -249,7 +249,6 @@ extern "C" my_bool wsrep_thd_is_applying(const MYSQL_THD thd); extern "C" ulong wsrep_OSU_method_get(const MYSQL_THD thd); extern "C" my_bool wsrep_thd_has_ignored_error(const MYSQL_THD thd); extern "C" void wsrep_thd_set_ignored_error(MYSQL_THD thd, my_bool val); -extern "C" bool wsrep_thd_set_wsrep_aborter(MYSQL_THD bf_thd, MYSQL_THD victim_thd); extern "C" void wsrep_report_bf_lock_wait(const THD *thd, unsigned long long trx_id); /* declare parallel applying unsafety for the THD */ diff --git a/mysql-test/lib/My/File/Path.pm b/mysql-test/lib/My/File/Path.pm index d60027c909e..fd3cf6dd61c 100644 --- a/mysql-test/lib/My/File/Path.pm +++ b/mysql-test/lib/My/File/Path.pm @@ -34,7 +34,7 @@ use strict; use Exporter; use base "Exporter"; -our @EXPORT= qw /rmtree mkpath copytree/; +our @EXPORT= qw /rmtree mkpath copytree make_readonly/; use File::Find; use File::Copy; @@ -184,6 +184,10 @@ sub copytree { # Only copy plain files next unless -f "$from_dir/$_"; copy("$from_dir/$_", "$to_dir/$_"); + if (!$use_umask) + { + chmod(0666, "$to_dir/$_"); + } } closedir(DIR); @@ -193,4 +197,29 @@ sub copytree { } } + +sub make_readonly { + my ($dir) = @_; + + die "Usage: make_readonly(])" + unless @_ == 1; + + opendir(DIR, "$dir") + or croak("Can't find $dir$!"); + for(readdir(DIR)) { + + next if "$_" eq "." 
or "$_" eq ".."; + + if ( -d "$dir/$_" ) + { + make_readonly("$dir/$_"); + next; + } + + # Only copy plain files + next unless -f "$dir/$_"; + chmod 0444, "$dir/$_"; + } + closedir(DIR); +} 1; diff --git a/mysql-test/lib/My/SafeProcess/Base.pm b/mysql-test/lib/My/SafeProcess/Base.pm index 818e6e34e11..1cd01cb0ca9 100644 --- a/mysql-test/lib/My/SafeProcess/Base.pm +++ b/mysql-test/lib/My/SafeProcess/Base.pm @@ -40,7 +40,7 @@ our @EXPORT= qw(create_process); # Retry a couple of times if fork returns EAGAIN # sub _safe_fork { - my $retries= 5; + my $retries= 100; my $pid; FORK: diff --git a/mysql-test/main/create_w_max_indexes_128.result b/mysql-test/main/create_w_max_indexes_128.result index d86bc912e58..112df1b7c41 100644 --- a/mysql-test/main/create_w_max_indexes_128.result +++ b/mysql-test/main/create_w_max_indexes_128.result @@ -470,7 +470,7 @@ t1 CREATE TABLE `t1` ( KEY `a126_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a127_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a128_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci flush tables; show create table t1; Table Create Table @@ -619,7 +619,7 @@ t1 CREATE TABLE `t1` ( KEY `a126_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a127_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a128_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1; create table t1 (c1 int, c2 int, c3 int, c4 int, c5 int, c6 int, c7 int, c8 int, c9 int, c10 int, c11 int, c12 int, c13 int, c14 int, c15 int, c16 int); @@ -1092,7 +1092,7 @@ t1 CREATE TABLE `t1` ( KEY `a126_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a127_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a128_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci flush tables; show create table t1; Table Create Table @@ -1241,7 +1241,7 @@ t1 CREATE TABLE `t1` ( KEY `a126_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a127_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`), KEY `a128_long_123456789_123456789_123456789_123456789_123456789_1234` (`c1`,`c2`,`c3`,`c4`,`c5`,`c6`,`c7`,`c8`,`c9`,`c10`,`c11`,`c12`,`c13`,`c14`,`c15`,`c16`) -) 
ENGINE=MyISAM DEFAULT CHARSET=latin1 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci alter table t1 add key a129_long_123456789_123456789_123456789_123456789_123456789_1234 ( c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c14,c15,c16); diff --git a/mysql-test/main/distinct.result b/mysql-test/main/distinct.result index c779459ebb5..1f5d6b5bd46 100644 --- a/mysql-test/main/distinct.result +++ b/mysql-test/main/distinct.result @@ -1161,3 +1161,27 @@ explain select * from t1 limit 0 offset 10; id select_type table type possible_keys key key_len ref rows Extra 1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Zero limit drop table t1, t2; +# +# MDEV-28285 Unexpected result when combining DISTINCT, subselect +# and LIMIT +# +create table t1 (a int primary key); +create table t2 (a int primary key, b int not null); +insert into t1 select seq from seq_1_to_10; +insert into t2 select seq,seq from seq_1_to_10; +select distinct a from t1 where t1.a=1 and t1.a in (select a from t2 where t2.b in (1,2)); +a +1 +explain select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 const PRIMARY PRIMARY 4 const 1 Using index; Using temporary +1 PRIMARY t2 ALL NULL NULL NULL NULL 10 Using where; FirstMatch(t1) +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +a +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 0,1; +a +1 +drop table t1,t2; +# +# end of 10.5 tests +# diff --git a/mysql-test/main/distinct.test b/mysql-test/main/distinct.test index 06d04afca56..93db9a72cba 100644 --- a/mysql-test/main/distinct.test +++ b/mysql-test/main/distinct.test @@ -893,3 +893,24 @@ explain select * from t1 limit 0; explain select * from t1 limit 0 offset 10; drop table t1, t2; + +--echo # +--echo # MDEV-28285 Unexpected result when combining DISTINCT, subselect +--echo # and LIMIT +--echo # + +create table t1 (a int primary key); +create table t2 (a int primary key, b int not null); + +insert into t1 select seq from seq_1_to_10; +insert into t2 select seq,seq from seq_1_to_10; + +select distinct a from t1 where t1.a=1 and t1.a in (select a from t2 where t2.b in (1,2)); +explain select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 10,10; +select distinct a from t1 where t1.a=1 and t1.a in (select a+0 from t2 where t2.b in (1,2)) limit 0,1; +drop table t1,t2; + +--echo # +--echo # end of 10.5 tests +--echo # diff --git a/mysql-test/main/func_json.result b/mysql-test/main/func_json.result index 8cbc5305405..5af8e658d34 100644 --- a/mysql-test/main/func_json.result +++ b/mysql-test/main/func_json.result @@ -2578,5 +2578,29 @@ SELECT JSON_EXTRACT('{ "my-key": 1 }', '$.my-key'); JSON_EXTRACT('{ "my-key": 1 }', '$.my-key') 1 # +# MDEV-23187: Assorted assertion failures in json_find_path with certain collations +# +SET @save_collation_connection= @@collation_connection; +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SET collation_connection='ucs2_bin'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET 
@json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); +JSON_VALUE(@json,'$.A[last-1][last-1].key1') +NULL +SET @@collation_connection= @save_collation_connection; +# # End of 10.9 Test # diff --git a/mysql-test/main/func_json.test b/mysql-test/main/func_json.test index 9bf0c9bae05..23a703ca716 100644 --- a/mysql-test/main/func_json.test +++ b/mysql-test/main/func_json.test @@ -1772,6 +1772,32 @@ DROP TABLE t1; SELECT JSON_EXTRACT('{ "my-key": 1 }', '$."my-key"'); SELECT JSON_EXTRACT('{ "my-key": 1 }', '$.my-key'); +--echo # +--echo # MDEV-23187: Assorted assertion failures in json_find_path with certain collations +--echo # + + +SET @save_collation_connection= @@collation_connection; + +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + +SET @json='{ "A": [ [{"k":"v"},[1]],true],"B": {"C": 1} }'; +SET collation_connection='ucs2_bin'; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + + +SET @json='{ "A": [ [{"k":"v"},[15]],true],"B": {"C": 1} }'; +SET sql_mode=0,character_set_connection=utf32; +SELECT JSON_VALUE(@json,'$.A[last-1][last-1].key1'); + +SET @@collation_connection= @save_collation_connection; + + --echo # --echo # End of 10.9 Test --echo # diff --git a/mysql-test/main/func_sformat.result b/mysql-test/main/func_sformat.result index 1809cbad23b..9e8a11677b5 100644 --- a/mysql-test/main/func_sformat.result +++ b/mysql-test/main/func_sformat.result @@ -434,7 +434,7 @@ create table t1 as select sformat(_ucs2 x'003D007B007D003D', _ucs2 x'04420435044 show create table t1; Table Create Table t1 CREATE TABLE `t1` ( - `x` varchar(8) CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL + `x` longtext CHARACTER SET ucs2 COLLATE ucs2_general_ci DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci drop table t1; set names latin1; @@ -468,3 +468,26 @@ set names latin1; # # End of 10.7 tests # +# +# Start of 10.8 tests +# +# +# MDEV-29646 sformat('Num [{:20}]', 42) gives incorrect result in view +# +create view v1 as select sformat('Num [{:20}]', 42); +select * from v1; +sformat('Num [{:20}]', 42) +Num [ 42] +drop view v1; +create view v1 as SELECT sformat('Square root of [{:d}] is [{:.20}]', 2, sqrt(2)); +select * from v1; +sformat('Square root of [{:d}] is [{:.20}]', 2, sqrt(2)) +Square root of [2] is [1.4142135623730951455] +drop view v1; +create table t1 (a text, b int, c text); +insert t1 values ('[{} -> {}]', 10, '{}'), ('[{:20} <- {}]', 1, '{:30}'); +select sformat(a,b,c) from t1; +sformat(a,b,c) +[10 -> {}] +[ 1 <- {:30}] +drop table t1; diff --git a/mysql-test/main/func_sformat.test b/mysql-test/main/func_sformat.test index cb7e7c8f1c1..65e4b639179 100644 --- a/mysql-test/main/func_sformat.test +++ b/mysql-test/main/func_sformat.test @@ -253,3 +253,24 @@ set names latin1; echo #; echo # End of 10.7 tests; echo #; + +echo #; +echo # Start of 10.8 tests; +echo #; + +echo #; +echo # MDEV-29646 sformat('Num [{:20}]', 42) gives incorrect result in view; +echo #; + +create view v1 as select sformat('Num [{:20}]', 
42); +select * from v1; +drop view v1; + +create view v1 as SELECT sformat('Square root of [{:d}] is [{:.20}]', 2, sqrt(2)); +select * from v1; +drop view v1; + +create table t1 (a text, b int, c text); +insert t1 values ('[{} -> {}]', 10, '{}'), ('[{:20} <- {}]', 1, '{:30}'); +select sformat(a,b,c) from t1; +drop table t1; diff --git a/mysql-test/main/group_min_max.result b/mysql-test/main/group_min_max.result index cf4bfd62424..e1993eec07d 100644 --- a/mysql-test/main/group_min_max.result +++ b/mysql-test/main/group_min_max.result @@ -4129,6 +4129,140 @@ MIN(pk) a 5 10 DROP TABLE t1; # +# MDEV-6768 Wrong result with agregate with join with no resultset +# +create table t1 +( +PARENT_ID int(10) unsigned NOT NULL AUTO_INCREMENT, +PARENT_FIELD VARCHAR(10), +PRIMARY KEY (PARENT_ID) +) engine=innodb; +create table t2 +( +CHILD_ID INT NOT NULL AUTO_INCREMENT, +PARENT_ID INT NOT NULL, +CHILD_FIELD varchar(10), +PRIMARY KEY (CHILD_ID) +)engine=innodb; +INSERT INTO t1 (PARENT_FIELD) +SELECT 'AAAA'; +INSERT INTO t2 (PARENT_ID, CHILD_FIELD) +SELECT 1, 'BBBB'; +explain select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 const PRIMARY PRIMARY 4 const 1 +1 SIMPLE t2 ALL NULL NULL NULL NULL 1 Using where +select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +PARENT_ID min(CHILD_FIELD) +NULL NULL +select +1, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +1 min(CHILD_FIELD) +1 NULL +select +IFNULL(t1.PARENT_ID,1), +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +IFNULL(t1.PARENT_ID,1) min(CHILD_FIELD) +1 NULL +# Check that things works with MyISAM (which has different explain) +alter table t1 engine=myisam; +alter table t2 engine=myisam; +explain select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Impossible WHERE noticed after reading const tables +select +t1.PARENT_ID, +min(CHILD_FIELD) +from t1 straight_join t2 +where t1.PARENT_ID = 1 +and t1.PARENT_ID = t2.PARENT_ID +and t2.CHILD_FIELD = "ZZZZ"; +PARENT_ID min(CHILD_FIELD) +NULL NULL +drop table t1,t2; +# Check that things works if sub queries are re-executed +create table t1 (a int primary key, b int); +create table t2 (a int primary key, b int); +create table t3 (a int primary key, b int); +insert into t1 values (1,1),(2,2),(3,3); +insert into t2 values (1,1),(2,2),(3,3); +insert into t3 values (1,1),(3,3); +explain +select *, +(select +CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', +'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) +from t2,t3 +where t2.a=1 and t1.b = t3.a) as s1 +from t1; +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 3 +2 DEPENDENT SUBQUERY t2 const PRIMARY PRIMARY 4 const 1 Using index +2 DEPENDENT SUBQUERY t3 eq_ref PRIMARY PRIMARY 4 test.t1.b 1 +select *, +(select +CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', +'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) +from t2,t3 +where t2.a=1 and t1.b = t3.a) as s1 +from t1; +a b s1 +1 1 
t2:1;min_t3_b:1 +2 2 t2:t2a-null;min_t3_b:t3b-null +3 3 t2:1;min_t3_b:3 +drop table t1,t2,t3; +# +# MDEV-31380: Assertion `s->table->opt_range_condition_rows <= s->found_records' failed +# (assertion in 10.6+, DBL_MAX costs in 10.5) +# +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY(a), KEY(b)) ENGINE=InnoDB; +INSERT INTO t1 SELECT seq, seq FROM seq_1_to_100; +SET +@tmp=@@optimizer_use_condition_selectivity, +optimizer_use_condition_selectivity = 1, +@tmp2=@@optimizer_trace, +optimizer_trace=1; +SELECT DISTINCT * FROM t1 WHERE a IN (1, 2); +a b +1 1 +2 2 +select +CAST(json_value(json_extract(trace, '$**.chosen_access_method.cost'), '$[0]') +as DOUBLE) < 1.0e100 +from information_schema.optimizer_trace; +CAST(json_value(json_extract(trace, '$**.chosen_access_method.cost'), '$[0]') +as DOUBLE) < 1.0e100 +1 +set optimizer_use_condition_selectivity = @tmp, optimizer_trace=@tmp2; +drop table t1; +# # End of 10.5 tests # # diff --git a/mysql-test/main/group_min_max.test b/mysql-test/main/group_min_max.test index a1d2988ad4c..9a0ca3041d3 100644 --- a/mysql-test/main/group_min_max.test +++ b/mysql-test/main/group_min_max.test @@ -7,6 +7,7 @@ --source include/default_optimizer_switch.inc --source include/have_sequence.inc --source include/have_innodb.inc +--source include/have_sequence.inc # # TODO: # Add queries with: @@ -1756,6 +1757,140 @@ SELECT MIN(pk), a FROM t1 WHERE pk <> 1 GROUP BY a; DROP TABLE t1; +--echo # +--echo # MDEV-6768 Wrong result with agregate with join with no resultset +--echo # + +create table t1 +( + PARENT_ID int(10) unsigned NOT NULL AUTO_INCREMENT, + PARENT_FIELD VARCHAR(10), + PRIMARY KEY (PARENT_ID) +) engine=innodb; + +create table t2 +( + CHILD_ID INT NOT NULL AUTO_INCREMENT, + PARENT_ID INT NOT NULL, + CHILD_FIELD varchar(10), + PRIMARY KEY (CHILD_ID) +)engine=innodb; + +INSERT INTO t1 (PARENT_FIELD) +SELECT 'AAAA'; + +INSERT INTO t2 (PARENT_ID, CHILD_FIELD) +SELECT 1, 'BBBB'; + +explain select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + 1, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + IFNULL(t1.PARENT_ID,1), + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + + +--echo # Check that things works with MyISAM (which has different explain) + +alter table t1 engine=myisam; +alter table t2 engine=myisam; + +explain select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +select + t1.PARENT_ID, + min(CHILD_FIELD) + from t1 straight_join t2 + where t1.PARENT_ID = 1 + and t1.PARENT_ID = t2.PARENT_ID + and t2.CHILD_FIELD = "ZZZZ"; + +drop table t1,t2; + +--echo # Check that things works if sub queries are re-executed + +create table t1 (a int primary key, b int); +create table t2 (a int primary key, b int); +create table t3 (a int primary key, b int); + +insert into t1 values (1,1),(2,2),(3,3); +insert into t2 values (1,1),(2,2),(3,3); +insert into t3 values (1,1),(3,3); + +explain +select *, + (select + CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', + 'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) + from t2,t3 + where 
t2.a=1 and t1.b = t3.a) as s1 +from t1; + +select *, + (select + CONCAT('t2:', IFNULL(t2.a, 't2a-null'), ';', + 'min_t3_b:', IFNULL(min(t3.b), 't3b-null')) + from t2,t3 + where t2.a=1 and t1.b = t3.a) as s1 +from t1; + +drop table t1,t2,t3; + +--echo # +--echo # MDEV-31380: Assertion `s->table->opt_range_condition_rows <= s->found_records' failed +--echo # (assertion in 10.6+, DBL_MAX costs in 10.5) +--echo # + +CREATE TABLE t1 (a INT, b INT, PRIMARY KEY(a), KEY(b)) ENGINE=InnoDB; +INSERT INTO t1 SELECT seq, seq FROM seq_1_to_100; + +SET + @tmp=@@optimizer_use_condition_selectivity, + optimizer_use_condition_selectivity = 1, + @tmp2=@@optimizer_trace, + optimizer_trace=1; + +SELECT DISTINCT * FROM t1 WHERE a IN (1, 2); + +select + CAST(json_value(json_extract(trace, '$**.chosen_access_method.cost'), '$[0]') + as DOUBLE) < 1.0e100 +from information_schema.optimizer_trace; + +set optimizer_use_condition_selectivity = @tmp, optimizer_trace=@tmp2; +drop table t1; + --echo # --echo # End of 10.5 tests --echo # diff --git a/mysql-test/main/select.result b/mysql-test/main/select.result index 4f7afe933ff..19fd2858f5f 100644 --- a/mysql-test/main/select.result +++ b/mysql-test/main/select.result @@ -608,7 +608,7 @@ id select_type table type possible_keys key key_len ref rows Extra # explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index period period 4 NULL 41810 Using index +1 SIMPLE t1 index period period 4 NULL 41810 Using where; Using index 1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using index explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period; id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/main/select_jcl6.result b/mysql-test/main/select_jcl6.result index 177edbc4acf..97b05171a1d 100644 --- a/mysql-test/main/select_jcl6.result +++ b/mysql-test/main/select_jcl6.result @@ -619,7 +619,7 @@ id select_type table type possible_keys key key_len ref rows Extra # explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index period period 4 NULL 41810 Using index +1 SIMPLE t1 index period period 4 NULL 41810 Using where; Using index 1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using index explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period; id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/main/select_pkeycache.result b/mysql-test/main/select_pkeycache.result index 4f7afe933ff..19fd2858f5f 100644 --- a/mysql-test/main/select_pkeycache.result +++ b/mysql-test/main/select_pkeycache.result @@ -608,7 +608,7 @@ id select_type table type possible_keys key key_len ref rows Extra # explain select count(*) from t3 as t1,t3 where t1.period=t3.period order by t3.period; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 index period period 4 NULL 41810 Using index +1 SIMPLE t1 index period period 4 NULL 41810 Using where; Using index 1 SIMPLE t3 ref period period 4 test.t1.period 4181 Using index explain select sum(t1.price+t3.price) from t3 as t1,t3 where t1.period=t3.period order by t3.period; id select_type table type possible_keys key key_len ref rows Extra diff --git a/mysql-test/main/selectivity.result b/mysql-test/main/selectivity.result index 
ad8d733a163..4d1ad98e905 100644 --- a/mysql-test/main/selectivity.result +++ b/mysql-test/main/selectivity.result @@ -1860,7 +1860,6 @@ test.t1 analyze status Table is already up to date test.t2 analyze status Engine-independent statistics collected test.t2 analyze status Table is already up to date set optimizer_switch='exists_to_in=off'; -set optimizer_use_condition_selectivity=2; SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1885,18 +1884,39 @@ id a 17 17 18 18 19 19 -explain SELECT * FROM t1 +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where -2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where -EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 -1 SIMPLE B ref a a 5 const 1 +1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +set @query="EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65"; +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 ALL NULL NULL NULL NULL 100 Using where +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1906,7 +1926,6 @@ id select_type table type possible_keys key key_len ref rows Extra 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where set optimizer_switch= @save_optimizer_switch; -set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; # # MDEV-21495: Conditional jump or move depends on uninitialised value in sel_arg_range_seq_next diff --git a/mysql-test/main/selectivity.test b/mysql-test/main/selectivity.test index c96fb864408..a41b695608d 100644 --- a/mysql-test/main/selectivity.test +++ b/mysql-test/main/selectivity.test @@ -1262,7 +1262,6 @@ insert into t2 select seq,seq,seq from seq_1_to_100; analyze table t1,t2 persistent for all; set optimizer_switch='exists_to_in=off'; -set optimizer_use_condition_selectivity=2; let $query= SELECT * FROM t1 WHERE @@ -1270,14 +1269,16 @@ let $query= SELECT * FROM t1 WHERE 
A.a=t1.a AND t2.b < 20); eval $query; -eval explain $query; +eval set statement optimizer_use_condition_selectivity=2 for explain $query; +eval set statement optimizer_use_condition_selectivity=4 for explain $query; -EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; +set @query="EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65"; +eval set statement optimizer_use_condition_selectivity=2 for explain $query; +eval set statement optimizer_use_condition_selectivity=4 for explain $query; eval explain $query; set optimizer_switch= @save_optimizer_switch; -set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; --echo # diff --git a/mysql-test/main/selectivity_innodb.result b/mysql-test/main/selectivity_innodb.result index 99723013eaa..79d08e455a8 100644 --- a/mysql-test/main/selectivity_innodb.result +++ b/mysql-test/main/selectivity_innodb.result @@ -1867,7 +1867,6 @@ test.t1 analyze status OK test.t2 analyze status Engine-independent statistics collected test.t2 analyze status OK set optimizer_switch='exists_to_in=off'; -set optimizer_use_condition_selectivity=2; SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1892,18 +1891,39 @@ id a 17 17 18 18 19 19 -explain SELECT * FROM t1 +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra 1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index -2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index -2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where -EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65; +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE A const PRIMARY,a PRIMARY 4 const 1 -1 SIMPLE B ref a a 5 const 1 Using index +1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +set @query="EXPLAIN SELECT * FROM t1 A, t1 B WHERE A.a = B.a and A.id = 65"; +set statement optimizer_use_condition_selectivity=2 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where +set statement optimizer_use_condition_selectivity=4 for explain SELECT * FROM t1 +WHERE +EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id +WHERE A.a=t1.a AND t2.b < 20); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 index NULL a 5 NULL 100 Using where; Using index +3 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index +3 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where explain SELECT * FROM t1 WHERE EXISTS (SELECT * FROM t1 A INNER JOIN t2 ON t2.a = A.id @@ -1913,7 +1933,6 @@ id select_type table type possible_keys key 
key_len ref rows Extra 2 DEPENDENT SUBQUERY A ref PRIMARY,a a 5 test.t1.a 1 Using index 2 DEPENDENT SUBQUERY t2 ref a,b a 5 test.A.id 1 Using where set optimizer_switch= @save_optimizer_switch; -set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; drop table t1,t2; # # MDEV-21495: Conditional jump or move depends on uninitialised value in sel_arg_range_seq_next diff --git a/mysql-test/main/type_timestamp.result b/mysql-test/main/type_timestamp.result index a64d393ee66..064dee6715d 100644 --- a/mysql-test/main/type_timestamp.result +++ b/mysql-test/main/type_timestamp.result @@ -1230,6 +1230,8 @@ SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT 'a' UNION SELECT 'r'); c1 Warnings: Warning 1292 Truncated incorrect datetime value: 'r' +SELECT * FROM t1 HAVING MIN(t1.c1) > 0; +c1 DROP TABLE t1; CREATE TABLE t1 (c1 timestamp); INSERT INTO t1 VALUES ('2010-01-01 00:00:00'); diff --git a/mysql-test/main/type_timestamp.test b/mysql-test/main/type_timestamp.test index 8edd52fec2a..45ec6eae8bb 100644 --- a/mysql-test/main/type_timestamp.test +++ b/mysql-test/main/type_timestamp.test @@ -810,6 +810,7 @@ DROP TABLE t1; CREATE TABLE t1 (c1 timestamp); SELECT MIN(t1.c1) AS k1 FROM t1 HAVING (k1 >= ALL(SELECT 'a' UNION SELECT 'r')); SELECT * FROM t1 HAVING MIN(t1.c1) >= ALL(SELECT 'a' UNION SELECT 'r'); +SELECT * FROM t1 HAVING MIN(t1.c1) > 0; DROP TABLE t1; CREATE TABLE t1 (c1 timestamp); diff --git a/mysql-test/mariadb-test-run.pl b/mysql-test/mariadb-test-run.pl index 07d55afe959..9fff5b8f8fb 100755 --- a/mysql-test/mariadb-test-run.pl +++ b/mysql-test/mariadb-test-run.pl @@ -410,8 +410,11 @@ sub main { mark_time_used('collect'); - mysql_install_db(default_mysqld(), "$opt_vardir/install.db") unless using_extern(); - + if (!using_extern()) + { + mysql_install_db(default_mysqld(), "$opt_vardir/install.db"); + make_readonly("$opt_vardir/install.db"); + } if ($opt_dry_run) { for (@$tests) { diff --git a/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result b/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result index fdcfb4bfa01..1c11191802f 100644 --- a/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result +++ b/mysql-test/suite/binlog/r/binlog_flush_binlogs_delete_domain.result @@ -46,15 +46,23 @@ Warning 1076 The current gtid binlog state is incompatible with a former one mis Warning 1076 The gtid domain being deleted ('1') is not in the current binlog state FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0); ERROR HY000: Could not delete gtid domain. Reason: binlog files may contain gtids from the domain ('1') being deleted. Make sure to first purge those files. +MDEV-31140: Missing error from DELETE_DOMAIN_ID when gtid_binlog_state partially matches GTID_LIST. FLUSH BINARY LOGS; PURGE BINARY LOGS TO 'master-bin.000005'; +SET @@SESSION.gtid_domain_id=8; +SET @@SESSION.server_id=10*8 + 1; +INSERT INTO t SELECT 1+MAX(a) FROM t; +FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0); +ERROR HY000: Could not delete gtid domain. Reason: binlog files may contain gtids from the domain ('8') being deleted. Make sure to first purge those files. 
+FLUSH BINARY LOGS; +PURGE BINARY LOGS TO 'master-bin.000006'; FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0); Warnings: Warning 1076 The gtid domain being deleted ('0') is not in the current binlog state Gtid_list of the current binlog does not contain 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 0: -show binlog events in 'master-bin.000006' limit 1,1; +show binlog events in 'master-bin.000007' limit 1,1; Log_name Pos Event_type Server_id End_log_pos Info -master-bin.000006 # Gtid_list 1 # [] +master-bin.000007 # Gtid_list 1 # [] SET @@SESSION.gtid_domain_id=1;; SET @@SESSION.server_id=1; SET @@SESSION.gtid_seq_no=1; @@ -75,7 +83,7 @@ INSERT INTO t SET a=1; SELECT @gtid_binlog_state_saved "as original state", @@GLOBAL.gtid_binlog_state as "out of order for 11 domain state"; as original state out of order for 11 domain state 1-1-1,1-2-2,11-11-11 1-1-1,1-2-2,11-11-1 -PURGE BINARY LOGS TO 'master-bin.000007'; +PURGE BINARY LOGS TO 'master-bin.000008'; the following command succeeds with warnings FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1); Warnings: diff --git a/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test b/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test index 8311f4bd800..1643ecff72d 100644 --- a/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test +++ b/mysql-test/suite/binlog/t/binlog_flush_binlogs_delete_domain.test @@ -21,7 +21,6 @@ FLUSH BINARY LOGS DELETE_DOMAIN_ID = (); --echo but with a warning --let $binlog_pre_flush=query_get_value(SHOW MASTER STATUS, Position, 1) FLUSH BINARY LOGS DELETE_DOMAIN_ID = (99); ---let $binlog_start=$binlog_pre_flush --source include/show_binary_logs.inc # Log one event in a specified domain and try to delete the domain @@ -62,6 +61,8 @@ FLUSH BINARY LOGS DELETE_DOMAIN_ID = (1); # expected overrun of the static buffers of underlying dynamic arrays is doing. --let $domain_cnt=17 --let $server_in_domain_cnt=3 +--let $err_domain_id=`SELECT FLOOR($domain_cnt/2)` +--let $err_server_id=`SELECT FLOOR($server_in_domain_cnt/2)` --let $domain_list= --disable_query_log while ($domain_cnt) @@ -86,6 +87,16 @@ while ($domain_cnt) --error ER_BINLOG_CANT_DELETE_GTID_DOMAIN --eval FLUSH BINARY LOGS DELETE_DOMAIN_ID = ($domain_list) +--echo MDEV-31140: Missing error from DELETE_DOMAIN_ID when gtid_binlog_state partially matches GTID_LIST. 
+FLUSH BINARY LOGS; +--let $purge_to_binlog= query_get_value(SHOW MASTER STATUS, File, 1) +--eval PURGE BINARY LOGS TO '$purge_to_binlog' +--eval SET @@SESSION.gtid_domain_id=$err_domain_id +--eval SET @@SESSION.server_id=10*$err_domain_id + $err_server_id +eval INSERT INTO t SELECT 1+MAX(a) FROM t; +--error ER_BINLOG_CANT_DELETE_GTID_DOMAIN +--eval FLUSH BINARY LOGS DELETE_DOMAIN_ID = ($domain_list) + # Now satisfy the safety condtion to purge log files containing $domain list FLUSH BINARY LOGS; --let $purge_to_binlog= query_get_value(SHOW MASTER STATUS, File, 1) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index bcf6078f624..84ecd3eb7fb 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -27,3 +27,5 @@ galera_bf_kill_debug : timeout after 900 seconds galera_ssl_upgrade : [Warning] Failed to load slave replication state from table mysql.gtid_slave_pos: 130: Incorrect file format 'gtid_slave_pos' galera_parallel_simple : timeout related to wsrep_sync_wait galera_insert_bulk : MDEV-30536 no expected deadlock in galera_insert_bulk test +MDEV-27713 : test is using get_lock(), which is now rejected in cluster +galera_bf_abort_group_commit : MDEV-30855 PR to remove the test exists diff --git a/mysql-test/suite/galera/r/MDEV-29293.result b/mysql-test/suite/galera/r/MDEV-29293.result new file mode 100644 index 00000000000..70c0cc84a31 --- /dev/null +++ b/mysql-test/suite/galera/r/MDEV-29293.result @@ -0,0 +1,21 @@ +connection node_2; +connection node_1; +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1; +set wsrep_sync_wait = 0; +CREATE TABLE t1(a int not null primary key auto_increment, b int) engine=InnoDB; +INSERT INTO t1 VALUES (1,2); +connection node_1a; +BEGIN; +UPDATE t1 SET b=3 WHERE a=1; +connection node_1; +set debug_sync='wsrep_kill_before_awake_no_mutex SIGNAL before_kill WAIT_FOR continue'; +connection node_1b; +set debug_sync= 'now WAIT_FOR before_kill'; +connection node_2; +UPDATE t1 SET b=7 WHERE a=1; +connection node_1b; +set debug_sync= 'now SIGNAL continue'; +connection node_1; +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; diff --git a/mysql-test/suite/galera/r/galera_create_table_as_select.result b/mysql-test/suite/galera/r/galera_create_table_as_select.result index 6f65ee99f0a..beda5f30fe2 100644 --- a/mysql-test/suite/galera/r/galera_create_table_as_select.result +++ b/mysql-test/suite/galera/r/galera_create_table_as_select.result @@ -82,6 +82,7 @@ connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; LOCK TABLE t2 WRITE; connection node_1; CREATE TABLE t1 AS SELECT * FROM t2;; +connection node_1a; connection node_2; SELECT COUNT(*) = 5 FROM t2; COUNT(*) = 5 diff --git a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result index 5caf22b39ca..5718807b5c4 100644 --- a/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result +++ b/mysql-test/suite/galera/r/galera_gcache_recover_manytrx.result @@ -134,6 +134,3 @@ connection node_1; call mtr.add_suppression("Error in Log_event::read_log_event():.*"); CALL mtr.add_suppression("conflict state 7 after post commit"); CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); -connection node_2; -call mtr.add_suppression("Error in Log_event::read_log_event():.*"); -CALL mtr.add_suppression("Skipped GCache ring buffer recovery"); diff --git 
a/mysql-test/suite/galera/r/galera_kill_group_commit.result b/mysql-test/suite/galera/r/galera_kill_group_commit.result new file mode 100644 index 00000000000..bb59ce1486f --- /dev/null +++ b/mysql-test/suite/galera/r/galera_kill_group_commit.result @@ -0,0 +1,27 @@ +connection node_2; +connection node_1; +connect node_1_kill, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1; +SET SESSION wsrep_sync_wait = 0; +connect node_1_follower, 127.0.0.1, root, , test, $NODE_MYPORT_1; +SET SESSION wsrep_sync_wait = 0; +connection node_1; +CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; +SET SESSION DEBUG_SYNC = "commit_before_enqueue SIGNAL leader_before_enqueue_reached WAIT_FOR leader_before_enqueue_continue"; +INSERT INTO t1 VALUES (1); +connection node_1_ctrl; +SET DEBUG_SYNC = "now WAIT_FOR leader_before_enqueue_reached"; +connection node_1_follower; +INSERT INTO t1 VALUES (2);; +connection node_1_ctrl; +connection node_1_kill; +# Execute KILL QUERY for group commit follower +SET DEBUG_SYNC = "now SIGNAL leader_before_enqueue_continue"; +connection node_1_follower; +connection node_1; +SELECT * FROM t1; +f1 +1 +2 +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result index 50667b0a4fa..eee740b6036 100644 --- a/mysql-test/suite/galera/r/galera_var_retry_autocommit.result +++ b/mysql-test/suite/galera/r/galera_var_retry_autocommit.result @@ -36,7 +36,10 @@ SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue'; connection node_1; SELECT COUNT(*) FROM t1; COUNT(*) -1 +connection node_1; +SELECT COUNT(*) FROM t1; +COUNT(*) +0 SET DEBUG_SYNC = 'RESET'; SET GLOBAL debug_dbug = NULL; DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MDEV-29293.test b/mysql-test/suite/galera/t/MDEV-29293.test new file mode 100644 index 00000000000..dacbf714c06 --- /dev/null +++ b/mysql-test/suite/galera/t/MDEV-29293.test @@ -0,0 +1,41 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_have_debug_sync.inc + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 +set wsrep_sync_wait = 0; + +CREATE TABLE t1(a int not null primary key auto_increment, b int) engine=InnoDB; +INSERT INTO t1 VALUES (1,2); + +--connection node_1a +--let $victim_id = `SELECT CONNECTION_ID()` +BEGIN; +UPDATE t1 SET b=3 WHERE a=1; + +--connection node_1 +set debug_sync='wsrep_kill_before_awake_no_mutex SIGNAL before_kill WAIT_FOR continue'; +--disable_query_log +--disable_result_log +--send_eval KILL CONNECTION $victim_id +--enable_result_log +--enable_query_log + +--connection node_1b +set debug_sync= 'now WAIT_FOR before_kill'; + +--connection node_2 +UPDATE t1 SET b=7 WHERE a=1; + +--connection node_1b +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE User = 'system user' AND State LIKE 'Update_rows_log_event%'; +--source include/wait_condition.inc +set debug_sync= 'now SIGNAL continue'; + +--connection node_1 +--reap +DROP TABLE t1; +SET DEBUG_SYNC= 'RESET'; + diff --git a/mysql-test/suite/galera/t/galera_create_table_as_select.test b/mysql-test/suite/galera/t/galera_create_table_as_select.test index a6c1f657280..cfee63e5e27 100644 --- a/mysql-test/suite/galera/t/galera_create_table_as_select.test +++ b/mysql-test/suite/galera/t/galera_create_table_as_select.test @@ 
-113,6 +113,10 @@ LOCK TABLE t2 WRITE; --connection node_1 --send CREATE TABLE t1 AS SELECT * FROM t2; +--connection node_1a +--let $wait_condition = SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE STATE LIKE 'Waiting for table metadata lock%' +--source include/wait_condition.inc + --connection node_2 SELECT COUNT(*) = 5 FROM t2; CREATE TABLE t1 AS SELECT * FROM t2; @@ -121,7 +125,7 @@ CREATE TABLE t1 AS SELECT * FROM t2; UNLOCK TABLES; --connection node_1 ---error ER_TABLE_EXISTS_ERROR,ER_LOCK_DEADLOCK +--error ER_TABLE_EXISTS_ERROR,ER_QUERY_INTERRUPTED --reap DROP TABLE t1, t2; diff --git a/mysql-test/suite/galera/t/galera_kill_group_commit.cnf b/mysql-test/suite/galera/t/galera_kill_group_commit.cnf new file mode 100644 index 00000000000..60f4f776409 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_kill_group_commit.cnf @@ -0,0 +1,5 @@ +!include ../galera_2nodes.cnf + +[mysqld] +log-bin +log-slave-updates diff --git a/mysql-test/suite/galera/t/galera_kill_group_commit.test b/mysql-test/suite/galera/t/galera_kill_group_commit.test new file mode 100644 index 00000000000..4b84f2d90ef --- /dev/null +++ b/mysql-test/suite/galera/t/galera_kill_group_commit.test @@ -0,0 +1,69 @@ +# +# Verify that transaction which has reached group commit queue +# cannot be killed. If the kill succeeds, assertion for +# wsrep transaction state will fail. +# +# If the bug is present, i.e. wsrep transaction gets killed during +# group commit wait, this test is enough to reproduce the crash +# most of the time. +# + +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_cluster.inc + +# Connection for KILL commands +--connect node_1_kill, 127.0.0.1, root, , test, $NODE_MYPORT_1 +# Connection for sync point control +--connect node_1_ctrl, 127.0.0.1, root, , test, $NODE_MYPORT_1 +SET SESSION wsrep_sync_wait = 0; +# Connection for group commit follower +--connect node_1_follower, 127.0.0.1, root, , test, $NODE_MYPORT_1 +# Need to disable sync wait to reach commit queue when leader +# is blocked. +SET SESSION wsrep_sync_wait = 0; +--let $follower_id = `SELECT CONNECTION_ID()` + +--connection node_1 +CREATE TABLE t1 (f1 INT PRIMARY KEY) ENGINE=InnoDB; + +SET SESSION DEBUG_SYNC = "commit_before_enqueue SIGNAL leader_before_enqueue_reached WAIT_FOR leader_before_enqueue_continue"; +--send INSERT INTO t1 VALUES (1) + +--connection node_1_ctrl +SET DEBUG_SYNC = "now WAIT_FOR leader_before_enqueue_reached"; + +--connection node_1_follower +# SET SESSION DEBUG_SYNC = "group_commit_waiting_for_prior SIGNAL follower_waiting_for_prior_reached WAIT_FOR follower_waiting_for_prior_continue"; +--send INSERT INTO t1 VALUES (2); + +--connection node_1_ctrl +# TODO: Is it possible to use sync points to enforce group commit to happen? +# The leader will hold commit monitor in commit_before_enqueue sync point, +# which prevents the follower to reach the group commit wait state. +# We now sleep and expect the follower to reach group commit, but this +# may cause false negatives. +--sleep 1 + +--connection node_1_kill +--echo # Execute KILL QUERY for group commit follower +--disable_query_log +--disable_result_log +# Because it is currently impossible to verify that the +# follower has reached group commit queue, the KILL may +# sometimes return success. 
+--error 0,ER_KILL_DENIED_ERROR +--eval KILL QUERY $follower_id +--enable_result_log +--enable_query_log + +SET DEBUG_SYNC = "now SIGNAL leader_before_enqueue_continue"; +--connection node_1_follower +--reap + +--connection node_1 +--reap +SELECT * FROM t1; + +SET DEBUG_SYNC = "RESET"; +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test index c58eba1410e..8009fe88c65 100644 --- a/mysql-test/suite/galera/t/galera_var_retry_autocommit.test +++ b/mysql-test/suite/galera/t/galera_var_retry_autocommit.test @@ -64,6 +64,7 @@ SELECT COUNT(*) FROM t1; SET DEBUG_SYNC = 'now SIGNAL wsrep_retry_autocommit_continue'; --connection node_1 +--error 0,ER_LOCK_DEADLOCK --reap SELECT COUNT(*) FROM t1; diff --git a/mysql-test/suite/gcol/r/gcol_purge.result b/mysql-test/suite/gcol/r/gcol_purge.result index 11063c7cd6f..a130485f219 100644 --- a/mysql-test/suite/gcol/r/gcol_purge.result +++ b/mysql-test/suite/gcol/r/gcol_purge.result @@ -16,7 +16,7 @@ INSERT INTO t1 (f1, f2) VALUES(1,2); set global debug_dbug="+d,ib_purge_virtual_index_callback"; connection con1; COMMIT; -InnoDB 0 transactions not purged +SET GLOBAL innodb_max_purge_lag_wait=1; connection con2; commit; disconnect con1; diff --git a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result index 3f3b2db8f32..89711a2d8bb 100644 --- a/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result +++ b/mysql-test/suite/gcol/r/innodb_virtual_debug_purge.result @@ -78,7 +78,7 @@ SET DEBUG_SYNC= 'now WAIT_FOR uncommitted'; # enable purge COMMIT; # wait for purge to process the deleted records. -InnoDB 0 transactions not purged +InnoDB 1 transactions not purged SET DEBUG_SYNC= 'now SIGNAL purged'; connection default; /* connection default */ ALTER TABLE t1 ADD COLUMN c INT GENERATED ALWAYS AS(a+b), ADD INDEX idx (c), ALGORITHM=INPLACE, LOCK=SHARED; diff --git a/mysql-test/suite/gcol/t/gcol_purge.test b/mysql-test/suite/gcol/t/gcol_purge.test index ecfd89f4469..8fff375cdc2 100644 --- a/mysql-test/suite/gcol/t/gcol_purge.test +++ b/mysql-test/suite/gcol/t/gcol_purge.test @@ -23,7 +23,7 @@ set global debug_dbug="+d,ib_purge_virtual_index_callback"; connection con1; COMMIT; ---source ../innodb/include/wait_all_purged.inc +SET GLOBAL innodb_max_purge_lag_wait=1; connection con2; commit; diff --git a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test index d9b764a75a7..e7bf8eb485b 100644 --- a/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test +++ b/mysql-test/suite/gcol/t/innodb_virtual_debug_purge.test @@ -113,7 +113,9 @@ SET DEBUG_SYNC= 'now WAIT_FOR uncommitted'; COMMIT; --echo # wait for purge to process the deleted records. 
+let $wait_all_purged = 1; --source ../../innodb/include/wait_all_purged.inc +let $wait_all_purged = 0; SET DEBUG_SYNC= 'now SIGNAL purged'; diff --git a/mysql-test/suite/innodb/r/cursor-restore-locking.result b/mysql-test/suite/innodb/r/cursor-restore-locking.result index 48263151ceb..6259cfd58ca 100644 --- a/mysql-test/suite/innodb/r/cursor-restore-locking.result +++ b/mysql-test/suite/innodb/r/cursor-restore-locking.result @@ -1,31 +1,34 @@ SET @save_freq=@@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; -CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB; +CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB, STATS_PERSISTENT=0; InnoDB 0 transactions not purged connect prevent_purge,localhost,root,,; start transaction with consistent snapshot; connect con_del_1,localhost,root,,; INSERT INTO t VALUES (20,20); SET DEBUG_SYNC = 'innodb_row_search_for_mysql_exit SIGNAL first_del_row_search_mvcc_finished WAIT_FOR first_del_cont'; -DELETE FROM t WHERE b = 20; +DELETE FROM t WHERE b = 20 # trx_1; connect con_ins_1,localhost,root,,; SET DEBUG_SYNC = 'now WAIT_FOR first_del_row_search_mvcc_finished'; SET DEBUG_SYNC = 'lock_wait_start SIGNAL first_ins_locked'; SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL first_ins_row_inserted WAIT_FOR first_ins_cont'; -INSERT INTO t VALUES(10, 20); +INSERT INTO t VALUES(10, 20) # trx_2; connect con_del_2,localhost,root,,; SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_locked'; SET DEBUG_SYNC = 'lock_wait_start SIGNAL second_del_locked'; -DELETE FROM t WHERE b = 20; +DELETE FROM t WHERE b = 20 # trx_3; connection default; SET DEBUG_SYNC = 'now WAIT_FOR second_del_locked'; +SET @saved_dbug = @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point"; SET DEBUG_SYNC = 'now SIGNAL first_del_cont'; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_row_inserted'; connection con_del_1; connection default; disconnect prevent_purge; -InnoDB 0 transactions not purged +SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished'; +SET @@GLOBAL.debug_dbug = @saved_dbug; SET DEBUG_SYNC = 'now SIGNAL first_ins_cont'; connection con_del_2; connection con_ins_1; diff --git a/mysql-test/suite/innodb/r/dml_purge.result b/mysql-test/suite/innodb/r/dml_purge.result index 38273d571c0..75a5f0fec6c 100644 --- a/mysql-test/suite/innodb/r/dml_purge.result +++ b/mysql-test/suite/innodb/r/dml_purge.result @@ -19,7 +19,7 @@ BEGIN; UPDATE t1 SET b=4 WHERE a=3; disconnect prevent_purge; connection default; -InnoDB 0 transactions not purged +SET GLOBAL innodb_max_purge_lag_wait=1; connection con1; ROLLBACK; disconnect con1; diff --git a/mysql-test/suite/innodb/r/instant_alter_debug.result b/mysql-test/suite/innodb/r/instant_alter_debug.result index 6efe5d7c734..9dcf5eb8c62 100644 --- a/mysql-test/suite/innodb/r/instant_alter_debug.result +++ b/mysql-test/suite/innodb/r/instant_alter_debug.result @@ -392,11 +392,12 @@ connection stop_purge; COMMIT; disconnect stop_purge; connection default; -InnoDB 0 transactions not purged +InnoDB 1 transactions not purged SET DEBUG_SYNC='now SIGNAL s2'; connection dml; disconnect dml; connection default; +InnoDB 0 transactions not purged SET DEBUG_SYNC=RESET; DROP TABLE t1; # End of 10.3 tests diff --git a/mysql-test/suite/innodb/r/instant_alter_purge.result b/mysql-test/suite/innodb/r/instant_alter_purge.result index 1179ff62ecc..261356bad12 100644 --- 
a/mysql-test/suite/innodb/r/instant_alter_purge.result +++ b/mysql-test/suite/innodb/r/instant_alter_purge.result @@ -1,5 +1,6 @@ SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; +InnoDB 0 transactions not purged # # MDEV-17793 Crash in purge after instant DROP and emptying the table # @@ -16,7 +17,7 @@ COMMIT; START TRANSACTION WITH CONSISTENT SNAPSHOT; connection default; ALTER TABLE t1 ADD COLUMN extra TINYINT UNSIGNED NOT NULL DEFAULT 42; -InnoDB 1 transactions not purged +SET GLOBAL innodb_max_purge_lag_wait=1; ALTER TABLE t1 DROP extra; disconnect prevent_purge; InnoDB 0 transactions not purged diff --git a/mysql-test/suite/innodb/r/stat_tables.result b/mysql-test/suite/innodb/r/stat_tables.result index c1ce6fc8fce..99c862fea77 100644 --- a/mysql-test/suite/innodb/r/stat_tables.result +++ b/mysql-test/suite/innodb/r/stat_tables.result @@ -26,4 +26,60 @@ UPDATE mysql.innodb_table_stats SET last_update=NULL WHERE table_name='t1'; XA END 'test'; XA ROLLBACK 'test'; DROP TABLE t1; +# +# MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +# +# +# Testing a non-default format: Field_timestamp0 - UINT4 based +# +SET @@global.mysql56_temporal_format=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp /* mariadb-5.3 */ NO current_timestamp() on update current_timestamp() +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp /* mariadb-5.3 */ NO current_timestamp() on update current_timestamp() +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +DROP TABLE t1; +# +# Now as the table t1 is dropped, expect no statistics +# +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +database_name table_name last_update n_rows clustered_index_size sum_of_other_index_sizes +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +database_name table_name index_name last_update stat_name stat_value sample_size stat_description +# +# Testing with the default format: Field_timestampf - BINARY(4) based with the UNSIGNED_FLAG +# +SET @@global.mysql56_temporal_format=1; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp NO current_timestamp() on update current_timestamp() +SHOW COLUMNS FROM mysql.innodb_index_stats 
LIKE 'last_update'; +Field Type Null Key Default Extra +last_update timestamp NO current_timestamp() on update current_timestamp() +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +DROP TABLE t1; # End of 10.6 tests diff --git a/mysql-test/suite/innodb/t/cursor-restore-locking.test b/mysql-test/suite/innodb/t/cursor-restore-locking.test index f8d00f57a5e..b65d3773ba2 100644 --- a/mysql-test/suite/innodb/t/cursor-restore-locking.test +++ b/mysql-test/suite/innodb/t/cursor-restore-locking.test @@ -5,7 +5,7 @@ source include/have_debug_sync.inc; SET @save_freq=@@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; -CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB; +CREATE TABLE t (a int PRIMARY KEY, b int NOT NULL UNIQUE) engine = InnoDB, STATS_PERSISTENT=0; --source include/wait_all_purged.inc --connect(prevent_purge,localhost,root,,) @@ -14,20 +14,20 @@ start transaction with consistent snapshot; --connect(con_del_1,localhost,root,,) INSERT INTO t VALUES (20,20); SET DEBUG_SYNC = 'innodb_row_search_for_mysql_exit SIGNAL first_del_row_search_mvcc_finished WAIT_FOR first_del_cont'; ---send DELETE FROM t WHERE b = 20 +--send DELETE FROM t WHERE b = 20 # trx_1 --connect(con_ins_1,localhost,root,,) SET DEBUG_SYNC = 'now WAIT_FOR first_del_row_search_mvcc_finished'; # It's supposed the following INSERT will be suspended just after # lock_wait_start syncpoint, and will be awaken -# after the previous DELETE commits. ib_after_row_insert will be executed -# after the INSERT is woken up. The previous DELETE will wait for +# after trx_1 DELETE commits. ib_after_row_insert will be executed +# after the trx_2 INSERT is woken up. The trx_1 DELETE will wait for # first_del_cont signal before commit, and this signal will be sent later. # So it's safe to use two signals in a row here, it's guaranted the first # signal will be received before the second signal is sent. SET DEBUG_SYNC = 'lock_wait_start SIGNAL first_ins_locked'; SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL first_ins_row_inserted WAIT_FOR first_ins_cont'; ---send INSERT INTO t VALUES(10, 20) +--send INSERT INTO t VALUES(10, 20) # trx_2 --connect(con_del_2,localhost,root,,) # After MDEV-30225 is fixed, the following DELETE creates next-key lock for @@ -36,24 +36,26 @@ SET DEBUG_SYNC = 'ib_after_row_insert SIGNAL first_ins_row_inserted WAIT_FOR fir SET TRANSACTION ISOLATION LEVEL READ COMMITTED; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_locked'; SET DEBUG_SYNC = 'lock_wait_start SIGNAL second_del_locked'; -############################################################################### -# This DELETE is locked by the previous DELETE, after that DELETE is -# committed, it will still be locked by the next INSERT on delete-marked -# heap_no 2 record. 
After that INSERT inserted the record with heap_no 3, -# and after heap_no 2 record is purged, this DELETE will be unlocked and +############################################################################## +# trx_3 DELETE is locked by trx_1 DELETE, after trx_1 DELETE is +# committed, it will still be locked by trx_2 INSERT on delete-marked +# heap_no 2 record. After trx_2 INSERT inserted the record with heap_no 3, +# and after heap_no 2 record is purged, trx_3 DELETE will be unlocked and # must restore persistent cursor position at heap_no 3 record, as it has the # same secondary key value as former heap_no 2 record. Then it must be blocked -# by the previous INSERT, and after the INSERT is committed, it must -# delete the record, inserted by the previous INSERT, and the last INSERT(see +# by trx_2 INSERT, and after trx_2 INSERT is committed, it must +# delete the record, inserted by trx_2 INSERT, and trx_4 INSERT(see # below) must be finished without error. But instead this DELETE restores # persistent cursor position to supremum, as a result, it does not delete the -# record, inserted by the previous INSERT, and the last INSERT is finished with +# record, inserted by trx_2 INSERT, and trx_4 INSERT is finished with # duplicate key check error. ############################################################################### ---send DELETE FROM t WHERE b = 20 +--send DELETE FROM t WHERE b = 20 # trx_3 --connection default SET DEBUG_SYNC = 'now WAIT_FOR second_del_locked'; +SET @saved_dbug = @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug="d,enable_row_purge_del_mark_exit_sync_point"; SET DEBUG_SYNC = 'now SIGNAL first_del_cont'; SET DEBUG_SYNC = 'now WAIT_FOR first_ins_row_inserted'; --connection con_del_1 @@ -61,7 +63,8 @@ SET DEBUG_SYNC = 'now WAIT_FOR first_ins_row_inserted'; --connection default --disconnect prevent_purge ---source include/wait_all_purged.inc +SET DEBUG_SYNC = 'now WAIT_FOR row_purge_del_mark_finished'; +SET @@GLOBAL.debug_dbug = @saved_dbug; SET DEBUG_SYNC = 'now SIGNAL first_ins_cont'; --connection con_del_2 @@ -74,7 +77,7 @@ SET DEBUG_SYNC = 'now SIGNAL first_ins_cont'; ############################################################################### # Duplicate key error is expected if the bug is not fixed. ############################################################################### -INSERT INTO t VALUES(30, 20); +INSERT INTO t VALUES(30, 20); # trx_4 --disconnect con_ins_1 --disconnect con_del_1 diff --git a/mysql-test/suite/innodb/t/dml_purge.test b/mysql-test/suite/innodb/t/dml_purge.test index 7034939aa4e..c13ff22572b 100644 --- a/mysql-test/suite/innodb/t/dml_purge.test +++ b/mysql-test/suite/innodb/t/dml_purge.test @@ -32,7 +32,7 @@ UPDATE t1 SET b=4 WHERE a=3; --connection default # Initiate a full purge, which should reset the DB_TRX_ID except for a=3. ---source include/wait_all_purged.inc +SET GLOBAL innodb_max_purge_lag_wait=1; # Initiate a ROLLBACK of the update, which should reset the DB_TRX_ID for a=3. 
--connection con1 ROLLBACK; diff --git a/mysql-test/suite/innodb/t/instant_alter_debug.test b/mysql-test/suite/innodb/t/instant_alter_debug.test index 64f94a78f2e..864d341550c 100644 --- a/mysql-test/suite/innodb/t/instant_alter_debug.test +++ b/mysql-test/suite/innodb/t/instant_alter_debug.test @@ -450,7 +450,9 @@ COMMIT; disconnect stop_purge; connection default; +let $wait_all_purged = 1; --source include/wait_all_purged.inc +let $wait_all_purged = 0; SET DEBUG_SYNC='now SIGNAL s2'; connection dml; @@ -458,6 +460,7 @@ reap; disconnect dml; connection default; +--source include/wait_all_purged.inc SET DEBUG_SYNC=RESET; DROP TABLE t1; diff --git a/mysql-test/suite/innodb/t/instant_alter_purge.test b/mysql-test/suite/innodb/t/instant_alter_purge.test index 9ccf3347d7b..88a56141a1f 100644 --- a/mysql-test/suite/innodb/t/instant_alter_purge.test +++ b/mysql-test/suite/innodb/t/instant_alter_purge.test @@ -6,6 +6,7 @@ if ($have_debug) { SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency=1; +--source include/wait_all_purged.inc --echo # --echo # MDEV-17793 Crash in purge after instant DROP and emptying the table @@ -27,8 +28,7 @@ START TRANSACTION WITH CONSISTENT SNAPSHOT; connection default; ALTER TABLE t1 ADD COLUMN extra TINYINT UNSIGNED NOT NULL DEFAULT 42; -let $wait_all_purged= 1; ---source include/wait_all_purged.inc +SET GLOBAL innodb_max_purge_lag_wait=1; ALTER TABLE t1 DROP extra; disconnect prevent_purge; let $wait_all_purged= 0; diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index 1a0d178f66a..a7f75f56b53 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -1,6 +1,10 @@ --source include/have_innodb.inc --source include/have_sequence.inc +--disable_query_log +call mtr.add_suppression("InnoDB: Difficult to find free blocks in the buffer pool"); +--enable_query_log + # Ensure that the history list length will actually be decremented by purge. SET @saved_frequency = @@GLOBAL.innodb_purge_rseg_truncate_frequency; SET GLOBAL innodb_purge_rseg_truncate_frequency = 1; diff --git a/mysql-test/suite/innodb/t/stat_tables.test b/mysql-test/suite/innodb/t/stat_tables.test index dd18c265e99..ad421587283 100644 --- a/mysql-test/suite/innodb/t/stat_tables.test +++ b/mysql-test/suite/innodb/t/stat_tables.test @@ -28,4 +28,57 @@ XA END 'test'; XA ROLLBACK 'test'; DROP TABLE t1; +--echo # +--echo # MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +--echo # + +# The following tests demonstrate that these columns: +# - innodb_table_stats.last_update +# - innodb_index_stats.last_update +# have sane values close to NOW(), rather than any garbage, +# with all TIMESTAMP formats. 
+ +--echo # +--echo # Testing a non-default format: Field_timestamp0 - UINT4 based +--echo # + +SET @@global.mysql56_temporal_format=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; + +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +DROP TABLE t1; + +--echo # +--echo # Now as the table t1 is dropped, expect no statistics +--echo # + +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; + +--echo # +--echo # Testing with the default format: Field_timestampf - BINARY(4) based with the UNSIGNED_FLAG +--echo # + +SET @@global.mysql56_temporal_format=1; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +DROP TABLE t1; + + --echo # End of 10.6 tests diff --git a/mysql-test/suite/merge/merge.result b/mysql-test/suite/merge/merge.result index 590a2f74e34..7bd7343d4f8 100644 --- a/mysql-test/suite/merge/merge.result +++ b/mysql-test/suite/merge/merge.result @@ -3898,6 +3898,18 @@ DROP TABLE t1; DROP TABLE m1; set global default_storage_engine=@save_default_storage_engine; # +# MDEV-31083 ASAN use-after-poison in myrg_attach_children +# +CREATE TABLE t1 (f TEXT, FULLTEXT (f)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('bar'); +CREATE TABLE mrg (f TEXT) ENGINE=MERGE, UNION(t1); +SELECT * FROM mrg; +f +foo +bar +DROP TABLE mrg, t1; +End of 10.5 tests +# # End of 10.0 tests # # diff --git a/mysql-test/suite/merge/merge.test b/mysql-test/suite/merge/merge.test index 21b296a81d7..44ff4b498a6 100644 --- a/mysql-test/suite/merge/merge.test +++ b/mysql-test/suite/merge/merge.test @@ -2859,6 +2859,18 @@ set global default_storage_engine=@save_default_storage_engine; # gone so execution of other tests won't be affected by their presence. 
--source include/wait_until_count_sessions.inc +--echo # +--echo # MDEV-31083 ASAN use-after-poison in myrg_attach_children +--echo # + +CREATE TABLE t1 (f TEXT, FULLTEXT (f)) ENGINE=MyISAM; +INSERT INTO t1 VALUES ('foo'),('bar'); +CREATE TABLE mrg (f TEXT) ENGINE=MERGE, UNION(t1); +SELECT * FROM mrg; +DROP TABLE mrg, t1; + +--echo End of 10.5 tests + --echo # --echo # End of 10.0 tests --echo # diff --git a/mysql-test/suite/parts/r/partition_purge.result b/mysql-test/suite/parts/r/partition_purge.result new file mode 100644 index 00000000000..072b141cd8d --- /dev/null +++ b/mysql-test/suite/parts/r/partition_purge.result @@ -0,0 +1,26 @@ +CREATE TABLE t1(f1 INT, f2 INT, INDEX(f1))ENGINE=InnoDB +PARTITION BY LIST(f1) ( +PARTITION p1 VALUES in (1, 2, 3), +PARTITION p2 VALUES in (4, 5, 6)); +INSERT INTO t1 VALUES(1, 1), (1, 1), (6, 1); +connect con1,localhost,root,,,; +START TRANSACTION WITH CONSISTENT SNAPSHOT; +connect con2,localhost,root,,,; +SET DEBUG_SYNC="innodb_rollback_inplace_alter_table SIGNAL default_resume WAIT_FOR alter_resume"; +ALTER TABLE t1 ADD UNIQUE INDEX(f1); +connection default; +set DEBUG_SYNC="now WAIT_FOR default_resume"; +SET DEBUG_SYNC="innodb_row_update_for_mysql_begin SIGNAL alter_resume WAIT_FOR alter_finish"; +DELETE FROM t1; +connection con2; +ERROR 23000: Duplicate entry '1' for key 'f1_2' +SET DEBUG_SYNC="now SIGNAL alter_finish"; +connection default; +connection con1; +commit; +connection default; +disconnect con1; +disconnect con2; +InnoDB 0 transactions not purged +drop table t1; +SET DEBUG_SYNC=reset; diff --git a/mysql-test/suite/parts/t/partition_purge.opt b/mysql-test/suite/parts/t/partition_purge.opt new file mode 100644 index 00000000000..a39e5228c9d --- /dev/null +++ b/mysql-test/suite/parts/t/partition_purge.opt @@ -0,0 +1 @@ +--innodb_purge_threads=1 diff --git a/mysql-test/suite/parts/t/partition_purge.test b/mysql-test/suite/parts/t/partition_purge.test new file mode 100644 index 00000000000..2df81b0eb77 --- /dev/null +++ b/mysql-test/suite/parts/t/partition_purge.test @@ -0,0 +1,37 @@ +--source include/have_innodb.inc +--source include/have_partition.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc + +CREATE TABLE t1(f1 INT, f2 INT, INDEX(f1))ENGINE=InnoDB + PARTITION BY LIST(f1) ( + PARTITION p1 VALUES in (1, 2, 3), + PARTITION p2 VALUES in (4, 5, 6)); +INSERT INTO t1 VALUES(1, 1), (1, 1), (6, 1); +connect(con1,localhost,root,,,); +START TRANSACTION WITH CONSISTENT SNAPSHOT; + +connect(con2,localhost,root,,,); +SET DEBUG_SYNC="innodb_rollback_inplace_alter_table SIGNAL default_resume WAIT_FOR alter_resume"; +send ALTER TABLE t1 ADD UNIQUE INDEX(f1); + +connection default; +set DEBUG_SYNC="now WAIT_FOR default_resume"; +SET DEBUG_SYNC="innodb_row_update_for_mysql_begin SIGNAL alter_resume WAIT_FOR alter_finish"; +send DELETE FROM t1; + +connection con2; +--error ER_DUP_ENTRY +reap; +SET DEBUG_SYNC="now SIGNAL alter_finish"; + +connection default; +reap; +connection con1; +commit; +connection default; +disconnect con1; +disconnect con2; +--source ../../innodb/include/wait_all_purged.inc +drop table t1; +SET DEBUG_SYNC=reset; diff --git a/mysql-test/suite/plugins/r/audit_null.result b/mysql-test/suite/plugins/r/audit_null.result index ada85b661ee..45fe05d999b 100644 --- a/mysql-test/suite/plugins/r/audit_null.result +++ b/mysql-test/suite/plugins/r/audit_null.result @@ -14,6 +14,7 @@ Audit_null_called 9 Audit_null_general_error 1 Audit_null_general_log 3 Audit_null_general_result 2 +Audit_null_general_warning 1 create 
procedure au1(x char(16)) select concat("test1", x); call au1("-12"); concat("test1", x) @@ -24,6 +25,7 @@ Audit_null_called 22 Audit_null_general_error 1 Audit_null_general_log 7 Audit_null_general_result 5 +Audit_null_general_warning 1 create table t1 (a int); insert t1 values (1), (2); select * from t1; diff --git a/mysql-test/suite/plugins/r/server_audit.result b/mysql-test/suite/plugins/r/server_audit.result index 212f27fdf84..75cefc34074 100644 --- a/mysql-test/suite/plugins/r/server_audit.result +++ b/mysql-test/suite/plugins/r/server_audit.result @@ -268,6 +268,13 @@ drop database sa_db; select length('01234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'); 
length('0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456 2750 +CREATE TABLE test.t1 (a char(4)); +set sql_mode=""; +insert into test.t1 value("12345"); +Warnings: +Warning 1265 Data truncated for column 'a' at row 1 +set sql_mode=default; +drop table test.t1; set global server_audit_file_path='.'; show status like 'server_audit_current_log'; Variable_name Value @@ -505,6 +512,21 @@ TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,proc, TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,event, TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop database sa_db',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'select length(\'012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567',0 +TIME,HOSTNAME,root,localhost,ID,ID,CREATE,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'CREATE TABLE test.t1 (a char(4))',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set sql_mode=""',0 +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,table_stats, +TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,column_stats, +TIME,HOSTNAME,root,localhost,ID,ID,READ,mysql,index_stats, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'insert into test.t1 value("12345")',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'SHOW WARNINGS',0 +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set sql_mode=default',0 +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,table_stats, +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,column_stats, +TIME,HOSTNAME,root,localhost,ID,ID,WRITE,mysql,index_stats, +TIME,HOSTNAME,root,localhost,ID,ID,DROP,test,t1, +TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'drop table test.t1',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_file_path=\'.\'',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'set global server_audit_file_path=\'.\'',0 TIME,HOSTNAME,root,localhost,ID,ID,QUERY,sa_db,'show status like \'server_audit_current_log\'',0 diff --git a/mysql-test/suite/plugins/r/sql_error_log.result b/mysql-test/suite/plugins/r/sql_error_log.result index 98dfe0374fd..34c711a1e8d 100644 --- a/mysql-test/suite/plugins/r/sql_error_log.result +++ b/mysql-test/suite/plugins/r/sql_error_log.result @@ -44,6 +44,13 @@ END| CALL e1(); ERROR 42S02: Table 'test.non_exists' doesn't exist DROP PROCEDURE e1; +CREATE TABLE t1 (a char(4)); +set sql_mode=""; +insert into t1 value("12345"); +Warnings: +Warning 1265 
Data truncated for column 'a' at row 1 +set sql_mode=default; +drop table t1; uninstall plugin SQL_ERROR_LOG; Warnings: Warning 1620 Plugin is busy and will be uninstalled on shutdown @@ -55,3 +62,5 @@ MYSQL_ERRNO = 1000, MESSAGE_TEXT = 'new message' TIME HOSTNAME ERROR 1366: Incorrect integer value: 'aa' for column `test`.`t1`.`id` at row 1 : insert into t1 values ('aa') TIME HOSTNAME ERROR 1146: Table 'test.non_exists' doesn't exist : INSERT INTO test.non_exists VALUES (0,0,0) /* e1 */ +TIME HOSTNAME WARNING 1265: Data truncated for column 'a' at row 1 : insert into t1 value("12345") +TIME HOSTNAME WARNING 1620: Plugin is busy and will be uninstalled on shutdown : uninstall plugin SQL_ERROR_LOG diff --git a/mysql-test/suite/plugins/t/server_audit.test b/mysql-test/suite/plugins/t/server_audit.test index 1beeaff7538..675b82522ca 100644 --- a/mysql-test/suite/plugins/t/server_audit.test +++ b/mysql-test/suite/plugins/t/server_audit.test @@ -218,6 +218,12 @@ drop database sa_db; select length('012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456
78901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789'); +CREATE TABLE test.t1 (a char(4)); +set sql_mode=""; +insert into test.t1 value("12345"); +set sql_mode=default; +drop table test.t1; + set global server_audit_file_path='.'; --replace_regex /\.[\\\/]/HOME_DIR\// show status like 'server_audit_current_log'; diff --git a/mysql-test/suite/plugins/t/sql_error_log.test b/mysql-test/suite/plugins/t/sql_error_log.test index 6c83e9655ce..03d494031a2 100644 --- a/mysql-test/suite/plugins/t/sql_error_log.test +++ b/mysql-test/suite/plugins/t/sql_error_log.test @@ -1,4 +1,3 @@ - --source include/not_embedded.inc if (!$SQL_ERRLOG_SO) { @@ -66,10 +65,16 @@ DELIMITER ;| CALL e1(); DROP PROCEDURE e1; +CREATE TABLE t1 (a char(4)); +set sql_mode=""; +insert into t1 value("12345"); +set sql_mode=default; +drop table t1; + uninstall plugin SQL_ERROR_LOG; let $MYSQLD_DATADIR= `SELECT @@datadir`; # replace the timestamp and the hostname with constant values ---replace_regex /[1-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] [ 0-9][0-9]:[0-9][0-9]:[0-9][0-9] [^E]*/TIME HOSTNAME / +--replace_regex /[1-9][0-9][0-9][0-9]-[0-9][0-9]-[0-9][0-9] [ 0-9][0-9]:[0-9][0-9]:[0-9][0-9] [^EW]*/TIME HOSTNAME / cat_file $MYSQLD_DATADIR/sql_errors.log; - +remove_file $MYSQLD_DATADIR/sql_errors.log; diff --git a/plugin/audit_null/audit_null.c b/plugin/audit_null/audit_null.c index 6e084c59b77..8f308b7377c 100644 --- a/plugin/audit_null/audit_null.c +++ b/plugin/audit_null/audit_null.c @@ -30,6 +30,7 @@ static volatile int ncalls; /* for SHOW STATUS, see below */ static volatile int ncalls_general_log; static volatile int ncalls_general_error; +static volatile int ncalls_general_warning; static volatile int ncalls_general_result; FILE *f; @@ -53,6 +54,7 @@ static int audit_null_plugin_init(void *arg __attribute__((unused))) ncalls= 0; ncalls_general_log= 0; ncalls_general_error= 0; + ncalls_general_warning= 0; ncalls_general_result= 0; f = fopen("audit_null_tables.log", "w"); @@ -113,6 +115,9 @@ static void audit_null_notify(MYSQL_THD thd __attribute__((unused)), case MYSQL_AUDIT_GENERAL_ERROR: ncalls_general_error++; break; + case MYSQL_AUDIT_GENERAL_WARNING: + ncalls_general_warning++; + break; case MYSQL_AUDIT_GENERAL_RESULT: ncalls_general_result++; break; @@ -179,6 +184,7 @@ static struct st_mysql_show_var simple_status[]= { "general_error", (char *) &ncalls_general_error, SHOW_INT }, { "general_log", (char *) &ncalls_general_log, SHOW_INT }, { "general_result", (char *) &ncalls_general_result, SHOW_INT }, + { "general_warning", (char *) &ncalls_general_error, SHOW_INT }, { 0, 0, 0} }; diff --git a/plugin/sql_errlog/sql_errlog.c b/plugin/sql_errlog/sql_errlog.c index e0ebd6b7737..1454d4bd4dc 100644 --- a/plugin/sql_errlog/sql_errlog.c +++ b/plugin/sql_errlog/sql_errlog.c @@ -84,8 +84,11 @@ static void log_sql_errors(MYSQL_THD thd __attribute__((unused)), const struct mysql_event_general *event = (const struct mysql_event_general*)ev; if (rate && - event->event_subclass == MYSQL_AUDIT_GENERAL_ERROR) + (event->event_subclass == MYSQL_AUDIT_GENERAL_ERROR || + event->event_subclass == MYSQL_AUDIT_GENERAL_WARNING)) { + const char *type= (event->event_subclass == MYSQL_AUDIT_GENERAL_ERROR ? 
+ "ERROR" : "WARNING"); if (++count >= rate) { struct tm t; @@ -94,11 +97,11 @@ static void log_sql_errors(MYSQL_THD thd __attribute__((unused)), count = 0; (void) localtime_r(&event_time, &t); logger_printf(logfile, "%04d-%02d-%02d %2d:%02d:%02d " - "%s ERROR %d: %s : %s\n", - t.tm_year + 1900, t.tm_mon + 1, - t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, - event->general_user, event->general_error_code, - event->general_command, event->general_query); + "%s %s %d: %s : %s\n", + t.tm_year + 1900, t.tm_mon + 1, + t.tm_mday, t.tm_hour, t.tm_min, t.tm_sec, + event->general_user, type, event->general_error_code, + event->general_command, event->general_query); } } } diff --git a/plugin/type_mysql_timestamp/CMakeLists.txt b/plugin/type_mysql_timestamp/CMakeLists.txt new file mode 100644 index 00000000000..ca7bf1e7704 --- /dev/null +++ b/plugin/type_mysql_timestamp/CMakeLists.txt @@ -0,0 +1,17 @@ +# Copyright (c) 2019, MariaDB corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA + +MYSQL_ADD_PLUGIN(type_mysql_timestamp plugin.cc RECOMPILE_FOR_EMBEDDED + MODULE_ONLY COMPONENT Test) diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt new file mode 100644 index 00000000000..e9e2a99b589 --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.opt @@ -0,0 +1 @@ +--plugin-load-add=$TYPE_MYSQL_TIMESTAMP_SO diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm new file mode 100644 index 00000000000..cbb8f1b097f --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/suite.pm @@ -0,0 +1,10 @@ +package My::Suite::Type_test; + +@ISA = qw(My::Suite); + +return "No TYPE_TEST plugin" unless $ENV{TYPE_MYSQL_TIMESTAMP_SO}; + +sub is_default { 1 } + +bless { }; + diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result new file mode 100644 index 00000000000..4a622ffa8bf --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.result @@ -0,0 +1,45 @@ +# +# MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +# +SELECT +PLUGIN_NAME, +PLUGIN_VERSION, +PLUGIN_STATUS, +PLUGIN_TYPE, +PLUGIN_AUTHOR, +PLUGIN_DESCRIPTION, +PLUGIN_LICENSE, +PLUGIN_MATURITY, +PLUGIN_AUTH_VERSION +FROM INFORMATION_SCHEMA.PLUGINS +WHERE PLUGIN_TYPE='DATA TYPE' + AND PLUGIN_NAME LIKE 'type_mysql_timestamp'; +PLUGIN_NAME type_mysql_timestamp +PLUGIN_VERSION 1.0 +PLUGIN_STATUS ACTIVE +PLUGIN_TYPE DATA TYPE +PLUGIN_AUTHOR MariaDB Corporation +PLUGIN_DESCRIPTION Data type 
TYPE_MYSQL_TIMESTAMP +PLUGIN_LICENSE GPL +PLUGIN_MATURITY Experimental +PLUGIN_AUTH_VERSION 1.0 +CREATE TABLE t1 (a TYPE_MYSQL_TIMESTAMP); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` type_mysql_timestamp NULL DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; +CREATE TABLE t1 (a TIMESTAMP); +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` timestamp NULL DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +ALTER TABLE t1 MODIFY a TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `a` type_mysql_timestamp NULL DEFAULT NULL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci +DROP TABLE t1; diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test new file mode 100644 index 00000000000..a7aaa5a3e4c --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp.test @@ -0,0 +1,31 @@ +--source include/have_innodb.inc + +--echo # +--echo # MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +--echo # + +--vertical_results +SELECT + PLUGIN_NAME, + PLUGIN_VERSION, + PLUGIN_STATUS, + PLUGIN_TYPE, + PLUGIN_AUTHOR, + PLUGIN_DESCRIPTION, + PLUGIN_LICENSE, + PLUGIN_MATURITY, + PLUGIN_AUTH_VERSION +FROM INFORMATION_SCHEMA.PLUGINS + WHERE PLUGIN_TYPE='DATA TYPE' + AND PLUGIN_NAME LIKE 'type_mysql_timestamp'; +--horizontal_results + +CREATE TABLE t1 (a TYPE_MYSQL_TIMESTAMP); +SHOW CREATE TABLE t1; +DROP TABLE t1; + +CREATE TABLE t1 (a TIMESTAMP); +SHOW CREATE TABLE t1; +ALTER TABLE t1 MODIFY a TYPE_MYSQL_TIMESTAMP; +SHOW CREATE TABLE t1; +DROP TABLE t1; diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result new file mode 100644 index 00000000000..97be602f673 --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.result @@ -0,0 +1,108 @@ +# +# MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +# +SET @@global.innodb_stats_persistent=0; +SHOW CREATE TABLE mysql.innodb_table_stats; +Table Create Table +innodb_table_stats CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_table_stats; +Table Create Table +innodb_table_stats CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `last_update` type_mysql_timestamp NOT NULL DEFAULT current_timestamp() 
ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_index_stats; +Table Create Table +innodb_index_stats CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `index_name` varchar(64) NOT NULL, + `last_update` type_mysql_timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +SET @@global.innodb_stats_persistent=1; +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (10); +DROP TABLE t1; +SET @@global.innodb_stats_persistent=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_table_stats; +Table Create Table +innodb_table_stats CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_index_stats; +Table Create Table +innodb_index_stats CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) NOT NULL, + `table_name` varchar(199) NOT NULL, + `index_name` varchar(64) NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_bin STATS_PERSISTENT=0 +SET @@global.innodb_stats_persistent=1; +# +# Testing MySQL-5.6-alike Field_timestampf: BINARY(4) based, without UNSIGNED_FLAG +# +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update type_mysql_timestamp NO current_timestamp() on update current_timestamp() +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +Field Type Null Key Default Extra +last_update type_mysql_timestamp NO 
current_timestamp() on update current_timestamp() +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +TIMESTAMPDIFF(DAY,last_update,now())<=1 +1 +DROP TABLE t1; +# +# Now as the table t1 is dropped, expect no statistics +# +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +database_name table_name last_update n_rows clustered_index_size sum_of_other_index_sizes +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +database_name table_name index_name last_update stat_name stat_value sample_size stat_description +# +# Restore the structure of the tables +# +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); diff --git a/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test new file mode 100644 index 00000000000..ff596c36fb3 --- /dev/null +++ b/plugin/type_mysql_timestamp/mysql-test/type_mysql_timestamp/type_mysql_timestamp_stat_tables.test @@ -0,0 +1,62 @@ +--source include/have_innodb.inc + +--echo # +--echo # MDEV-30483 After upgrade to 10.6 from Mysql 5.7 seeing "InnoDB: Column last_update in table mysql.innodb_table_stats is BINARY(4) NOT NULL but should be INT UNSIGNED NOT NULL" +--echo # + +SET @@global.innodb_stats_persistent=0; +SHOW CREATE TABLE mysql.innodb_table_stats; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_table_stats; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_index_stats; +SET @@global.innodb_stats_persistent=1; + +CREATE TABLE t1 (a INT, KEY(a)) ENGINE=InnoDB; +INSERT INTO t1 VALUES (10); +DROP TABLE t1; + +SET @@global.innodb_stats_persistent=0; +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_table_stats; +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +SHOW CREATE TABLE mysql.innodb_index_stats; +SET @@global.innodb_stats_persistent=1; + + +# The following test demonstrate that these columns: +# - innodb_table_stats.last_update +# - innodb_index_stats.last_update +# have sane values close to NOW(), rather than any garbage, +# with MySQL-alike Field_timestampf + +--echo # +--echo # Testing MySQL-5.6-alike Field_timestampf: BINARY(4) based, without UNSIGNED_FLAG +--echo # + +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL DEFAULT 
current_timestamp() ON UPDATE current_timestamp(); +SHOW COLUMNS FROM mysql.innodb_table_stats LIKE 'last_update'; +SHOW COLUMNS FROM mysql.innodb_index_stats LIKE 'last_update'; +CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=1; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT TIMESTAMPDIFF(DAY,last_update,now())<=1 FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; +DROP TABLE t1; + +--echo # +--echo # Now as the table t1 is dropped, expect no statistics +--echo # + +SELECT * FROM mysql.innodb_table_stats +WHERE database_name='test' AND table_name='t1'; +SELECT * FROM mysql.innodb_index_stats +WHERE database_name='test' AND table_name='t1' AND stat_name='size'; + +--echo # +--echo # Restore the structure of the tables +--echo # + +ALTER TABLE mysql.innodb_table_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); +ALTER TABLE mysql.innodb_index_stats MODIFY last_update TIMESTAMP NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(); diff --git a/plugin/type_mysql_timestamp/plugin.cc b/plugin/type_mysql_timestamp/plugin.cc new file mode 100644 index 00000000000..fd6ad896aa7 --- /dev/null +++ b/plugin/type_mysql_timestamp/plugin.cc @@ -0,0 +1,177 @@ +/* + Copyright (c) 2023, MariaDB Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ + +#include +#include +#include +#include "sql_type.h" + + +class Type_collection_local: public Type_collection +{ +protected: + const Type_handler *aggregate_common(const Type_handler *h1, + const Type_handler *h2) const; +public: + const Type_handler *handler_by_name(const LEX_CSTRING &name) const override + { + return NULL; + } + + const Type_handler *aggregate_for_result(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } + + const Type_handler *aggregate_for_comparison(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } + + const Type_handler *aggregate_for_min_max(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } + + const Type_handler *aggregate_for_num_op(const Type_handler *h1, + const Type_handler *h2) + const override + { + return aggregate_common(h1, h2); + } +}; + + +static Type_collection_local type_collection_local; + + +/* + A more MySQL compatible Field: + it does not set the UNSIGNED_FLAG. + This is how MySQL's Field_timestampf works. 
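A minimal usage sketch of the data type this comment describes (the table name and the INSTALL step are illustrative; the test suite above loads the plugin via --plugin-load-add instead):

  INSTALL SONAME 'type_mysql_timestamp';
  CREATE TABLE ts_demo (a TYPE_MYSQL_TIMESTAMP);
  SHOW CREATE TABLE ts_demo;  -- column is reported as type_mysql_timestamp NULL DEFAULT NULL
  ALTER TABLE mysql.innodb_table_stats
    MODIFY last_update TYPE_MYSQL_TIMESTAMP NOT NULL
    DEFAULT current_timestamp() ON UPDATE current_timestamp();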
+*/ +class Field_mysql_timestampf :public Field_timestampf +{ +public: + Field_mysql_timestampf(const LEX_CSTRING &name, + const Record_addr &addr, + enum utype unireg_check_arg, + TABLE_SHARE *share, decimal_digits_t dec_arg) + :Field_timestampf(addr.ptr(), addr.null_ptr(), addr.null_bit(), + unireg_check_arg, &name, share, dec_arg) + { + flags&= ~UNSIGNED_FLAG; // MySQL compatibility + } + void sql_type(String &str) const override + { + sql_type_opt_dec_comment(str, + Field_mysql_timestampf::type_handler()->name(), + dec, type_version_mysql56()); + } + const Type_handler *type_handler() const override; +}; + + +class Type_handler_mysql_timestamp2: public Type_handler_timestamp2 +{ +public: + const Type_collection *type_collection() const override + { + return &type_collection_local; + } + Field *make_table_field_from_def(TABLE_SHARE *share, MEM_ROOT *root, + const LEX_CSTRING *name, + const Record_addr &rec, const Bit_addr &bit, + const Column_definition_attributes *attr, + uint32 flags) const override + { + return new (root) + Field_mysql_timestampf(*name, rec, attr->unireg_check, share, + attr->temporal_dec(MAX_DATETIME_WIDTH)); + } + void Column_definition_implicit_upgrade(Column_definition *c) const override + { + /* + Suppress the automatic upgrade depending on opt_mysql56_temporal_format, + derived from Type_handler_timestamp_common. + */ + } +}; + + +static Type_handler_mysql_timestamp2 type_handler_mysql_timestamp2; + + +const Type_handler *Field_mysql_timestampf::type_handler() const +{ + return &type_handler_mysql_timestamp2; +} + + +const Type_handler * +Type_collection_local::aggregate_common(const Type_handler *h1, + const Type_handler *h2) const +{ + if (h1 == h2) + return h1; + + static const Type_aggregator::Pair agg[]= + { + { + &type_handler_timestamp2, + &type_handler_mysql_timestamp2, + &type_handler_mysql_timestamp2 + }, + {NULL,NULL,NULL} + }; + + return Type_aggregator::find_handler_in_array(agg, h1, h2, true); +} + + +static struct st_mariadb_data_type plugin_descriptor_type_mysql_timestamp= +{ + MariaDB_DATA_TYPE_INTERFACE_VERSION, + &type_handler_mysql_timestamp2 +}; + + + +/*************************************************************************/ + +maria_declare_plugin(type_mysql_timestamp) +{ + MariaDB_DATA_TYPE_PLUGIN, // the plugin type (see include/mysql/plugin.h) + &plugin_descriptor_type_mysql_timestamp, // pointer to type-specific plugin descriptor + "type_mysql_timestamp", // plugin name + "MariaDB Corporation", // plugin author + "Data type TYPE_MYSQL_TIMESTAMP", // the plugin description + PLUGIN_LICENSE_GPL, // the plugin license (see include/mysql/plugin.h) + 0, // Pointer to plugin initialization function + 0, // Pointer to plugin deinitialization function + 0x0100, // Numeric version 0xAABB means AA.BB version + NULL, // Status variables + NULL, // System variables + "1.0", // String version representation + MariaDB_PLUGIN_MATURITY_EXPERIMENTAL // Maturity(see include/mysql/plugin.h)*/ +} +maria_declare_plugin_end; diff --git a/sql/field.h b/sql/field.h index 642456b9774..e57a93b6562 100644 --- a/sql/field.h +++ b/sql/field.h @@ -3354,7 +3354,7 @@ public: /** TIMESTAMP(0..6) - MySQL56 version */ -class Field_timestampf final :public Field_timestamp_with_dec { +class Field_timestampf :public Field_timestamp_with_dec { void store_TIMEVAL(const timeval &tv) override; public: Field_timestampf(uchar *ptr_arg, diff --git a/sql/handler.cc b/sql/handler.cc index a50cdeaf9dd..7ef0a51acb0 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -8144,6 +8144,9 
@@ Compare_keys handler::compare_key_parts(const Field &old_field, concurrent accesses. And it's an overkill to take LOCK_plugin and iterate the whole installed_htons[] array every time. + @note Object victim_thd is not guaranteed to exist after this + function returns. + @param bf_thd brute force THD asking for the abort @param victim_thd victim THD to be aborted @@ -8157,6 +8160,8 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal) if (!WSREP(bf_thd) && !(bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU && wsrep_thd_is_toi(bf_thd))) { + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); + mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); DBUG_RETURN(0); } @@ -8168,6 +8173,8 @@ int ha_abort_transaction(THD *bf_thd, THD *victim_thd, my_bool signal) else { WSREP_WARN("Cannot abort InnoDB transaction"); + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); + mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); } DBUG_RETURN(0); diff --git a/sql/handler.h b/sql/handler.h index 4f12b26f224..bcbaf98572b 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -47,6 +47,7 @@ #include "sql_sequence.h" #include "mem_root_array.h" #include // pair +#include /* __attribute__ */ class Alter_info; class Virtual_column_info; @@ -1463,9 +1464,9 @@ struct handlerton const char *query, uint query_length, const char *db, const char *table_name); - void (*abort_transaction)(handlerton *hton, THD *bf_thd, - THD *victim_thd, my_bool signal); - int (*set_checkpoint)(handlerton *hton, const XID* xid); + void (*abort_transaction)(handlerton *hton, THD *bf_thd, THD *victim_thd, + my_bool signal) __attribute__((nonnull)); + int (*set_checkpoint)(handlerton *hton, const XID *xid); int (*get_checkpoint)(handlerton *hton, XID* xid); /** Check if the version of the table matches the version in the .frm diff --git a/sql/item_func.h b/sql/item_func.h index 6e714814526..6df3b98276b 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -373,7 +373,7 @@ public: { for (uint i= 0; i < arg_count; i++) { - args[i]->no_rows_in_result(); + args[i]->restore_to_before_no_rows_in_result(); } } void convert_const_compared_to_int_field(THD *thd); diff --git a/sql/item_strfunc.cc b/sql/item_strfunc.cc index 53c5ecde103..e0ee2c51706 100644 --- a/sql/item_strfunc.cc +++ b/sql/item_strfunc.cc @@ -1373,13 +1373,13 @@ bool Item_func_sformat::fix_length_and_dec(THD *thd) for (uint i=0 ; i < arg_count ; i++) { - char_length+= args[i]->max_char_length(); if (args[i]->result_type() == STRING_RESULT && Type_std_attributes::agg_item_set_converter(c, func_name_cstring(), args+i, 1, flags, 1)) return TRUE; } + char_length= MAX_BLOB_WIDTH; fix_char_length_ulonglong(char_length); return FALSE; } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index b254e76d14d..389891427f9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1747,6 +1747,11 @@ static void close_connections(void) (void) unlink(mysqld_unix_port); } } + /* + The following is needed to the threads stuck in + setup_connection_thread_globals() + to continue. + */ listen_sockets.free_memory(); mysql_mutex_unlock(&LOCK_start_thread); @@ -2033,6 +2038,7 @@ static void clean_up(bool print_message) end_ssl(); #ifndef EMBEDDED_LIBRARY vio_end(); + listen_sockets.free_memory(); #endif /*!EMBEDDED_LIBRARY*/ #if defined(ENABLED_DEBUG_SYNC) /* End the debug sync facility. See debug_sync.cc. 
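The Item_func_sformat::fix_length_and_dec() change above stops summing argument lengths and simply allows up to MAX_BLOB_WIDTH, because a format string can expand the result far beyond its inputs. A sketch of the kind of query that motivates this (assuming the usual fmtlib-style width specifier accepted by SFORMAT):

  SELECT LENGTH(SFORMAT('{:>100}', 'x'));  -- 1-character argument, 100-character result
  SELECT SFORMAT('value={}', 42);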
*/ diff --git a/sql/rpl_gtid.cc b/sql/rpl_gtid.cc index c4e5c75b10a..7b67a83b3dd 100644 --- a/sql/rpl_gtid.cc +++ b/sql/rpl_gtid.cc @@ -2209,18 +2209,16 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids, /* For each domain_id from ids - when no such domain in binlog state - warn && continue - For each domain.server's last gtid - when not locate the last gtid in glev.list - error out binlog state can't change - otherwise continue + If the domain is already absent from the binlog state + Warn && continue + If any GTID with that domain in binlog state is missing from glev.list + Error out binlog state can't change */ for (ulong i= 0; i < ids->elements; i++) { rpl_binlog_state::element *elem= NULL; uint32 *ptr_domain_id; - bool not_match; + bool all_found; ptr_domain_id= (uint32*) dynamic_array_ptr(ids, i); elem= (rpl_binlog_state::element *) @@ -2235,14 +2233,18 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids, continue; } - for (not_match= true, k= 0; k < elem->hash.records; k++) + all_found= true; + for (k= 0; k < elem->hash.records && all_found; k++) { rpl_gtid *d_gtid= (rpl_gtid *)my_hash_element(&elem->hash, k); - for (ulong l= 0; l < glev->count && not_match; l++) - not_match= !(*d_gtid == glev->list[l]); + bool match_found= false; + for (ulong l= 0; l < glev->count && !match_found; l++) + match_found= match_found || (*d_gtid == glev->list[l]); + if (!match_found) + all_found= false; } - if (not_match) + if (!all_found) { sprintf(errbuf, "binlog files may contain gtids from the domain ('%u') " "being deleted. Make sure to first purge those files", diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc index dd12149ff48..e1a4a25b27a 100644 --- a/sql/service_wsrep.cc +++ b/sql/service_wsrep.cc @@ -32,6 +32,11 @@ extern "C" void wsrep_thd_LOCK(const THD *thd) mysql_mutex_lock(&thd->LOCK_thd_data); } +extern "C" int wsrep_thd_TRYLOCK(const THD *thd) +{ + return mysql_mutex_trylock(&thd->LOCK_thd_data); +} + extern "C" void wsrep_thd_UNLOCK(const THD *thd) { mysql_mutex_unlock(&thd->LOCK_thd_data); @@ -196,6 +201,7 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, /* Note: do not store/reset globals before wsrep_bf_abort() call to avoid losing BF thd context. 
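The rpl_binlog_state::drop_domain() rewrite above keeps the same rule, just stated positively: the delete is refused unless every GTID recorded for that domain in the binlog state is also present in glev->list. The statement that exercises this path (the domain id is hypothetical):

  FLUSH BINARY LOGS DELETE_DOMAIN_ID = (11);
  -- refused with "binlog files may contain gtids from the domain ('11') being deleted.
  -- Make sure to first purge those files" until the offending binlogs are purged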
*/ + mysql_mutex_lock(&victim_thd->LOCK_thd_data); if (!(bf_thd && bf_thd != victim_thd)) { DEBUG_SYNC(victim_thd, "wsrep_before_SR_rollback"); @@ -208,6 +214,7 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, { wsrep_thd_self_abort(victim_thd); } + mysql_mutex_unlock(&victim_thd->LOCK_thd_data); if (bf_thd) { wsrep_store_threadvars(bf_thd); @@ -218,7 +225,7 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, my_bool signal) { mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); - mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); my_bool ret= wsrep_bf_abort(bf_thd, victim_thd); /* Send awake signal if victim was BF aborted or does not @@ -227,19 +234,8 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, */ if ((ret || !wsrep_on(victim_thd)) && signal) { - mysql_mutex_lock(&victim_thd->LOCK_thd_data); - - if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) - { - WSREP_DEBUG("victim is killed already by %llu, skipping awake", - victim_thd->wsrep_aborter); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - return false; - } - victim_thd->wsrep_aborter= bf_thd->thread_id; victim_thd->awake_no_mutex(KILL_QUERY_HARD); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); } else { WSREP_DEBUG("wsrep_thd_bf_abort skipped awake, signal %d", signal); } @@ -368,25 +364,6 @@ extern "C" ulong wsrep_OSU_method_get(const MYSQL_THD thd) return(global_system_variables.wsrep_OSU_method); } -extern "C" bool wsrep_thd_set_wsrep_aborter(THD *bf_thd, THD *victim_thd) -{ - mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); - if (!bf_thd) - { - victim_thd->wsrep_aborter= 0; - WSREP_DEBUG("wsrep_thd_set_wsrep_aborter resetting wsrep_aborter"); - return false; - } - if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) - { - return true; - } - victim_thd->wsrep_aborter= bf_thd->thread_id; - WSREP_DEBUG("wsrep_thd_set_wsrep_aborter setting wsrep_aborter %u", - victim_thd->wsrep_aborter); - return false; -} - extern "C" void wsrep_report_bf_lock_wait(const THD *thd, unsigned long long trx_id) { diff --git a/sql/sql_class.cc b/sql/sql_class.cc index c6a929d6fea..622cf1f465d 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1082,19 +1082,13 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) goto ret; switch (level) { - case Sql_condition::WARN_LEVEL_NOTE: case Sql_condition::WARN_LEVEL_WARN: + mysql_audit_general(this, MYSQL_AUDIT_GENERAL_WARNING, sql_errno, msg); + /* fall through */ + case Sql_condition::WARN_LEVEL_NOTE: got_warning= 1; break; case Sql_condition::WARN_LEVEL_ERROR: - break; - case Sql_condition::WARN_LEVEL_END: - /* Impossible */ - break; - } - - if (level == Sql_condition::WARN_LEVEL_ERROR) - { mysql_audit_general(this, MYSQL_AUDIT_GENERAL_ERROR, sql_errno, msg); is_slave_error= 1; // needed to catch query errors during replication @@ -1103,7 +1097,7 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) /* With wsrep we allow converting BF abort error to warning if errors are ignored. 
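With the raise_condition() change above, WARN_LEVEL_WARN conditions are now passed to audit plugins as MYSQL_AUDIT_GENERAL_WARNING events, which is exactly what the sql_error_log and server_audit test additions earlier in this patch verify. The observable effect, sketched with the sql_error_log plugin installed:

  SET sql_mode='';
  CREATE TABLE t1 (a CHAR(4));
  INSERT INTO t1 VALUES ('12345');   -- Warning 1265: Data truncated for column 'a' at row 1
  SET sql_mode=DEFAULT;
  DROP TABLE t1;
  -- sql_errors.log now also records the warning, e.g.:
  -- TIME HOSTNAME WARNING 1265: Data truncated for column 'a' at row 1 : INSERT INTO t1 VALUES ('12345')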
- */ + */ if (!is_fatal_error && no_errors && (wsrep_trx().bf_aborted() || wsrep_retry_counter)) { @@ -1118,6 +1112,10 @@ Sql_condition* THD::raise_condition(const Sql_condition *cond) da->set_error_status(sql_errno, msg, sqlstate, *cond, raised); } } + break; + case Sql_condition::WARN_LEVEL_END: + /* Impossible */ + break; } query_cache_abort(this, &query_cache_tls); @@ -1294,6 +1292,11 @@ void THD::init() wsrep_affected_rows = 0; m_wsrep_next_trx_id = WSREP_UNDEFINED_TRX_ID; wsrep_aborter = 0; + wsrep_abort_by_kill = NOT_KILLED; + wsrep_abort_by_kill_err = 0; +#ifndef DBUG_OFF + wsrep_killed_state = 0; +#endif /* DBUG_OFF */ wsrep_desynced_backup_stage= false; #endif /* WITH_WSREP */ @@ -1642,6 +1645,13 @@ void THD::reset_for_reuse() #endif #ifdef WITH_WSREP wsrep_free_status(this); + wsrep_cs().reset_error(); + wsrep_aborter= 0; + wsrep_abort_by_kill= NOT_KILLED; + wsrep_abort_by_kill_err= 0; +#ifndef DBUG_OFF + wsrep_killed_state= 0; +#endif /* DBUG_OFF */ #endif /* WITH_WSREP */ } @@ -1898,7 +1908,9 @@ void THD::awake_no_mutex(killed_state state_to_set) } /* Interrupt target waiting inside a storage engine. */ - if (state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this)) + if (state_to_set != NOT_KILLED && + IF_WSREP(!wsrep_is_bf_aborted(this) && wsrep_abort_by_kill == NOT_KILLED, + true)) ha_kill_query(this, thd_kill_level(this)); abort_current_cond_wait(false); @@ -2126,6 +2138,17 @@ void THD::reset_killed() mysql_mutex_unlock(&LOCK_thd_kill); } #ifdef WITH_WSREP + if (WSREP_NNULL(this)) + { + if (wsrep_abort_by_kill != NOT_KILLED) + { + mysql_mutex_assert_not_owner(&LOCK_thd_kill); + mysql_mutex_lock(&LOCK_thd_kill); + wsrep_abort_by_kill= NOT_KILLED; + wsrep_abort_by_kill_err= 0; + mysql_mutex_unlock(&LOCK_thd_kill); + } + } mysql_mutex_assert_not_owner(&LOCK_thd_data); mysql_mutex_lock(&LOCK_thd_data); wsrep_aborter= 0; diff --git a/sql/sql_class.h b/sql/sql_class.h index 75e86bab415..ebbe0de8400 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5384,7 +5384,14 @@ public: bool wsrep_ignore_table; /* thread who has started kill for this THD protected by LOCK_thd_data*/ my_thread_id wsrep_aborter; - + /* Kill signal used, if thread was killed by manual KILL. Protected by + LOCK_thd_kill. */ + std::atomic wsrep_abort_by_kill; + /* */ + struct err_info* wsrep_abort_by_kill_err; +#ifndef DBUG_OFF + int wsrep_killed_state; +#endif /* DBUG_OFF */ /* true if BF abort is observed in do_command() right after reading client's packet, and if the client has sent PS execute command. 
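The wsrep_abort_by_kill bookkeeping added above, together with the kill_one_thread()/sql_kill()/sql_kill_user() rework later in this patch, covers the ordinary manual-kill entry points: on a Galera node a wsrep victim is now aborted via wsrep_kill_thd() rather than a plain awake. The statements that reach this code (thread id and user name are hypothetical):

  KILL QUERY 42;
  KILL CONNECTION 42;
  KILL USER app_user;   -- handled by kill_threads_for_user()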
*/ bool wsrep_delayed_BF_abort; diff --git a/sql/sql_limit.h b/sql/sql_limit.h index 41308bc12db..335aff9d215 100644 --- a/sql/sql_limit.h +++ b/sql/sql_limit.h @@ -61,6 +61,15 @@ class Select_limit_counters with_ties= false; } + /* Send the first row, still honoring offset_limit_cnt */ + void send_first_row() + { + /* Guard against overflow */ + if ((select_limit_cnt= offset_limit_cnt +1 ) == 0) + select_limit_cnt= offset_limit_cnt; + // with_ties= false; Remove // on merge to 10.6 + } + bool is_unlimited() const { return select_limit_cnt == HA_POS_ERROR; } /* diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 80621685ea2..542df625dd9 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -7863,7 +7863,7 @@ static bool wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, thd->wsrep_retry_counter < thd->variables.wsrep_retry_autocommit) { #ifdef ENABLED_DEBUG_SYNC - DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", + DBUG_EXECUTE_IF("sync.wsrep_retry_autocommit", { const char act[]= "now " @@ -9224,23 +9224,20 @@ kill_one_thread(THD *thd, my_thread_id id, killed_state kill_signal, killed_type thd->security_ctx->user_matches(tmp->security_ctx)) #endif /* WITH_WSREP */ { + { #ifdef WITH_WSREP - DEBUG_SYNC(thd, "before_awake_no_mutex"); - if (tmp->wsrep_aborter && tmp->wsrep_aborter != thd->thread_id) - { - /* victim is in hit list already, bail out */ - WSREP_DEBUG("victim %lld has wsrep aborter: %lu, skipping awake()", - id, tmp->wsrep_aborter); - error= 0; - } - else + if (WSREP(tmp)) + { + error = wsrep_kill_thd(thd, tmp, kill_signal); + } + else + { #endif /* WITH_WSREP */ - { - WSREP_DEBUG("kill_one_thread victim: %lld wsrep_aborter %lu" - " by signal %d", - id, tmp->wsrep_aborter, kill_signal); tmp->awake_no_mutex(kill_signal); error= 0; +#ifdef WITH_WSREP + } +#endif /* WITH_WSREP */ } } else @@ -9363,18 +9360,6 @@ static void sql_kill(THD *thd, my_thread_id id, killed_state state, killed_type type) { uint error; -#ifdef WITH_WSREP - if (WSREP(thd)) - { - WSREP_DEBUG("sql_kill called"); - if (thd->wsrep_applier) - { - WSREP_DEBUG("KILL in applying, bailing out here"); - return; - } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) - } -#endif /* WITH_WSREP */ if (likely(!(error= kill_one_thread(thd, id, state, type)))) { if (!thd->killed) @@ -9384,11 +9369,6 @@ void sql_kill(THD *thd, my_thread_id id, killed_state state, killed_type type) } else my_error(error, MYF(0), id); -#ifdef WITH_WSREP - return; - wsrep_error_label: - my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id); -#endif /* WITH_WSREP */ } @@ -9397,18 +9377,6 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) { uint error; ha_rows rows; -#ifdef WITH_WSREP - if (WSREP(thd)) - { - WSREP_DEBUG("sql_kill_user called"); - if (thd->wsrep_applier) - { - WSREP_DEBUG("KILL in applying, bailing out here"); - return; - } - WSREP_TO_ISOLATION_BEGIN(WSREP_MYSQL_DB, NULL, NULL) - } -#endif /* WITH_WSREP */ switch (error= kill_threads_for_user(thd, user, state, &rows)) { case 0: @@ -9424,11 +9392,6 @@ sql_kill_user(THD *thd, LEX_USER *user, killed_state state) default: my_error(error, MYF(0)); } -#ifdef WITH_WSREP - return; - wsrep_error_label: - my_error(ER_KILL_DENIED_ERROR, MYF(0), (long long) thd->thread_id); -#endif /* WITH_WSREP */ } diff --git a/sql/sql_plugin_services.inl b/sql/sql_plugin_services.inl index 3a66e982e7b..f2b2d08de1d 100644 --- a/sql/sql_plugin_services.inl +++ b/sql/sql_plugin_services.inl @@ -151,6 +151,7 @@ static struct wsrep_service_st wsrep_handler = { wsrep_on, 
wsrep_prepare_key_for_innodb, wsrep_thd_LOCK, + wsrep_thd_TRYLOCK, wsrep_thd_UNLOCK, wsrep_thd_query, wsrep_thd_retry_counter, @@ -179,7 +180,6 @@ static struct wsrep_service_st wsrep_handler = { wsrep_OSU_method_get, wsrep_thd_has_ignored_error, wsrep_thd_set_ignored_error, - wsrep_thd_set_wsrep_aborter, wsrep_report_bf_lock_wait, wsrep_thd_kill_LOCK, wsrep_thd_kill_UNLOCK, diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 486c76add24..45501476ed6 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -185,10 +185,10 @@ static void update_depend_map_for_order(JOIN *join, ORDER *order); static ORDER *remove_const(JOIN *join,ORDER *first_order,COND *cond, bool change_list, bool *simple_order); static int return_zero_rows(JOIN *join, select_result *res, - List &tables, - List &fields, bool send_row, + List *tables, + List *fields, bool send_row, ulonglong select_options, const char *info, - Item *having, List &all_fields); + Item *having, List *all_fields); static COND *build_equal_items(JOIN *join, COND *cond, COND_EQUAL *inherited, List *join_list, @@ -1341,11 +1341,40 @@ int SELECT_LEX::vers_setup_conds(THD *thd, TABLE_LIST *tables) DBUG_RETURN(0); } + /***************************************************************************** Check fields, find best join, do the select and output fields. mysql_select assumes that all tables are already opened *****************************************************************************/ +/* + Check if we have a field reference. If yes, we have to use + mixed_implicit_grouping. +*/ + +static bool check_list_for_field(List *items) +{ + List_iterator_fast select_it(*items); + Item *select_el; + + while ((select_el= select_it++)) + { + if (select_el->with_field()) + return true; + } + return false; +} + +static bool check_list_for_field(ORDER *order) +{ + for (; order; order= order->next) + { + if (order->item[0]->with_field()) + return true; + } + return false; +} + /** Prepare of whole select (including sub queries in future). @@ -1427,53 +1456,44 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, DBUG_RETURN(-1); /* - TRUE if the SELECT list mixes elements with and without grouping, - and there is no GROUP BY clause. Mixing non-aggregated fields with - aggregate functions in the SELECT list is a MySQL extenstion that - is allowed only if the ONLY_FULL_GROUP_BY sql mode is not set. + mixed_implicit_grouping will be set to TRUE if the SELECT list + mixes elements with and without grouping, and there is no GROUP BY + clause. + Mixing non-aggregated fields with aggregate functions in the + SELECT list or HAVING is a MySQL extension that is allowed only if + the ONLY_FULL_GROUP_BY sql mode is not set. */ mixed_implicit_grouping= false; if ((~thd->variables.sql_mode & MODE_ONLY_FULL_GROUP_BY) && select_lex->with_sum_func && !group_list) { - List_iterator_fast select_it(fields_list); - Item *select_el; /* Element of the SELECT clause, can be an expression. 
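The new check_list_for_field() helpers and the reworded comment above cover MariaDB's implicit-grouping extension; a concrete query of the kind they detect (sketch):

  CREATE TABLE t1 (a INT, b INT);
  -- Non-aggregated column mixed with an aggregate and no GROUP BY:
  -- accepted only while ONLY_FULL_GROUP_BY is not in sql_mode.
  SELECT a, MAX(b) FROM t1;
  -- With an empty t1 the single result row is (NULL, NULL), which is why the
  -- affected tables are marked maybe_null before semantic analysis.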
*/ - bool found_field_elem= false; - bool found_sum_func_elem= false; - - while ((select_el= select_it++)) + if (check_list_for_field(&fields_list) || + check_list_for_field(order)) { - if (select_el->with_sum_func()) - found_sum_func_elem= true; - if (select_el->with_field()) - found_field_elem= true; - if (found_sum_func_elem && found_field_elem) + List_iterator_fast li(select_lex->leaf_tables); + + mixed_implicit_grouping= true; // mark for future + + while (TABLE_LIST *tbl= li++) { - mixed_implicit_grouping= true; - break; + /* + If the query uses implicit grouping where the select list + contains both aggregate functions and non-aggregate fields, + any non-aggregated field may produce a NULL value. Set all + fields of each table as nullable before semantic analysis to + take into account this change of nullability. + + Note: this loop doesn't touch tables inside merged + semi-joins, because subquery-to-semijoin conversion has not + been done yet. This is intended. + */ + if (tbl->table) + tbl->table->maybe_null= 1; } } } - table_count= select_lex->leaf_tables.elements; - TABLE_LIST *tbl; - List_iterator_fast li(select_lex->leaf_tables); - while ((tbl= li++)) - { - /* - If the query uses implicit grouping where the select list contains both - aggregate functions and non-aggregate fields, any non-aggregated field - may produce a NULL value. Set all fields of each table as nullable before - semantic analysis to take into account this change of nullability. - - Note: this loop doesn't touch tables inside merged semi-joins, because - subquery-to-semijoin conversion has not been done yet. This is intended. - */ - if (mixed_implicit_grouping && tbl->table) - tbl->table->maybe_null= 1; - } - uint real_og_num= og_num; if (skip_order_by && select_lex != select_lex->master_unit()->global_parameters()) @@ -1486,14 +1506,14 @@ JOIN::prepare(TABLE_LIST *tables_init, COND *conds_init, uint og_num, DBUG_RETURN(-1); ref_ptrs= ref_ptr_array_slice(0); - + enum_parsing_place save_place= thd->lex->current_select->context_analysis_place; thd->lex->current_select->context_analysis_place= SELECT_LIST; { List_iterator_fast it(select_lex->leaf_tables); - while ((tbl= it++)) + while (TABLE_LIST *tbl= it++) { if (tbl->table_function && tbl->table_function->setup(thd, tbl, select_lex_arg)) @@ -4135,7 +4155,7 @@ bool JOIN::make_aggr_tables_info() set_items_ref_array(items0); if (join_tab) join_tab[exec_join_tab_cnt() + aggr_tables - 1].next_select= - setup_end_select_func(this, NULL); + setup_end_select_func(this); group= has_group_by; DBUG_RETURN(false); @@ -4537,13 +4557,7 @@ JOIN::reinit() } } - /* Reset of sum functions */ - if (sum_funcs) - { - Item_sum *func, **func_ptr= sum_funcs; - while ((func= *(func_ptr++))) - func->clear(); - } + clear_sum_funcs(); if (no_rows_in_result_called) { @@ -4828,12 +4842,12 @@ int JOIN::exec_inner() } else { - (void) return_zero_rows(this, result, select_lex->leaf_tables, - *columns_list, + (void) return_zero_rows(this, result, &select_lex->leaf_tables, + columns_list, send_row_on_empty_set(), select_options, zero_result_cause, - having ? having : tmp_having, all_fields); + having ? 
having : tmp_having, &all_fields); DBUG_RETURN(0); } } @@ -9099,7 +9113,7 @@ best_access_path(JOIN *join, { Json_writer_object trace_access_hash(thd); double refills, row_copy_cost, copy_cost, cur_cost, where_cost; - double matching_combinations, fanout, join_sel; + double matching_combinations, fanout= 0.0, join_sel; trace_access_hash.add("type", "hash"); trace_access_hash.add("index", "hj-key"); /* Estimate the cost of the hash join access to the table */ @@ -16773,10 +16787,36 @@ ORDER *simple_remove_const(ORDER *order, COND *where) } +/* + Set all fields in the table to have a null value + + @param tables Table list +*/ + +static void make_tables_null_complemented(List *tables) +{ + List_iterator ti(*tables); + TABLE_LIST *table; + while ((table= ti++)) + { + /* + Don't touch semi-join materialization tables, as the a join_free() + call may have freed them (and HAVING clause can't have references to + them anyway). + */ + if (!table->is_jtbm()) + { + TABLE *tbl= table->table; + mark_as_null_row(tbl); // Set fields to NULL + } + } +} + + static int -return_zero_rows(JOIN *join, select_result *result, List &tables, - List &fields, bool send_row, ulonglong select_options, - const char *info, Item *having, List &all_fields) +return_zero_rows(JOIN *join, select_result *result, List *tables, + List *fields, bool send_row, ulonglong select_options, + const char *info, Item *having, List *all_fields) { DBUG_ENTER("return_zero_rows"); @@ -16792,24 +16832,15 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, Set all tables to have NULL row. This is needed as we will be evaluating HAVING condition. */ - List_iterator ti(tables); - TABLE_LIST *table; - while ((table= ti++)) - { - /* - Don't touch semi-join materialization tables, as the above join_free() - call has freed them (and HAVING clause can't have references to them - anyway). - */ - if (!table->is_jtbm()) - mark_as_null_row(table->table); // All fields are NULL - } - List_iterator_fast it(all_fields); + make_tables_null_complemented(tables); + + List_iterator_fast it(*all_fields); Item *item; /* Inform all items (especially aggregating) to calculate HAVING correctly, also we will need it for sending results. */ + join->no_rows_in_result_called= 1; while ((item= it++)) item->no_rows_in_result(); if (having && having->val_int() == 0) @@ -16823,12 +16854,12 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, join->thd->limit_found_rows= 0; } - if (!(result->send_result_set_metadata(fields, + if (!(result->send_result_set_metadata(*fields, Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF))) { bool send_error= FALSE; if (send_row) - send_error= result->send_data_with_check(fields, join->unit, 0) > 0; + send_error= result->send_data_with_check(*fields, join->unit, 0) > 0; if (likely(!send_error)) result->send_eof(); // Should be safe } @@ -16844,49 +16875,42 @@ return_zero_rows(JOIN *join, select_result *result, List &tables, } /** - used only in JOIN::clear (always) and in do_select() - (if there where no matching rows) + Reset table rows to contain a null-complement row (all fields are null) + + Used only in JOIN::clear() and in do_select() if there where no matching rows. @param join JOIN - @param cleared_tables If not null, clear also const tables and mark all - cleared tables in the map. cleared_tables is only - set when called from do_select() when there is a - group function and there where no matching rows. + @param cleared_tables Used to mark all cleared tables in the map. 
Needed for + unclear_tables() to know which tables to restore to + their original state. */ static void clear_tables(JOIN *join, table_map *cleared_tables) { - /* - must clear only the non-const tables as const tables are not re-calculated. - */ + DBUG_ASSERT(cleared_tables); for (uint i= 0 ; i < join->table_count ; i++) { TABLE *table= join->table[i]; if (table->null_row) continue; // Nothing more to do - if (!(table->map & join->const_table_map) || cleared_tables) + (*cleared_tables)|= (((table_map) 1) << i); + if (table->s->null_bytes) { - if (cleared_tables) - { - (*cleared_tables)|= (((table_map) 1) << i); - if (table->s->null_bytes) - { - /* - Remember null bits for the record so that we can restore the - original const record in unclear_tables() - */ - memcpy(table->record[1], table->null_flags, table->s->null_bytes); - } - } - mark_as_null_row(table); // All fields are NULL + /* + Remember null bits for the record so that we can restore the + original const record in unclear_tables() + */ + memcpy(table->record[1], table->null_flags, table->s->null_bytes); } + mark_as_null_row(table); // All fields are NULL } } /** Reverse null marking for tables and restore null bits. + This return the tables to the state of before clear_tables(). We have to do this because the tables may be re-used in a sub query and the subquery will assume that the const tables contains the original @@ -22699,9 +22723,9 @@ void set_postjoin_aggr_write_func(JOIN_TAB *tab) end_select function to use. This function can't fail. */ -Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab) +Next_select_func setup_end_select_func(JOIN *join) { - TMP_TABLE_PARAM *tmp_tbl= tab ? tab->tmp_table_param : &join->tmp_table_param; + TMP_TABLE_PARAM *tmp_tbl= &join->tmp_table_param; /* Choose method for presenting result to user. Use end_send_group @@ -22774,7 +22798,7 @@ do_select(JOIN *join, Procedure *procedure) if (join->only_const_tables() && !join->need_tmp) { - Next_select_func end_select= setup_end_select_func(join, NULL); + Next_select_func end_select= setup_end_select_func(join); /* HAVING will be checked after processing aggregate functions, @@ -23264,6 +23288,7 @@ sub_select(JOIN *join,JOIN_TAB *join_tab,bool end_of_records) } } + /* Restore state if mark_as_null_row() have been called */ if (join_tab->last_inner) { JOIN_TAB *last_inner_tab= join_tab->last_inner; @@ -24715,6 +24740,12 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) { int idx= -1; enum_nested_loop_state ok_code= NESTED_LOOP_OK; + /* + join_tab can be 0 in the case all tables are const tables and we did not + need a temporary table to store the result. + In this case we use the original given fields, which is stored in + join->fields. + */ List *fields= join_tab ? (join_tab-1)->fields : join->fields; DBUG_ENTER("end_send_group"); @@ -24724,10 +24755,12 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) if (!join->first_record || end_of_records || (idx=test_if_group_changed(join->group_fields)) >= 0) { + if (!join->group_sent && (join->first_record || (end_of_records && !join->group && !join->group_optimized_away))) { + table_map cleared_tables= (table_map) 0; if (join->procedure) join->procedure->end_group(); /* Test if there was a group change. */ @@ -24752,11 +24785,13 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) /* Reset all sum functions on group change. 
*/ if (!join->first_record) { - List_iterator_fast it(*join->fields); - Item *item; /* No matching rows for group function */ - join->clear(); + List_iterator_fast it(*fields); + Item *item; + join->no_rows_in_result_called= 1; + + join->clear(&cleared_tables); while ((item= it++)) item->no_rows_in_result(); } @@ -24784,7 +24819,14 @@ end_send_group(JOIN *join, JOIN_TAB *join_tab, bool end_of_records) if (join->rollup_send_data((uint) (idx+1))) error= 1; } - } + if (join->no_rows_in_result_called) + { + /* Restore null tables to original state */ + join->no_rows_in_result_called= 0; + if (cleared_tables) + unclear_tables(join, &cleared_tables); + } + } if (unlikely(error > 0)) DBUG_RETURN(NESTED_LOOP_ERROR); /* purecov: inspected */ if (end_of_records) @@ -25100,6 +25142,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), { if (join->first_record || (end_of_records && !join->group)) { + table_map cleared_tables= (table_map) 0; if (join->procedure) join->procedure->end_group(); int send_group_parts= join->send_group_parts; @@ -25108,7 +25151,7 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), if (!join->first_record) { /* No matching rows for group function */ - join->clear(); + join->clear(&cleared_tables); } copy_sum_funcs(join->sum_funcs, join->sum_funcs_end[send_group_parts]); @@ -25131,6 +25174,8 @@ end_write_group(JOIN *join, JOIN_TAB *join_tab __attribute__((unused)), DBUG_RETURN(NESTED_LOOP_ERROR); } } + if (cleared_tables) + unclear_tables(join, &cleared_tables); if (end_of_records) goto end; } @@ -26864,7 +26909,7 @@ JOIN_TAB::remove_duplicates() !(join->select_options & OPTION_FOUND_ROWS)) { // only const items with no OPTION_FOUND_ROWS - join->unit->lim.set_single_row(); // Only send first row + join->unit->lim.send_first_row(); // Only send first row my_free(sortorder); DBUG_RETURN(false); } @@ -29279,11 +29324,8 @@ int JOIN::rollup_write_data(uint idx, TMP_TABLE_PARAM *tmp_table_param_arg, (end_send_group/end_write_group) */ -void JOIN::clear() +void inline JOIN::clear_sum_funcs() { - clear_tables(this, 0); - copy_fields(&tmp_table_param); - if (sum_funcs) { Item_sum *func, **func_ptr= sum_funcs; @@ -29293,6 +29335,22 @@ void JOIN::clear() } +/* + Prepare for returning 'empty row' when there is no matching row. + + - Mark all tables with mark_as_null_row() + - Make a copy of of all simple SELECT items + - Reset all sum functions to NULL or 0. 
+*/ + +void JOIN::clear(table_map *cleared_tables) +{ + clear_tables(this, cleared_tables); + copy_fields(&tmp_table_param); + clear_sum_funcs(); +} + + /** Print an EXPLAIN line with all NULLs and given message in the 'Extra' column diff --git a/sql/sql_select.h b/sql/sql_select.h index 59b058c84e4..2097b674852 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -227,7 +227,7 @@ enum sj_strategy_enum typedef enum_nested_loop_state (*Next_select_func)(JOIN *, struct st_join_table *, bool); -Next_select_func setup_end_select_func(JOIN *join, JOIN_TAB *tab); +Next_select_func setup_end_select_func(JOIN *join); int rr_sequential(READ_RECORD *info); int read_record_func_for_rr_and_unpack(READ_RECORD *info); Item *remove_pushed_top_conjuncts(THD *thd, Item *cond); @@ -1812,7 +1812,8 @@ public: void join_free(); /** Cleanup this JOIN, possibly for reuse */ void cleanup(bool full); - void clear(); + void clear(table_map *cleared_tables); + void inline clear_sum_funcs(); bool send_row_on_empty_set() { return (do_send_rows && implicit_grouping && !group_optimized_away && diff --git a/sql/sql_type.h b/sql/sql_type.h index 52c17d61d2e..8ebdb38db49 100644 --- a/sql/sql_type.h +++ b/sql/sql_type.h @@ -7574,8 +7574,9 @@ extern Named_type_handler type_handler_time; extern Named_type_handler type_handler_time2; extern Named_type_handler type_handler_datetime; extern Named_type_handler type_handler_datetime2; -extern Named_type_handler type_handler_timestamp; -extern Named_type_handler type_handler_timestamp2; + +extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_timestamp; +extern MYSQL_PLUGIN_IMPORT Named_type_handler type_handler_timestamp2; extern Type_handler_interval_DDhhmmssff type_handler_interval_DDhhmmssff; diff --git a/sql/table.h b/sql/table.h index 3874ccacb9a..1adbae84823 100644 --- a/sql/table.h +++ b/sql/table.h @@ -3445,10 +3445,16 @@ inline void mark_as_null_row(TABLE *table) bfill(table->null_flags,table->s->null_bytes,255); } +/* + Restore table to state before mark_as_null_row() call. + This assumes that the caller has restored table->null_flags, + as is done in unclear_tables(). 
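JOIN::clear() above, together with clear_tables()/unclear_tables(), preserves the long-standing behaviour that an aggregate query with no matching rows still returns exactly one row, while letting the null-row marking be undone so const tables keep their original contents for later use. The behaviour being preserved, as a sketch:

  CREATE TABLE t1 (a INT);
  SELECT MAX(a), COUNT(*) FROM t1 WHERE 1 = 0;
  -- one row: NULL, 0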
+*/ + inline void unmark_as_null_row(TABLE *table) { - table->null_row=0; - table->status= STATUS_NO_RECORD; + table->null_row= 0; + table->status&= ~STATUS_NULL_ROW; } bool is_simple_order(ORDER *order); diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc index 9bfaf9285f3..e1508884075 100644 --- a/sql/wsrep_dummy.cc +++ b/sql/wsrep_dummy.cc @@ -56,6 +56,11 @@ my_bool wsrep_on(const THD *) void wsrep_thd_LOCK(const THD *) { } +int wsrep_thd_TRYLOCK(const THD *) +{ + return 0; +} + void wsrep_thd_UNLOCK(const THD *) { } @@ -154,8 +159,6 @@ void wsrep_thd_set_ignored_error(THD*, my_bool) { } ulong wsrep_OSU_method_get(const THD*) { return 0;} -bool wsrep_thd_set_wsrep_aborter(THD*, THD*) -{ return 0;} void wsrep_report_bf_lock_wait(const THD*, unsigned long long) diff --git a/sql/wsrep_high_priority_service.cc b/sql/wsrep_high_priority_service.cc index 53ef20f3e78..0a2fa273723 100644 --- a/sql/wsrep_high_priority_service.cc +++ b/sql/wsrep_high_priority_service.cc @@ -510,6 +510,7 @@ int Wsrep_high_priority_service::log_dummy_write_set(const wsrep::ws_handle& ws_ m_thd->wait_for_prior_commit(); } + WSREP_DEBUG("checkpointing dummy write set %lld", ws_meta.seqno().get()); wsrep_set_SE_checkpoint(ws_meta.gtid(), wsrep_gtid_server.gtid()); if (!WSREP_EMULATE_BINLOG(m_thd)) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 3a8bfe3532d..71609e651a7 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -52,6 +52,7 @@ #include "log_event.h" #include "sql_connect.h" #include "thread_cache.h" +#include "debug_sync.h" #include @@ -3112,6 +3113,20 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, request_thd, granted_thd); ticket->wsrep_report(wsrep_debug); + DEBUG_SYNC(request_thd, "before_wsrep_thd_abort"); + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", { + const char act[]= "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(request_thd, STRING_WITH_LEN(act))); + };); + + /* Here we will call wsrep_abort_transaction so we should hold + THD::LOCK_thd_data to protect victim from concurrent usage + and THD::LOCK_thd_kill to protect from disconnect or delete. 
+ + */ + mysql_mutex_lock(&granted_thd->LOCK_thd_kill); mysql_mutex_lock(&granted_thd->LOCK_thd_data); if (wsrep_thd_is_toi(granted_thd) || @@ -3123,13 +3138,11 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, wsrep_thd_query(request_thd)); THD_STAGE_INFO(request_thd, stage_waiting_isolation); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); } else if (wsrep_thd_is_SR(granted_thd) && !wsrep_thd_is_SR(request_thd)) { WSREP_MDL_LOG(INFO, "MDL conflict, DDL vs SR", schema, schema_len, request_thd, granted_thd); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); WSREP_DEBUG("wsrep_handle_mdl_conflict DDL vs SR for %s", wsrep_thd_query(request_thd)); THD_STAGE_INFO(request_thd, stage_waiting_isolation); @@ -3141,6 +3154,7 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, request_thd, granted_thd); ticket->wsrep_report(true); mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); unireg_abort(1); } } @@ -3151,7 +3165,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, wsrep_thd_query(request_thd)); THD_STAGE_INFO(request_thd, stage_waiting_ddl); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); if (granted_thd->current_backup_stage != BACKUP_FINISHED && wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP)) { @@ -3165,7 +3178,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, wsrep_thd_query(request_thd)); THD_STAGE_INFO(request_thd, stage_waiting_isolation); ticket->wsrep_report(wsrep_debug); - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); } else @@ -3179,7 +3191,6 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, if (granted_thd->wsrep_trx().active()) { - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); wsrep_abort_thd(request_thd, granted_thd, 1); } else @@ -3188,10 +3199,9 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, Granted_thd is likely executing with wsrep_on=0. If the requesting thd is BF, BF abort and wait. 
*/ - mysql_mutex_unlock(&granted_thd->LOCK_thd_data); - if (wsrep_thd_is_BF(request_thd, FALSE)) { + granted_thd->awake_no_mutex(KILL_QUERY_HARD); ha_abort_transaction(request_thd, granted_thd, TRUE); } else @@ -3200,10 +3210,14 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, schema, schema_len, request_thd, granted_thd); ticket->wsrep_report(true); + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); unireg_abort(1); } } } + mysql_mutex_unlock(&granted_thd->LOCK_thd_data); + mysql_mutex_unlock(&granted_thd->LOCK_thd_kill); } else { @@ -3215,13 +3229,17 @@ void wsrep_handle_mdl_conflict(MDL_context *requestor_ctx, static bool abort_replicated(THD *thd) { bool ret_code= false; + mysql_mutex_lock(&thd->LOCK_thd_kill); + mysql_mutex_lock(&thd->LOCK_thd_data); if (thd->wsrep_trx().state() == wsrep::transaction::s_committing) { WSREP_DEBUG("aborting replicated trx: %llu", (ulonglong)(thd->real_id)); - (void)wsrep_abort_thd(thd, thd, TRUE); + wsrep_abort_thd(thd, thd, TRUE); ret_code= true; } + mysql_mutex_unlock(&thd->LOCK_thd_data); + mysql_mutex_unlock(&thd->LOCK_thd_kill); return ret_code; } diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc index 9be6af71c56..52a0a9753c1 100644 --- a/sql/wsrep_server_service.cc +++ b/sql/wsrep_server_service.cc @@ -148,9 +148,13 @@ void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_se wsrep_delete_threadvars(); } -void Wsrep_server_service::background_rollback(wsrep::client_state& client_state) +void Wsrep_server_service::background_rollback( + wsrep::unique_lock &lock WSREP_UNUSED, + wsrep::client_state &client_state) { - Wsrep_client_state& cs= static_cast(client_state); + DBUG_ASSERT(lock.owns_lock()); + Wsrep_client_state &cs= static_cast(client_state); + mysql_mutex_assert_owner(&cs.thd()->LOCK_thd_data); wsrep_fire_rollbacker(cs.thd()); } diff --git a/sql/wsrep_server_service.h b/sql/wsrep_server_service.h index 168e98206e3..0fc48402024 100644 --- a/sql/wsrep_server_service.h +++ b/sql/wsrep_server_service.h @@ -46,7 +46,8 @@ public: void release_high_priority_service(wsrep::high_priority_service*); - void background_rollback(wsrep::client_state&); + void background_rollback(wsrep::unique_lock &, + wsrep::client_state &); void bootstrap(); void log_message(enum wsrep::log::level, const char*); diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 420a25dd2ae..682e64859b4 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -307,48 +307,9 @@ void wsrep_fire_rollbacker(THD *thd) } } - -int wsrep_abort_thd(THD *bf_thd, - THD *victim_thd, - my_bool signal) +static bool wsrep_bf_abort_low(THD *bf_thd, THD *victim_thd) { - DBUG_ENTER("wsrep_abort_thd"); - - mysql_mutex_lock(&victim_thd->LOCK_thd_data); - - /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) - might not be true. 
- */ - if ((WSREP_NNULL(bf_thd) || - ((WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) && - wsrep_thd_is_toi(bf_thd))) && - !wsrep_thd_is_aborting(victim_thd)) - { - WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", - (long long)bf_thd->real_id, (long long)victim_thd->real_id); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - ha_abort_transaction(bf_thd, victim_thd, signal); - DBUG_RETURN(1); - } - else - { - WSREP_DEBUG("wsrep_abort_thd not effective: bf %llu victim %llu " - "wsrep %d wsrep_on %d RSU %d TOI %d aborting %d", - (long long)bf_thd->real_id, (long long)victim_thd->real_id, - WSREP_NNULL(bf_thd), WSREP_ON, - bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU, - wsrep_thd_is_toi(bf_thd), - wsrep_thd_is_aborting(victim_thd)); - } - - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); - DBUG_RETURN(1); -} - -bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) -{ - WSREP_LOG_THD(bf_thd, "BF aborter before"); - WSREP_LOG_THD(victim_thd, "victim before"); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); #ifdef ENABLED_DEBUG_SYNC DBUG_EXECUTE_IF("sync.wsrep_bf_abort", @@ -362,6 +323,85 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) };); #endif + wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno()); + bool ret; + + { + /* Adopt the lock, it is being held by the caller. */ + Wsrep_mutex wsm{&victim_thd->LOCK_thd_data}; + wsrep::unique_lock lock{wsm, std::adopt_lock}; + + if (wsrep_thd_is_toi(bf_thd)) + { + ret= victim_thd->wsrep_cs().total_order_bf_abort(lock, bf_seqno); + } + else + { + DBUG_ASSERT(WSREP(victim_thd) ? victim_thd->wsrep_trx().active() : 1); + ret= victim_thd->wsrep_cs().bf_abort(lock, bf_seqno); + } + if (ret) + { + /* BF abort should be allowed only once by wsrep-lib.*/ + DBUG_ASSERT(victim_thd->wsrep_aborter == 0); + victim_thd->wsrep_aborter= bf_thd->thread_id; + wsrep_bf_aborts_counter++; + } + lock.release(); /* No unlock at the end of the scope. */ + } + + /* Sanity check for wsrep-lib calls to return with LOCK_thd_data held. */ + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + + return ret; +} + +void wsrep_abort_thd(THD *bf_thd, + THD *victim_thd, + my_bool signal) +{ + DBUG_ENTER("wsrep_abort_thd"); + + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + + /* Note that when you use RSU node is desynced from cluster, thus WSREP(thd) + might not be true. 
+ */ + if ((WSREP(bf_thd) + || ((WSREP_ON || bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU) + && wsrep_thd_is_toi(bf_thd)) + || bf_thd->lex->sql_command == SQLCOM_KILL) + && !wsrep_thd_is_aborting(victim_thd) && + wsrep_bf_abort_low(bf_thd, victim_thd) && + !victim_thd->wsrep_cs().is_rollbacker_active()) + { + WSREP_DEBUG("wsrep_abort_thd, by: %llu, victim: %llu", + (long long)bf_thd->real_id, (long long)victim_thd->real_id); + victim_thd->awake_no_mutex(KILL_QUERY_HARD); + ha_abort_transaction(bf_thd, victim_thd, signal); + } + else + { + WSREP_DEBUG("wsrep_abort_thd not effective: bf %llu victim %llu " + "wsrep %d wsrep_on %d RSU %d TOI %d aborting %d", + (long long)bf_thd->real_id, (long long)victim_thd->real_id, + WSREP_NNULL(bf_thd), WSREP_ON, + bf_thd->variables.wsrep_OSU_method == WSREP_OSU_RSU, + wsrep_thd_is_toi(bf_thd), + wsrep_thd_is_aborting(victim_thd)); + } + + DBUG_VOID_RETURN; +} + +bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) +{ + WSREP_LOG_THD(bf_thd, "BF aborter before"); + WSREP_LOG_THD(victim_thd, "victim before"); + + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + if (WSREP(victim_thd) && !victim_thd->wsrep_trx().active()) { WSREP_DEBUG("wsrep_bf_abort, BF abort for non active transaction." @@ -384,30 +424,84 @@ bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd) wsrep_check_mode(WSREP_MODE_BF_MARIABACKUP)) { WSREP_DEBUG("killing connection for non wsrep session"); - mysql_mutex_lock(&victim_thd->LOCK_thd_data); victim_thd->awake_no_mutex(KILL_CONNECTION); - mysql_mutex_unlock(&victim_thd->LOCK_thd_data); } return false; } - bool ret; - wsrep::seqno bf_seqno(bf_thd->wsrep_trx().ws_meta().seqno()); + return wsrep_bf_abort_low(bf_thd, victim_thd); +} - if (wsrep_thd_is_toi(bf_thd)) +uint wsrep_kill_thd(THD *thd, THD *victim_thd, killed_state kill_signal) +{ + DBUG_ENTER("wsrep_kill_thd"); + DBUG_ASSERT(WSREP(victim_thd)); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_data); + using trans= wsrep::transaction; + auto trx_state= victim_thd->wsrep_trx().state(); +#ifndef DBUG_OFF + victim_thd->wsrep_killed_state= trx_state; +#endif /* DBUG_OFF */ + /* + Already killed or in commit codepath. Mark the victim as killed, + the killed status will be restored in wsrep_after_commit() and + will be processed after the commit is over. In case of multiple + KILLs happened on commit codepath, the last one will be effective. + */ + if (victim_thd->wsrep_abort_by_kill || + trx_state == trans::s_preparing || + trx_state == trans::s_committing || + trx_state == trans::s_ordered_commit) { - ret= victim_thd->wsrep_cs().total_order_bf_abort(bf_seqno); + victim_thd->wsrep_abort_by_kill= kill_signal; + DBUG_RETURN(0); } - else + /* + Mark killed victim_thd with kill_signal so that awake_no_mutex does + not dive into storage engine. We use ha_abort_transaction() + to do the storage engine part for wsrep THDs. + */ + DEBUG_SYNC(thd, "wsrep_kill_before_awake_no_mutex"); + victim_thd->wsrep_abort_by_kill= kill_signal; + victim_thd->awake_no_mutex(kill_signal); + /* ha_abort_transaction() releases tmp->LOCK_thd_kill, so tmp + is not safe to access anymore. */ + ha_abort_transaction(thd, victim_thd, 1); + DBUG_RETURN(0); +} + +void wsrep_backup_kill_for_commit(THD *thd) +{ + DBUG_ASSERT(WSREP(thd)); + mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + DBUG_ASSERT(thd->killed != NOT_KILLED); + mysql_mutex_lock(&thd->LOCK_thd_data); + /* If the transaction will roll back, keep the killed state. 
+ For must replay, the replay will happen in different THD context + which is high priority and cannot be killed. The owning thread will + pick the killed state in after statement processing. */ + if (thd->wsrep_trx().state() != wsrep::transaction::s_cert_failed && + thd->wsrep_trx().state() != wsrep::transaction::s_must_abort && + thd->wsrep_trx().state() != wsrep::transaction::s_aborting && + thd->wsrep_trx().state() != wsrep::transaction::s_must_replay) { - DBUG_ASSERT(WSREP(victim_thd) ? victim_thd->wsrep_trx().active() : 1); - ret= victim_thd->wsrep_cs().bf_abort(bf_seqno); + thd->wsrep_abort_by_kill= thd->killed; + thd->wsrep_abort_by_kill_err= thd->killed_err; + thd->killed= NOT_KILLED; + thd->killed_err= 0; } - if (ret) - { - wsrep_bf_aborts_counter++; - } - return ret; + mysql_mutex_unlock(&thd->LOCK_thd_data); +} + +void wsrep_restore_kill_after_commit(THD *thd) +{ + DBUG_ASSERT(WSREP(thd)); + mysql_mutex_assert_owner(&thd->LOCK_thd_kill); + thd->killed= thd->wsrep_abort_by_kill; + thd->killed_err= thd->wsrep_abort_by_kill_err; + thd->wsrep_abort_by_kill= NOT_KILLED; + thd->wsrep_abort_by_kill_err= 0; } int wsrep_create_threadvars() diff --git a/sql/wsrep_thd.h b/sql/wsrep_thd.h index 0ce612d6097..f3790887bf5 100644 --- a/sql/wsrep_thd.h +++ b/sql/wsrep_thd.h @@ -88,10 +88,39 @@ bool wsrep_create_appliers(long threads, bool mutex_protected=false); void wsrep_create_rollbacker(); bool wsrep_bf_abort(THD* bf_thd, THD* victim_thd); -int wsrep_abort_thd(THD *bf_thd, +/* + Abort transaction for victim_thd. This function is called from + MDL BF abort codepath. +*/ +void wsrep_abort_thd(THD *bf_thd, THD *victim_thd, my_bool signal) __attribute__((nonnull(1,2))); +/** + Kill wsrep connection with kill_signal. Object thd is not + guaranteed to exist anymore when this function returns. + + Asserts that the caller holds victim_thd->LOCK_thd_kill, + victim_thd->LOCK_thd_data. + + @param thd THD object for connection that executes the KILL. + @param victim_thd THD object for connection to be killed. + @param kill_signal Kill signal. + + @return Zero if the kill was successful, otherwise non-zero error code. + */ +uint wsrep_kill_thd(THD *thd, THD *victim_thd, killed_state kill_signal); + +/* + Backup kill status for commit. + */ +void wsrep_backup_kill_for_commit(THD *); + +/* + Restore KILL status after commit. + */ +void wsrep_restore_kill_after_commit(THD *); + /* Helper methods to deal with thread local storage. 
The purpose of these methods is to hide the details of thread diff --git a/sql/wsrep_trans_observer.h b/sql/wsrep_trans_observer.h index 812452f451a..6b656f84c78 100644 --- a/sql/wsrep_trans_observer.h +++ b/sql/wsrep_trans_observer.h @@ -256,6 +256,11 @@ static inline int wsrep_before_prepare(THD* thd, bool all) thd->wsrep_trx().ws_meta().gtid(), wsrep_gtid_server.gtid()); } + + mysql_mutex_lock(&thd->LOCK_thd_kill); + if (thd->killed) wsrep_backup_kill_for_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + DBUG_RETURN(ret); } @@ -325,6 +330,11 @@ static inline int wsrep_before_commit(THD* thd, bool all) wsrep_gtid_server.gtid()); wsrep_register_for_group_commit(thd); } + + mysql_mutex_lock(&thd->LOCK_thd_kill); + if (thd->killed) wsrep_backup_kill_for_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + DBUG_RETURN(ret); } @@ -343,7 +353,8 @@ static inline int wsrep_before_commit(THD* thd, bool all) static inline int wsrep_ordered_commit(THD* thd, bool all) { DBUG_ENTER("wsrep_ordered_commit"); - WSREP_DEBUG("wsrep_ordered_commit: %d", wsrep_is_real(thd, all)); + WSREP_DEBUG("wsrep_ordered_commit: %d %lld", wsrep_is_real(thd, all), + (long long) wsrep_thd_trx_seqno(thd)); DBUG_ASSERT(wsrep_run_commit_hook(thd, all)); DBUG_RETURN(thd->wsrep_cs().ordered_commit()); } @@ -451,10 +462,18 @@ int wsrep_after_statement(THD* thd) wsrep::to_c_string(thd->wsrep_cs().state()), wsrep::to_c_string(thd->wsrep_cs().mode()), wsrep::to_c_string(thd->wsrep_cs().transaction().state())); - DBUG_RETURN((thd->wsrep_cs().state() != wsrep::client_state::s_none && + int ret= ((thd->wsrep_cs().state() != wsrep::client_state::s_none && thd->wsrep_cs().mode() == Wsrep_client_state::m_local) && !thd->internal_transaction() ? thd->wsrep_cs().after_statement() : 0); + + if (wsrep_is_active(thd)) + { + mysql_mutex_lock(&thd->LOCK_thd_kill); + wsrep_restore_kill_after_commit(thd); + mysql_mutex_unlock(&thd->LOCK_thd_kill); + } + DBUG_RETURN(ret); } static inline void wsrep_after_apply(THD* thd) diff --git a/storage/innobase/buf/buf0dblwr.cc b/storage/innobase/buf/buf0dblwr.cc index 1260145ed1c..510ad02256d 100644 --- a/storage/innobase/buf/buf0dblwr.cc +++ b/storage/innobase/buf/buf0dblwr.cc @@ -372,7 +372,7 @@ void buf_dblwr_t::recover() const uint32_t space_id= page_get_space_id(page); const page_id_t page_id(space_id, page_no); - if (recv_sys.lsn < lsn) + if (recv_sys.scanned_lsn < lsn) { ib::info() << "Ignoring a doublewrite copy of page " << page_id << " with future log sequence number " << lsn; diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index be29f287061..64c482d22a2 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -966,11 +966,19 @@ uint32_t fil_space_t::flush_freed(bool writable) mysql_mutex_assert_not_owner(&buf_pool.flush_list_mutex); mysql_mutex_assert_not_owner(&buf_pool.mutex); - freed_range_mutex.lock(); - if (freed_ranges.empty() || log_sys.get_flushed_lsn() < get_last_freed_lsn()) + for (;;) { + freed_range_mutex.lock(); + if (freed_ranges.empty()) + { + freed_range_mutex.unlock(); + return 0; + } + const lsn_t flush_lsn= last_freed_lsn; + if (log_sys.get_flushed_lsn() >= flush_lsn) + break; freed_range_mutex.unlock(); - return 0; + log_write_up_to(flush_lsn, true); } const unsigned physical{physical_size()}; @@ -2571,6 +2579,7 @@ ATTRIBUTE_COLD void buf_flush_page_cleaner_init() /** Flush the buffer pool on shutdown. 
*/ ATTRIBUTE_COLD void buf_flush_buffer_pool() { + ut_ad(!os_aio_pending_reads()); ut_ad(!buf_page_cleaner_is_active); ut_ad(!buf_flush_sync_lsn); diff --git a/storage/innobase/buf/buf0lru.cc b/storage/innobase/buf/buf0lru.cc index 4c42d3eab54..427b3f0737c 100644 --- a/storage/innobase/buf/buf0lru.cc +++ b/storage/innobase/buf/buf0lru.cc @@ -1093,7 +1093,11 @@ static bool buf_LRU_block_remove_hashed(buf_page_t *bpage, const page_id_t id, ut_a(!zip || !bpage->oldest_modification()); ut_ad(bpage->zip_size()); - + /* Skip consistency checks if the page was freed. + In recovery, we could get a sole FREE_PAGE record + and nothing else, for a ROW_FORMAT=COMPRESSED page. + Its contents would be garbage. */ + if (!bpage->is_freed()) switch (fil_page_get_type(page)) { case FIL_PAGE_TYPE_ALLOCATED: case FIL_PAGE_INODE: @@ -1224,6 +1228,7 @@ void buf_pool_t::corrupted_evict(buf_page_t *bpage, uint32_t state) buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(id.fold()); page_hash_latch &hash_lock= buf_pool.page_hash.lock_get(chain); + recv_sys.free_corrupted_page(id); mysql_mutex_lock(&mutex); hash_lock.lock(); @@ -1248,8 +1253,6 @@ void buf_pool_t::corrupted_evict(buf_page_t *bpage, uint32_t state) buf_LRU_block_free_hashed_page(reinterpret_cast(bpage)); mysql_mutex_unlock(&mutex); - - recv_sys.free_corrupted_page(id); } /** Update buf_pool.LRU_old_ratio. diff --git a/storage/innobase/buf/buf0rea.cc b/storage/innobase/buf/buf0rea.cc index 1f7f9bfba0a..1e678ea3666 100644 --- a/storage/innobase/buf/buf0rea.cc +++ b/storage/innobase/buf/buf0rea.cc @@ -221,7 +221,10 @@ buf_read_page_low( { buf_page_t* bpage; - ut_ad(!buf_dblwr.is_inside(page_id)); + if (buf_dblwr.is_inside(page_id)) { + space->release(); + return DB_PAGE_CORRUPTED; + } bpage = buf_page_init_for_read(page_id, zip_size, chain, block); @@ -517,7 +520,7 @@ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size) /* We will check that almost all pages in the area have been accessed in the desired order. */ - const bool descending= page_id == low; + const bool descending= page_id != low; if (!descending && page_id != high_1) /* This is not a border page of the area */ @@ -542,7 +545,7 @@ fail: uint32_t{buf_pool.read_ahead_area}); page_id_t new_low= low, new_high_1= high_1; unsigned prev_accessed= 0; - for (page_id_t i= low; i != high_1; ++i) + for (page_id_t i= low; i <= high_1; ++i) { buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(i.fold()); transactional_shared_lock_guard g @@ -570,12 +573,21 @@ failed: if (prev == FIL_NULL || next == FIL_NULL) goto fail; page_id_t id= page_id; - if (descending && next - 1 == page_id.page_no()) - id.set_page_no(prev); - else if (!descending && prev + 1 == page_id.page_no()) - id.set_page_no(next); + if (descending) + { + if (id == high_1) + ++id; + else if (next - 1 != page_id.page_no()) + goto fail; + else + id.set_page_no(prev); + } else - goto fail; /* Successor or predecessor not in the right order */ + { + if (prev + 1 != page_id.page_no()) + goto fail; + id.set_page_no(next); + } new_low= id - (id.page_no() % buf_read_ahead_area); new_high_1= new_low + (buf_read_ahead_area - 1); @@ -619,7 +631,7 @@ failed: } count= 0; - for (; new_low != new_high_1; ++new_low) + for (; new_low <= new_high_1; ++new_low) { if (space->is_stopping()) break; @@ -654,69 +666,45 @@ failed: return count; } -/** @return whether a page has been freed */ -inline bool fil_space_t::is_freed(uint32_t page) +/** Schedule a page for recovery. 
+@param space tablespace +@param page_id page identifier +@param recs log records +@param init_lsn page initialization, or 0 if the page needs to be read */ +void buf_read_recover(fil_space_t *space, const page_id_t page_id, + page_recv_t &recs, lsn_t init_lsn) { - std::lock_guard freed_lock(freed_range_mutex); - return freed_ranges.contains(page); -} - -/** Issues read requests for pages which recovery wants to read in. -@param space_id tablespace identifier -@param page_nos page numbers to read, in ascending order */ -void buf_read_recv_pages(uint32_t space_id, st_::span page_nos) -{ - fil_space_t* space = fil_space_t::get(space_id); - - if (!space) { - /* The tablespace is missing or unreadable: do nothing */ - return; - } - - const ulint zip_size = space->zip_size() | 1; - buf_block_t* block = buf_LRU_get_free_block(have_no_mutex); - - for (ulint i = 0; i < page_nos.size(); i++) { - - /* Ignore if the page already present in freed ranges. */ - if (space->is_freed(page_nos[i])) { - continue; - } - - const page_id_t cur_page_id(space_id, page_nos[i]); - - ulint limit = 0; - for (ulint j = 0; j < buf_pool.n_chunks; j++) { - limit += buf_pool.chunks[j].size / 2; - } - - if (os_aio_pending_reads() >= limit) { - os_aio_wait_until_no_pending_reads(false); - } - - buf_pool_t::hash_chain& chain = - buf_pool.page_hash.cell_get(cur_page_id.fold()); - space->reacquire(); - switch (buf_read_page_low(cur_page_id, zip_size, chain, space, - block)) { - case DB_SUCCESS: - ut_ad(!block); - block = buf_LRU_get_free_block(have_no_mutex); - break; - case DB_SUCCESS_LOCKED_REC: - break; - default: - sql_print_error("InnoDB: Recovery failed to read page " - UINT32PF " from %s", - cur_page_id.page_no(), - space->chain.start->name); - } - ut_ad(block); - } - - DBUG_PRINT("ib_buf", ("recovery read (%zu pages) for %s", - page_nos.size(), space->chain.start->name)); - space->release(); - - buf_read_release(block); + ut_ad(space->id == page_id.space()); + space->reacquire(); + const ulint zip_size= space->zip_size() | 1; + buf_pool_t::hash_chain &chain= buf_pool.page_hash.cell_get(page_id.fold()); + buf_block_t *block= buf_LRU_get_free_block(have_no_mutex); + + if (init_lsn) + { + if (buf_page_t *bpage= + buf_page_init_for_read(page_id, zip_size, chain, block)) + { + ut_ad(bpage->in_file()); + os_fake_read(IORequest{bpage, (buf_tmp_buffer_t*) &recs, + UT_LIST_GET_FIRST(space->chain), + IORequest::READ_ASYNC}, init_lsn); + return; + } + } + else if (dberr_t err= + buf_read_page_low(page_id, zip_size, chain, space, block)) + { + if (err != DB_SUCCESS_LOCKED_REC) + sql_print_error("InnoDB: Recovery failed to read page " + UINT32PF " from %s", + page_id.page_no(), space->chain.start->name); + } + else + { + ut_ad(!block); + return; + } + + buf_LRU_block_free_non_file_page(block); } diff --git a/storage/innobase/dict/dict0stats.cc b/storage/innobase/dict/dict0stats.cc index 04b1ec88ac3..8267e435b4c 100644 --- a/storage/innobase/dict/dict0stats.cc +++ b/storage/innobase/dict/dict0stats.cc @@ -204,7 +204,17 @@ static const dict_table_schema_t table_stats_schema = { {"database_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, {"table_name", DATA_VARMYSQL, DATA_NOT_NULL, 597}, - {"last_update", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 4}, + /* + Don't check the DATA_UNSIGNED flag in last_update. + It is present if the server is running in a pure MariaDB installation, + because MariaDB's Field_timestampf::flags has UNSIGNED_FLAG. 
+ But DATA_UNSIGNED is missing when the server starts on a MySQL-5.7 directory + (during a migration), because MySQL's Field_timestampf::flags does not + have UNSIGNED_FLAG. + It is fine not to check DATA_UNSIGNED, because Field_timestampf + in both MariaDB and MySQL supports only non-negative time_t values. + */ + {"last_update", DATA_INT, DATA_NOT_NULL, 4}, {"n_rows", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, {"clustered_index_size", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, {"sum_of_other_index_sizes", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, @@ -218,7 +228,11 @@ static const dict_table_schema_t index_stats_schema = {"database_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, {"table_name", DATA_VARMYSQL, DATA_NOT_NULL, 597}, {"index_name", DATA_VARMYSQL, DATA_NOT_NULL, 192}, - {"last_update", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 4}, + /* + Don't check the DATA_UNSIGNED flag in last_update. + See comments about last_update in table_stats_schema above. + */ + {"last_update", DATA_INT, DATA_NOT_NULL, 4}, {"stat_name", DATA_VARMYSQL, DATA_NOT_NULL, 64*3}, {"stat_value", DATA_INT, DATA_NOT_NULL | DATA_UNSIGNED, 8}, {"sample_size", DATA_INT, DATA_UNSIGNED, 8}, diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 53ea91529ec..7398c50f5d4 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -447,7 +447,9 @@ static bool fil_node_open_file(fil_node_t *node) } } - return fil_node_open_file_low(node); + /* The node can be opened between releasing and acquiring fil_system.mutex + in the above code. */ + return node->is_open() || fil_node_open_file_low(node); } /** Close the file handle. */ @@ -2066,8 +2068,8 @@ err_exit: FIL_TYPE_TABLESPACE, crypt_data, mode, true)) { fil_node_t* node = space->add(path, file, size, false, true); - mysql_mutex_unlock(&fil_system.mutex); IF_WIN(node->find_metadata(), node->find_metadata(file, true)); + mysql_mutex_unlock(&fil_system.mutex); mtr.start(); mtr.set_named_space(space); ut_a(fsp_header_init(space, size, &mtr) == DB_SUCCESS); @@ -2888,49 +2890,51 @@ func_exit: #include -/** Callback for AIO completion */ -void fil_aio_callback(const IORequest &request) +void IORequest::write_complete() const { ut_ad(fil_validate_skip()); - ut_ad(request.node); + ut_ad(node); + ut_ad(is_write()); - if (!request.bpage) + if (!bpage) { ut_ad(!srv_read_only_mode); - if (request.type == IORequest::DBLWR_BATCH) - buf_dblwr.flush_buffered_writes_completed(request); + if (type == IORequest::DBLWR_BATCH) + buf_dblwr.flush_buffered_writes_completed(*this); else - ut_ad(request.type == IORequest::WRITE_ASYNC); -write_completed: - request.node->complete_write(); - } - else if (request.is_write()) - { - buf_page_write_complete(request); - goto write_completed; + ut_ad(type == IORequest::WRITE_ASYNC); } else + buf_page_write_complete(*this); + + node->complete_write(); + node->space->release(); +} + +void IORequest::read_complete() const +{ + ut_ad(fil_validate_skip()); + ut_ad(node); + ut_ad(is_read()); + ut_ad(bpage); + + const page_id_t id(bpage->id()); + + if (dberr_t err= bpage->read_complete(*node)) { - ut_ad(request.is_read()); - - const page_id_t id(request.bpage->id()); - - if (dberr_t err= request.bpage->read_complete(*request.node)) + if (recv_recovery_is_on() && !srv_force_recovery) { - if (recv_recovery_is_on() && !srv_force_recovery) - { - mysql_mutex_lock(&recv_sys.mutex); - recv_sys.set_corrupt_fs(); - mysql_mutex_unlock(&recv_sys.mutex); - } - - if (err != DB_FAIL) - ib::error() << "Failed to read page "
<< id.page_no() - << " from file '" << request.node->name << "': " << err; + mysql_mutex_lock(&recv_sys.mutex); + recv_sys.set_corrupt_fs(); + mysql_mutex_unlock(&recv_sys.mutex); } + + if (err != DB_FAIL) + ib::error() << "Failed to read page " << id.page_no() + << " from file '" << node->name << "': " << err; } - request.node->space->release(); + node->space->release(); } /** Flush to disk the writes in file spaces of the given type diff --git a/storage/innobase/fsp/fsp0fsp.cc b/storage/innobase/fsp/fsp0fsp.cc index f7625974886..6a4d86f7aea 100644 --- a/storage/innobase/fsp/fsp0fsp.cc +++ b/storage/innobase/fsp/fsp0fsp.cc @@ -1289,23 +1289,20 @@ static dberr_t fsp_free_page(fil_space_t *space, page_no_t offset, mtr_t *mtr) + header->page.frame, frag_n_used - 1); } + mtr->free(*space, static_cast(offset)); + xdes_set_free(*xdes, descr, offset % FSP_EXTENT_SIZE, mtr); + ut_ad(err == DB_SUCCESS); + if (!xdes_get_n_used(descr)) { /* The extent has become free: move it to another list */ err = flst_remove(header, FSP_HEADER_OFFSET + FSP_FREE_FRAG, xdes, xoffset, mtr); - if (UNIV_UNLIKELY(err != DB_SUCCESS)) { - return err; - } - err = fsp_free_extent(space, offset, mtr); - if (UNIV_UNLIKELY(err != DB_SUCCESS)) { - return err; + if (err == DB_SUCCESS) { + err = fsp_free_extent(space, offset, mtr); } } - mtr->free(*space, static_cast(offset)); - xdes_set_free(*xdes, descr, offset % FSP_EXTENT_SIZE, mtr); - - return DB_SUCCESS; + return err; } /** @return Number of segment inodes which fit on a single page */ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 14b31142951..2788f8871d2 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1880,8 +1880,9 @@ static void innodb_disable_internal_writes(bool disable) sst_enable_innodb_writes(); } -static void wsrep_abort_transaction(handlerton*, THD *, THD *, my_bool); -static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid); +static void wsrep_abort_transaction(handlerton *, THD *, THD *, my_bool) + __attribute__((nonnull)); +static int innobase_wsrep_set_checkpoint(handlerton *hton, const XID *xid); static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid); #endif /* WITH_WSREP */ @@ -18625,36 +18626,45 @@ void lock_wait_wsrep_kill(trx_t *bf_trx, ulong thd_id, trx_id_t trx_id) wsrep_thd_client_mode_str(vthd), wsrep_thd_transaction_state_str(vthd), wsrep_thd_query(vthd)); - /* Mark transaction as a victim for Galera abort */ - vtrx->lock.set_wsrep_victim(); - if (!wsrep_thd_set_wsrep_aborter(bf_thd, vthd)) - aborting= true; - else - WSREP_DEBUG("kill transaction skipped due to wsrep_aborter set"); + aborting= true; } } mysql_mutex_unlock(&lock_sys.wait_mutex); vtrx->mutex_unlock(); } - wsrep_thd_UNLOCK(vthd); - if (aborting) + + DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + if (aborting && wsrep_thd_bf_abort(bf_thd, vthd, true)) { + /* Need to grab mutexes again to ensure that the trx is still in + right state. 
*/ + lock_sys.wr_lock(SRW_LOCK_CALL); + mysql_mutex_lock(&lock_sys.wait_mutex); + vtrx->mutex_lock(); + /* if victim is waiting for some other lock, we have to cancel that waiting */ - lock_sys.cancel_lock_wait_for_trx(vtrx); - - DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); - if (!wsrep_thd_bf_abort(bf_thd, vthd, true)) + if (vtrx->id == trx_id) { - wsrep_thd_LOCK(vthd); - wsrep_thd_set_wsrep_aborter(NULL, vthd); - wsrep_thd_UNLOCK(vthd); - - WSREP_DEBUG("wsrep_thd_bf_abort has failed, victim %lu will survive", - thd_get_thread_id(vthd)); + switch (vtrx->state) { + default: + break; + case TRX_STATE_ACTIVE: + case TRX_STATE_PREPARED: + lock_sys.cancel_lock_wait_for_wsrep_bf_abort(vtrx); + } } + lock_sys.wr_unlock(); + mysql_mutex_unlock(&lock_sys.wait_mutex); + vtrx->mutex_unlock(); } + else + { + WSREP_DEBUG("wsrep_thd_bf_abort has failed, victim %lu will survive", + thd_get_thread_id(vthd)); + } + wsrep_thd_UNLOCK(vthd); wsrep_thd_kill_UNLOCK(vthd); } } @@ -18662,68 +18672,50 @@ void lock_wait_wsrep_kill(trx_t *bf_trx, ulong thd_id, trx_id_t trx_id) /** This function forces the victim transaction to abort. Aborting the transaction does NOT end it, it still has to be rolled back. + The caller must lock LOCK_thd_kill and LOCK_thd_data. + @param bf_thd brute force THD asking for the abort @param victim_thd victim THD to be aborted - - @return 0 victim was aborted - @return -1 victim thread was aborted (no transaction) */ -static -void -wsrep_abort_transaction( - handlerton*, - THD *bf_thd, - THD *victim_thd, - my_bool signal) +static void wsrep_abort_transaction(handlerton *, THD *bf_thd, THD *victim_thd, + my_bool signal) { - DBUG_ENTER("wsrep_abort_transaction"); - ut_ad(bf_thd); - ut_ad(victim_thd); + DBUG_ENTER("wsrep_abort_transaction"); + ut_ad(bf_thd); + ut_ad(victim_thd); - wsrep_thd_kill_LOCK(victim_thd); - wsrep_thd_LOCK(victim_thd); - trx_t* victim_trx= thd_to_trx(victim_thd); - wsrep_thd_UNLOCK(victim_thd); + trx_t *victim_trx= thd_to_trx(victim_thd); - WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %s", - wsrep_thd_query(bf_thd), - wsrep_thd_query(victim_thd), - wsrep_thd_transaction_state_str(victim_thd)); + WSREP_DEBUG("abort transaction: BF: %s victim: %s victim conf: %s", + wsrep_thd_query(bf_thd), wsrep_thd_query(victim_thd), + wsrep_thd_transaction_state_str(victim_thd)); - if (victim_trx) { - victim_trx->lock.set_wsrep_victim(); + if (!victim_trx) + { + WSREP_DEBUG("abort transaction: victim did not exist"); + DBUG_VOID_RETURN; + } - wsrep_thd_LOCK(victim_thd); - bool aborting= !wsrep_thd_set_wsrep_aborter(bf_thd, victim_thd); - wsrep_thd_UNLOCK(victim_thd); - if (aborting) { - DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); - DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", - { - const char act[]= - "now " - "SIGNAL sync.before_wsrep_thd_abort_reached " - "WAIT_FOR signal.before_wsrep_thd_abort"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); - wsrep_thd_bf_abort(bf_thd, victim_thd, signal); - } - } else { - DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", - { - const char act[]= - "now " - "SIGNAL sync.before_wsrep_thd_abort_reached " - "WAIT_FOR signal.before_wsrep_thd_abort"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); - wsrep_thd_bf_abort(bf_thd, victim_thd, signal); - } + lock_sys.wr_lock(SRW_LOCK_CALL); + mysql_mutex_lock(&lock_sys.wait_mutex); + victim_trx->mutex_lock(); - wsrep_thd_kill_UNLOCK(victim_thd); - DBUG_VOID_RETURN; + switch (victim_trx->state) { + default: + break; + case 
TRX_STATE_ACTIVE: + case TRX_STATE_PREPARED: + /* Cancel lock wait if the victim is waiting for a lock in InnoDB. + The transaction which is blocked somewhere else (e.g. waiting + for next command or MDL) has been interrupted by THD::awake_no_mutex() + on server level before calling this function. */ + lock_sys.cancel_lock_wait_for_wsrep_bf_abort(victim_trx); + } + lock_sys.wr_unlock(); + mysql_mutex_unlock(&lock_sys.wait_mutex); + victim_trx->mutex_unlock(); + + DBUG_VOID_RETURN; } static @@ -19441,10 +19433,17 @@ static MYSQL_SYSVAR_ULONG(purge_rseg_truncate_frequency, " purge rollback segment(s) on every Nth iteration of purge invocation", NULL, NULL, 128, 1, 128, 0); +static void innodb_undo_log_truncate_update(THD *thd, struct st_mysql_sys_var*, + void*, const void *save) +{ + if ((srv_undo_log_truncate= *static_cast(save))) + srv_wake_purge_thread_if_not_active(); +} + static MYSQL_SYSVAR_BOOL(undo_log_truncate, srv_undo_log_truncate, PLUGIN_VAR_OPCMDARG, "Enable or Disable Truncate of UNDO tablespace.", - NULL, NULL, FALSE); + NULL, innodb_undo_log_truncate_update, FALSE); static MYSQL_SYSVAR_LONG(autoinc_lock_mode, innobase_autoinc_lock_mode, PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY, diff --git a/storage/innobase/include/buf0buf.h b/storage/innobase/include/buf0buf.h index 9ff65ce7c9c..c12097be010 100644 --- a/storage/innobase/include/buf0buf.h +++ b/storage/innobase/include/buf0buf.h @@ -71,8 +71,7 @@ struct buf_pool_info_t ulint flush_list_len; /*!< Length of buf_pool.flush_list */ ulint n_pend_unzip; /*!< buf_pool.n_pend_unzip, pages pending decompress */ - ulint n_pend_reads; /*!< buf_pool.n_pend_reads, pages - pending read */ + ulint n_pend_reads; /*!< os_aio_pending_reads() */ ulint n_pending_flush_lru; /*!< Pages pending flush in LRU */ ulint n_pending_flush_list; /*!< Pages pending flush in FLUSH LIST */ diff --git a/storage/innobase/include/buf0rea.h b/storage/innobase/include/buf0rea.h index ebf0f60ffe5..46d08243596 100644 --- a/storage/innobase/include/buf0rea.h +++ b/storage/innobase/include/buf0rea.h @@ -91,7 +91,10 @@ latches! @return number of page read requests issued */ ulint buf_read_ahead_linear(const page_id_t page_id, ulint zip_size); -/** Issue read requests for pages that need to be recovered. -@param space_id tablespace identifier -@param page_nos page numbers to read, in ascending order */ -void buf_read_recv_pages(uint32_t space_id, st_::span page_nos); +/** Schedule a page for recovery. 
+@param space tablespace +@param page_id page identifier +@param recs log records +@param init_lsn page initialization, or 0 if the page needs to be read */ +void buf_read_recover(fil_space_t *space, const page_id_t page_id, + page_recv_t &recs, lsn_t init_lsn); diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index e4e0b2c9c74..71b7dd423b7 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -611,8 +611,6 @@ public: /** Close all tablespace files at shutdown */ static void close_all(); - /** @return last_freed_lsn */ - lsn_t get_last_freed_lsn() { return last_freed_lsn; } /** Update last_freed_lsn */ void update_last_freed_lsn(lsn_t lsn) { last_freed_lsn= lsn; } diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 16acd031177..e8299bb1189 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -955,6 +955,10 @@ public: /** Cancel possible lock waiting for a transaction */ static void cancel_lock_wait_for_trx(trx_t *trx); +#ifdef WITH_WSREP + /** Cancel lock waiting for a wsrep BF abort. */ + static void cancel_lock_wait_for_wsrep_bf_abort(trx_t *trx); +#endif /* WITH_WSREP */ }; /** The lock system */ diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index c0b79f1a76d..2c4e92a6dee 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -38,9 +38,9 @@ Created 9/20/1997 Heikki Tuuri #define recv_recovery_is_on() UNIV_UNLIKELY(recv_sys.recovery_on) ATTRIBUTE_COLD MY_ATTRIBUTE((nonnull, warn_unused_result)) -/** Apply any buffered redo log to a page that was just read from a data file. -@param[in,out] space tablespace -@param[in,out] bpage buffer pool page +/** Apply any buffered redo log to a page. +@param space tablespace +@param bpage buffer pool page @return whether the page was recovered correctly */ bool recv_recover_page(fil_space_t* space, buf_page_t* bpage); @@ -49,17 +49,6 @@ of first system tablespace page @return error code or DB_SUCCESS */ dberr_t recv_recovery_from_checkpoint_start(); -/** Whether to store redo log records in recv_sys.pages */ -enum store_t { - /** Do not store redo log records. */ - STORE_NO, - /** Store redo log records. */ - STORE_YES, - /** Store redo log records if the tablespace exists. */ - STORE_IF_EXISTS -}; - - /** Report an operation to create, delete, or rename a file during backup. @param[in] space_id tablespace identifier @param[in] type file operation redo log type @@ -125,21 +114,15 @@ struct recv_dblwr_t list pages; }; -/** the recovery state and buffered records for a page */ +/** recv_sys.pages entry; protected by recv_sys.mutex */ struct page_recv_t { - /** Recovery state; protected by recv_sys.mutex */ - enum - { - /** not yet processed */ - RECV_NOT_PROCESSED, - /** not processed; the page will be reinitialized */ - RECV_WILL_NOT_READ, - /** page is being read */ - RECV_BEING_READ, - /** log records are being applied on the page */ - RECV_BEING_PROCESSED - } state= RECV_NOT_PROCESSED; + /** Recovery status: 0=not in progress, 1=log is being applied, + -1=log has been applied and the entry may be erased. + Transitions from 1 to -1 are NOT protected by recv_sys.mutex. */ + Atomic_relaxed being_processed{0}; + /** Whether reading the page will be skipped */ + bool skip_read= false; /** Latest written byte offset when applying the log records. 
@see mtr_t::m_last_offset */ uint16_t last_offset= 1; @@ -162,6 +145,9 @@ struct page_recv_t head= recs; tail= recs; } + /** Remove the last records for the page + @param start_lsn start of the removed log */ + ATTRIBUTE_COLD void rewind(lsn_t start_lsn); /** @return the last log snippet */ const log_rec_t* last() const { return tail; } @@ -180,8 +166,8 @@ struct page_recv_t iterator begin() { return head; } iterator end() { return NULL; } bool empty() const { ut_ad(!head == !tail); return !head; } - /** Clear and free the records; @see recv_sys_t::alloc() */ - inline void clear(); + /** Clear and free the records; @see recv_sys_t::add() */ + void clear(); } log; /** Trim old log records for a page. @@ -190,21 +176,14 @@ struct page_recv_t inline bool trim(lsn_t start_lsn); /** Ignore any earlier redo log records for this page. */ inline void will_not_read(); - /** @return whether the log records for the page are being processed */ - bool is_being_processed() const { return state == RECV_BEING_PROCESSED; } }; /** Recovery system data structure */ struct recv_sys_t { - /** mutex protecting apply_log_recs and page_recv_t::state */ - mysql_mutex_t mutex; + /** mutex protecting this as well as some of page_recv_t */ + alignas(CPU_LEVEL1_DCACHE_LINESIZE) mysql_mutex_t mutex; private: - /** condition variable for - !apply_batch_on || pages.empty() || found_corrupt_log || found_corrupt_fs */ - pthread_cond_t cond; - /** whether recv_apply_hashed_log_recs() is running */ - bool apply_batch_on; /** set when finding a corrupt log block or record, or there is a log parsing buffer overflow */ bool found_corrupt_log; @@ -226,6 +205,8 @@ public: size_t offset; /** log sequence number of the first non-parsed record */ lsn_t lsn; + /** log sequence number of the last parsed mini-transaction */ + lsn_t scanned_lsn; /** log sequence number at the end of the FILE_CHECKPOINT record, or 0 */ lsn_t file_checkpoint; /** the time when progress was last reported */ @@ -238,6 +219,9 @@ public: map pages; private: + /** iterator to pages, used by parse() */ + map::iterator pages_it; + /** Process a record that indicates that a tablespace size is being shrunk. @param page_id first page that is not in the file @param lsn log sequence number of the shrink operation */ @@ -257,30 +241,42 @@ public: /** The contents of the doublewrite buffer */ recv_dblwr_t dblwr; - /** Last added LSN to pages, before switching to STORE_NO */ - lsn_t last_stored_lsn= 0; - inline void read(os_offset_t offset, span buf); inline size_t files_size(); void close_files() { files.clear(); files.shrink_to_fit(); } + /** Advance pages_it if it matches the iterator */ + void pages_it_invalidate(const map::iterator &p) + { + mysql_mutex_assert_owner(&mutex); + if (pages_it == p) + pages_it++; + } + /** Invalidate pages_it if it points to the given tablespace */ + void pages_it_invalidate(uint32_t space_id) + { + mysql_mutex_assert_owner(&mutex); + if (pages_it != pages.end() && pages_it->first.space() == space_id) + pages_it= pages.end(); + } + private: /** Attempt to initialize a page based on redo log records. 
- @param page_id page identifier - @param p iterator pointing to page_id + @param p iterator @param mtr mini-transaction @param b pre-allocated buffer pool block + @param init_lsn page initialization @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ - inline buf_block_t *recover_low(const page_id_t page_id, map::iterator &p, - mtr_t &mtr, buf_block_t *b); + inline buf_block_t *recover_low(const map::iterator &p, mtr_t &mtr, + buf_block_t *b, lsn_t init_lsn); /** Attempt to initialize a page based on redo log records. @param page_id page identifier @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ - buf_block_t *recover_low(const page_id_t page_id); + ATTRIBUTE_COLD buf_block_t *recover_low(const page_id_t page_id); /** All found log files (multiple ones are possible if we are upgrading from before MariaDB Server 10.5.1) */ @@ -289,10 +285,27 @@ private: /** Base node of the redo block list. List elements are linked via buf_block_t::unzip_LRU. */ UT_LIST_BASE_NODE_T(buf_block_t) blocks; + + /** Allocate a block from the buffer pool for recv_sys.pages */ + ATTRIBUTE_COLD buf_block_t *add_block(); + + /** Wait for buffer pool to become available. + @param pages number of buffer pool pages needed */ + ATTRIBUTE_COLD void wait_for_pool(size_t pages); + + /** Free log for processed pages. */ + void garbage_collect(); + + /** Apply a recovery batch. + @param space_id current tablespace identifier + @param space current tablespace + @param free_block spare buffer block + @param last_batch whether it is possible to write more redo log + @return whether the caller must provide a new free_block */ + bool apply_batch(uint32_t space_id, fil_space_t *&space, + buf_block_t *&free_block, bool last_batch); + public: - /** Check whether the number of read redo log blocks exceeds the maximum. - @return whether the memory is exhausted */ - inline bool is_memory_exhausted(); /** Apply buffered log to persistent data pages. @param last_batch whether it is possible to write more redo log */ void apply(bool last_batch); @@ -310,7 +323,7 @@ public: /** Clean up after create() */ void close(); - bool is_initialised() const { return last_stored_lsn != 0; } + bool is_initialised() const { return scanned_lsn != 0; } /** Find the latest checkpoint. @return error code or DB_SUCCESS */ @@ -321,60 +334,76 @@ public: @param start_lsn start LSN of the mini-transaction @param lsn @see mtr_t::commit_lsn() @param l redo log snippet - @param len length of l, in bytes */ - inline void add(map::iterator it, lsn_t start_lsn, lsn_t lsn, - const byte *l, size_t len); + @param len length of l, in bytes + @return whether we ran out of memory */ + bool add(map::iterator it, lsn_t start_lsn, lsn_t lsn, + const byte *l, size_t len); - enum parse_mtr_result { OK, PREMATURE_EOF, GOT_EOF }; + /** Parsing result */ + enum parse_mtr_result { + /** a record was successfully parsed */ + OK, + /** the log ended prematurely (need to read more) */ + PREMATURE_EOF, + /** the end of the log was reached */ + GOT_EOF, + /** parse(l, false) ran out of memory */ + GOT_OOM + }; private: /** Parse and register one log_t::FORMAT_10_8 mini-transaction. 
- @param store whether to store the records - @param l log data source */ + @tparam store whether to store the records + @param l log data source + @param if_exists if store: whether to check if the tablespace exists */ + template + inline parse_mtr_result parse(source &l, bool if_exists) noexcept; + + /** Rewind a mini-transaction when parse() runs out of memory. + @param l log data source + @param begin start of the mini-transaction */ template - inline parse_mtr_result parse(store_t store, source& l) noexcept; + ATTRIBUTE_COLD void rewind(source &l, source &begin) noexcept; + + /** Report progress in terms of LSN or pages remaining */ + ATTRIBUTE_COLD void report_progress() const; public: /** Parse and register one log_t::FORMAT_10_8 mini-transaction, handling log_sys.is_pmem() buffer wrap-around. - @param store whether to store the records */ - static parse_mtr_result parse_mtr(store_t store) noexcept; + @tparam store whether to store the records + @param if_exists if store: whether to check if the tablespace exists */ + template + static parse_mtr_result parse_mtr(bool if_exists) noexcept; /** Parse and register one log_t::FORMAT_10_8 mini-transaction, handling log_sys.is_pmem() buffer wrap-around. - @param store whether to store the records */ - static parse_mtr_result parse_pmem(store_t store) noexcept + @tparam store whether to store the records + @param if_exists if store: whether to check if the tablespace exists */ + template + static parse_mtr_result parse_pmem(bool if_exists) noexcept #ifdef HAVE_PMEM ; #else - { return parse_mtr(store); } + { return parse_mtr(if_exists); } #endif + /** Erase log records for a page. */ + void erase(map::iterator p); + /** Clear a fully processed set of stored redo log records. */ - inline void clear(); + void clear(); /** Determine whether redo log recovery progress should be reported. @param time the current time @return whether progress should be reported (the last report was at least 15 seconds ago) */ - bool report(time_t time) - { - if (time - progress_time < 15) - return false; - - progress_time= time; - return true; - } + bool report(time_t time); /** The alloc() memory alignment, in bytes */ static constexpr size_t ALIGNMENT= sizeof(size_t); - /** Allocate memory for log_rec_t - @param len allocation size, in bytes - @return pointer to len bytes of memory (never NULL) */ - inline void *alloc(size_t len); - /** Free a redo log snippet. - @param data buffer returned by alloc() */ + @param data buffer allocated in add() */ inline void free(const void *data); /** Remove records for a corrupted page. @@ -386,8 +415,6 @@ public: ATTRIBUTE_COLD void set_corrupt_fs(); /** Flag log file corruption during recovery. */ ATTRIBUTE_COLD void set_corrupt_log(); - /** Possibly finish a recovery batch. 
*/ - inline void maybe_finish_batch(); /** @return whether data file corruption was found */ bool is_corrupt_fs() const { return UNIV_UNLIKELY(found_corrupt_fs); } @@ -405,13 +432,14 @@ public: } /** Try to recover a tablespace that was not readable earlier - @param p iterator, initially pointing to page_id_t{space_id,0}; - the records will be freed and the iterator advanced + @param p iterator @param name tablespace file name @param free_block spare buffer block - @return whether recovery failed */ - bool recover_deferred(map::iterator &p, const std::string &name, - buf_block_t *&free_block); + @return recovered tablespace + @retval nullptr if recovery failed */ + fil_space_t *recover_deferred(const map::iterator &p, + const std::string &name, + buf_block_t *&free_block); }; /** The recovery system */ diff --git a/storage/innobase/include/os0file.h b/storage/innobase/include/os0file.h index 13f9d3de3f8..54f7ceeb4c0 100644 --- a/storage/innobase/include/os0file.h +++ b/storage/innobase/include/os0file.h @@ -212,6 +212,10 @@ public: bool is_LRU() const { return (type & (WRITE_LRU ^ WRITE_ASYNC)) != 0; } bool is_async() const { return (type & (READ_SYNC ^ READ_ASYNC)) != 0; } + void write_complete() const; + void read_complete() const; + void fake_read_complete(os_offset_t offset) const; + /** If requested, free storage space associated with a section of the file. @param off byte offset from the start (SEEK_SET) @param len size of the hole in bytes @@ -1040,6 +1044,11 @@ int os_aio_init(); Frees the asynchronous io system. */ void os_aio_free(); +/** Submit a fake read request during crash recovery. +@param type fake read request +@param offset additional context */ +void os_fake_read(const IORequest &type, os_offset_t offset); + /** Request a read or write. @param type I/O request @param buf buffer diff --git a/storage/innobase/include/trx0purge.h b/storage/innobase/include/trx0purge.h index bf6f6eb8eff..79b5c294e2a 100644 --- a/storage/innobase/include/trx0purge.h +++ b/storage/innobase/include/trx0purge.h @@ -39,13 +39,20 @@ Remove the undo log segment from the rseg slot if it is too big for reuse. @param[in,out] mtr mini-transaction */ void trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr); + +/** +Remove unnecessary history data from rollback segments. NOTE that when this +function is called, the caller (purge_coordinator_callback) +must not have any latches on undo log pages! +*/ +void trx_purge_truncate_history(); + /** Run a purge batch. @param n_tasks number of purge tasks to submit to the queue @param history_size trx_sys.history_size() -@param truncate whether to truncate the history at the end of the batch @return number of undo log pages handled in the batch */ -ulint trx_purge(ulint n_tasks, ulint history_size, bool truncate); +ulint trx_purge(ulint n_tasks, ulint history_size); /** Rollback segements from a given transaction with trx-no scheduled for purge. */ @@ -285,6 +292,18 @@ public: typically via purge_sys_t::view_guard. */ return view.low_limit_no(); } + /** A wrapper around ReadView::sees(). */ + trx_id_t sees(trx_id_t id) const + { + /* This function may only be called by purge_coordinator_callback(). + + The purge coordinator task may call this without holding any latch, + because it is the only thread that may modify purge_sys.view. + + Any other threads that access purge_sys.view must hold purge_sys.latch, + typically via purge_sys_t::view_guard. */ + return view.sees(id); + } /** A wrapper around trx_sys_t::clone_oldest_view(). 
*/ template void clone_oldest_view() diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index 8adff09a3df..45ed1f4a0fd 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -216,14 +216,6 @@ buf_block_t* trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo, mtr_t *mtr, dberr_t *err) MY_ATTRIBUTE((nonnull, warn_unused_result)); -/******************************************************************//** -Sets the state of the undo log segment at a transaction finish. -@return undo log segment header page, x-latched */ -buf_block_t* -trx_undo_set_state_at_finish( -/*=========================*/ - trx_undo_t* undo, /*!< in: undo log memory copy */ - mtr_t* mtr); /*!< in: mtr */ /** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK. @param[in,out] trx transaction diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 3c7c3d348af..08547f169f3 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -5732,13 +5732,14 @@ static void lock_release_autoinc_locks(trx_t *trx) } /** Cancel a waiting lock request and release possibly waiting transactions */ -template +template void lock_cancel_waiting_and_release(lock_t *lock) { lock_sys.assert_locked(*lock); mysql_mutex_assert_owner(&lock_sys.wait_mutex); trx_t *trx= lock->trx; - trx->mutex_lock(); + if (inner_trx_lock) + trx->mutex_lock(); ut_d(const auto trx_state= trx->state); ut_ad(trx_state == TRX_STATE_COMMITTED_IN_MEMORY || trx_state == TRX_STATE_ACTIVE); @@ -5762,7 +5763,8 @@ void lock_cancel_waiting_and_release(lock_t *lock) lock_wait_end(trx); - trx->mutex_unlock(); + if (inner_trx_lock) + trx->mutex_unlock(); } void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx) @@ -5779,6 +5781,19 @@ void lock_sys_t::cancel_lock_wait_for_trx(trx_t *trx) mysql_mutex_unlock(&lock_sys.wait_mutex); } +#ifdef WITH_WSREP +void lock_sys_t::cancel_lock_wait_for_wsrep_bf_abort(trx_t *trx) +{ + lock_sys.assert_locked(); + mysql_mutex_assert_owner(&lock_sys.wait_mutex); + ut_ad(trx->mutex_is_owner()); + ut_ad(trx->state == TRX_STATE_ACTIVE || trx->state == TRX_STATE_PREPARED); + trx->lock.set_wsrep_victim(); + if (lock_t *lock= trx->lock.wait_lock) + lock_cancel_waiting_and_release(lock); +} +#endif /* WITH_WSREP */ + /** Cancel a waiting lock request. @tparam check_victim whether to check for DB_DEADLOCK @param trx active transaction diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 2d1c2a9419f..82d4ba3afde 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -726,7 +726,7 @@ static struct { retry: log_sys.latch.wr_unlock(); - bool fail= false; + fil_space_t *space= fil_system.sys_space; buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex); log_sys.latch.wr_lock(SRW_LOCK_CALL); mysql_mutex_lock(&recv_sys.mutex); @@ -743,11 +743,12 @@ retry: there were no buffered records. Either way, we must create a dummy tablespace with the latest known name, for dict_drop_index_tree(). 
*/ + recv_sys.pages_it_invalidate(space_id); while (p != recv_sys.pages.end() && p->first.space() == space_id) { + ut_ad(!p->second.being_processed); recv_sys_t::map::iterator r= p++; - r->second.log.clear(); - recv_sys.pages.erase(r); + recv_sys.erase(r); } recv_spaces_t::iterator it{recv_spaces.find(space_id)}; if (it != recv_spaces.end()) @@ -770,11 +771,14 @@ retry: } } else - fail= recv_sys.recover_deferred(p, d->second.file_name, free_block); + space= recv_sys.recover_deferred(p, d->second.file_name, free_block); processed: - defers.erase(d++); - if (fail) + auto e= d++; + defers.erase(e); + if (!space) break; + if (space != fil_system.sys_space) + space->release(); if (free_block) continue; mysql_mutex_unlock(&recv_sys.mutex); @@ -785,7 +789,7 @@ processed: mysql_mutex_unlock(&recv_sys.mutex); if (free_block) buf_pool.free_block(free_block); - return fail; + return !space; } /** Create tablespace metadata for a data file that was initially @@ -893,28 +897,111 @@ free_space: } deferred_spaces; +/** Report an operation to create, delete, or rename a file during backup. +@param[in] space_id tablespace identifier +@param[in] type redo log type +@param[in] name file name (not NUL-terminated) +@param[in] len length of name, in bytes +@param[in] new_name new file name (NULL if not rename) +@param[in] new_len length of new_name, in bytes (0 if NULL) */ +void (*log_file_op)(uint32_t space_id, int type, + const byte* name, ulint len, + const byte* new_name, ulint new_len); + +void (*undo_space_trunc)(uint32_t space_id); + +void (*first_page_init)(uint32_t space_id); + +/** Information about initializing page contents during redo log processing. +FIXME: Rely on recv_sys.pages! */ +class mlog_init_t +{ + using map= std::map, + ut_allocator>>; + /** Map of page initialization operations. + FIXME: Merge this to recv_sys.pages! */ + map inits; + + /** Iterator to the last add() or will_avoid_read(), for speeding up + will_avoid_read(). */ + map::iterator i; +public: + /** Constructor */ + mlog_init_t() : i(inits.end()) {} + + /** Record that a page will be initialized by the redo log. + @param page_id page identifier + @param lsn log sequence number + @return whether the state was changed */ + bool add(const page_id_t page_id, lsn_t lsn) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + std::pair p= + inits.emplace(map::value_type{page_id, lsn}); + if (p.second) return true; + if (p.first->second >= lsn) return false; + p.first->second= lsn; + i= p.first; + return true; + } + + /** Get the last initialization lsn of a page. + @param page_id page identifier + @return the latest page initialization; + not valid after releasing recv_sys.mutex. */ + lsn_t last(page_id_t page_id) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + return inits.find(page_id)->second; + } + + /** Determine if a page will be initialized or freed after a time. 
+ @param page_id page identifier + @param lsn log sequence number + @return whether page_id will be freed or initialized after lsn */ + bool will_avoid_read(page_id_t page_id, lsn_t lsn) + { + mysql_mutex_assert_owner(&recv_sys.mutex); + if (i != inits.end() && i->first == page_id) + return i->second > lsn; + i= inits.lower_bound(page_id); + return i != inits.end() && i->first == page_id && i->second > lsn; + } + + /** Clear the data structure */ + void clear() { inits.clear(); i= inits.end(); } +}; + +static mlog_init_t mlog_init; + /** Try to recover a tablespace that was not readable earlier -@param p iterator, initially pointing to page_id_t{space_id,0}; - the records will be freed and the iterator advanced +@param p iterator to the page @param name tablespace file name @param free_block spare buffer block -@return whether recovery failed */ -bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, - const std::string &name, - buf_block_t *&free_block) +@return recovered tablespace +@retval nullptr if recovery failed */ +fil_space_t *recv_sys_t::recover_deferred(const recv_sys_t::map::iterator &p, + const std::string &name, + buf_block_t *&free_block) { mysql_mutex_assert_owner(&mutex); - const page_id_t first{p->first}; - ut_ad(first.space()); + ut_ad(p->first.space()); - recv_spaces_t::iterator it{recv_spaces.find(first.space())}; + recv_spaces_t::iterator it{recv_spaces.find(p->first.space())}; ut_ad(it != recv_spaces.end()); - if (!first.page_no() && p->second.state == page_recv_t::RECV_WILL_NOT_READ) + if (!p->first.page_no() && p->second.skip_read) { mtr_t mtr; - buf_block_t *block= recover_low(first, p, mtr, free_block); + ut_ad(!p->second.being_processed); + p->second.being_processed= 1; + lsn_t init_lsn= mlog_init.last(p->first); + mysql_mutex_unlock(&mutex); + buf_block_t *block= recover_low(p, mtr, free_block, init_lsn); + mysql_mutex_lock(&mutex); + p->second.being_processed= -1; ut_ad(block == free_block || block == reinterpret_cast(-1)); free_block= nullptr; if (UNIV_UNLIKELY(!block || block == reinterpret_cast(-1))) @@ -927,10 +1014,7 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, const uint32_t page_no= mach_read_from_4(page + FIL_PAGE_OFFSET); const uint32_t size= fsp_header_get_field(page, FSP_SIZE); - ut_ad(it != recv_spaces.end()); - - if (page_id_t{space_id, page_no} == first && size >= 4 && - it != recv_spaces.end() && + if (page_id_t{space_id, page_no} == p->first && size >= 4 && fil_space_t::is_valid_flags(flags, space_id) && fil_space_t::logical_size(flags) == srv_page_size) { @@ -984,10 +1068,10 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, } size_set: node->deferred= false; - space->release(); it->second.space= space; block->page.lock.x_unlock(); - return false; + p->second.being_processed= -1; + return space; } release_and_fail: @@ -995,106 +1079,34 @@ bool recv_sys_t::recover_deferred(recv_sys_t::map::iterator &p, } fail: - ib::error() << "Cannot apply log to " << first + ib::error() << "Cannot apply log to " << p->first << " of corrupted file '" << name << "'"; - return true; + return nullptr; } -/** Report an operation to create, delete, or rename a file during backup. 
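recover_deferred() above now claims the page entry by setting being_processed, drops recv_sys.mutex while the page is rebuilt, and resets the flag afterwards, keeping the critical section short while still excluding concurrent processing of the same entry. A reduced sketch of that handoff, with a generic mutex and map standing in for recv_sys; the flag values (-1 done, 0 idle, 1 in progress) mirror the patch, everything else is simplified.

#include <map>
#include <mutex>
#include <cassert>

struct page_entry { int being_processed= 0; };  // -1 done, 0 idle, 1 in progress

std::mutex recv_mutex;
std::map<int, page_entry> pages;

// Claim an entry, do the expensive work outside the mutex, then mark it done.
bool process(int page_no)
{
  std::unique_lock<std::mutex> lk(recv_mutex);
  auto it= pages.find(page_no);
  if (it == pages.end() || it->second.being_processed)
    return false;                 // missing, already claimed, or already done
  it->second.being_processed= 1;  // claim while holding the mutex
  lk.unlock();

  /* ... apply the buffered redo log to the page here ... */

  lk.lock();
  it->second.being_processed= -1; // processed; garbage collection may erase it
  return true;
}

int main()
{
  pages[3]= {};
  assert(process(3));
  assert(!process(3));            // second attempt sees being_processed != 0
}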
-@param[in] space_id tablespace identifier -@param[in] type redo log type -@param[in] name file name (not NUL-terminated) -@param[in] len length of name, in bytes -@param[in] new_name new file name (NULL if not rename) -@param[in] new_len length of new_name, in bytes (0 if NULL) */ -void (*log_file_op)(uint32_t space_id, int type, - const byte* name, ulint len, - const byte* new_name, ulint new_len); - -void (*undo_space_trunc)(uint32_t space_id); - -void (*first_page_init)(uint32_t space_id); - -/** Information about initializing page contents during redo log processing. -FIXME: Rely on recv_sys.pages! */ -class mlog_init_t -{ -public: - /** log sequence number of the page initialization */ - lsn_t lsn; - -private: - typedef std::map, - ut_allocator > > - map; - /** Map of page initialization operations. - FIXME: Merge this to recv_sys.pages! */ - map inits; -public: - /** Record that a page will be initialized by the redo log. - @param[in] page_id page identifier - @param[in] lsn log sequence number - @return whether the state was changed */ - bool add(const page_id_t page_id, lsn_t lsn) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - std::pair p = inits.emplace( - map::value_type(page_id, lsn)); - if (p.second) return true; - if (p.first->second >= lsn) return false; - p.first->second = lsn; - return true; - } - - /** Get the last stored lsn of the page id and its respective - init/load operation. - @param[in] page_id page id - @param[in,out] init initialize log or load log - @return the latest page initialization; - not valid after releasing recv_sys.mutex. */ - lsn_t last(page_id_t page_id) - { - mysql_mutex_assert_owner(&recv_sys.mutex); - return inits.find(page_id)->second; - } - - /** Determine if a page will be initialized or freed after a time. - @param page_id page identifier - @param lsn log sequence number - @return whether page_id will be freed or initialized after lsn */ - bool will_avoid_read(page_id_t page_id, lsn_t lsn) const - { - mysql_mutex_assert_owner(&recv_sys.mutex); - auto i= inits.find(page_id); - return i != inits.end() && i->second > lsn; - } - - /** Clear the data structure */ - void clear() { inits.clear(); } -}; - -static mlog_init_t mlog_init; - /** Process a record that indicates that a tablespace is being shrunk in size. 
@param page_id first page identifier that is not in the file @param lsn log sequence number of the shrink operation */ inline void recv_sys_t::trim(const page_id_t page_id, lsn_t lsn) { - DBUG_ENTER("recv_sys_t::trim"); - DBUG_LOG("ib_log", - "discarding log beyond end of tablespace " - << page_id << " before LSN " << lsn); - mysql_mutex_assert_owner(&mutex); - for (recv_sys_t::map::iterator p = pages.lower_bound(page_id); - p != pages.end() && p->first.space() == page_id.space();) { - recv_sys_t::map::iterator r = p++; - if (r->second.trim(lsn)) { - pages.erase(r); - } - } - DBUG_VOID_RETURN; + DBUG_ENTER("recv_sys_t::trim"); + DBUG_LOG("ib_log", "discarding log beyond end of tablespace " + << page_id << " before LSN " << lsn); + mysql_mutex_assert_owner(&mutex); + if (pages_it != pages.end() && pages_it->first.space() == page_id.space()) + pages_it= pages.end(); + for (recv_sys_t::map::iterator p = pages.lower_bound(page_id); + p != pages.end() && p->first.space() == page_id.space();) + { + recv_sys_t::map::iterator r = p++; + if (r->second.trim(lsn)) + { + ut_ad(!r->second.being_processed); + pages.erase(r); + } + } + DBUG_VOID_RETURN; } inline void recv_sys_t::read(os_offset_t total_offset, span buf) @@ -1117,15 +1129,10 @@ inline size_t recv_sys_t::files_size() @param[in] space_id the tablespace ID @param[in] ftype FILE_MODIFY, FILE_DELETE, or FILE_RENAME @param[in] lsn lsn of the redo log -@param[in] store whether the redo log has to be stored */ +@param[in] if_exists whether to check if the tablespace exists */ static void fil_name_process(const char *name, ulint len, uint32_t space_id, - mfile_type_t ftype, lsn_t lsn, store_t store) + mfile_type_t ftype, lsn_t lsn, bool if_exists) { - if (srv_operation == SRV_OPERATION_BACKUP - || srv_operation == SRV_OPERATION_BACKUP_NO_DEFER) { - return; - } - ut_ad(srv_operation <= SRV_OPERATION_EXPORT_RESTORED || srv_operation == SRV_OPERATION_RESTORE || srv_operation == SRV_OPERATION_RESTORE_EXPORT); @@ -1236,7 +1243,7 @@ same_space: case FIL_LOAD_DEFER: /** Skip the deferred spaces when lsn is already processed */ - if (store != store_t::STORE_IF_EXISTS) { + if (!if_exists) { deferred_spaces.add( space_id, fname.name.c_str(), lsn); } @@ -1279,9 +1286,8 @@ void recv_sys_t::close() deferred_spaces.clear(); ut_d(mysql_mutex_unlock(&mutex)); - last_stored_lsn= 0; + scanned_lsn= 0; mysql_mutex_destroy(&mutex); - pthread_cond_destroy(&cond); } recv_spaces.clear(); @@ -1296,34 +1302,34 @@ void recv_sys_t::create() ut_ad(this == &recv_sys); ut_ad(!is_initialised()); mysql_mutex_init(recv_sys_mutex_key, &mutex, nullptr); - pthread_cond_init(&cond, nullptr); apply_log_recs = false; - apply_batch_on = false; len = 0; offset = 0; lsn = 0; + scanned_lsn = 1; found_corrupt_log = false; found_corrupt_fs = false; file_checkpoint = 0; progress_time = time(NULL); + ut_ad(pages.empty()); + pages_it = pages.end(); recv_max_page_lsn = 0; memset(truncated_undo_spaces, 0, sizeof truncated_undo_spaces); - last_stored_lsn = 1; UT_LIST_INIT(blocks, &buf_block_t::unzip_LRU); } /** Clear a fully processed set of stored redo log records. 
*/ -inline void recv_sys_t::clear() +void recv_sys_t::clear() { mysql_mutex_assert_owner(&mutex); apply_log_recs= false; - apply_batch_on= false; ut_ad(!after_apply || found_corrupt_fs || !UT_LIST_GET_LAST(blocks)); pages.clear(); + pages_it= pages.end(); for (buf_block_t *block= UT_LIST_GET_LAST(blocks); block; ) { @@ -1334,8 +1340,6 @@ inline void recv_sys_t::clear() buf_block_free(block); block= prev_block; } - - pthread_cond_broadcast(&cond); } /** Free most recovery data structures. */ @@ -1347,52 +1351,14 @@ void recv_sys_t::debug_free() recovery_on= false; pages.clear(); + pages_it= pages.end(); mysql_mutex_unlock(&mutex); } -inline void *recv_sys_t::alloc(size_t len) -{ - mysql_mutex_assert_owner(&mutex); - ut_ad(len); - ut_ad(len <= srv_page_size); - - buf_block_t *block= UT_LIST_GET_FIRST(blocks); - if (UNIV_UNLIKELY(!block)) - { -create_block: - block= buf_block_alloc(); - block->page.access_time= 1U << 16 | - ut_calc_align(static_cast(len), ALIGNMENT); - static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); - UT_LIST_ADD_FIRST(blocks, block); - MEM_MAKE_ADDRESSABLE(block->page.frame, len); - MEM_NOACCESS(block->page.frame + len, srv_page_size - len); - return my_assume_aligned(block->page.frame); - } - - size_t free_offset= static_cast(block->page.access_time); - ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); - if (UNIV_UNLIKELY(!free_offset)) - { - ut_ad(srv_page_size == 65536); - goto create_block; - } - ut_ad(free_offset <= srv_page_size); - free_offset+= len; - - if (free_offset > srv_page_size) - goto create_block; - - block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | - ut_calc_align(static_cast(free_offset), ALIGNMENT); - MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - len, len); - return my_assume_aligned(block->page.frame + free_offset - len); -} - /** Free a redo log snippet. 
-@param data buffer returned by alloc() */ +@param data buffer allocated in add() */ inline void recv_sys_t::free(const void *data) { ut_ad(!ut_align_offset(data, ALIGNMENT)); @@ -1417,8 +1383,11 @@ inline void recv_sys_t::free(const void *data) ut_ad(block->page.state() == buf_page_t::MEMORY); ut_ad(static_cast(block->page.access_time - 1) < srv_page_size); - ut_ad(block->page.access_time >= 1U << 16); - if (!((block->page.access_time -= 1U << 16) >> 16)) + unsigned a= block->page.access_time; + ut_ad(a >= 1U << 16); + a-= 1U << 16; + block->page.access_time= a; + if (!(a >> 16)) { UT_LIST_REMOVE(blocks, block); MEM_MAKE_ADDRESSABLE(block->page.frame, srv_page_size); @@ -1604,6 +1573,9 @@ dberr_t recv_sys_t::find_checkpoint() bool wrong_size= false; byte *buf; + ut_ad(pages.empty()); + pages_it= pages.end(); + if (files.empty()) { file_checkpoint= 0; @@ -1880,7 +1852,31 @@ inline bool page_recv_t::trim(lsn_t start_lsn) } -inline void page_recv_t::recs_t::clear() +void page_recv_t::recs_t::rewind(lsn_t start_lsn) +{ + mysql_mutex_assert_owner(&recv_sys.mutex); + log_phys_t *trim= static_cast(head); + ut_ad(trim); + while (log_phys_t *next= static_cast(trim->next)) + { + ut_ad(trim->start_lsn < start_lsn); + if (next->start_lsn == start_lsn) + break; + trim= next; + } + tail= trim; + log_rec_t *l= tail->next; + tail->next= nullptr; + while (l) + { + log_rec_t *next= l->next; + recv_sys.free(l); + l= next; + } +} + + +void page_recv_t::recs_t::clear() { mysql_mutex_assert_owner(&recv_sys.mutex); for (const log_rec_t *l= head; l; ) @@ -1892,33 +1888,99 @@ inline void page_recv_t::recs_t::clear() head= tail= nullptr; } - /** Ignore any earlier redo log records for this page. */ inline void page_recv_t::will_not_read() { - ut_ad(state == RECV_NOT_PROCESSED || state == RECV_WILL_NOT_READ); - state= RECV_WILL_NOT_READ; + ut_ad(!being_processed); + skip_read= true; log.clear(); } +void recv_sys_t::erase(map::iterator p) +{ + ut_ad(p->second.being_processed <= 0); + p->second.log.clear(); + pages.erase(p); +} + +/** Free log for processed pages. */ +void recv_sys_t::garbage_collect() +{ + mysql_mutex_assert_owner(&mutex); + + if (pages_it != pages.end() && pages_it->second.being_processed < 0) + pages_it= pages.end(); + + for (map::iterator p= pages.begin(); p != pages.end(); ) + { + if (p->second.being_processed < 0) + { + map::iterator r= p++; + erase(r); + } + else + p++; + } +} + +/** Allocate a block from the buffer pool for recv_sys.pages */ +ATTRIBUTE_COLD buf_block_t *recv_sys_t::add_block() +{ + for (bool freed= false;;) + { + const auto rs= UT_LIST_GET_LEN(blocks) * 2; + mysql_mutex_lock(&buf_pool.mutex); + const auto bs= + UT_LIST_GET_LEN(buf_pool.free) + UT_LIST_GET_LEN(buf_pool.LRU); + if (UNIV_LIKELY(bs > BUF_LRU_MIN_LEN || rs < bs)) + { + buf_block_t *block= buf_LRU_get_free_block(have_mutex); + mysql_mutex_unlock(&buf_pool.mutex); + return block; + } + /* out of memory: redo log occupies more than 1/3 of buf_pool + and there are fewer than BUF_LRU_MIN_LEN pages left */ + mysql_mutex_unlock(&buf_pool.mutex); + if (freed) + return nullptr; + freed= true; + garbage_collect(); + } +} + +/** Wait for buffer pool to become available. 
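page_recv_t::recs_t::rewind() above walks the singly linked record list, keeps the snippets that precede the rewind LSN, and frees the tail. The same idea on a plain forward list, with an integer payload instead of log_phys_t; this is slightly simplified in that it drops every record at or past the rewind point, whereas the patch relies on the invariant (asserted there) that earlier snippets all start strictly before it.

#include <cstdint>
#include <cassert>

struct rec { uint64_t start_lsn; rec *next= nullptr; };

struct rec_list
{
  rec *head= nullptr, *tail= nullptr;

  // Drop every record whose start_lsn >= start_lsn, freeing the tail.
  void rewind(uint64_t start_lsn)
  {
    rec *trim= head;
    assert(trim && trim->start_lsn < start_lsn);
    while (rec *next= trim->next)
    {
      if (next->start_lsn >= start_lsn) break;
      trim= next;
    }
    tail= trim;
    rec *l= trim->next;
    trim->next= nullptr;
    while (l) { rec *next= l->next; delete l; l= next; }
  }
};

int main()
{
  rec_list recs;
  rec *a= new rec{10}, *b= new rec{20}, *c= new rec{30};
  a->next= b; b->next= c;
  recs.head= a; recs.tail= c;
  recs.rewind(20);                // keep only the record starting at LSN 10
  assert(recs.tail == a && !a->next);
  delete a;
}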
*/ +ATTRIBUTE_COLD void recv_sys_t::wait_for_pool(size_t pages) +{ + mysql_mutex_unlock(&mutex); + os_aio_wait_until_no_pending_reads(false); + mysql_mutex_lock(&mutex); + garbage_collect(); + mysql_mutex_lock(&buf_pool.mutex); + bool need_more= UT_LIST_GET_LEN(buf_pool.free) < pages; + mysql_mutex_unlock(&buf_pool.mutex); + if (need_more) + buf_flush_sync_batch(lsn); +} /** Register a redo log snippet for a page. @param it page iterator @param start_lsn start LSN of the mini-transaction @param lsn @see mtr_t::commit_lsn() @param l redo log snippet -@param len length of l, in bytes */ -inline void recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, - const byte *l, size_t len) +@param len length of l, in bytes +@return whether we ran out of memory */ +ATTRIBUTE_NOINLINE +bool recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, + const byte *l, size_t len) { mysql_mutex_assert_owner(&mutex); - page_id_t page_id = it->first; page_recv_t &recs= it->second; + buf_block_t *block; switch (*l & 0x70) { case FREE_PAGE: case INIT_PAGE: recs.will_not_read(); - mlog_init.add(page_id, start_lsn); /* FIXME: remove this! */ + mlog_init.add(it->first, start_lsn); /* FIXME: remove this! */ /* fall through */ default: log_phys_t *tail= static_cast(recs.log.last()); @@ -1927,7 +1989,7 @@ inline void recv_sys_t::add(map::iterator it, lsn_t start_lsn, lsn_t lsn, if (tail->start_lsn != start_lsn) break; ut_ad(tail->lsn == lsn); - buf_block_t *block= UT_LIST_GET_LAST(blocks); + block= UT_LIST_GET_LAST(blocks); ut_ad(block); const size_t used= static_cast(block->page.access_time - 1) + 1; ut_ad(used >= ALIGNMENT); @@ -1940,7 +2002,7 @@ append: MEM_MAKE_ADDRESSABLE(end + 1, len); /* Append to the preceding record for the page */ tail->append(l, len); - return; + return false; } if (end <= &block->page.frame[used - ALIGNMENT] || &block->page.frame[used] >= end) @@ -1954,8 +2016,49 @@ append: ut_calc_align(static_cast(new_used), ALIGNMENT); goto append; } - recs.log.append(new (alloc(log_phys_t::alloc_size(len))) + + const size_t size{log_phys_t::alloc_size(len)}; + ut_ad(size <= srv_page_size); + void *buf; + block= UT_LIST_GET_FIRST(blocks); + if (UNIV_UNLIKELY(!block)) + { + create_block: + block= add_block(); + if (UNIV_UNLIKELY(!block)) + return true; + block->page.access_time= 1U << 16 | + ut_calc_align(static_cast(size), ALIGNMENT); + static_assert(ut_is_2pow(ALIGNMENT), "ALIGNMENT must be a power of 2"); + UT_LIST_ADD_FIRST(blocks, block); + MEM_MAKE_ADDRESSABLE(block->page.frame, size); + MEM_NOACCESS(block->page.frame + size, srv_page_size - size); + buf= block->page.frame; + } + else + { + size_t free_offset= static_cast(block->page.access_time); + ut_ad(!ut_2pow_remainder(free_offset, ALIGNMENT)); + if (UNIV_UNLIKELY(!free_offset)) + { + ut_ad(srv_page_size == 65536); + goto create_block; + } + ut_ad(free_offset <= srv_page_size); + free_offset+= size; + + if (free_offset > srv_page_size) + goto create_block; + + block->page.access_time= ((block->page.access_time >> 16) + 1) << 16 | + ut_calc_align(static_cast(free_offset), ALIGNMENT); + MEM_MAKE_ADDRESSABLE(block->page.frame + free_offset - size, size); + buf= block->page.frame + free_offset - size; + } + + recs.log.append(new (my_assume_aligned(buf)) log_phys_t{start_lsn, lsn, l, len}); + return false; } /** Store/remove the freed pages in fil_name_t of recv_spaces. @@ -2219,13 +2322,84 @@ struct recv_ring : public recv_buf }; #endif -/** Parse and register one log_t::FORMAT_10_8 mini-transaction. 
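recv_sys_t::add() above now carves record snippets directly out of page-sized blocks, packing a use count into the upper 16 bits of page.access_time and the aligned end offset into the lower 16 bits. A standalone model of that packing on an ordinary byte array; ALIGNMENT, the 64 KiB wrap case and the field name mirror the patch, everything else is a simplification.

#include <cstdint>
#include <cstddef>
#include <cassert>

constexpr size_t BLOCK_SIZE= 65536;   // srv_page_size in the largest case
constexpr size_t ALIGNMENT= 8;

struct arena_block
{
  alignas(ALIGNMENT) unsigned char frame[BLOCK_SIZE];
  // upper 16 bits: number of live allocations; lower 16 bits: aligned end offset
  uint32_t access_time= 0;

  void *alloc(size_t len)
  {
    size_t used= access_time & 0xffff;
    if (access_time && !used)
      return nullptr;                 // offset wrapped at 64 KiB: block is full
    size_t end= used + len;
    if (end > BLOCK_SIZE)
      return nullptr;                 // caller must switch to a fresh block
    size_t aligned= (end + ALIGNMENT - 1) & ~(ALIGNMENT - 1);
    access_time= uint32_t(((access_time >> 16) + 1) << 16 | (aligned & 0xffff));
    return frame + used;
  }

  // Returns true once the last allocation is released and the block is free.
  bool free_one()
  {
    uint32_t a= access_time - (1U << 16);
    access_time= a;
    return (a >> 16) == 0;
  }
};

int main()
{
  arena_block b;
  void *p= b.alloc(100), *q= b.alloc(20);
  assert(p && q && q > p);
  b.free_one();
  assert(b.free_one());               // block can now be returned to the pool
}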
-@param store whether to store the records -@param l log data source */ template -inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) +void recv_sys_t::rewind(source &l, source &begin) noexcept +{ + ut_ad(srv_operation != SRV_OPERATION_BACKUP); + mysql_mutex_assert_owner(&mutex); + + const source end= l; + uint32_t rlen; + for (l= begin; !(l == end); l+= rlen) + { + const source recs{l}; + ++l; + const byte b= *recs; + + ut_ad(b > 1); + ut_ad(UNIV_LIKELY((b & 0x70) != RESERVED) || srv_force_recovery); + + rlen= b & 0xf; + if (!rlen) + { + const uint32_t lenlen= mlog_decode_varint_length(*l); + const uint32_t addlen= mlog_decode_varint(l); + ut_ad(addlen != MLOG_DECODE_ERROR); + rlen= addlen + 15 - lenlen; + l+= lenlen; + } + ut_ad(!l.is_eof(rlen)); + if (b & 0x80) + continue; + + uint32_t idlen= mlog_decode_varint_length(*l); + if (UNIV_UNLIKELY(idlen > 5 || idlen >= rlen)) + continue; + const uint32_t space_id= mlog_decode_varint(l); + if (UNIV_UNLIKELY(space_id == MLOG_DECODE_ERROR)) + continue; + l+= idlen; + rlen-= idlen; + idlen= mlog_decode_varint_length(*l); + if (UNIV_UNLIKELY(idlen > 5 || idlen > rlen)) + continue; + const uint32_t page_no= mlog_decode_varint(l); + if (UNIV_UNLIKELY(page_no == MLOG_DECODE_ERROR)) + continue; + const page_id_t id{space_id, page_no}; + if (pages_it == pages.end() || pages_it->first != id) + { + pages_it= pages.find(id); + if (pages_it == pages.end()) + continue; + } + + ut_ad(!pages_it->second.being_processed); + const log_phys_t *head= + static_cast(*pages_it->second.log.begin()); + if (!head || head->start_lsn == lsn) + { + erase(pages_it); + pages_it= pages.end(); + } + else + pages_it->second.log.rewind(lsn); + } + + l= begin; + pages_it= pages.end(); +} + +/** Parse and register one log_t::FORMAT_10_8 mini-transaction. +@tparam store whether to store the records +@param l log data source +@param if_exists if store: whether to check if the tablespace exists */ +template +inline +recv_sys_t::parse_mtr_result recv_sys_t::parse(source &l, bool if_exists) noexcept { +restart: #ifndef SUX_LOCK_GENERIC ut_ad(log_sys.latch.is_write_locked() || srv_operation == SRV_OPERATION_BACKUP || @@ -2234,12 +2408,15 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) mysql_mutex_assert_owner(&mutex); ut_ad(log_sys.next_checkpoint_lsn); ut_ad(log_sys.is_latest()); + ut_ad(store || !if_exists); + ut_ad(store || + srv_operation != SRV_OPERATION_BACKUP || + srv_operation != SRV_OPERATION_BACKUP_NO_DEFER); alignas(8) byte iv[MY_AES_BLOCK_SIZE]; byte *decrypt_buf= static_cast(alloca(srv_page_size)); const lsn_t start_lsn{lsn}; - map::iterator cached_pages_it{pages.end()}; /* Check that the entire mini-transaction is included within the buffer */ if (l.is_eof(0)) @@ -2248,7 +2425,7 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) if (*l <= 1) return GOT_EOF; /* We should never write an empty mini-transaction. 
*/ - const source begin{l}; + source begin{l}; uint32_t rlen; for (uint32_t total_len= 0; !l.is_eof(); l+= rlen, total_len+= rlen) { @@ -2348,7 +2525,6 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) sql_print_error("InnoDB: Unknown log record at LSN " LSN_PF, lsn); corrupted: found_corrupt_log= true; - pthread_cond_broadcast(&cond); return GOT_EOF; } @@ -2425,13 +2601,13 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) mach_write_to_4(iv + 12, page_no); got_page_op= !(b & 0x80); if (!got_page_op); - else if (srv_operation == SRV_OPERATION_BACKUP) + else if (!store && srv_operation == SRV_OPERATION_BACKUP) { if (page_no == 0 && first_page_init && (b & 0x10)) first_page_init(space_id); continue; } - else if (file_checkpoint && !is_predefined_tablespace(space_id)) + else if (store && file_checkpoint && !is_predefined_tablespace(space_id)) { recv_spaces_t::iterator i= recv_spaces.lower_bound(space_id); if (i != recv_spaces.end() && i->first == space_id); @@ -2500,7 +2676,7 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) trim({space_id, 0}, lsn); truncated_undo_spaces[space_id - srv_undo_space_id_start]= { lsn, page_no }; - if (undo_space_trunc) + if (!store && undo_space_trunc) undo_space_trunc(space_id); #endif last_offset= 1; /* the next record must not be same_page */ @@ -2541,7 +2717,7 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) { if (UNIV_UNLIKELY(rlen + last_offset > srv_page_size)) goto record_corrupted; - if (UNIV_UNLIKELY(!page_no) && file_checkpoint) + if (store && UNIV_UNLIKELY(!page_no) && file_checkpoint) { const bool has_size= last_offset <= FSP_HEADER_OFFSET + FSP_SIZE && last_offset + rlen >= FSP_HEADER_OFFSET + FSP_SIZE + 4; @@ -2620,38 +2796,57 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) ut_ad(modified.emplace(id).second || (b & 0x70) != INIT_PAGE); } #endif - const bool is_init= (b & 0x70) <= INIT_PAGE; - switch (store) { - case STORE_IF_EXISTS: - if (fil_space_t *space= fil_space_t::get(space_id)) + if (store) + { + if (if_exists) { - const auto size= space->get_size(); - space->release(); - if (!size) + if (fil_space_t *space= fil_space_t::get(space_id)) + { + const auto size= space->get_size(); + space->release(); + if (!size) + continue; + } + else if (!deferred_spaces.find(space_id)) continue; } - else if (!deferred_spaces.find(space_id)) - continue; - /* fall through */ - case STORE_YES: if (!mlog_init.will_avoid_read(id, start_lsn)) { - if (cached_pages_it == pages.end() || - cached_pages_it->first != id) - cached_pages_it= pages.emplace(id, page_recv_t{}).first; - add(cached_pages_it, start_lsn, lsn, - l.get_buf(cl, recs, decrypt_buf), l - recs + rlen); + if (pages_it == pages.end() || pages_it->first != id) + pages_it= pages.emplace(id, page_recv_t{}).first; + if (UNIV_UNLIKELY(add(pages_it, start_lsn, lsn, + l.get_buf(cl, recs, decrypt_buf), + l - recs + rlen))) + { + lsn= start_lsn; + log_sys.set_recovered_lsn(start_lsn); + l+= rlen; + offset= begin.ptr - log_sys.buf; + rewind(l, begin); + if (if_exists) + { + apply(false); + if (is_corrupt_fs()) + return GOT_EOF; + goto restart; + } + sql_print_information("InnoDB: Multi-batch recovery needed at LSN " + LSN_PF, lsn); + return GOT_OOM; + } } - continue; - case STORE_NO: - if (!is_init) - continue; + } + else if ((b & 0x70) <= INIT_PAGE) + { mlog_init.add(id, start_lsn); - map::iterator i= pages.find(id); - if (i == pages.end()) - 
continue; - i->second.log.clear(); - pages.erase(i); + if (pages_it == pages.end() || pages_it->first != id) + { + pages_it= pages.find(id); + if (pages_it == pages.end()) + continue; + } + map::iterator r= pages_it++; + erase(r); } } else if (rlen) @@ -2664,6 +2859,11 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) if (rlen < UNIV_PAGE_SIZE_MAX && !l.is_zero(rlen)) continue; } + else if (store) + { + ut_ad(file_checkpoint); + continue; + } else if (const lsn_t c= l.read8()) { if (UNIV_UNLIKELY(srv_print_verbose_log == 2)) @@ -2745,21 +2945,27 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) if (UNIV_UNLIKELY(!recv_needed_recovery && srv_read_only_mode)) continue; + if (!store && + (srv_operation == SRV_OPERATION_BACKUP || + srv_operation == SRV_OPERATION_BACKUP_NO_DEFER)) + { + if ((b & 0xf0) < FILE_CHECKPOINT && log_file_op) + log_file_op(space_id, b & 0xf0, + reinterpret_cast(fn), + static_cast(fnend - fn), + reinterpret_cast(fn2), + fn2 ? static_cast(fn2end - fn2) : 0); + continue; + } + fil_name_process(fn, fnend - fn, space_id, (b & 0xf0) == FILE_DELETE ? FILE_DELETE : FILE_MODIFY, - start_lsn, store); - - if ((b & 0xf0) < FILE_CHECKPOINT && log_file_op) - log_file_op(space_id, b & 0xf0, - reinterpret_cast(fn), - static_cast(fnend - fn), - reinterpret_cast(fn2), - fn2 ? static_cast(fn2end - fn2) : 0); + start_lsn, if_exists); if (fn2) { fil_name_process(fn2, fn2end - fn2, space_id, - FILE_RENAME, start_lsn, store); + FILE_RENAME, start_lsn, if_exists); if (file_checkpoint) { const size_t len= fn2end - fn2; @@ -2783,18 +2989,23 @@ inline recv_sys_t::parse_mtr_result recv_sys_t::parse(store_t store, source &l) return OK; } -ATTRIBUTE_NOINLINE -recv_sys_t::parse_mtr_result recv_sys_t::parse_mtr(store_t store) noexcept +template +recv_sys_t::parse_mtr_result recv_sys_t::parse_mtr(bool if_exists) noexcept { recv_buf s{&log_sys.buf[recv_sys.offset]}; - return recv_sys.parse(store, s); + return recv_sys.parse(s, if_exists); } +/** for mariadb-backup; @see xtrabackup_copy_logfile() */ +template +recv_sys_t::parse_mtr_result recv_sys_t::parse_mtr(bool) noexcept; + #ifdef HAVE_PMEM -recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(store_t store) noexcept +template +recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(bool if_exists) noexcept { - recv_sys_t::parse_mtr_result r{parse_mtr(store)}; - if (r != PREMATURE_EOF || !log_sys.is_pmem()) + recv_sys_t::parse_mtr_result r{parse_mtr(if_exists)}; + if (UNIV_LIKELY(r != PREMATURE_EOF) || !log_sys.is_pmem()) return r; ut_ad(recv_sys.len == log_sys.file_size); ut_ad(recv_sys.offset >= log_sys.START_OFFSET); @@ -2803,7 +3014,7 @@ recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(store_t store) noexcept {recv_sys.offset == recv_sys.len ? &log_sys.buf[log_sys.START_OFFSET] : &log_sys.buf[recv_sys.offset]}; - return recv_sys.parse(store, s); + return recv_sys.parse(s, if_exists); } #endif @@ -2811,21 +3022,19 @@ recv_sys_t::parse_mtr_result recv_sys_t::parse_pmem(store_t store) noexcept lsn of a log record. 
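parse_mtr() above becomes a template on a compile-time store flag, and the patch adds an explicit instantiation (marked "for mariadb-backup; see xtrabackup_copy_logfile()") so that another translation unit can link against it, presumably the non-storing specialization. A toy example of that explicit-instantiation pattern; the class and function names here are stand-ins, not the InnoDB ones.

struct parser
{
  enum result { OK, EOF_REACHED };
  template <bool store> result parse(bool if_exists) noexcept;
};

// Definition lives in one .cc file.
template <bool store>
parser::result parser::parse(bool if_exists) noexcept
{
  // A storing parser would buffer records here; a non-storing one only scans.
  (void)if_exists;
  return store ? OK : EOF_REACHED;
}

// Explicit instantiation: without this line, a caller in another translation
// unit that only saw the declaration would fail to link against parse<false>.
template parser::result parser::parse<false>(bool) noexcept;

int main()
{
  parser p;
  return p.parse<false>(false) == parser::EOF_REACHED ? 0 : 1;
}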
@param[in,out] block buffer pool page @param[in,out] mtr mini-transaction -@param[in,out] p recovery address +@param[in,out] recs log records to apply @param[in,out] space tablespace, or NULL if not looked up yet @param[in,out] init_lsn page initialization LSN, or 0 @return the recovered page @retval nullptr on failure */ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, - const recv_sys_t::map::iterator &p, - fil_space_t *space= nullptr, - lsn_t init_lsn= 0) + page_recv_t &recs, + fil_space_t *space, lsn_t init_lsn = 0) { - mysql_mutex_assert_owner(&recv_sys.mutex); + mysql_mutex_assert_not_owner(&recv_sys.mutex); ut_ad(recv_sys.apply_log_recs); ut_ad(recv_needed_recovery); - ut_ad(block->page.id() == p->first); - ut_ad(!p->second.is_being_processed()); + ut_ad(recs.being_processed == 1); ut_ad(!space || space->id == block->page.id().space()); ut_ad(log_sys.is_latest()); @@ -2837,10 +3046,6 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, block->page.id().space(), block->page.id().page_no())); - p->second.state = page_recv_t::RECV_BEING_PROCESSED; - - mysql_mutex_unlock(&recv_sys.mutex); - byte *frame = UNIV_LIKELY_NULL(block->page.zip.data) ? block->page.zip.data : block->page.frame; @@ -2853,7 +3058,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, bool skipped_after_init = false; - for (const log_rec_t* recv : p->second.log) { + for (const log_rec_t* recv : recs.log) { const log_phys_t* l = static_cast(recv); ut_ad(l->lsn); ut_ad(end_lsn <= l->lsn); @@ -2911,8 +3116,7 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, block->page.id().space(), block->page.id().page_no())); - log_phys_t::apply_status a= l->apply(*block, - p->second.last_offset); + log_phys_t::apply_status a= l->apply(*block, recs.last_offset); switch (a) { case log_phys_t::APPLIED_NO: @@ -2977,12 +3181,6 @@ static buf_block_t *recv_recover_page(buf_block_t *block, mtr_t &mtr, set_start_lsn: if ((a == log_phys_t::APPLIED_CORRUPTED || recv_sys.is_corrupt_log()) && !srv_force_recovery) { - if (init_lsn) { - if (space || block->page.id().page_no()) { - block->page.lock.x_lock_recursive(); - } - } - mtr.discard_modifications(); mtr.commit(); @@ -3031,26 +3229,11 @@ set_start_lsn: mtr.commit(); done: - time_t now = time(NULL); - - mysql_mutex_lock(&recv_sys.mutex); - + /* FIXME: do this in page read, protected with recv_sys.mutex! 
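recv_recover_page() above walks the per-page record list and applies only those snippets whose end LSN is newer than the LSN already stored on the page, which is what makes redo application idempotent across batches. A minimal model of that filter; page_lsn and the record list are ordinary variables here rather than page-frame fields.

#include <cstdint>
#include <vector>
#include <cstdio>

using lsn_t= uint64_t;

struct log_rec { lsn_t lsn; const char *what; };  // lsn = end LSN of the record

// Apply only records newer than what the page already contains,
// advancing the page LSN as they are applied.
lsn_t apply(lsn_t page_lsn, const std::vector<log_rec> &recs)
{
  for (const log_rec &r : recs)
  {
    if (r.lsn <= page_lsn)
    {
      std::printf("skip  %-12s (LSN %llu already on page)\n",
                  r.what, (unsigned long long)r.lsn);
      continue;
    }
    std::printf("apply %-12s (LSN %llu)\n", r.what, (unsigned long long)r.lsn);
    page_lsn= r.lsn;
  }
  return page_lsn;
}

int main()
{
  // The page was last written at LSN 150, so the first record is a no-op.
  lsn_t page_lsn= apply(150, {{100, "write bytes"}, {200, "update"}, {300, "free"}});
  return page_lsn == 300 ? 0 : 1;
}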
*/ if (recv_max_page_lsn < page_lsn) { recv_max_page_lsn = page_lsn; } - ut_ad(!block || p->second.is_being_processed()); - ut_ad(!block || !recv_sys.pages.empty()); - - if (recv_sys.report(now)) { - const size_t n = recv_sys.pages.size(); - sql_print_information("InnoDB: To recover: %zu pages from log", - n); - service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, - "To recover: %zu pages" - " from log", n); - } - return block; } @@ -3064,150 +3247,344 @@ ATTRIBUTE_COLD void recv_sys_t::free_corrupted_page(page_id_t page_id) mysql_mutex_lock(&mutex); map::iterator p= pages.find(page_id); - if (p != pages.end()) + if (p == pages.end()) { - p->second.log.clear(); - pages.erase(p); - if (!srv_force_recovery) - { - set_corrupt_fs(); - ib::error() << "Unable to apply log to corrupted page " << page_id - << "; set innodb_force_recovery to ignore"; - } - else - ib::warn() << "Discarding log for corrupted page " << page_id; + mysql_mutex_unlock(&mutex); + return; } - if (pages.empty()) - pthread_cond_broadcast(&cond); + p->second.being_processed= -1; + if (!srv_force_recovery) + set_corrupt_fs(); mysql_mutex_unlock(&mutex); -} -/** Possibly finish a recovery batch. */ -inline void recv_sys_t::maybe_finish_batch() -{ - mysql_mutex_assert_owner(&mutex); - ut_ad(recovery_on); - if (!apply_batch_on || pages.empty() || is_corrupt_log() || is_corrupt_fs()) - pthread_cond_broadcast(&cond); + ib::error_or_warn(!srv_force_recovery) + << "Unable to apply log to corrupted page " << page_id; } ATTRIBUTE_COLD void recv_sys_t::set_corrupt_log() { mysql_mutex_lock(&mutex); found_corrupt_log= true; - pthread_cond_broadcast(&cond); mysql_mutex_unlock(&mutex); } ATTRIBUTE_COLD void recv_sys_t::set_corrupt_fs() { mysql_mutex_assert_owner(&mutex); + if (!srv_force_recovery) + sql_print_information("InnoDB: Set innodb_force_recovery=1" + " to ignore corrupted pages."); found_corrupt_fs= true; - pthread_cond_broadcast(&cond); } -/** Apply any buffered redo log to a page that was just read from a data file. -@param[in,out] space tablespace -@param[in,out] bpage buffer pool page +/** Apply any buffered redo log to a page. +@param space tablespace +@param bpage buffer pool page @return whether the page was recovered correctly */ bool recv_recover_page(fil_space_t* space, buf_page_t* bpage) { - mtr_t mtr; - mtr.start(); - mtr.set_log_mode(MTR_LOG_NO_REDO); + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); - ut_ad(bpage->frame); - /* Move the ownership of the x-latch on the page to - this OS thread, so that we can acquire a second - x-latch on it. This is needed for the operations to - the page to pass the debug checks. */ - bpage->lock.claim_ownership(); - bpage->lock.x_lock_recursive(); - bpage->fix_on_recovery(); - mtr.memo_push(reinterpret_cast(bpage), - MTR_MEMO_PAGE_X_FIX); + ut_ad(bpage->frame); + /* Move the ownership of the x-latch on the page to this OS thread, + so that we can acquire a second x-latch on it. This is needed for + the operations to the page to pass the debug checks. 
*/ + bpage->lock.claim_ownership(); + bpage->lock.x_lock_recursive(); + bpage->fix_on_recovery(); + mtr.memo_push(reinterpret_cast(bpage), MTR_MEMO_PAGE_X_FIX); - buf_block_t* success = reinterpret_cast(bpage); + buf_block_t *success= reinterpret_cast(bpage); - mysql_mutex_lock(&recv_sys.mutex); - if (recv_sys.apply_log_recs) { - recv_sys_t::map::iterator p = recv_sys.pages.find(bpage->id()); - if (p != recv_sys.pages.end() - && !p->second.is_being_processed()) { - success = recv_recover_page(success, mtr, p, space); - if (UNIV_LIKELY(!!success)) { - p->second.log.clear(); - recv_sys.pages.erase(p); - } - recv_sys.maybe_finish_batch(); - goto func_exit; - } - } - - mtr.commit(); -func_exit: - mysql_mutex_unlock(&recv_sys.mutex); - ut_ad(mtr.has_committed()); - return success; -} - -/** Read pages for which log needs to be applied. -@param page_id first page identifier to read -@param i iterator to recv_sys.pages -@param last_batch whether it is possible to write more redo log */ -TRANSACTIONAL_TARGET -static void recv_read_in_area(page_id_t page_id, recv_sys_t::map::iterator i, - bool last_batch) -{ - uint32_t page_nos[32]; - ut_ad(page_id == i->first); - page_id.set_page_no(ut_2pow_round(page_id.page_no(), 32U)); - const page_id_t up_limit{page_id + 31}; - uint32_t* p= page_nos; - - for (; i != recv_sys.pages.end() && i->first <= up_limit; i++) + mysql_mutex_lock(&recv_sys.mutex); + if (recv_sys.apply_log_recs) { - if (i->second.state == page_recv_t::RECV_NOT_PROCESSED) + const page_id_t id{bpage->id()}; + recv_sys_t::map::iterator p= recv_sys.pages.find(id); + if (p == recv_sys.pages.end()); + else if (p->second.being_processed < 0) { - i->second.state= page_recv_t::RECV_BEING_READ; - *p++= i->first.page_no(); + recv_sys.pages_it_invalidate(p); + recv_sys.erase(p); + } + else + { + p->second.being_processed= 1; + const lsn_t init_lsn{p->second.skip_read ? mlog_init.last(id) : 0}; + mysql_mutex_unlock(&recv_sys.mutex); + success= recv_recover_page(success, mtr, p->second, space, init_lsn); + p->second.being_processed= -1; + goto func_exit; } } - if (p != page_nos) + mysql_mutex_unlock(&recv_sys.mutex); + mtr.commit(); +func_exit: + ut_ad(mtr.has_committed()); + return success; +} + +void IORequest::fake_read_complete(os_offset_t offset) const +{ + ut_ad(node); + ut_ad(is_read()); + ut_ad(bpage); + ut_ad(bpage->frame); + ut_ad(recv_recovery_is_on()); + ut_ad(offset); + + mtr_t mtr; + mtr.start(); + mtr.set_log_mode(MTR_LOG_NO_REDO); + + ut_ad(bpage->frame); + /* Move the ownership of the x-latch on the page to this OS thread, + so that we can acquire a second x-latch on it. This is needed for + the operations to the page to pass the debug checks. 
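The fake_read_complete() path introduced above lets recovery reuse the asynchronous read-completion machinery without touching the data file: os_fake_read() (later in this patch) queues an aiocb with a null buffer and zero length, and carries the page-initialization LSN in the otherwise unused offset field, so the completion callback can rebuild the page purely from buffered redo. A stripped-down illustration of that "completion without I/O" idea using a plain task queue; none of the types below are the InnoDB ones.

#include <cstdint>
#include <functional>
#include <queue>
#include <iostream>

using lsn_t= uint64_t;

struct fake_request
{
  int page_no;
  lsn_t init_lsn;    // carried where a real read would carry the file offset
};

std::queue<std::function<void()>> completion_queue;

void apply_buffered_redo(int page_no, lsn_t init_lsn)
{
  std::cout << "rebuilding page " << page_no
            << " from redo starting at LSN " << init_lsn << "\n";
}

// Instead of submitting a real read, enqueue only the completion callback.
void submit_fake_read(const fake_request &r)
{
  completion_queue.push([r] { apply_buffered_redo(r.page_no, r.init_lsn); });
}

int main()
{
  submit_fake_read({42, 7000});
  while (!completion_queue.empty())
  {
    completion_queue.front()();   // the "I/O thread" runs the completion
    completion_queue.pop();
  }
}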
*/ + bpage->lock.claim_ownership(); + bpage->lock.x_lock_recursive(); + bpage->fix_on_recovery(); + mtr.memo_push(reinterpret_cast(bpage), MTR_MEMO_PAGE_X_FIX); + + page_recv_t &recs= *reinterpret_cast(slot); + ut_ad(recs.being_processed == 1); + const lsn_t init_lsn{offset}; + ut_ad(init_lsn > 1); + + if (recv_recover_page(reinterpret_cast(bpage), + mtr, recs, node->space, init_lsn)) { - mysql_mutex_unlock(&recv_sys.mutex); - if (!last_batch) log_sys.latch.wr_unlock(); - buf_read_recv_pages(page_id.space(), {page_nos, p}); - if (!last_batch) log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&recv_sys.mutex); + ut_ad(bpage->oldest_modification() || bpage->is_freed()); + bpage->lock.x_unlock(true); + } + recs.being_processed= -1; + ut_ad(mtr.has_committed()); + + node->space->release(); +} + +/** @return whether a page has been freed */ +inline bool fil_space_t::is_freed(uint32_t page) +{ + std::lock_guard freed_lock(freed_range_mutex); + return freed_ranges.contains(page); +} + +bool recv_sys_t::report(time_t time) +{ + if (time - progress_time < 15) + return false; + progress_time= time; + return true; +} + +ATTRIBUTE_COLD +void recv_sys_t::report_progress() const +{ + mysql_mutex_assert_owner(&mutex); + const size_t n{pages.size()}; + if (recv_sys.scanned_lsn == recv_sys.lsn) + { + sql_print_information("InnoDB: To recover: %zu pages", n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "To recover: %zu pages", n); + } + else + { + sql_print_information("InnoDB: To recover: LSN " LSN_PF + "/" LSN_PF "; %zu pages", + recv_sys.lsn, recv_sys.scanned_lsn, n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "To recover: LSN " LSN_PF + "/" LSN_PF "; %zu pages", + recv_sys.lsn, recv_sys.scanned_lsn, n); } } +/** Apply a recovery batch. 
+@param space_id current tablespace identifier +@param space current tablespace +@param free_block spare buffer block +@param last_batch whether it is possible to write more redo log +@return whether the caller must provide a new free_block */ +bool recv_sys_t::apply_batch(uint32_t space_id, fil_space_t *&space, + buf_block_t *&free_block, bool last_batch) +{ + mysql_mutex_assert_owner(&mutex); + ut_ad(pages_it != pages.end()); + ut_ad(!pages_it->second.log.empty()); + + mysql_mutex_lock(&buf_pool.mutex); + size_t n= 0, max_n= std::min(BUF_LRU_MIN_LEN, + UT_LIST_GET_LEN(buf_pool.LRU) + + UT_LIST_GET_LEN(buf_pool.free)); + mysql_mutex_unlock(&buf_pool.mutex); + + map::iterator begin= pages.end(); + page_id_t begin_id{~0ULL}; + + while (pages_it != pages.end() && n < max_n) + { + ut_ad(!buf_dblwr.is_inside(pages_it->first)); + if (!pages_it->second.being_processed) + { + if (space_id != pages_it->first.space()) + { + space_id= pages_it->first.space(); + if (space) + space->release(); + space= fil_space_t::get(space_id); + if (!space) + { + auto d= deferred_spaces.defers.find(space_id); + if (d == deferred_spaces.defers.end() || d->second.deleted) + /* For deleted files we preserve the deferred_spaces entry */; + else if (!free_block) + return true; + else + { + space= recover_deferred(pages_it, d->second.file_name, free_block); + deferred_spaces.defers.erase(d); + if (!space && !srv_force_recovery) + { + set_corrupt_fs(); + return false; + } + } + } + } + if (!space || space->is_freed(pages_it->first.page_no())) + pages_it->second.being_processed= -1; + else if (!n++) + { + begin= pages_it; + begin_id= pages_it->first; + } + } + pages_it++; + } + + if (!last_batch) + log_sys.latch.wr_unlock(); + + pages_it= begin; + + if (report(time(nullptr))) + report_progress(); + + if (!n) + goto wait; + + mysql_mutex_lock(&buf_pool.mutex); + + if (UNIV_UNLIKELY(UT_LIST_GET_LEN(buf_pool.free) < n)) + { + mysql_mutex_unlock(&buf_pool.mutex); + wait: + wait_for_pool(n); + if (n); + else if (!last_batch) + goto unlock_relock; + else + goto get_last; + pages_it= pages.lower_bound(begin_id); + ut_ad(pages_it != pages.end()); + } + else + mysql_mutex_unlock(&buf_pool.mutex); + + while (pages_it != pages.end()) + { + ut_ad(!buf_dblwr.is_inside(pages_it->first)); + if (!pages_it->second.being_processed) + { + const page_id_t id{pages_it->first}; + + if (space_id != id.space()) + { + space_id= id.space(); + if (space) + space->release(); + space= fil_space_t::get(space_id); + } + if (!space) + { + const auto it= deferred_spaces.defers.find(space_id); + if (it != deferred_spaces.defers.end() && !it->second.deleted) + /* The records must be processed after recover_deferred(). */ + goto next; + goto space_not_found; + } + else if (space->is_freed(id.page_no())) + { + space_not_found: + pages_it->second.being_processed= -1; + goto next; + } + else + { + page_recv_t &recs= pages_it->second; + ut_ad(!recs.log.empty()); + recs.being_processed= 1; + const lsn_t init_lsn{recs.skip_read ? 
mlog_init.last(id) : 0}; + mysql_mutex_unlock(&mutex); + buf_read_recover(space, id, recs, init_lsn); + } + + if (!--n) + { + if (last_batch) + goto relock_last; + goto relock; + } + mysql_mutex_lock(&mutex); + pages_it= pages.lower_bound(id); + } + else + next: + pages_it++; + } + + if (!last_batch) + { + unlock_relock: + mysql_mutex_unlock(&mutex); + relock: + log_sys.latch.wr_lock(SRW_LOCK_CALL); + relock_last: + mysql_mutex_lock(&mutex); + get_last: + pages_it= pages.lower_bound(begin_id); + } + + return false; +} + /** Attempt to initialize a page based on redo log records. -@param page_id page identifier -@param p iterator pointing to page_id +@param p iterator @param mtr mini-transaction @param b pre-allocated buffer pool block +@param init page initialization @return the recovered block @retval nullptr if the page cannot be initialized based on log records @retval -1 if the page cannot be recovered due to corruption */ -inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id, - map::iterator &p, mtr_t &mtr, - buf_block_t *b) +inline buf_block_t *recv_sys_t::recover_low(const map::iterator &p, mtr_t &mtr, + buf_block_t *b, lsn_t init_lsn) { - mysql_mutex_assert_owner(&mutex); - ut_ad(p->first == page_id); + mysql_mutex_assert_not_owner(&mutex); page_recv_t &recs= p->second; - ut_ad(recs.state == page_recv_t::RECV_WILL_NOT_READ); + ut_ad(recs.skip_read); + ut_ad(recs.being_processed == 1); buf_block_t* block= nullptr; - const lsn_t init_lsn= mlog_init.last(page_id); const lsn_t end_lsn= recs.log.last()->lsn; if (end_lsn < init_lsn) - DBUG_LOG("ib_log", "skip log for page " << page_id + DBUG_LOG("ib_log", "skip log for page " << p->first << " LSN " << end_lsn << " < " << init_lsn); - fil_space_t *space= fil_space_t::get(page_id.space()); + fil_space_t *space= fil_space_t::get(p->first.space()); mtr.start(); mtr.set_log_mode(MTR_LOG_NO_REDO); @@ -3216,81 +3593,76 @@ inline buf_block_t *recv_sys_t::recover_low(const page_id_t page_id, if (!space) { - if (page_id.page_no() != 0) + if (p->first.page_no() != 0) { nothing_recoverable: mtr.commit(); return nullptr; } - auto it= recv_spaces.find(page_id.space()); + auto it= recv_spaces.find(p->first.space()); ut_ad(it != recv_spaces.end()); uint32_t flags= it->second.flags; zip_size= fil_space_t::zip_size(flags); - block= buf_page_create_deferred(page_id.space(), zip_size, &mtr, b); + block= buf_page_create_deferred(p->first.space(), zip_size, &mtr, b); ut_ad(block == b); block->page.lock.x_lock_recursive(); } else { - block= buf_page_create(space, page_id.page_no(), zip_size, &mtr, b); + block= buf_page_create(space, p->first.page_no(), zip_size, &mtr, b); if (UNIV_UNLIKELY(block != b)) { /* The page happened to exist in the buffer pool, or it was just being read in. Before the exclusive page latch was acquired by buf_page_create(), all changes to the page must have been applied. 
*/ - ut_ad(pages.find(page_id) == pages.end()); + ut_d(mysql_mutex_lock(&mutex)); + ut_ad(pages.find(p->first) == pages.end()); + ut_d(mysql_mutex_unlock(&mutex)); space->release(); goto nothing_recoverable; } } - ut_ad(&recs == &pages.find(page_id)->second); - map::iterator r= p++; - block= recv_recover_page(block, mtr, r, space, init_lsn); + ut_d(mysql_mutex_lock(&mutex)); + ut_ad(&recs == &pages.find(p->first)->second); + ut_d(mysql_mutex_unlock(&mutex)); + block= recv_recover_page(block, mtr, recs, space, init_lsn); ut_ad(mtr.has_committed()); - if (block) - { - recs.log.clear(); - pages.erase(r); - } - else - block= reinterpret_cast(-1); - - if (pages.empty()) - pthread_cond_signal(&cond); - if (space) space->release(); - return block; + return block ? block : reinterpret_cast(-1); } /** Attempt to initialize a page based on redo log records. @param page_id page identifier @return recovered block @retval nullptr if the page cannot be initialized based on log records */ -buf_block_t *recv_sys_t::recover_low(const page_id_t page_id) +ATTRIBUTE_COLD buf_block_t *recv_sys_t::recover_low(const page_id_t page_id) { - buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex); - buf_block_t *block= nullptr; - mysql_mutex_lock(&mutex); map::iterator p= pages.find(page_id); - if (p != pages.end() && p->second.state == page_recv_t::RECV_WILL_NOT_READ) + if (p != pages.end() && !p->second.being_processed && p->second.skip_read) { + p->second.being_processed= 1; + const lsn_t init_lsn{mlog_init.last(page_id)}; + mysql_mutex_unlock(&mutex); + buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex); mtr_t mtr; - block= recover_low(page_id, p, mtr, free_block); + buf_block_t *block= recover_low(p, mtr, free_block, init_lsn); + p->second.being_processed= -1; ut_ad(!block || block == reinterpret_cast(-1) || block == free_block); + if (UNIV_UNLIKELY(!block)) + buf_pool.free_block(free_block); + return block; } mysql_mutex_unlock(&mutex); - if (UNIV_UNLIKELY(!block)) - buf_pool.free_block(free_block); - return block; + return nullptr; } inline fil_space_t *fil_system_t::find(const char *path) const @@ -3338,41 +3710,15 @@ void recv_sys_t::apply(bool last_batch) mysql_mutex_assert_owner(&mutex); - timespec abstime; - - while (apply_batch_on) - { - if (is_corrupt_log()) - return; - if (last_batch) - my_cond_wait(&cond, &mutex.m_mutex); - else - { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif - log_sys.latch.wr_unlock(); - set_timespec_nsec(abstime, 500000000ULL); /* 0.5s */ - my_cond_timedwait(&cond, &mutex.m_mutex, &abstime); - mysql_mutex_unlock(&mutex); - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - } - } - - mtr_t mtr; + garbage_collect(); if (!pages.empty()) { - const char *msg= last_batch - ? 
"Starting final batch to recover" - : "Starting a batch to recover"; - const size_t n= pages.size(); - sql_print_information("InnoDB: %s %zu pages from redo log.", msg, n); - sd_notifyf(0, "STATUS=%s %zu pages from redo log", msg, n); + ut_ad(!last_batch || lsn == scanned_lsn); + progress_time= time(nullptr); + report_progress(); apply_log_recs= true; - apply_batch_on= true; for (auto id= srv_undo_tablespaces_open; id--;) { @@ -3388,136 +3734,73 @@ void recv_sys_t::apply(bool last_batch) if (fil_space_t *space = fil_space_get(id + srv_undo_space_id_start)) { ut_ad(UT_LIST_GET_LEN(space->chain) == 1); + ut_ad(space->recv_size >= t.pages); fil_node_t *file= UT_LIST_GET_FIRST(space->chain); ut_ad(file->is_open()); os_file_truncate(file->name, file->handle, - os_offset_t{t.pages} << srv_page_size_shift, true); + os_offset_t{space->recv_size} << + srv_page_size_shift, true); } } } fil_system.extend_to_recv_size(); - /* We must release log_sys.latch and recv_sys.mutex before - invoking buf_LRU_get_free_block(). Allocating a block may initiate - a redo log write and therefore acquire log_sys.latch. To avoid - deadlocks, log_sys.latch must not be acquired while holding - recv_sys.mutex. */ - mysql_mutex_unlock(&mutex); - if (!last_batch) - log_sys.latch.wr_unlock(); + fil_space_t *space= nullptr; + uint32_t space_id= ~0; + buf_block_t *free_block= nullptr; - buf_block_t *free_block= buf_LRU_get_free_block(have_no_mutex); - - if (!last_batch) - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - - for (map::iterator p= pages.begin(); p != pages.end(); ) + for (pages_it= pages.begin(); pages_it != pages.end(); + pages_it= pages.begin()) { - const page_id_t page_id= p->first; - ut_ad(!p->second.log.empty()); - - const uint32_t space_id= page_id.space(); - auto d= deferred_spaces.defers.find(space_id); - if (d != deferred_spaces.defers.end()) + if (!free_block) { - if (d->second.deleted) - { - /* For deleted files we must preserve the entry in deferred_spaces */ -erase_for_space: - while (p != pages.end() && p->first.space() == space_id) - { - map::iterator r= p++; - r->second.log.clear(); - pages.erase(r); - } - } - else if (recover_deferred(p, d->second.file_name, free_block)) - { - if (!srv_force_recovery) - set_corrupt_fs(); - deferred_spaces.defers.erase(d); - goto erase_for_space; - } - else - deferred_spaces.defers.erase(d); - if (!free_block) - goto next_free_block; - p= pages.lower_bound(page_id); - continue; + if (!last_batch) + log_sys.latch.wr_unlock(); + wait_for_pool(1); + pages_it= pages.begin(); + mysql_mutex_unlock(&mutex); + /* We must release log_sys.latch and recv_sys.mutex before + invoking buf_LRU_get_free_block(). Allocating a block may initiate + a redo log write and therefore acquire log_sys.latch. To avoid + deadlocks, log_sys.latch must not be acquired while holding + recv_sys.mutex. 
*/ + free_block= buf_LRU_get_free_block(have_no_mutex); + if (!last_batch) + log_sys.latch.wr_lock(SRW_LOCK_CALL); + mysql_mutex_lock(&mutex); + pages_it= pages.begin(); } - switch (p->second.state) { - case page_recv_t::RECV_BEING_READ: - case page_recv_t::RECV_BEING_PROCESSED: - p++; - continue; - case page_recv_t::RECV_WILL_NOT_READ: - if (UNIV_LIKELY(!!recover_low(page_id, p, mtr, free_block))) - { -next_free_block: - mysql_mutex_unlock(&mutex); - if (!last_batch) - log_sys.latch.wr_unlock(); - free_block= buf_LRU_get_free_block(have_no_mutex); - if (!last_batch) - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - break; - } - ut_ad(p == pages.end() || p->first > page_id); - continue; - case page_recv_t::RECV_NOT_PROCESSED: - recv_read_in_area(page_id, p, last_batch); - } - p= pages.lower_bound(page_id); - /* Ensure that progress will be made. */ - ut_ad(p == pages.end() || p->first > page_id || - p->second.state >= page_recv_t::RECV_BEING_READ); - } - - buf_pool.free_block(free_block); - - /* Wait until all the pages have been processed */ - for (;;) - { - const bool empty= pages.empty(); - if (empty && !os_aio_pending_reads()) - break; - - if (!is_corrupt_fs() && !is_corrupt_log()) + while (pages_it != pages.end()) { - if (last_batch) + if (is_corrupt_fs() || is_corrupt_log()) { - if (!empty) - my_cond_wait(&cond, &mutex.m_mutex); - else + if (space) + space->release(); + if (free_block) { mysql_mutex_unlock(&mutex); - os_aio_wait_until_no_pending_reads(false); + mysql_mutex_lock(&buf_pool.mutex); + buf_LRU_block_free_non_file_page(free_block); + mysql_mutex_unlock(&buf_pool.mutex); mysql_mutex_lock(&mutex); - ut_ad(pages.empty()); } + return; } - else - { -#ifndef SUX_LOCK_GENERIC - ut_ad(log_sys.latch.is_write_locked()); -#endif - log_sys.latch.wr_unlock(); - set_timespec_nsec(abstime, 500000000ULL); /* 0.5s */ - my_cond_timedwait(&cond, &mutex.m_mutex, &abstime); - mysql_mutex_unlock(&mutex); - log_sys.latch.wr_lock(SRW_LOCK_CALL); - mysql_mutex_lock(&mutex); - } - continue; + if (apply_batch(space_id, space, free_block, last_batch)) + break; } - if (is_corrupt_fs() && !srv_force_recovery) - sql_print_information("InnoDB: Set innodb_force_recovery=1" - " to ignore corrupted pages."); - return; + } + + if (space) + space->release(); + + if (free_block) + { + mysql_mutex_lock(&buf_pool.mutex); + buf_LRU_block_free_non_file_page(free_block); + mysql_mutex_unlock(&buf_pool.mutex); } } @@ -3526,21 +3809,22 @@ next_free_block: mysql_mutex_unlock(&mutex); - if (last_batch && srv_operation != SRV_OPERATION_RESTORE && - srv_operation != SRV_OPERATION_RESTORE_EXPORT) - /* Instead of flushing, last_batch sorts the buf_pool.flush_list - in ascending order of buf_page_t::oldest_modification. */ - log_sort_flush_list(); - else - buf_flush_sync_batch(lsn); - if (!last_batch) { + buf_flush_sync_batch(lsn); buf_pool_invalidate(); log_sys.latch.wr_lock(SRW_LOCK_CALL); } + else if (srv_operation == SRV_OPERATION_RESTORE || + srv_operation == SRV_OPERATION_RESTORE_EXPORT) + buf_flush_sync_batch(lsn); + else + /* Instead of flushing, last_batch sorts the buf_pool.flush_list + in ascending order of buf_page_t::oldest_modification. */ + log_sort_flush_list(); + #ifdef HAVE_PMEM - else if (log_sys.is_pmem()) + if (last_batch && log_sys.is_pmem()) mprotect(log_sys.buf, len, PROT_READ | PROT_WRITE); #endif @@ -3550,35 +3834,24 @@ next_free_block: clear(); } -/** Check whether the number of read redo log blocks exceeds the maximum. 
-@return whether the memory is exhausted */ -inline bool recv_sys_t::is_memory_exhausted() -{ - if (UT_LIST_GET_LEN(blocks) * 3 < buf_pool.get_n_pages()) - return false; - DBUG_PRINT("ib_log",("Ran out of memory and last stored lsn " LSN_PF - " last stored offset %zu\n", lsn, offset)); - return true; -} - /** Scan log_t::FORMAT_10_8 log store records to the parsing buffer. @param last_phase whether changes can be applied to the tablespaces @return whether rescan is needed (not everything was stored) */ static bool recv_scan_log(bool last_phase) { DBUG_ENTER("recv_scan_log"); - DBUG_ASSERT(!last_phase || recv_sys.file_checkpoint); ut_ad(log_sys.is_latest()); const size_t block_size_1{log_sys.get_block_size() - 1}; mysql_mutex_lock(&recv_sys.mutex); - recv_sys.clear(); ut_d(recv_sys.after_apply= last_phase); - ut_ad(!last_phase || recv_sys.file_checkpoint); + if (!last_phase) + recv_sys.clear(); + else + ut_ad(recv_sys.file_checkpoint); - store_t store= last_phase - ? STORE_IF_EXISTS : recv_sys.file_checkpoint ? STORE_YES : STORE_NO; + bool store{recv_sys.file_checkpoint != 0}; size_t buf_size= log_sys.buf_size; #ifdef HAVE_PMEM if (log_sys.is_pmem()) @@ -3595,6 +3868,7 @@ static bool recv_scan_log(bool last_phase) recv_sys.len= 0; } + lsn_t rewound_lsn= 0; for (ut_d(lsn_t source_offset= 0);;) { #ifndef SUX_LOCK_GENERIC @@ -3642,27 +3916,29 @@ static bool recv_scan_log(bool last_phase) if (UNIV_UNLIKELY(!recv_needed_recovery)) { - ut_ad(store == (recv_sys.file_checkpoint ? STORE_YES : STORE_NO)); + ut_ad(!last_phase); ut_ad(recv_sys.lsn >= log_sys.next_checkpoint_lsn); - for (;;) + if (!store) { - const byte& b{log_sys.buf[recv_sys.offset]}; - r= recv_sys.parse_pmem(store); - if (r == recv_sys_t::OK) + ut_ad(!recv_sys.file_checkpoint); + for (;;) { - if (store == STORE_NO && - (b == FILE_CHECKPOINT + 2 + 8 || (b & 0xf0) == FILE_MODIFY)) - continue; - } - else if (r == recv_sys_t::PREMATURE_EOF) - goto read_more; - else if (store != STORE_NO) - break; + const byte& b{log_sys.buf[recv_sys.offset]}; + r= recv_sys.parse_pmem(false); + switch (r) { + case recv_sys_t::PREMATURE_EOF: + goto read_more; + default: + ut_ad(r == recv_sys_t::GOT_EOF); + break; + case recv_sys_t::OK: + if (b == FILE_CHECKPOINT + 2 + 8 || (b & 0xf0) == FILE_MODIFY) + continue; + } - if (store == STORE_NO) - { const lsn_t end{recv_sys.file_checkpoint}; + ut_ad(!end || end == recv_sys.lsn); mysql_mutex_unlock(&recv_sys.mutex); if (!end) @@ -3672,45 +3948,73 @@ static bool recv_scan_log(bool last_phase) ") at " LSN_PF, log_sys.next_checkpoint_lsn, recv_sys.lsn); } - else - ut_ad(end == recv_sys.lsn); DBUG_RETURN(true); } - - recv_needed_recovery= true; - if (srv_read_only_mode) - { - mysql_mutex_unlock(&recv_sys.mutex); - DBUG_RETURN(false); + } + else + { + ut_ad(recv_sys.file_checkpoint != 0); + switch ((r= recv_sys.parse_pmem(false))) { + case recv_sys_t::PREMATURE_EOF: + goto read_more; + case recv_sys_t::GOT_EOF: + break; + default: + ut_ad(r == recv_sys_t::OK); + recv_needed_recovery= true; + if (srv_read_only_mode) + { + mysql_mutex_unlock(&recv_sys.mutex); + DBUG_RETURN(false); + } + sql_print_information("InnoDB: Starting crash recovery from" + " checkpoint LSN=" LSN_PF, + log_sys.next_checkpoint_lsn); } - sql_print_information("InnoDB: Starting crash recovery from" - " checkpoint LSN=" LSN_PF, - log_sys.next_checkpoint_lsn); - break; } } - while ((r= recv_sys.parse_pmem(store)) == recv_sys_t::OK) + if (!store) + skip_the_rest: + while ((r= recv_sys.parse_pmem(false)) == recv_sys_t::OK); + else { - if (store != 
STORE_NO && recv_sys.is_memory_exhausted()) + uint16_t count= 0; + while ((r= recv_sys.parse_pmem(last_phase)) == recv_sys_t::OK) + if (!++count && recv_sys.report(time(nullptr))) + { + const size_t n= recv_sys.pages.size(); + sql_print_information("InnoDB: Parsed redo log up to LSN=" LSN_PF + "; to recover: %zu pages", recv_sys.lsn, n); + service_manager_extend_timeout(INNODB_EXTEND_TIMEOUT_INTERVAL, + "Parsed redo log up to LSN=" LSN_PF + "; to recover: %zu pages", + recv_sys.lsn, n); + } + if (r == recv_sys_t::GOT_OOM) { - ut_ad(last_phase == (store == STORE_IF_EXISTS)); - if (store == STORE_YES) - { - store= STORE_NO; - recv_sys.last_stored_lsn= recv_sys.lsn; - } - else - { - ut_ad(store == STORE_IF_EXISTS); - recv_sys.apply(false); - } + ut_ad(!last_phase); + rewound_lsn= recv_sys.lsn; + store= false; + if (recv_sys.scanned_lsn <= 1) + goto skip_the_rest; + ut_ad(recv_sys.file_checkpoint); + goto func_exit; } } if (r != recv_sys_t::PREMATURE_EOF) { ut_ad(r == recv_sys_t::GOT_EOF); + got_eof: + ut_ad(recv_sys.is_initialised()); + if (recv_sys.scanned_lsn > 1) + { + ut_ad(recv_sys.scanned_lsn == recv_sys.lsn); + break; + } + recv_sys.scanned_lsn= recv_sys.lsn; + sql_print_information("InnoDB: End of log at LSN=" LSN_PF, recv_sys.lsn); break; } @@ -3723,7 +4027,7 @@ static bool recv_scan_log(bool last_phase) break; if (recv_sys.offset < log_sys.get_block_size()) - break; + goto got_eof; if (recv_sys.offset > buf_size / 4 || (recv_sys.offset > block_size_1 && @@ -3736,21 +4040,21 @@ static bool recv_scan_log(bool last_phase) } } - const bool corrupt= recv_sys.is_corrupt_log() || recv_sys.is_corrupt_fs(); - recv_sys.maybe_finish_batch(); if (last_phase) + { + ut_ad(!rewound_lsn); + ut_ad(recv_sys.lsn >= recv_sys.file_checkpoint); log_sys.set_recovered_lsn(recv_sys.lsn); + } + else if (rewound_lsn) + { + ut_ad(!store); + ut_ad(recv_sys.file_checkpoint); + recv_sys.lsn= rewound_lsn; + } +func_exit: mysql_mutex_unlock(&recv_sys.mutex); - - if (corrupt) - DBUG_RETURN(false); - - DBUG_PRINT("ib_log", - ("%s " LSN_PF " completed", last_phase ? "rescan" : "scan", - recv_sys.lsn)); - ut_ad(!last_phase || recv_sys.lsn >= recv_sys.file_checkpoint); - - DBUG_RETURN(store == STORE_NO); + DBUG_RETURN(!store); } /** Report a missing tablespace for which page-redo log exists. @@ -3846,8 +4150,8 @@ next: /* fall through */ case file_name_t::DELETED: recv_sys_t::map::iterator r = p++; - r->second.log.clear(); - recv_sys.pages.erase(r); + recv_sys.pages_it_invalidate(r); + recv_sys.erase(r); continue; } ut_ad(0); @@ -3871,8 +4175,6 @@ func_exit: continue; } - missing_tablespace = true; - if (srv_force_recovery) { sql_print_warning("InnoDB: Tablespace " UINT32PF " was not found at %.*s," @@ -3892,14 +4194,11 @@ func_exit: rs.first, int(rs.second.name.size()), rs.second.name.data()); + } else { + missing_tablespace = true; } } - if (!rescan || srv_force_recovery > 0) { - missing_tablespace = false; - } - - err = DB_SUCCESS; goto func_exit; } @@ -4133,35 +4432,41 @@ read_only_recovery: goto early_exit; } - /* If there is any missing tablespace and rescan is needed - then there is a possiblity that hash table will not contain - all space ids redo logs. Rescan the remaining unstored - redo logs for the validation of missing tablespace. */ - ut_ad(rescan || !missing_tablespace); + if (missing_tablespace) { + ut_ad(rescan); + /* If any tablespaces seem to be missing, + validate the remaining log records. 
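The parse loop above throttles its progress messages twice over: a uint16_t counter wraps to zero once every 65,536 mini-transactions, and report() (earlier in this patch) additionally requires at least 15 seconds since the previous message. A compact illustration of that double throttle; the parsing work itself is simulated.

#include <cstdint>
#include <ctime>
#include <cstdio>

// Mirrors recv_sys_t::report(): at most one message per 15 seconds.
bool report(time_t now)
{
  static time_t progress_time= 0;
  if (now - progress_time < 15)
    return false;
  progress_time= now;
  return true;
}

int main()
{
  uint16_t count= 0;
  for (unsigned long i= 0; i < 1000000; i++)   // pretend parse loop
  {
    /* ... parse one mini-transaction ... */
    // counter wraps every 65,536 iterations; report() adds the 15 s rule
    if (!++count && report(time(nullptr)))
      std::printf("parsed %lu mini-transactions so far\n", i + 1);
  }
}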
*/ - while (missing_tablespace) { - recv_sys.lsn = recv_sys.last_stored_lsn; - DBUG_PRINT("ib_log", ("Rescan of redo log to validate " - "the missing tablespace. Scan " - "from last stored LSN " LSN_PF, - recv_sys.lsn)); - rescan = recv_scan_log(false); - ut_ad(!recv_sys.is_corrupt_fs()); + do { + rescan = recv_scan_log(false); + ut_ad(!recv_sys.is_corrupt_fs()); - missing_tablespace = false; + if (recv_sys.is_corrupt_log()) { + goto err_exit; + } - if (recv_sys.is_corrupt_log()) { - goto err_exit; - } + missing_tablespace = false; - err = recv_validate_tablespace( - rescan, missing_tablespace); + err = recv_validate_tablespace( + rescan, missing_tablespace); - if (err != DB_SUCCESS) { - goto early_exit; - } + if (err != DB_SUCCESS) { + goto early_exit; + } + } while (missing_tablespace); rescan = true; + /* Because in the loop above we overwrote the + initially stored recv_sys.pages, we must + restart parsing the log from the very beginning. */ + + /* FIXME: Use a separate loop for checking for + tablespaces (not individual pages), while retaining + the initial recv_sys.pages. */ + mysql_mutex_lock(&recv_sys.mutex); + recv_sys.clear(); + recv_sys.lsn = log_sys.next_checkpoint_lsn; + mysql_mutex_unlock(&recv_sys.mutex); } if (srv_operation <= SRV_OPERATION_EXPORT_RESTORED) { @@ -4172,8 +4477,7 @@ read_only_recovery: ut_ad(srv_force_recovery <= SRV_FORCE_NO_UNDO_LOG_SCAN); if (rescan) { - recv_sys.lsn = log_sys.next_checkpoint_lsn; - rescan = recv_scan_log(true); + recv_scan_log(true); if ((recv_sys.is_corrupt_log() && !srv_force_recovery) || recv_sys.is_corrupt_fs()) { diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index 08f525be314..562843c97d6 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -3397,15 +3397,12 @@ os_file_get_status( return(ret); } - -extern void fil_aio_callback(const IORequest &request); - -static void io_callback(tpool::aiocb *cb) +static void io_callback_errorcheck(const tpool::aiocb *cb) { - const IORequest &request= *static_cast<const IORequest*> - (static_cast<const void*>(cb->m_userdata)); if (cb->m_err != DB_SUCCESS) { + const IORequest &request= *static_cast<const IORequest*> + (static_cast<const void*>(cb->m_userdata)); ib::fatal() << "IO Error: " << cb->m_err << " during " << (request.is_async() ? "async " : "sync ") << (request.is_LRU() ? 
"lru " : "") << @@ -3413,19 +3410,36 @@ static void io_callback(tpool::aiocb *cb) " of " << cb->m_len << " bytes, for file " << cb->m_fh << ", returned " << cb->m_ret_len; } - /* Return cb back to cache*/ - if (cb->m_opcode == tpool::aio_opcode::AIO_PREAD) - { - ut_ad(read_slots->contains(cb)); - fil_aio_callback(request); - read_slots->release(cb); - } - else - { - ut_ad(write_slots->contains(cb)); - fil_aio_callback(request); - write_slots->release(cb); - } +} + +static void fake_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast<tpool::aiocb*>(c); + ut_ad(read_slots->contains(cb)); + static_cast<const IORequest*>(static_cast<const void*>(cb->m_userdata))-> + fake_read_complete(cb->m_offset); + read_slots->release(cb); +} + +static void read_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast<tpool::aiocb*>(c); + ut_ad(cb->m_opcode == tpool::aio_opcode::AIO_PREAD); + io_callback_errorcheck(cb); + ut_ad(read_slots->contains(cb)); + static_cast<const IORequest*> + (static_cast<const void*>(cb->m_userdata))->read_complete(); + read_slots->release(cb); +} + +static void write_io_callback(void *c) +{ + tpool::aiocb *cb= static_cast<tpool::aiocb*>(c); + ut_ad(cb->m_opcode == tpool::aio_opcode::AIO_PWRITE); + ut_ad(write_slots->contains(cb)); + static_cast<const IORequest*> + (static_cast<const void*>(cb->m_userdata))->write_complete(); + write_slots->release(cb); } #ifdef LINUX_NATIVE_AIO @@ -3728,6 +3742,28 @@ void os_aio_wait_until_no_pending_reads(bool declare) tpool::tpool_wait_end(); } +/** Submit a fake read request during crash recovery. +@param type fake read request +@param offset additional context */ +void os_fake_read(const IORequest &type, os_offset_t offset) +{ + tpool::aiocb *cb= read_slots->acquire(); + + cb->m_group= read_slots->get_task_group(); + cb->m_fh= type.node->handle.m_file; + cb->m_buffer= nullptr; + cb->m_len= 0; + cb->m_offset= offset; + cb->m_opcode= tpool::aio_opcode::AIO_PREAD; + new (cb->m_userdata) IORequest{type}; + cb->m_internal_task.m_func= fake_io_callback; + cb->m_internal_task.m_arg= cb; + cb->m_internal_task.m_group= cb->m_group; + + srv_thread_pool->submit_task(&cb->m_internal_task); +} + + /** Request a read or write. @param type I/O request @param buf buffer @@ -3773,23 +3809,32 @@ func_exit: return err; } + io_slots* slots; + tpool::callback_func callback; + tpool::aio_opcode opcode; + if (type.is_read()) { ++os_n_file_reads; + slots = read_slots; + callback = read_io_callback; + opcode = tpool::aio_opcode::AIO_PREAD; } else { ++os_n_file_writes; + slots = write_slots; + callback = write_io_callback; + opcode = tpool::aio_opcode::AIO_PWRITE; } compile_time_assert(sizeof(IORequest) <= tpool::MAX_AIO_USERDATA_LEN); - io_slots* slots= type.is_read() ? read_slots : write_slots; tpool::aiocb* cb = slots->acquire(); cb->m_buffer = buf; - cb->m_callback = (tpool::callback_func)io_callback; + cb->m_callback = callback; cb->m_group = slots->get_task_group(); cb->m_fh = type.node->handle.m_file; cb->m_len = (int)n; cb->m_offset = offset; - cb->m_opcode = type.is_read() ? tpool::aio_opcode::AIO_PREAD : tpool::aio_opcode::AIO_PWRITE; + cb->m_opcode = opcode; new (cb->m_userdata) IORequest{type}; if (srv_thread_pool->submit_io(cb)) { @@ -3797,6 +3842,7 @@ func_exit: os_file_handle_error(type.node->name, type.is_read() ? 
"aio read" : "aio write"); err = DB_IO_ERROR; + type.node->space->release(); } goto func_exit; diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index eca839fa7be..d5ff314bc2e 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -213,14 +213,14 @@ row_ins_sec_index_entry_by_modify( made to the clustered index, and completed the secondary index creation before we got here. In this case, the change would already be there. The CREATE - INDEX should be waiting for a MySQL meta-data lock - upgrade at least until this INSERT or UPDATE - returns. After that point, set_committed(true) - would be invoked in commit_inplace_alter_table(). */ + INDEX should be in wait_while_table_is_used() at least + until this INSERT or UPDATE returns. After that point, + set_committed(true) would be invoked in + commit_inplace_alter_table(). */ ut_a(update->n_fields == 0); - ut_a(!cursor->index()->is_committed()); ut_ad(!dict_index_is_online_ddl(cursor->index())); - return(DB_SUCCESS); + return cursor->index()->is_committed() + ? DB_CORRUPTION : DB_SUCCESS; } if (mode == BTR_MODIFY_LEAF) { diff --git a/storage/innobase/row/row0purge.cc b/storage/innobase/row/row0purge.cc index f716625ea59..cc9a45ddc69 100644 --- a/storage/innobase/row/row0purge.cc +++ b/storage/innobase/row/row0purge.cc @@ -47,6 +47,7 @@ Created 3/14/1997 Heikki Tuuri #include "handler.h" #include "ha_innodb.h" #include "fil0fil.h" +#include "debug_sync.h" #include /************************************************************************* @@ -568,6 +569,8 @@ bool row_purge_del_mark(purge_node_t *node) const auto type= node->index->type; if (type & (DICT_FTS | DICT_CORRUPT)) continue; + if (node->index->online_status > ONLINE_INDEX_CREATION) + continue; if (UNIV_UNLIKELY(DICT_VIRTUAL & type) && !node->index->is_committed() && node->index->has_new_v_col()) continue; @@ -582,7 +585,17 @@ bool row_purge_del_mark(purge_node_t *node) mem_heap_free(heap); } - return row_purge_remove_clust_if_poss(node); + bool result= row_purge_remove_clust_if_poss(node); + +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("enable_row_purge_del_mark_exit_sync_point", + debug_sync_set_action + (current_thd, + STRING_WITH_LEN("now SIGNAL row_purge_del_mark_finished")); + ); +#endif + + return result; } void purge_sys_t::wait_SYS() @@ -720,6 +733,11 @@ row_purge_upd_exist_or_extern_func( continue; } + if (node->index->online_status + > ONLINE_INDEX_CREATION) { + continue; + } + if (row_upd_changes_ord_field_binary(node->index, node->update, thr, NULL, NULL)) { /* Build the older version of the index entry */ @@ -796,6 +814,9 @@ skip_secondaries: buf_page_get(page_id_t(rseg.space->id, page_no), 0, RW_X_LATCH, &mtr)) { + block->page.set_accessed(); + buf_page_make_young_if_needed(&block->page); + byte* data_field = block->page.frame + offset + internal_offset; diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 04db19a0256..38548dea1b5 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -906,9 +906,6 @@ srv_export_innodb_status(void) export_vars.innodb_data_written = srv_stats.data_written + (dblwr << srv_page_size_shift); - export_vars.innodb_buffer_pool_read_requests - = buf_pool.stat.n_page_gets; - export_vars.innodb_buffer_pool_bytes_data = buf_pool.stat.LRU_bytes + (UT_LIST_GET_LEN(buf_pool.unzip_LRU) @@ -1147,17 +1144,14 @@ static tpool::waitable_task purge_coordinator_task static tpool::timer *purge_coordinator_timer; /** Wake up the purge threads 
if there is work to do. */ -void -srv_wake_purge_thread_if_not_active() +void srv_wake_purge_thread_if_not_active() { - ut_ad(!srv_read_only_mode); + ut_ad(!srv_read_only_mode); - if (purge_sys.enabled() && !purge_sys.paused() - && trx_sys.history_exists()) { - if(++purge_state.m_running == 1) { - srv_thread_pool->submit_task(&purge_coordinator_task); - } - } + if (purge_sys.enabled() && !purge_sys.paused() && + (srv_undo_log_truncate || trx_sys.history_exists()) && + ++purge_state.m_running == 1) + srv_thread_pool->submit_task(&purge_coordinator_task); } /** @return whether the purge tasks are active */ @@ -1498,13 +1492,20 @@ fewer_threads: m_history_length= history_size; if (!history_size) + { srv_dml_needed_delay= 0; - else if (trx_purge(n_use_threads, history_size, - !(++count % srv_purge_rseg_truncate_frequency) || - purge_sys.truncate.current || - (srv_shutdown_state != SRV_SHUTDOWN_NONE && - srv_fast_shutdown == 0))) - continue; + trx_purge_truncate_history(); + } + else + { + ulint n_pages_handled= trx_purge(n_use_threads, history_size); + if (!(++count % srv_purge_rseg_truncate_frequency) || + purge_sys.truncate.current || + (srv_shutdown_state != SRV_SHUTDOWN_NONE && srv_fast_shutdown == 0)) + trx_purge_truncate_history(); + if (n_pages_handled) + continue; + } if (srv_dml_needed_delay); else if (m_running == sigcount) diff --git a/storage/innobase/trx/trx0purge.cc b/storage/innobase/trx/trx0purge.cc index ed84e5f7b30..d601f30eb04 100644 --- a/storage/innobase/trx/trx0purge.cc +++ b/storage/innobase/trx/trx0purge.cc @@ -246,126 +246,119 @@ Remove the undo log segment from the rseg slot if it is too big for reuse. void trx_purge_add_undo_to_history(const trx_t* trx, trx_undo_t*& undo, mtr_t* mtr) { - DBUG_PRINT("trx", ("commit(" TRX_ID_FMT "," TRX_ID_FMT ")", - trx->id, trx_id_t{trx->rw_trx_hash_element->no})); - ut_ad(undo == trx->rsegs.m_redo.undo); - trx_rseg_t* rseg = trx->rsegs.m_redo.rseg; - ut_ad(undo->rseg == rseg); - buf_block_t* rseg_header = rseg->get(mtr, nullptr); - /* We are in transaction commit; we cannot return an error. If the - database is corrupted, it is better to crash it than to - intentionally violate ACID by committing something that is known to - be corrupted. */ - ut_ad(rseg_header); - buf_block_t* undo_page = trx_undo_set_state_at_finish( - undo, mtr); - trx_ulogf_t* undo_header = undo_page->page.frame - + undo->hdr_offset; + DBUG_PRINT("trx", ("commit(" TRX_ID_FMT "," TRX_ID_FMT ")", + trx->id, trx_id_t{trx->rw_trx_hash_element->no})); + ut_ad(undo->id < TRX_RSEG_N_SLOTS); + ut_ad(undo == trx->rsegs.m_redo.undo); + trx_rseg_t *rseg= trx->rsegs.m_redo.rseg; + ut_ad(undo->rseg == rseg); + buf_block_t *rseg_header= rseg->get(mtr, nullptr); + /* We are in transaction commit; we cannot return an error. If the + database is corrupted, it is better to crash it than to + intentionally violate ACID by committing something that is known to + be corrupted. */ + ut_ad(rseg_header); + buf_block_t *undo_page= + buf_page_get(page_id_t(rseg->space->id, undo->hdr_page_no), 0, + RW_X_LATCH, mtr); + /* This function is invoked during transaction commit, which is not + allowed to fail. If we get a corrupted undo header, we will crash here. */ + ut_a(undo_page); + trx_ulogf_t *undo_header= undo_page->page.frame + undo->hdr_offset; - ut_ad(rseg->needs_purge > trx->id); + ut_ad(rseg->needs_purge > trx->id); - if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT - + rseg_header->page.frame))) { - /* This database must have been upgraded from - before MariaDB 10.3.5. 
*/ - trx_rseg_format_upgrade(rseg_header, mtr); - } + if (rseg->last_page_no == FIL_NULL) + { + rseg->last_page_no= undo->hdr_page_no; + rseg->set_last_commit(undo->hdr_offset, trx->rw_trx_hash_element->no); + } - if (undo->state != TRX_UNDO_CACHED) { - /* The undo log segment will not be reused */ - ut_a(undo->id < TRX_RSEG_N_SLOTS); - static_assert(FIL_NULL == 0xffffffff, ""); - mtr->memset(rseg_header, - TRX_RSEG + TRX_RSEG_UNDO_SLOTS - + undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff); + rseg->history_size++; - uint32_t hist_size = mach_read_from_4( - TRX_RSEG_HISTORY_SIZE + TRX_RSEG - + rseg_header->page.frame); + if (UNIV_UNLIKELY(mach_read_from_4(TRX_RSEG + TRX_RSEG_FORMAT + + rseg_header->page.frame))) + /* This database must have been upgraded from before MariaDB 10.3.5. */ + trx_rseg_format_upgrade(rseg_header, mtr); - ut_ad(undo->size == flst_get_len(TRX_UNDO_SEG_HDR - + TRX_UNDO_PAGE_LIST - + undo_page->page.frame)); + uint16_t undo_state; - mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_HISTORY_SIZE - + rseg_header->page.frame, - hist_size + undo->size); - mtr->write<8>(*rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID - + rseg_header->page.frame, - trx_sys.get_max_trx_id()); - } + if (undo->size == 1 && + TRX_UNDO_PAGE_REUSE_LIMIT > + mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + + undo_page->page.frame)) + { + undo->state= undo_state= TRX_UNDO_CACHED; + UT_LIST_ADD_FIRST(rseg->undo_cached, undo); + } + else + { + ut_ad(undo->size == flst_get_len(TRX_UNDO_SEG_HDR + TRX_UNDO_PAGE_LIST + + undo_page->page.frame)); + /* The undo log segment will not be reused */ + static_assert(FIL_NULL == 0xffffffff, ""); + mtr->memset(rseg_header, TRX_RSEG + TRX_RSEG_UNDO_SLOTS + + undo->id * TRX_RSEG_SLOT_SIZE, 4, 0xff); + uint32_t hist_size= mach_read_from_4(TRX_RSEG_HISTORY_SIZE + TRX_RSEG + + rseg_header->page.frame); + mtr->write<4>(*rseg_header, TRX_RSEG + TRX_RSEG_HISTORY_SIZE + + rseg_header->page.frame, hist_size + undo->size); + mtr->write<8>(*rseg_header, TRX_RSEG + TRX_RSEG_MAX_TRX_ID + + rseg_header->page.frame, trx_sys.get_max_trx_id()); + ut_free(undo); + undo_state= TRX_UNDO_TO_PURGE; + } - /* After the purge thread has been given permission to exit, - we may roll back transactions (trx->undo_no==0) - in THD::cleanup() invoked from unlink_thd() in fast shutdown, - or in trx_rollback_recovered() in slow shutdown. + undo= nullptr; - Before any transaction-generating background threads or the - purge have been started, we can - start transactions in row_merge_drop_temp_indexes(), - and roll back recovered transactions. + /* After the purge thread has been given permission to exit, + we may roll back transactions (trx->undo_no==0) + in THD::cleanup() invoked from unlink_thd() in fast shutdown, + or in trx_rollback_recovered() in slow shutdown. - Arbitrary user transactions may be executed when all the undo log - related background processes (including purge) are disabled due to - innodb_force_recovery=2 or innodb_force_recovery=3. - DROP TABLE may be executed at any innodb_force_recovery level. + Before any transaction-generating background threads or the purge + have been started, we can start transactions in + row_merge_drop_temp_indexes(), and roll back recovered transactions. - During fast shutdown, we may also continue to execute - user transactions. 
*/ - ut_ad(srv_undo_sources - || trx->undo_no == 0 - || (!purge_sys.enabled() - && (srv_is_being_started - || trx_rollback_is_active - || srv_force_recovery >= SRV_FORCE_NO_BACKGROUND)) - || srv_fast_shutdown); + Arbitrary user transactions may be executed when all the undo log + related background processes (including purge) are disabled due to + innodb_force_recovery=2 or innodb_force_recovery=3. DROP TABLE may + be executed at any innodb_force_recovery level. -#ifdef WITH_WSREP - if (wsrep_is_wsrep_xid(&trx->xid)) { - trx_rseg_update_wsrep_checkpoint(rseg_header, &trx->xid, mtr); - } + During fast shutdown, we may also continue to execute user + transactions. */ + ut_ad(srv_undo_sources || trx->undo_no == 0 || + (!purge_sys.enabled() && + (srv_is_being_started || + trx_rollback_is_active || + srv_force_recovery >= SRV_FORCE_NO_BACKGROUND)) || + srv_fast_shutdown); + +#ifdef WITH_WSREP + if (wsrep_is_wsrep_xid(&trx->xid)) + trx_rseg_update_wsrep_checkpoint(rseg_header, &trx->xid, mtr); #endif - if (trx->mysql_log_file_name && *trx->mysql_log_file_name) { - /* Update the latest MySQL binlog name and offset info - in rollback segment header if MySQL binlogging is on - or the database server is a MySQL replication save. */ - trx_rseg_update_binlog_offset( - rseg_header, trx->mysql_log_file_name, - trx->mysql_log_offset, mtr); - } + if (trx->mysql_log_file_name && *trx->mysql_log_file_name) + /* Update the latest binlog name and offset if log_bin=ON or this + is a replica. */ + trx_rseg_update_binlog_offset(rseg_header, trx->mysql_log_file_name, + trx->mysql_log_offset, mtr); - /* Add the log as the first in the history list */ + /* Add the log as the first in the history list */ - /* We are in transaction commit; we cannot return an error - when detecting corruption. It is better to crash the server - than to intentionally violate ACID by committing something - that is known to be corrupted. */ - ut_a(flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY, undo_page, - static_cast(undo->hdr_offset - + TRX_UNDO_HISTORY_NODE), - mtr) == DB_SUCCESS); + /* We are in transaction commit; we cannot return an error + when detecting corruption. It is better to crash the server + than to intentionally violate ACID by committing something + that is known to be corrupted. */ + ut_a(flst_add_first(rseg_header, TRX_RSEG + TRX_RSEG_HISTORY, undo_page, + uint16_t(page_offset(undo_header) + + TRX_UNDO_HISTORY_NODE), mtr) == DB_SUCCESS); - mtr->write<8,mtr_t::MAYBE_NOP>(*undo_page, - undo_header + TRX_UNDO_TRX_NO, - trx->rw_trx_hash_element->no); - - if (rseg->last_page_no == FIL_NULL) { - rseg->last_page_no = undo->hdr_page_no; - rseg->set_last_commit(undo->hdr_offset, - trx->rw_trx_hash_element->no); - } - - rseg->history_size++; - - if (undo->state == TRX_UNDO_CACHED) { - UT_LIST_ADD_FIRST(rseg->undo_cached, undo); - } else { - ut_ad(undo->state == TRX_UNDO_TO_PURGE); - ut_free(undo); - } - - undo = NULL; + mtr->write<2>(*undo_page, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + + undo_page->page.frame, undo_state); + mtr->write<8,mtr_t::MAYBE_NOP>(*undo_page, undo_header + TRX_UNDO_TRX_NO, + trx->rw_trx_hash_element->no); } /** Free an undo log segment. @@ -565,10 +558,11 @@ __attribute__((optimize(0))) # endif #endif /** -Removes unnecessary history data from rollback segments. NOTE that when this -function is called, the caller must not have any latches on undo log pages! +Remove unnecessary history data from rollback segments. 
NOTE that when this +function is called, the caller (purge_coordinator_callback) +must not have any latches on undo log pages! */ -TRANSACTIONAL_TARGET static void trx_purge_truncate_history() +TRANSACTIONAL_TARGET void trx_purge_truncate_history() { ut_ad(purge_sys.head <= purge_sys.tail); purge_sys_t::iterator &head= purge_sys.head.trx_no @@ -590,7 +584,7 @@ TRANSACTIONAL_TARGET static void trx_purge_truncate_history() if (dberr_t e= trx_purge_truncate_rseg_history(rseg, head, !rseg.is_referenced() && - rseg.needs_purge <= head.trx_no)) + purge_sys.sees(rseg.needs_purge))) err= e; rseg.latch.wr_unlock(); } @@ -648,7 +642,7 @@ TRANSACTIONAL_TARGET static void trx_purge_truncate_history() rseg.latch.rd_lock(SRW_LOCK_CALL); ut_ad(rseg.skip_allocation()); - if (rseg.is_referenced() || rseg.needs_purge > head.trx_no) + if (rseg.is_referenced() || !purge_sys.sees(rseg.needs_purge)) { not_free: rseg.latch.rd_unlock(); @@ -662,7 +656,7 @@ not_free: for (const trx_undo_t *undo= UT_LIST_GET_FIRST(rseg.undo_cached); undo; undo= UT_LIST_GET_NEXT(undo_list, undo)) { - if (head.trx_no < undo->trx_id) + if (head.trx_no && head.trx_no < undo->trx_id) goto not_free; else cached+= undo->size; @@ -795,7 +789,7 @@ not_free: continue; ut_ad(!rseg.is_referenced()); - ut_ad(rseg.needs_purge <= head.trx_no); + ut_ad(!head.trx_no || rseg.needs_purge <= head.trx_no); buf_block_t *rblock= trx_rseg_header_create(&space, &rseg - trx_sys.rseg_array, @@ -1264,10 +1258,8 @@ TRANSACTIONAL_INLINE void purge_sys_t::clone_end_view() Run a purge batch. @param n_tasks number of purge tasks to submit to the queue @param history_size trx_sys.history_size() -@param truncate whether to truncate the history at the end of the batch @return number of undo log pages handled in the batch */ -TRANSACTIONAL_TARGET -ulint trx_purge(ulint n_tasks, ulint history_size, bool truncate) +TRANSACTIONAL_TARGET ulint trx_purge(ulint n_tasks, ulint history_size) { que_thr_t* thr = NULL; ulint n_pages_handled; @@ -1319,10 +1311,6 @@ ulint trx_purge(ulint n_tasks, ulint history_size, bool truncate) purge_sys.clone_end_view(); - if (truncate) { - trx_purge_truncate_history(); - } - MONITOR_INC_VALUE(MONITOR_PURGE_INVOKED, 1); MONITOR_INC_VALUE(MONITOR_PURGE_N_PAGE_HANDLED, n_pages_handled); diff --git a/storage/innobase/trx/trx0undo.cc b/storage/innobase/trx/trx0undo.cc index cbbf316fe69..6404a8d80a1 100644 --- a/storage/innobase/trx/trx0undo.cc +++ b/storage/innobase/trx/trx0undo.cc @@ -1458,37 +1458,6 @@ template buf_block_t* trx_undo_assign_low(trx_t *trx, trx_rseg_t *rseg, trx_undo_t **undo, mtr_t *mtr, dberr_t *err); -/******************************************************************//** -Sets the state of the undo log segment at a transaction finish. -@return undo log segment header page, x-latched */ -buf_block_t* -trx_undo_set_state_at_finish( -/*=========================*/ - trx_undo_t* undo, /*!< in: undo log memory copy */ - mtr_t* mtr) /*!< in: mtr */ -{ - ut_ad(undo->id < TRX_RSEG_N_SLOTS); - ut_ad(undo->rseg->is_persistent()); - - buf_block_t *block= - buf_page_get(page_id_t(undo->rseg->space->id, undo->hdr_page_no), 0, - RW_X_LATCH, mtr); - /* This function is invoked during transaction commit, which is not - allowed to fail. If we get a corrupted undo header, we will crash here. */ - ut_a(block); - const uint16_t state = undo->size == 1 && - TRX_UNDO_PAGE_REUSE_LIMIT > - mach_read_from_2(TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_FREE + - block->page.frame) - ? 
TRX_UNDO_CACHED - : TRX_UNDO_TO_PURGE; - - undo->state= state; - mtr->write<2>(*block, TRX_UNDO_SEG_HDR + TRX_UNDO_STATE + block->page.frame, - state); - return block; -} - /** Set the state of the undo log segment at a XA PREPARE or XA ROLLBACK. @param[in,out] trx transaction @param[in,out] undo undo log diff --git a/storage/myisam/mi_open.c b/storage/myisam/mi_open.c index b80c2b69f16..8b82a71ff7c 100644 --- a/storage/myisam/mi_open.c +++ b/storage/myisam/mi_open.c @@ -518,6 +518,7 @@ MI_INFO *mi_open(const char *name, int mode, uint open_flags) share->kfile=kfile; share->this_process=(ulong) getpid(); share->last_process= share->state.process; + share->base.base_key_parts= base_key_parts; share->base.key_parts=key_parts; share->base.all_key_parts=key_parts+unique_key_parts; if (!(share->last_version=share->state.version)) diff --git a/storage/myisam/myisamdef.h b/storage/myisam/myisamdef.h index 5ede6a6159c..852df82c7e8 100644 --- a/storage/myisam/myisamdef.h +++ b/storage/myisam/myisamdef.h @@ -132,7 +132,7 @@ typedef struct st_mi_base_info uint extra_alloc_bytes; uint extra_alloc_procent; /* The following are from the header */ - uint key_parts, all_key_parts; + uint key_parts, all_key_parts, base_key_parts; } MI_BASE_INFO; diff --git a/storage/myisammrg/myrg_open.c b/storage/myisammrg/myrg_open.c index d9ea4b754f2..4a983684394 100644 --- a/storage/myisammrg/myrg_open.c +++ b/storage/myisammrg/myrg_open.c @@ -432,17 +432,20 @@ int myrg_attach_children(MYRG_INFO *m_info, int handle_locking, first_child= FALSE; m_info->reclength= myisam->s->base.reclength; min_keys= myisam->s->base.keys; - key_parts= myisam->s->base.key_parts; + key_parts= myisam->s->base.base_key_parts; if (*need_compat_check && m_info->rec_per_key_part) { my_free(m_info->rec_per_key_part); m_info->rec_per_key_part= NULL; } - if (!m_info->rec_per_key_part) + if (!m_info->rec_per_key_part || m_info->key_parts != key_parts) { - if(!(m_info->rec_per_key_part= (ulong*) - my_malloc(rg_key_memory_MYRG_INFO, - key_parts * sizeof(long), MYF(MY_WME)))) + m_info->key_parts= key_parts; + /* The +1 is because my_realloc() does not allow zero length */ + if (!(m_info->rec_per_key_part= (ulong*) + my_realloc(rg_key_memory_MYRG_INFO, m_info->rec_per_key_part, + key_parts * sizeof(long) +1, + MYF(MY_WME | MY_ALLOW_ZERO_PTR | MY_FREE_ON_ERROR)))) goto err; /* purecov: inspected */ errpos= 1; } @@ -457,7 +460,8 @@ int myrg_attach_children(MYRG_INFO *m_info, int handle_locking, myisam->open_flag|= HA_OPEN_MERGE_TABLE; /* Check table definition match. 
*/ - if (m_info->reclength != myisam->s->base.reclength) + if (m_info->reclength != myisam->s->base.reclength || + key_parts != myisam->s->base.base_key_parts) { DBUG_PRINT("error", ("definition mismatch table: '%s' repair: %d", myisam->filename, diff --git a/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test b/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test index 788ea2323f7..99e56ab062a 100644 --- a/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test +++ b/storage/spider/mysql-test/spider/bugfix/t/mdev_30370.test @@ -2,4 +2,7 @@ --echo # MDEV-30370 mariadbd hangs when running with --wsrep-recover and --plugin-load-add=ha_spider.so --echo # ---exec $MYSQLD_BOOTSTRAP_CMD --wsrep-recover --plugin-load-add=ha_spider.so +let $MYSQLD_DATADIR=$MYSQLTEST_VARDIR/mdev_30370; +--mkdir $MYSQLD_DATADIR +--exec $MYSQLD_BOOTSTRAP_CMD --wsrep-recover --plugin-load-add=ha_spider.so --datadir=$MYSQLD_DATADIR +--rmdir $MYSQLD_DATADIR diff --git a/strings/json_lib.c b/strings/json_lib.c index 903dec978b4..920fb1d4a89 100644 --- a/strings/json_lib.c +++ b/strings/json_lib.c @@ -1324,7 +1324,7 @@ int json_skip_key(json_engine_t *j) } -#define SKIPPED_STEP_MARK ((int) ~0) +#define SKIPPED_STEP_MARK INT_MAX32 /* Current step of the patch matches the JSON construction. diff --git a/support-files/rpm/server.cnf b/support-files/rpm/server.cnf index bf9ed9c4f46..1db6cd186e8 100644 --- a/support-files/rpm/server.cnf +++ b/support-files/rpm/server.cnf @@ -39,8 +39,8 @@ # you can put MariaDB-only options here [mariadb] -# This group is only read by MariaDB-10.8 servers. +# This group is only read by MariaDB-10.11 servers. # If you use the same .cnf file for MariaDB of different versions, # use this group for options that older servers don't understand -[mariadb-10.8] +[mariadb-10.11] diff --git a/wsrep-lib b/wsrep-lib index 4951c383577..e238c0d240c 160000 --- a/wsrep-lib +++ b/wsrep-lib @@ -1 +1 @@ -Subproject commit 4951c38357737d568b554402bc5b6abe88a38fe1 +Subproject commit e238c0d240c2557229b0523a4a032f3cf8b41639