mirror of
https://github.com/MariaDB/server.git
synced 2025-07-29 05:21:33 +03:00
Merge 10.11 into 11.0
This commit is contained in:
@ -1266,8 +1266,9 @@ static int get_options(int *argc, char ***argv)
|
||||
if (opt_slave_data)
|
||||
{
|
||||
opt_lock_all_tables= !opt_single_transaction;
|
||||
opt_master_data= 0;
|
||||
opt_delete_master_logs= 0;
|
||||
if (opt_slave_data != MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL)
|
||||
opt_master_data= 0;
|
||||
}
|
||||
|
||||
/* Ensure consistency of the set of binlog & locking options */
|
||||
@ -1280,10 +1281,7 @@ static int get_options(int *argc, char ***argv)
|
||||
return(EX_USAGE);
|
||||
}
|
||||
if (opt_master_data)
|
||||
{
|
||||
opt_lock_all_tables= !opt_single_transaction;
|
||||
opt_slave_data= 0;
|
||||
}
|
||||
if (opt_single_transaction || opt_lock_all_tables)
|
||||
lock_tables= 0;
|
||||
if (enclosed && opt_enclosed)
|
||||
@ -6220,17 +6218,12 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos,
|
||||
|
||||
}
|
||||
|
||||
/* SHOW MASTER STATUS reports file and position */
|
||||
print_comment(md_result_file, 0,
|
||||
"\n--\n-- Position to start replication or point-in-time "
|
||||
"recovery from\n--\n\n");
|
||||
fprintf(md_result_file,
|
||||
"%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n",
|
||||
(use_gtid ? "-- " : comment_prefix), file, offset);
|
||||
/* gtid */
|
||||
if (have_mariadb_gtid)
|
||||
{
|
||||
print_comment(md_result_file, 0,
|
||||
"\n--\n-- GTID to start replication from\n--\n\n");
|
||||
"\n-- Preferably use GTID to start replication from GTID "
|
||||
"position:\n\n");
|
||||
if (use_gtid)
|
||||
fprintf(md_result_file,
|
||||
"%sCHANGE MASTER TO MASTER_USE_GTID=slave_pos;\n",
|
||||
@ -6239,6 +6232,19 @@ static int do_show_master_status(MYSQL *mysql_con, int consistent_binlog_pos,
|
||||
"%sSET GLOBAL gtid_slave_pos='%s';\n",
|
||||
(!use_gtid ? "-- " : comment_prefix), gtid_pos);
|
||||
}
|
||||
|
||||
/* SHOW MASTER STATUS reports file and position */
|
||||
print_comment(md_result_file, 0,
|
||||
"\n--\n-- Alternately, following is the position of the binary "
|
||||
"logging from SHOW MASTER STATUS at point of backup."
|
||||
"\n-- Use this when creating a replica of the primary server "
|
||||
"where the backup was made."
|
||||
"\n-- The new server will be connecting to the primary server "
|
||||
"where the backup was taken."
|
||||
"\n--\n\n");
|
||||
fprintf(md_result_file,
|
||||
"%sCHANGE MASTER TO MASTER_LOG_FILE='%s', MASTER_LOG_POS=%s;\n",
|
||||
(use_gtid ? "-- " : comment_prefix), file, offset);
|
||||
check_io(md_result_file);
|
||||
|
||||
if (!consistent_binlog_pos)
|
||||
@ -6317,7 +6323,6 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
(opt_slave_data == MYSQL_OPT_SLAVE_DATA_COMMENTED_SQL) ? "-- " : "";
|
||||
const char *gtid_comment_prefix= (use_gtid ? comment_prefix : "-- ");
|
||||
const char *nogtid_comment_prefix= (!use_gtid ? comment_prefix : "-- ");
|
||||
int set_gtid_done= 0;
|
||||
|
||||
if (mysql_query_with_error_report(mysql_con, &slave,
|
||||
multi_source ?
|
||||
@ -6333,9 +6338,16 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
return 1;
|
||||
}
|
||||
|
||||
while ((row= mysql_fetch_row(slave)))
|
||||
{
|
||||
if (multi_source && !set_gtid_done)
|
||||
print_comment(md_result_file, 0,
|
||||
"\n--\n-- The following is the SQL position of the replication "
|
||||
"taken from SHOW SLAVE STATUS at the time of backup.\n"
|
||||
"-- Use this position when creating a clone of, or replacement "
|
||||
"server, from where the backup was taken."
|
||||
"\n-- This new server will connects to the same primary "
|
||||
"server%s.\n--\n",
|
||||
multi_source ? "(s)" : "");
|
||||
|
||||
if (multi_source)
|
||||
{
|
||||
char gtid_pos[MAX_GTID_LENGTH];
|
||||
if (have_mariadb_gtid && get_gtid_pos(gtid_pos, 0))
|
||||
@ -6343,13 +6355,19 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
mysql_free_result(slave);
|
||||
return 1;
|
||||
}
|
||||
if (opt_comments)
|
||||
fprintf(md_result_file, "\n--\n-- Gtid position to start replication "
|
||||
"from\n--\n\n");
|
||||
print_comment(md_result_file, 0,
|
||||
"-- GTID position to start replication:\n");
|
||||
fprintf(md_result_file, "%sSET GLOBAL gtid_slave_pos='%s';\n",
|
||||
gtid_comment_prefix, gtid_pos);
|
||||
set_gtid_done= 1;
|
||||
}
|
||||
if (use_gtid)
|
||||
print_comment(md_result_file, 0,
|
||||
"\n-- Use only the MASTER_USE_GTID=slave_pos or "
|
||||
"MASTER_LOG_FILE/MASTER_LOG_POS in the statements below."
|
||||
"\n\n");
|
||||
|
||||
while ((row= mysql_fetch_row(slave)))
|
||||
{
|
||||
if (row[9 + multi_source] && row[21 + multi_source])
|
||||
{
|
||||
if (use_gtid)
|
||||
@ -6363,11 +6381,6 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
}
|
||||
|
||||
/* SHOW MASTER STATUS reports file and position */
|
||||
if (opt_comments)
|
||||
fprintf(md_result_file,
|
||||
"\n--\n-- Position to start replication or point-in-time "
|
||||
"recovery from (the master of this slave)\n--\n\n");
|
||||
|
||||
if (multi_source)
|
||||
fprintf(md_result_file, "%sCHANGE MASTER '%.80s' TO ",
|
||||
nogtid_comment_prefix, row[0]);
|
||||
@ -6388,6 +6401,7 @@ static int do_show_slave_status(MYSQL *mysql_con, int use_gtid,
|
||||
check_io(md_result_file);
|
||||
}
|
||||
}
|
||||
fprintf(md_result_file, "\n");
|
||||
mysql_free_result(slave);
|
||||
return 0;
|
||||
}
|
||||
|
@ -4,6 +4,9 @@ SET(WITH_PCRE "auto" CACHE STRING
|
||||
"Which pcre to use (possible values are 'bundled', 'system', or 'auto')")
|
||||
|
||||
MACRO(BUNDLE_PCRE2)
|
||||
SET(WITH_PCRE "bundled" CACHE STRING
|
||||
"Which pcre to use (possible values are 'bundled', 'system', or 'auto')")
|
||||
|
||||
SET(dir "${CMAKE_BINARY_DIR}/extra/pcre2")
|
||||
SET(PCRE_INCLUDE_DIRS ${dir}/src/pcre2-build ${dir}/src/pcre2/src)
|
||||
MESSAGE(STATUS "Will download and bundle pcre2")
|
||||
|
5
debian/autobake-deb.sh
vendored
5
debian/autobake-deb.sh
vendored
@ -32,12 +32,11 @@ then
|
||||
# build is not running on Gitlab-CI.
|
||||
sed '/-DPLUGIN_COLUMNSTORE=NO/d' -i debian/rules
|
||||
# Take the files and part of control from MCS directory
|
||||
if [ ! -f debian/mariadb-plugin-columnstore.install ]
|
||||
then
|
||||
cp -v storage/columnstore/columnstore/debian/mariadb-plugin-columnstore.* debian/
|
||||
# idempotent, except for the blank line, but that can be tolerated.
|
||||
sed -e '/Package: mariadb-plugin-columnstore/,/^$/d' -i debian/control
|
||||
echo >> debian/control
|
||||
sed "s/-10.6//" <storage/columnstore/columnstore/debian/control >> debian/control
|
||||
fi
|
||||
fi
|
||||
|
||||
# Look up distro-version specific stuff
|
||||
|
1
debian/mariadb-server.install
vendored
1
debian/mariadb-server.install
vendored
@ -70,6 +70,7 @@ usr/share/man/man1/myisam_ftdump.1
|
||||
usr/share/man/man1/myisamchk.1
|
||||
usr/share/man/man1/myisamlog.1
|
||||
usr/share/man/man1/myisampack.1
|
||||
usr/share/man/man1/wsrep_sst_backup.1
|
||||
usr/share/man/man1/wsrep_sst_common.1
|
||||
usr/share/man/man1/wsrep_sst_mariabackup.1
|
||||
usr/share/man/man1/wsrep_sst_mysqldump.1
|
||||
|
2
debian/mariadb-server.mariadb.init
vendored
2
debian/mariadb-server.mariadb.init
vendored
@ -88,7 +88,7 @@ sanity_checks() {
|
||||
# If datadir location is not changed int configuration
|
||||
# then it's not printed with /usr/sbin/mariadbd --print-defaults
|
||||
# then we use 'sane' default.
|
||||
if [ -z "$datadir"]
|
||||
if [ -z "$datadir" ]
|
||||
then
|
||||
datadir="/var/lib/mysql"
|
||||
fi
|
||||
|
1
debian/rules
vendored
1
debian/rules
vendored
@ -98,7 +98,6 @@ endif
|
||||
-DCOMPILATION_COMMENT="mariadb.org binary distribution" \
|
||||
-DMYSQL_SERVER_SUFFIX="-$(DEB_VERSION_REVISION)" \
|
||||
-DSYSTEM_TYPE="debian-$(DEB_HOST_GNU_SYSTEM)" \
|
||||
-DCMAKE_SYSTEM_PROCESSOR=$(DEB_HOST_ARCH) \
|
||||
-DBUILD_CONFIG=mysql_release \
|
||||
-DCONC_DEFAULT_CHARSET=utf8mb4 \
|
||||
-DPLUGIN_AWS_KEY_MANAGEMENT=NO \
|
||||
|
@ -56,7 +56,6 @@ ENDIF()
|
||||
MYSQL_ADD_EXECUTABLE(mariadb-backup
|
||||
xtrabackup.cc
|
||||
innobackupex.cc
|
||||
changed_page_bitmap.cc
|
||||
datasink.cc
|
||||
ds_buffer.cc
|
||||
ds_compress.cc
|
||||
|
@ -47,6 +47,12 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <limits>
|
||||
#ifdef HAVE_PWD_H
|
||||
#ifdef HAVE_SYS_TYPES_H
|
||||
#include <sys/types.h>
|
||||
#endif
|
||||
#include <pwd.h>
|
||||
#endif
|
||||
#include "common.h"
|
||||
#include "xtrabackup.h"
|
||||
#include "srv0srv.h"
|
||||
@ -65,7 +71,6 @@ char tool_args[2048];
|
||||
ulong mysql_server_version;
|
||||
|
||||
/* server capabilities */
|
||||
bool have_changed_page_bitmaps = false;
|
||||
bool have_backup_locks = false;
|
||||
bool have_lock_wait_timeout = false;
|
||||
bool have_galera_enabled = false;
|
||||
@ -92,11 +97,54 @@ MYSQL *mysql_connection;
|
||||
|
||||
extern my_bool opt_ssl_verify_server_cert, opt_use_ssl;
|
||||
|
||||
|
||||
/*
|
||||
get_os_user()
|
||||
Ressemles read_user_name() from libmariadb/libmariadb/mariadb_lib.c.
|
||||
*/
|
||||
|
||||
#if !defined(_WIN32)
|
||||
|
||||
#if defined(HAVE_GETPWUID) && defined(NO_GETPWUID_DECL)
|
||||
struct passwd *getpwuid(uid_t);
|
||||
char* getlogin(void);
|
||||
#endif
|
||||
|
||||
static const char *get_os_user() // Posix
|
||||
{
|
||||
if (!geteuid())
|
||||
return "root";
|
||||
#ifdef HAVE_GETPWUID
|
||||
struct passwd *pw;
|
||||
const char *str;
|
||||
if ((pw= getpwuid(geteuid())) != NULL)
|
||||
return pw->pw_name;
|
||||
if ((str= getlogin()) != NULL)
|
||||
return str;
|
||||
#endif
|
||||
if ((str= getenv("USER")) ||
|
||||
(str= getenv("LOGNAME")) ||
|
||||
(str= getenv("LOGIN")))
|
||||
return str;
|
||||
return NULL;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static const char *get_os_user() // Windows
|
||||
{
|
||||
return getenv("USERNAME");
|
||||
}
|
||||
|
||||
#endif // _WIN32
|
||||
|
||||
|
||||
MYSQL *
|
||||
xb_mysql_connect()
|
||||
{
|
||||
MYSQL *connection = mysql_init(NULL);
|
||||
char mysql_port_str[std::numeric_limits<int>::digits10 + 3];
|
||||
const char *user= opt_user ? opt_user : get_os_user();
|
||||
|
||||
sprintf(mysql_port_str, "%d", opt_port);
|
||||
|
||||
@ -126,7 +174,7 @@ xb_mysql_connect()
|
||||
|
||||
msg("Connecting to MariaDB server host: %s, user: %s, password: %s, "
|
||||
"port: %s, socket: %s", opt_host ? opt_host : "localhost",
|
||||
opt_user ? opt_user : "not set",
|
||||
user ? user : "not set",
|
||||
opt_password ? "set" : "not set",
|
||||
opt_port != 0 ? mysql_port_str : "not set",
|
||||
opt_socket ? opt_socket : "not set");
|
||||
@ -147,7 +195,7 @@ xb_mysql_connect()
|
||||
|
||||
if (!mysql_real_connect(connection,
|
||||
opt_host ? opt_host : "localhost",
|
||||
opt_user,
|
||||
user,
|
||||
opt_password,
|
||||
"" /*database*/, opt_port,
|
||||
opt_socket, 0)) {
|
||||
@ -512,24 +560,6 @@ Query the server to find out what backup capabilities it supports.
|
||||
bool
|
||||
detect_mysql_capabilities_for_backup()
|
||||
{
|
||||
const char *query = "SELECT 'INNODB_CHANGED_PAGES', COUNT(*) FROM "
|
||||
"INFORMATION_SCHEMA.PLUGINS "
|
||||
"WHERE PLUGIN_NAME LIKE 'INNODB_CHANGED_PAGES'";
|
||||
char *innodb_changed_pages = NULL;
|
||||
mysql_variable vars[] = {
|
||||
{"INNODB_CHANGED_PAGES", &innodb_changed_pages}, {NULL, NULL}};
|
||||
|
||||
if (xtrabackup_incremental) {
|
||||
|
||||
read_mysql_variables(mysql_connection, query, vars, true);
|
||||
|
||||
ut_ad(innodb_changed_pages != NULL);
|
||||
|
||||
have_changed_page_bitmaps = (atoi(innodb_changed_pages) == 1);
|
||||
|
||||
free_mysql_variables(vars);
|
||||
}
|
||||
|
||||
/* do some sanity checks */
|
||||
if (opt_galera_info && !have_galera_enabled) {
|
||||
msg("--galera-info is specified on the command "
|
||||
@ -1882,18 +1912,6 @@ select_history()
|
||||
return(true);
|
||||
}
|
||||
|
||||
bool
|
||||
flush_changed_page_bitmaps()
|
||||
{
|
||||
if (xtrabackup_incremental && have_changed_page_bitmaps &&
|
||||
!xtrabackup_incremental_force_scan) {
|
||||
xb_mysql_query(mysql_connection,
|
||||
"FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS", false);
|
||||
}
|
||||
return(true);
|
||||
}
|
||||
|
||||
|
||||
/*********************************************************************//**
|
||||
Deallocate memory, disconnect from server, etc.
|
||||
@return true on success. */
|
||||
|
@ -7,7 +7,6 @@
|
||||
extern ulong mysql_server_version;
|
||||
|
||||
/* server capabilities */
|
||||
extern bool have_changed_page_bitmaps;
|
||||
extern bool have_backup_locks;
|
||||
extern bool have_lock_wait_timeout;
|
||||
extern bool have_galera_enabled;
|
||||
@ -35,9 +34,6 @@ capture_tool_command(int argc, char **argv);
|
||||
bool
|
||||
select_history();
|
||||
|
||||
bool
|
||||
flush_changed_page_bitmaps();
|
||||
|
||||
void
|
||||
backup_cleanup();
|
||||
|
||||
|
File diff suppressed because it is too large
Load Diff
@ -1,85 +0,0 @@
|
||||
/******************************************************
|
||||
XtraBackup: hot backup tool for InnoDB
|
||||
(c) 2009-2012 Percona Inc.
|
||||
Originally Created 3/3/2009 Yasufumi Kinoshita
|
||||
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
|
||||
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
|
||||
*******************************************************/
|
||||
|
||||
/* Changed page bitmap interface */
|
||||
|
||||
#ifndef XB_CHANGED_PAGE_BITMAP_H
|
||||
#define XB_CHANGED_PAGE_BITMAP_H
|
||||
|
||||
#include <ut0rbt.h>
|
||||
#include <fil0fil.h>
|
||||
|
||||
/* The changed page bitmap structure */
|
||||
typedef ib_rbt_t xb_page_bitmap;
|
||||
|
||||
struct xb_page_bitmap_range_struct;
|
||||
|
||||
/* The bitmap range iterator over one space id */
|
||||
typedef struct xb_page_bitmap_range_struct xb_page_bitmap_range;
|
||||
|
||||
/****************************************************************//**
|
||||
Read the disk bitmap and build the changed page bitmap tree for the
|
||||
LSN interval incremental_lsn to log_sys.next_checkpoint_lsn.
|
||||
|
||||
@return the built bitmap tree */
|
||||
xb_page_bitmap*
|
||||
xb_page_bitmap_init(void);
|
||||
/*=====================*/
|
||||
|
||||
/****************************************************************//**
|
||||
Free the bitmap tree. */
|
||||
void
|
||||
xb_page_bitmap_deinit(
|
||||
/*==================*/
|
||||
xb_page_bitmap* bitmap); /*!<in/out: bitmap tree */
|
||||
|
||||
|
||||
/****************************************************************//**
|
||||
Set up a new bitmap range iterator over a given space id changed
|
||||
pages in a given bitmap.
|
||||
|
||||
@return bitmap range iterator */
|
||||
xb_page_bitmap_range*
|
||||
xb_page_bitmap_range_init(
|
||||
/*======================*/
|
||||
xb_page_bitmap* bitmap, /*!< in: bitmap to iterate over */
|
||||
ulint space_id); /*!< in: space id */
|
||||
|
||||
/****************************************************************//**
|
||||
Get the next page id that has its bit set or cleared, i.e. equal to
|
||||
bit_value.
|
||||
|
||||
@return page id */
|
||||
ulint
|
||||
xb_page_bitmap_range_get_next_bit(
|
||||
/*==============================*/
|
||||
xb_page_bitmap_range* bitmap_range, /*!< in/out: bitmap range */
|
||||
ibool bit_value); /*!< in: bit value */
|
||||
|
||||
/****************************************************************//**
|
||||
Free the bitmap range iterator. */
|
||||
void
|
||||
xb_page_bitmap_range_deinit(
|
||||
/*========================*/
|
||||
xb_page_bitmap_range* bitmap_range); /*! in/out: bitmap range */
|
||||
|
||||
#endif
|
@ -143,7 +143,7 @@ static inline ATTRIBUTE_FORMAT(printf, 1,2) ATTRIBUTE_NORETURN void die(const ch
|
||||
# define POSIX_FADV_NORMAL
|
||||
# define POSIX_FADV_SEQUENTIAL
|
||||
# define POSIX_FADV_DONTNEED
|
||||
# define posix_fadvise(a,b,c,d) do {} while(0)
|
||||
# define posix_fadvise(fd, offset, len, advice) do { (void)offset; } while(0)
|
||||
#endif
|
||||
|
||||
/***********************************************************************
|
||||
|
@ -231,8 +231,7 @@ xb_fil_cur_open(
|
||||
/ cursor->page_size);
|
||||
|
||||
cursor->read_filter = read_filter;
|
||||
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor,
|
||||
node->space->id);
|
||||
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor);
|
||||
|
||||
return(XB_FIL_CUR_SUCCESS);
|
||||
}
|
||||
@ -502,10 +501,6 @@ xb_fil_cur_close(
|
||||
/*=============*/
|
||||
xb_fil_cur_t *cursor) /*!< in/out: source file cursor */
|
||||
{
|
||||
if (cursor->read_filter) {
|
||||
cursor->read_filter->deinit(&cursor->read_filter_ctxt);
|
||||
}
|
||||
|
||||
aligned_free(cursor->buf);
|
||||
cursor->buf = NULL;
|
||||
|
||||
|
@ -27,6 +27,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
|
||||
#include <my_dir.h>
|
||||
#include "read_filt.h"
|
||||
#include "mtr0types.h"
|
||||
#include "srv0start.h"
|
||||
#include "srv0srv.h"
|
||||
#include "xtrabackup.h"
|
||||
|
@ -32,29 +32,13 @@ Perform read filter context initialization that is common to all read
|
||||
filters. */
|
||||
static
|
||||
void
|
||||
common_init(
|
||||
/*========*/
|
||||
rf_pass_through_init(
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
|
||||
const xb_fil_cur_t* cursor) /*!<in: file cursor */
|
||||
{
|
||||
ctxt->offset = 0;
|
||||
ctxt->data_file_size = cursor->statinfo.st_size;
|
||||
ctxt->buffer_capacity = cursor->buf_size;
|
||||
ctxt->page_size = cursor->page_size;
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Initialize the pass-through read filter. */
|
||||
static
|
||||
void
|
||||
rf_pass_through_init(
|
||||
/*=================*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
|
||||
const xb_fil_cur_t* cursor, /*!<in: file cursor */
|
||||
ulint space_id __attribute__((unused)))
|
||||
/*!<in: space id we are reading */
|
||||
{
|
||||
common_init(ctxt, cursor);
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
@ -65,143 +49,25 @@ rf_pass_through_get_next_batch(
|
||||
/*===========================*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
||||
context */
|
||||
ib_int64_t* read_batch_start, /*!<out: starting read
|
||||
int64_t* read_batch_start, /*!<out: starting read
|
||||
offset in bytes for the
|
||||
next batch of pages */
|
||||
ib_int64_t* read_batch_len) /*!<out: length in
|
||||
int64_t* read_batch_len) /*!<out: length in
|
||||
bytes of the next batch
|
||||
of pages */
|
||||
{
|
||||
*read_batch_start = ctxt->offset;
|
||||
*read_batch_len = ctxt->data_file_size - ctxt->offset;
|
||||
|
||||
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
|
||||
if (*read_batch_len > (int64_t)ctxt->buffer_capacity) {
|
||||
*read_batch_len = ctxt->buffer_capacity;
|
||||
}
|
||||
|
||||
ctxt->offset += *read_batch_len;
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Deinitialize the pass-through read filter. */
|
||||
static
|
||||
void
|
||||
rf_pass_through_deinit(
|
||||
/*===================*/
|
||||
xb_read_filt_ctxt_t* ctxt __attribute__((unused)))
|
||||
/*!<in: read filter context */
|
||||
{
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Initialize the changed page bitmap-based read filter. Assumes that
|
||||
the bitmap is already set up in changed_page_bitmap. */
|
||||
static
|
||||
void
|
||||
rf_bitmap_init(
|
||||
/*===========*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
||||
context */
|
||||
const xb_fil_cur_t* cursor, /*!<in: read cursor */
|
||||
ulint space_id) /*!<in: space id */
|
||||
{
|
||||
common_init(ctxt, cursor);
|
||||
ctxt->bitmap_range = xb_page_bitmap_range_init(changed_page_bitmap,
|
||||
space_id);
|
||||
ctxt->filter_batch_end = 0;
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Get the next batch of pages for the bitmap read filter. */
|
||||
static
|
||||
void
|
||||
rf_bitmap_get_next_batch(
|
||||
/*=====================*/
|
||||
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
|
||||
context */
|
||||
ib_int64_t* read_batch_start, /*!<out: starting read
|
||||
offset in bytes for the
|
||||
next batch of pages */
|
||||
ib_int64_t* read_batch_len) /*!<out: length in
|
||||
bytes of the next batch
|
||||
of pages */
|
||||
{
|
||||
ulint start_page_id;
|
||||
const ulint page_size = ctxt->page_size;
|
||||
|
||||
start_page_id = (ulint)(ctxt->offset / page_size);
|
||||
|
||||
xb_a (ctxt->offset % page_size == 0);
|
||||
|
||||
if (start_page_id == ctxt->filter_batch_end) {
|
||||
|
||||
/* Used up all the previous bitmap range, get some more */
|
||||
ulint next_page_id;
|
||||
|
||||
/* Find the next changed page using the bitmap */
|
||||
next_page_id = xb_page_bitmap_range_get_next_bit
|
||||
(ctxt->bitmap_range, TRUE);
|
||||
|
||||
if (next_page_id == ULINT_UNDEFINED) {
|
||||
*read_batch_len = 0;
|
||||
return;
|
||||
}
|
||||
|
||||
ctxt->offset = next_page_id * page_size;
|
||||
|
||||
/* Find the end of the current changed page block by searching
|
||||
for the next cleared bitmap bit */
|
||||
ctxt->filter_batch_end
|
||||
= xb_page_bitmap_range_get_next_bit(ctxt->bitmap_range,
|
||||
FALSE);
|
||||
xb_a(next_page_id < ctxt->filter_batch_end);
|
||||
}
|
||||
|
||||
*read_batch_start = ctxt->offset;
|
||||
if (ctxt->filter_batch_end == ULINT_UNDEFINED) {
|
||||
/* No more cleared bits in the bitmap, need to copy all the
|
||||
remaining pages. */
|
||||
*read_batch_len = ctxt->data_file_size - ctxt->offset;
|
||||
} else {
|
||||
*read_batch_len = ctxt->filter_batch_end * page_size
|
||||
- ctxt->offset;
|
||||
}
|
||||
|
||||
/* If the page block is larger than the buffer capacity, limit it to
|
||||
buffer capacity. The subsequent invocations will continue returning
|
||||
the current block in buffer-sized pieces until ctxt->filter_batch_end
|
||||
is reached, trigerring the next bitmap query. */
|
||||
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
|
||||
*read_batch_len = ctxt->buffer_capacity;
|
||||
}
|
||||
|
||||
ctxt->offset += *read_batch_len;
|
||||
xb_a (ctxt->offset % page_size == 0);
|
||||
xb_a (*read_batch_start % page_size == 0);
|
||||
xb_a (*read_batch_len % page_size == 0);
|
||||
}
|
||||
|
||||
/****************************************************************//**
|
||||
Deinitialize the changed page bitmap-based read filter. */
|
||||
static
|
||||
void
|
||||
rf_bitmap_deinit(
|
||||
/*=============*/
|
||||
xb_read_filt_ctxt_t* ctxt) /*!<in/out: read filter context */
|
||||
{
|
||||
xb_page_bitmap_range_deinit(ctxt->bitmap_range);
|
||||
}
|
||||
|
||||
/* The pass-through read filter */
|
||||
xb_read_filt_t rf_pass_through = {
|
||||
&rf_pass_through_init,
|
||||
&rf_pass_through_get_next_batch,
|
||||
&rf_pass_through_deinit
|
||||
};
|
||||
|
||||
/* The changed page bitmap-based read filter */
|
||||
xb_read_filt_t rf_bitmap = {
|
||||
&rf_bitmap_init,
|
||||
&rf_bitmap_get_next_batch,
|
||||
&rf_bitmap_deinit
|
||||
};
|
||||
|
@ -25,42 +25,27 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#ifndef XB_READ_FILT_H
|
||||
#define XB_READ_FILT_H
|
||||
|
||||
#include "changed_page_bitmap.h"
|
||||
|
||||
typedef uint32_t space_id_t;
|
||||
#include <cstdint>
|
||||
#include <cstddef>
|
||||
|
||||
struct xb_fil_cur_t;
|
||||
|
||||
/* The read filter context */
|
||||
struct xb_read_filt_ctxt_t {
|
||||
ib_int64_t offset; /*!< current file offset */
|
||||
ib_int64_t data_file_size; /*!< data file size */
|
||||
int64_t offset; /*!< current file offset */
|
||||
int64_t data_file_size; /*!< data file size */
|
||||
size_t buffer_capacity;/*!< read buffer capacity */
|
||||
space_id_t space_id; /*!< space id */
|
||||
/* The following fields used only in bitmap filter */
|
||||
/* Move these to union if any other filters are added in future */
|
||||
xb_page_bitmap_range *bitmap_range; /*!< changed page bitmap range
|
||||
iterator for space_id */
|
||||
ulint page_size; /*!< page size */
|
||||
ulint filter_batch_end;/*!< the ending page id of the
|
||||
current changed page block in
|
||||
the bitmap */
|
||||
/** TODO: remove this default constructor */
|
||||
xb_read_filt_ctxt_t() : page_size(0) {}
|
||||
};
|
||||
|
||||
/* The read filter */
|
||||
struct xb_read_filt_t {
|
||||
void (*init)(xb_read_filt_ctxt_t* ctxt,
|
||||
const xb_fil_cur_t* cursor,
|
||||
ulint space_id);
|
||||
const xb_fil_cur_t* cursor);
|
||||
void (*get_next_batch)(xb_read_filt_ctxt_t* ctxt,
|
||||
ib_int64_t* read_batch_start,
|
||||
ib_int64_t* read_batch_len);
|
||||
void (*deinit)(xb_read_filt_ctxt_t* ctxt);
|
||||
int64_t* read_batch_start,
|
||||
int64_t* read_batch_len);
|
||||
};
|
||||
|
||||
extern xb_read_filt_t rf_pass_through;
|
||||
extern xb_read_filt_t rf_bitmap;
|
||||
|
||||
#endif
|
||||
|
@ -101,7 +101,6 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#include "ds_buffer.h"
|
||||
#include "ds_tmpfile.h"
|
||||
#include "xbstream.h"
|
||||
#include "changed_page_bitmap.h"
|
||||
#include "read_filt.h"
|
||||
#include "backup_wsrep.h"
|
||||
#include "innobackupex.h"
|
||||
@ -155,7 +154,6 @@ char *xtrabackup_incremental;
|
||||
lsn_t incremental_lsn;
|
||||
lsn_t incremental_to_lsn;
|
||||
lsn_t incremental_last_lsn;
|
||||
xb_page_bitmap *changed_page_bitmap;
|
||||
|
||||
char *xtrabackup_incremental_basedir; /* for --backup */
|
||||
char *xtrabackup_extra_lsndir; /* for --backup with --extra-lsndir */
|
||||
@ -424,6 +422,8 @@ pthread_cond_t scanned_lsn_cond;
|
||||
/** Store the deferred tablespace name during --backup */
|
||||
static std::set<std::string> defer_space_names;
|
||||
|
||||
typedef decltype(fil_space_t::id) space_id_t;
|
||||
|
||||
typedef std::map<space_id_t,std::string> space_id_to_name_t;
|
||||
|
||||
struct ddl_tracker_t {
|
||||
@ -2419,7 +2419,12 @@ static bool innodb_init()
|
||||
os_file_delete_if_exists_func(ib_logfile0.c_str(), nullptr);
|
||||
os_file_t file= os_file_create_func(ib_logfile0.c_str(),
|
||||
OS_FILE_CREATE, OS_FILE_NORMAL,
|
||||
OS_DATA_FILE_NO_O_DIRECT, false, &ret);
|
||||
#if defined _WIN32 || defined HAVE_FCNTL_DIRECT
|
||||
OS_DATA_FILE_NO_O_DIRECT,
|
||||
#else
|
||||
OS_DATA_FILE,
|
||||
#endif
|
||||
false, &ret);
|
||||
if (!ret)
|
||||
{
|
||||
invalid_log:
|
||||
@ -3017,12 +3022,7 @@ static my_bool xtrabackup_copy_datafile(ds_ctxt *ds_data,
|
||||
goto skip;
|
||||
}
|
||||
|
||||
if (!changed_page_bitmap) {
|
||||
read_filter = &rf_pass_through;
|
||||
}
|
||||
else {
|
||||
read_filter = &rf_bitmap;
|
||||
}
|
||||
|
||||
res = xb_fil_cur_open(&cursor, read_filter, node, thread_n, ULLONG_MAX);
|
||||
if (res == XB_FIL_CUR_SKIP) {
|
||||
@ -4787,11 +4787,6 @@ fail:
|
||||
|
||||
std::thread(log_copying_thread).detach();
|
||||
|
||||
/* FLUSH CHANGED_PAGE_BITMAPS call */
|
||||
if (!flush_changed_page_bitmaps()) {
|
||||
goto fail;
|
||||
}
|
||||
|
||||
ut_a(xtrabackup_parallel > 0);
|
||||
|
||||
if (xtrabackup_parallel > 1) {
|
||||
@ -4865,9 +4860,6 @@ fail:
|
||||
goto fail;
|
||||
}
|
||||
|
||||
if (changed_page_bitmap) {
|
||||
xb_page_bitmap_deinit(changed_page_bitmap);
|
||||
}
|
||||
backup_datasinks.destroy();
|
||||
|
||||
msg("Redo log (from LSN " LSN_PF " to " LSN_PF ") was copied.",
|
||||
|
@ -24,7 +24,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
|
||||
#include <my_getopt.h>
|
||||
#include "datasink.h"
|
||||
#include "xbstream.h"
|
||||
#include "changed_page_bitmap.h"
|
||||
#include "fil0fil.h"
|
||||
#include <set>
|
||||
|
||||
#define XB_TOOL_NAME "mariadb-backup"
|
||||
@ -84,8 +84,6 @@ extern my_bool xb_backup_rocksdb;
|
||||
|
||||
extern uint opt_protocol;
|
||||
|
||||
extern xb_page_bitmap *changed_page_bitmap;
|
||||
|
||||
extern char *xtrabackup_incremental;
|
||||
extern my_bool xtrabackup_incremental_force_scan;
|
||||
|
||||
|
@ -111,7 +111,7 @@ C_MODE_START
|
||||
On AARCH64, we use the generic timer base register. We override clang
|
||||
implementation for aarch64 as it access a PMU register which is not
|
||||
guaranteed to be active.
|
||||
On RISC-V, we use the rdcycle instruction to read from mcycle register.
|
||||
On RISC-V, we use the rdtime instruction to read from mtime register.
|
||||
|
||||
Sadly, we have nothing for the Digital Alpha, MIPS, Motorola m68k,
|
||||
HP PA-RISC or other non-mainstream (or obsolete) processors.
|
||||
@ -211,15 +211,15 @@ static inline ulonglong my_timer_cycles(void)
|
||||
}
|
||||
#elif defined(__riscv)
|
||||
#define MY_TIMER_ROUTINE_CYCLES MY_TIMER_ROUTINE_RISCV
|
||||
/* Use RDCYCLE (and RDCYCLEH on riscv32) */
|
||||
/* Use RDTIME (and RDTIMEH on riscv32) */
|
||||
{
|
||||
# if __riscv_xlen == 32
|
||||
ulong result_lo, result_hi0, result_hi1;
|
||||
/* Implemented in assembly because Clang insisted on branching. */
|
||||
__asm __volatile__(
|
||||
"rdcycleh %0\n"
|
||||
"rdcycle %1\n"
|
||||
"rdcycleh %2\n"
|
||||
"rdtimeh %0\n"
|
||||
"rdtime %1\n"
|
||||
"rdtimeh %2\n"
|
||||
"sub %0, %0, %2\n"
|
||||
"seqz %0, %0\n"
|
||||
"sub %0, zero, %0\n"
|
||||
@ -228,7 +228,7 @@ static inline ulonglong my_timer_cycles(void)
|
||||
return (static_cast<ulonglong>(result_hi1) << 32) | result_lo;
|
||||
# else
|
||||
ulonglong result;
|
||||
__asm __volatile__("rdcycle %0" : "=r"(result));
|
||||
__asm __volatile__("rdtime %0" : "=r"(result));
|
||||
return result;
|
||||
}
|
||||
# endif
|
||||
|
44
include/mysql/service_print_check_msg.h
Normal file
44
include/mysql/service_print_check_msg.h
Normal file
@ -0,0 +1,44 @@
|
||||
/* Copyright (c) 2019, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
|
||||
|
||||
#pragma once
|
||||
|
||||
/**
|
||||
@file include/mysql/service_print_check_msg.h
|
||||
This service provides functions to write messages for check or repair
|
||||
*/
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
|
||||
extern struct print_check_msg_service_st {
|
||||
void (*print_check_msg)(MYSQL_THD, const char *db_name, const char *table_name,
|
||||
const char *op, const char *msg_type, const char *message,
|
||||
my_bool print_to_log);
|
||||
} *print_check_msg_service;
|
||||
|
||||
#ifdef MYSQL_DYNAMIC_PLUGIN
|
||||
# define print_check_msg_context(_THD) print_check_msg_service->print_check_msg
|
||||
#else
|
||||
extern void print_check_msg(MYSQL_THD, const char *db_name, const char *table_name,
|
||||
const char *op, const char *msg_type, const char *message,
|
||||
my_bool print_to_log);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
@ -44,6 +44,7 @@
|
||||
#define VERSION_wsrep 0x0500
|
||||
#define VERSION_json 0x0100
|
||||
#define VERSION_thd_mdl 0x0100
|
||||
#define VERSION_print_check_msg 0x0100
|
||||
#define VERSION_sql_service 0x0101
|
||||
|
||||
#define VERSION_provider_bzip2 0x0100
|
||||
|
@ -25,6 +25,7 @@ SET(MYSQLSERVICES_SOURCES
|
||||
my_crypt_service.c
|
||||
my_md5_service.c
|
||||
my_print_error_service.c
|
||||
print_check_msg_service.c
|
||||
my_sha1_service.c
|
||||
my_sha2_service.c
|
||||
my_snprintf_service.c
|
||||
|
18
libservices/print_check_msg_service.c
Normal file
18
libservices/print_check_msg_service.c
Normal file
@ -0,0 +1,18 @@
|
||||
/* Copyright (c) 2024, MariaDB Plc
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include <service_versions.h>
|
||||
SERVICE_VERSION print_check_msg_context= (void*) VERSION_print_check_msg;
|
@ -25,7 +25,7 @@ IF(NOT WITHOUT_SERVER)
|
||||
INSTALL_MANPAGES(Server
|
||||
wsrep_sst_rsync.1 wsrep_sst_common.1 wsrep_sst_mariabackup.1
|
||||
wsrep_sst_mysqldump.1 wsrep_sst_rsync_wan.1 galera_recovery.1
|
||||
galera_new_cluster.1)
|
||||
galera_new_cluster.1 wsrep_sst_backup.1)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
INSTALL_MANPAGES(Client
|
||||
|
@ -1,6 +1,6 @@
|
||||
'\" t
|
||||
.\"
|
||||
.TH "\fBMY_PRINT_DEFAULTS\fR" "1" "15 May 2020" "MariaDB 10.11" "MariaDB Database System"
|
||||
.TH "\fBMY_PRINT_DEFAULTS\fR" "1" "18 December 2023" "MariaDB 10.11" "MariaDB Database System"
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * set default formatting
|
||||
.\" -----------------------------------------------------------------
|
||||
@ -146,6 +146,22 @@ In addition to the groups named on the command line, read groups that have the g
|
||||
.sp -1
|
||||
.IP \(bu 2.3
|
||||
.\}
|
||||
.\" my_print_defaults: --mariadbd option
|
||||
.\" mariadbd option: my_print_defaults
|
||||
\fB\-\-mariadbd\fR
|
||||
.sp
|
||||
Read the same set of groups that the mariadbd binary does.
|
||||
.RE
|
||||
.sp
|
||||
.RS 4
|
||||
.ie n \{\
|
||||
\h'-04'\(bu\h'+03'\c
|
||||
.\}
|
||||
.el \{\
|
||||
.sp -1
|
||||
.IP \(bu 2.3
|
||||
.\}
|
||||
|
||||
.\" my_print_defaults: --mysqld option
|
||||
.\" mysqld option: my_print_defaults
|
||||
\fB\-\-mysqld\fR
|
||||
|
16
man/wsrep_sst_backup.1
Normal file
16
man/wsrep_sst_backup.1
Normal file
@ -0,0 +1,16 @@
|
||||
'\" t
|
||||
.\"
|
||||
.TH "\FBWSREP_SST_BACKUP\FR" "1" "22 May 2022" "MariaDB 10\&.3" "MariaDB Database System"
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" * set default formatting
|
||||
.\" -----------------------------------------------------------------
|
||||
.\" disable hyphenation
|
||||
.nh
|
||||
.\" disable justification (adjust text to left margin only)
|
||||
.ad l
|
||||
.SH NAME
|
||||
wsrep_sst_backup \- backup helper script for the MariaDB Galera Cluster
|
||||
.SH DESCRIPTION
|
||||
Use: See source code of script\.
|
||||
.PP
|
||||
For more information, please refer to the MariaDB Knowledge Base, available online at https://mariadb.com/kb/
|
297
mysql-test/include/rpl_clone_slave_using_mariadb-backup.inc
Normal file
297
mysql-test/include/rpl_clone_slave_using_mariadb-backup.inc
Normal file
@ -0,0 +1,297 @@
|
||||
if ($cnf == "galera2_to_mariadb")
|
||||
{
|
||||
--let MASTER_MYPORT= $NODE_MYPORT_1
|
||||
--connect master, 127.0.0.1, root, , test, $NODE_MYPORT_1
|
||||
--connect slave, 127.0.0.1, root, , test, $NODE_MYPORT_3
|
||||
--disable_query_log
|
||||
--replace_result $MASTER_MYPORT ###
|
||||
--eval CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_USER='root', MASTER_PORT=$MASTER_MYPORT, MASTER_USE_GTID=NO;
|
||||
--enable_query_log
|
||||
START SLAVE;
|
||||
--source include/wait_for_slave_to_start.inc
|
||||
|
||||
--let XTRABACKUP_BACKUP_OPTIONS=--no-defaults --user=root --host='127.0.0.1' --port=$NODE_MYPORT_3
|
||||
--let XTRABACKUP_COPY_BACK_OPTIONS= --no-defaults
|
||||
}
|
||||
|
||||
if ($cnf == "mariadb_to_mariadb")
|
||||
{
|
||||
--let XTRABACKUP_BACKUP_OPTIONS=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group-suffix=.2
|
||||
--let XTRABACKUP_COPY_BACK_OPTIONS=--defaults-file=$MYSQLTEST_VARDIR/my.cnf --defaults-group-suffix=.2
|
||||
}
|
||||
|
||||
--connection master
|
||||
--let $MYSQLD_DATADIR_MASTER= `select @@datadir`
|
||||
--connection slave
|
||||
--let $MYSQLD_DATADIR_SLAVE= `select @@datadir`
|
||||
|
||||
# This test covers the filename:pos based synchronization
|
||||
# between the master and the slave.
|
||||
# If we ever need to test a GTID based synchronization,
|
||||
# it should be done in a separate test.
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Initial block with some transactions
|
||||
|
||||
--echo ### Slave: Make sure replication is not using GTID
|
||||
--connection slave
|
||||
--let $value= query_get_value(SHOW SLAVE STATUS, "Using_Gtid", 1)
|
||||
--echo # Using_Gtid=$value
|
||||
|
||||
--echo ### Master: Create and populate t1
|
||||
--connection master
|
||||
CREATE TABLE t1(a TEXT) ENGINE=InnoDB;
|
||||
START TRANSACTION;
|
||||
INSERT INTO t1 VALUES ('tr#00:stmt#00 - slave run#0, before backup');
|
||||
INSERT INTO t1 VALUES ('tr#00:stmt#01 - slave run#0, before backup');
|
||||
INSERT INTO t1 VALUES ('tr#00:stmt#02 - slave run#0, before backup');
|
||||
COMMIT;
|
||||
--sync_slave_with_master
|
||||
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Run the last transaction before mariadb-backup --backup
|
||||
--echo ### Remember SHOW MASTER STATUS and @@gtid_binlog_pos
|
||||
--echo ### before and after the transaction.
|
||||
|
||||
--echo ### Master: Rember MASTER STATUS and @@gtid_binlog_pos before tr#01
|
||||
--connection master
|
||||
--let $master_before_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
--let $master_before_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $master_before_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`
|
||||
|
||||
--echo ### Slave: Remember MASTER STATUS and @@gtid_binlog_pos before tr#01
|
||||
--connection slave
|
||||
--let $slave_before_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
--let $slave_before_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $slave_before_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`
|
||||
|
||||
--echo ### Master: Run the actual last transaction before the backup
|
||||
--connection master
|
||||
START TRANSACTION;
|
||||
INSERT INTO t1 VALUES ('tr#01:stmt#00 - slave run#0, before backup');
|
||||
INSERT INTO t1 VALUES ('tr#01:stmt#01 - slave run#0, before backup');
|
||||
INSERT INTO t1 VALUES ('tr#01:stmt#02 - slave run#0, before backup');
|
||||
COMMIT;
|
||||
--sync_slave_with_master
|
||||
|
||||
--echo ### Master: Remember MASTER STATUS and @@gtid_binlog_pos after tr#01
|
||||
--connection master
|
||||
--let $master_after_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
--let $master_after_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $master_after_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`
|
||||
|
||||
--echo ### Slave: Remember MASTER STATUS and @@gtid_binlog_pos after tr#01
|
||||
--connection slave
|
||||
--let $slave_after_tr01_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
--let $slave_after_tr01_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $slave_after_tr01_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Running `mariadb-backup --backup,--prepare` and checking
|
||||
--echo ### that xtrabackup_slave_info and xtrabackup_binlog_info are OK
|
||||
|
||||
--echo ### Slave: Create a backup
|
||||
--let $backup_slave=$MYSQLTEST_VARDIR/tmp/backup-slave
|
||||
--disable_result_log
|
||||
--exec $XTRABACKUP $XTRABACKUP_BACKUP_OPTIONS --slave-info --backup --target-dir=$backup_slave
|
||||
--enable_result_log
|
||||
|
||||
--echo ### Slave: Prepare the backup
|
||||
--exec $XTRABACKUP --prepare --target-dir=$backup_slave
|
||||
|
||||
--echo ### Slave: xtrabackup files:
|
||||
--echo ############################ xtrabackup_slave_info
|
||||
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position
|
||||
--cat_file $backup_slave/xtrabackup_slave_info
|
||||
--echo ############################ xtrabackup_binlog_info
|
||||
--replace_result $slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_file $slave_after_tr01_show_master_status_position slave_after_tr01_show_master_status_position $slave_after_tr01_gtid_binlog_pos slave_after_tr01_gtid_binlog_pos
|
||||
--cat_file $backup_slave/xtrabackup_binlog_info
|
||||
--echo ############################
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Run more transactions after the backup:
|
||||
--echo ### - while the slave is still running, then
|
||||
--echo ### - while the slave is shut down
|
||||
|
||||
--echo ### Master: Run another transaction while the slave is still running
|
||||
--connection master
|
||||
START TRANSACTION;
|
||||
INSERT INTO t1 VALUES ('tr#02:stmt#00 - slave run#0, after backup');
|
||||
INSERT INTO t1 VALUES ('tr#02:stmt#01 - slave run#0, after backup');
|
||||
INSERT INTO t1 VALUES ('tr#02:stmt@02 - slave run#0, after backup');
|
||||
COMMIT;
|
||||
--sync_slave_with_master
|
||||
|
||||
--echo ### Master: Remember MASTER STATUS and @@gtid_binlog_pos after tr#02
|
||||
--connection master
|
||||
--let $master_after_tr02_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
--let $master_after_tr02_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $master_after_tr02_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`
|
||||
|
||||
--echo ### Slave: Remember MASTER STATUS and @@gtid_binlog_pos after tr#02
|
||||
--connection slave
|
||||
--let $slave_after_tr02_show_master_status_file=query_get_value(SHOW MASTER STATUS, File, 1)
|
||||
--let $slave_after_tr02_show_master_status_position=query_get_value(SHOW MASTER STATUS, Position, 1)
|
||||
--let $slave_after_tr02_gtid_binlog_pos=`SELECT @@global.gtid_binlog_pos`
|
||||
|
||||
|
||||
--echo ### Master: Checking SHOW BINLOG EVENTS
|
||||
|
||||
--connection master
|
||||
--vertical_results
|
||||
### The BEGIN event
|
||||
--replace_column 4 # 5 #
|
||||
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position $master_after_tr02_gtid_binlog_pos master_after_tr02_gtid_binlog_pos
|
||||
--eval SHOW BINLOG EVENTS IN '$master_after_tr01_show_master_status_file' FROM $master_after_tr01_show_master_status_position LIMIT 0,1
|
||||
### The INSERT event
|
||||
--replace_column 2 # 4 # 5 #
|
||||
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position
|
||||
# Hide the difference between row and stmt binary logging
|
||||
--replace_regex /use `test`; // /(Query|Annotate_rows)/Query_or_Annotate_rows/
|
||||
--eval SHOW BINLOG EVENTS IN '$master_after_tr01_show_master_status_file' FROM $master_after_tr01_show_master_status_position LIMIT 1,1
|
||||
--horizontal_results
|
||||
|
||||
--echo ### Slave: Checking SHOW BINLOG EVENTS
|
||||
--connection slave
|
||||
--vertical_results
|
||||
### The BEGIN event
|
||||
--replace_column 2 # 5 #
|
||||
--replace_result $slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_file $slave_after_tr01_show_master_status_position slave_after_tr01_show_master_status_position $slave_after_tr02_gtid_binlog_pos slave_after_tr02_gtid_binlog_pos
|
||||
--eval SHOW BINLOG EVENTS IN '$slave_after_tr01_show_master_status_file' FROM $slave_after_tr01_show_master_status_position LIMIT 0,1
|
||||
### The INSERT event
|
||||
--replace_column 2 # 4 # 5 #
|
||||
--replace_result $slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_file $slave_after_tr01_show_master_status_position slave_after_tr01_show_master_status_position $slave_after_tr02_gtid_binlog_pos slave_after_tr02_gtid_binlog_pos
|
||||
# Hide the difference between row and stmt binary logging
|
||||
--replace_regex /use `test`; // /(Query|Annotate_rows)/Query_or_Annotate_rows/
|
||||
--eval SHOW BINLOG EVENTS IN '$slave_after_tr01_show_master_status_file' FROM $slave_after_tr01_show_master_status_position LIMIT 1,1
|
||||
--horizontal_results
|
||||
|
||||
--echo ### Slave: Stop replication
|
||||
--connection slave
|
||||
STOP SLAVE;
|
||||
--source include/wait_for_slave_to_stop.inc
|
||||
RESET SLAVE;
|
||||
|
||||
--echo ### Slave: Shutdown the server
|
||||
|
||||
if ($cnf == "mariadb_to_mariadb")
|
||||
{
|
||||
--let $rpl_server_number= 2
|
||||
--source include/rpl_stop_server.inc
|
||||
}
|
||||
|
||||
if ($cnf == "galera2_to_mariadb")
|
||||
{
|
||||
--connection slave
|
||||
--source $MYSQL_TEST_DIR/include/shutdown_mysqld.inc
|
||||
}
|
||||
|
||||
--echo ### Master: Run a transaction while the slave is shut down
|
||||
--connection master
|
||||
START TRANSACTION;
|
||||
INSERT INTO t1 VALUES ('tr#03:stmt#00 - after slave run#0, slave is shut down, after backup');
|
||||
INSERT INTO t1 VALUES ('tr#03:stmt#01 - after slave run#0, slave is shut down, after backup');
|
||||
INSERT INTO t1 VALUES ('tr#03:stmt#02 - after slave run#0, slave is shut down, after backup');
|
||||
COMMIT;
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Emulate starting a new virgin slave
|
||||
|
||||
--echo ### Slave: Remove the data directory
|
||||
--rmdir $MYSQLD_DATADIR_SLAVE
|
||||
|
||||
--echo ### Slave: Copy back the backup
|
||||
--exec $XTRABACKUP $XTRABACKUP_COPY_BACK_OPTIONS --copy-back --datadir=$MYSQLD_DATADIR_SLAVE --target-dir=$backup_slave
|
||||
|
||||
--echo ### Slave: Restart the server
|
||||
if ($cnf == "mariadb_to_mariadb")
|
||||
{
|
||||
--let $rpl_server_number= 2
|
||||
--source include/rpl_start_server.inc
|
||||
--source include/wait_until_connected_again.inc
|
||||
}
|
||||
|
||||
if ($cnf == "galera2_to_mariadb")
|
||||
{
|
||||
--connection slave
|
||||
--source $MYSQL_TEST_DIR/include/start_mysqld.inc
|
||||
}
|
||||
|
||||
--echo ### Slave: Display the restored data before START SLAVE
|
||||
--connection slave
|
||||
SELECT * FROM t1 ORDER BY a;
|
||||
|
||||
--echo ### Slave: Execute the CHANGE MASTER statement to set up the host and port
|
||||
--replace_result $MASTER_MYPORT ###
|
||||
--eval CHANGE MASTER '' TO MASTER_USER='root', MASTER_HOST='127.0.0.1', MASTER_PORT=$MASTER_MYPORT, MASTER_CONNECT_RETRY=1
|
||||
|
||||
--echo ### Slave: Execute the CHANGE MASTER statement from xtrabackup_slave_info
|
||||
--replace_result $master_after_tr01_show_master_status_file master_after_tr01_show_master_status_file $master_after_tr01_show_master_status_position master_after_tr01_show_master_status_position
|
||||
--source $backup_slave/xtrabackup_slave_info
|
||||
|
||||
--echo ### Slave: Execute START SLAVE
|
||||
--source include/start_slave.inc
|
||||
|
||||
--echo ### Master: Wait for the slave to apply all master events
|
||||
--connection master
|
||||
--sync_slave_with_master slave
|
||||
|
||||
--echo ### Slave: Make sure replication is not using GTID after the slave restart
|
||||
--connection slave
|
||||
--let $value= query_get_value(SHOW SLAVE STATUS, "Using_Gtid", 1)
|
||||
--echo # Using_Gtid=$value
|
||||
|
||||
--echo ### Slave: Display the restored data after START SLAVE
|
||||
--connection slave
|
||||
SELECT * FROM t1 ORDER BY a;
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Continue master transactions, check the new slave replicates well.
|
||||
|
||||
--echo ### Master: Run a transaction after restarting replication
|
||||
--connection master
|
||||
START TRANSACTION;
|
||||
INSERT INTO t1 VALUES ('tr#04:stmt#00 - slave run#1');
|
||||
INSERT INTO t1 VALUES ('tr#04:stmt#01 - slave run#1');
|
||||
INSERT INTO t1 VALUES ('tr#04:stmt#02 - slave run#1');
|
||||
COMMIT;
|
||||
--sync_slave_with_master
|
||||
|
||||
--echo ### Slave: Display the restored data + new transactions
|
||||
--connection slave
|
||||
SELECT * FROM t1 ORDER BY a;
|
||||
|
||||
|
||||
--echo ##############################################################
|
||||
--echo ### Cleanup
|
||||
|
||||
--echo ### Removing the backup directory
|
||||
--rmdir $backup_slave
|
||||
|
||||
--connection master
|
||||
DROP TABLE t1;
|
||||
--sync_slave_with_master
|
||||
|
||||
if ($cnf == "mariadb_to_mariadb")
|
||||
{
|
||||
--source include/rpl_end.inc
|
||||
}
|
||||
|
||||
if ($cnf == "galera2_to_mariadb")
|
||||
{
|
||||
STOP SLAVE;
|
||||
--source include/wait_for_slave_to_stop.inc
|
||||
RESET SLAVE ALL;
|
||||
|
||||
--connection master
|
||||
set global wsrep_on=OFF;
|
||||
RESET MASTER;
|
||||
set global wsrep_on=ON;
|
||||
}
|
@ -87,12 +87,16 @@ sub flush_out {
|
||||
$out_line = "";
|
||||
}
|
||||
|
||||
use if $^O eq "MSWin32", "threads::shared";
|
||||
my $flush_lock :shared;
|
||||
|
||||
# Print to stdout
|
||||
sub print_out {
|
||||
if(IS_WIN32PERL) {
|
||||
$out_line .= $_[0];
|
||||
# Flush buffered output on new lines.
|
||||
if (rindex($_[0], "\n") != -1) {
|
||||
lock($flush_lock);
|
||||
flush_out();
|
||||
}
|
||||
} else {
|
||||
|
@ -12,7 +12,6 @@ INSERT INTO t1 VALUES (1,REPEAT('a',100)),(2,REPEAT('v',200)),(3,REPEAT('r',300)
|
||||
INSERT INTO t1 VALUES (5,REPEAT('k',500)),(6,'April'),(7,7),(8,""),(9,"M"),(10,DEFAULT);
|
||||
ALTER TABLE t1 ANALYZE PARTITION p1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
ALTER TABLE t1 CHECK PARTITION p2;
|
||||
Table Op Msg_type Msg_text
|
||||
|
@ -4354,6 +4354,40 @@ a
|
||||
drop table t1, t2;
|
||||
drop view v1;
|
||||
drop procedure aproc;
|
||||
#
|
||||
# MDEV-31305: Aggregation over materialized derived table
|
||||
#
|
||||
CREATE VIEW v AS
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3;
|
||||
SELECT v.*, SUM(p) from v;
|
||||
dim1 dim2 dim3 p SUM(p)
|
||||
100 10 1 2 371
|
||||
SELECT d.*, SUM(p)
|
||||
FROM (
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
) d;
|
||||
dim1 dim2 dim3 p SUM(p)
|
||||
100 10 1 2 371
|
||||
WITH demo AS
|
||||
(
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
)
|
||||
SELECT d.*, SUM(p) FROM demo d;
|
||||
dim1 dim2 dim3 p SUM(p)
|
||||
100 10 1 2 371
|
||||
DROP VIEW v;
|
||||
# End of 10.4 tests
|
||||
#
|
||||
# MDEV-31143: view with ORDER BY used in query with rownum() in WHERE
|
||||
|
@ -2803,6 +2803,42 @@ drop table t1, t2;
|
||||
drop view v1;
|
||||
drop procedure aproc;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-31305: Aggregation over materialized derived table
|
||||
--echo #
|
||||
|
||||
--source include/have_sequence.inc
|
||||
|
||||
CREATE VIEW v AS
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3;
|
||||
|
||||
SELECT v.*, SUM(p) from v;
|
||||
|
||||
SELECT d.*, SUM(p)
|
||||
FROM (
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
) d;
|
||||
|
||||
WITH demo AS
|
||||
(
|
||||
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
|
||||
FLOOR(RAND(13) * 5) AS p
|
||||
FROM seq_100_to_105 seq1
|
||||
JOIN seq_10_to_15 seq2
|
||||
JOIN seq_1_to_5 seq3
|
||||
)
|
||||
SELECT d.*, SUM(p) FROM demo d;
|
||||
|
||||
DROP VIEW v;
|
||||
|
||||
--echo # End of 10.4 tests
|
||||
|
||||
--echo #
|
||||
|
322
mysql-test/main/distinct_notembedded.result
Normal file
322
mysql-test/main/distinct_notembedded.result
Normal file
@ -0,0 +1,322 @@
|
||||
#
|
||||
# MDEV-30660 COUNT DISTINCT seems unnecessarily slow when run on a PK
|
||||
#
|
||||
set @save_optimizer_trace = @@optimizer_trace;
|
||||
SET optimizer_trace='enabled=on';
|
||||
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
|
||||
# Optimization is applied (aggregator=simple):
|
||||
SELECT COUNT(DISTINCT a) FROM t1;
|
||||
COUNT(DISTINCT a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT a), SUM(DISTINCT b) FROM t1;
|
||||
AVG(DISTINCT a) SUM(DISTINCT b)
|
||||
2.0000 1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "sum(distinct t1.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Only `a` is unique but it's enough to eliminate DISTINCT:
|
||||
SELECT COUNT(DISTINCT b, a) FROM t1;
|
||||
COUNT(DISTINCT b, a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.b,t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT a, a + b) FROM t1;
|
||||
COUNT(DISTINCT a, a + b)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a,t1.a + t1.b)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT SUM(DISTINCT a), AVG(DISTINCT a), COUNT(DISTINCT a) FROM t1 WHERE a > 1;
|
||||
SUM(DISTINCT a) AVG(DISTINCT a) COUNT(DISTINCT a)
|
||||
5 2.5000 2
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "sum(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
# Optimization is not applied 'cause function argument is not a field
|
||||
# (aggregator=distinct):
|
||||
SELECT SUM(DISTINCT a + b) FROM t1;
|
||||
SUM(DISTINCT a + b)
|
||||
9
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "sum(distinct t1.a + t1.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT b) FROM t1;
|
||||
COUNT(DISTINCT b)
|
||||
1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT b / a) FROM t1;
|
||||
AVG(DISTINCT b / a)
|
||||
0.61110000
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.b / t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY t1 index NULL PRIMARY 4 NULL 3 Using index
|
||||
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct (/* select#2 */ select t1.a))",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
CREATE TABLE t2 (a INT);
|
||||
INSERT INTO t2 VALUES (1), (2);
|
||||
# Optimization is not applied 'cause there is more than one table
|
||||
SELECT COUNT(DISTINCT t1.a) FROM t1, t2;
|
||||
COUNT(DISTINCT t1.a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT t1.a) FROM t1, t2;
|
||||
AVG(DISTINCT t1.a)
|
||||
2.0000
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Const tables, optimization is applied
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1) AS t2;
|
||||
COUNT(DISTINCT a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT AVG(DISTINCT t1.a) FROM (SELECT 1 AS a) AS t2, t1, (SELECT 2 AS a) AS t3;
|
||||
AVG(DISTINCT t1.a)
|
||||
2.0000
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "avg(distinct t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1 UNION SELECT 2) AS t2;
|
||||
COUNT(DISTINCT a)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Unique index on two columns
|
||||
CREATE TABLE t3 (a INT NOT NULL, b INT NOT NULL);
|
||||
INSERT INTO t3 VALUES (1,1), (1,2), (1,3), (2,1), (2,2), (3,1), (3,2);
|
||||
CREATE UNIQUE INDEX t3_a_b ON t3 (a, b);
|
||||
# Optimization is applied:
|
||||
SELECT COUNT(DISTINCT a, b) FROM t3;
|
||||
COUNT(DISTINCT a, b)
|
||||
7
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.a,t3.b)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3;
|
||||
COUNT(DISTINCT b, a)
|
||||
7
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b,t3.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3 WHERE a < 3;
|
||||
COUNT(DISTINCT b, a)
|
||||
5
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b,t3.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
# Optimization is applied to one of the functions:
|
||||
SELECT COUNT(DISTINCT b), SUM(DISTINCT a), SUM(DISTINCT a + b) FROM t3 GROUP BY a;
|
||||
COUNT(DISTINCT b) SUM(DISTINCT a) SUM(DISTINCT a + b)
|
||||
3 1 9
|
||||
2 2 7
|
||||
2 3 9
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b)",
|
||||
"aggregator_type": "simple"
|
||||
},
|
||||
{
|
||||
"function": "sum(distinct t3.a)",
|
||||
"aggregator_type": "distinct"
|
||||
},
|
||||
{
|
||||
"function": "sum(distinct t3.a + t3.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Can't apply optimization 'cause GROUP BY argument is not a field:
|
||||
SELECT COUNT(DISTINCT b) FROM t3 GROUP BY a+b;
|
||||
COUNT(DISTINCT b)
|
||||
1
|
||||
2
|
||||
3
|
||||
1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t3.b)",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
# Test merged view
|
||||
CREATE VIEW v1 AS SELECT * FROM t1;
|
||||
# Optimization is applied
|
||||
SELECT COUNT(DISTINCT a, b) FROM v1;
|
||||
COUNT(DISTINCT a, b)
|
||||
3
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "count(distinct t1.a,t1.b)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
# GROUP_CONCAT implements non-standard distinct aggregator
|
||||
SELECT GROUP_CONCAT(b) FROM t1;
|
||||
GROUP_CONCAT(b)
|
||||
1,1,1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "group_concat(t1.b separator ',')",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
]
|
||||
SELECT GROUP_CONCAT(DISTINCT b) FROM t1;
|
||||
GROUP_CONCAT(DISTINCT b)
|
||||
1
|
||||
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
|
||||
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"function": "group_concat(distinct t1.b separator ',')",
|
||||
"aggregator_type": "distinct"
|
||||
}
|
||||
]
|
||||
DROP TABLE t1, t2, t3;
|
||||
DROP VIEW v1;
|
||||
SET optimizer_trace = @save_optimizer_trace;
|
||||
#
|
||||
# end of 10.5 tests
|
||||
#
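
Taken together, the result file above illustrates the MDEV-30660 rule: when the DISTINCT argument list covers a unique, non-nullable key of the single base table, prepare_sum_aggregators downgrades the aggregate to the simple aggregator. A minimal self-contained sketch of the same check follows; it assumes a non-embedded build with optimizer trace available, and the table and column names are illustrative, not part of the patch.

SET optimizer_trace='enabled=on';
CREATE TABLE demo_pk (id INT NOT NULL PRIMARY KEY, grp INT NOT NULL);
INSERT INTO demo_pk VALUES (1,1),(2,1),(3,2);
-- DISTINCT over the primary key: expect "aggregator_type": "simple" in the trace
SELECT COUNT(DISTINCT id) FROM demo_pk;
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
-- DISTINCT over a non-unique column: expect "aggregator_type": "distinct"
SELECT COUNT(DISTINCT grp) FROM demo_pk;
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
DROP TABLE demo_pk;
SET optimizer_trace=DEFAULT;
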
109
mysql-test/main/distinct_notembedded.test
Normal file
@ -0,0 +1,109 @@
# Embedded doesn't have optimizer trace:
--source include/not_embedded.inc
--source include/have_sequence.inc

--echo #
--echo # MDEV-30660 COUNT DISTINCT seems unnecessarily slow when run on a PK
--echo #

set @save_optimizer_trace = @@optimizer_trace;
SET optimizer_trace='enabled=on';
let $trace=
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '\$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;

CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL);
INSERT INTO t1 VALUES (1,1), (2,1), (3,1);

--echo # Optimization is applied (aggregator=simple):
SELECT COUNT(DISTINCT a) FROM t1;
eval $trace;

SELECT AVG(DISTINCT a), SUM(DISTINCT b) FROM t1;
eval $trace;

--echo # Only `a` is unique but it's enough to eliminate DISTINCT:
|
||||
SELECT COUNT(DISTINCT b, a) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT a, a + b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT SUM(DISTINCT a), AVG(DISTINCT a), COUNT(DISTINCT a) FROM t1 WHERE a > 1;
|
||||
eval $trace;
|
||||
|
||||
--echo # Optimization is not applied 'cause function argument is not a field
|
||||
--echo # (aggregator=distinct):
|
||||
SELECT SUM(DISTINCT a + b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT b / a) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
CREATE TABLE t2 (a INT);
|
||||
INSERT INTO t2 VALUES (1), (2);
|
||||
|
||||
--echo # Optimization is not applied 'cause there is more than one table
|
||||
SELECT COUNT(DISTINCT t1.a) FROM t1, t2;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT t1.a) FROM t1, t2;
|
||||
eval $trace;
|
||||
|
||||
--echo # Const tables, optimization is applied
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1) AS t2;
|
||||
eval $trace;
|
||||
|
||||
SELECT AVG(DISTINCT t1.a) FROM (SELECT 1 AS a) AS t2, t1, (SELECT 2 AS a) AS t3;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1 UNION SELECT 2) AS t2;
|
||||
eval $trace;
|
||||
|
||||
--echo # Unique index on two columns
|
||||
CREATE TABLE t3 (a INT NOT NULL, b INT NOT NULL);
|
||||
INSERT INTO t3 VALUES (1,1), (1,2), (1,3), (2,1), (2,2), (3,1), (3,2);
|
||||
CREATE UNIQUE INDEX t3_a_b ON t3 (a, b);
|
||||
--echo # Optimization is applied:
|
||||
SELECT COUNT(DISTINCT a, b) FROM t3;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3;
|
||||
eval $trace;
|
||||
|
||||
SELECT COUNT(DISTINCT b, a) FROM t3 WHERE a < 3;
|
||||
eval $trace;
|
||||
|
||||
--echo # Optimization is applied to one of the functions:
|
||||
SELECT COUNT(DISTINCT b), SUM(DISTINCT a), SUM(DISTINCT a + b) FROM t3 GROUP BY a;
|
||||
eval $trace;
|
||||
|
||||
--echo # Can't apply optimization 'cause GROUP BY argument is not a field:
|
||||
SELECT COUNT(DISTINCT b) FROM t3 GROUP BY a+b;
|
||||
eval $trace;
|
||||
|
||||
--echo # Test merged view
|
||||
CREATE VIEW v1 AS SELECT * FROM t1;
|
||||
--echo # Optimization is applied
|
||||
SELECT COUNT(DISTINCT a, b) FROM v1;
|
||||
eval $trace;
|
||||
|
||||
--echo # GROUP_CONCAT implements non-standard distinct aggregator
|
||||
SELECT GROUP_CONCAT(b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
SELECT GROUP_CONCAT(DISTINCT b) FROM t1;
|
||||
eval $trace;
|
||||
|
||||
DROP TABLE t1, t2, t3;
|
||||
DROP VIEW v1;
|
||||
SET optimizer_trace = @save_optimizer_trace;
|
||||
--echo #
|
||||
--echo # end of 10.5 tests
|
||||
--echo #
|
@ -5487,3 +5487,37 @@ Warning 1292 Truncated incorrect BINARY(2) value: '...random bytes...'
|
||||
#
|
||||
# End of 10.10 tests
|
||||
#
|
||||
#
|
||||
# Start of 10.11 tests
|
||||
#
|
||||
#
|
||||
# MDEV-33392 Server crashes when using RANDOM_BYTES function and GROUP BY clause on a column with a negative value
|
||||
#
|
||||
SET sql_mode='';
|
||||
CREATE TABLE t1 (a VARCHAR(255)) ENGINE=MyISAM;
|
||||
INSERT INTO t1 VALUES (9494),(9495),(9496),(9497),(9498),(9499),(9500),(9501),(9502),(9503);
|
||||
SELECT RANDOM_BYTES (-1) f1,a f2 FROM t1 GROUP BY f1,f2;
|
||||
f1 f2
|
||||
NULL 9494
|
||||
NULL 9495
|
||||
NULL 9496
|
||||
NULL 9497
|
||||
NULL 9498
|
||||
NULL 9499
|
||||
NULL 9500
|
||||
NULL 9501
|
||||
NULL 9502
|
||||
NULL 9503
|
||||
CREATE TABLE t2 AS SELECT RANDOM_BYTES (-1) f1,a f2 FROM t1 GROUP BY f1,f2;
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE `t2` (
|
||||
`f1` binary(0) DEFAULT NULL,
|
||||
`f2` varchar(255) DEFAULT NULL
|
||||
) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
|
||||
DROP TABLE t2;
|
||||
DROP TABLE t1;
|
||||
SET sql_mode=DEFAULT;
|
||||
#
|
||||
# End of 10.11 tests
|
||||
#
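
For contrast with the invalid length used in the regression test above, here is a short hedged example of normal RANDOM_BYTES usage; the accepted length starts at 1 (up to a server-defined maximum, 1024 in the upstream documentation), and the column aliases are illustrative only.

-- Returns 8 random bytes; HEX() makes them printable (16 hex digits per row).
SELECT HEX(RANDOM_BYTES(8)) AS rnd8;
-- A negative length yields NULL, as the result rows above show.
SELECT RANDOM_BYTES(-1) IS NULL AS is_null;
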
|
||||
|
@ -2457,3 +2457,25 @@ select "a" in ("abc", (convert(random_bytes(8) ,binary(2))));
--echo #
--echo # End of 10.10 tests
--echo #

--echo #
--echo # Start of 10.11 tests
--echo #

--echo #
--echo # MDEV-33392 Server crashes when using RANDOM_BYTES function and GROUP BY clause on a column with a negative value
--echo #

SET sql_mode='';
CREATE TABLE t1 (a VARCHAR(255)) ENGINE=MyISAM;
INSERT INTO t1 VALUES (9494),(9495),(9496),(9497),(9498),(9499),(9500),(9501),(9502),(9503);
SELECT RANDOM_BYTES (-1) f1,a f2 FROM t1 GROUP BY f1,f2;
CREATE TABLE t2 AS SELECT RANDOM_BYTES (-1) f1,a f2 FROM t1 GROUP BY f1,f2;
SHOW CREATE TABLE t2;
DROP TABLE t2;
DROP TABLE t1;
SET sql_mode=DEFAULT;

--echo #
--echo # End of 10.11 tests
--echo #

@ -6,6 +6,8 @@
|
||||
--source include/no_valgrind_without_big.inc
|
||||
--source include/have_sequence.inc
|
||||
|
||||
--source include/innodb_stable_estimates.inc
|
||||
|
||||
SET SESSION DEFAULT_STORAGE_ENGINE='InnoDB';
|
||||
|
||||
set @innodb_stats_persistent_save= @@innodb_stats_persistent;
|
||||
|
0
mysql-test/main/lowercase_table2.result
Executable file → Normal file
@ -4,7 +4,7 @@ let $sys_errno=0;
|
||||
# Error 100 is returned by the powershell script
|
||||
# if MySql.Data is not installed
|
||||
--error 0,100
|
||||
--exec powershell -NoLogo -NoProfile -File main\mysql_connector_net.ps1
|
||||
--exec powershell -ExecutionPolicy Bypass -NoLogo -NoProfile -File main\mysql_connector_net.ps1
|
||||
if ($sys_errno != 0)
|
||||
{
|
||||
--skip Connector/NET is not installed
|
||||
|
@ -1790,6 +1790,12 @@ set statement optimizer_scan_setup_cost=0 for EXPLAIN SELECT MIN(d) FROM t1 wher
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"prepare_sum_aggregators": {
|
||||
"function": "min(t1.d)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -2010,6 +2016,18 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
|
||||
},
|
||||
{
|
||||
"test_if_skip_sort_order": []
|
||||
},
|
||||
{
|
||||
"prepare_sum_aggregators": {
|
||||
"function": "min(t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
},
|
||||
{
|
||||
"prepare_sum_aggregators": {
|
||||
"function": "max(t1.a)",
|
||||
"aggregator_type": "simple"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@ -12238,6 +12256,25 @@ JS
drop table t1,t2,t3,t10,t11;
set optimizer_trace=DEFAULT;
#
# MDEV-29179 Condition pushdown from HAVING into WHERE is not shown in optimizer trace
#
CREATE TABLE t1 (a INT, b VARCHAR(1), KEY (a), KEY(b,a)) ENGINE=MEMORY;
INSERT INTO t1 VALUES (4,'n'),(1,'h'),(NULL,'w');
SET optimizer_trace= 'enabled=on';
SELECT b, a FROM t1 WHERE b <> 'p' OR a = 4 GROUP BY b, a HAVING a <= 7;
b a
h 1
n 4
SELECT json_detailed(json_extract(trace, '$**.steps[*].join_optimization.steps[*].condition_pushdown_from_having') ) exp1, JSON_VALID(trace) exp2 FROM information_schema.optimizer_trace;
exp1 exp2
[
    {
        "conds": "(t1.b <> 'p' or multiple equal(4, t1.a)) and t1.a <= 7",
        "having": null
    }
] 1
DROP TABLE t1;
#
# End of 10.4 tests
#
set optimizer_trace='enabled=on';
@ -775,6 +775,16 @@ from information_schema.optimizer_trace;
|
||||
drop table t1,t2,t3,t10,t11;
|
||||
set optimizer_trace=DEFAULT;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-29179 Condition pushdown from HAVING into WHERE is not shown in optimizer trace
|
||||
--echo #
|
||||
|
||||
CREATE TABLE t1 (a INT, b VARCHAR(1), KEY (a), KEY(b,a)) ENGINE=MEMORY;
|
||||
INSERT INTO t1 VALUES (4,'n'),(1,'h'),(NULL,'w');
|
||||
SET optimizer_trace= 'enabled=on';
|
||||
SELECT b, a FROM t1 WHERE b <> 'p' OR a = 4 GROUP BY b, a HAVING a <= 7;
SELECT json_detailed(json_extract(trace, '$**.steps[*].join_optimization.steps[*].condition_pushdown_from_having') ) exp1, JSON_VALID(trace) exp2 FROM information_schema.optimizer_trace;
|
||||
DROP TABLE t1;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
@ -2063,7 +2063,6 @@ ALTER TABLE t1 ANALYZE PARTITION p1 EXTENDED;
|
||||
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXTENDED' at line 1
|
||||
ALTER TABLE t1 ANALYZE PARTITION p1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
ALTER TABLE t1 CHECK PARTITION p1;
|
||||
Table Op Msg_type Msg_text
|
||||
|
@ -27,7 +27,6 @@ Table Op Msg_type Msg_text
|
||||
test.t1 repair error Wrong partition name or partition list
|
||||
ALTER TABLE t1 ANALYZE PARTITION p0;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
ALTER TABLE t1 CHECK PARTITION p0;
|
||||
Table Op Msg_type Msg_text
|
||||
|
@ -5808,5 +5808,123 @@ GROUP_CONCAT(@x)
|
||||
0
|
||||
DROP TABLE t;
|
||||
#
|
||||
# MDEV-15703: Crash in EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT
|
||||
#
|
||||
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
|
||||
EXECUTE stmt USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
DEALLOCATE PREPARE stmt;
|
||||
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
|
||||
EXECUTE stmt USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
DEALLOCATE PREPARE stmt;
|
||||
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
CREATE PROCEDURE p1(a INT) SELECT 1;
|
||||
EXECUTE IMMEDIATE 'CALL p1(?)' USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'CALL p1(?)' USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
DROP PROCEDURE p1;
|
||||
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING DEFAULT;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING IGNORE;
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
# multi-update and DEFAULT
|
||||
CREATE TABLE t1 (a INT, b INT DEFAULT a);
|
||||
INSERT into t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT DEFAULT a);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
UPDATE t1,t2 SET t1.b = DEFAULT, t2.b = DEFAULT WHERE t1.a=t2.a;
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
1 1
|
||||
2 2
|
||||
SELECT * FROM t2;
|
||||
a b
|
||||
1 1
|
||||
2 2
|
||||
# re-check the case for Prepared Statement with parameters
|
||||
TRUNCATE TABLE t1;
|
||||
TRUNCATE TABLE t2;
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
1 1
|
||||
2 2
|
||||
SELECT * FROM t2;
|
||||
a b
|
||||
1 1
|
||||
2 2
|
||||
DROP TABLE t1, t2;
|
||||
# multi-update and IGNORE
|
||||
CREATE TABLE t1 (a INT, b INT default a);
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT default a);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
UPDATE t1,t2 SET t1.b = IGNORE, t2.b = IGNORE WHERE t1.a=t2.a;
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
1 2
|
||||
2 3
|
||||
SELECT * FROM t2;
|
||||
a b
|
||||
1 NULL
|
||||
2 NULL
|
||||
# re-check the case for Prepared Statement with parameters
|
||||
TRUNCATE TABLE t1;
|
||||
TRUNCATE TABLE t2;
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
1 2
|
||||
2 3
|
||||
SELECT * FROM t2;
|
||||
a b
|
||||
1 10
|
||||
2 30
|
||||
DROP TABLE t1, t2;
|
||||
# multi-update and DEFAULT parameter (no default)
|
||||
CREATE TABLE t1 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
|
||||
ERROR HY000: Field 'b' doesn't have a default value
|
||||
DROP TABLE t1, t2;
|
||||
# multi-update and IGNORE parameter (no default)
|
||||
CREATE TABLE t1 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
|
||||
SELECT * FROM t1;
|
||||
a b
|
||||
1 2
|
||||
2 3
|
||||
SELECT * FROM t2;
|
||||
a b
|
||||
1 10
|
||||
2 30
|
||||
DROP TABLE t1, t2;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
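
The errors above are specific to contexts where DEFAULT/IGNORE cannot be bound; the multi-update cases show where binding does work. A hedged single-table sketch of the supported usage follows (the table, column default and values are illustrative, not taken from the test).

CREATE TABLE demo_def (a INT, b INT DEFAULT 42);
-- Binding DEFAULT as a parameter value assigns the column default (42 here).
EXECUTE IMMEDIATE 'INSERT INTO demo_def VALUES (?, ?)' USING 1, DEFAULT;
-- Binding IGNORE in an UPDATE keeps the current value of the column.
EXECUTE IMMEDIATE 'UPDATE demo_def SET b = ? WHERE a = 1' USING IGNORE;
SELECT * FROM demo_def;
DROP TABLE demo_def;
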
|
||||
|
@ -5248,6 +5248,125 @@ EXECUTE IMMEDIATE 'SELECT GROUP_CONCAT(@x) FROM t GROUP BY @x := f';
|
||||
|
||||
DROP TABLE t;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-15703: Crash in EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT
|
||||
--echo #
|
||||
|
||||
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE stmt USING DEFAULT;
|
||||
DEALLOCATE PREPARE stmt;
|
||||
|
||||
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE stmt USING IGNORE;
|
||||
DEALLOCATE PREPARE stmt;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING IGNORE;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING DEFAULT;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING IGNORE;
|
||||
|
||||
CREATE PROCEDURE p1(a INT) SELECT 1;
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'CALL p1(?)' USING DEFAULT;
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'CALL p1(?)' USING IGNORE;
|
||||
DROP PROCEDURE p1;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING DEFAULT;
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING IGNORE;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING DEFAULT;
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING IGNORE;
|
||||
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING DEFAULT;
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING IGNORE;
|
||||
|
||||
--echo # multi-update and DEFAULT
|
||||
CREATE TABLE t1 (a INT, b INT DEFAULT a);
|
||||
INSERT into t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT DEFAULT a);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
|
||||
UPDATE t1,t2 SET t1.b = DEFAULT, t2.b = DEFAULT WHERE t1.a=t2.a;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
|
||||
--echo # re-check the case for Prepared Statement with parameters
|
||||
TRUNCATE TABLE t1;
|
||||
TRUNCATE TABLE t2;
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo # multi-update and IGNORE
|
||||
CREATE TABLE t1 (a INT, b INT default a);
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT default a);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
|
||||
UPDATE t1,t2 SET t1.b = IGNORE, t2.b = IGNORE WHERE t1.a=t2.a;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
|
||||
--echo # re-check the case for Prepared Statement with parameters
|
||||
TRUNCATE TABLE t1;
|
||||
TRUNCATE TABLE t2;
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo # multi-update and DEFAULT parameter (no default)
|
||||
CREATE TABLE t1 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
|
||||
--error ER_NO_DEFAULT_FOR_FIELD
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo # multi-update and IGNORE parameter (no default)
|
||||
CREATE TABLE t1 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t1 VALUES (1,2),(2,3);
|
||||
CREATE TABLE t2 (a INT, b INT NOT NULL);
|
||||
INSERT INTO t2 VALUES (1,10),(2,30);
|
||||
|
||||
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
|
||||
# Cleanup
|
||||
DROP TABLE t1, t2;
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
@ -9,23 +9,27 @@ use test;
|
||||
connection slave;
|
||||
-- SET GLOBAL gtid_slave_pos='';
|
||||
CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
STOP ALL SLAVES;
|
||||
-- SET GLOBAL gtid_slave_pos='';
|
||||
CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
START ALL SLAVES;
|
||||
STOP ALL SLAVES;
|
||||
-- SET GLOBAL gtid_slave_pos='';
|
||||
CHANGE MASTER '' TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_MYPORT, MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
START ALL SLAVES;
|
||||
start slave;
|
||||
Warnings:
|
||||
Note 1254 Slave is already running
|
||||
-- SET GLOBAL gtid_slave_pos='';
|
||||
CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
start slave;
|
||||
Warnings:
|
||||
Note 1254 Slave is already running
|
||||
*** Test mysqldump --dump-slave GTID functionality.
|
||||
*** Test mysqldump --dump-slave GTID/non-gtid functionality.
|
||||
connection master;
|
||||
SET gtid_seq_no = 1000;
|
||||
CREATE TABLE t1 (a INT PRIMARY KEY);
|
||||
@ -35,36 +39,170 @@ connection slave;
|
||||
CREATE TABLE t2 (a INT PRIMARY KEY);
|
||||
DROP TABLE t2;
|
||||
|
||||
1. --dump-slave=1
|
||||
1. --dump-slave=1 --gtid
|
||||
|
||||
SET GLOBAL gtid_slave_pos='0-1-1001';
|
||||
CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos;
|
||||
-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
2. --dump-slave=2
|
||||
|
||||
1a. --dump-slave=1
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-1-1001';
|
||||
CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
|
||||
2. --dump-slave=2 --gtid
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-1-1001';
|
||||
-- CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos;
|
||||
-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
*** Test mysqldump --master-data GTID functionality.
|
||||
|
||||
1. --master-data=1
|
||||
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
2. --dump-slave=2
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-1-1001';
|
||||
-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
*** Test mysqldump --master-data GTID/non-gtid functionality.
|
||||
|
||||
1. --master-data=1 --gtid
|
||||
|
||||
CHANGE MASTER TO MASTER_USE_GTID=slave_pos;
|
||||
SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
|
||||
2. --master-data=2
|
||||
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
1a. --master-data=1
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
2. --master-data=2 --gtid
|
||||
|
||||
-- CHANGE MASTER TO MASTER_USE_GTID=slave_pos;
|
||||
-- SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
2a. --master-data=2
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
3. --master-data --single-transaction --gtid
|
||||
|
||||
CHANGE MASTER TO MASTER_USE_GTID=slave_pos;
|
||||
SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
3a. --master-data --single-transaction
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
4. --master-data=2 --dump-slave=2 --single-transaction --gtid (MDEV-4827)
|
||||
|
||||
-- MariaDB dump--
|
||||
-- Host: localhost Database: test
|
||||
-- ------------------------------------------------------
|
||||
-- Server version
|
||||
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
|
||||
/*!40101 SET NAMES utf8mb4 */;
|
||||
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
|
||||
/*!40103 SET TIME_ZONE='+00:00' */;
|
||||
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
|
||||
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
|
||||
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
|
||||
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
|
||||
|
||||
-- Preferably use GTID to start replication from GTID position:
|
||||
|
||||
-- CHANGE MASTER TO MASTER_USE_GTID=slave_pos;
|
||||
-- SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
|
||||
3. --master-data --single-transaction
|
||||
--
|
||||
-- Alternately, following is the position of the binary logging from SHOW MASTER STATUS at point of backup.
|
||||
-- Use this when creating a replica of the primary server where the backup was made.
|
||||
-- The new server will be connecting to the primary server where the backup was taken.
|
||||
--
|
||||
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
CHANGE MASTER TO MASTER_USE_GTID=slave_pos;
|
||||
SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
|
||||
--
|
||||
-- The following is the SQL position of the replication taken from SHOW SLAVE STATUS at the time of backup.
|
||||
-- Use this position when creating a clone of, or replacement server, from where the backup was taken.
|
||||
-- This new server will connects to the same primary server(s).
|
||||
--
|
||||
-- GTID position to start replication:
|
||||
-- SET GLOBAL gtid_slave_pos='0-1-1001';
|
||||
|
||||
-- Use only the MASTER_USE_GTID=slave_pos or MASTER_LOG_FILE/MASTER_LOG_POS in the statements below.
|
||||
|
||||
-- CHANGE MASTER '' TO MASTER_USE_GTID=slave_pos;
|
||||
-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
|
||||
|
||||
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
|
||||
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
|
||||
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
|
||||
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
||||
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
|
||||
|
||||
-- Dump completed
|
||||
|
||||
4a. --master-data=2 --dump-slave=2 --single-transaction (MDEV-4827)
|
||||
|
||||
-- MariaDB dump--
|
||||
-- Host: localhost Database: test
|
||||
-- ------------------------------------------------------
|
||||
-- Server version
|
||||
/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
|
||||
/*!40101 SET NAMES utf8mb4 */;
|
||||
/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */;
|
||||
/*!40103 SET TIME_ZONE='+00:00' */;
|
||||
/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */;
|
||||
/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */;
|
||||
/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */;
|
||||
/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */;
|
||||
|
||||
-- Preferably use GTID to start replication from GTID position:
|
||||
|
||||
-- SET GLOBAL gtid_slave_pos='0-2-1003';
|
||||
|
||||
--
|
||||
-- Alternately, following is the position of the binary logging from SHOW MASTER STATUS at point of backup.
|
||||
-- Use this when creating a replica of the primary server where the backup was made.
|
||||
-- The new server will be connecting to the primary server where the backup was taken.
|
||||
--
|
||||
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='slave-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
--
|
||||
-- The following is the SQL position of the replication taken from SHOW SLAVE STATUS at the time of backup.
|
||||
-- Use this position when creating a clone of, or replacement server, from where the backup was taken.
|
||||
-- This new server will connects to the same primary server(s).
|
||||
--
|
||||
-- GTID position to start replication:
|
||||
-- SET GLOBAL gtid_slave_pos='0-1-1001';
|
||||
-- CHANGE MASTER '' TO MASTER_LOG_FILE='master-bin.000001', MASTER_LOG_POS=BINLOG_START;
|
||||
|
||||
/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */;
|
||||
|
||||
/*!40101 SET SQL_MODE=@OLD_SQL_MODE */;
|
||||
/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */;
|
||||
/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */;
|
||||
/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
|
||||
/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
|
||||
/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
|
||||
/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */;
|
||||
|
||||
-- Dump completed
|
||||
connection master;
|
||||
CREATE TABLE t (
|
||||
id int
|
||||
@ -77,8 +215,8 @@ include/stop_slave.inc
|
||||
change master to master_use_gtid=slave_pos;
|
||||
connection master;
|
||||
# Ensuring the binlog dump thread is killed on primary...
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=BINLOG_START;
|
||||
-- SET GLOBAL gtid_slave_pos='0-1-1005';
|
||||
-- CHANGE MASTER TO MASTER_LOG_FILE='master-bin.000002', MASTER_LOG_POS=BINLOG_START;
|
||||
connection slave;
|
||||
include/start_slave.inc
|
||||
include/rpl_end.inc
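
The commented headers checked above are meant to be pasted on a freshly restored replica. A hedged sketch of how the GTID variant is typically applied (host, credentials and the GTID value are placeholders, not taken from this test):

-- Run on the new replica after loading the dump:
SET GLOBAL gtid_slave_pos='0-2-1003';
CHANGE MASTER TO
  MASTER_HOST='primary.example.com',
  MASTER_PORT=3306,
  MASTER_USER='repl',
  MASTER_PASSWORD='replica_password',
  MASTER_USE_GTID=slave_pos;
START SLAVE;
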
|
||||
|
@ -37,7 +37,7 @@ start slave;
|
||||
start slave;
|
||||
|
||||
|
||||
--echo *** Test mysqldump --dump-slave GTID functionality.
|
||||
--echo *** Test mysqldump --dump-slave GTID/non-gtid functionality.
|
||||
|
||||
--connection master
|
||||
SET gtid_seq_no = 1000;
|
||||
@ -52,37 +52,80 @@ CREATE TABLE t2 (a INT PRIMARY KEY);
|
||||
DROP TABLE t2;
|
||||
|
||||
--echo
|
||||
--echo 1. --dump-slave=1
|
||||
--echo 1. --dump-slave=1 --gtid
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=1 --gtid test
|
||||
|
||||
--echo
|
||||
--echo 2. --dump-slave=2
|
||||
--echo 1a. --dump-slave=1
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=1 test
|
||||
|
||||
--echo
|
||||
--echo 2. --dump-slave=2 --gtid
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=2 --gtid test
|
||||
|
||||
|
||||
--echo *** Test mysqldump --master-data GTID functionality.
|
||||
--echo
|
||||
--echo 1. --master-data=1
|
||||
--echo 2. --dump-slave=2
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --dump-slave=2 test
|
||||
|
||||
|
||||
--echo *** Test mysqldump --master-data GTID/non-gtid functionality.
|
||||
--echo
|
||||
--echo 1. --master-data=1 --gtid
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --master-data=1 --gtid test
|
||||
|
||||
--echo
|
||||
--echo 2. --master-data=2
|
||||
--echo 1a. --master-data=1
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --master-data=1 test
|
||||
|
||||
--echo
|
||||
--echo 2. --master-data=2 --gtid
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --master-data=2 --gtid test
|
||||
|
||||
--echo
|
||||
--echo 3. --master-data --single-transaction
|
||||
--echo 2a. --master-data=2
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --master-data=2 test
|
||||
|
||||
--echo
|
||||
--echo 3. --master-data --single-transaction --gtid
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --master-data --single-transaction --gtid test
|
||||
|
||||
--echo
|
||||
--echo 3a. --master-data --single-transaction
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/
|
||||
--exec $MYSQL_DUMP_SLAVE --compact --master-data --single-transaction test
|
||||
|
||||
--echo
|
||||
--echo 4. --master-data=2 --dump-slave=2 --single-transaction --gtid (MDEV-4827)
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ /MariaDB dump.*/MariaDB dump/ /Dump completed.*/Dump completed/ /Server version.*/Server version/
|
||||
--exec $MYSQL_DUMP_SLAVE --master-data=2 --dump-slave=2 --single-transaction --gtid test
|
||||
--echo
|
||||
|
||||
--echo
|
||||
--echo 4a. --master-data=2 --dump-slave=2 --single-transaction (MDEV-4827)
|
||||
--echo
|
||||
--replace_regex /MASTER_LOG_POS=[0-9]+/MASTER_LOG_POS=BINLOG_START/ /MariaDB dump.*/MariaDB dump/ /Dump completed.*/Dump completed/ /Server version.*/Server version/
|
||||
--exec $MYSQL_DUMP_SLAVE --master-data=2 --dump-slave=2 --single-transaction test
|
||||
--echo
|
||||
#
|
||||
# MDEV-32611 Added test for mysqldump --delete-master-logs option.
|
||||
# This option is an alias of
|
||||
|
@ -88,15 +88,151 @@ sel
|
||||
]
|
||||
set optimizer_trace=@tmp;
|
||||
drop table t0,t1,t10;
|
||||
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
|
||||
set histogram_size=@save_histogram_size;
|
||||
set use_stat_tables= @save_use_stat_tables;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
#
|
||||
# MDEV-33314: Crash inside calculate_cond_selectivity_for_table() with many columns
|
||||
#
|
||||
set optimizer_use_condition_selectivity= 4;
|
||||
set use_stat_tables= preferably;
|
||||
#
|
||||
# create table t1 (col0 int, col1 int, col2 int, ...);
|
||||
#
|
||||
$create_tbl;
|
||||
#
|
||||
# insert into t1 select seq, ... seq from seq_1_to_10;
|
||||
#
|
||||
$insert_cmd;
|
||||
analyze table t1 persistent for all;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
set @trace_tmp=@@optimizer_trace;
|
||||
set optimizer_trace=1;
|
||||
#
|
||||
# Basic testcase: don't crash for many-column selectivity
|
||||
# explain extended select * from t1 where col0>1 and col1>1 and col2>1 and ...
|
||||
#
|
||||
$query_tbl;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"cost": 0.0322836,
|
||||
"nested_loop": [
|
||||
{
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ALL",
|
||||
"loops": 1,
|
||||
"rows": 100,
|
||||
"cost": 0.0322836,
|
||||
"filtered": 53.32928848,
|
||||
"attached_condition": "t1.col0 > 1 and t1.col1 > 1 and t1.col2 > 1 and t1.col3 > 1 and t1.col4 > 1 and t1.col5 > 1 and t1.col6 > 1 and t1.col7 > 1 and t1.col8 > 1 and t1.col9 > 1 and t1.col10 > 1 and t1.col11 > 1 and t1.col12 > 1 and t1.col13 > 1 and t1.col14 > 1 and t1.col15 > 1 and t1.col16 > 1 and t1.col17 > 1 and t1.col18 > 1 and t1.col19 > 1 and t1.col20 > 1 and t1.col21 > 1 and t1.col22 > 1 and t1.col23 > 1 and t1.col24 > 1 and t1.col25 > 1 and t1.col26 > 1 and t1.col27 > 1 and t1.col28 > 1 and t1.col29 > 1 and t1.col30 > 1 and t1.col31 > 1 and t1.col32 > 1 and t1.col33 > 1 and t1.col34 > 1 and t1.col35 > 1 and t1.col36 > 1 and t1.col37 > 1 and t1.col38 > 1 and t1.col39 > 1 and t1.col40 > 1 and t1.col41 > 1 and t1.col42 > 1 and t1.col43 > 1 and t1.col44 > 1 and t1.col45 > 1 and t1.col46 > 1 and t1.col47 > 1 and t1.col48 > 1 and t1.col49 > 1 and t1.col50 > 1 and t1.col51 > 1 and t1.col52 > 1 and t1.col53 > 1 and t1.col54 > 1 and t1.col55 > 1 and t1.col56 > 1 and t1.col57 > 1 and t1.col58 > 1 and t1.col59 > 1 and t1.col60 > 1 and t1.col61 > 1 and t1.col62 > 1 and t1.col63 > 1 and t1.col64 > 1 and t1.col65 > 1 and t1.col66 > 1 and t1.col67 > 1 and t1.col68 > 1 and t1.col69 > 1 and t1.col70 > 1 and t1.col71 > 1 and t1.col72 > 1 and t1.col73 > 1 and t1.col74 > 1 and t1.col75 > 1 and t1.col76 > 1 and t1.col77 > 1 and t1.col78 > 1 and t1.col79 > 1 and t1.col80 > 1 and t1.col81 > 1 and t1.col82 > 1 and t1.col83 > 1 and t1.col84 > 1 and t1.col85 > 1 and t1.col86 > 1 and t1.col87 > 1 and t1.col88 > 1 and t1.col89 > 1 and t1.col90 > 1 and t1.col91 > 1 and t1.col92 > 1 and t1.col93 > 1 and t1.col94 > 1 and t1.col95 > 1 and t1.col96 > 1 and t1.col97 > 1 and t1.col98 > 1 and t1.col99 > 1 and t1.col100 > 1 and t1.col101 > 1 and t1.col102 > 1 and t1.col103 > 1 and t1.col104 > 1 and t1.col105 > 1 and t1.col106 > 1 and t1.col107 > 1 and t1.col108 > 1 and t1.col109 > 1 and t1.col110 > 1 and t1.col111 > 1 and t1.col112 > 1 and t1.col113 > 1 and t1.col114 > 1 and t1.col115 > 1 and t1.col116 > 1 and t1.col117 > 1 and t1.col118 > 1 and t1.col119 > 1 and t1.col120 > 1 and t1.col121 > 1 and t1.col122 > 1 and t1.col123 > 1 and t1.col124 > 1 and t1.col125 > 1 and t1.col126 > 1 and t1.col127 > 1 and t1.col128 > 1 and t1.col129 > 1 and t1.col130 > 1 and t1.col131 > 1 and t1.col132 > 1 and t1.col133 > 1 and t1.col134 > 1 and t1.col135 > 1 and t1.col136 > 1 and t1.col137 > 1 and t1.col138 > 1 and t1.col139 > 1 and t1.col140 > 1 and t1.col141 > 1 and t1.col142 > 1 and t1.col143 > 1 and t1.col144 > 1 and t1.col145 > 1 and t1.col146 > 1 and t1.col147 > 1 and t1.col148 > 1 and t1.col149 > 1 and t1.col150 > 1 and t1.col151 > 1 and t1.col152 > 1 and t1.col153 > 1 and t1.col154 > 1 and t1.col155 > 1 and t1.col156 > 1 and t1.col157 > 1 and t1.col158 > 1 and t1.col159 > 1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns[0]')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"column_name": "col0",
|
||||
"ranges":
|
||||
["1 < col0"],
|
||||
"selectivity_from_histogram": 0.996078431
|
||||
}
|
||||
]
|
||||
$query_tbl;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"cost": 0.0322836,
|
||||
"nested_loop": [
|
||||
{
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ALL",
|
||||
"loops": 1,
|
||||
"rows": 100,
|
||||
"cost": 0.0322836,
|
||||
"filtered": 53.32928848,
|
||||
"attached_condition": "t1.col0 > 1 and t1.col1 > 1 and t1.col2 > 1 and t1.col3 > 1 and t1.col4 > 1 and t1.col5 > 1 and t1.col6 > 1 and t1.col7 > 1 and t1.col8 > 1 and t1.col9 > 1 and t1.col10 > 1 and t1.col11 > 1 and t1.col12 > 1 and t1.col13 > 1 and t1.col14 > 1 and t1.col15 > 1 and t1.col16 > 1 and t1.col17 > 1 and t1.col18 > 1 and t1.col19 > 1 and t1.col20 > 1 and t1.col21 > 1 and t1.col22 > 1 and t1.col23 > 1 and t1.col24 > 1 and t1.col25 > 1 and t1.col26 > 1 and t1.col27 > 1 and t1.col28 > 1 and t1.col29 > 1 and t1.col30 > 1 and t1.col31 > 1 and t1.col32 > 1 and t1.col33 > 1 and t1.col34 > 1 and t1.col35 > 1 and t1.col36 > 1 and t1.col37 > 1 and t1.col38 > 1 and t1.col39 > 1 and t1.col40 > 1 and t1.col41 > 1 and t1.col42 > 1 and t1.col43 > 1 and t1.col44 > 1 and t1.col45 > 1 and t1.col46 > 1 and t1.col47 > 1 and t1.col48 > 1 and t1.col49 > 1 and t1.col50 > 1 and t1.col51 > 1 and t1.col52 > 1 and t1.col53 > 1 and t1.col54 > 1 and t1.col55 > 1 and t1.col56 > 1 and t1.col57 > 1 and t1.col58 > 1 and t1.col59 > 1 and t1.col60 > 1 and t1.col61 > 1 and t1.col62 > 1 and t1.col63 > 1 and t1.col64 > 1 and t1.col65 > 1 and t1.col66 > 1 and t1.col67 > 1 and t1.col68 > 1 and t1.col69 > 1 and t1.col70 > 1 and t1.col71 > 1 and t1.col72 > 1 and t1.col73 > 1 and t1.col74 > 1 and t1.col75 > 1 and t1.col76 > 1 and t1.col77 > 1 and t1.col78 > 1 and t1.col79 > 1 and t1.col80 > 1 and t1.col81 > 1 and t1.col82 > 1 and t1.col83 > 1 and t1.col84 > 1 and t1.col85 > 1 and t1.col86 > 1 and t1.col87 > 1 and t1.col88 > 1 and t1.col89 > 1 and t1.col90 > 1 and t1.col91 > 1 and t1.col92 > 1 and t1.col93 > 1 and t1.col94 > 1 and t1.col95 > 1 and t1.col96 > 1 and t1.col97 > 1 and t1.col98 > 1 and t1.col99 > 1 and t1.col100 > 1 and t1.col101 > 1 and t1.col102 > 1 and t1.col103 > 1 and t1.col104 > 1 and t1.col105 > 1 and t1.col106 > 1 and t1.col107 > 1 and t1.col108 > 1 and t1.col109 > 1 and t1.col110 > 1 and t1.col111 > 1 and t1.col112 > 1 and t1.col113 > 1 and t1.col114 > 1 and t1.col115 > 1 and t1.col116 > 1 and t1.col117 > 1 and t1.col118 > 1 and t1.col119 > 1 and t1.col120 > 1 and t1.col121 > 1 and t1.col122 > 1 and t1.col123 > 1 and t1.col124 > 1 and t1.col125 > 1 and t1.col126 > 1 and t1.col127 > 1 and t1.col128 > 1 and t1.col129 > 1 and t1.col130 > 1 and t1.col131 > 1 and t1.col132 > 1 and t1.col133 > 1 and t1.col134 > 1 and t1.col135 > 1 and t1.col136 > 1 and t1.col137 > 1 and t1.col138 > 1 and t1.col139 > 1 and t1.col140 > 1 and t1.col141 > 1 and t1.col142 > 1 and t1.col143 > 1 and t1.col144 > 1 and t1.col145 > 1 and t1.col146 > 1 and t1.col147 > 1 and t1.col148 > 1 and t1.col149 > 1 and t1.col150 > 1 and t1.col151 > 1 and t1.col152 > 1 and t1.col153 > 1 and t1.col154 > 1 and t1.col155 > 1 and t1.col156 > 1 and t1.col157 > 1 and t1.col158 > 1 and t1.col159 > 1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns[159]')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"column_name": "col159",
|
||||
"ranges":
|
||||
["1 < col159"],
|
||||
"selectivity_from_histogram": 0.996078431
|
||||
}
|
||||
]
|
||||
#
|
||||
# Check if not being able to infer anything for the first MAX_KEY
|
||||
# columns doesn't prevent further inferences.
|
||||
#
|
||||
# explain extended select * from t1
|
||||
# where (1>2 or col0>1 or col1>1 or ...) and col99>1
|
||||
#
|
||||
$query_tbl;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"cost": 0.0322836,
|
||||
"nested_loop": [
|
||||
{
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ALL",
|
||||
"loops": 1,
|
||||
"rows": 100,
|
||||
"cost": 0.0322836,
|
||||
"filtered": 99.60784149,
|
||||
"attached_condition": "(t1.col1 > 1 or t1.col2 > 1 or t1.col3 > 1 or t1.col4 > 1 or t1.col5 > 1 or t1.col6 > 1 or t1.col7 > 1 or t1.col8 > 1 or t1.col9 > 1 or t1.col10 > 1 or t1.col11 > 1 or t1.col12 > 1 or t1.col13 > 1 or t1.col14 > 1 or t1.col15 > 1 or t1.col16 > 1 or t1.col17 > 1 or t1.col18 > 1 or t1.col19 > 1 or t1.col20 > 1 or t1.col21 > 1 or t1.col22 > 1 or t1.col23 > 1 or t1.col24 > 1 or t1.col25 > 1 or t1.col26 > 1 or t1.col27 > 1 or t1.col28 > 1 or t1.col29 > 1 or t1.col30 > 1 or t1.col31 > 1 or t1.col32 > 1 or t1.col33 > 1 or t1.col34 > 1 or t1.col35 > 1 or t1.col36 > 1 or t1.col37 > 1 or t1.col38 > 1 or t1.col39 > 1 or t1.col40 > 1 or t1.col41 > 1 or t1.col42 > 1 or t1.col43 > 1 or t1.col44 > 1 or t1.col45 > 1 or t1.col46 > 1 or t1.col47 > 1 or t1.col48 > 1 or t1.col49 > 1 or t1.col50 > 1 or t1.col51 > 1 or t1.col52 > 1 or t1.col53 > 1 or t1.col54 > 1 or t1.col55 > 1 or t1.col56 > 1 or t1.col57 > 1 or t1.col58 > 1 or t1.col59 > 1 or t1.col60 > 1 or t1.col61 > 1 or t1.col62 > 1 or t1.col63 > 1 or t1.col64 > 1 or t1.col65 > 1 or t1.col66 > 1 or t1.col67 > 1 or t1.col68 > 1 or t1.col69 > 1 or t1.col70 > 1 or t1.col71 > 1 or t1.col72 > 1 or t1.col73 > 1 or t1.col74 > 1 or t1.col75 > 1 or t1.col76 > 1 or t1.col77 > 1 or t1.col78 > 1 or t1.col79 > 1 or t1.col80 > 1 or t1.col81 > 1 or t1.col82 > 1 or t1.col83 > 1 or t1.col84 > 1 or t1.col85 > 1 or t1.col86 > 1 or t1.col87 > 1 or t1.col88 > 1 or t1.col89 > 1 or t1.col90 > 1 or t1.col91 > 1 or t1.col92 > 1 or t1.col93 > 1 or t1.col94 > 1 or t1.col95 > 1 or t1.col96 > 1 or t1.col97 > 1 or t1.col98 > 1 or t1.col99 > 1 or t1.col100 > 1 or t1.col101 > 1 or t1.col102 > 1 or t1.col103 > 1 or t1.col104 > 1 or t1.col105 > 1 or t1.col106 > 1 or t1.col107 > 1 or t1.col108 > 1 or t1.col109 > 1 or t1.col110 > 1 or t1.col111 > 1 or t1.col112 > 1 or t1.col113 > 1 or t1.col114 > 1 or t1.col115 > 1 or t1.col116 > 1 or t1.col117 > 1 or t1.col118 > 1 or t1.col119 > 1 or t1.col120 > 1 or t1.col121 > 1 or t1.col122 > 1 or t1.col123 > 1 or t1.col124 > 1 or t1.col125 > 1 or t1.col126 > 1 or t1.col127 > 1 or t1.col128 > 1 or t1.col129 > 1 or t1.col130 > 1 or t1.col131 > 1 or t1.col132 > 1 or t1.col133 > 1 or t1.col134 > 1 or t1.col135 > 1 or t1.col136 > 1 or t1.col137 > 1 or t1.col138 > 1 or t1.col139 > 1 or t1.col140 > 1 or t1.col141 > 1 or t1.col142 > 1 or t1.col143 > 1 or t1.col144 > 1 or t1.col145 > 1 or t1.col146 > 1 or t1.col147 > 1 or t1.col148 > 1 or t1.col149 > 1 or t1.col150 > 1 or t1.col151 > 1 or t1.col152 > 1 or t1.col153 > 1 or t1.col154 > 1 or t1.col155 > 1 or t1.col156 > 1 or t1.col157 > 1 or t1.col158 > 1) and t1.col159 > 1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
JS
|
||||
[
|
||||
[
|
||||
{
|
||||
"column_name": "col159",
|
||||
"ranges":
|
||||
["1 < col159"],
|
||||
"selectivity_from_histogram": 0.996078431
|
||||
}
|
||||
]
|
||||
]
|
||||
set optimizer_trace=@trace_tmp;
|
||||
drop table t1;
|
||||
#
|
||||
# Clean up
|
||||
#
|
||||
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
|
||||
set use_stat_tables= @save_use_stat_tables;
|
||||
set @@global.histogram_size=@save_histogram_size;
|
||||
set optimizer_switch=@save_optimizer_switch_for_selectivity_test;
|
||||
SET SESSION STORAGE_ENGINE=DEFAULT;
|
||||
|
@ -83,13 +83,149 @@ sel
|
||||
]
|
||||
set optimizer_trace=@tmp;
|
||||
drop table t0,t1,t10;
|
||||
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
|
||||
set histogram_size=@save_histogram_size;
|
||||
set use_stat_tables= @save_use_stat_tables;
|
||||
#
|
||||
# End of 10.4 tests
|
||||
#
|
||||
#
|
||||
# MDEV-33314: Crash inside calculate_cond_selectivity_for_table() with many columns
|
||||
#
|
||||
set optimizer_use_condition_selectivity= 4;
|
||||
set use_stat_tables= preferably;
|
||||
#
|
||||
# create table t1 (col0 int, col1 int, col2 int, ...);
|
||||
#
|
||||
$create_tbl;
|
||||
#
|
||||
# insert into t1 select seq, ... seq from seq_1_to_10;
|
||||
#
|
||||
$insert_cmd;
|
||||
analyze table t1 persistent for all;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
set @trace_tmp=@@optimizer_trace;
|
||||
set optimizer_trace=1;
|
||||
#
|
||||
# Basic testcase: don't crash for many-column selectivity
|
||||
# explain extended select * from t1 where col0>1 and col1>1 and col2>1 and ...
|
||||
#
|
||||
$query_tbl;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"cost": 0.0295225,
|
||||
"nested_loop": [
|
||||
{
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ALL",
|
||||
"loops": 1,
|
||||
"rows": 100,
|
||||
"cost": 0.0295225,
|
||||
"filtered": 53.32928848,
|
||||
"attached_condition": "t1.col0 > 1 and t1.col1 > 1 and t1.col2 > 1 and t1.col3 > 1 and t1.col4 > 1 and t1.col5 > 1 and t1.col6 > 1 and t1.col7 > 1 and t1.col8 > 1 and t1.col9 > 1 and t1.col10 > 1 and t1.col11 > 1 and t1.col12 > 1 and t1.col13 > 1 and t1.col14 > 1 and t1.col15 > 1 and t1.col16 > 1 and t1.col17 > 1 and t1.col18 > 1 and t1.col19 > 1 and t1.col20 > 1 and t1.col21 > 1 and t1.col22 > 1 and t1.col23 > 1 and t1.col24 > 1 and t1.col25 > 1 and t1.col26 > 1 and t1.col27 > 1 and t1.col28 > 1 and t1.col29 > 1 and t1.col30 > 1 and t1.col31 > 1 and t1.col32 > 1 and t1.col33 > 1 and t1.col34 > 1 and t1.col35 > 1 and t1.col36 > 1 and t1.col37 > 1 and t1.col38 > 1 and t1.col39 > 1 and t1.col40 > 1 and t1.col41 > 1 and t1.col42 > 1 and t1.col43 > 1 and t1.col44 > 1 and t1.col45 > 1 and t1.col46 > 1 and t1.col47 > 1 and t1.col48 > 1 and t1.col49 > 1 and t1.col50 > 1 and t1.col51 > 1 and t1.col52 > 1 and t1.col53 > 1 and t1.col54 > 1 and t1.col55 > 1 and t1.col56 > 1 and t1.col57 > 1 and t1.col58 > 1 and t1.col59 > 1 and t1.col60 > 1 and t1.col61 > 1 and t1.col62 > 1 and t1.col63 > 1 and t1.col64 > 1 and t1.col65 > 1 and t1.col66 > 1 and t1.col67 > 1 and t1.col68 > 1 and t1.col69 > 1 and t1.col70 > 1 and t1.col71 > 1 and t1.col72 > 1 and t1.col73 > 1 and t1.col74 > 1 and t1.col75 > 1 and t1.col76 > 1 and t1.col77 > 1 and t1.col78 > 1 and t1.col79 > 1 and t1.col80 > 1 and t1.col81 > 1 and t1.col82 > 1 and t1.col83 > 1 and t1.col84 > 1 and t1.col85 > 1 and t1.col86 > 1 and t1.col87 > 1 and t1.col88 > 1 and t1.col89 > 1 and t1.col90 > 1 and t1.col91 > 1 and t1.col92 > 1 and t1.col93 > 1 and t1.col94 > 1 and t1.col95 > 1 and t1.col96 > 1 and t1.col97 > 1 and t1.col98 > 1 and t1.col99 > 1 and t1.col100 > 1 and t1.col101 > 1 and t1.col102 > 1 and t1.col103 > 1 and t1.col104 > 1 and t1.col105 > 1 and t1.col106 > 1 and t1.col107 > 1 and t1.col108 > 1 and t1.col109 > 1 and t1.col110 > 1 and t1.col111 > 1 and t1.col112 > 1 and t1.col113 > 1 and t1.col114 > 1 and t1.col115 > 1 and t1.col116 > 1 and t1.col117 > 1 and t1.col118 > 1 and t1.col119 > 1 and t1.col120 > 1 and t1.col121 > 1 and t1.col122 > 1 and t1.col123 > 1 and t1.col124 > 1 and t1.col125 > 1 and t1.col126 > 1 and t1.col127 > 1 and t1.col128 > 1 and t1.col129 > 1 and t1.col130 > 1 and t1.col131 > 1 and t1.col132 > 1 and t1.col133 > 1 and t1.col134 > 1 and t1.col135 > 1 and t1.col136 > 1 and t1.col137 > 1 and t1.col138 > 1 and t1.col139 > 1 and t1.col140 > 1 and t1.col141 > 1 and t1.col142 > 1 and t1.col143 > 1 and t1.col144 > 1 and t1.col145 > 1 and t1.col146 > 1 and t1.col147 > 1 and t1.col148 > 1 and t1.col149 > 1 and t1.col150 > 1 and t1.col151 > 1 and t1.col152 > 1 and t1.col153 > 1 and t1.col154 > 1 and t1.col155 > 1 and t1.col156 > 1 and t1.col157 > 1 and t1.col158 > 1 and t1.col159 > 1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns[0]')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"column_name": "col0",
|
||||
"ranges":
|
||||
["1 < col0"],
|
||||
"selectivity_from_histogram": 0.996078431
|
||||
}
|
||||
]
|
||||
$query_tbl;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"cost": 0.0295225,
|
||||
"nested_loop": [
|
||||
{
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ALL",
|
||||
"loops": 1,
|
||||
"rows": 100,
|
||||
"cost": 0.0295225,
|
||||
"filtered": 53.32928848,
|
||||
"attached_condition": "t1.col0 > 1 and t1.col1 > 1 and t1.col2 > 1 and t1.col3 > 1 and t1.col4 > 1 and t1.col5 > 1 and t1.col6 > 1 and t1.col7 > 1 and t1.col8 > 1 and t1.col9 > 1 and t1.col10 > 1 and t1.col11 > 1 and t1.col12 > 1 and t1.col13 > 1 and t1.col14 > 1 and t1.col15 > 1 and t1.col16 > 1 and t1.col17 > 1 and t1.col18 > 1 and t1.col19 > 1 and t1.col20 > 1 and t1.col21 > 1 and t1.col22 > 1 and t1.col23 > 1 and t1.col24 > 1 and t1.col25 > 1 and t1.col26 > 1 and t1.col27 > 1 and t1.col28 > 1 and t1.col29 > 1 and t1.col30 > 1 and t1.col31 > 1 and t1.col32 > 1 and t1.col33 > 1 and t1.col34 > 1 and t1.col35 > 1 and t1.col36 > 1 and t1.col37 > 1 and t1.col38 > 1 and t1.col39 > 1 and t1.col40 > 1 and t1.col41 > 1 and t1.col42 > 1 and t1.col43 > 1 and t1.col44 > 1 and t1.col45 > 1 and t1.col46 > 1 and t1.col47 > 1 and t1.col48 > 1 and t1.col49 > 1 and t1.col50 > 1 and t1.col51 > 1 and t1.col52 > 1 and t1.col53 > 1 and t1.col54 > 1 and t1.col55 > 1 and t1.col56 > 1 and t1.col57 > 1 and t1.col58 > 1 and t1.col59 > 1 and t1.col60 > 1 and t1.col61 > 1 and t1.col62 > 1 and t1.col63 > 1 and t1.col64 > 1 and t1.col65 > 1 and t1.col66 > 1 and t1.col67 > 1 and t1.col68 > 1 and t1.col69 > 1 and t1.col70 > 1 and t1.col71 > 1 and t1.col72 > 1 and t1.col73 > 1 and t1.col74 > 1 and t1.col75 > 1 and t1.col76 > 1 and t1.col77 > 1 and t1.col78 > 1 and t1.col79 > 1 and t1.col80 > 1 and t1.col81 > 1 and t1.col82 > 1 and t1.col83 > 1 and t1.col84 > 1 and t1.col85 > 1 and t1.col86 > 1 and t1.col87 > 1 and t1.col88 > 1 and t1.col89 > 1 and t1.col90 > 1 and t1.col91 > 1 and t1.col92 > 1 and t1.col93 > 1 and t1.col94 > 1 and t1.col95 > 1 and t1.col96 > 1 and t1.col97 > 1 and t1.col98 > 1 and t1.col99 > 1 and t1.col100 > 1 and t1.col101 > 1 and t1.col102 > 1 and t1.col103 > 1 and t1.col104 > 1 and t1.col105 > 1 and t1.col106 > 1 and t1.col107 > 1 and t1.col108 > 1 and t1.col109 > 1 and t1.col110 > 1 and t1.col111 > 1 and t1.col112 > 1 and t1.col113 > 1 and t1.col114 > 1 and t1.col115 > 1 and t1.col116 > 1 and t1.col117 > 1 and t1.col118 > 1 and t1.col119 > 1 and t1.col120 > 1 and t1.col121 > 1 and t1.col122 > 1 and t1.col123 > 1 and t1.col124 > 1 and t1.col125 > 1 and t1.col126 > 1 and t1.col127 > 1 and t1.col128 > 1 and t1.col129 > 1 and t1.col130 > 1 and t1.col131 > 1 and t1.col132 > 1 and t1.col133 > 1 and t1.col134 > 1 and t1.col135 > 1 and t1.col136 > 1 and t1.col137 > 1 and t1.col138 > 1 and t1.col139 > 1 and t1.col140 > 1 and t1.col141 > 1 and t1.col142 > 1 and t1.col143 > 1 and t1.col144 > 1 and t1.col145 > 1 and t1.col146 > 1 and t1.col147 > 1 and t1.col148 > 1 and t1.col149 > 1 and t1.col150 > 1 and t1.col151 > 1 and t1.col152 > 1 and t1.col153 > 1 and t1.col154 > 1 and t1.col155 > 1 and t1.col156 > 1 and t1.col157 > 1 and t1.col158 > 1 and t1.col159 > 1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns[159]')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
JS
|
||||
[
|
||||
{
|
||||
"column_name": "col159",
|
||||
"ranges":
|
||||
["1 < col159"],
|
||||
"selectivity_from_histogram": 0.996078431
|
||||
}
|
||||
]
|
||||
#
|
||||
# Check if not being able to infer anything for the first MAX_KEY
|
||||
# columns doesn't prevent further inferences.
|
||||
#
|
||||
# explain extended select * from t1
|
||||
# where (1>2 or col0>1 or col1>1 or ...) and col99>1
|
||||
#
|
||||
$query_tbl;
|
||||
EXPLAIN
|
||||
{
|
||||
"query_block": {
|
||||
"select_id": 1,
|
||||
"cost": 0.0295225,
|
||||
"nested_loop": [
|
||||
{
|
||||
"table": {
|
||||
"table_name": "t1",
|
||||
"access_type": "ALL",
|
||||
"loops": 1,
|
||||
"rows": 100,
|
||||
"cost": 0.0295225,
|
||||
"filtered": 99.60784149,
|
||||
"attached_condition": "(t1.col1 > 1 or t1.col2 > 1 or t1.col3 > 1 or t1.col4 > 1 or t1.col5 > 1 or t1.col6 > 1 or t1.col7 > 1 or t1.col8 > 1 or t1.col9 > 1 or t1.col10 > 1 or t1.col11 > 1 or t1.col12 > 1 or t1.col13 > 1 or t1.col14 > 1 or t1.col15 > 1 or t1.col16 > 1 or t1.col17 > 1 or t1.col18 > 1 or t1.col19 > 1 or t1.col20 > 1 or t1.col21 > 1 or t1.col22 > 1 or t1.col23 > 1 or t1.col24 > 1 or t1.col25 > 1 or t1.col26 > 1 or t1.col27 > 1 or t1.col28 > 1 or t1.col29 > 1 or t1.col30 > 1 or t1.col31 > 1 or t1.col32 > 1 or t1.col33 > 1 or t1.col34 > 1 or t1.col35 > 1 or t1.col36 > 1 or t1.col37 > 1 or t1.col38 > 1 or t1.col39 > 1 or t1.col40 > 1 or t1.col41 > 1 or t1.col42 > 1 or t1.col43 > 1 or t1.col44 > 1 or t1.col45 > 1 or t1.col46 > 1 or t1.col47 > 1 or t1.col48 > 1 or t1.col49 > 1 or t1.col50 > 1 or t1.col51 > 1 or t1.col52 > 1 or t1.col53 > 1 or t1.col54 > 1 or t1.col55 > 1 or t1.col56 > 1 or t1.col57 > 1 or t1.col58 > 1 or t1.col59 > 1 or t1.col60 > 1 or t1.col61 > 1 or t1.col62 > 1 or t1.col63 > 1 or t1.col64 > 1 or t1.col65 > 1 or t1.col66 > 1 or t1.col67 > 1 or t1.col68 > 1 or t1.col69 > 1 or t1.col70 > 1 or t1.col71 > 1 or t1.col72 > 1 or t1.col73 > 1 or t1.col74 > 1 or t1.col75 > 1 or t1.col76 > 1 or t1.col77 > 1 or t1.col78 > 1 or t1.col79 > 1 or t1.col80 > 1 or t1.col81 > 1 or t1.col82 > 1 or t1.col83 > 1 or t1.col84 > 1 or t1.col85 > 1 or t1.col86 > 1 or t1.col87 > 1 or t1.col88 > 1 or t1.col89 > 1 or t1.col90 > 1 or t1.col91 > 1 or t1.col92 > 1 or t1.col93 > 1 or t1.col94 > 1 or t1.col95 > 1 or t1.col96 > 1 or t1.col97 > 1 or t1.col98 > 1 or t1.col99 > 1 or t1.col100 > 1 or t1.col101 > 1 or t1.col102 > 1 or t1.col103 > 1 or t1.col104 > 1 or t1.col105 > 1 or t1.col106 > 1 or t1.col107 > 1 or t1.col108 > 1 or t1.col109 > 1 or t1.col110 > 1 or t1.col111 > 1 or t1.col112 > 1 or t1.col113 > 1 or t1.col114 > 1 or t1.col115 > 1 or t1.col116 > 1 or t1.col117 > 1 or t1.col118 > 1 or t1.col119 > 1 or t1.col120 > 1 or t1.col121 > 1 or t1.col122 > 1 or t1.col123 > 1 or t1.col124 > 1 or t1.col125 > 1 or t1.col126 > 1 or t1.col127 > 1 or t1.col128 > 1 or t1.col129 > 1 or t1.col130 > 1 or t1.col131 > 1 or t1.col132 > 1 or t1.col133 > 1 or t1.col134 > 1 or t1.col135 > 1 or t1.col136 > 1 or t1.col137 > 1 or t1.col138 > 1 or t1.col139 > 1 or t1.col140 > 1 or t1.col141 > 1 or t1.col142 > 1 or t1.col143 > 1 or t1.col144 > 1 or t1.col145 > 1 or t1.col146 > 1 or t1.col147 > 1 or t1.col148 > 1 or t1.col149 > 1 or t1.col150 > 1 or t1.col151 > 1 or t1.col152 > 1 or t1.col153 > 1 or t1.col154 > 1 or t1.col155 > 1 or t1.col156 > 1 or t1.col157 > 1 or t1.col158 > 1) and t1.col159 > 1"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
JS
|
||||
[
|
||||
[
|
||||
{
|
||||
"column_name": "col159",
|
||||
"ranges":
|
||||
["1 < col159"],
|
||||
"selectivity_from_histogram": 0.996078431
|
||||
}
|
||||
]
|
||||
]
|
||||
set optimizer_trace=@trace_tmp;
|
||||
drop table t1;
|
||||
#
|
||||
# Clean up
|
||||
#
|
||||
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
|
||||
set use_stat_tables= @save_use_stat_tables;
|
||||
set @@global.histogram_size=@save_histogram_size;
|
||||
|
@ -105,17 +105,113 @@ from information_schema.optimizer_trace;
|
||||
set optimizer_trace=@tmp;
|
||||
drop table t0,t1,t10;
|
||||
|
||||
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
|
||||
set histogram_size=@save_histogram_size;
|
||||
set use_stat_tables= @save_use_stat_tables;
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # End of 10.4 tests
|
||||
--echo #
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-33314: Crash inside calculate_cond_selectivity_for_table() with many columns
|
||||
--echo #
|
||||
set optimizer_use_condition_selectivity= 4;
|
||||
set use_stat_tables= preferably;
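For context, a note on the two settings above (general MariaDB behaviour, not something introduced by this fix): optimizer_use_condition_selectivity=4 makes the optimizer estimate selectivity for conditions on non-indexed columns as well, using histograms when they are available, and use_stat_tables=preferably makes it prefer the engine-independent statistics collected by the ANALYZE TABLE ... PERSISTENT FOR ALL further down. Both are what produce the per-column selectivity_for_columns entries checked in the optimizer trace below. The effective values can be inspected with:

select @@optimizer_use_condition_selectivity, @@use_stat_tables;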
|
||||
|
||||
let $N_CONDS=160;
|
||||
let $N_LAST_COND=159;
|
||||
--echo #
|
||||
--echo # create table t1 (col0 int, col1 int, col2 int, ...);
|
||||
--echo #
|
||||
let $create_tbl= create table t1 ( col0 int;
|
||||
let $i=1;
|
||||
|
||||
while ($i < $N_CONDS) {
|
||||
let $create_tbl= $create_tbl, col$i int;
|
||||
let $i=`select $i + 1`;
|
||||
}
|
||||
|
||||
let $create_tbl= $create_tbl );
|
||||
#echo $create_tbl;
|
||||
evalp $create_tbl;
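For readability, a sketch of the statement the loop above assembles and then runs through evalp, shortened to four columns here (the test itself generates col0 through col159):

create table t1 (col0 int, col1 int, col2 int, col3 int);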
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # insert into t1 select seq, ... seq from seq_1_to_10;
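(The generated statement actually reads from seq_1_to_100, as shown by the sketch after the loop below.)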
|
||||
--echo #
|
||||
let $insert_cmd= insert into t1 select seq;
|
||||
let $i=1;
|
||||
|
||||
while ($i < $N_CONDS) {
|
||||
let $insert_cmd = $insert_cmd ,seq;
|
||||
let $i=`select $i + 1`;
|
||||
}
|
||||
let $insert_cmd= $insert_cmd from seq_1_to_100;
|
||||
|
||||
# echo $insert_cmd;
|
||||
evalp $insert_cmd;
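Likewise, the generated insert has this shape, again shortened (the real statement repeats seq once per column and reads 100 rows from seq_1_to_100):

insert into t1 select seq, seq, seq, seq from seq_1_to_100;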
|
||||
|
||||
analyze table t1 persistent for all;
|
||||
set @trace_tmp=@@optimizer_trace;
|
||||
set optimizer_trace=1;
|
||||
|
||||
--echo #
|
||||
--echo # Basic testcase: don't crash for many-column selectivity
|
||||
--echo # explain extended select * from t1 where col0>1 and col1>1 and col2>1 and ...
|
||||
--echo #
|
||||
let $query_tbl= explain format=json select * from t1 where col0>1;
|
||||
|
||||
let $i=1;
|
||||
while ($i < $N_CONDS) {
|
||||
let $query_tbl= $query_tbl and col$i>1;
|
||||
let $i=`select $i + 1`;
|
||||
}
|
||||
|
||||
#echo $query_tbl;
|
||||
evalp $query_tbl;
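The query built above expands to roughly the following, shortened to four conditions (the test chains col0 through col159); it is what produces the EXPLAIN FORMAT=JSON output recorded in the .result hunk earlier in this diff:

explain format=json select * from t1 where col0>1 and col1>1 and col2>1 and col3>1;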
|
||||
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns[0]')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
|
||||
evalp $query_tbl;
|
||||
eval select
|
||||
json_detailed(json_extract(trace,'\$**.selectivity_for_columns[$N_LAST_COND]')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
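A note on the escaping in the eval'd statement above (standard mysqltest behaviour, not added by this commit): the backslash keeps the dollar sign literal, so the JSONPath wildcard $** reaches the server unchanged, while $N_LAST_COND is still expanded by mysqltest. With N_LAST_COND=159 the server therefore receives the statement shown in the .result hunk above, i.e.:

select
json_detailed(json_extract(trace,'$**.selectivity_for_columns[159]')) as JS
from
information_schema.optimizer_trace;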
|
||||
|
||||
|
||||
--echo #
|
||||
--echo # Check if not being able to infer anything for the first MAX_KEY
|
||||
--echo # columns doesn't prevent further inferences.
|
||||
--echo #
|
||||
--echo # explain extended select * from t1
|
||||
--echo # where (1>2 or col0>1 or col1>1 or ...) and col99>1
|
||||
--echo #
|
||||
let $query_tbl= explain format=json select * from t1 where (1>2 ;
|
||||
|
||||
let $i=1;
|
||||
while ($i < $N_LAST_COND) {
|
||||
let $query_tbl= $query_tbl or col$i>1;
|
||||
let $i=`select $i + 1`;
|
||||
}
|
||||
let $query_tbl= $query_tbl) and col$N_LAST_COND>1;
|
||||
|
||||
#echo $query_tbl;
|
||||
evalp $query_tbl;
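As before, a shortened sketch of the generated statement (the real one ORs col1 through col158 inside the parentheses):

explain format=json select * from t1 where (1>2 or col1>1 or col2>1 or col3>1) and col159>1;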
|
||||
|
||||
select
|
||||
json_detailed(json_extract(trace,'$**.selectivity_for_columns')) as JS
|
||||
from
|
||||
information_schema.optimizer_trace;
|
||||
|
||||
set optimizer_trace=@trace_tmp;
|
||||
drop table t1;
|
||||
|
||||
--echo #
|
||||
--echo # Clean up
|
||||
--echo #
|
||||
--source include/restore_charset.inc
|
||||
set optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity;
|
||||
set use_stat_tables= @save_use_stat_tables;
|
||||
set @@global.histogram_size=@save_histogram_size;
|
||||
|
@ -207,9 +207,12 @@ id select_type table type possible_keys key key_len ref rows r_rows filtered r_f
|
||||
SET debug_dbug=@old_debug;
|
||||
# Try to do SHOW ANALYZE for a query that runs a SET command:
|
||||
#
|
||||
set @show_explain_probe_select_id=2;
|
||||
create table t2 (a int);
|
||||
insert into t2 values (1),(2);
|
||||
set @show_explain_probe_select_id=3;
|
||||
SET debug_dbug='+d,show_explain_probe_join_exec_start';
|
||||
set @foo= (select max(a) from t0 where sin(a) >0);
|
||||
set @foo= (select max(a) from t2
|
||||
where a + (select max(a) from t0 where t0.a>t2.a) < 10000);
|
||||
connection default;
|
||||
show analyze for $thr2;
|
||||
ERROR HY000: Target is not executing an operation with a query plan
|
||||
@ -217,6 +220,7 @@ kill query $thr2;
|
||||
connection con1;
|
||||
ERROR 70100: Query execution was interrupted
|
||||
SET debug_dbug=@old_debug;
|
||||
drop table t2;
|
||||
#
|
||||
# Attempt SHOW ANALYZE for an UPDATE
|
||||
#
|
||||
|
@ -213,9 +213,15 @@ SET debug_dbug=@old_debug;
|
||||
|
||||
--echo # Try to do SHOW ANALYZE for a query that runs a SET command:
|
||||
--echo #
|
||||
set @show_explain_probe_select_id=2; # <---
|
||||
create table t2 (a int);
|
||||
insert into t2 values (1),(2);
|
||||
set @show_explain_probe_select_id=3; # Stop in the subquery.
|
||||
SET debug_dbug='+d,show_explain_probe_join_exec_start';
|
||||
send set @foo= (select max(a) from t0 where sin(a) >0);
|
||||
# t2 has 2 rows so we will stop in the subquery twice:
|
||||
# - first one to serve the SHOW ANALYZE request
|
||||
# - second one when waiting to be KILLed.
|
||||
send set @foo= (select max(a) from t2
|
||||
where a + (select max(a) from t0 where t0.a>t2.a) < 10000);
|
||||
connection default;
|
||||
--source include/wait_condition.inc
|
||||
--error ER_TARGET_NOT_EXPLAINABLE
|
||||
@ -225,7 +231,7 @@ connection con1;
|
||||
--error ER_QUERY_INTERRUPTED
|
||||
reap;
|
||||
SET debug_dbug=@old_debug;
|
||||
|
||||
drop table t2;
|
||||
|
||||
--echo #
|
||||
--echo # Attempt SHOW ANALYZE for an UPDATE
|
||||
|
@ -34,13 +34,12 @@ set session use_stat_tables='preferably';
|
||||
# Must NOT show "Engine-independent statistics collected":
|
||||
alter table t1 analyze partition p0;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
# Should not have Handler_read_rnd_next=34
|
||||
show session status like 'Handler_read_rnd%';
|
||||
Variable_name Value
|
||||
Handler_read_rnd 0
|
||||
Handler_read_rnd_deleted 0
|
||||
Handler_read_rnd_next 34
|
||||
Handler_read_rnd_next 0
|
||||
drop table t1;
|
||||
SET use_stat_tables = DEFAULT;
|
||||
|
@ -17,7 +17,6 @@ SET use_stat_tables = PREFERABLY;
|
||||
CREATE TABLE t1 ( a INT ) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
|
||||
ALTER TABLE t1 ANALYZE PARTITION p1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
include/show_binlog_events.inc
|
||||
Log_name Pos Event_type Server_id End_log_pos Info
|
||||
|
@ -2617,9 +2617,9 @@ ERROR HY000: 'ignore' is not allowed in this context
|
||||
VALUES (DEFAULT);
|
||||
ERROR HY000: 'default' is not allowed in this context
|
||||
EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE;
|
||||
ERROR HY000: 'ignore' is not allowed in this context
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
|
||||
ERROR HY000: 'default' is not allowed in this context
|
||||
ERROR HY000: Default/ignore value is not supported for such parameter usage
|
||||
#
|
||||
# MDEV-24675: TVC using subqueries
|
||||
#
|
||||
|
@ -1368,9 +1368,9 @@ DELIMITER ;$$
|
||||
VALUES (IGNORE);
|
||||
--error ER_NOT_ALLOWED_IN_THIS_CONTEXT
|
||||
VALUES (DEFAULT);
|
||||
--error ER_NOT_ALLOWED_IN_THIS_CONTEXT
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE;
|
||||
--error ER_NOT_ALLOWED_IN_THIS_CONTEXT
|
||||
--error ER_INVALID_DEFAULT_PARAM
|
||||
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
|
||||
|
||||
--echo #
|
||||
|
@ -607,4 +607,68 @@ drop table t1;
|
||||
DROP FUNCTION avgcost;
|
||||
DROP FUNCTION avg2;
|
||||
DROP FUNCTION myfunc_double;
|
||||
#
|
||||
# MDEV-24507: Server Crash using UDF in WHERE clause of VIEW
|
||||
#
|
||||
CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB";
|
||||
create table t1(pk int primary key, a varchar(20));
|
||||
create table t2(pk int primary key, a varchar(20));
|
||||
create view v1 as select pk, a from t1 union select pk, a from t2;
|
||||
insert into t1 values (1, "One"), (3, "Three"), (5, "Five");
|
||||
insert into t2 values (2, "Dos"), (4, "Quatro"), (6, "Seis");
|
||||
select pk, myfunc_int(a) from t1;
|
||||
pk myfunc_int(a)
|
||||
1 3
|
||||
3 5
|
||||
5 4
|
||||
select pk, myfunc_int(a) from t2;
|
||||
pk myfunc_int(a)
|
||||
2 3
|
||||
4 6
|
||||
6 4
|
||||
select pk, myfunc_int(a) from v1;
|
||||
pk myfunc_int(a)
|
||||
1 3
|
||||
3 5
|
||||
5 4
|
||||
2 3
|
||||
4 6
|
||||
6 4
|
||||
select pk from t1 where myfunc_int(a) > 4;
|
||||
pk
|
||||
3
|
||||
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
|
||||
pk
|
||||
3
|
||||
set @save_optimizer_switch = @@optimizer_switch;
|
||||
set optimizer_switch = 'derived_merge=OFF';
|
||||
select pk, myfunc_int(a) from t1;
|
||||
pk myfunc_int(a)
|
||||
1 3
|
||||
3 5
|
||||
5 4
|
||||
select pk, myfunc_int(a) from t2;
|
||||
pk myfunc_int(a)
|
||||
2 3
|
||||
4 6
|
||||
6 4
|
||||
select pk, myfunc_int(a) from v1;
|
||||
pk myfunc_int(a)
|
||||
1 3
|
||||
3 5
|
||||
5 4
|
||||
2 3
|
||||
4 6
|
||||
6 4
|
||||
select pk from t1 where myfunc_int(a) > 4;
|
||||
pk
|
||||
3
|
||||
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
|
||||
pk
|
||||
3
|
||||
set optimizer_switch = @save_optimizer_switch;
|
||||
drop view v1;
|
||||
drop table t2;
|
||||
drop table t1;
|
||||
drop function myfunc_int;
|
||||
# End of 10.4 tests
|
||||
|
@ -647,4 +647,38 @@ DROP FUNCTION avgcost;
|
||||
DROP FUNCTION avg2;
|
||||
DROP FUNCTION myfunc_double;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-24507: Server Crash using UDF in WHERE clause of VIEW
|
||||
--echo #
|
||||
|
||||
--replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB
|
||||
eval CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "$UDF_EXAMPLE_SO";
|
||||
|
||||
create table t1(pk int primary key, a varchar(20));
|
||||
create table t2(pk int primary key, a varchar(20));
|
||||
create view v1 as select pk, a from t1 union select pk, a from t2;
|
||||
|
||||
insert into t1 values (1, "One"), (3, "Three"), (5, "Five");
|
||||
insert into t2 values (2, "Dos"), (4, "Quatro"), (6, "Seis");
|
||||
|
||||
select pk, myfunc_int(a) from t1;
|
||||
select pk, myfunc_int(a) from t2;
|
||||
select pk, myfunc_int(a) from v1;
|
||||
select pk from t1 where myfunc_int(a) > 4;
|
||||
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
|
||||
|
||||
set @save_optimizer_switch = @@optimizer_switch;
|
||||
set optimizer_switch = 'derived_merge=OFF';
|
||||
select pk, myfunc_int(a) from t1;
|
||||
select pk, myfunc_int(a) from t2;
|
||||
select pk, myfunc_int(a) from v1;
|
||||
select pk from t1 where myfunc_int(a) > 4;
|
||||
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
|
||||
|
||||
set optimizer_switch = @save_optimizer_switch;
|
||||
drop view v1;
|
||||
drop table t2;
|
||||
drop table t1;
|
||||
drop function myfunc_int;
|
||||
|
||||
--echo # End of 10.4 tests
|
||||
|
@ -3104,7 +3104,7 @@ sub mysql_install_db {
|
||||
mtr_add_arg($args, "--core-file");
|
||||
mtr_add_arg($args, "--console");
|
||||
mtr_add_arg($args, "--character-set-server=latin1");
|
||||
mtr_add_arg($args, "--disable-performance-schema");
|
||||
mtr_add_arg($args, "--loose-disable-performance-schema");
|
||||
|
||||
if ( $opt_debug )
|
||||
{
|
||||
|
BIN  mysql-test/std_data/autoinc_import_101.frm  Normal file (binary file not shown)
BIN  mysql-test/std_data/autoinc_import_57.frm  Normal file (binary file not shown)
6  mysql-test/std_data/mdev-25731.dat  Normal file
@ -0,0 +1,6 @@
|
||||
1
|
||||
2
|
||||
3
|
||||
1
|
||||
5
|
||||
6
|
@ -9,7 +9,7 @@ INSERT INTO t2 VALUES(2);
|
||||
SELECT * FROM t1;
|
||||
ERROR 42000: Unknown storage engine 'InnoDB'
|
||||
SELECT * FROM t1;
|
||||
Got one of the listed errors
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
SELECT * FROM t2;
|
||||
a
|
||||
2
|
||||
|
@ -31,7 +31,7 @@ foobar 2
|
||||
# Restart server with keysbad3.txt
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keysbad3.txt
|
||||
SELECT * FROM t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keysbad3.txt
|
||||
DROP TABLE t1;
|
||||
# Start server with keys3.txt
|
||||
@ -43,31 +43,31 @@ INSERT INTO t2 VALUES ('foobar',1,2);
|
||||
# Restart server with keys2.txt
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
SELECT * FROM t2;
|
||||
ERROR 42S02: Table 'test.t2' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
SELECT * FROM t2 where id = 1;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
SELECT * FROM t2 where b = 1;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
INSERT INTO t2 VALUES ('tmp',3,3);
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
DELETE FROM t2 where b = 3;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
DELETE FROM t2 where id = 3;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
UPDATE t2 set b = b +1;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
OPTIMIZE TABLE t2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t2 optimize Error Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
test.t2 optimize Error Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
test.t2 optimize error Corrupt
|
||||
ALTER TABLE t2 ADD COLUMN d INT;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
ANALYZE TABLE t2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t2 analyze Error Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
test.t2 analyze Error Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
test.t2 analyze error Corrupt
|
||||
TRUNCATE TABLE t2;
|
||||
ERROR HY000: Table test/t2 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
DROP TABLE t2;
|
||||
|
||||
# Start server with keys2.txt
|
||||
|
@ -13,24 +13,24 @@ ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
|
||||
INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
|
||||
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
SELECT * FROM t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
Error 1932 Table 'test.t1' doesn't exist in engine
|
||||
Error 1877 Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
ALTER TABLE t1 ENGINE=InnoDB;
|
||||
ERROR HY000: Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
Error 1877 Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
Error 1877 Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
OPTIMIZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize Error Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
test.t1 optimize Error Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
test.t1 optimize error Corrupt
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
CHECK TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check Error Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
test.t1 check Error Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
test.t1 check error Corrupt
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
@ -40,7 +40,7 @@ backup: t1
|
||||
UNLOCK TABLES;
|
||||
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
ALTER TABLE t1 DISCARD TABLESPACE;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
DROP TABLE t1;
|
||||
CREATE TABLE t1 (pk INT PRIMARY KEY, f VARCHAR(8)) ENGINE=InnoDB
|
||||
ENCRYPTED=YES ENCRYPTION_KEY_ID=4;
|
||||
@ -61,7 +61,7 @@ t1 CREATE TABLE `t1` (
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci `ENCRYPTED`=YES `ENCRYPTION_KEY_ID`=4
|
||||
# restart: --innodb-encrypt-tables --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
RENAME TABLE t1 TO t1new;
|
||||
ERROR HY000: Error on rename of './test/t1' to './test/t1new' (errno: 155 "The table does not exist in the storage engine")
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
ALTER TABLE t1 RENAME TO t1new;
|
||||
ERROR HY000: Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
DROP TABLE t1;
|
||||
|
@ -11,13 +11,13 @@ INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
|
||||
# restart: --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
OPTIMIZE TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 optimize Error Table 'test.t1' doesn't exist in engine
|
||||
test.t1 optimize status Operation failed
|
||||
test.t1 optimize Error Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
test.t1 optimize error Corrupt
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
CHECK TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check Error Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
test.t1 check Error Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
test.t1 check error Corrupt
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
|
@ -1,7 +1,7 @@
|
||||
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[123]\\.ibd' cannot be decrypted; key_version=1");
|
||||
call mtr.add_suppression("InnoDB: Recovery failed to read page");
|
||||
call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[12]` is corrupted");
|
||||
call mtr.add_suppression("Table `test`\\.`t[12]` is corrupted");
|
||||
# Restart mysqld --file-key-management-filename=keys2.txt
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
set GLOBAL innodb_default_encryption_key_id=4;
|
||||
@ -16,9 +16,9 @@ insert into t3 values (1, repeat('secret',6000));
|
||||
# Restart mysqld --file-key-management-filename=keys3.txt
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
select count(*) from t1 FORCE INDEX (b) where b like 'secret%';
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
select count(*) from t2 FORCE INDEX (b) where b like 'secret%';
|
||||
ERROR 42S02: Table 'test.t2' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t2` is corrupted. Please drop the table and recreate.
|
||||
select count(*) from t3 FORCE INDEX (b) where b like 'secret%';
|
||||
count(*)
|
||||
1
|
||||
|
@ -1,8 +1,7 @@
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[15]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("Table `test`\\.`t[15]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[15]\\.ibd' cannot be decrypted\\.");
|
||||
call mtr.add_suppression("InnoDB: Recovery failed to read page");
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=[1-9][0-9]*, page number=3\\] in file .*test.t[15].ibd looks corrupted; key_version=1");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[15]` is corrupted");
|
||||
call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
|
||||
# restart: --innodb-encrypt-tables=ON --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
create table t5 (
|
||||
@ -24,9 +23,9 @@ insert into t1 values (1,2,'maria','db','encryption');
|
||||
alter table t1 encrypted='yes' `encryption_key_id`=1;
|
||||
# restart: --innodb-encrypt-tables=OFF
|
||||
select * from t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
select * from t5;
|
||||
ERROR 42S02: Table 'test.t5' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t5` is corrupted. Please drop the table and recreate.
|
||||
# restart: --innodb-encrypt-tables=ON --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
drop table t1;
|
||||
drop table t5;
|
||||
|
@ -1,4 +1,4 @@
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption in an InnoDB type table");
|
||||
call mtr.add_suppression("\\[ERROR\\] (mysqld|mariadbd).*: Index for table 't2' is corrupt; try to repair it");
|
||||
@ -17,11 +17,11 @@ COMMIT;
|
||||
# Corrupt tables
|
||||
# restart
|
||||
SELECT * FROM t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
SELECT * FROM t2;
|
||||
Got one of the listed errors
|
||||
SELECT * FROM t3;
|
||||
ERROR 42S02: Table 'test.t3' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t3` is corrupted. Please drop the table and recreate.
|
||||
# Restore the original tables
|
||||
# restart
|
||||
DROP TABLE t1,t2,t3;
|
||||
|
@ -38,11 +38,11 @@ SELECT COUNT(1) FROM t2;
|
||||
COUNT(1)
|
||||
2048
|
||||
SELECT COUNT(1) FROM t2,t1 where t2.a = t1.a;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist in engine
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
SELECT COUNT(1) FROM t1 where b = 'ab';
|
||||
ERROR HY000: Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
SELECT COUNT(1) FROM t1;
|
||||
ERROR HY000: Table test/t1 is corrupted. Please drop the table and recreate.
|
||||
ERROR HY000: Table `test`.`t1` is corrupted. Please drop the table and recreate.
|
||||
|
||||
# Start server with keys2.txt
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
|
@ -22,6 +22,15 @@ insert into t2 select * from t1;
|
||||
insert into t3 select * from t1;
|
||||
insert into t4 select * from t1;
|
||||
commit;
|
||||
|
||||
# Flush all dirty pages from buffer pool
|
||||
SET @no_checkpoint_save_pct= @@GLOBAL.innodb_max_dirty_pages_pct;
|
||||
SET @no_checkpoint_save_pct_lwm= @@GLOBAL.innodb_max_dirty_pages_pct_lwm;
|
||||
SET GLOBAL innodb_max_dirty_pages_pct_lwm=0.0;
|
||||
SET GLOBAL innodb_max_dirty_pages_pct=0.0;
|
||||
SET GLOBAL innodb_max_dirty_pages_pct= @no_checkpoint_save_pct;
|
||||
SET GLOBAL innodb_max_dirty_pages_pct_lwm= @no_checkpoint_save_pct_lwm;
|
||||
|
||||
CREATE TABLE t5 (a VARCHAR(8)) ENGINE=InnoDB ENCRYPTED=YES;
|
||||
SET GLOBAL innodb_flush_log_at_trx_commit=1;
|
||||
begin;
|
||||
@ -41,6 +50,6 @@ SELECT * FROM INFORMATION_SCHEMA.ENGINES
|
||||
WHERE engine = 'innodb'
|
||||
AND support IN ('YES', 'DEFAULT', 'ENABLED');
|
||||
ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
|
||||
FOUND 1 /\[ERROR\] InnoDB: Encryption key is not found for .*test.t1.ibd/ in mysqld.1.err
|
||||
FOUND 1 /\[ERROR\] InnoDB: Encryption key is not found for .*test.t[1-5].ibd/ in mysqld.1.err
|
||||
# restart: --file-key-management-filename=MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
drop table t1,t2,t3,t4,t5;
|
||||
|
@ -60,7 +60,7 @@ call mtr.add_suppression("Table .*t1.* is corrupted. Please drop the table and r
|
||||
let $restart_parameters=--innodb_force_recovery=1 --skip-innodb-buffer-pool-load-at-startup;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE,ER_TABLE_CORRUPT
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT * FROM t1;
|
||||
SELECT * FROM t2;
|
||||
CHECK TABLE t2;
|
||||
|
@ -41,7 +41,7 @@ SELECT * FROM t1;
|
||||
-- source include/restart_mysqld.inc
|
||||
|
||||
--disable_warnings
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT * FROM t1;
|
||||
--enable_warnings
|
||||
|
||||
@ -70,7 +70,7 @@ INSERT INTO t2 VALUES ('foobar',1,2);
|
||||
-- source include/restart_mysqld.inc
|
||||
|
||||
--disable_warnings
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT * FROM t2;
|
||||
|
||||
--error ER_TABLE_CORRUPT
|
||||
|
@ -30,7 +30,7 @@ INSERT INTO t1 VALUES (1,'foo'),(2,'bar');
|
||||
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT * FROM t1;
|
||||
--replace_regex /key_id [1-9][0-9]*/\1 /
|
||||
SHOW WARNINGS;
|
||||
@ -61,7 +61,7 @@ UNLOCK TABLES;
|
||||
--let $restart_parameters=--plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
ALTER TABLE t1 DISCARD TABLESPACE;
|
||||
# Drop table will succeed.
|
||||
DROP TABLE t1;
|
||||
@ -93,7 +93,7 @@ SHOW CREATE TABLE t1;
|
||||
--let $restart_parameters= --innodb-encrypt-tables --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_ERROR_ON_RENAME
|
||||
--error ER_TABLE_CORRUPT
|
||||
RENAME TABLE t1 TO t1new;
|
||||
--error ER_TABLE_CORRUPT
|
||||
ALTER TABLE t1 RENAME TO t1new;
|
||||
|
@ -7,7 +7,7 @@
|
||||
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[123]\\.ibd' cannot be decrypted; key_version=1");
|
||||
call mtr.add_suppression("InnoDB: Recovery failed to read page");
|
||||
call mtr.add_suppression("InnoDB: Unable to decompress ..test.t[1-3]\\.ibd\\[page id: space=[1-9][0-9]*, page number=[0-9]+\\]");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[12]` is corrupted");
|
||||
call mtr.add_suppression("Table `test`\\.`t[12]` is corrupted");
|
||||
|
||||
--echo # Restart mysqld --file-key-management-filename=keys2.txt
|
||||
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
@ -26,9 +26,9 @@ insert into t3 values (1, repeat('secret',6000));
|
||||
-- let $restart_parameters=--file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys3.txt
|
||||
-- source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
select count(*) from t1 FORCE INDEX (b) where b like 'secret%';
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
select count(*) from t2 FORCE INDEX (b) where b like 'secret%';
|
||||
select count(*) from t3 FORCE INDEX (b) where b like 'secret%';
|
||||
|
||||
|
@ -7,11 +7,10 @@
|
||||
# MDEV-9559: Server without encryption configs crashes if selecting from an implicitly encrypted table
|
||||
#
|
||||
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[15]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("Table `test`\\.`t[15]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("InnoDB: The page \\[page id: space=[1-9][0-9]*, page number=[1-9][0-9]*\\] in file '.*test.t[15]\\.ibd' cannot be decrypted\\.");
|
||||
call mtr.add_suppression("InnoDB: Recovery failed to read page");
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=[1-9][0-9]*, page number=3\\] in file .*test.t[15].ibd looks corrupted; key_version=1");
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[15]` is corrupted");
|
||||
|
||||
# Suppression for builds where file_key_management plugin is linked statically
|
||||
call mtr.add_suppression("Couldn't load plugins from 'file_key_management");
|
||||
@ -43,9 +42,9 @@ alter table t1 encrypted='yes' `encryption_key_id`=1;
|
||||
--let $restart_parameters=--innodb-encrypt-tables=OFF
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
select * from t1;
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
select * from t5;
|
||||
|
||||
--let $restart_parameters=--innodb-encrypt-tables=ON --plugin-load-add=file_key_management --file-key-management --file-key-management-filename=$MYSQL_TEST_DIR/std_data/keys2.txt
|
||||
|
@ -7,7 +7,7 @@
|
||||
# Don't test under embedded
|
||||
-- source include/not_embedded.inc
|
||||
|
||||
call mtr.add_suppression("InnoDB: Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("Table `test`\\.`t[13]` (has an unreadable root page|is corrupted)");
|
||||
call mtr.add_suppression("InnoDB: Encrypted page \\[page id: space=\\d+, page number=[36]\\] in file .*test.t[123]\\.ibd looks corrupted; key_version=");
|
||||
call mtr.add_suppression("\\[ERROR\\] InnoDB: We detected index corruption in an InnoDB type table");
|
||||
call mtr.add_suppression("\\[ERROR\\] (mysqld|mariadbd).*: Index for table 't2' is corrupt; try to repair it");
|
||||
@ -67,11 +67,11 @@ EOF
|
||||
|
||||
--source include/start_mysqld.inc
|
||||
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT * FROM t1;
|
||||
--error ER_GET_ERRMSG,ER_NOT_KEYFILE
|
||||
SELECT * FROM t2;
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT * FROM t3;
|
||||
|
||||
--source include/shutdown_mysqld.inc
|
||||
|
@ -46,7 +46,7 @@ CREATE TABLE t4(a int not null primary key auto_increment, b varchar(128)) engin
|
||||
SELECT SLEEP(5);
|
||||
SELECT COUNT(1) FROM t3;
|
||||
SELECT COUNT(1) FROM t2;
|
||||
--error ER_NO_SUCH_TABLE_IN_ENGINE
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT COUNT(1) FROM t2,t1 where t2.a = t1.a;
|
||||
--error ER_TABLE_CORRUPT
|
||||
SELECT COUNT(1) FROM t1 where b = 'ab';
|
||||
|
@ -42,7 +42,9 @@ insert into t3 select * from t1;
|
||||
insert into t4 select * from t1;
|
||||
commit;
|
||||
|
||||
let $no_checkpoint_flush= 1;
|
||||
--source ../../suite/innodb/include/no_checkpoint_start.inc
|
||||
|
||||
#
|
||||
# We test redo log page read at recv_read_page using
|
||||
# keys that are not in std_data/keys.txt. If checkpoint
|
||||
@ -75,7 +77,7 @@ WHERE engine = 'innodb'
|
||||
AND support IN ('YES', 'DEFAULT', 'ENABLED');
|
||||
|
||||
let SEARCH_FILE = $MYSQLTEST_VARDIR/log/mysqld.1.err;
|
||||
let SEARCH_PATTERN = \[ERROR\] InnoDB: Encryption key is not found for .*test.t1.ibd;
|
||||
let SEARCH_PATTERN = \[ERROR\] InnoDB: Encryption key is not found for .*test.t[1-5].ibd;
|
||||
--source include/search_pattern_in_file.inc
|
||||
|
||||
#
|
||||
|
@ -33,7 +33,6 @@ t1 CREATE TABLE `t1` (
|
||||
PARTITION `p5` VALUES LESS THAN MAXVALUE ENGINE = ENGINE)
|
||||
ALTER TABLE t1 ANALYZE PARTITION p1,p2;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 analyze status Engine-independent statistics collected
|
||||
test.t1 analyze status OK
|
||||
SELECT * FROM t1 ORDER BY c1;
|
||||
c1 c2
|
||||
|
@ -536,6 +536,23 @@ use federated;
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=2) dt2) dt;
|
||||
id name
|
||||
PREPARE stmt FROM "
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=3) dt2) dt;
|
||||
";
|
||||
EXECUTE stmt;
|
||||
id name
|
||||
3 xxx
|
||||
EXECUTE stmt;
|
||||
id name
|
||||
3 xxx
|
||||
DEALLOCATE PREPARE stmt;
|
||||
EXPLAIN
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=3) dt2) dt;
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY <derived4> ALL NULL NULL NULL NULL 5 Using where
|
||||
4 PUSHED DERIVED NULL NULL NULL NULL NULL NULL NULL NULL
|
||||
connection slave;
|
||||
CREATE TABLE federated.t10 (a INT,b INT);
|
||||
CREATE TABLE federated.t11 (a INT, b INT);
|
||||
@ -556,6 +573,54 @@ WHERE id=2) dt2) dt
|
||||
a b a b id name
|
||||
1 1 NULL NULL NULL NULL
|
||||
2 2 NULL NULL NULL NULL
|
||||
#
|
||||
# MDEV-31361: Second execution of PS for query with derived table
|
||||
#
|
||||
connection slave;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
DEFAULT CHARSET=latin1;
|
||||
INSERT INTO federated.t1 VALUES
|
||||
(3,'xxx'), (7,'yyy'), (4,'xxx'), (1,'zzz'), (5,'yyy');
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:SLAVE_PORT/federated/t1';
|
||||
use federated;
|
||||
SELECT * FROM
|
||||
(SELECT * FROM
|
||||
(SELECT * FROM
|
||||
(SELECT * FROM t1 where id>3) dt3
|
||||
WHERE id>3) dt2
|
||||
) dt;
|
||||
id name
|
||||
7 yyy
|
||||
4 xxx
|
||||
5 yyy
|
||||
PREPARE stmt FROM "SELECT * FROM
|
||||
(SELECT * FROM
|
||||
(SELECT * FROM
|
||||
(SELECT * FROM t1 where id>3) dt3
|
||||
WHERE id>3) dt2
|
||||
) dt";
|
||||
EXECUTE stmt;
|
||||
id name
|
||||
7 yyy
|
||||
4 xxx
|
||||
5 yyy
|
||||
EXECUTE stmt;
|
||||
id name
|
||||
7 yyy
|
||||
4 xxx
|
||||
5 yyy
|
||||
DEALLOCATE PREPARE stmt;
|
||||
set global federated_pushdown=0;
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
@ -95,12 +95,9 @@ DEFAULT CHARSET=latin1;
|
||||
INSERT INTO federated.t3 VALUES
|
||||
('yyy'), ('www'), ('yyy'), ('xxx'), ('www'), ('yyy'), ('www');
|
||||
|
||||
#Enable after fix MDEV-31361
|
||||
--disable_ps2_protocol
|
||||
SELECT *
|
||||
FROM federated.t3, (SELECT * FROM federated.t1 WHERE id > 3) t
|
||||
WHERE federated.t3.name=t.name;
|
||||
--enable_ps2_protocol
|
||||
|
||||
EXPLAIN
|
||||
SELECT *
|
||||
@ -358,6 +355,18 @@ use federated;
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=2) dt2) dt;
|
||||
|
||||
PREPARE stmt FROM "
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=3) dt2) dt;
|
||||
";
|
||||
EXECUTE stmt;
|
||||
EXECUTE stmt;
|
||||
DEALLOCATE PREPARE stmt;
|
||||
|
||||
EXPLAIN
|
||||
SELECT * FROM (SELECT * FROM (SELECT * FROM (SELECT * FROM t1 where id=3) dt3
|
||||
WHERE id=3) dt2) dt;
|
||||
|
||||
connection slave;
|
||||
CREATE TABLE federated.t10 (a INT,b INT);
|
||||
CREATE TABLE federated.t11 (a INT, b INT);
|
||||
@ -383,6 +392,52 @@ SELECT * FROM t10 LEFT JOIN
|
||||
WHERE id=2) dt2) dt
|
||||
) ON t10.a=t11.a;
|
||||
|
||||
--echo #
|
||||
--echo # MDEV-31361: Second execution of PS for query with derived table
|
||||
--echo #
|
||||
|
||||
connection slave;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
DEFAULT CHARSET=latin1;
|
||||
|
||||
INSERT INTO federated.t1 VALUES
|
||||
(3,'xxx'), (7,'yyy'), (4,'xxx'), (1,'zzz'), (5,'yyy');
|
||||
|
||||
connection master;
|
||||
DROP TABLE IF EXISTS federated.t1;
|
||||
|
||||
--replace_result $SLAVE_MYPORT SLAVE_PORT
|
||||
eval
|
||||
CREATE TABLE federated.t1 (
|
||||
id int(20) NOT NULL,
|
||||
name varchar(16) NOT NULL default ''
|
||||
)
|
||||
ENGINE="FEDERATED" DEFAULT CHARSET=latin1
|
||||
CONNECTION='mysql://root@127.0.0.1:$SLAVE_MYPORT/federated/t1';
|
||||
|
||||
use federated;
|
||||
|
||||
let $q=
|
||||
SELECT * FROM
|
||||
(SELECT * FROM
|
||||
(SELECT * FROM
|
||||
(SELECT * FROM t1 where id>3) dt3
|
||||
WHERE id>3) dt2
|
||||
) dt;
|
||||
|
||||
eval $q;
|
||||
|
||||
eval PREPARE stmt FROM "$q";
|
||||
EXECUTE stmt;
|
||||
EXECUTE stmt;
|
||||
DEALLOCATE PREPARE stmt;
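One mysqltest detail worth noting here (general tool behaviour, not introduced by this commit): eval is what expands $q inside the quoted string, so the server receives the full SELECT text rather than the literal characters $q. In expanded form the prepared statement is the one shown in the corresponding .result hunk above:

PREPARE stmt FROM "SELECT * FROM
(SELECT * FROM
(SELECT * FROM
(SELECT * FROM t1 where id>3) dt3
WHERE id>3) dt2
) dt";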
|
||||
|
||||
|
||||
set global federated_pushdown=0;
|
||||
|
||||
source include/federated_cleanup.inc;
|
||||
|
@ -17,7 +17,6 @@ galera_ssl_upgrade : [Warning] Failed to load slave replication state from table
|
||||
galera_parallel_simple : timeout related to wsrep_sync_wait
|
||||
galera_insert_bulk : MDEV-30536 no expected deadlock in galera_insert_bulk test
|
||||
galera_sequences : MDEV-32561 WSREP FSM failure: no such a transition REPLICATING -> COMMITTED
|
||||
galera_shutdown_nonprim : MDEV-32635 galera_shutdown_nonprim: mysql_shutdown failed
|
||||
versioning_trx_id : MDEV-18590 : galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch
|
||||
galera_concurrent_ctas : MDEV-32779 galera_concurrent_ctas: assertion in the galera::ReplicatorSMM::finish_cert()
|
||||
galera_as_slave_replay : MDEV-32780 galera_as_slave_replay: assertion in the wsrep::transaction::before_rollback()
|
||||
@ -26,5 +25,4 @@ galera_sst_mysqldump_with_key : MDEV-32782 galera_sst_mysqldump_with_key test fa
|
||||
mdev-31285 : MDEV-25089 Assertion `error.len > 0' failed in galera::ReplicatorSMM::handle_apply_error()
|
||||
galera_var_ignore_apply_errors : MENT-1997 galera_var_ignore_apply_errors test freezes
|
||||
MW-402 : temporarily disabled at the request of Codership
|
||||
MDEV-22232 : temporarily disabled at the request of Codership
|
||||
galera_desync_overlapped : MDEV-21538 galera_desync_overlapped MTR failed: Result content mismatch
|
||||
|
@ -3,21 +3,21 @@ connection node_1;
|
||||
connect con1,127.0.0.1,root,,test,$NODE_MYPORT_1;
|
||||
--- CTAS with empty result set ---
|
||||
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
|
||||
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
|
||||
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
|
||||
CREATE TABLE t2 SELECT * FROM t1;
|
||||
connection node_1;
|
||||
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
|
||||
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
|
||||
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
|
||||
TRUNCATE TABLE t1;
|
||||
connection con1;
|
||||
ERROR 70100: Query execution was interrupted
|
||||
SET DEBUG_SYNC = 'RESET';
|
||||
--- CTAS with non-empty result set ---
|
||||
INSERT INTO t1 VALUES (10), (20), (30);
|
||||
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
|
||||
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
|
||||
CREATE TABLE t2 SELECT * FROM t1;
|
||||
connection node_1;
|
||||
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
|
||||
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
|
||||
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
|
||||
TRUNCATE TABLE t1;
|
||||
connection con1;
|
||||
ERROR 70100: Query execution was interrupted
|
||||
SET DEBUG_SYNC = 'RESET';
|
||||
|
@ -14,7 +14,7 @@ c1
|
||||
INSERT INTO t1 VALUES (4),(3),(1),(2);
|
||||
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
|
||||
CREATE TABLE t1 (pk INT PRIMARY KEY, b INT) ENGINE=SEQUENCE;
|
||||
ERROR 42S01: Table 't1' already exists
|
||||
ERROR 42000: This version of MariaDB doesn't yet support 'non-InnoDB sequences in Galera cluster'
|
||||
ALTER TABLE t1 DROP COLUMN c2;
|
||||
ERROR 42000: Can't DROP COLUMN `c2`; check that it exists
|
||||
SELECT get_lock ('test', 1.5);
|
||||
|
38  mysql-test/suite/galera/r/MDEV-25731.result  Normal file
@ -0,0 +1,38 @@
|
||||
connection node_2;
|
||||
connection node_1;
|
||||
connection node_1;
|
||||
SET GLOBAL wsrep_load_data_splitting=ON;
|
||||
Warnings:
|
||||
Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
|
||||
SET GLOBAL wsrep_replicate_myisam=ON;
|
||||
Warnings:
|
||||
Warning 1287 '@@wsrep_replicate_myisam' is deprecated and will be removed in a future release. Please use '@@wsrep_mode=REPLICATE_MYISAM' instead
|
||||
CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
|
||||
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
|
||||
Warnings:
|
||||
Warning 1235 wsrep_load_data_splitting for other than InnoDB tables
|
||||
SELECT COUNT(*) AS EXPECT_6 FROM t1;
|
||||
EXPECT_6
|
||||
6
|
||||
connection node_2;
|
||||
SELECT COUNT(*) AS EXPECT_6 FROM t1;
|
||||
EXPECT_6
|
||||
6
|
||||
connection node_1;
|
||||
ALTER TABLE t1 ENGINE=InnoDB;
|
||||
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
|
||||
SELECT COUNT(*) AS EXPECT_12 FROM t1;
|
||||
EXPECT_12
|
||||
12
|
||||
connection node_2;
|
||||
SELECT COUNT(*) AS EXPECT_12 FROM t1;
|
||||
EXPECT_12
|
||||
12
|
||||
connection node_1;
|
||||
DROP TABLE t1;
|
||||
SET GLOBAL wsrep_load_data_splitting=OFF;
|
||||
Warnings:
|
||||
Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
|
||||
SET GLOBAL wsrep_replicate_myisam=OFF;
|
||||
Warnings:
|
||||
Warning 1287 '@@wsrep_replicate_myisam' is deprecated and will be removed in a future release. Please use '@@wsrep_mode=REPLICATE_MYISAM' instead
|
@ -1,7 +1,8 @@
|
||||
connection node_2;
|
||||
connection node_1;
|
||||
connection node_1;
|
||||
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
|
||||
SET @wsrep_slave_threads_orig = @@wsrep_slave_threads;
|
||||
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) Engine=InnoDB;
|
||||
SET GLOBAL wsrep_slave_threads = 10;
|
||||
# Set slave threads to 10 step 1
|
||||
SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
|
||||
@ -9,7 +10,7 @@ EXPECT_10
|
||||
10
|
||||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
connection node_2;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
INSERT INTO t1 VALUES (NULL);
|
||||
connection node_1;
|
||||
# Wait until one of the appliers has exited
|
||||
SELECT VARIABLE_VALUE AS EXPECT_9 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
|
||||
@ -27,33 +28,14 @@ EXPECT_20
|
||||
20
|
||||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
connection node_2;
|
||||
INSERT INTO t1 VALUES (1);
|
||||
INSERT INTO t1 VALUES (2);
|
||||
INSERT INTO t1 VALUES (3);
|
||||
INSERT INTO t1 VALUES (4);
|
||||
INSERT INTO t1 VALUES (5);
|
||||
INSERT INTO t1 VALUES (6);
|
||||
INSERT INTO t1 VALUES (7);
|
||||
INSERT INTO t1 VALUES (8);
|
||||
INSERT INTO t1 VALUES (9);
|
||||
INSERT INTO t1 VALUES (10);
|
||||
INSERT INTO t1 VALUES (11);
|
||||
INSERT INTO t1 VALUES (12);
|
||||
INSERT INTO t1 VALUES (13);
|
||||
INSERT INTO t1 VALUES (14);
|
||||
INSERT INTO t1 VALUES (16);
|
||||
INSERT INTO t1 VALUES (17);
|
||||
INSERT INTO t1 VALUES (18);
|
||||
INSERT INTO t1 VALUES (19);
|
||||
INSERT INTO t1 VALUES (20);
|
||||
connection node_1;
|
||||
# Wait until 19 of the appliers has exited
|
||||
SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
|
||||
EXPECT_1
|
||||
1
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
20
|
||||
SELECT COUNT(*) AS EXPECT_51 FROM t1;
|
||||
EXPECT_51
|
||||
51
|
||||
SET GLOBAL wsrep_slave_threads = 10;
|
||||
# Set slave threads to 10 step 3
|
||||
SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
|
||||
@ -62,22 +44,12 @@ EXPECT_10
|
||||
connection node_1;
|
||||
SET GLOBAL wsrep_slave_threads = 1;
|
||||
connection node_2;
|
||||
INSERT INTO t1 VALUES (21);
|
||||
INSERT INTO t1 VALUES (22);
|
||||
INSERT INTO t1 VALUES (23);
|
||||
INSERT INTO t1 VALUES (24);
|
||||
INSERT INTO t1 VALUES (25);
|
||||
INSERT INTO t1 VALUES (26);
|
||||
INSERT INTO t1 VALUES (27);
|
||||
INSERT INTO t1 VALUES (28);
|
||||
INSERT INTO t1 VALUES (29);
|
||||
INSERT INTO t1 VALUES (30);
|
||||
connection node_1;
|
||||
# Wait until slave threads back to 1
|
||||
SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
|
||||
EXPECT_1
|
||||
1
|
||||
SELECT COUNT(*) FROM t1;
|
||||
COUNT(*)
|
||||
30
|
||||
SELECT COUNT(*) AS EXPECT_101 FROM t1;
|
||||
EXPECT_101
|
||||
101
|
||||
DROP TABLE t1;
|
||||
|
@ -1,44 +1,68 @@
connection node_2;
connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (2, 'a');
connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1a;
SET SESSION wsrep_sync_wait=0;
LOCK TABLE t2 WRITE;
connection node_1;
SET @@debug_dbug = "d,sync.wsrep_before_mdl_wait";
SET DEBUG_SYNC= 'wsrep_before_mdl_wait SIGNAL before_mdl_wait WAIT_FOR mdl_wait_continue';
SELECT * FROM t2;;
connection node_1a;
# Wait until select is blocked before MDL lock wait
SET DEBUG_SYNC= 'now WAIT_FOR before_mdl_wait';
connection node_1a;
SET @@debug_dbug = "d,sync.wsrep_after_BF_victim_lock";
connection node_2;
UPDATE t1 SET f2 = 'c' WHERE f1 = 1;
connection node_1a;
SET @@debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_before_mdl_wait";
connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1b;
SET SESSION wsrep_sync_wait=0;
# Wait for conflicting update to block
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_after_BF_victim_lock";
connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1c;
connection node_1a;
SET DEBUG_SYNC = "now SIGNAL BF_victim_continue";
UNLOCK TABLES;
connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
COUNT(*) = 1
connection node_1;
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
EXPECT_1
1
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
COUNT(*) = 1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
EXPECT_1
1
SELECT * FROM t1;
f1 f2
1 c
2 a
connection node_2;
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
COUNT(*) = 1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
EXPECT_1
1
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
COUNT(*) = 1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
EXPECT_1
1
SELECT * FROM t1;
f1 f2
1 c
2 a
DROP TABLE t1;
DROP TABLE t2;
connection node_1a;
SET DEBUG_SYNC = "RESET";
connection node_1b;
SET DEBUG_SYNC = "RESET";
connection node_1;
disconnect node_1a;
disconnect node_1b;
disconnect node_1c;
@ -15,16 +15,37 @@ UPDATE t1 SET f1 = 9;
UPDATE t2 SET f1 = 9 WHERE f1 = 1;
DELETE FROM t1 WHERE f1 = 9;
DELETE FROM t2 WHERE f1 = 9;
TRUNCATE TABLE t1;
TRUNCATE TABLE t1;
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
2
3
4
5
6
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
SELECT COUNT(*) = 0 FROM t2;
COUNT(*) = 0
0
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
2
3
4
5
6
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
connection node_2;
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
connection node_1;
SET GLOBAL wsrep_mode=DEFAULT;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL wsrep_mode=DEFAULT;
12 mysql-test/suite/galera/r/galera_sequence_engine.result Normal file
@ -0,0 +1,12 @@
connection node_2;
connection node_1;
SET GLOBAL wsrep_ignore_apply_errors=0;
SET SESSION AUTOCOMMIT=0;
SET SESSION max_error_count=0;
CREATE TABLE t0 (id GEOMETRY,parent_id GEOMETRY)ENGINE=SEQUENCE;
ERROR 42000: This version of MariaDB doesn't yet support 'non-InnoDB sequences in Galera cluster'
connection node_2;
SHOW CREATE TABLE t0;
ERROR 42S02: Table 'test.t0' doesn't exist
connection node_1;
SET GLOBAL wsrep_ignore_apply_errors=DEFAULT;
@ -5,7 +5,12 @@ connection node_2;
connection node_1;
SET GLOBAL wsrep_provider_options = 'pc.weight=2';
connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
connection node_1;
connection node_2;
SHOW STATUS LIKE 'wsrep_cluster_status';
Variable_name Value
wsrep_cluster_status non-Primary
connection node_1;
SET GLOBAL wsrep_provider_options = 'pc.weight = 1';
243 mysql-test/suite/galera/r/mdev-22063.result Normal file
@ -0,0 +1,243 @@
connection node_2;
connection node_1;
# Case 1 CREATE SEQUENCE with no NOCACHE
CREATE SEQUENCE s ENGINE=InnoDB;
ERROR 42000: This version of MariaDB doesn't yet support 'CACHE without INCREMENT BY 0 in Galera cluster'
CREATE SEQUENCE s NOCACHE ENGINE=InnoDB;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
START TRANSACTION;
REPLACE INTO s VALUES (1,1,9223372036854775806,1,1,1000,0,0);
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
test.t1 optimize status OK
SELECT * FROM t1;
a
SELECT * FROM s;
next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count
1 1 9223372036854775806 1 1 1000 0 0
connection node_2;
SELECT * FROM t1;
a
SELECT * FROM s;
next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count
1 1 9223372036854775806 1 1 1000 0 0
connection node_1;
DROP TABLE t1;
DROP SEQUENCE s;
# Case 2 REPLACE INTO ... SELECT with error
CREATE TABLE t (id INT KEY,a YEAR,INDEX (id,a)) engine=innodb;
REPLACE INTO t (id,a)SELECT /*!99997 */ 1;
ERROR 21S01: Column count doesn't match value count at row 1
REPLACE INTO t (id,a)SELECT /*!99997 */ 1,2;
SELECT * FROM t;
id a
1 2002
CREATE TABLE t2 (id INT KEY,a YEAR,INDEX (id,a)) engine=myisam;
REPLACE INTO t2 (id,a)SELECT /*!99997 */ 1;
ERROR 21S01: Column count doesn't match value count at row 1
REPLACE INTO t2 (id,a)SELECT /*!99997 */ 1,2;
Warnings:
Warning 138 Galera cluster does support consistency check only for InnoDB tables.
SELECT * FROM t2;
id a
1 2002
CREATE TABLE t3 (id INT KEY,a YEAR,INDEX (id,a)) engine=aria;
REPLACE INTO t3 (id,a)SELECT /*!99997 */ 1;
ERROR 21S01: Column count doesn't match value count at row 1
REPLACE INTO t3 (id,a)SELECT /*!99997 */ 1,2;
Warnings:
Warning 138 Galera cluster does support consistency check only for InnoDB tables.
SELECT * FROM t3;
id a
1 2002
connection node_2;
SELECT * FROM t;
id a
1 2002
SELECT * FROM t2;
id a
1 2002
SELECT * FROM t3;
id a
1 2002
connection node_1;
DROP TABLE t,t2,t3;
# Bigger REPLACE ... AS SELECT test
CREATE TABLE t1(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t2(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t3(id int not null primary key ,b int) ENGINE=Aria;
CREATE TABLE t4(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t5(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t6(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t7(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t8(id int not null primary key ,b int) ENGINE=Aria;
INSERT INTO t1(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t2(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t3(id) SELECT seq FROM seq_1_to_1000;
REPLACE INTO t4 SELECT * FROM t1;
REPLACE INTO t5 SELECT * FROM t2;
REPLACE INTO t6 SELECT * FROM t3;
ERROR HY000: Transactional commit not supported by involved engine(s)
REPLACE INTO t7 SELECT * FROM t2;
REPLACE INTO t8 SELECT * FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_2;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_1;
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8;
# Bigger INSERT INTO ... SELECT test
CREATE TABLE t1(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t2(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t3(id int not null primary key ,b int) ENGINE=Aria;
CREATE TABLE t4(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t5(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t6(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t7(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t8(id int not null primary key ,b int) ENGINE=Aria;
INSERT INTO t1(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t2(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t3(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t4 SELECT * FROM t1;
INSERT INTO t5 SELECT * FROM t2;
INSERT INTO t6 SELECT * FROM t3;
ERROR HY000: Transactional commit not supported by involved engine(s)
INSERT INTO t7 SELECT * FROM t2;
INSERT INTO t8 SELECT * FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_2;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_1;
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8;
CREATE TABLE t1(pk int not null primary key) engine=innodb;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
pk
1
2
3
4
5
DROP TABLE t1;
DROP VIEW view_t1;
CREATE TABLE t1(pk int not null primary key) engine=myisam;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
pk
1
2
3
4
5
DROP TABLE t1;
DROP VIEW view_t1;
CREATE TABLE t1(pk int not null primary key) engine=aria;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
pk
1
2
3
4
5
DROP TABLE t1;
DROP VIEW view_t1;
SET GLOBAL wsrep_mode=DEFAULT;
@ -1,23 +1,8 @@
connection node_2;
connection node_1;
connection node_1;
connection node_2;
connection node_1;
CREATE TABLE t ENGINE=InnoDB WITH SYSTEM VERSIONING AS SELECT 1 AS i;
ERROR 42000: This version of MariaDB doesn't yet support 'SYSTEM VERSIONING AS SELECT in Galera cluster'
connection node_2;
SHOW CREATE TABLE t;
Table Create Table
t CREATE TABLE `t` (
`i` int(1) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci WITH SYSTEM VERSIONING
SELECT * from t;
i
1
DROP TABLE IF EXISTS t;
COMMIT;
connection node_2;
SET SESSION wsrep_sync_wait=0;
Killing server ...
Starting server ...
connection node_2;
call mtr.add_suppression("WSREP: Event .*Write_rows_v1 apply failed:.*");
call mtr.add_suppression("SREP: Failed to apply write set: gtid:.*");
ERROR 42S02: Table 'test.t' doesn't exist
@ -0,0 +1,211 @@
connection node_2;
connection node_1;
#
# MDEV-33355 Add a Galera-2-node-to-MariaDB replication MTR test cloning the slave with mariadb-backup
#
connect master, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connect slave, 127.0.0.1, root, , test, $NODE_MYPORT_3;
START SLAVE;
include/wait_for_slave_to_start.inc
connection master;
connection slave;
##############################################################
### Initial block with some transactions
### Slave: Make sure replication is not using GTID
connection slave;
# Using_Gtid=No
### Master: Create and populate t1
connection master;
CREATE TABLE t1(a TEXT) ENGINE=InnoDB;
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#00:stmt#00 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#00:stmt#01 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#00:stmt#02 - slave run#0, before backup');
COMMIT;
connection slave;
##############################################################
### Run the last transaction before mariadb-backup --backup
### Remember SHOW MASTER STATUS and @@gtid_binlog_pos
### before and after the transaction.
### Master: Rember MASTER STATUS and @@gtid_binlog_pos before tr#01
connection master;
### Slave: Remember MASTER STATUS and @@gtid_binlog_pos before tr#01
connection slave;
### Master: Run the actual last transaction before the backup
connection master;
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#01:stmt#00 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#01:stmt#01 - slave run#0, before backup');
INSERT INTO t1 VALUES ('tr#01:stmt#02 - slave run#0, before backup');
COMMIT;
connection slave;
### Master: Remember MASTER STATUS and @@gtid_binlog_pos after tr#01
connection master;
### Slave: Remember MASTER STATUS and @@gtid_binlog_pos after tr#01
connection slave;
##############################################################
### Running `mariadb-backup --backup,--prepare` and checking
### that xtrabackup_slave_info and xtrabackup_binlog_info are OK
### Slave: Create a backup
### Slave: Prepare the backup
### Slave: xtrabackup files:
############################ xtrabackup_slave_info
CHANGE MASTER TO MASTER_LOG_FILE='master_after_tr01_show_master_status_file', MASTER_LOG_POS=master_after_tr01_show_master_status_position;
############################ xtrabackup_binlog_info
slave_after_tr01_show_master_status_file slave_after_tr01_show_master_status_position slave_after_tr01_gtid_binlog_pos
############################
##############################################################
### Run more transactions after the backup:
### - while the slave is still running, then
### - while the slave is shut down
### Master: Run another transaction while the slave is still running
connection master;
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#02:stmt#00 - slave run#0, after backup');
INSERT INTO t1 VALUES ('tr#02:stmt#01 - slave run#0, after backup');
INSERT INTO t1 VALUES ('tr#02:stmt@02 - slave run#0, after backup');
COMMIT;
connection slave;
### Master: Remember MASTER STATUS and @@gtid_binlog_pos after tr#02
connection master;
### Slave: Remember MASTER STATUS and @@gtid_binlog_pos after tr#02
connection slave;
### Master: Checking SHOW BINLOG EVENTS
connection master;
SHOW BINLOG EVENTS IN 'master_after_tr01_show_master_status_file' FROM master_after_tr01_show_master_status_position LIMIT 0,1;
Log_name master_after_tr01_show_master_status_file
Pos master_after_tr01_show_master_status_position
Event_type Gtid
Server_id #
End_log_pos #
Info BEGIN GTID master_after_tr02_gtid_binlog_pos
SHOW BINLOG EVENTS IN 'master_after_tr01_show_master_status_file' FROM master_after_tr01_show_master_status_position LIMIT 1,1;
Log_name master_after_tr01_show_master_status_file
Pos #
Event_type Query_or_Annotate_rows
Server_id #
End_log_pos #
Info INSERT INTO t1 VALUES ('tr#02:stmt#00 - slave run#0, after backup')
### Slave: Checking SHOW BINLOG EVENTS
connection slave;
SHOW BINLOG EVENTS IN 'slave_after_tr01_show_master_status_file' FROM slave_after_tr01_show_master_status_position LIMIT 0,1;
Log_name slave_after_tr01_show_master_status_file
Pos #
Event_type Gtid
Server_id 1
End_log_pos #
Info BEGIN GTID slave_after_tr02_gtid_binlog_pos
SHOW BINLOG EVENTS IN 'slave_after_tr01_show_master_status_file' FROM slave_after_tr01_show_master_status_position LIMIT 1,1;
Log_name slave_after_tr01_show_master_status_file
Pos #
Event_type Query_or_Annotate_rows
Server_id #
End_log_pos #
Info INSERT INTO t1 VALUES ('tr#02:stmt#00 - slave run#0, after backup')
### Slave: Stop replication
connection slave;
STOP SLAVE;
include/wait_for_slave_to_stop.inc
RESET SLAVE;
Warnings:
Note 4190 RESET SLAVE is implicitly changing the value of 'Using_Gtid' from 'No' to 'Slave_Pos'
### Slave: Shutdown the server
connection slave;
### Master: Run a transaction while the slave is shut down
connection master;
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#03:stmt#00 - after slave run#0, slave is shut down, after backup');
INSERT INTO t1 VALUES ('tr#03:stmt#01 - after slave run#0, slave is shut down, after backup');
INSERT INTO t1 VALUES ('tr#03:stmt#02 - after slave run#0, slave is shut down, after backup');
COMMIT;
##############################################################
### Emulate starting a new virgin slave
### Slave: Remove the data directory
### Slave: Copy back the backup
### Slave: Restart the server
connection slave;
# restart
### Slave: Display the restored data before START SLAVE
connection slave;
SELECT * FROM t1 ORDER BY a;
a
tr#00:stmt#00 - slave run#0, before backup
tr#00:stmt#01 - slave run#0, before backup
tr#00:stmt#02 - slave run#0, before backup
tr#01:stmt#00 - slave run#0, before backup
tr#01:stmt#01 - slave run#0, before backup
tr#01:stmt#02 - slave run#0, before backup
### Slave: Execute the CHANGE MASTER statement to set up the host and port
CHANGE MASTER '' TO MASTER_USER='root', MASTER_HOST='127.0.0.1', MASTER_PORT=###, MASTER_CONNECT_RETRY=1;
### Slave: Execute the CHANGE MASTER statement from xtrabackup_slave_info
CHANGE MASTER TO MASTER_LOG_FILE='master_after_tr01_show_master_status_file', MASTER_LOG_POS=master_after_tr01_show_master_status_position;
Warnings:
Note 4190 CHANGE MASTER TO is implicitly changing the value of 'Using_Gtid' from 'Slave_Pos' to 'No'
### Slave: Execute START SLAVE
include/start_slave.inc
### Master: Wait for the slave to apply all master events
connection master;
connection slave;
### Slave: Make sure replication is not using GTID after the slave restart
connection slave;
# Using_Gtid=No
### Slave: Display the restored data after START SLAVE
connection slave;
SELECT * FROM t1 ORDER BY a;
a
tr#00:stmt#00 - slave run#0, before backup
tr#00:stmt#01 - slave run#0, before backup
tr#00:stmt#02 - slave run#0, before backup
tr#01:stmt#00 - slave run#0, before backup
tr#01:stmt#01 - slave run#0, before backup
tr#01:stmt#02 - slave run#0, before backup
tr#02:stmt#00 - slave run#0, after backup
tr#02:stmt#01 - slave run#0, after backup
tr#02:stmt@02 - slave run#0, after backup
tr#03:stmt#00 - after slave run#0, slave is shut down, after backup
tr#03:stmt#01 - after slave run#0, slave is shut down, after backup
tr#03:stmt#02 - after slave run#0, slave is shut down, after backup
##############################################################
### Continue master transactions, check the new slave replicates well.
### Master: Run a transaction after restarting replication
connection master;
START TRANSACTION;
INSERT INTO t1 VALUES ('tr#04:stmt#00 - slave run#1');
INSERT INTO t1 VALUES ('tr#04:stmt#01 - slave run#1');
INSERT INTO t1 VALUES ('tr#04:stmt#02 - slave run#1');
COMMIT;
connection slave;
### Slave: Display the restored data + new transactions
connection slave;
SELECT * FROM t1 ORDER BY a;
a
tr#00:stmt#00 - slave run#0, before backup
tr#00:stmt#01 - slave run#0, before backup
tr#00:stmt#02 - slave run#0, before backup
tr#01:stmt#00 - slave run#0, before backup
tr#01:stmt#01 - slave run#0, before backup
tr#01:stmt#02 - slave run#0, before backup
tr#02:stmt#00 - slave run#0, after backup
tr#02:stmt#01 - slave run#0, after backup
tr#02:stmt@02 - slave run#0, after backup
tr#03:stmt#00 - after slave run#0, slave is shut down, after backup
tr#03:stmt#01 - after slave run#0, slave is shut down, after backup
tr#03:stmt#02 - after slave run#0, slave is shut down, after backup
tr#04:stmt#00 - slave run#1
tr#04:stmt#01 - slave run#1
tr#04:stmt#02 - slave run#1
##############################################################
### Cleanup
### Removing the backup directory
connection master;
DROP TABLE t1;
connection slave;
STOP SLAVE;
include/wait_for_slave_to_stop.inc
RESET SLAVE ALL;
Warnings:
Note 4190 RESET SLAVE is implicitly changing the value of 'Using_Gtid' from 'No' to 'Slave_Pos'
connection master;
set global wsrep_on=OFF;
RESET MASTER;
set global wsrep_on=ON;
@ -18,19 +18,16 @@
CREATE TABLE t1 (a INT) ENGINE=InnoDB;

# Run CTAS until the resulting table gets created,
# then it gets BF aborted by ALTER.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
# then it gets BF aborted by other DDL.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
--send
CREATE TABLE t2 SELECT * FROM t1;

# Wait for CTAS to reach the table create point,
# start executing ALTER and BF abort CTAS.
# start executing other DDL and BF abort CTAS.
--connection node_1
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
--disable_result_log
--error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
--enable_result_log
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
TRUNCATE TABLE t1;

--connection con1
# CTAS gets BF aborted.
@ -46,19 +43,16 @@ SET DEBUG_SYNC = 'RESET';
INSERT INTO t1 VALUES (10), (20), (30);

# Run CTAS until the resulting table gets created,
# then it gets BF aborted by ALTER.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
# then it gets BF aborted by other DDL.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
--send
CREATE TABLE t2 SELECT * FROM t1;

# Wait for CTAS to reach the table create point,
# start executing ALTER and BF abort CTAS.
# start executing other DDL and BF abort CTAS.
--connection node_1
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
--disable_result_log
--error ER_ERROR_ON_RENAME
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
--enable_result_log
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
TRUNCATE TABLE t1;

--connection con1
# CTAS gets BF aborted.
@ -11,7 +11,11 @@ SET SESSION autocommit=0;
SELECT * FROM t1 WHERE c1 <=0 ORDER BY c1 DESC;
--error ER_LOCK_DEADLOCK
INSERT INTO t1 VALUES (4),(3),(1),(2);
--error ER_TABLE_EXISTS_ERROR
#
# This is because support for CREATE TABLE ENGINE=SEQUENCE
# is done before we check does table exists already.
#
--error ER_NOT_SUPPORTED_YET
CREATE TABLE t1 (pk INT PRIMARY KEY, b INT) ENGINE=SEQUENCE;
--error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP COLUMN c2;
27 mysql-test/suite/galera/t/MDEV-25731.test Normal file
@ -0,0 +1,27 @@
--source include/galera_cluster.inc
--source include/have_aria.inc

--connection node_1
SET GLOBAL wsrep_load_data_splitting=ON;
SET GLOBAL wsrep_replicate_myisam=ON;
CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
SELECT COUNT(*) AS EXPECT_6 FROM t1;

--connection node_2
SELECT COUNT(*) AS EXPECT_6 FROM t1;

--connection node_1
ALTER TABLE t1 ENGINE=InnoDB;
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
SELECT COUNT(*) AS EXPECT_12 FROM t1;

--connection node_2
SELECT COUNT(*) AS EXPECT_12 FROM t1;

--connection node_1
DROP TABLE t1;
SET GLOBAL wsrep_load_data_splitting=OFF;
SET GLOBAL wsrep_replicate_myisam=OFF;
@ -2,6 +2,8 @@

[mysqld.1]
wsrep-debug=SERVER
loose-wsrep-mw-336=1

[mysqld.2]
wsrep-debug=SERVER
loose-wsrep-mw-336=2
@ -3,11 +3,12 @@
#

--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/force_restart.inc
--source include/have_sequence.inc

--connection node_1
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
SET @wsrep_slave_threads_orig = @@wsrep_slave_threads;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) Engine=InnoDB;

SET GLOBAL wsrep_slave_threads = 10;

@ -22,7 +23,7 @@ SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE V
SET GLOBAL wsrep_slave_threads = 1;

--connection node_2
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (NULL);

--connection node_1
--echo # Wait until one of the appliers has exited
@ -54,27 +55,19 @@ SELECT VARIABLE_VALUE AS EXPECT_20 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE V
SET GLOBAL wsrep_slave_threads = 1;

--connection node_2
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
INSERT INTO t1 VALUES (3);
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
INSERT INTO t1 VALUES (6);
INSERT INTO t1 VALUES (7);
INSERT INTO t1 VALUES (8);
INSERT INTO t1 VALUES (9);
INSERT INTO t1 VALUES (10);
INSERT INTO t1 VALUES (11);
INSERT INTO t1 VALUES (12);
INSERT INTO t1 VALUES (13);
INSERT INTO t1 VALUES (14);
INSERT INTO t1 VALUES (16);
INSERT INTO t1 VALUES (17);
INSERT INTO t1 VALUES (18);
INSERT INTO t1 VALUES (19);
INSERT INTO t1 VALUES (20);
--disable_query_log
let $c = 50;
while ($c) {
INSERT INTO t1 VALUES(NULL); COMMIT;
dec $c;
}
--enable_query_log

--connection node_1
--let $wait_condition = SELECT COUNT(*) = 51 FROM t1;
--let $wait_condition_on_error_output = SELECT COUNT(*) FROM t1;
--source include/wait_condition_with_debug.inc

--echo # Wait until 19 of the appliers has exited
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
--let $wait_condition_on_error_output = SELECT COUNT(*), 1 as EXPECTED_VALUE FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle'; show processlist
@ -82,7 +75,7 @@ INSERT INTO t1 VALUES (20);

SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';

SELECT COUNT(*) FROM t1;
SELECT COUNT(*) AS EXPECT_51 FROM t1;

SET GLOBAL wsrep_slave_threads = 10;
--echo # Set slave threads to 10 step 3
@ -96,16 +89,13 @@ SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE V
SET GLOBAL wsrep_slave_threads = 1;

--connection node_2
INSERT INTO t1 VALUES (21);
INSERT INTO t1 VALUES (22);
INSERT INTO t1 VALUES (23);
INSERT INTO t1 VALUES (24);
INSERT INTO t1 VALUES (25);
INSERT INTO t1 VALUES (26);
INSERT INTO t1 VALUES (27);
INSERT INTO t1 VALUES (28);
INSERT INTO t1 VALUES (29);
INSERT INTO t1 VALUES (30);
--disable_query_log
let $c = 50;
while ($c) {
INSERT INTO t1 VALUES(NULL); COMMIT;
dec $c;
}
--enable_query_log

--connection node_1
--echo # Wait until slave threads back to 1
@ -115,6 +105,10 @@ INSERT INTO t1 VALUES (30);

SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';

SELECT COUNT(*) FROM t1;
SELECT COUNT(*) AS EXPECT_101 FROM t1;

DROP TABLE t1;

--disable_query_log
SET GLOBAL wsrep_slave_threads = @wsrep_slave_threads_orig;
--enable_query_log
@ -3,70 +3,92 @@
#

--source include/galera_cluster.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc

--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (2, 'a');

--connection node_1
SET AUTOCOMMIT=ON;
START TRANSACTION;

UPDATE t1 SET f2 = 'b' WHERE f1 = 1;

# block access to t2
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1a
SET SESSION wsrep_sync_wait=0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2'
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.TABLES
--source include/wait_condition_with_debug.inc
LOCK TABLE t2 WRITE;

# Block before MLD lock wait
# Block before MDL lock wait
--connection node_1
SET @@debug_dbug = "d,sync.wsrep_before_mdl_wait";
SET DEBUG_SYNC= 'wsrep_before_mdl_wait SIGNAL before_mdl_wait WAIT_FOR mdl_wait_continue';
--send SELECT * FROM t2;

# Wait for SELECT to be blocked
--connection node_1a
#--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIS WHERE STATE = 'System lock';
#--source include/wait_condition.inc
#--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT';
#--source include/wait_condition.inc
--echo # Wait until select is blocked before MDL lock wait
SET DEBUG_SYNC= 'now WAIT_FOR before_mdl_wait';

# block applier to wait after BF victim is locked
--connection node_1a
SET @@debug_dbug = "d,sync.wsrep_after_BF_victim_lock";

# Issue a conflicting update on node #2
--connection node_2
UPDATE t1 SET f2 = 'c' WHERE f1 = 1;

# Unblock the SELECT, to enter wsrep_thd_is_BF
--connection node_1a
SET @@debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_before_mdl_wait";
--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1b
SET SESSION wsrep_sync_wait=0;
--echo # Wait for conflicting update to block
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event:%';
--source include/wait_condition.inc

# unblock applier to try to BF the SELECT
# Unblock the SELECT, to enter wsrep_thd_is_BF
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_after_BF_victim_lock";

--connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1c
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event:%';
--source include/wait_condition.inc

--connection node_1a
# unblock applier to try to BF the SELECT
SET DEBUG_SYNC = "now SIGNAL BF_victim_continue";

# table lock is not needed anymore
UNLOCK TABLES;

# SELECT succeeds
# SELECT returns deadlock
--connection node_1

--error ER_LOCK_DEADLOCK
--reap

SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
--connection node_1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
SELECT * FROM t1;

--connection node_2
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
SELECT * FROM t1;

DROP TABLE t1;
DROP TABLE t2;

--connection node_1a
SET DEBUG_SYNC = "RESET";
--connection node_1b
SET DEBUG_SYNC = "RESET";

--connection node_1
--disconnect node_1a
--disconnect node_1b
--disconnect node_1c
@ -2,22 +2,24 @@
--source include/have_innodb.inc

#
# This tests simple autocommit replication of MyISAM tables. No updates arrive on the slave.
# This tests simple autocommit replication of MyISAM tables.
#

# Without a PK

SET GLOBAL wsrep_mode=REPLICATE_MYISAM;

# Without a PK

CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM;

INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2), (3);
# This is TOI
INSERT INTO t1 SELECT 4 FROM DUAL UNION ALL SELECT 5 FROM DUAL;

CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (2), (3);
# This is TOI
INSERT INTO t2 SELECT 4 FROM DUAL UNION ALL SELECT 5 FROM DUAL;

# Error
@ -34,16 +36,26 @@ UPDATE t2 SET f1 = 9 WHERE f1 = 1;
DELETE FROM t1 WHERE f1 = 9;
DELETE FROM t2 WHERE f1 = 9;

SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;

--connection node_2
SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;

# TRUNCATE

TRUNCATE TABLE t1;
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;

SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;

--connection node_2
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) = 0 FROM t2;
SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;

--connection node_1
SET GLOBAL wsrep_mode=DEFAULT;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL wsrep_mode=DEFAULT;
Some files were not shown because too many files have changed in this diff.