Mirror of https://github.com/MariaDB/server.git

Merge 10.6 into 10.11

Marko Mäkelä
2024-02-14 16:12:53 +02:00
165 changed files with 3458 additions and 2319 deletions


@ -32,12 +32,11 @@ then
# build is not running on Gitlab-CI.
sed '/-DPLUGIN_COLUMNSTORE=NO/d' -i debian/rules
# Take the files and part of control from MCS directory
if [ ! -f debian/mariadb-plugin-columnstore.install ]
then
cp -v storage/columnstore/columnstore/debian/mariadb-plugin-columnstore.* debian/
echo >> debian/control
sed "s/-10.6//" <storage/columnstore/columnstore/debian/control >> debian/control
fi
cp -v storage/columnstore/columnstore/debian/mariadb-plugin-columnstore.* debian/
# idempotent, except for the blank line, but that can be tolerated.
sed -e '/Package: mariadb-plugin-columnstore/,/^$/d' -i debian/control
echo >> debian/control
sed "s/-10.6//" <storage/columnstore/columnstore/debian/control >> debian/control
fi
# Look up distro-version specific stuff


@ -83,6 +83,7 @@ usr/share/man/man1/myisampack.1
usr/share/man/man1/mysqld_multi.1
usr/share/man/man1/mysqld_safe.1
usr/share/man/man1/mysqld_safe_helper.1
usr/share/man/man1/wsrep_sst_backup.1
usr/share/man/man1/wsrep_sst_common.1
usr/share/man/man1/wsrep_sst_mariabackup.1
usr/share/man/man1/wsrep_sst_mysqldump.1


@ -56,7 +56,6 @@ ENDIF()
MYSQL_ADD_EXECUTABLE(mariadb-backup
xtrabackup.cc
innobackupex.cc
changed_page_bitmap.cc
datasink.cc
ds_buffer.cc
ds_compress.cc


@ -71,7 +71,6 @@ char tool_args[2048];
ulong mysql_server_version;
/* server capabilities */
bool have_changed_page_bitmaps = false;
bool have_backup_locks = false;
bool have_lock_wait_timeout = false;
bool have_galera_enabled = false;
@ -561,24 +560,6 @@ Query the server to find out what backup capabilities it supports.
bool
detect_mysql_capabilities_for_backup()
{
const char *query = "SELECT 'INNODB_CHANGED_PAGES', COUNT(*) FROM "
"INFORMATION_SCHEMA.PLUGINS "
"WHERE PLUGIN_NAME LIKE 'INNODB_CHANGED_PAGES'";
char *innodb_changed_pages = NULL;
mysql_variable vars[] = {
{"INNODB_CHANGED_PAGES", &innodb_changed_pages}, {NULL, NULL}};
if (xtrabackup_incremental) {
read_mysql_variables(mysql_connection, query, vars, true);
ut_ad(innodb_changed_pages != NULL);
have_changed_page_bitmaps = (atoi(innodb_changed_pages) == 1);
free_mysql_variables(vars);
}
/* do some sanity checks */
if (opt_galera_info && !have_galera_enabled) {
msg("--galera-info is specified on the command "
@ -1931,18 +1912,6 @@ select_history()
return(true);
}
bool
flush_changed_page_bitmaps()
{
if (xtrabackup_incremental && have_changed_page_bitmaps &&
!xtrabackup_incremental_force_scan) {
xb_mysql_query(mysql_connection,
"FLUSH NO_WRITE_TO_BINLOG CHANGED_PAGE_BITMAPS", false);
}
return(true);
}
/*********************************************************************//**
Deallocate memory, disconnect from server, etc.
@return true on success. */


@ -7,7 +7,6 @@
extern ulong mysql_server_version;
/* server capabilities */
extern bool have_changed_page_bitmaps;
extern bool have_backup_locks;
extern bool have_lock_wait_timeout;
extern bool have_galera_enabled;
@ -35,9 +34,6 @@ capture_tool_command(int argc, char **argv);
bool
select_history();
bool
flush_changed_page_bitmaps();
void
backup_cleanup();

File diff suppressed because it is too large


@ -1,85 +0,0 @@
/******************************************************
XtraBackup: hot backup tool for InnoDB
(c) 2009-2012 Percona Inc.
Originally Created 3/3/2009 Yasufumi Kinoshita
Written by Alexey Kopytov, Aleksandr Kuzminsky, Stewart Smith, Vadim Tkachenko,
Yasufumi Kinoshita, Ignacio Nin and Baron Schwartz.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
*******************************************************/
/* Changed page bitmap interface */
#ifndef XB_CHANGED_PAGE_BITMAP_H
#define XB_CHANGED_PAGE_BITMAP_H
#include <ut0rbt.h>
#include <fil0fil.h>
/* The changed page bitmap structure */
typedef ib_rbt_t xb_page_bitmap;
struct xb_page_bitmap_range_struct;
/* The bitmap range iterator over one space id */
typedef struct xb_page_bitmap_range_struct xb_page_bitmap_range;
/****************************************************************//**
Read the disk bitmap and build the changed page bitmap tree for the
LSN interval incremental_lsn to log_sys.next_checkpoint_lsn.
@return the built bitmap tree */
xb_page_bitmap*
xb_page_bitmap_init(void);
/*=====================*/
/****************************************************************//**
Free the bitmap tree. */
void
xb_page_bitmap_deinit(
/*==================*/
xb_page_bitmap* bitmap); /*!<in/out: bitmap tree */
/****************************************************************//**
Set up a new bitmap range iterator over a given space id changed
pages in a given bitmap.
@return bitmap range iterator */
xb_page_bitmap_range*
xb_page_bitmap_range_init(
/*======================*/
xb_page_bitmap* bitmap, /*!< in: bitmap to iterate over */
ulint space_id); /*!< in: space id */
/****************************************************************//**
Get the next page id that has its bit set or cleared, i.e. equal to
bit_value.
@return page id */
ulint
xb_page_bitmap_range_get_next_bit(
/*==============================*/
xb_page_bitmap_range* bitmap_range, /*!< in/out: bitmap range */
ibool bit_value); /*!< in: bit value */
/****************************************************************//**
Free the bitmap range iterator. */
void
xb_page_bitmap_range_deinit(
/*========================*/
xb_page_bitmap_range* bitmap_range); /*! in/out: bitmap range */
#endif


@ -143,7 +143,7 @@ static inline ATTRIBUTE_FORMAT(printf, 1,2) ATTRIBUTE_NORETURN void die(const ch
# define POSIX_FADV_NORMAL
# define POSIX_FADV_SEQUENTIAL
# define POSIX_FADV_DONTNEED
# define posix_fadvise(a,b,c,d) do {} while(0)
# define posix_fadvise(fd, offset, len, advice) do { (void)offset; } while(0)
#endif
/***********************************************************************


@ -239,8 +239,7 @@ xb_fil_cur_open(
/ cursor->page_size);
cursor->read_filter = read_filter;
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor,
node->space->id);
cursor->read_filter->init(&cursor->read_filter_ctxt, cursor);
return(XB_FIL_CUR_SUCCESS);
}
@ -510,10 +509,6 @@ xb_fil_cur_close(
/*=============*/
xb_fil_cur_t *cursor) /*!< in/out: source file cursor */
{
if (cursor->read_filter) {
cursor->read_filter->deinit(&cursor->read_filter_ctxt);
}
aligned_free(cursor->buf);
cursor->buf = NULL;


@ -27,6 +27,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <my_dir.h>
#include "read_filt.h"
#include "mtr0types.h"
#include "srv0start.h"
#include "srv0srv.h"
#include "xtrabackup.h"


@ -32,29 +32,13 @@ Perform read filter context initialization that is common to all read
filters. */
static
void
common_init(
/*========*/
rf_pass_through_init(
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
const xb_fil_cur_t* cursor) /*!<in: file cursor */
{
ctxt->offset = 0;
ctxt->data_file_size = cursor->statinfo.st_size;
ctxt->buffer_capacity = cursor->buf_size;
ctxt->page_size = cursor->page_size;
}
/****************************************************************//**
Initialize the pass-through read filter. */
static
void
rf_pass_through_init(
/*=================*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter context */
const xb_fil_cur_t* cursor, /*!<in: file cursor */
ulint space_id __attribute__((unused)))
/*!<in: space id we are reading */
{
common_init(ctxt, cursor);
}
/****************************************************************//**
@ -65,143 +49,25 @@ rf_pass_through_get_next_batch(
/*===========================*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
context */
ib_int64_t* read_batch_start, /*!<out: starting read
int64_t* read_batch_start, /*!<out: starting read
offset in bytes for the
next batch of pages */
ib_int64_t* read_batch_len) /*!<out: length in
int64_t* read_batch_len) /*!<out: length in
bytes of the next batch
of pages */
{
*read_batch_start = ctxt->offset;
*read_batch_len = ctxt->data_file_size - ctxt->offset;
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
if (*read_batch_len > (int64_t)ctxt->buffer_capacity) {
*read_batch_len = ctxt->buffer_capacity;
}
ctxt->offset += *read_batch_len;
}
/****************************************************************//**
Deinitialize the pass-through read filter. */
static
void
rf_pass_through_deinit(
/*===================*/
xb_read_filt_ctxt_t* ctxt __attribute__((unused)))
/*!<in: read filter context */
{
}
/****************************************************************//**
Initialize the changed page bitmap-based read filter. Assumes that
the bitmap is already set up in changed_page_bitmap. */
static
void
rf_bitmap_init(
/*===========*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
context */
const xb_fil_cur_t* cursor, /*!<in: read cursor */
ulint space_id) /*!<in: space id */
{
common_init(ctxt, cursor);
ctxt->bitmap_range = xb_page_bitmap_range_init(changed_page_bitmap,
space_id);
ctxt->filter_batch_end = 0;
}
/****************************************************************//**
Get the next batch of pages for the bitmap read filter. */
static
void
rf_bitmap_get_next_batch(
/*=====================*/
xb_read_filt_ctxt_t* ctxt, /*!<in/out: read filter
context */
ib_int64_t* read_batch_start, /*!<out: starting read
offset in bytes for the
next batch of pages */
ib_int64_t* read_batch_len) /*!<out: length in
bytes of the next batch
of pages */
{
ulint start_page_id;
const ulint page_size = ctxt->page_size;
start_page_id = (ulint)(ctxt->offset / page_size);
xb_a (ctxt->offset % page_size == 0);
if (start_page_id == ctxt->filter_batch_end) {
/* Used up all the previous bitmap range, get some more */
ulint next_page_id;
/* Find the next changed page using the bitmap */
next_page_id = xb_page_bitmap_range_get_next_bit
(ctxt->bitmap_range, TRUE);
if (next_page_id == ULINT_UNDEFINED) {
*read_batch_len = 0;
return;
}
ctxt->offset = next_page_id * page_size;
/* Find the end of the current changed page block by searching
for the next cleared bitmap bit */
ctxt->filter_batch_end
= xb_page_bitmap_range_get_next_bit(ctxt->bitmap_range,
FALSE);
xb_a(next_page_id < ctxt->filter_batch_end);
}
*read_batch_start = ctxt->offset;
if (ctxt->filter_batch_end == ULINT_UNDEFINED) {
/* No more cleared bits in the bitmap, need to copy all the
remaining pages. */
*read_batch_len = ctxt->data_file_size - ctxt->offset;
} else {
*read_batch_len = ctxt->filter_batch_end * page_size
- ctxt->offset;
}
/* If the page block is larger than the buffer capacity, limit it to
buffer capacity. The subsequent invocations will continue returning
the current block in buffer-sized pieces until ctxt->filter_batch_end
is reached, trigerring the next bitmap query. */
if (*read_batch_len > (ib_int64_t)ctxt->buffer_capacity) {
*read_batch_len = ctxt->buffer_capacity;
}
ctxt->offset += *read_batch_len;
xb_a (ctxt->offset % page_size == 0);
xb_a (*read_batch_start % page_size == 0);
xb_a (*read_batch_len % page_size == 0);
}
/****************************************************************//**
Deinitialize the changed page bitmap-based read filter. */
static
void
rf_bitmap_deinit(
/*=============*/
xb_read_filt_ctxt_t* ctxt) /*!<in/out: read filter context */
{
xb_page_bitmap_range_deinit(ctxt->bitmap_range);
}
/* The pass-through read filter */
xb_read_filt_t rf_pass_through = {
&rf_pass_through_init,
&rf_pass_through_get_next_batch,
&rf_pass_through_deinit
};
/* The changed page bitmap-based read filter */
xb_read_filt_t rf_bitmap = {
&rf_bitmap_init,
&rf_bitmap_get_next_batch,
&rf_bitmap_deinit
};


@ -25,42 +25,27 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
#ifndef XB_READ_FILT_H
#define XB_READ_FILT_H
#include "changed_page_bitmap.h"
typedef uint32_t space_id_t;
#include <cstdint>
#include <cstddef>
struct xb_fil_cur_t;
/* The read filter context */
struct xb_read_filt_ctxt_t {
ib_int64_t offset; /*!< current file offset */
ib_int64_t data_file_size; /*!< data file size */
int64_t offset; /*!< current file offset */
int64_t data_file_size; /*!< data file size */
size_t buffer_capacity;/*!< read buffer capacity */
space_id_t space_id; /*!< space id */
/* The following fields used only in bitmap filter */
/* Move these to union if any other filters are added in future */
xb_page_bitmap_range *bitmap_range; /*!< changed page bitmap range
iterator for space_id */
ulint page_size; /*!< page size */
ulint filter_batch_end;/*!< the ending page id of the
current changed page block in
the bitmap */
/** TODO: remove this default constructor */
xb_read_filt_ctxt_t() : page_size(0) {}
};
/* The read filter */
struct xb_read_filt_t {
void (*init)(xb_read_filt_ctxt_t* ctxt,
const xb_fil_cur_t* cursor,
ulint space_id);
const xb_fil_cur_t* cursor);
void (*get_next_batch)(xb_read_filt_ctxt_t* ctxt,
ib_int64_t* read_batch_start,
ib_int64_t* read_batch_len);
void (*deinit)(xb_read_filt_ctxt_t* ctxt);
int64_t* read_batch_start,
int64_t* read_batch_len);
};
extern xb_read_filt_t rf_pass_through;
extern xb_read_filt_t rf_bitmap;
#endif


@ -101,7 +101,6 @@ Street, Fifth Floor, Boston, MA 02110-1335 USA
#include "ds_buffer.h"
#include "ds_tmpfile.h"
#include "xbstream.h"
#include "changed_page_bitmap.h"
#include "read_filt.h"
#include "backup_wsrep.h"
#include "innobackupex.h"
@ -155,7 +154,6 @@ char *xtrabackup_incremental;
lsn_t incremental_lsn;
lsn_t incremental_to_lsn;
lsn_t incremental_last_lsn;
xb_page_bitmap *changed_page_bitmap;
char *xtrabackup_incremental_basedir; /* for --backup */
char *xtrabackup_extra_lsndir; /* for --backup with --extra-lsndir */
@ -422,6 +420,8 @@ pthread_cond_t scanned_lsn_cond;
/** Store the deferred tablespace name during --backup */
static std::set<std::string> defer_space_names;
typedef decltype(fil_space_t::id) space_id_t;
typedef std::map<space_id_t,std::string> space_id_to_name_t;
struct ddl_tracker_t {
@ -3031,12 +3031,7 @@ static my_bool xtrabackup_copy_datafile(ds_ctxt *ds_data,
goto skip;
}
if (!changed_page_bitmap) {
read_filter = &rf_pass_through;
}
else {
read_filter = &rf_bitmap;
}
read_filter = &rf_pass_through;
res = xb_fil_cur_open(&cursor, read_filter, node, thread_n, ULLONG_MAX);
if (res == XB_FIL_CUR_SKIP) {
@ -4801,11 +4796,6 @@ fail:
std::thread(log_copying_thread).detach();
/* FLUSH CHANGED_PAGE_BITMAPS call */
if (!flush_changed_page_bitmaps()) {
goto fail;
}
ut_a(xtrabackup_parallel > 0);
if (xtrabackup_parallel > 1) {
@ -4879,9 +4869,6 @@ fail:
goto fail;
}
if (changed_page_bitmap) {
xb_page_bitmap_deinit(changed_page_bitmap);
}
backup_datasinks.destroy();
msg("Redo log (from LSN " LSN_PF " to " LSN_PF ") was copied.",


@ -24,7 +24,7 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA
#include <my_getopt.h>
#include "datasink.h"
#include "xbstream.h"
#include "changed_page_bitmap.h"
#include "fil0fil.h"
#include <set>
#define XB_TOOL_NAME "mariadb-backup"
@ -84,8 +84,6 @@ extern my_bool xb_backup_rocksdb;
extern uint opt_protocol;
extern xb_page_bitmap *changed_page_bitmap;
extern char *xtrabackup_incremental;
extern my_bool xtrabackup_incremental_force_scan;


@ -14,7 +14,8 @@
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
SET(MAN1_WSREP wsrep_sst_rsync.1 wsrep_sst_common.1 wsrep_sst_mariabackup.1
wsrep_sst_mysqldump.1 wsrep_sst_rsync_wan.1 galera_recovery.1 galera_new_cluster.1)
wsrep_sst_mysqldump.1 wsrep_sst_rsync_wan.1 galera_recovery.1 galera_new_cluster.1
wsrep_sst_backup.1)
SET(MAN1_SERVER innochecksum.1 myisam_ftdump.1 myisamchk.1
aria_chk.1 aria_dump_log.1 aria_ftdump.1 aria_pack.1 aria_read_log.1
aria_s3_copy.1

man/wsrep_sst_backup.1 Normal file (16 lines added)

@ -0,0 +1,16 @@
'\" t
.\"
.TH "\FBWSREP_SST_BACKUP\FR" "1" "22 May 2022" "MariaDB 10\&.3" "MariaDB Database System"
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l
.SH NAME
wsrep_sst_backup \- backup helper script for the MariaDB Galera Cluster
.SH DESCRIPTION
Use: See source code of script\.
.PP
For more information, please refer to the MariaDB Knowledge Base, available online at https://mariadb.com/kb/


@ -12,7 +12,6 @@ INSERT INTO t1 VALUES (1,REPEAT('a',100)),(2,REPEAT('v',200)),(3,REPEAT('r',300)
INSERT INTO t1 VALUES (5,REPEAT('k',500)),(6,'April'),(7,7),(8,""),(9,"M"),(10,DEFAULT);
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p2;
Table Op Msg_type Msg_text


@ -4291,6 +4291,40 @@ a
drop table t1, t2;
drop view v1;
drop procedure aproc;
#
# MDEV-31305: Aggregation over materialized derived table
#
CREATE VIEW v AS
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
FLOOR(RAND(13) * 5) AS p
FROM seq_100_to_105 seq1
JOIN seq_10_to_15 seq2
JOIN seq_1_to_5 seq3;
SELECT v.*, SUM(p) from v;
dim1 dim2 dim3 p SUM(p)
100 10 1 2 371
SELECT d.*, SUM(p)
FROM (
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
FLOOR(RAND(13) * 5) AS p
FROM seq_100_to_105 seq1
JOIN seq_10_to_15 seq2
JOIN seq_1_to_5 seq3
) d;
dim1 dim2 dim3 p SUM(p)
100 10 1 2 371
WITH demo AS
(
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
FLOOR(RAND(13) * 5) AS p
FROM seq_100_to_105 seq1
JOIN seq_10_to_15 seq2
JOIN seq_1_to_5 seq3
)
SELECT d.*, SUM(p) FROM demo d;
dim1 dim2 dim3 p SUM(p)
100 10 1 2 371
DROP VIEW v;
# End of 10.4 tests
#
# MDEV-31143: view with ORDER BY used in query with rownum() in WHERE


@ -2795,6 +2795,42 @@ drop table t1, t2;
drop view v1;
drop procedure aproc;
--echo #
--echo # MDEV-31305: Aggregation over materialized derived table
--echo #
--source include/have_sequence.inc
CREATE VIEW v AS
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
FLOOR(RAND(13) * 5) AS p
FROM seq_100_to_105 seq1
JOIN seq_10_to_15 seq2
JOIN seq_1_to_5 seq3;
SELECT v.*, SUM(p) from v;
SELECT d.*, SUM(p)
FROM (
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
FLOOR(RAND(13) * 5) AS p
FROM seq_100_to_105 seq1
JOIN seq_10_to_15 seq2
JOIN seq_1_to_5 seq3
) d;
WITH demo AS
(
SELECT seq1.seq AS dim1, seq2.seq AS dim2, seq3.seq AS dim3,
FLOOR(RAND(13) * 5) AS p
FROM seq_100_to_105 seq1
JOIN seq_10_to_15 seq2
JOIN seq_1_to_5 seq3
)
SELECT d.*, SUM(p) FROM demo d;
DROP VIEW v;
--echo # End of 10.4 tests
--echo #


@ -0,0 +1,322 @@
#
# MDEV-30660 COUNT DISTINCT seems unnecessarily slow when run on a PK
#
set @save_optimizer_trace = @@optimizer_trace;
SET optimizer_trace='enabled=on';
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL);
INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
# Optimization is applied (aggregator=simple):
SELECT COUNT(DISTINCT a) FROM t1;
COUNT(DISTINCT a)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.a)",
"aggregator_type": "simple"
}
]
SELECT AVG(DISTINCT a), SUM(DISTINCT b) FROM t1;
AVG(DISTINCT a) SUM(DISTINCT b)
2.0000 1
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "avg(distinct t1.a)",
"aggregator_type": "simple"
},
{
"function": "sum(distinct t1.b)",
"aggregator_type": "distinct"
}
]
# Only `a` is unique but it's enough to eliminate DISTINCT:
SELECT COUNT(DISTINCT b, a) FROM t1;
COUNT(DISTINCT b, a)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.b,t1.a)",
"aggregator_type": "simple"
}
]
SELECT COUNT(DISTINCT a, a + b) FROM t1;
COUNT(DISTINCT a, a + b)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.a,t1.a + t1.b)",
"aggregator_type": "simple"
}
]
SELECT SUM(DISTINCT a), AVG(DISTINCT a), COUNT(DISTINCT a) FROM t1 WHERE a > 1;
SUM(DISTINCT a) AVG(DISTINCT a) COUNT(DISTINCT a)
5 2.5000 2
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "sum(distinct t1.a)",
"aggregator_type": "simple"
},
{
"function": "avg(distinct t1.a)",
"aggregator_type": "simple"
},
{
"function": "count(distinct t1.a)",
"aggregator_type": "simple"
}
]
# Optimization is not applied 'cause function argument is not a field
# (aggregator=distinct):
SELECT SUM(DISTINCT a + b) FROM t1;
SUM(DISTINCT a + b)
9
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "sum(distinct t1.a + t1.b)",
"aggregator_type": "distinct"
}
]
SELECT COUNT(DISTINCT b) FROM t1;
COUNT(DISTINCT b)
1
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.b)",
"aggregator_type": "distinct"
}
]
SELECT AVG(DISTINCT b / a) FROM t1;
AVG(DISTINCT b / a)
0.61110000
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "avg(distinct t1.b / t1.a)",
"aggregator_type": "distinct"
}
]
EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1;
id select_type table type possible_keys key key_len ref rows Extra
1 PRIMARY t1 index NULL PRIMARY 4 NULL 3 Using index
2 DEPENDENT SUBQUERY NULL NULL NULL NULL NULL NULL NULL No tables used
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct (/* select#2 */ select t1.a))",
"aggregator_type": "distinct"
}
]
CREATE TABLE t2 (a INT);
INSERT INTO t2 VALUES (1), (2);
# Optimization is not applied 'cause there is more than one table
SELECT COUNT(DISTINCT t1.a) FROM t1, t2;
COUNT(DISTINCT t1.a)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.a)",
"aggregator_type": "distinct"
}
]
SELECT AVG(DISTINCT t1.a) FROM t1, t2;
AVG(DISTINCT t1.a)
2.0000
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "avg(distinct t1.a)",
"aggregator_type": "distinct"
}
]
# Const tables, optimization is applied
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1) AS t2;
COUNT(DISTINCT a)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.a)",
"aggregator_type": "simple"
}
]
SELECT AVG(DISTINCT t1.a) FROM (SELECT 1 AS a) AS t2, t1, (SELECT 2 AS a) AS t3;
AVG(DISTINCT t1.a)
2.0000
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "avg(distinct t1.a)",
"aggregator_type": "simple"
}
]
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1 UNION SELECT 2) AS t2;
COUNT(DISTINCT a)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.a)",
"aggregator_type": "distinct"
}
]
# Unique index on two columns
CREATE TABLE t3 (a INT NOT NULL, b INT NOT NULL);
INSERT INTO t3 VALUES (1,1), (1,2), (1,3), (2,1), (2,2), (3,1), (3,2);
CREATE UNIQUE INDEX t3_a_b ON t3 (a, b);
# Optimization is applied:
SELECT COUNT(DISTINCT a, b) FROM t3;
COUNT(DISTINCT a, b)
7
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t3.a,t3.b)",
"aggregator_type": "simple"
}
]
SELECT COUNT(DISTINCT b, a) FROM t3;
COUNT(DISTINCT b, a)
7
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t3.b,t3.a)",
"aggregator_type": "simple"
}
]
SELECT COUNT(DISTINCT b, a) FROM t3 WHERE a < 3;
COUNT(DISTINCT b, a)
5
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t3.b,t3.a)",
"aggregator_type": "simple"
}
]
# Optimization is applied to one of the functions:
SELECT COUNT(DISTINCT b), SUM(DISTINCT a), SUM(DISTINCT a + b) FROM t3 GROUP BY a;
COUNT(DISTINCT b) SUM(DISTINCT a) SUM(DISTINCT a + b)
3 1 9
2 2 7
2 3 9
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t3.b)",
"aggregator_type": "simple"
},
{
"function": "sum(distinct t3.a)",
"aggregator_type": "distinct"
},
{
"function": "sum(distinct t3.a + t3.b)",
"aggregator_type": "distinct"
}
]
# Can't apply optimization 'cause GROUP BY argument is not a field:
SELECT COUNT(DISTINCT b) FROM t3 GROUP BY a+b;
COUNT(DISTINCT b)
1
2
3
1
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t3.b)",
"aggregator_type": "distinct"
}
]
# Test merged view
CREATE VIEW v1 AS SELECT * FROM t1;
# Optimization is applied
SELECT COUNT(DISTINCT a, b) FROM v1;
COUNT(DISTINCT a, b)
3
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "count(distinct t1.a,t1.b)",
"aggregator_type": "simple"
}
]
# GROUP_CONCAT implements non-standard distinct aggregator
SELECT GROUP_CONCAT(b) FROM t1;
GROUP_CONCAT(b)
1,1,1
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "group_concat(t1.b separator ',')",
"aggregator_type": "simple"
}
]
SELECT GROUP_CONCAT(DISTINCT b) FROM t1;
GROUP_CONCAT(DISTINCT b)
1
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
JS
[
{
"function": "group_concat(distinct t1.b separator ',')",
"aggregator_type": "distinct"
}
]
DROP TABLE t1, t2, t3;
DROP VIEW v1;
SET optimizer_trace = @save_optimizer_trace;
#
# end of 10.5 tests
#


@ -0,0 +1,109 @@
# Embedded doesn't have optimizer trace:
--source include/not_embedded.inc
--source include/have_sequence.inc
--echo #
--echo # MDEV-30660 COUNT DISTINCT seems unnecessarily slow when run on a PK
--echo #
set @save_optimizer_trace = @@optimizer_trace;
SET optimizer_trace='enabled=on';
let $trace=
SELECT JSON_DETAILED(JSON_EXTRACT(trace, '\$**.prepare_sum_aggregators')) AS JS
FROM INFORMATION_SCHEMA.OPTIMIZER_TRACE;
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b INT NOT NULL);
INSERT INTO t1 VALUES (1,1), (2,1), (3,1);
--echo # Optimization is applied (aggregator=simple):
SELECT COUNT(DISTINCT a) FROM t1;
eval $trace;
SELECT AVG(DISTINCT a), SUM(DISTINCT b) FROM t1;
eval $trace;
--echo # Only `a` is unique but it's enough to eliminate DISTINCT:
SELECT COUNT(DISTINCT b, a) FROM t1;
eval $trace;
SELECT COUNT(DISTINCT a, a + b) FROM t1;
eval $trace;
SELECT SUM(DISTINCT a), AVG(DISTINCT a), COUNT(DISTINCT a) FROM t1 WHERE a > 1;
eval $trace;
--echo # Optimization is not applied 'cause function argument is not a field
--echo # (aggregator=distinct):
SELECT SUM(DISTINCT a + b) FROM t1;
eval $trace;
SELECT COUNT(DISTINCT b) FROM t1;
eval $trace;
SELECT AVG(DISTINCT b / a) FROM t1;
eval $trace;
EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1;
eval $trace;
CREATE TABLE t2 (a INT);
INSERT INTO t2 VALUES (1), (2);
--echo # Optimization is not applied 'cause there is more than one table
SELECT COUNT(DISTINCT t1.a) FROM t1, t2;
eval $trace;
SELECT AVG(DISTINCT t1.a) FROM t1, t2;
eval $trace;
--echo # Const tables, optimization is applied
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1) AS t2;
eval $trace;
SELECT AVG(DISTINCT t1.a) FROM (SELECT 1 AS a) AS t2, t1, (SELECT 2 AS a) AS t3;
eval $trace;
SELECT COUNT(DISTINCT a) FROM t1, (SELECT 1 UNION SELECT 2) AS t2;
eval $trace;
--echo # Unique index on two columns
CREATE TABLE t3 (a INT NOT NULL, b INT NOT NULL);
INSERT INTO t3 VALUES (1,1), (1,2), (1,3), (2,1), (2,2), (3,1), (3,2);
CREATE UNIQUE INDEX t3_a_b ON t3 (a, b);
--echo # Optimization is applied:
SELECT COUNT(DISTINCT a, b) FROM t3;
eval $trace;
SELECT COUNT(DISTINCT b, a) FROM t3;
eval $trace;
SELECT COUNT(DISTINCT b, a) FROM t3 WHERE a < 3;
eval $trace;
--echo # Optimization is applied to one of the functions:
SELECT COUNT(DISTINCT b), SUM(DISTINCT a), SUM(DISTINCT a + b) FROM t3 GROUP BY a;
eval $trace;
--echo # Can't apply optimization 'cause GROUP BY argument is not a field:
SELECT COUNT(DISTINCT b) FROM t3 GROUP BY a+b;
eval $trace;
--echo # Test merged view
CREATE VIEW v1 AS SELECT * FROM t1;
--echo # Optimization is applied
SELECT COUNT(DISTINCT a, b) FROM v1;
eval $trace;
--echo # GROUP_CONCAT implements non-standard distinct aggregator
SELECT GROUP_CONCAT(b) FROM t1;
eval $trace;
SELECT GROUP_CONCAT(DISTINCT b) FROM t1;
eval $trace;
DROP TABLE t1, t2, t3;
DROP VIEW v1;
SET optimizer_trace = @save_optimizer_trace;
--echo #
--echo # end of 10.5 tests
--echo #

mysql-test/main/lowercase_table2.result Executable file → Normal file (no content changes)


@ -736,13 +736,17 @@ The following specify which files/extra groups are read (specified before remain
max_connections*5 or max_connections + table_cache*2
(whichever is larger) number of file descriptors
(Automatically configured unless set explicitly)
--optimizer-adjust-secondary-key-costs=#
0 = No changes. 1 = Update secondary key costs for ranges
to be at least 5x of clustered primary key costs. 2 =
Remove 'max_seek optimization' for secondary keys and
slight adjustment of filter cost. This option will be
deleted in MariaDB 11.0 as it is not needed with the new
11.0 optimizer.
--optimizer-adjust-secondary-key-costs=name
A bit field with the following values:
adjust_secondary_key_cost = Update secondary key costs
for ranges to be at least 5x of clustered primary key
costs. disable_max_seek = Disable 'max_seek optimization'
for secondary keys and slight adjustment of filter cost.
disable_forced_index_in_group_by = Disable automatic
forced index in GROUP BY. This variable will be deleted
in MariaDB 11.0 as it is not needed with the new 11.0
optimizer.
Use 'ALL' to set all combinations.
--optimizer-extra-pruning-depth=#
If the optimizer needs to enumerate join prefix of this
size or larger, then it will try aggressively prune away
@ -1732,7 +1736,7 @@ old-alter-table DEFAULT
old-mode UTF8_IS_UTF8MB3
old-passwords FALSE
old-style-user-limits FALSE
optimizer-adjust-secondary-key-costs 0
optimizer-adjust-secondary-key-costs
optimizer-extra-pruning-depth 8
optimizer-max-sel-arg-weight 32000
optimizer-max-sel-args 16000
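
For reference, a minimal usage sketch of the reworked option, assuming the enumerated-value syntax shown in the help text above and exercised in the test changes later in this diff (the three value names are the documented ones; per the help text, 'ALL' selects every combination and the old numeric value 7 maps to all three names):

set @@optimizer_adjust_secondary_key_costs = 'disable_forced_index_in_group_by';
select @@optimizer_adjust_secondary_key_costs;
set @@optimizer_adjust_secondary_key_costs = 'adjust_secondary_key_cost,disable_max_seek';
set @@optimizer_adjust_secondary_key_costs = default;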


@ -1609,6 +1609,12 @@ EXPLAIN SELECT MIN(d) FROM t1 where b=2 and c=3 group by a {
},
{
"test_if_skip_sort_order": []
},
{
"prepare_sum_aggregators": {
"function": "min(t1.d)",
"aggregator_type": "simple"
}
}
]
}
@ -1818,6 +1824,18 @@ EXPLAIN SELECT id,MIN(a),MAX(a) FROM t1 WHERE a>=20010104e0 GROUP BY id {
},
{
"test_if_skip_sort_order": []
},
{
"prepare_sum_aggregators": {
"function": "min(t1.a)",
"aggregator_type": "simple"
}
},
{
"prepare_sum_aggregators": {
"function": "max(t1.a)",
"aggregator_type": "simple"
}
}
]
}
@ -10528,6 +10546,25 @@ JS
drop table t1,t2,t3,t10,t11;
set optimizer_trace=DEFAULT;
#
# MDEV-29179 Condition pushdown from HAVING into WHERE is not shown in optimizer trace
#
CREATE TABLE t1 (a INT, b VARCHAR(1), KEY (a), KEY(b,a)) ENGINE=MEMORY;
INSERT INTO t1 VALUES (4,'n'),(1,'h'),(NULL,'w');
SET optimizer_trace= 'enabled=on';
SELECT b, a FROM t1 WHERE b <> 'p' OR a = 4 GROUP BY b, a HAVING a <= 7;
b a
h 1
n 4
SELECT json_detailed(json_extract(trace, '$**.steps[*].join_optimization.steps[*].condition_pushdown_from_having') ) exp1, JSON_VALID(trace) exp2 FROM information_schema.optimizer_trace;
exp1 exp2
[
{
"conds": "(t1.b <> 'p' or multiple equal(4, t1.a)) and t1.a <= 7",
"having": null
}
] 1
DROP TABLE t1;
#
# End of 10.4 tests
#
set optimizer_trace='enabled=on';


@ -803,6 +803,16 @@ from information_schema.optimizer_trace;
drop table t1,t2,t3,t10,t11;
set optimizer_trace=DEFAULT;
--echo #
--echo # MDEV-29179 Condition pushdown from HAVING into WHERE is not shown in optimizer trace
--echo #
CREATE TABLE t1 (a INT, b VARCHAR(1), KEY (a), KEY(b,a)) ENGINE=MEMORY;
INSERT INTO t1 VALUES (4,'n'),(1,'h'),(NULL,'w');
SET optimizer_trace= 'enabled=on';
SELECT b, a FROM t1 WHERE b <> 'p' OR a = 4 GROUP BY b, a HAVING a <= 7; SELECT json_detailed(json_extract(trace, '$**.steps[*].join_optimization.steps[*].condition_pushdown_from_having') ) exp1, JSON_VALID(trace) exp2 FROM information_schema.optimizer_trace;
DROP TABLE t1;
--echo #
--echo # End of 10.4 tests
--echo #


@ -2063,7 +2063,6 @@ ALTER TABLE t1 ANALYZE PARTITION p1 EXTENDED;
ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'EXTENDED' at line 1
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p1;
Table Op Msg_type Msg_text


@ -27,7 +27,6 @@ Table Op Msg_type Msg_text
test.t1 repair error Wrong partition name or partition list
ALTER TABLE t1 ANALYZE PARTITION p0;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 CHECK PARTITION p0;
Table Op Msg_type Msg_text


@ -5812,5 +5812,123 @@ GROUP_CONCAT(@x)
0
DROP TABLE t;
#
# MDEV-15703: Crash in EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT
#
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
EXECUTE stmt USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
EXECUTE stmt USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
DEALLOCATE PREPARE stmt;
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
CREATE PROCEDURE p1(a INT) SELECT 1;
EXECUTE IMMEDIATE 'CALL p1(?)' USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'CALL p1(?)' USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
DROP PROCEDURE p1;
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING DEFAULT;
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING IGNORE;
ERROR HY000: Default/ignore value is not supported for such parameter usage
# multi-update and DEFAULT
CREATE TABLE t1 (a INT, b INT DEFAULT a);
INSERT into t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT DEFAULT a);
INSERT INTO t2 VALUES (1,10),(2,30);
UPDATE t1,t2 SET t1.b = DEFAULT, t2.b = DEFAULT WHERE t1.a=t2.a;
SELECT * FROM t1;
a b
1 1
2 2
SELECT * FROM t2;
a b
1 1
2 2
# re-check the case for Prepared Statement with parameters
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
INSERT INTO t1 VALUES (1,2),(2,3);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
SELECT * FROM t1;
a b
1 1
2 2
SELECT * FROM t2;
a b
1 1
2 2
DROP TABLE t1, t2;
# multi-update and IGNORE
CREATE TABLE t1 (a INT, b INT default a);
INSERT INTO t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT default a);
INSERT INTO t2 VALUES (1,10),(2,30);
UPDATE t1,t2 SET t1.b = IGNORE, t2.b = IGNORE WHERE t1.a=t2.a;
SELECT * FROM t1;
a b
1 2
2 3
SELECT * FROM t2;
a b
1 NULL
2 NULL
# re-check the case for Prepared Statement with parameters
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
INSERT INTO t1 VALUES (1,2),(2,3);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
SELECT * FROM t1;
a b
1 2
2 3
SELECT * FROM t2;
a b
1 10
2 30
DROP TABLE t1, t2;
# multi-update and DEFAULT parameter (no default)
CREATE TABLE t1 (a INT, b INT NOT NULL);
INSERT INTO t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT NOT NULL);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
ERROR HY000: Field 'b' doesn't have a default value
DROP TABLE t1, t2;
# multi-update and IGNORE parameter (no default)
CREATE TABLE t1 (a INT, b INT NOT NULL);
INSERT INTO t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT NOT NULL);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
SELECT * FROM t1;
a b
1 2
2 3
SELECT * FROM t2;
a b
1 10
2 30
DROP TABLE t1, t2;
#
# End of 10.4 tests
#


@ -5248,6 +5248,125 @@ EXECUTE IMMEDIATE 'SELECT GROUP_CONCAT(@x) FROM t GROUP BY @x := f';
DROP TABLE t;
--echo #
--echo # MDEV-15703: Crash in EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT
--echo #
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
--error ER_INVALID_DEFAULT_PARAM
EXECUTE stmt USING DEFAULT;
DEALLOCATE PREPARE stmt;
PREPARE stmt FROM 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)';
--error ER_INVALID_DEFAULT_PARAM
EXECUTE stmt USING IGNORE;
DEALLOCATE PREPARE stmt;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING DEFAULT;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'CREATE OR REPLACE TABLE t1 (a INT DEFAULT ?)' USING IGNORE;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING DEFAULT;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'BEGIN NOT ATOMIC DECLARE a INT DEFAULT ?; END' USING IGNORE;
CREATE PROCEDURE p1(a INT) SELECT 1;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'CALL p1(?)' USING DEFAULT;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'CALL p1(?)' USING IGNORE;
DROP PROCEDURE p1;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING DEFAULT;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'SELECT ? UNION SELECT 1' USING IGNORE;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING DEFAULT;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION ALL SELECT 1) AS derived' USING IGNORE;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING DEFAULT;
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'SELECT * FROM (SELECT ? UNION DISTINCT SELECT 1) AS derived' USING IGNORE;
--echo # multi-update and DEFAULT
CREATE TABLE t1 (a INT, b INT DEFAULT a);
INSERT into t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT DEFAULT a);
INSERT INTO t2 VALUES (1,10),(2,30);
UPDATE t1,t2 SET t1.b = DEFAULT, t2.b = DEFAULT WHERE t1.a=t2.a;
SELECT * FROM t1;
SELECT * FROM t2;
--echo # re-check the case for Prepared Statement with parameters
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
INSERT INTO t1 VALUES (1,2),(2,3);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
SELECT * FROM t1;
SELECT * FROM t2;
# Cleanup
DROP TABLE t1, t2;
--echo # multi-update and IGNORE
CREATE TABLE t1 (a INT, b INT default a);
INSERT INTO t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT default a);
INSERT INTO t2 VALUES (1,10),(2,30);
UPDATE t1,t2 SET t1.b = IGNORE, t2.b = IGNORE WHERE t1.a=t2.a;
SELECT * FROM t1;
SELECT * FROM t2;
--echo # re-check the case for Prepared Statement with parameters
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
INSERT INTO t1 VALUES (1,2),(2,3);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
SELECT * FROM t1;
SELECT * FROM t2;
# Cleanup
DROP TABLE t1, t2;
--echo # multi-update and DEFAULT parameter (no default)
CREATE TABLE t1 (a INT, b INT NOT NULL);
INSERT INTO t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT NOT NULL);
INSERT INTO t2 VALUES (1,10),(2,30);
--error ER_NO_DEFAULT_FOR_FIELD
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING DEFAULT, DEFAULT;
# Cleanup
DROP TABLE t1, t2;
--echo # multi-update and IGNORE parameter (no default)
CREATE TABLE t1 (a INT, b INT NOT NULL);
INSERT INTO t1 VALUES (1,2),(2,3);
CREATE TABLE t2 (a INT, b INT NOT NULL);
INSERT INTO t2 VALUES (1,10),(2,30);
EXECUTE IMMEDIATE 'UPDATE t1,t2 SET t1.b = ?, t2.b = ? WHERE t1.a=t2.a' USING IGNORE, IGNORE;
SELECT * FROM t1;
SELECT * FROM t2;
# Cleanup
DROP TABLE t1, t2;
--echo #
--echo # End of 10.4 tests
--echo #


@ -80,3 +80,38 @@ json_detailed(json_extract(@trace, '$**.considered_access_paths'))
]
]
drop table t1, name, flag2;
select @@optimizer_adjust_secondary_key_costs;
@@optimizer_adjust_secondary_key_costs
set @@optimizer_adjust_secondary_key_costs=7;
select @@optimizer_adjust_secondary_key_costs;
@@optimizer_adjust_secondary_key_costs
adjust_secondary_key_cost,disable_max_seek,disable_forced_index_in_group_by
set @@optimizer_adjust_secondary_key_costs=default;
#
# MDEV-33306 Optimizer choosing incorrect index in 10.6, 10.5 but not in 10.4
#
create table t1 (a int primary key, b int, c int, d int, key(b),key(c)) engine=innodb;
insert into t1 select seq, mod(seq,10), mod(seq,2), seq from seq_1_to_50000;
explain select b, sum(d) from t1 where c=0 group by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 index c b 5 NULL # Using where
select b, sum(d) from t1 where c=0 group by b;
b sum(d)
0 125025000
2 124985000
4 124995000
6 125005000
8 125015000
set @@optimizer_adjust_secondary_key_costs="disable_forced_index_in_group_by";
explain select b, sum(d) from t1 where c=0 group by b;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t1 ALL c NULL NULL NULL # Using where; Using temporary; Using filesort
select b, sum(d) from t1 where c=0 group by b;
b sum(d)
0 125025000
2 124985000
4 124995000
6 125005000
8 125015000
drop table t1;


@ -1,5 +1,6 @@
--source include/have_sequence.inc
--source include/not_embedded.inc
--source include/have_innodb.inc
#
# Show the costs for rowid filter
@ -51,3 +52,23 @@ select json_detailed(json_extract(@trace, '$**.considered_access_paths'));
--enable_ps_protocol
drop table t1, name, flag2;
select @@optimizer_adjust_secondary_key_costs;
set @@optimizer_adjust_secondary_key_costs=7;
select @@optimizer_adjust_secondary_key_costs;
set @@optimizer_adjust_secondary_key_costs=default;
--echo #
--echo # MDEV-33306 Optimizer choosing incorrect index in 10.6, 10.5 but not in 10.4
--echo #
create table t1 (a int primary key, b int, c int, d int, key(b),key(c)) engine=innodb;
insert into t1 select seq, mod(seq,10), mod(seq,2), seq from seq_1_to_50000;
--replace_column 9 #
explain select b, sum(d) from t1 where c=0 group by b;
select b, sum(d) from t1 where c=0 group by b;
set @@optimizer_adjust_secondary_key_costs="disable_forced_index_in_group_by";
--replace_column 9 #
explain select b, sum(d) from t1 where c=0 group by b;
select b, sum(d) from t1 where c=0 group by b;
drop table t1;


@ -34,13 +34,12 @@ set session use_stat_tables='preferably';
# Must NOT show "Engine-independent statistics collected":
alter table t1 analyze partition p0;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
# Should not have Handler_read_rnd_next=34
show session status like 'Handler_read_rnd%';
Variable_name Value
Handler_read_rnd 0
Handler_read_rnd_deleted 0
Handler_read_rnd_next 34
Handler_read_rnd_next 0
drop table t1;
SET use_stat_tables = DEFAULT;


@ -17,7 +17,6 @@ SET use_stat_tables = PREFERABLY;
CREATE TABLE t1 ( a INT ) ENGINE=MyISAM PARTITION BY HASH(a) PARTITIONS 2;
ALTER TABLE t1 ANALYZE PARTITION p1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
include/show_binlog_events.inc
Log_name Pos Event_type Server_id End_log_pos Info


@ -2621,9 +2621,9 @@ ERROR HY000: 'ignore' is not allowed in this context
VALUES (DEFAULT);
ERROR HY000: 'default' is not allowed in this context
EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE;
ERROR HY000: 'ignore' is not allowed in this context
ERROR HY000: Default/ignore value is not supported for such parameter usage
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
ERROR HY000: 'default' is not allowed in this context
ERROR HY000: Default/ignore value is not supported for such parameter usage
#
# MDEV-24675: TVC using subqueries
#


@ -1358,9 +1358,9 @@ DELIMITER ;$$
VALUES (IGNORE);
--error ER_NOT_ALLOWED_IN_THIS_CONTEXT
VALUES (DEFAULT);
--error ER_NOT_ALLOWED_IN_THIS_CONTEXT
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE;
--error ER_NOT_ALLOWED_IN_THIS_CONTEXT
--error ER_INVALID_DEFAULT_PARAM
EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT;
--echo #


@ -607,4 +607,68 @@ drop table t1;
DROP FUNCTION avgcost;
DROP FUNCTION avg2;
DROP FUNCTION myfunc_double;
#
# MDEV-24507: Server Crash using UDF in WHERE clause of VIEW
#
CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "UDF_EXAMPLE_LIB";
create table t1(pk int primary key, a varchar(20));
create table t2(pk int primary key, a varchar(20));
create view v1 as select pk, a from t1 union select pk, a from t2;
insert into t1 values (1, "One"), (3, "Three"), (5, "Five");
insert into t2 values (2, "Dos"), (4, "Quatro"), (6, "Seis");
select pk, myfunc_int(a) from t1;
pk myfunc_int(a)
1 3
3 5
5 4
select pk, myfunc_int(a) from t2;
pk myfunc_int(a)
2 3
4 6
6 4
select pk, myfunc_int(a) from v1;
pk myfunc_int(a)
1 3
3 5
5 4
2 3
4 6
6 4
select pk from t1 where myfunc_int(a) > 4;
pk
3
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
pk
3
set @save_optimizer_switch = @@optimizer_switch;
set optimizer_switch = 'derived_merge=OFF';
select pk, myfunc_int(a) from t1;
pk myfunc_int(a)
1 3
3 5
5 4
select pk, myfunc_int(a) from t2;
pk myfunc_int(a)
2 3
4 6
6 4
select pk, myfunc_int(a) from v1;
pk myfunc_int(a)
1 3
3 5
5 4
2 3
4 6
6 4
select pk from t1 where myfunc_int(a) > 4;
pk
3
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
pk
3
set optimizer_switch = @save_optimizer_switch;
drop view v1;
drop table t2;
drop table t1;
drop function myfunc_int;
# End of 10.4 tests


@ -647,4 +647,38 @@ DROP FUNCTION avgcost;
DROP FUNCTION avg2;
DROP FUNCTION myfunc_double;
--echo #
--echo # MDEV-24507: Server Crash using UDF in WHERE clause of VIEW
--echo #
--replace_result $UDF_EXAMPLE_SO UDF_EXAMPLE_LIB
eval CREATE FUNCTION myfunc_int RETURNS INTEGER SONAME "$UDF_EXAMPLE_SO";
create table t1(pk int primary key, a varchar(20));
create table t2(pk int primary key, a varchar(20));
create view v1 as select pk, a from t1 union select pk, a from t2;
insert into t1 values (1, "One"), (3, "Three"), (5, "Five");
insert into t2 values (2, "Dos"), (4, "Quatro"), (6, "Seis");
select pk, myfunc_int(a) from t1;
select pk, myfunc_int(a) from t2;
select pk, myfunc_int(a) from v1;
select pk from t1 where myfunc_int(a) > 4;
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
set @save_optimizer_switch = @@optimizer_switch;
set optimizer_switch = 'derived_merge=OFF';
select pk, myfunc_int(a) from t1;
select pk, myfunc_int(a) from t2;
select pk, myfunc_int(a) from v1;
select pk from t1 where myfunc_int(a) > 4;
select pk from (select pk, a from t1) A where myfunc_int(A.a) > 4;
set optimizer_switch = @save_optimizer_switch;
drop view v1;
drop table t2;
drop table t1;
drop function myfunc_int;
--echo # End of 10.4 tests


@ -0,0 +1,6 @@
1
2
3
1
5
6


@ -33,7 +33,6 @@ t1 CREATE TABLE `t1` (
PARTITION `p5` VALUES LESS THAN MAXVALUE ENGINE = ENGINE)
ALTER TABLE t1 ANALYZE PARTITION p1,p2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
SELECT * FROM t1 ORDER BY c1;
c1 c2


@ -17,7 +17,6 @@ galera_ssl_upgrade : [Warning] Failed to load slave replication state from table
galera_parallel_simple : timeout related to wsrep_sync_wait
galera_insert_bulk : MDEV-30536 no expected deadlock in galera_insert_bulk test
galera_sequences : MDEV-32561 WSREP FSM failure: no such a transition REPLICATING -> COMMITTED
galera_shutdown_nonprim : MDEV-32635 galera_shutdown_nonprim: mysql_shutdown failed
versioning_trx_id : MDEV-18590 : galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch
galera_concurrent_ctas : MDEV-32779 galera_concurrent_ctas: assertion in the galera::ReplicatorSMM::finish_cert()
galera_as_slave_replay : MDEV-32780 galera_as_slave_replay: assertion in the wsrep::transaction::before_rollback()
@ -26,5 +25,4 @@ galera_sst_mysqldump_with_key : MDEV-32782 galera_sst_mysqldump_with_key test fa
mdev-31285 : MDEV-25089 Assertion `error.len > 0' failed in galera::ReplicatorSMM::handle_apply_error()
galera_var_ignore_apply_errors : MENT-1997 galera_var_ignore_apply_errors test freezes
MW-402 : temporarily disabled at the request of Codership
MDEV-22232 : temporarily disabled at the request of Codership
galera_desync_overlapped : MDEV-21538 galera_desync_overlapped MTR failed: Result content mismatch


@ -3,21 +3,21 @@ connection node_1;
connect con1,127.0.0.1,root,,test,$NODE_MYPORT_1;
--- CTAS with empty result set ---
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
CREATE TABLE t2 SELECT * FROM t1;
connection node_1;
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
TRUNCATE TABLE t1;
connection con1;
ERROR 70100: Query execution was interrupted
SET DEBUG_SYNC = 'RESET';
--- CTAS with non-empty result set ---
INSERT INTO t1 VALUES (10), (20), (30);
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
CREATE TABLE t2 SELECT * FROM t1;
connection node_1;
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
TRUNCATE TABLE t1;
connection con1;
ERROR 70100: Query execution was interrupted
SET DEBUG_SYNC = 'RESET';


@ -14,7 +14,7 @@ c1
INSERT INTO t1 VALUES (4),(3),(1),(2);
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
CREATE TABLE t1 (pk INT PRIMARY KEY, b INT) ENGINE=SEQUENCE;
ERROR 42S01: Table 't1' already exists
ERROR 42000: This version of MariaDB doesn't yet support 'non-InnoDB sequences in Galera cluster'
ALTER TABLE t1 DROP COLUMN c2;
ERROR 42000: Can't DROP COLUMN `c2`; check that it exists
SELECT get_lock ('test', 1.5);


@ -0,0 +1,38 @@
connection node_2;
connection node_1;
connection node_1;
SET GLOBAL wsrep_load_data_splitting=ON;
Warnings:
Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SET GLOBAL wsrep_replicate_myisam=ON;
Warnings:
Warning 1287 '@@wsrep_replicate_myisam' is deprecated and will be removed in a future release. Please use '@@wsrep_mode=REPLICATE_MYISAM' instead
CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
Warnings:
Warning 1235 wsrep_load_data_splitting for other than InnoDB tables
SELECT COUNT(*) AS EXPECT_6 FROM t1;
EXPECT_6
6
connection node_2;
SELECT COUNT(*) AS EXPECT_6 FROM t1;
EXPECT_6
6
connection node_1;
ALTER TABLE t1 ENGINE=InnoDB;
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
SELECT COUNT(*) AS EXPECT_12 FROM t1;
EXPECT_12
12
connection node_2;
SELECT COUNT(*) AS EXPECT_12 FROM t1;
EXPECT_12
12
connection node_1;
DROP TABLE t1;
SET GLOBAL wsrep_load_data_splitting=OFF;
Warnings:
Warning 1287 '@@wsrep_load_data_splitting' is deprecated and will be removed in a future release
SET GLOBAL wsrep_replicate_myisam=OFF;
Warnings:
Warning 1287 '@@wsrep_replicate_myisam' is deprecated and will be removed in a future release. Please use '@@wsrep_mode=REPLICATE_MYISAM' instead


@ -1,7 +1,8 @@
connection node_2;
connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
SET @wsrep_slave_threads_orig = @@wsrep_slave_threads;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) Engine=InnoDB;
SET GLOBAL wsrep_slave_threads = 10;
# Set slave threads to 10 step 1
SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
@ -9,7 +10,7 @@ EXPECT_10
10
SET GLOBAL wsrep_slave_threads = 1;
connection node_2;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (NULL);
connection node_1;
# Wait until one of the appliers has exited
SELECT VARIABLE_VALUE AS EXPECT_9 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
@ -27,33 +28,14 @@ EXPECT_20
20
SET GLOBAL wsrep_slave_threads = 1;
connection node_2;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
INSERT INTO t1 VALUES (3);
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
INSERT INTO t1 VALUES (6);
INSERT INTO t1 VALUES (7);
INSERT INTO t1 VALUES (8);
INSERT INTO t1 VALUES (9);
INSERT INTO t1 VALUES (10);
INSERT INTO t1 VALUES (11);
INSERT INTO t1 VALUES (12);
INSERT INTO t1 VALUES (13);
INSERT INTO t1 VALUES (14);
INSERT INTO t1 VALUES (16);
INSERT INTO t1 VALUES (17);
INSERT INTO t1 VALUES (18);
INSERT INTO t1 VALUES (19);
INSERT INTO t1 VALUES (20);
connection node_1;
# Wait until 19 of the appliers has exited
SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
EXPECT_1
1
SELECT COUNT(*) FROM t1;
COUNT(*)
20
SELECT COUNT(*) AS EXPECT_51 FROM t1;
EXPECT_51
51
SET GLOBAL wsrep_slave_threads = 10;
# Set slave threads to 10 step 3
SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
@ -62,22 +44,12 @@ EXPECT_10
connection node_1;
SET GLOBAL wsrep_slave_threads = 1;
connection node_2;
INSERT INTO t1 VALUES (21);
INSERT INTO t1 VALUES (22);
INSERT INTO t1 VALUES (23);
INSERT INTO t1 VALUES (24);
INSERT INTO t1 VALUES (25);
INSERT INTO t1 VALUES (26);
INSERT INTO t1 VALUES (27);
INSERT INTO t1 VALUES (28);
INSERT INTO t1 VALUES (29);
INSERT INTO t1 VALUES (30);
connection node_1;
# Wait until slave threads back to 1
SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
EXPECT_1
1
SELECT COUNT(*) FROM t1;
COUNT(*)
30
SELECT COUNT(*) AS EXPECT_101 FROM t1;
EXPECT_101
101
DROP TABLE t1;

View File

@ -1,44 +1,68 @@
connection node_2;
connection node_1;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (2, 'a');
connection node_1;
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1a;
SET SESSION wsrep_sync_wait=0;
LOCK TABLE t2 WRITE;
connection node_1;
SET @@debug_dbug = "d,sync.wsrep_before_mdl_wait";
SET DEBUG_SYNC= 'wsrep_before_mdl_wait SIGNAL before_mdl_wait WAIT_FOR mdl_wait_continue';
SELECT * FROM t2;;
connection node_1a;
# Wait until select is blocked before MDL lock wait
SET DEBUG_SYNC= 'now WAIT_FOR before_mdl_wait';
connection node_1a;
SET @@debug_dbug = "d,sync.wsrep_after_BF_victim_lock";
connection node_2;
UPDATE t1 SET f2 = 'c' WHERE f1 = 1;
connection node_1a;
SET @@debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_before_mdl_wait";
connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1b;
SET SESSION wsrep_sync_wait=0;
# Wait for conflicting update to block
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_after_BF_victim_lock";
connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1;
connection node_1c;
connection node_1a;
SET DEBUG_SYNC = "now SIGNAL BF_victim_continue";
UNLOCK TABLES;
connection node_1;
ERROR 40001: Deadlock found when trying to get lock; try restarting transaction
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
COUNT(*) = 1
connection node_1;
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
EXPECT_1
1
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
COUNT(*) = 1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
EXPECT_1
1
SELECT * FROM t1;
f1 f2
1 c
2 a
connection node_2;
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
COUNT(*) = 1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
EXPECT_1
1
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
COUNT(*) = 1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
EXPECT_1
1
SELECT * FROM t1;
f1 f2
1 c
2 a
DROP TABLE t1;
DROP TABLE t2;
connection node_1a;
SET DEBUG_SYNC = "RESET";
connection node_1b;
SET DEBUG_SYNC = "RESET";
connection node_1;
disconnect node_1a;
disconnect node_1b;
disconnect node_1c;

View File

@ -15,16 +15,37 @@ UPDATE t1 SET f1 = 9;
UPDATE t2 SET f1 = 9 WHERE f1 = 1;
DELETE FROM t1 WHERE f1 = 9;
DELETE FROM t2 WHERE f1 = 9;
TRUNCATE TABLE t1;
TRUNCATE TABLE t1;
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
2
3
4
5
6
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
SELECT COUNT(*) = 0 FROM t2;
COUNT(*) = 0
0
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
2
3
4
5
6
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
connection node_2;
SELECT * FROM t1 ORDER BY f1;
f1
SELECT * FROM t2 ORDER BY f1;
f1
connection node_1;
SET GLOBAL wsrep_mode=DEFAULT;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL wsrep_mode=DEFAULT;

View File

@ -0,0 +1,12 @@
connection node_2;
connection node_1;
SET GLOBAL wsrep_ignore_apply_errors=0;
SET SESSION AUTOCOMMIT=0;
SET SESSION max_error_count=0;
CREATE TABLE t0 (id GEOMETRY,parent_id GEOMETRY)ENGINE=SEQUENCE;
ERROR 42000: This version of MariaDB doesn't yet support 'non-InnoDB sequences in Galera cluster'
connection node_2;
SHOW CREATE TABLE t0;
ERROR 42S02: Table 'test.t0' doesn't exist
connection node_1;
SET GLOBAL wsrep_ignore_apply_errors=DEFAULT;

View File

@ -5,7 +5,12 @@ connection node_2;
connection node_1;
SET GLOBAL wsrep_provider_options = 'pc.weight=2';
connection node_2;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
SET SESSION wsrep_sync_wait = 0;
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
connection node_1;
connection node_2;
SHOW STATUS LIKE 'wsrep_cluster_status';
Variable_name Value
wsrep_cluster_status non-Primary
connection node_1;
SET GLOBAL wsrep_provider_options = 'pc.weight = 1';

View File

@ -0,0 +1,243 @@
connection node_2;
connection node_1;
# Case 1 CREATE SEQUENCE with no NOCACHE
CREATE SEQUENCE s ENGINE=InnoDB;
ERROR 42000: This version of MariaDB doesn't yet support 'CACHE without INCREMENT BY 0 in Galera cluster'
CREATE SEQUENCE s NOCACHE ENGINE=InnoDB;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
START TRANSACTION;
REPLACE INTO s VALUES (1,1,9223372036854775806,1,1,1000,0,0);
OPTIMIZE TABLE t1;
Table Op Msg_type Msg_text
test.t1 optimize note Table does not support optimize, doing recreate + analyze instead
test.t1 optimize status OK
SELECT * FROM t1;
a
SELECT * FROM s;
next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count
1 1 9223372036854775806 1 1 1000 0 0
connection node_2;
SELECT * FROM t1;
a
SELECT * FROM s;
next_not_cached_value minimum_value maximum_value start_value increment cache_size cycle_option cycle_count
1 1 9223372036854775806 1 1 1000 0 0
connection node_1;
DROP TABLE t1;
DROP SEQUENCE s;
# Case 2 REPLACE INTO ... SELECT with error
CREATE TABLE t (id INT KEY,a YEAR,INDEX (id,a)) engine=innodb;
REPLACE INTO t (id,a)SELECT /*!99997 */ 1;
ERROR 21S01: Column count doesn't match value count at row 1
REPLACE INTO t (id,a)SELECT /*!99997 */ 1,2;
SELECT * FROM t;
id a
1 2002
CREATE TABLE t2 (id INT KEY,a YEAR,INDEX (id,a)) engine=myisam;
REPLACE INTO t2 (id,a)SELECT /*!99997 */ 1;
ERROR 21S01: Column count doesn't match value count at row 1
REPLACE INTO t2 (id,a)SELECT /*!99997 */ 1,2;
Warnings:
Warning 138 Galera cluster does support consistency check only for InnoDB tables.
SELECT * FROM t2;
id a
1 2002
CREATE TABLE t3 (id INT KEY,a YEAR,INDEX (id,a)) engine=aria;
REPLACE INTO t3 (id,a)SELECT /*!99997 */ 1;
ERROR 21S01: Column count doesn't match value count at row 1
REPLACE INTO t3 (id,a)SELECT /*!99997 */ 1,2;
Warnings:
Warning 138 Galera cluster does support consistency check only for InnoDB tables.
SELECT * FROM t3;
id a
1 2002
connection node_2;
SELECT * FROM t;
id a
1 2002
SELECT * FROM t2;
id a
1 2002
SELECT * FROM t3;
id a
1 2002
connection node_1;
DROP TABLE t,t2,t3;
# Bigger REPLACE ... AS SELECT test
CREATE TABLE t1(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t2(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t3(id int not null primary key ,b int) ENGINE=Aria;
CREATE TABLE t4(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t5(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t6(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t7(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t8(id int not null primary key ,b int) ENGINE=Aria;
INSERT INTO t1(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t2(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t3(id) SELECT seq FROM seq_1_to_1000;
REPLACE INTO t4 SELECT * FROM t1;
REPLACE INTO t5 SELECT * FROM t2;
REPLACE INTO t6 SELECT * FROM t3;
ERROR HY000: Transactional commit not supported by involved engine(s)
REPLACE INTO t7 SELECT * FROM t2;
REPLACE INTO t8 SELECT * FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_2;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_1;
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8;
# Bigger INSERT INTO ... SELECT test
CREATE TABLE t1(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t2(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t3(id int not null primary key ,b int) ENGINE=Aria;
CREATE TABLE t4(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t5(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t6(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t7(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t8(id int not null primary key ,b int) ENGINE=Aria;
INSERT INTO t1(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t2(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t3(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t4 SELECT * FROM t1;
INSERT INTO t5 SELECT * FROM t2;
INSERT INTO t6 SELECT * FROM t3;
ERROR HY000: Transactional commit not supported by involved engine(s)
INSERT INTO t7 SELECT * FROM t2;
INSERT INTO t8 SELECT * FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_2;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_0 FROM t6;
EXPECT_0
0
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
EXPECT_1000
1000
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
EXPECT_1000
1000
connection node_1;
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8;
CREATE TABLE t1(pk int not null primary key) engine=innodb;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
pk
1
2
3
4
5
DROP TABLE t1;
DROP VIEW view_t1;
CREATE TABLE t1(pk int not null primary key) engine=myisam;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
pk
1
2
3
4
5
DROP TABLE t1;
DROP VIEW view_t1;
CREATE TABLE t1(pk int not null primary key) engine=aria;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
pk
1
2
3
4
5
DROP TABLE t1;
DROP VIEW view_t1;
SET GLOBAL wsrep_mode=DEFAULT;

View File

@ -1,23 +1,8 @@
connection node_2;
connection node_1;
connection node_1;
connection node_2;
connection node_1;
CREATE TABLE t ENGINE=InnoDB WITH SYSTEM VERSIONING AS SELECT 1 AS i;
ERROR 42000: This version of MariaDB doesn't yet support 'SYSTEM VERSIONING AS SELECT in Galera cluster'
connection node_2;
SHOW CREATE TABLE t;
Table Create Table
t CREATE TABLE `t` (
`i` int(1) NOT NULL
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci WITH SYSTEM VERSIONING
SELECT * from t;
i
1
DROP TABLE IF EXISTS t;
COMMIT;
connection node_2;
SET SESSION wsrep_sync_wait=0;
Killing server ...
Starting server ...
connection node_2;
call mtr.add_suppression("WSREP: Event .*Write_rows_v1 apply failed:.*");
call mtr.add_suppression("SREP: Failed to apply write set: gtid:.*");
ERROR 42S02: Table 'test.t' doesn't exist

View File

@ -18,19 +18,16 @@
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
# Run CTAS until the resulting table gets created,
# then it gets BF aborted by ALTER.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
# then it gets BF aborted by other DDL.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
--send
CREATE TABLE t2 SELECT * FROM t1;
# Wait for CTAS to reach the table create point,
# start executing ALTER and BF abort CTAS.
# start executing other DDL and BF abort CTAS.
--connection node_1
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
--disable_result_log
--error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
--enable_result_log
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
TRUNCATE TABLE t1;
--connection con1
# CTAS gets BF aborted.
@ -46,19 +43,16 @@ SET DEBUG_SYNC = 'RESET';
INSERT INTO t1 VALUES (10), (20), (30);
# Run CTAS until the resulting table gets created,
# then it gets BF aborted by ALTER.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_alter WAIT_FOR bf_abort';
# then it gets BF aborted by other DDL.
SET DEBUG_SYNC = 'create_table_select_before_create SIGNAL may_run WAIT_FOR bf_abort';
--send
CREATE TABLE t2 SELECT * FROM t1;
# Wait for CTAS to reach the table create point,
# start executing ALTER and BF abort CTAS.
# start executing other DDL and BF abort CTAS.
--connection node_1
SET DEBUG_SYNC = 'now WAIT_FOR may_alter';
--disable_result_log
--error ER_ERROR_ON_RENAME
ALTER TABLE t1 DROP FOREIGN KEY b, ALGORITHM=COPY;
--enable_result_log
SET DEBUG_SYNC = 'now WAIT_FOR may_run';
TRUNCATE TABLE t1;
--connection con1
# CTAS gets BF aborted.

View File

@ -11,7 +11,11 @@ SET SESSION autocommit=0;
SELECT * FROM t1 WHERE c1 <=0 ORDER BY c1 DESC;
--error ER_LOCK_DEADLOCK
INSERT INTO t1 VALUES (4),(3),(1),(2);
--error ER_TABLE_EXISTS_ERROR
#
# This is because support for CREATE TABLE ENGINE=SEQUENCE
# is checked before we check whether the table already exists.
#
--error ER_NOT_SUPPORTED_YET
CREATE TABLE t1 (pk INT PRIMARY KEY, b INT) ENGINE=SEQUENCE;
--error ER_CANT_DROP_FIELD_OR_KEY
ALTER TABLE t1 DROP COLUMN c2;

View File

@ -0,0 +1,27 @@
--source include/galera_cluster.inc
--source include/have_aria.inc
--connection node_1
SET GLOBAL wsrep_load_data_splitting=ON;
SET GLOBAL wsrep_replicate_myisam=ON;
CREATE TABLE t1 (c1 int) ENGINE=MYISAM;
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
SELECT COUNT(*) AS EXPECT_6 FROM t1;
--connection node_2
SELECT COUNT(*) AS EXPECT_6 FROM t1;
--connection node_1
ALTER TABLE t1 ENGINE=InnoDB;
LOAD DATA INFILE '../../std_data/mdev-25731.dat' IGNORE INTO TABLE t1 LINES TERMINATED BY '\n';
SELECT COUNT(*) AS EXPECT_12 FROM t1;
--connection node_2
SELECT COUNT(*) AS EXPECT_12 FROM t1;
--connection node_1
DROP TABLE t1;
SET GLOBAL wsrep_load_data_splitting=OFF;
SET GLOBAL wsrep_replicate_myisam=OFF;

View File

@ -2,6 +2,8 @@
[mysqld.1]
wsrep-debug=SERVER
loose-wsrep-mw-336=1
[mysqld.2]
wsrep-debug=SERVER
loose-wsrep-mw-336=2

View File

@ -3,11 +3,12 @@
#
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/force_restart.inc
--source include/have_sequence.inc
--connection node_1
CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB;
SET @wsrep_slave_threads_orig = @@wsrep_slave_threads;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY AUTO_INCREMENT) Engine=InnoDB;
SET GLOBAL wsrep_slave_threads = 10;
@ -22,7 +23,7 @@ SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE V
SET GLOBAL wsrep_slave_threads = 1;
--connection node_2
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (NULL);
--connection node_1
--echo # Wait until one of the appliers has exited
@ -54,27 +55,19 @@ SELECT VARIABLE_VALUE AS EXPECT_20 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE V
SET GLOBAL wsrep_slave_threads = 1;
--connection node_2
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2);
INSERT INTO t1 VALUES (3);
INSERT INTO t1 VALUES (4);
INSERT INTO t1 VALUES (5);
INSERT INTO t1 VALUES (6);
INSERT INTO t1 VALUES (7);
INSERT INTO t1 VALUES (8);
INSERT INTO t1 VALUES (9);
INSERT INTO t1 VALUES (10);
INSERT INTO t1 VALUES (11);
INSERT INTO t1 VALUES (12);
INSERT INTO t1 VALUES (13);
INSERT INTO t1 VALUES (14);
INSERT INTO t1 VALUES (16);
INSERT INTO t1 VALUES (17);
INSERT INTO t1 VALUES (18);
INSERT INTO t1 VALUES (19);
INSERT INTO t1 VALUES (20);
--disable_query_log
let $c = 50;
while ($c) {
INSERT INTO t1 VALUES(NULL); COMMIT;
dec $c;
}
--enable_query_log
--connection node_1
--let $wait_condition = SELECT COUNT(*) = 51 FROM t1;
--let $wait_condition_on_error_output = SELECT COUNT(*) FROM t1;
--source include/wait_condition_with_debug.inc
--echo # Wait until 19 of the appliers has exited
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
--let $wait_condition_on_error_output = SELECT COUNT(*), 1 as EXPECTED_VALUE FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE = 'wsrep applier idle'; show processlist
@ -82,7 +75,7 @@ INSERT INTO t1 VALUES (20);
SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) AS EXPECT_51 FROM t1;
SET GLOBAL wsrep_slave_threads = 10;
--echo # Set slave threads to 10 step 3
@ -96,16 +89,13 @@ SELECT VARIABLE_VALUE AS EXPECT_10 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE V
SET GLOBAL wsrep_slave_threads = 1;
--connection node_2
INSERT INTO t1 VALUES (21);
INSERT INTO t1 VALUES (22);
INSERT INTO t1 VALUES (23);
INSERT INTO t1 VALUES (24);
INSERT INTO t1 VALUES (25);
INSERT INTO t1 VALUES (26);
INSERT INTO t1 VALUES (27);
INSERT INTO t1 VALUES (28);
INSERT INTO t1 VALUES (29);
INSERT INTO t1 VALUES (30);
--disable_query_log
let $c = 50;
while ($c) {
INSERT INTO t1 VALUES(NULL); COMMIT;
dec $c;
}
--enable_query_log
--connection node_1
--echo # Wait until slave threads back to 1
@ -115,6 +105,10 @@ INSERT INTO t1 VALUES (30);
SELECT VARIABLE_VALUE AS EXPECT_1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_applier_thread_count';
SELECT COUNT(*) FROM t1;
SELECT COUNT(*) AS EXPECT_101 FROM t1;
DROP TABLE t1;
--disable_query_log
SET GLOBAL wsrep_slave_threads = @wsrep_slave_threads_orig;
--enable_query_log

View File

@ -3,70 +3,92 @@
#
--source include/galera_cluster.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
--connection node_1
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY, f2 CHAR(1)) engine=innodb;
INSERT INTO t1 VALUES (1, 'a');
INSERT INTO t1 VALUES (2, 'a');
--connection node_1
SET AUTOCOMMIT=ON;
START TRANSACTION;
UPDATE t1 SET f2 = 'b' WHERE f1 = 1;
# block access to t2
--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1a
SET SESSION wsrep_sync_wait=0;
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2'
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.TABLES
--source include/wait_condition_with_debug.inc
LOCK TABLE t2 WRITE;
# Block before MLD lock wait
# Block before MDL lock wait
--connection node_1
SET @@debug_dbug = "d,sync.wsrep_before_mdl_wait";
SET DEBUG_SYNC= 'wsrep_before_mdl_wait SIGNAL before_mdl_wait WAIT_FOR mdl_wait_continue';
--send SELECT * FROM t2;
# Wait for SELECT to be blocked
--connection node_1a
#--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIS WHERE STATE = 'System lock';
#--source include/wait_condition.inc
#--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE = 'init' AND INFO = 'COMMIT';
#--source include/wait_condition.inc
--echo # Wait until select is blocked before MDL lock wait
SET DEBUG_SYNC= 'now WAIT_FOR before_mdl_wait';
# block applier to wait after BF victim is locked
--connection node_1a
SET @@debug_dbug = "d,sync.wsrep_after_BF_victim_lock";
# Issue a conflicting update on node #2
--connection node_2
UPDATE t1 SET f2 = 'c' WHERE f1 = 1;
# Unblock the SELECT, to enter wsrep_thd_is_BF
--connection node_1a
SET @@debug_dbug = "";
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_before_mdl_wait";
--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1b
SET SESSION wsrep_sync_wait=0;
--echo # Wait for conflicting update to block
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event:%';
--source include/wait_condition.inc
# unblock applier to try to BF the SELECT
# Unblock the SELECT, to enter wsrep_thd_is_BF
SET DEBUG_SYNC = "now SIGNAL signal.wsrep_after_BF_victim_lock";
--connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1
--connection node_1c
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE STATE LIKE 'Update_rows_log_event:%';
--source include/wait_condition.inc
--connection node_1a
# unblock applier to try to BF the SELECT
SET DEBUG_SYNC = "now SIGNAL BF_victim_continue";
# table lock is not needed anymore
UNLOCK TABLES;
# SELECT succeeds
# SELECT returns deadlock
--connection node_1
--error ER_LOCK_DEADLOCK
--reap
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
--connection node_1
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
SELECT * FROM t1;
--connection node_2
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) = 1 FROM t1 WHERE f2 = 'c';
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'a';
SELECT COUNT(*) AS EXPECT_1 FROM t1 WHERE f2 = 'c';
SELECT * FROM t1;
DROP TABLE t1;
DROP TABLE t2;
--connection node_1a
SET DEBUG_SYNC = "RESET";
--connection node_1b
SET DEBUG_SYNC = "RESET";
--connection node_1
--disconnect node_1a
--disconnect node_1b
--disconnect node_1c

View File

@ -2,22 +2,24 @@
--source include/have_innodb.inc
#
# This tests simple autocommit replication of MyISAM tables. No updates arrive on the slave.
# This tests simple autocommit replication of MyISAM tables.
#
# Without a PK
SET GLOBAL wsrep_mode=REPLICATE_MYISAM;
# Without a PK
CREATE TABLE t1 (f1 INTEGER) ENGINE=MyISAM;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (2), (3);
# This is TOI
INSERT INTO t1 SELECT 4 FROM DUAL UNION ALL SELECT 5 FROM DUAL;
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=MyISAM;
INSERT INTO t2 VALUES (1);
INSERT INTO t2 VALUES (2), (3);
# This is TOI
INSERT INTO t2 SELECT 4 FROM DUAL UNION ALL SELECT 5 FROM DUAL;
# Error
@ -34,16 +36,26 @@ UPDATE t2 SET f1 = 9 WHERE f1 = 1;
DELETE FROM t1 WHERE f1 = 9;
DELETE FROM t2 WHERE f1 = 9;
SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;
--connection node_2
SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;
# TRUNCATE
TRUNCATE TABLE t1;
TRUNCATE TABLE t1;
TRUNCATE TABLE t2;
SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;
--connection node_2
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) = 0 FROM t2;
SELECT * FROM t1 ORDER BY f1;
SELECT * FROM t2 ORDER BY f1;
--connection node_1
SET GLOBAL wsrep_mode=DEFAULT;
DROP TABLE t1;
DROP TABLE t2;
SET GLOBAL wsrep_mode=DEFAULT;

View File

@ -0,0 +1,16 @@
--source include/galera_cluster.inc
--source include/have_sequence.inc
SET GLOBAL wsrep_ignore_apply_errors=0;
SET SESSION AUTOCOMMIT=0;
SET SESSION max_error_count=0;
--error ER_NOT_SUPPORTED_YET
CREATE TABLE t0 (id GEOMETRY,parent_id GEOMETRY)ENGINE=SEQUENCE;
--connection node_2
--error ER_NO_SUCH_TABLE
SHOW CREATE TABLE t0;
--connection node_1
SET GLOBAL wsrep_ignore_apply_errors=DEFAULT;

View File

@ -16,21 +16,27 @@ SET GLOBAL wsrep_provider_options = 'pc.weight=2';
--connection node_2
# Isolate node_2 from the group and wait until wsrep_ready becomes OFF.
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
SET SESSION wsrep_sync_wait = 0;
--let $wait_condition = SELECT VARIABLE_VALUE = 'OFF' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready'
--source include/wait_condition.inc
# Verify that graceful shutdown succeeds.
--source include/shutdown_mysqld.inc
--source include/start_mysqld.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
SET GLOBAL wsrep_provider_options = 'gmcast.isolate = 1';
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--connection node_2
--let $wait_condition = SELECT VARIABLE_VALUE = 'OFF' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready'
--source include/wait_condition.inc
SHOW STATUS LIKE 'wsrep_cluster_status';
# Verify that graceful shutdown succeeds.
--source include/shutdown_mysqld.inc
--source include/start_mysqld.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
# Restore original settings.
SET GLOBAL wsrep_provider_options = 'pc.weight = 1';
--source include/auto_increment_offset_restore.inc

View File

@ -0,0 +1,188 @@
--source include/galera_cluster.inc
--source include/have_innodb.inc
--source include/have_log_bin.inc
--source include/have_sequence.inc
--source include/have_aria.inc
--echo # Case 1 CREATE SEQUENCE with no NOCACHE
--error ER_NOT_SUPPORTED_YET
CREATE SEQUENCE s ENGINE=InnoDB;
CREATE SEQUENCE s NOCACHE ENGINE=InnoDB;
CREATE TABLE t1 (a INT) ENGINE=InnoDB;
START TRANSACTION;
REPLACE INTO s VALUES (1,1,9223372036854775806,1,1,1000,0,0);
OPTIMIZE TABLE t1;
SELECT * FROM t1;
SELECT * FROM s;
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 's'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM test.s;
--source include/wait_condition.inc
SELECT * FROM t1;
SELECT * FROM s;
--connection node_1
DROP TABLE t1;
DROP SEQUENCE s;
--echo # Case 2 REPLACE INTO ... SELECT with error
CREATE TABLE t (id INT KEY,a YEAR,INDEX (id,a)) engine=innodb;
--error ER_WRONG_VALUE_COUNT_ON_ROW
REPLACE INTO t (id,a)SELECT /*!99997 */ 1;
REPLACE INTO t (id,a)SELECT /*!99997 */ 1,2;
SELECT * FROM t;
CREATE TABLE t2 (id INT KEY,a YEAR,INDEX (id,a)) engine=myisam;
--error ER_WRONG_VALUE_COUNT_ON_ROW
REPLACE INTO t2 (id,a)SELECT /*!99997 */ 1;
REPLACE INTO t2 (id,a)SELECT /*!99997 */ 1,2;
SELECT * FROM t2;
CREATE TABLE t3 (id INT KEY,a YEAR,INDEX (id,a)) engine=aria;
--error ER_WRONG_VALUE_COUNT_ON_ROW
REPLACE INTO t3 (id,a)SELECT /*!99997 */ 1;
REPLACE INTO t3 (id,a)SELECT /*!99997 */ 1,2;
SELECT * FROM t3;
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't3'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM test.t3;
--source include/wait_condition.inc
SELECT * FROM t;
SELECT * FROM t2;
SELECT * FROM t3;
--connection node_1
DROP TABLE t,t2,t3;
--echo # Bigger REPLACE ... AS SELECT test
CREATE TABLE t1(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t2(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t3(id int not null primary key ,b int) ENGINE=Aria;
CREATE TABLE t4(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t5(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t6(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t7(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t8(id int not null primary key ,b int) ENGINE=Aria;
INSERT INTO t1(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t2(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t3(id) SELECT seq FROM seq_1_to_1000;
REPLACE INTO t4 SELECT * FROM t1;
REPLACE INTO t5 SELECT * FROM t2;
# For some reason Aria storage engine does register_ha
--error ER_ERROR_DURING_COMMIT
REPLACE INTO t6 SELECT * FROM t3;
REPLACE INTO t7 SELECT * FROM t2;
REPLACE INTO t8 SELECT * FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
SELECT COUNT(*) AS EXPECT_0 FROM t6;
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 8 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME LIKE 't_'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1000 FROM test.t8;
--source include/wait_condition.inc
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
SELECT COUNT(*) AS EXPECT_0 FROM t6;
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
--connection node_1
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8;
--echo # Bigger INSERT INTO ... SELECT test
CREATE TABLE t1(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t2(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t3(id int not null primary key ,b int) ENGINE=Aria;
CREATE TABLE t4(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t5(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t6(id int not null primary key ,b int) ENGINE=InnoDB;
CREATE TABLE t7(id int not null primary key ,b int) ENGINE=MyISAM;
CREATE TABLE t8(id int not null primary key ,b int) ENGINE=Aria;
INSERT INTO t1(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t2(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t3(id) SELECT seq FROM seq_1_to_1000;
INSERT INTO t4 SELECT * FROM t1;
INSERT INTO t5 SELECT * FROM t2;
# For some reason Aria storage engine does register_ha
--error ER_ERROR_DURING_COMMIT
INSERT INTO t6 SELECT * FROM t3;
INSERT INTO t7 SELECT * FROM t2;
INSERT INTO t8 SELECT * FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
SELECT COUNT(*) AS EXPECT_0 FROM t6;
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 8 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = 'test' AND TABLE_NAME LIKE 't_'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1000 FROM test.t8;
--source include/wait_condition.inc
SELECT COUNT(*) AS EXPECT_1000 FROM t1;
SELECT COUNT(*) AS EXPECT_1000 FROM t2;
SELECT COUNT(*) AS EXPECT_1000 FROM t3;
SELECT COUNT(*) AS EXPECT_1000 FROM t4;
SELECT COUNT(*) AS EXPECT_1000 FROM t5;
SELECT COUNT(*) AS EXPECT_0 FROM t6;
SELECT COUNT(*) AS EXPECT_1000 FROM t7;
SELECT COUNT(*) AS EXPECT_1000 FROM t8;
--connection node_1
DROP TABLE t1,t2,t3,t4,t5,t6,t7,t8;
#
# View
#
CREATE TABLE t1(pk int not null primary key) engine=innodb;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
DROP TABLE t1;
DROP VIEW view_t1;
CREATE TABLE t1(pk int not null primary key) engine=myisam;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
DROP TABLE t1;
DROP VIEW view_t1;
CREATE TABLE t1(pk int not null primary key) engine=aria;
INSERT INTO t1 values (1),(2),(3),(4);
CREATE VIEW view_t1 AS SELECT * FROM t1;
INSERT INTO view_t1 VALUES (5);
SELECT * FROM t1;
DROP TABLE t1;
DROP VIEW view_t1;
SET GLOBAL wsrep_mode=DEFAULT;

View File

@ -1,34 +1,15 @@
--source include/galera_cluster.inc
--let $node_1 = node_1
--let $node_2 = node_2
--source include/auto_increment_offset_save.inc
--connection node_1
#
# Below should not cause nodes to be inconsistent (they could be if we
# allowed TOI, as some errors are ignored on the applier)
#
--error ER_NOT_SUPPORTED_YET
CREATE TABLE t ENGINE=InnoDB WITH SYSTEM VERSIONING AS SELECT 1 AS i;
--connection node_2
--error ER_NO_SUCH_TABLE
SHOW CREATE TABLE t;
SELECT * from t;
DROP TABLE IF EXISTS t;
COMMIT;
#
# Restart node_2, force SST because database is inconsistent compared to node_1
#
--connection node_2
SET SESSION wsrep_sync_wait=0;
--source include/kill_galera.inc
--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat
--echo Starting server ...
let $restart_noprint=2;
--source include/start_mysqld.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 'ON' FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_ready';
--source include/wait_condition.inc
--connection node_2
call mtr.add_suppression("WSREP: Event .*Write_rows_v1 apply failed:.*");
call mtr.add_suppression("SREP: Failed to apply write set: gtid:.*");
--source include/auto_increment_offset_restore.inc

View File

@ -9,6 +9,7 @@ INSERT INTO t1 VALUES (01), (02), (03), (04), (05);
connection node_2;
Unloading wsrep provider ...
SET GLOBAL wsrep_cluster_address = '';
connection node_1;
connection node_3;
Unloading wsrep provider ...
SET GLOBAL wsrep_cluster_address = '';
@ -33,14 +34,16 @@ SET GLOBAL wsrep_provider_options = 'dbug=';
SET GLOBAL wsrep_provider_options = 'signal=ist_sender_send_after_get_buffers';
INSERT INTO t1 VALUES (51), (52), (53), (54), (55);
connection node_2;
connection node_1;
connection node_3;
connection node_1;
connection node_2;
SELECT COUNT(*) = 30 FROM t1;
COUNT(*) = 30
1
SELECT COUNT(*) = 3 FROM t2;
COUNT(*) = 3
1
SELECT COUNT(*) AS EXPECT_30 FROM t1;
EXPECT_30
30
SELECT COUNT(*) AS EXPECT_3 FROM t2;
EXPECT_3
3
SELECT LENGTH(f1) = 512 * 1024 FROM t2;
LENGTH(f1) = 512 * 1024
1
@ -48,12 +51,12 @@ LENGTH(f1) = 512 * 1024
1
CALL mtr.add_suppression("WSREP: Unsupported protocol downgrade: incremental data collection disabled");
connection node_3;
SELECT COUNT(*) = 30 FROM t1;
COUNT(*) = 30
1
SELECT COUNT(*) = 3 FROM t2;
COUNT(*) = 3
1
SELECT COUNT(*) AS EXPECT_30 FROM t1;
EXPECT_30
30
SELECT COUNT(*) AS EXPECT_3 FROM t2;
EXPECT_3
3
SELECT LENGTH(f1) = 512 * 1024 FROM t2;
LENGTH(f1) = 512 * 1024
1

View File

@ -92,3 +92,7 @@ connection node_2;
call mtr.add_suppression("WSREP: Rejecting JOIN message from \(.*\): new State Transfer required.");
connection node_3;
call mtr.add_suppression("WSREP: Rejecting JOIN message from \(.*\): new State Transfer required.");
disconnect node_1a;
disconnect node_3;
disconnect node_2;
disconnect node_1;

View File

@ -2,10 +2,22 @@
[mysqld.1]
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.ignore_sb=true;gcache.size=1M'
auto_increment_increment=1
auto_increment_offset=1
# this will force server restarts before this test
loose-galera-ist-gcache-rollover=1
wsrep-debug=1
[mysqld.2]
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.ignore_sb=true;gcache.size=1M'
auto_increment_increment=2
auto_increment_offset=2
loose-galera-ist-gcache-rollover=2
wsrep-debug=1
[mysqld.3]
wsrep_provider_options='repl.causal_read_timeout=PT90S;base_port=@mysqld.3.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.ignore_sb=true;gcache.size=1M'
auto_increment_increment=3
auto_increment_offset=3
loose-galera-ist-gcache-rollover=3
wsrep-debug=1

View File

@ -12,6 +12,7 @@
--source include/have_innodb.inc
--source include/have_debug_sync.inc
--source include/galera_have_debug_sync.inc
--source include/force_restart.inc
--let $galera_connection_name = node_3
--let $galera_server_number = 3
@ -24,6 +25,9 @@
--source ../galera/include/auto_increment_offset_save.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--let $wait_condition_on_error_output = SELECT * FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME LIKE 'wsrep%'; show processlist
--source include/wait_condition_with_debug.inc
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY);
INSERT INTO t1 VALUES (01), (02), (03), (04), (05);
@ -32,12 +36,15 @@ INSERT INTO t1 VALUES (01), (02), (03), (04), (05);
--let $wsrep_cluster_address_orig2 = `select @@wsrep_cluster_address`
--source suite/galera/include/galera_stop_replication.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
--connection node_3
--let $wsrep_cluster_address_orig3 = `select @@wsrep_cluster_address`
--source suite/galera/include/galera_stop_replication.inc
--connection node_1
--source include/wait_until_connected_again.inc
INSERT INTO t1 VALUES (11), (12), (13), (14), (15);
# Wait until nodes #2 and #3 have left
@ -88,29 +95,39 @@ INSERT INTO t1 VALUES (51), (52), (53), (54), (55);
--connection node_2
--source include/wait_until_connected_again.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 OR VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
--connection node_3
--source include/wait_until_connected_again.inc
sleep 5;
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
# Final checks
--connection node_2
SELECT COUNT(*) = 30 FROM t1;
SELECT COUNT(*) = 3 FROM t2;
--let $wait_condition = SELECT COUNT(*) = 30 FROM t1
--source include/wait_condition.inc
SELECT COUNT(*) AS EXPECT_30 FROM t1;
SELECT COUNT(*) AS EXPECT_3 FROM t2;
SELECT LENGTH(f1) = 512 * 1024 FROM t2;
CALL mtr.add_suppression("WSREP: Unsupported protocol downgrade: incremental data collection disabled");
# Final checks
--connection node_3
SELECT COUNT(*) = 30 FROM t1;
SELECT COUNT(*) = 3 FROM t2;
--let $wait_condition = SELECT COUNT(*) = 30 FROM t1
--source include/wait_condition.inc
SELECT COUNT(*) AS EXPECT_30 FROM t1;
SELECT COUNT(*) AS EXPECT_3 FROM t2;
SELECT LENGTH(f1) = 512 * 1024 FROM t2;
CALL mtr.add_suppression("WSREP: Unsupported protocol downgrade: incremental data collection disabled");
DROP TABLE t1, t2;
# Restore original auto_increment_offset values.
--source ../galera/include/auto_increment_offset_restore.inc
--let $galera_cluster_size=3
--source ../galera/include/auto_increment_offset_restore.inc
--source include/galera_end.inc

View File

@ -278,4 +278,9 @@ call mtr.add_suppression("WSREP: Rejecting JOIN message from \(.*\): new State T
--connection node_3
call mtr.add_suppression("WSREP: Rejecting JOIN message from \(.*\): new State Transfer required.");
--disconnect node_1a
# Restore original auto_increment_offset values.
--let $galera_cluster_size=3
--source ../galera/include/auto_increment_offset_restore.inc
--source include/galera_end.inc

View File

@ -7,11 +7,11 @@ connection node_3;
connection node_1;
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connection node_2;
SELECT COUNT(*) = 0 FROM t1;
COUNT(*) = 0
1
SELECT COUNT(*) AS EXPECT_0 FROM t1;
EXPECT_0
0
connection node_1;
CREATE TABLE t2 (f1 INTEGER);
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2;
LOCK TABLE t2 WRITE;
connection node_1;
@ -37,12 +37,12 @@ count_match
count_match
1
connection node_1;
SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
COUNT(*)
SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log;
EXPECT_0
0
connection node_2;
SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
COUNT(*)
SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log;
EXPECT_0
0
connection node_1;
DROP TABLE t1;

View File

@ -15,18 +15,23 @@
--source ../galera/include/auto_increment_offset_save.inc
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 3 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size';
--source include/wait_condition.inc
CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
# Block node #2's applier before table t1's inserts have come into play
--connection node_2
SELECT COUNT(*) = 0 FROM t1;
SELECT COUNT(*) AS EXPECT_0 FROM t1;
--connection node_1
CREATE TABLE t2 (f1 INTEGER);
CREATE TABLE t2 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB;
--connect node_2a, 127.0.0.1, root, , test, $NODE_MYPORT_2
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't2'
--source include/wait_condition.inc
LOCK TABLE t2 WRITE;
--connection node_1
@ -77,10 +82,10 @@ if ($mysql_errno == 1213) {
--enable_query_log
--connection node_1
SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log;
--connection node_2
SELECT COUNT(*) FROM mysql.wsrep_streaming_log;
SELECT COUNT(*) AS EXPECT_0 FROM mysql.wsrep_streaming_log;
--connection node_1
DROP TABLE t1;

View File

@ -15,6 +15,22 @@ CREATE TABLE imp_t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
DROP TABLE imp_t1, t1;
SET GLOBAL innodb_checksum_algorithm=@save_innodb_checksum_algorithm;
#
# MDEV-33400 Adaptive hash index corruption after DISCARD TABLESPACE
#
SET @save_adaptive=@@GLOBAL.innodb_adaptive_hash_index;
SET GLOBAL innodb_adaptive_hash_index=ON;
CREATE TABLE t (a INT PRIMARY KEY) ENGINE=INNODB;
INSERT INTO t SELECT * FROM seq_1_to_131;
ALTER TABLE t ADD hid INT DEFAULT 2;
INSERT INTO t VALUES (251,1);
ALTER TABLE t DISCARD TABLESPACE;
CHECK TABLE mysql.innodb_table_stats;
Table Op Msg_type Msg_text
mysql.innodb_table_stats check status OK
DROP TABLE t;
SET GLOBAL innodb_adaptive_hash_index=@save_adaptive;
# End of 10.4 tests
#
# MDEV-18288: Transportable Tablespaces leave AUTO_INCREMENT in mismatched
# state, causing INSERT errors in newly imported tables when .cfg is not used.
#
@ -49,6 +65,7 @@ id
4
5
DROP TABLE t2;
# End of 10.5 tests
#
# MDEV-27006 Assertion `!lock_trx_has_sys_table_locks(trx)'
# failed in dberr_t row_discard_tablespace_for_mysql

View File

@ -1,4 +1,5 @@
--source include/have_innodb.inc
--source include/have_sequence.inc
call mtr.add_suppression("Index for table 'imp_t1' is corrupt; try to repair it");
@ -22,6 +23,24 @@ DROP TABLE imp_t1, t1;
SET GLOBAL innodb_checksum_algorithm=@save_innodb_checksum_algorithm;
--echo #
--echo # MDEV-33400 Adaptive hash index corruption after DISCARD TABLESPACE
--echo #
SET @save_adaptive=@@GLOBAL.innodb_adaptive_hash_index;
SET GLOBAL innodb_adaptive_hash_index=ON;
CREATE TABLE t (a INT PRIMARY KEY) ENGINE=INNODB;
INSERT INTO t SELECT * FROM seq_1_to_131;
ALTER TABLE t ADD hid INT DEFAULT 2;
INSERT INTO t VALUES (251,1);
ALTER TABLE t DISCARD TABLESPACE;
CHECK TABLE mysql.innodb_table_stats;
DROP TABLE t;
SET GLOBAL innodb_adaptive_hash_index=@save_adaptive;
--echo # End of 10.4 tests
--echo #
--echo # MDEV-18288: Transportable Tablespaces leave AUTO_INCREMENT in mismatched
--echo # state, causing INSERT errors in newly imported tables when .cfg is not used.
@ -63,6 +82,8 @@ INSERT INTO t2() VALUES();
SELECT * FROM t2 ORDER BY id;
DROP TABLE t2;
--echo # End of 10.5 tests
--echo #
--echo # MDEV-27006 Assertion `!lock_trx_has_sys_table_locks(trx)'
--echo # failed in dberr_t row_discard_tablespace_for_mysql

View File

@ -0,0 +1,11 @@
--- create.result
+++ create.reject
@@ -207,7 +207,7 @@
UNIQUE KEY `FTS_DOC_ID_INDEX` (`FTS_DOC_ID` DESC)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=INPLACE;
-ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
+ERROR HY000: Index 'FTS_DOC_ID_INDEX' is of wrong type for an InnoDB FULLTEXT index
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=COPY;
ERROR HY000: Index 'FTS_DOC_ID_INDEX' is of wrong type for an InnoDB FULLTEXT index
DROP TABLE t1;

View File

@ -207,7 +207,7 @@ t1 CREATE TABLE `t1` (
UNIQUE KEY `FTS_DOC_ID_INDEX` (`FTS_DOC_ID` DESC)
) ENGINE=InnoDB DEFAULT CHARSET=latin1 COLLATE=latin1_swedish_ci
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=INPLACE;
ERROR HY000: Index 'FTS_DOC_ID_INDEX' is of wrong type for an InnoDB FULLTEXT index
ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=COPY;
ERROR HY000: Index 'FTS_DOC_ID_INDEX' is of wrong type for an InnoDB FULLTEXT index
DROP TABLE t1;

View File

@ -0,0 +1,57 @@
--- innodb-fts-ddl.result
+++ innodb-fts-ddl.reject
@@ -11,8 +11,10 @@
('MySQL vs. YourSQL','In the following database comparison ...'),
('MySQL Security','When configured properly, MySQL ...');
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE
-ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=INPLACE;
+ERROR 0A000: ALGORITHM=NOCOPY is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
+ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
+affected rows: 6
+info: Records: 6 Duplicates: 0 Warnings: 0
SELECT * FROM fts_test WHERE MATCH (title, body)
AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE);
id title body
@@ -26,7 +28,9 @@
('1001 MySQL Tricks','1. Never run mysqld as root. 2. ...'),
('MySQL vs. YourSQL','In the following database comparison ...'),
('MySQL Security','When configured properly, MySQL ...');
-ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
+ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
+affected rows: 12
+info: Records: 12 Duplicates: 0 Warnings: 0
SELECT * FROM fts_test WHERE MATCH (title, body)
AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE);
id title body
@@ -76,8 +80,10 @@
('MySQL vs. YourSQL','In the following database comparison ...'),
('MySQL Security','When configured properly, MySQL ...');
CREATE FULLTEXT INDEX idx on fts_test (title, body) LOCK=NONE;
-ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
-ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
+ERROR 0A000: LOCK=NONE is not supported. Reason: Not implemented for system-versioned operations. Try LOCK=SHARED
+ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
+affected rows: 6
+info: Records: 6 Duplicates: 0 Warnings: 0
ALTER TABLE fts_test ROW_FORMAT=REDUNDANT, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
ALTER TABLE fts_test ROW_FORMAT=REDUNDANT;
@@ -162,7 +168,7 @@
(20, 'MySQL Security','When configured properly, MySQL ...');
ALTER TABLE articles ADD FULLTEXT INDEX idx (title),
ADD FULLTEXT INDEX idx3 (title), ALGORITHM=INPLACE;
-ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: InnoDB presently supports one FULLTEXT index creation at a time. Try ALGORITHM=COPY
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
ALTER TABLE articles ADD FULLTEXT INDEX idx (title),
ADD FULLTEXT INDEX idx3 (title);
affected rows: 6
@@ -274,7 +280,7 @@
call mtr.add_suppression("InnoDB: Failed to create");
CREATE TABLE t1(a TEXT, FTS_DOC_ID BIGINT UNSIGNED NOT NULL UNIQUE) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT(a), ALGORITHM=INPLACE;
-ERROR HY000: Got error 11 "Resource temporarily unavailable" from storage engine InnoDB
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT KEY(a), ADD COLUMN b VARCHAR(3), ADD FULLTEXT KEY(b);

View File

@ -0,0 +1,57 @@
--- innodb-fts-ddl.result
+++ innodb-fts-ddl.reject
@@ -11,8 +11,10 @@
('MySQL vs. YourSQL','In the following database comparison ...'),
('MySQL Security','When configured properly, MySQL ...');
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
-ERROR 0A000: ALGORITHM=NOCOPY is not supported for this operation. Try ALGORITHM=INPLACE
-ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=INPLACE;
+ERROR 0A000: ALGORITHM=NOCOPY is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
+ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
+affected rows: 6
+info: Records: 6 Duplicates: 0 Warnings: 0
SELECT * FROM fts_test WHERE MATCH (title, body)
AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE);
id title body
@@ -26,7 +28,9 @@
('1001 MySQL Tricks','1. Never run mysqld as root. 2. ...'),
('MySQL vs. YourSQL','In the following database comparison ...'),
('MySQL Security','When configured properly, MySQL ...');
-ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
+ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
+affected rows: 12
+info: Records: 12 Duplicates: 0 Warnings: 0
SELECT * FROM fts_test WHERE MATCH (title, body)
AGAINST ('Tutorial' IN NATURAL LANGUAGE MODE);
id title body
@@ -76,8 +80,10 @@
('MySQL vs. YourSQL','In the following database comparison ...'),
('MySQL Security','When configured properly, MySQL ...');
CREATE FULLTEXT INDEX idx on fts_test (title, body) LOCK=NONE;
-ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
-ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
+ERROR 0A000: LOCK=NONE is not supported. Reason: Not implemented for system-versioned operations. Try LOCK=SHARED
+ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
+affected rows: 6
+info: Records: 6 Duplicates: 0 Warnings: 0
ALTER TABLE fts_test ROW_FORMAT=REDUNDANT, LOCK=NONE;
ERROR 0A000: LOCK=NONE is not supported. Reason: Fulltext index creation requires a lock. Try LOCK=SHARED
ALTER TABLE fts_test ROW_FORMAT=REDUNDANT;
@@ -162,7 +168,7 @@
(20, 'MySQL Security','When configured properly, MySQL ...');
ALTER TABLE articles ADD FULLTEXT INDEX idx (title),
ADD FULLTEXT INDEX idx3 (title), ALGORITHM=INPLACE;
-ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: InnoDB presently supports one FULLTEXT index creation at a time. Try ALGORITHM=COPY
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
ALTER TABLE articles ADD FULLTEXT INDEX idx (title),
ADD FULLTEXT INDEX idx3 (title);
affected rows: 6
@@ -274,7 +280,7 @@
call mtr.add_suppression("InnoDB: Failed to create");
CREATE TABLE t1(a TEXT, FTS_DOC_ID BIGINT UNSIGNED NOT NULL UNIQUE) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT(a), ALGORITHM=INPLACE;
-ERROR HY000: Got error 11 "Resource temporarily unavailable" from storage engine InnoDB
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
DROP TABLE t1;
CREATE TABLE t1 (a VARCHAR(3)) ENGINE=InnoDB;
ALTER TABLE t1 ADD FULLTEXT KEY(a), ADD COLUMN b VARCHAR(3), ADD FULLTEXT KEY(b);

View File

@ -0,0 +1,10 @@
--- innodb-fts-fic.result
+++ innodb-fts-fic.reject
@@ -172,7 +172,6 @@
(1, 'MySQL Tutorial','DBMS stands for DataBase ...'),
(2, 'How To Use MySQL Well','After you went through a ...');
CREATE FULLTEXT INDEX idx ON wp(title, text);
-ERROR HY000: Column 'FTS_DOC_ID' is of wrong type for an InnoDB FULLTEXT index
DROP TABLE wp;
CREATE TABLE wp(
FTS_DOC_ID bigint unsigned PRIMARY KEY,

View File

@ -0,0 +1,10 @@
--- innodb-fts-fic.result
+++ innodb-fts-fic.reject
@@ -172,7 +172,6 @@
(1, 'MySQL Tutorial','DBMS stands for DataBase ...'),
(2, 'How To Use MySQL Well','After you went through a ...');
CREATE FULLTEXT INDEX idx ON wp(title, text);
-ERROR HY000: Column 'FTS_DOC_ID' is of wrong type for an InnoDB FULLTEXT index
DROP TABLE wp;
CREATE TABLE wp(
FTS_DOC_ID bigint unsigned PRIMARY KEY,

View File

@ -0,0 +1,66 @@
--- misc_debug.result
+++ misc_debug.reject
@@ -7,14 +7,14 @@
SET @saved_debug_dbug = @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,ib_dict_create_index_tree_fail";
CREATE FULLTEXT INDEX idx ON articles(body);
-ERROR HY000: Out of memory.
+ERROR HY000: Can't create table `test`.`articles` (errno: 128 "Out of memory in engine")
SET SESSION debug_dbug=@saved_debug_dbug;
ALTER TABLE articles STATS_PERSISTENT=DEFAULT;
DROP TABLE articles;
CREATE TABLE t (a INT, b TEXT) engine=innodb;
SET debug_dbug='+d,alter_table_rollback_new_index';
-ALTER TABLE t ADD FULLTEXT INDEX (b(64));
-ERROR HY000: Unknown error
+ALTER TABLE t ADD FULLTEXT INDEX (b(64)), ALGORITHM=INPLACE;
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
SET SESSION debug_dbug=@saved_debug_dbug;
DROP TABLE t;
CREATE TABLE t1 (pk INT, a VARCHAR(8), PRIMARY KEY(pk),
@@ -28,32 +28,6 @@
DROP TABLE t2, t1;
SET SESSION debug_dbug=@saved_debug_dbug;
#
-# MDEV-25200 Index count mismatch due to aborted FULLTEXT INDEX
-#
-CREATE TABLE t1(a INT, b TEXT, c TEXT, FULLTEXT INDEX(b)) ENGINE=InnoDB;
-INSERT INTO t1 VALUES(1, "test", "test_1");
-connect con1,localhost,root,,test;
-SET DEBUG_DBUG="+d,innodb_OOM_inplace_alter";
-SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL s2 WAIT_FOR g2';
-ALTER TABLE t1 ADD FULLTEXT(c);
-connection default;
-SET DEBUG_SYNC='now WAIT_FOR s2';
-START TRANSACTION;
-SELECT * FROM t1;
-a b c
-1 test test_1
-SET DEBUG_SYNC='now SIGNAL g2';
-connection con1;
-ERROR HY000: Out of memory.
-disconnect con1;
-connection default;
-SET DEBUG_SYNC=RESET;
-ALTER TABLE t1 ADD bl INT AS (LENGTH(b)) VIRTUAL;
-CHECK TABLE t1;
-Table Op Msg_type Msg_text
-test.t1 check status OK
-DROP TABLE t1;
-#
# MDEV-25663 Double free of transaction during TRUNCATE
#
call mtr.add_suppression("InnoDB: \\(Too many concurrent transactions\\)");
@@ -65,12 +39,3 @@
SET debug_dbug=@saved_debug_dbug;
DROP TABLE t1;
# End of 10.3 tests
-CREATE TABLE t1(f1 INT NOT NULL, f2 CHAR(100))ENGINE=InnoDB;
-SET DEBUG_DBUG="+d,stats_lock_fail";
-ALTER TABLE t1 ADD FULLTEXT(f2);
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-SET debug_dbug=@saved_debug_dbug;
-ALTER TABLE t1 DISCARD TABLESPACE;
-ALTER TABLE t1 ADD FULLTEXT(f2);
-ERROR HY000: Tablespace has been discarded for table `t1`
-DROP TABLE t1;

View File

@ -0,0 +1,66 @@
--- misc_debug.result
+++ misc_debug.reject
@@ -7,14 +7,14 @@
SET @saved_debug_dbug = @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,ib_dict_create_index_tree_fail";
CREATE FULLTEXT INDEX idx ON articles(body);
-ERROR HY000: Out of memory.
+ERROR HY000: Can't create table `test`.`articles` (errno: 128 "Out of memory in engine")
SET SESSION debug_dbug=@saved_debug_dbug;
ALTER TABLE articles STATS_PERSISTENT=DEFAULT;
DROP TABLE articles;
CREATE TABLE t (a INT, b TEXT) engine=innodb;
SET debug_dbug='+d,alter_table_rollback_new_index';
-ALTER TABLE t ADD FULLTEXT INDEX (b(64));
-ERROR HY000: Unknown error
+ALTER TABLE t ADD FULLTEXT INDEX (b(64)), ALGORITHM=INPLACE;
+ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Not implemented for system-versioned operations. Try ALGORITHM=COPY
SET SESSION debug_dbug=@saved_debug_dbug;
DROP TABLE t;
CREATE TABLE t1 (pk INT, a VARCHAR(8), PRIMARY KEY(pk),
@@ -28,32 +28,6 @@
DROP TABLE t2, t1;
SET SESSION debug_dbug=@saved_debug_dbug;
#
-# MDEV-25200 Index count mismatch due to aborted FULLTEXT INDEX
-#
-CREATE TABLE t1(a INT, b TEXT, c TEXT, FULLTEXT INDEX(b)) ENGINE=InnoDB;
-INSERT INTO t1 VALUES(1, "test", "test_1");
-connect con1,localhost,root,,test;
-SET DEBUG_DBUG="+d,innodb_OOM_inplace_alter";
-SET DEBUG_SYNC='innodb_commit_inplace_alter_table_enter SIGNAL s2 WAIT_FOR g2';
-ALTER TABLE t1 ADD FULLTEXT(c);
-connection default;
-SET DEBUG_SYNC='now WAIT_FOR s2';
-START TRANSACTION;
-SELECT * FROM t1;
-a b c
-1 test test_1
-SET DEBUG_SYNC='now SIGNAL g2';
-connection con1;
-ERROR HY000: Out of memory.
-disconnect con1;
-connection default;
-SET DEBUG_SYNC=RESET;
-ALTER TABLE t1 ADD bl INT AS (LENGTH(b)) VIRTUAL;
-CHECK TABLE t1;
-Table Op Msg_type Msg_text
-test.t1 check status OK
-DROP TABLE t1;
-#
# MDEV-25663 Double free of transaction during TRUNCATE
#
call mtr.add_suppression("InnoDB: \\(Too many concurrent transactions\\)");
@@ -65,12 +39,3 @@
SET debug_dbug=@saved_debug_dbug;
DROP TABLE t1;
# End of 10.3 tests
-CREATE TABLE t1(f1 INT NOT NULL, f2 CHAR(100))ENGINE=InnoDB;
-SET DEBUG_DBUG="+d,stats_lock_fail";
-ALTER TABLE t1 ADD FULLTEXT(f2);
-ERROR HY000: Lock wait timeout exceeded; try restarting transaction
-SET debug_dbug=@saved_debug_dbug;
-ALTER TABLE t1 DISCARD TABLESPACE;
-ALTER TABLE t1 ADD FULLTEXT(f2);
-ERROR HY000: Tablespace has been discarded for table `t1`
-DROP TABLE t1;

View File

@ -0,0 +1,12 @@
--- sync_ddl.result
+++ sync_ddl.reject
@@ -100,7 +100,7 @@
ADD COLUMN id2 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
DROP INDEX idx1,
ADD FULLTEXT INDEX idx2(value);
-affected rows: 0
-info: Records: 0 Duplicates: 0 Warnings: 0
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
DROP TABLE t1;
SET GLOBAL debug_dbug = @save_debug;

View File

@ -0,0 +1,12 @@
--- sync_ddl.result
+++ sync_ddl.reject
@@ -100,7 +100,7 @@
ADD COLUMN id2 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
DROP INDEX idx1,
ADD FULLTEXT INDEX idx2(value);
-affected rows: 0
-info: Records: 0 Duplicates: 0 Warnings: 0
+affected rows: 2
+info: Records: 2 Duplicates: 0 Warnings: 0
DROP TABLE t1;
SET GLOBAL debug_dbug = @save_debug;

View File

@ -99,7 +99,8 @@ ALTER TABLE t1
DROP COLUMN id1,
ADD COLUMN id2 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
DROP INDEX idx1,
ADD FULLTEXT INDEX idx2(value),
ALGORITHM=INPLACE;
ADD FULLTEXT INDEX idx2(value);
affected rows: 0
info: Records: 0 Duplicates: 0 Warnings: 0
DROP TABLE t1;
SET GLOBAL debug_dbug = @save_debug;

View File

@ -131,8 +131,14 @@ ENGINE=InnoDB;
CREATE TABLE t1(a INT PRIMARY KEY, b TEXT, FTS_DOC_ID BIGINT UNSIGNED NOT NULL,
UNIQUE KEY FTS_DOC_ID_INDEX(FTS_DOC_ID DESC)) ENGINE=InnoDB;
SHOW CREATE TABLE t1;
if ($MTR_COMBINATION_ORIG) {
--error ER_INNODB_FT_WRONG_DOCID_INDEX
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=INPLACE;
}
if (!$MTR_COMBINATION_ORIG) {
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=INPLACE;
}
--error ER_INNODB_FT_WRONG_DOCID_INDEX
ALTER TABLE t1 ADD FULLTEXT INDEX(b), ALGORITHM=COPY;
DROP TABLE t1;

View File

@ -20,11 +20,20 @@ INSERT INTO fts_test (title,body) VALUES
('MySQL Security','When configured properly, MySQL ...');
# Table does rebuild when fts index builds for the first time
# Create the FTS index
if ($MTR_COMBINATION_ORIG) {
--error ER_ALTER_OPERATION_NOT_SUPPORTED
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
# Create the FTS index
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=INPLACE;
}
if (!$MTR_COMBINATION_ORIG) {
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
--enable_info
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
--disable_info
}
# Select word "tutorial" in the table
SELECT * FROM fts_test WHERE MATCH (title, body)
@ -43,7 +52,14 @@ INSERT INTO fts_test (title,body) VALUES
('MySQL Security','When configured properly, MySQL ...');
# FTS_DOC_ID hidden column and FTS_DOC_ID index exist
if ($MTR_COMBINATION_ORIG) {
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
}
if (!$MTR_COMBINATION_ORIG) {
--enable_info
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
--disable_info
}
# Select word "tutorial" in the table
SELECT * FROM fts_test WHERE MATCH (title, body)
@ -112,7 +128,14 @@ INSERT INTO fts_test (title,body) VALUES
# column already exists. This has not been implemented yet.
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
CREATE FULLTEXT INDEX idx on fts_test (title, body) LOCK=NONE;
if ($MTR_COMBINATION_ORIG) {
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body), ALGORITHM=NOCOPY;
}
if (!$MTR_COMBINATION_ORIG) {
--enable_info
ALTER TABLE fts_test ADD FULLTEXT `idx` (title, body);
--disable_info
}
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE fts_test ROW_FORMAT=REDUNDANT, LOCK=NONE;
@ -349,8 +372,14 @@ let $fts_aux_file= `select concat('FTS_',right(concat(repeat('0',16), lower(hex(
write_file $MYSQLD_DATADIR/test/$fts_aux_file;
EOF
--replace_regex /".*" from/"Resource temporarily unavailable" from/
if ($MTR_COMBINATION_ORIG) {
--error ER_GET_ERRNO
ALTER TABLE t1 ADD FULLTEXT(a), ALGORITHM=INPLACE;
}
if (!$MTR_COMBINATION_ORIG) {
--error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE t1 ADD FULLTEXT(a), ALGORITHM=INPLACE;
}
DROP TABLE t1;
remove_file $MYSQLD_DATADIR/test/$fts_aux_file;

View File

@ -211,8 +211,13 @@ INSERT INTO wp (FTS_DOC_ID, title, text) VALUES
(1, 'MySQL Tutorial','DBMS stands for DataBase ...'),
(2, 'How To Use MySQL Well','After you went through a ...');
if ($MTR_COMBINATION_ORIG) {
--error ER_INNODB_FT_WRONG_DOCID_COLUMN
CREATE FULLTEXT INDEX idx ON wp(title, text);
}
if (!$MTR_COMBINATION_ORIG) {
CREATE FULLTEXT INDEX idx ON wp(title, text);
}
DROP TABLE wp;
CREATE TABLE wp(

View File

@ -23,8 +23,14 @@ CREATE TABLE articles (
# The newly create dict_index_t should be removed from fts cache
SET @saved_debug_dbug = @@SESSION.debug_dbug;
SET SESSION debug_dbug="+d,ib_dict_create_index_tree_fail";
if ($MTR_COMBINATION_ORIG) {
--error ER_OUT_OF_RESOURCES
CREATE FULLTEXT INDEX idx ON articles(body);
}
if (!$MTR_COMBINATION_ORIG) {
--error ER_CANT_CREATE_TABLE
CREATE FULLTEXT INDEX idx ON articles(body);
}
SET SESSION debug_dbug=@saved_debug_dbug;
# This simply go through ha_innobase::commit_inplace_alter_table
@ -37,8 +43,14 @@ DROP TABLE articles;
CREATE TABLE t (a INT, b TEXT) engine=innodb;
SET debug_dbug='+d,alter_table_rollback_new_index';
if ($MTR_COMBINATION_ORIG) {
-- error ER_UNKNOWN_ERROR
ALTER TABLE t ADD FULLTEXT INDEX (b(64));
}
if (!$MTR_COMBINATION_ORIG) {
-- error ER_ALTER_OPERATION_NOT_SUPPORTED_REASON
ALTER TABLE t ADD FULLTEXT INDEX (b(64)), ALGORITHM=INPLACE;
}
SET SESSION debug_dbug=@saved_debug_dbug;
DROP TABLE t;
@ -57,6 +69,7 @@ ALTER TABLE t1 FORCE;
DROP TABLE t2, t1;
SET SESSION debug_dbug=@saved_debug_dbug;
if ($MTR_COMBINATION_ORIG) {
--echo #
--echo # MDEV-25200 Index count mismatch due to aborted FULLTEXT INDEX
--echo #
@ -82,6 +95,7 @@ ALTER TABLE t1 ADD bl INT AS (LENGTH(b)) VIRTUAL;
CHECK TABLE t1;
DROP TABLE t1;
--source include/wait_until_count_sessions.inc
}
--echo #
--echo # MDEV-25663 Double free of transaction during TRUNCATE
@ -104,6 +118,7 @@ SET debug_dbug=@saved_debug_dbug;
DROP TABLE t1;
--echo # End of 10.3 tests
if ($MTR_COMBINATION_ORIG) {
# Fulltext fails in commit phase
CREATE TABLE t1(f1 INT NOT NULL, f2 CHAR(100))ENGINE=InnoDB;
@ -115,3 +130,4 @@ ALTER TABLE t1 DISCARD TABLESPACE;
--error ER_TABLESPACE_DISCARDED
ALTER TABLE t1 ADD FULLTEXT(f2);
DROP TABLE t1;
}

View File

@ -182,12 +182,13 @@ INSERT INTO t1 (value) VALUES
('collation of latin1_bin to make it case sensitive')
;
--enable_info
ALTER TABLE t1
DROP COLUMN id1,
ADD COLUMN id2 INT NOT NULL AUTO_INCREMENT PRIMARY KEY,
DROP INDEX idx1,
ADD FULLTEXT INDEX idx2(value),
ALGORITHM=INPLACE;
ADD FULLTEXT INDEX idx2(value);
--disable_info
DROP TABLE t1;

View File

@ -1 +1 @@
--innodb --loose-changed_page_bitmaps --innodb-sys-tables --innodb-flush-log-at-trx-commit=2 --sequence
--innodb --innodb-sys-tables --innodb-flush-log-at-trx-commit=2 --sequence

View File

@ -60,7 +60,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -521,7 +520,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -993,7 +991,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -1466,7 +1463,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -1933,7 +1929,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -2406,7 +2401,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -2884,7 +2878,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -3360,7 +3353,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -3826,7 +3818,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -4287,7 +4278,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -4759,7 +4749,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -5232,7 +5221,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -5699,7 +5687,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -6172,7 +6159,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -6650,7 +6636,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -7126,7 +7111,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -15108,7 +15092,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -15569,7 +15552,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -16041,7 +16023,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -16514,7 +16495,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -16981,7 +16961,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -17454,7 +17433,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -17932,7 +17910,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -18408,7 +18385,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template

View File

@ -60,7 +60,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -530,7 +529,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -1017,7 +1015,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -1511,7 +1508,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -1995,7 +1991,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -2489,7 +2484,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -2988,7 +2982,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -3485,7 +3478,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -3974,7 +3966,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -4444,7 +4435,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -4931,7 +4921,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -5425,7 +5414,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -5909,7 +5897,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -6403,7 +6390,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -6902,7 +6888,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -7399,7 +7384,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION part_1,part_2;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -15700,7 +15684,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -16170,7 +16153,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -16657,7 +16639,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -17151,7 +17132,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -17635,7 +17615,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -18129,7 +18108,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -18628,7 +18606,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
@ -19125,7 +19102,6 @@ SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template
WHERE f_int1 BETWEEN 1 AND @max_row_div2 - 1;
ALTER TABLE t1 ANALYZE PARTITION ALL;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
INSERT INTO t1(f_int1,f_int2,f_char1,f_char2,f_charbig)
SELECT f_int1,f_int2,f_char1,f_char2,f_charbig FROM t0_template

View File

@ -1020,7 +1020,6 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;

View File

@ -1020,7 +1020,6 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze note The storage engine for the table doesn't support analyze
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;

View File

@ -1020,7 +1020,6 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
MySQL_Test_DB.t1 analyze status Engine-independent statistics collected
MySQL_Test_DB.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;

View File

@ -987,7 +987,6 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;

View File

@ -987,7 +987,6 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze note The storage engine for the table doesn't support analyze
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;

View File

@ -987,7 +987,6 @@ a b
2001 Second in MAX
ALTER TABLE t1 ANALYZE PARTITION MAX;
Table Op Msg_type Msg_text
mysql_test_db.t1 analyze status Engine-independent statistics collected
mysql_test_db.t1 analyze status OK
# Truncate without FLUSH
ALTER TABLE t1 TRUNCATE PARTITION MAX;

View File

@ -11,6 +11,7 @@ SELECT * FROM t1;
i
1
connection slave;
include/save_master_gtid.inc
connection slave;
call mtr.add_suppression("Slave I/O: Relay log write failure: could not queue event from master.*");
# Case 0 : Start slave with IGNORE_DOMAIN_IDS=(), then restart
@ -24,6 +25,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) :
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) :
SET @saved_dbug = @@GLOBAL.debug_dbug;
@ -33,6 +35,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(3);
COMMIT;
include/save_master_gtid.inc
SELECT * FROM t1;
i
1
@ -46,6 +49,7 @@ i
SET @@global.debug_dbug=@saved_dbug;
START SLAVE io_thread;
include/wait_for_slave_io_to_start.inc
include/sync_with_master_gtid.inc
SELECT * FROM t1;
i
1
@ -59,6 +63,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) :
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) : 1
SET @@global.debug_dbug="d,kill_slave_io_before_commit";
@ -67,6 +72,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(4);
INSERT INTO t1 VALUES(5);
COMMIT;
include/save_master_gtid.inc
SELECT * FROM t1;
i
1
@ -84,6 +90,7 @@ i
SET @@global.debug_dbug=@saved_dbug;
START SLAVE io_thread;
include/wait_for_slave_io_to_start.inc
include/sync_with_master_gtid.inc
SELECT * FROM t1;
i
1
@ -97,6 +104,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) : 1
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) :
SET @@global.debug_dbug="d,kill_slave_io_before_commit";
@ -114,6 +122,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(10);
INSERT INTO t1 VALUES(11);
COMMIT;
include/save_master_gtid.inc
SELECT * FROM t1;
i
1
@ -140,6 +149,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) :
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) : 1
SELECT * FROM t1;
@ -157,6 +167,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) : 1
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) : 1
SET @@global.debug_dbug="d,kill_slave_io_before_commit";
@ -166,6 +177,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(12);
INSERT INTO t1 VALUES(13);
COMMIT;
include/save_master_gtid.inc
START TRANSACTION;
INSERT INTO t1 VALUES(14);
INSERT INTO t1 VALUES(15);
@ -204,11 +216,16 @@ i
10
11
SET @@global.debug_dbug=@saved_dbug;
include/sync_with_master_gtid.inc
include/stop_slave_sql.inc
DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) : 1
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
connection master;
include/save_master_gtid.inc
connection slave;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) :
SELECT * FROM t1;
@ -230,6 +247,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) :
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) : 1
SET @@global.debug_dbug="d,kill_slave_io_after_2_events";
@ -239,6 +257,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(18);
INSERT INTO t1 VALUES(19);
COMMIT;
include/save_master_gtid.inc
START TRANSACTION;
INSERT INTO t1 VALUES(20);
INSERT INTO t1 VALUES(21);
@ -287,11 +306,16 @@ i
16
17
SET @@global.debug_dbug=@saved_dbug;
include/sync_with_master_gtid.inc
include/stop_slave_sql.inc
DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) : 1
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
connection master;
include/save_master_gtid.inc
connection slave;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) :
SELECT * FROM t1;
@ -317,6 +341,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) :
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) :
SET @@global.debug_dbug="d,kill_slave_io_after_2_events";
@ -335,6 +360,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(28);
INSERT INTO t1 VALUES(29);
COMMIT;
include/save_master_gtid.inc
SELECT * FROM t1;
i
1
@ -389,6 +415,7 @@ DO_DOMAIN_IDS (BEFORE) :
IGNORE_DOMAIN_IDS (BEFORE) :
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
include/start_slave.inc
include/sync_with_master_gtid.inc
DO_DOMAIN_IDS (AFTER) :
IGNORE_DOMAIN_IDS (AFTER) : 1
SELECT * FROM t1;

View File

@ -55,7 +55,6 @@ PARTITION pmax VALUES LESS THAN (MAXVALUE));
INSERT INTO t1 VALUES (1), (10), (100), (1000);
ALTER TABLE t1 ANALYZE PARTITION p0;
Table Op Msg_type Msg_text
test.t1 analyze status Engine-independent statistics collected
test.t1 analyze status OK
ALTER TABLE t1 OPTIMIZE PARTITION p0;
Table Op Msg_type Msg_text

View File

@ -15,11 +15,15 @@ call mtr.add_suppression("reply failed");
call mtr.add_suppression("Replication event checksum verification");
call mtr.add_suppression("Relay log write failure");
call mtr.add_suppression("Failed to kill the active semi-sync connection");
set @sav_enabled_server_2= @@GLOBAL.rpl_semi_sync_slave_enabled;
set @sav_server_2_dbug= @@GLOBAL.debug_dbug;
connection server_3;
call mtr.add_suppression("reply failed");
call mtr.add_suppression("Replication event checksum verification");
call mtr.add_suppression("Relay log write failure");
call mtr.add_suppression("Failed to kill the active semi-sync connection");
set @sav_enabled_server_3= @@GLOBAL.rpl_semi_sync_slave_enabled;
set @sav_server_3_dbug= @@GLOBAL.debug_dbug;
connection server_1;
CREATE TABLE t1 (a int);
connection server_2;
@ -40,15 +44,15 @@ connection server_1;
#-- Enable semi-sync on slaves
let slave_last= 3
connection server_2;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status ON
connection server_3;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
@ -67,24 +71,20 @@ show status like 'Rpl_semi_sync_master_clients';
Variable_name Value
Rpl_semi_sync_master_clients 2
#-- Prepare servers to simulate delay or error
connection server_1;
SET @@GLOBAL.debug_dbug= "";
connection server_2;
SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply";
connection server_3;
SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply";
#--
#-- Test begins
connection server_1_con2;
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
connection server_1;
#-- Begin semi-sync transaction
INSERT INTO t1 VALUES (1);
connection server_1_con2;
#-- Wait until master recognizes a connection is awaiting semi-sync ACK
show status like 'Rpl_semi_sync_master_wait_sessions';
Variable_name Value
Rpl_semi_sync_master_wait_sessions 1
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
#-- Begin master shutdown
SHUTDOWN WAIT FOR ALL SLAVES;
connection server_1;
@ -111,22 +111,19 @@ count(*)=1
#-- Re-synchronize slaves with master and disable semi-sync
#-- Stop slaves
connection server_2;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_2_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_2;
connection server_3;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_3_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_3;
#-- Bring the master back up
connection server_1_con2;
connection default;
connection server_1;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_master_enabled = 0;
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status OFF
TRUNCATE TABLE t1;
#-- Bring slaves back up
connection server_2;
@ -157,15 +154,15 @@ connection server_1;
#-- Enable semi-sync on slaves
let slave_last= 3
connection server_2;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status ON
connection server_3;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
@ -184,24 +181,20 @@ show status like 'Rpl_semi_sync_master_clients';
Variable_name Value
Rpl_semi_sync_master_clients 2
#-- Prepare servers to simulate delay or error
connection server_1;
SET @@GLOBAL.debug_dbug= "+d,mysqld_delay_kill_threads_phase_1";
connection server_2;
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event";
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141";
connection server_3;
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event";
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141";
#--
#-- Test begins
connection server_1_con2;
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
connection server_1;
#-- Begin semi-sync transaction
INSERT INTO t1 VALUES (1);
connection server_1_con2;
#-- Wait until master recognizes a connection is awaiting semi-sync ACK
show status like 'Rpl_semi_sync_master_wait_sessions';
Variable_name Value
Rpl_semi_sync_master_wait_sessions 1
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
#-- Begin master shutdown
SHUTDOWN WAIT FOR ALL SLAVES;
connection server_1;
@ -226,24 +219,33 @@ count(*)=0
1
#
#-- Re-synchronize slaves with master and disable semi-sync
#-- FIXME: workaround for MDEV-28141, preventing errored replicas from
# killing their semi-sync connections
connection server_2;
set debug_sync= "now wait_for at_semisync_kill_connection";
set debug_sync= "now signal continue_semisync_kill_connection";
# Wait for debug_sync signal to have been received before issuing RESET
set debug_sync= "reset";
connection server_3;
set debug_sync= "now wait_for at_semisync_kill_connection";
set debug_sync= "now signal continue_semisync_kill_connection";
# Wait for debug_sync signal to have been received before issuing RESET
set debug_sync= "reset";
#-- Stop slaves
connection server_2;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_2_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_2;
connection server_3;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_3_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_3;
#-- Bring the master back up
connection server_1_con2;
connection default;
connection server_1;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_master_enabled = 0;
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status OFF
TRUNCATE TABLE t1;
#-- Bring slaves back up
connection server_2;
@ -275,15 +277,15 @@ connection server_1;
#-- Enable semi-sync on slaves
let slave_last= 3
connection server_2;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status ON
connection server_3;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
@ -302,24 +304,20 @@ show status like 'Rpl_semi_sync_master_clients';
Variable_name Value
Rpl_semi_sync_master_clients 2
#-- Prepare servers to simulate delay or error
connection server_1;
SET @@GLOBAL.debug_dbug= "+d,mysqld_delay_kill_threads_phase_1";
connection server_2;
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event";
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141";
connection server_3;
SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply";
#--
#-- Test begins
connection server_1_con2;
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
connection server_1;
#-- Begin semi-sync transaction
INSERT INTO t1 VALUES (1);
connection server_1_con2;
#-- Wait until master recognizes a connection is awaiting semi-sync ACK
show status like 'Rpl_semi_sync_master_wait_sessions';
Variable_name Value
Rpl_semi_sync_master_wait_sessions 1
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
#-- Begin master shutdown
SHUTDOWN WAIT FOR ALL SLAVES;
connection server_1;
@ -344,24 +342,28 @@ count(*)=1
1
#
#-- Re-synchronize slaves with master and disable semi-sync
#-- FIXME: workaround for MDEV-28141, preventing errored replicas from
# killing their semi-sync connections
connection server_2;
set debug_sync= "now wait_for at_semisync_kill_connection";
set debug_sync= "now signal continue_semisync_kill_connection";
# Wait for debug_sync signal to have been received before issuing RESET
set debug_sync= "reset";
#-- Stop slaves
connection server_2;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_2_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_2;
connection server_3;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_3_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_3;
#-- Bring the master back up
connection server_1_con2;
connection default;
connection server_1;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_master_enabled = 0;
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status OFF
TRUNCATE TABLE t1;
#-- Bring slaves back up
connection server_2;
@ -399,15 +401,15 @@ connection server_1;
#-- Enable semi-sync on slaves
let slave_last= 3
connection server_2;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
Rpl_semi_sync_slave_status ON
connection server_3;
set global rpl_semi_sync_slave_enabled = 1;
include/stop_slave.inc
set global rpl_semi_sync_slave_enabled = 1;
include/start_slave.inc
show status like 'Rpl_semi_sync_slave_status';
Variable_name Value
@ -426,24 +428,20 @@ show status like 'Rpl_semi_sync_master_clients';
Variable_name Value
Rpl_semi_sync_master_clients 2
#-- Prepare servers to simulate delay or error
connection server_1;
SET @@GLOBAL.debug_dbug= "+d,mysqld_delay_kill_threads_phase_1";
connection server_2;
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event,slave_delay_killing_semisync_connection";
SET @@GLOBAL.debug_dbug= "+d,corrupt_queue_event,delay_semisync_kill_connection_for_mdev_28141";
connection server_3;
SET @@GLOBAL.debug_dbug= "+d,simulate_delay_semisync_slave_reply";
#--
#-- Test begins
connection server_1_con2;
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
connection server_1;
#-- Begin semi-sync transaction
INSERT INTO t1 VALUES (1);
connection server_1_con2;
#-- Wait until master recognizes a connection is awaiting semi-sync ACK
show status like 'Rpl_semi_sync_master_wait_sessions';
Variable_name Value
Rpl_semi_sync_master_wait_sessions 1
#-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
#-- Begin master shutdown
SHUTDOWN WAIT FOR ALL SLAVES;
connection server_1;
@ -468,24 +466,28 @@ count(*)=1
1
#
#-- Re-synchronize slaves with master and disable semi-sync
#-- FIXME: workaround for MDEV-28141, preventing errored replicas from
# killing their semi-sync connections
connection server_2;
set debug_sync= "now wait_for at_semisync_kill_connection";
set debug_sync= "now signal continue_semisync_kill_connection";
# Wait for debug_sync signal to have been received before issuing RESET
set debug_sync= "reset";
#-- Stop slaves
connection server_2;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_2_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_2;
connection server_3;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0;
include/stop_slave.inc
include/stop_slave_io.inc
include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_3_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_3;
#-- Bring the master back up
connection server_1_con2;
connection default;
connection server_1;
SET @@GLOBAL.debug_dbug= "";
SET @@GLOBAL.rpl_semi_sync_master_enabled = 0;
show status like 'Rpl_semi_sync_master_status';
Variable_name Value
Rpl_semi_sync_master_status OFF
TRUNCATE TABLE t1;
#-- Bring slaves back up
connection server_2;
@ -509,9 +511,13 @@ COUNT(*)=0
#############################
connection server_2;
include/stop_slave.inc
SET @@GLOBAL.rpl_semi_sync_slave_enabled = @sav_enabled_server_2;
SET @@GLOBAL.debug_dbug= @sav_server_2_dbug;
include/start_slave.inc
connection server_3;
include/stop_slave.inc
SET @@GLOBAL.rpl_semi_sync_slave_enabled = @sav_enabled_server_3;
SET @@GLOBAL.debug_dbug= @sav_server_3_dbug;
include/start_slave.inc
connection server_1;
drop table t1;

View File

@ -9,6 +9,7 @@ CREATE TABLE t1(i INT) ENGINE=INNODB;
INSERT INTO t1 VALUES(1);
SELECT * FROM t1;
sync_slave_with_master;
--source include/save_master_gtid.inc
connection slave;
@ -28,7 +29,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -44,7 +45,7 @@ START TRANSACTION;
INSERT INTO t1 VALUES(2);
INSERT INTO t1 VALUES(3);
COMMIT;
save_master_pos;
--source include/save_master_gtid.inc
SELECT * FROM t1;
connection slave;
@ -55,7 +56,7 @@ SET @@global.debug_dbug=@saved_dbug;
START SLAVE io_thread;
--source include/wait_for_slave_io_to_start.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
SELECT * FROM t1;
--echo # Case 1 : Start slave with IGNORE_DOMAIN_IDS=(1), then restart
@ -70,7 +71,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -86,7 +87,7 @@ INSERT INTO t1 VALUES(4);
INSERT INTO t1 VALUES(5);
COMMIT;
save_master_pos;
--source include/save_master_gtid.inc
SELECT * FROM t1;
connection slave;
@ -97,7 +98,7 @@ SET @@global.debug_dbug=@saved_dbug;
START SLAVE io_thread;
--source include/wait_for_slave_io_to_start.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
SELECT * FROM t1;
--echo # Case 2 : Start slave with IGNORE_DOMAIN_IDS=(), then restart
@ -112,7 +113,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -140,7 +141,7 @@ INSERT INTO t1 VALUES(10);
INSERT INTO t1 VALUES(11);
COMMIT;
save_master_pos;
--source include/save_master_gtid.inc
SELECT * FROM t1;
connection slave;
@ -157,7 +158,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -178,7 +179,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -196,6 +197,15 @@ INSERT INTO t1 VALUES(12);
INSERT INTO t1 VALUES(13);
COMMIT; # IO thread gets killed here.
# MDEV-14357
# As the prior transaction will be ignored on slave because its domain id is
# ignored, the replica's gtid_slave_pos will be updated to have seen it,
# despite its eventual failure to queue the whole transaction to the relay log.
# So for test consistency, we need to synchronize the SQL thread with this
# position; otherwise, when restarting the server after resetting
# IGNORE_DOMAIN_IDS, we will re-fetch this event and execute it.
--source include/save_master_gtid.inc
START TRANSACTION;
INSERT INTO t1 VALUES(14);
INSERT INTO t1 VALUES(15);
@ -207,7 +217,6 @@ INSERT INTO t1 VALUES(16);
INSERT INTO t1 VALUES(17);
COMMIT;
save_master_pos;
SELECT * FROM t1;
connection slave;
@ -217,6 +226,11 @@ SELECT * FROM t1;
SET @@global.debug_dbug=@saved_dbug;
# MDEV-14357
# Ensure the SQL thread is updated with the GTID of the ignored transaction
# so we don't fetch it and execute it after restarting without any ignored
# domain ids.
--source include/sync_with_master_gtid.inc
--source include/stop_slave_sql.inc
let $do_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -224,8 +238,12 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
--echo IGNORE_DOMAIN_IDS (BEFORE) : $ignore_domain_ids_before
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
--connection master
--source include/save_master_gtid.inc
--connection slave
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -246,7 +264,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -264,6 +282,11 @@ INSERT INTO t1 VALUES(18);
INSERT INTO t1 VALUES(19); # IO thread gets killed here.
COMMIT;
# MDEV-14357
# Synchronize gtid_slave_pos with the ignored event. See prior comments about
# MDEV-14357 for details.
--source include/save_master_gtid.inc
START TRANSACTION;
INSERT INTO t1 VALUES(20);
INSERT INTO t1 VALUES(21);
@ -275,7 +298,6 @@ INSERT INTO t1 VALUES(22);
INSERT INTO t1 VALUES(23);
COMMIT;
save_master_pos;
SELECT * FROM t1;
connection slave;
@ -285,6 +307,10 @@ SELECT * FROM t1;
SET @@global.debug_dbug=@saved_dbug;
# MDEV-14357
# Synchronize gtid_slave_pos with the ignored event. See prior comments about
# MDEV-14357 for details.
--source include/sync_with_master_gtid.inc
--source include/stop_slave_sql.inc
let $do_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -292,8 +318,12 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
--echo IGNORE_DOMAIN_IDS (BEFORE) : $ignore_domain_ids_before
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
--connection master
--source include/save_master_gtid.inc
--connection slave
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -314,7 +344,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);
@ -343,7 +373,7 @@ INSERT INTO t1 VALUES(28);
INSERT INTO t1 VALUES(29);
COMMIT;
save_master_pos;
--source include/save_master_gtid.inc
SELECT * FROM t1;
connection slave;
@ -361,7 +391,7 @@ let $ignore_domain_ids_before= query_get_value(SHOW SLAVE STATUS, Replicate_Igno
CHANGE MASTER TO IGNORE_DOMAIN_IDS=(1), MASTER_USE_GTID=slave_pos;
--source include/start_slave.inc
sync_with_master;
--source include/sync_with_master_gtid.inc
let $do_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Do_Domain_Ids, 1);
let $ignore_domain_ids_after= query_get_value(SHOW SLAVE STATUS, Replicate_Ignore_Domain_Ids, 1);

View File

@ -3,7 +3,6 @@
# replicas before shutting down.
#
# Parameters:
# server_1_dbug (string) Debug setting for primary (server 1)
# server_2_dbug (string) Debug setting to simulate delay or error on
# the first replica (server 2)
# server_3_dbug (string) Debug setting to simulate delay or error on
@ -32,8 +31,8 @@ while (`SELECT $i <= $slave_last`)
--connection server_$i
--sync_with_master
set global rpl_semi_sync_slave_enabled = 1;
source include/stop_slave.inc;
set global rpl_semi_sync_slave_enabled = 1;
source include/start_slave.inc;
show status like 'Rpl_semi_sync_slave_status';
@ -56,8 +55,6 @@ show status like 'Rpl_semi_sync_master_status';
show status like 'Rpl_semi_sync_master_clients';
--echo #-- Prepare servers to simulate delay or error
--connection server_1
--eval SET @@GLOBAL.debug_dbug= $server_1_dbug
--connection server_2
--eval SET @@GLOBAL.debug_dbug= $server_2_dbug
--connection server_3
@ -66,6 +63,14 @@ show status like 'Rpl_semi_sync_master_clients';
--echo #--
--echo #-- Test begins
--connection server_1_con2
--echo #-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
wait
EOF
--connection server_1
--echo #-- Begin semi-sync transaction
--send INSERT INTO t1 VALUES (1)
@ -75,14 +80,6 @@ show status like 'Rpl_semi_sync_master_clients';
let $status_var= Rpl_semi_sync_master_wait_sessions;
let $status_var_value= 1;
source include/wait_for_status_var.inc;
show status like 'Rpl_semi_sync_master_wait_sessions';
--write_file $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
wait
EOF
--echo #-- Give enough time after timeout/ack received to query yes_tx/no_tx
SET @@GLOBAL.debug_dbug= "+d,delay_shutdown_phase_2_after_semisync_wait";
--echo #-- Begin master shutdown
--send SHUTDOWN WAIT FOR ALL SLAVES
@ -111,19 +108,72 @@ show status like 'Rpl_semi_sync_master_no_tx';
--echo #
--echo #-- Re-synchronize slaves with master and disable semi-sync
if (`SELECT ($server_2_expect_row_count + $server_3_expect_row_count) < 2`)
{
--echo #-- FIXME: workaround for MDEV-28141, preventing errored replicas from
--echo # killing their semi-sync connections
# I.e. we can't create a new kill connection to the primary if we know that the
# primary is shutting down, because of the risk of a "Packets out of order"
# error. So we wait to hit a debug_sync point before the creation of the new
# kill_connection, and don't progress until the primary has been shut down, so
# no new connection can be formed.
# Note this is only needed in the error case (using corrupt_queue_event), as
# the running io_thread will otherwise automatically detect that the primary
# has shut down before progressing to the cleanup of the io thread.
# (A standalone sketch of this debug_sync handshake follows the two if-blocks
# below.)
}
if (!$server_2_expect_row_count)
{
--connection server_2
set debug_sync= "now wait_for at_semisync_kill_connection";
set debug_sync= "now signal continue_semisync_kill_connection";
--echo # Wait for debug_sync signal to have been received before issuing RESET
let $wait_condition= select count(*)=0 from information_schema.processlist where state like "debug sync point%";
source include/wait_condition.inc;
set debug_sync= "reset";
}
if (!$server_3_expect_row_count)
{
--connection server_3
set debug_sync= "now wait_for at_semisync_kill_connection";
set debug_sync= "now signal continue_semisync_kill_connection";
--echo # Wait for debug_sync signal to have been received before issuing RESET
let $wait_condition= select count(*)=0 from information_schema.processlist where state like "debug sync point%";
source include/wait_condition.inc;
set debug_sync= "reset";
}
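# Aside (not part of this commit): a minimal, standalone sketch of the
# debug_sync signal/wait handshake used by the two if-blocks above. It assumes
# a debug build (have_debug_sync); the connection names and the signal name
# "go" are hypothetical and only illustrate the pattern.
--source include/have_debug_sync.inc
connect (con1,localhost,root,,);
connect (con2,localhost,root,,);

--connection con1
# Block this connection until another connection raises the matching signal.
--send set debug_sync= "now WAIT_FOR go"

--connection con2
# Release con1; the signal name must match the WAIT_FOR name exactly.
set debug_sync= "now SIGNAL go";

--connection con1
# Collect the result of the blocked statement, then clear sync point state.
--reap
set debug_sync= "reset";

disconnect con1;
disconnect con2;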
--echo #-- Stop slaves
--connection server_2
--eval SET @@GLOBAL.debug_dbug= "$sav_server_2_dbug"
--eval SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0
--let $rpl_only_running_threads= 1
source include/stop_slave.inc;
# If server_2_expect_row_count is 0, we are simulating an error on the replica
# and the IO thread will end with errno 1595.
# Otherwise, we still expect an error, because the master has shut down at this
# point, and the IO thread may or may not have noticed the shutdown and started
# trying to reconnect automatically. This can result in the IO thread reporting
# a 2003 error if the slave tries to reconnect to the shut-down master.
# Additionally, disable warnings because the slave may have already stopped on
# the error by itself, and we don't want a sporadic "Slave is already stopped"
# warning.
--disable_warnings
--let $rpl_allow_error= 1
--source include/stop_slave_io.inc
--enable_warnings
--let $rpl_allow_error=
--source include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_2_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_2;
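# Aside (not part of this commit): if a test needs to assert the specific IO
# thread error instead of merely tolerating it, the standard include can be
# used. A minimal sketch, assuming the replica is expected to stop with the
# network read error (1595) mentioned above:
--let $slave_io_errno= 1595
--source include/wait_for_slave_io_error.inc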
--connection server_3
--eval SET @@GLOBAL.debug_dbug= "$sav_server_3_dbug"
--eval SET @@GLOBAL.rpl_semi_sync_slave_enabled= 0
--let $rpl_only_running_threads= 1
source include/stop_slave.inc;
# Expect an error for the IO thread; see the comment above about stopping server_2
--disable_warnings
--let $rpl_allow_error= 1
--source include/stop_slave_io.inc
--enable_warnings
--let $rpl_allow_error=
--source include/stop_slave_sql.inc
SET @@GLOBAL.debug_dbug= @sav_server_3_dbug;
SET @@GLOBAL.rpl_semi_sync_slave_enabled= @sav_enabled_server_3;
--echo #-- Bring the master back up
--connection server_1_con2
@ -142,13 +192,6 @@ EOF
--enable_reconnect
--source include/wait_until_connected_again.inc
--eval SET @@GLOBAL.debug_dbug= "$sav_master_dbug"
let $status_var= Rpl_semi_sync_master_clients;
let $status_var_value= 0;
source include/wait_for_status_var.inc;
--eval SET @@GLOBAL.rpl_semi_sync_master_enabled = 0
show status like 'Rpl_semi_sync_master_status';
TRUNCATE TABLE t1;
--save_master_pos

Some files were not shown because too many files have changed in this diff