
Merge branch '11.4' into bb-11.8-release

Oleksandr Byelkin
2025-10-24 12:25:01 +02:00
290 changed files with 6655 additions and 15331 deletions

View File

@@ -54,4 +54,4 @@ jobs:
$cidir = "$tmp/ci"
mkdir $cidir
fsutil file setCaseSensitiveInfo $cidir enable
perl bld\mysql-test\mysql-test-run.pl --force --parallel=$parallel --suite=main,innodb --vardir=$cidir/var --mysqld=--lower-case-table-names=0 --mysqld=--loose-innodb-flush-log-at-trx-commit=2
perl bld\mysql-test\mysql-test-run.pl --force --parallel=$parallel --suite=main,innodb --vardir=$cidir/var --mysqld=--lower-case-table-names=0 --mysqld=--loose-innodb-flush-log-at-trx-commit=2 --mysqld=--debug-no-sync

View File

@@ -296,10 +296,14 @@ IF(CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_VERSION VERSION
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_GLIBCXX_DEBUG -D_GLIBCXX_ASSERTIONS")
ENDIF()
OPTION(ENABLE_GCOV "Enable gcov (debug, Linux builds only)" OFF)
OPTION(ENABLE_GCOV "Enable gcov (debug, macOS and Linux builds only)" OFF)
IF (ENABLE_GCOV)
IF (CMAKE_CXX_COMPILER_ID STREQUAL "AppleClang")
MY_CHECK_AND_SET_COMPILER_FLAG("--coverage" DEBUG)
ELSE()
MY_CHECK_AND_SET_COMPILER_FLAG("-DHAVE_gcov -fprofile-arcs -ftest-coverage -lgcov" DEBUG)
ENDIF()
ENDIF()
OPTION(WITHOUT_PACKED_SORT_KEYS "disable packed sort keys" OFF)
IF(WITHOUT_PACKED_SORT_KEYS)

View File

@@ -286,6 +286,7 @@ FUNCTION(INSTALL_RUNTIME_DEPS)
POST_EXCLUDE_REGEXES
".*system32/.*\\.dll" # Windows stuff
POST_INCLUDE_REGEXES
"libssl" "libcrypto" # Account for OpenSSL libraries in system32
DIRECTORIES
$<$<BOOL:${VCPKG_INSTALLED_DIR}>:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/bin>
$<$<AND:$<CONFIG:Debug>,$<BOOL:${VCPKG_INSTALLED_DIR}>>:${VCPKG_INSTALLED_DIR}/${VCPKG_TARGET_TRIPLET}/debug/bin>

View File

@@ -15,8 +15,8 @@ MACRO(BUNDLE_LIBFMT)
ExternalProject_Add(
libfmt
PREFIX "${dir}"
URL "https://github.com/fmtlib/fmt/releases/download/11.1.4/fmt-11.1.4.zip"
URL_MD5 ad6a56b15cddf4aad2a234e7cfc9e8c9
URL "https://github.com/fmtlib/fmt/releases/download/12.0.0/fmt-12.0.0.zip"
URL_MD5 9bd04e6e8c5b1733e4eefb473604219d
INSTALL_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_COMMAND ""

debian/mariadb-server.links vendored Normal file
View File

@@ -0,0 +1,6 @@
#!/usr/bin/dh-exec
usr/lib/${DEB_HOST_MULTIARCH}/libmariadb3/plugin/caching_sha2_password.so usr/lib/mysql/plugin/caching_sha2_password.so
usr/lib/${DEB_HOST_MULTIARCH}/libmariadb3/plugin/client_ed25519.so usr/lib/mysql/plugin/client_ed25519.so
usr/lib/${DEB_HOST_MULTIARCH}/libmariadb3/plugin/dialog.so usr/lib/mysql/plugin/dialog.so
usr/lib/${DEB_HOST_MULTIARCH}/libmariadb3/plugin/mysql_clear_password.so usr/lib/mysql/plugin/mysql_clear_password.so
usr/lib/${DEB_HOST_MULTIARCH}/libmariadb3/plugin/sha256_password.so usr/lib/mysql/plugin/sha256_password.so

View File

@@ -51,7 +51,6 @@ The parts not included are excluded by #ifndef UNIV_INNOCHECKSUM. */
#include "page0zip.h" /* page_zip_*() */
#include "trx0undo.h" /* TRX_* */
#include "fil0crypt.h" /* fil_space_verify_crypt_checksum */
#include <string.h>
#ifndef PRIuMAX
@@ -74,6 +73,8 @@ static ulint extent_size;
static ulint xdes_size;
ulong srv_page_size;
uint32_t srv_page_size_shift;
static uint32_t dblwr_1;
static uint32_t dblwr_2;
/* Current page number (0 based). */
uint32_t cur_page_num;
/* Current space. */
@@ -97,8 +98,10 @@ FILE* log_file = NULL;
/* Enabled for log write option. */
static bool is_log_enabled = false;
static bool skip_freed_pages;
static uint32_t tablespace_flags= 0;
static byte field_ref_zero_buf[UNIV_PAGE_SIZE_MAX];
const byte *field_ref_zero = field_ref_zero_buf;
constexpr uint32_t USE_FSP_FLAGS{UINT32_MAX};
#ifndef _WIN32
/* advisory lock for non-window system. */
@@ -253,12 +256,9 @@ void print_leaf_stats(
}
/** Init the page size for the tablespace.
@param[in] buf buffer used to read the page */
static void init_page_size(const byte* buf)
@param[in] flags InnoDB tablespace flags */
static void init_page_size_from_flags(const uint32_t flags)
{
const unsigned flags = mach_read_from_4(buf + FIL_PAGE_DATA
+ FSP_SPACE_FLAGS);
if (fil_space_t::full_crc32(flags)) {
const uint32_t ssize = FSP_FLAGS_FCRC32_GET_PAGE_SSIZE(flags);
srv_page_size_shift = UNIV_ZIP_SIZE_SHIFT_MIN - 1 + ssize;
@@ -540,24 +540,15 @@ static bool is_page_corrupted(byte *buf, bool is_encrypted, uint32_t flags)
return(is_corrupted);
}
/********************************************//*
Check if page is doublewrite buffer or not.
@param [in] page buffer page
@retval true if page is doublewrite buffer otherwise false.
*/
static
bool
is_page_doublewritebuffer(
const byte* page)
/** Check if page is doublewrite buffer or not.
@retval true if page is doublewrite buffer otherwise false. */
static bool is_page_doublewritebuffer()
{
if ((cur_page_num >= extent_size)
&& (cur_page_num < extent_size * 3)) {
/* page is doublewrite buffer. */
return (true);
}
return (false);
if (cur_space != 0) return false;
const uint32_t extent{static_cast<uint32_t>(
cur_page_num & ~(extent_size - 1))};
return cur_page_num > FSP_DICT_HDR_PAGE_NO &&
extent && (extent == dblwr_1 || extent == dblwr_2);
}
/*******************************************************//*
@@ -764,7 +755,7 @@ Parse the page and collect/dump the information about page type
@param [in] file file for diagnosis.
@param [in] is_encrypted tablespace is encrypted
*/
void
static void
parse_page(
const byte* page,
byte* xdes,
@@ -784,6 +775,12 @@ parse_page(
str = skip_page ? "Double_write_buffer" : "-";
page_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
if (skip_freed_pages) {
/** Skip doublewrite pages when -r is enabled */
if (is_page_doublewritebuffer()) {
return;
}
const byte *des= xdes + XDES_ARR_OFFSET +
xdes_size * ((page_no & (physical_page_size - 1))
/ extent_size);
@@ -978,6 +975,18 @@ parse_page(
fprintf(file, "#::" UINT32PF "\t\t|\t\tTransaction system "
"page\t\t|\t%s\n", cur_page_num, str);
}
if (cur_space == 0 &&
(mach_read_from_4(page + TRX_SYS_DOUBLEWRITE +
TRX_SYS_DOUBLEWRITE_MAGIC) ==
TRX_SYS_DOUBLEWRITE_MAGIC_N)) {
dblwr_1 = mach_read_from_4(
page + TRX_SYS_DOUBLEWRITE +
TRX_SYS_DOUBLEWRITE_BLOCK1);
dblwr_2 = mach_read_from_4(
page + TRX_SYS_DOUBLEWRITE +
TRX_SYS_DOUBLEWRITE_BLOCK2);
}
break;
case FIL_PAGE_TYPE_FSP_HDR:
@@ -1220,6 +1229,9 @@ static struct my_option innochecksum_options[] = {
{"skip-freed-pages", 'r', "skip freed pages for the tablespace",
&skip_freed_pages, &skip_freed_pages, 0, GET_BOOL, NO_ARG,
0, 0, 0, 0, 0, 0},
{"tablespace-flags", 0, "InnoDB tablespace flags (default: 4294967295 "
"= read from page 0)", &tablespace_flags, &tablespace_flags, 0,
GET_UINT, REQUIRED_ARG, USE_FSP_FLAGS, 0, USE_FSP_FLAGS, 0, 0, 0},
{0, 0, 0, 0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0}
};
@@ -1280,6 +1292,14 @@ innochecksum_get_one_option(
my_end(0);
exit(EXIT_SUCCESS);
break;
default:
if (tablespace_flags != USE_FSP_FLAGS &&
!fil_space_t::is_valid_flags(tablespace_flags, false) &&
!fil_space_t::is_valid_flags(tablespace_flags, true)) {
fprintf(stderr, "Error: Provided --tablespace-flags "
"is not valid.");
return true;
}
}
return(false);
@@ -1410,6 +1430,87 @@ rewrite_checksum(
&& !write_file(filename, fil_in, buf, flags, pos);
}
/** Read and validate page 0, then initialize tablespace flags
and page size.
@param fil_in File pointer
@param buf Buffer to read page into
@return whether the page was read successfully */
static bool read_and_validate_page0(FILE *fil_in, byte *buf)
{
/* Read the minimum page size first */
size_t initial_page_size= UNIV_ZIP_SIZE_MIN;
if (tablespace_flags != USE_FSP_FLAGS)
{
init_page_size_from_flags(tablespace_flags);
initial_page_size= physical_page_size;
}
/* Read just enough to get the tablespace flags */
size_t bytes= fread(buf, 1, initial_page_size, fil_in);
if (bytes != initial_page_size)
{
fprintf(stderr, "Error: Was not able to read the "
"minimum page size of %zu bytes. Bytes read "
"was %zu\n", initial_page_size, bytes);
return false;
}
/* Read space_id and page offset */
cur_space= mach_read_from_4(buf + FIL_PAGE_SPACE_ID);
cur_page_num= mach_read_from_4(buf + FIL_PAGE_OFFSET);
/* Get tablespace flags from the FSP header */
uint32_t flags= mach_read_from_4(buf + FSP_HEADER_OFFSET +
FSP_SPACE_FLAGS);
if (tablespace_flags != USE_FSP_FLAGS)
{
if (cur_page_num == 0 && flags != tablespace_flags)
fprintf(stderr, "Error: Mismatch between provided tablespace "
"flags (0x%x) and file flags (0x%x)\n",
tablespace_flags, flags);
}
else
{
if (cur_page_num)
{
fprintf(stderr, "Error: First page of the tablespace file "
"should be 0, but encountered page number %" PRIu32 ". "
"If you are checking multi file system "
"tablespace files, please specify the correct "
"tablespace flags using --tablespace-flags option.\n",
cur_page_num);
return false;
}
/* Initialize page size parameters based on flags */
init_page_size_from_flags(flags);
/* Read the rest of the page if it's larger than the minimum size */
if (physical_page_size > UNIV_ZIP_SIZE_MIN)
{
/* Read rest of the page 0 to determine crypt_data */
ulint bytes= read_file(buf, true, physical_page_size, fil_in);
if (bytes != physical_page_size)
{
fprintf(stderr, "Error: Was not able to read the rest of the "
"page of " ULINTPF " bytes. Bytes read was " ULINTPF "\n",
physical_page_size - UNIV_ZIP_SIZE_MIN, bytes);
return false;
}
}
tablespace_flags= flags;
}
if (physical_page_size < UNIV_ZIP_SIZE_MIN ||
physical_page_size > UNIV_PAGE_SIZE_MAX)
{
fprintf(stderr, "Error: Invalid page size " ULINTPF
" encountered\n", physical_page_size);
return false;
}
return true;
}
int main(
int argc,
char **argv)
@@ -1545,51 +1646,13 @@ int main(
}
}
/* Read the minimum page size. */
bytes = fread(buf, 1, UNIV_ZIP_SIZE_MIN, fil_in);
partial_page_read = true;
if (bytes != UNIV_ZIP_SIZE_MIN) {
fprintf(stderr, "Error: Was not able to read the "
"minimum page size ");
fprintf(stderr, "of %d bytes. Bytes read was " ULINTPF "\n",
UNIV_ZIP_SIZE_MIN, bytes);
/* Read and validate page 0 */
if (!read_and_validate_page0(fil_in, buf)) {
exit_status = 1;
goto my_exit;
}
/* enable variable is_system_tablespace when space_id of given
file is zero. Use to skip the checksum verification and rewrite
for doublewrite pages. */
cur_space = mach_read_from_4(buf + FIL_PAGE_SPACE_ID);
cur_page_num = mach_read_from_4(buf + FIL_PAGE_OFFSET);
/* Determine page size, zip_size and page compression
from fsp_flags and encryption metadata from page 0 */
init_page_size(buf);
uint32_t flags = mach_read_from_4(FSP_HEADER_OFFSET + FSP_SPACE_FLAGS + buf);
if (physical_page_size == UNIV_ZIP_SIZE_MIN) {
partial_page_read = false;
} else {
/* Read rest of the page 0 to determine crypt_data */
bytes = read_file(buf, partial_page_read, physical_page_size, fil_in);
if (bytes != physical_page_size) {
fprintf(stderr, "Error: Was not able to read the "
"rest of the page ");
fprintf(stderr, "of " ULINTPF " bytes. Bytes read was " ULINTPF "\n",
physical_page_size - UNIV_ZIP_SIZE_MIN, bytes);
exit_status = 1;
goto my_exit;
}
partial_page_read = false;
}
/* Now that we have full page 0 in buffer, check encryption */
/* Check if tablespace is encrypted */
bool is_encrypted = check_encryption(filename, buf);
/* Verify page 0 contents. Note that we can't allow
@@ -1600,7 +1663,8 @@ int main(
allow_mismatches = 0;
exit_status = verify_checksum(buf, is_encrypted,
&mismatch_count, flags);
&mismatch_count,
tablespace_flags);
if (exit_status) {
fprintf(stderr, "Error: Page 0 checksum mismatch, can't continue. \n");
@@ -1611,7 +1675,8 @@ int main(
if ((exit_status = rewrite_checksum(
filename, fil_in, buf,
&pos, is_encrypted, flags))) {
&pos, is_encrypted,
tablespace_flags))) {
goto my_exit;
}
@@ -1807,7 +1872,7 @@ unexpected_eof:
first_non_zero:
if (is_system_tablespace) {
/* enable when page is double write buffer.*/
skip_page = is_page_doublewritebuffer(buf);
skip_page = is_page_doublewritebuffer();
} else {
skip_page = false;
}
@@ -1828,13 +1893,16 @@ first_non_zero:
&& !is_page_free(xdes, physical_page_size, cur_page_num)
&& (exit_status = verify_checksum(
buf, is_encrypted,
&mismatch_count, flags))) {
&mismatch_count,
tablespace_flags))) {
goto my_exit;
}
if ((exit_status = rewrite_checksum(
if (!is_page_doublewritebuffer() &&
(exit_status = rewrite_checksum(
filename, fil_in, buf,
&pos, is_encrypted, flags))) {
&pos, is_encrypted,
tablespace_flags))) {
goto my_exit;
}
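
As an aside on the change above: innochecksum no longer treats a fixed page range as the doublewrite buffer; it records the two block start pages from the TRX_SYS page (dblwr_1, dblwr_2) and skips a page only when, in the system tablespace, its extent-aligned number matches one of them. The following is a minimal standalone C++ sketch of that check; the constants here (extent_size, FSP_DICT_HDR_PAGE_NO, the block starts in main) are illustrative assumptions rather than values taken from the InnoDB headers.

#include <cstdint>
#include <iostream>

// Illustrative values: 64 pages per extent at a 16 KiB page size,
// and page 7 as the data dictionary header page.
static const uint32_t FSP_DICT_HDR_PAGE_NO = 7;
static uint32_t extent_size = 64;
static uint32_t dblwr_1, dblwr_2; // block starts as read from the TRX_SYS page

// A page belongs to the doublewrite buffer only in the system tablespace
// (space 0), and only when its extent-aligned number matches one of the
// two recorded doublewrite block starts.
static bool is_doublewrite_page(uint32_t space_id, uint32_t page_no)
{
  if (space_id != 0)
    return false;
  const uint32_t extent = page_no & ~(extent_size - 1);
  return page_no > FSP_DICT_HDR_PAGE_NO && extent &&
         (extent == dblwr_1 || extent == dblwr_2);
}

int main()
{
  dblwr_1 = 64;   // pretend TRX_SYS_DOUBLEWRITE_BLOCK1 recorded page 64
  dblwr_2 = 128;  // and TRX_SYS_DOUBLEWRITE_BLOCK2 recorded page 128
  std::cout << is_doublewrite_page(0, 70) << "\n"    // 1: inside block 1
            << is_doublewrite_page(0, 200) << "\n";  // 0: outside both blocks
}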

View File

@@ -353,8 +353,13 @@ int main(int argc,char *argv[])
{
found= 1;
if (verbose)
{
int hundred= code / 100;
printf("MariaDB error code %3d (%s): %s\n"
"Learn more: https://mariadb.com/kb/en/e%3d/\n", code, name, msg, code);
"Learn more: https://mariadb.com/docs/server/reference/"
"error-codes/mariadb-error-codes-%d00-to-%d99/e%3d\n",
code, name, msg, hundred, hundred, code);
}
else
puts(msg);
}
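
For context, the change above swaps the old kb/en/eNNNN links for the docs pages that group error codes by hundreds, so the tool derives the page name with an integer division of the code. Here is a small self-contained sketch of that arithmetic, reusing the format string from the diff; the surrounding program is illustrative only (the diff prints the code with %3d, which gives the same output for four-digit codes).

#include <cstdio>

int main()
{
  const int code = 1062;           // ER_DUP_ENTRY, as in the result files below
  const int hundred = code / 100;  // 10 -> the 1000-to-1099 page
  std::printf("Learn more: https://mariadb.com/docs/server/reference/"
              "error-codes/mariadb-error-codes-%d00-to-%d99/e%d\n",
              hundred, hundred, code);
  // Prints: ...error-codes/mariadb-error-codes-1000-to-1099/e1062
  return 0;
}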

View File

@@ -33,9 +33,16 @@ select @pages_accessed > 1000 and @pages_accessed < 1500;
@pages_accessed > 1000 and @pages_accessed < 1500
1
set @total_read = (@pages_read_count + @pages_prefetch_read_count);
select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
@pages_accessed*0.75 < @total_read @total_read < @pages_accessed*1.25
set @low_ok= @pages_accessed*0.75 < @total_read;
set @high_ok= @total_read < @pages_accessed*1.50;
select @low_ok, @high_ok;
@low_ok @high_ok
1 1
select
if(@low_ok and @high_ok,0,@pages_accessed) unexpected_accessed,
if(@low_ok and @high_ok,0,@total_read) unexpected_read;
unexpected_accessed unexpected_read
0 0
set @innodb_pages_read1=
(select variable_value
from information_schema.session_status

View File

@@ -48,8 +48,13 @@ set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count
select @pages_accessed > 1000 and @pages_accessed < 1500;
set @total_read = (@pages_read_count + @pages_prefetch_read_count);
set @low_ok= @pages_accessed*0.75 < @total_read;
set @high_ok= @total_read < @pages_accessed*1.50;
select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
select @low_ok, @high_ok;
select
if(@low_ok and @high_ok,0,@pages_accessed) unexpected_accessed,
if(@low_ok and @high_ok,0,@total_read) unexpected_read;
set @innodb_pages_read1=
(select variable_value

View File

@@ -5289,6 +5289,8 @@ SET @obj1='{ "a": 1,"b": 2,"c": 3}';
SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1);
JSON_OBJECT_FILTER_KEYS (@obj1,@arr1)
NULL
SET character_set_database=default;
SET CHARACTER SET default;
# End of 11.2 Test
# Beginning of 11.4 Test
#
@@ -5300,4 +5302,26 @@ NULL
SELECT json_array_intersect(@a,@b);
json_array_intersect(@a,@b)
NULL
# MDEV-36809: json_array_intersect crashes when unused table ref provided
#
select json_array_intersect('[["1", "7"], ["2", "6"], ["4", "5"], ["3", "8"]]', '[["2","6"],["3","8"],["4","5"],["1","7"]]') as result from mysql.user;
result
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
SELECT ( WITH x AS ( WITH x ( x ) AS ( SELECT ( 1.000000 ) ) SELECT x FROM x ) SELECT * FROM x WHERE ( SELECT AVG ( x ) OVER ( ORDER BY JSON_ARRAY_INTERSECT ( '[["1", "7"], ["2", "6"], ["3", "8"]]' , '[["2","6"],["3","8"],["4","5"],["1","7"]]' ) ) FROM x ) ) as result;
result
1.000000
#
# MDEV-37864: mysql-test/mtr --cursor main.func_json fails
#
select json_array_intersect('[["1", "7"], ["2", "6"], ["4", "5"], ["3", "8"]]', '[["2","6"],["3","8"],["4","5"],["1","7"]]') as result from mysql.user;
result
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
[["2", "6"], ["3", "8"], ["4", "5"], ["1", "7"]]
# End of 11.4 Test

View File

@@ -4177,6 +4177,9 @@ SET CHARACTER SET utf8;
SET @obj1='{ "a": 1,"b": 2,"c": 3}';
SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1);
SET character_set_database=default;
SET CHARACTER SET default;
--echo # End of 11.2 Test
--echo # Beginning of 11.4 Test
@@ -4190,4 +4193,15 @@ SELECT JSON_OBJECT_FILTER_KEYS (@obj1,@arr1);
SELECT json_array_intersect(@a,@b);
--echo # MDEV-36809: json_array_intersect crashes when unused table ref provided
--echo #
select json_array_intersect('[["1", "7"], ["2", "6"], ["4", "5"], ["3", "8"]]', '[["2","6"],["3","8"],["4","5"],["1","7"]]') as result from mysql.user;
SELECT ( WITH x AS ( WITH x ( x ) AS ( SELECT ( 1.000000 ) ) SELECT x FROM x ) SELECT * FROM x WHERE ( SELECT AVG ( x ) OVER ( ORDER BY JSON_ARRAY_INTERSECT ( '[["1", "7"], ["2", "6"], ["3", "8"]]' , '[["2","6"],["3","8"],["4","5"],["1","7"]]' ) ) FROM x ) ) as result;
--echo #
--echo # MDEV-37864: mysql-test/mtr --cursor main.func_json fails
--echo #
select json_array_intersect('[["1", "7"], ["2", "6"], ["4", "5"], ["3", "8"]]', '[["2","6"],["3","8"],["4","5"],["1","7"]]') as result from mysql.user;
--echo # End of 11.4 Test

View File

@@ -5624,8 +5624,18 @@ DROP TABLE t2;
DROP TABLE t1;
SET sql_mode=DEFAULT;
#
# End of 10.11 tests
# MDEV-37740 LOCATE(X,Y,NULL) is not NULL
#
select locate(1,2,NULL);
locate(1,2,NULL)
NULL
#
# MDEV-37835 mysqli silently trims each json_arrayagg result to modulo 64KB
#
select group_concat(v) from ( select '$a' as v union all select '$b' as v) t;
group_concat(v)
a...aaaaa,b...bbbbb
# End of 10.11 tests
#
# MDEV-9069 extend AES_ENCRYPT() and AES_DECRYPT() to support IV and the algorithm
#

View File

@@ -2576,8 +2576,19 @@ DROP TABLE t1;
SET sql_mode=DEFAULT;
--echo #
--echo # End of 10.11 tests
--echo # MDEV-37740 LOCATE(X,Y,NULL) is not NULL
--echo #
select locate(1,2,NULL);
--echo #
--echo # MDEV-37835 mysqli silently trims each json_arrayagg result to modulo 64KB
--echo #
let $a=`select repeat('a', 65540)`;
let $b=`select repeat('b', 65540)`;
replace_regex /a{65535}/a.../ /b{65535}/b.../;
evalp select group_concat(v) from ( select '$a' as v union all select '$b' as v) t;
--echo # End of 10.11 tests
--echo #
--echo # MDEV-9069 extend AES_ENCRYPT() and AES_DECRYPT() to support IV and the algorithm

View File

@@ -860,3 +860,11 @@ TRUNCATE(ST_Distance_Sphere(@zenica, @sarajevo), 10)
SELECT TRUNCATE(ST_Distance_Sphere(@sarajevo, @zenica), 10);
TRUNCATE(ST_Distance_Sphere(@sarajevo, @zenica), 10)
55878.5933759170
#
# MDEV-31499 Assertion `(0)' failed in Gis_geometry_collection::init_from_opresult.
#
SELECT ST_NUMGEOMETRIES(
ST_INTERSECTION(
ST_MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3.8571428571428568 2.857142857142857,5.571428571428571 4.571428571428571,9 4,3.8571428571428568 2.857142857142857)),((4.5 4.75,3 5,4.6 7.4,6 6,4.5 4.75))) '), ST_MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 4,3 5,2 5,2 7,5 4,3 4),(5 4,7.4 7,8 7,8 4,5 4))) ') )) as V;
V
3

View File

@@ -476,3 +476,13 @@ set @zenica = ST_GeomFromText('POINT(17.907743 44.203438)');
set @sarajevo = ST_GeomFromText('POINT(18.413076 43.856258)');
SELECT TRUNCATE(ST_Distance_Sphere(@zenica, @sarajevo), 10);
SELECT TRUNCATE(ST_Distance_Sphere(@sarajevo, @zenica), 10);
--echo #
--echo # MDEV-31499 Assertion `(0)' failed in Gis_geometry_collection::init_from_opresult.
--echo #
SELECT ST_NUMGEOMETRIES(
ST_INTERSECTION(
ST_MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3.8571428571428568 2.857142857142857,5.571428571428571 4.571428571428571,9 4,3.8571428571428568 2.857142857142857)),((4.5 4.75,3 5,4.6 7.4,6 6,4.5 4.75))) '), ST_MULTIPOLYGONFROMTEXT(' MULTIPOLYGON(((3 4,3 5,2 5,2 7,5 4,3 4),(5 4,7.4 7,8 7,8 4,5 4))) ') )) as V;

View File

@@ -1655,3 +1655,8 @@ DROP TABLE t1;
#
# End of 10.1 tests
#
CREATE TABLE t1 (c POINT NOT NULL,SPATIAL (c));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT ('POINT(1 0)'));
UPDATE t1 SET c='';
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
DROP TABLE t1;

View File

@@ -1043,3 +1043,15 @@ DROP TABLE t1;
--echo #
--echo # End of 10.1 tests
--echo #
#
# Bug #31766 SIGSEGV in maria_rtree_split_page | maria_rtree_add_key
#
CREATE TABLE t1 (c POINT NOT NULL,SPATIAL (c));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT ('POINT(1 0)'));
--error ER_CANT_CREATE_GEOMETRY_OBJECT
UPDATE t1 SET c='';
DROP TABLE t1;

View File

@@ -2062,4 +2062,67 @@ LEFT JOIN (t1 a LEFT JOIN t1 b ON t1.i = b.i) ON c.i = t1.i);
1
1
DROP TABLE t1;
#
# MDEV-35206: Assertion in JOIN::dbug_verify_sj_inner_tables
#
SET @save_optimizer_join_limit_pref_ratio= @@optimizer_join_limit_pref_ratio;
SET @save_optimizer_search_depth= @@optimizer_search_depth;
CREATE TABLE t1 (c1 VARCHAR(64) DEFAULT NULL, c2 VARCHAR(8) DEFAULT NULL);
INSERT INTO t1 (c1) values ('one');
INSERT INTO t1 (c2) values ('2');
SET optimizer_join_limit_pref_ratio=10;
SET optimizer_search_depth=1;
SELECT
c1
FROM
t1
WHERE
c2 IN (SELECT c2
FROM t1
WHERE c1 IN (SELECT c1
FROM t1
WHERE c1 IN (NULL)
)
)
ORDER BY c1 LIMIT 1;
c1
DROP TABLE t1;
#
# similar issue with join::cur_embedding_map
#
CREATE TABLE t10 (a int, b int, index(b));
INSERT INTO t10 SELECT seq, seq FROM seq_1_to_10;
CREATE TABLE t11(a int, b int);
CREATE TABLE t12(a int, b int, index(b));
INSERT INTO t11 select seq, seq FROM seq_1_to_20;
INSERT INTO t12 select seq, seq FROM seq_1_to_40;
CREATE TABLE t13(a int, b int);
CREATE TABLE t14(a int, b int, index(b));
INSERT INTO t13 select seq, seq FROM seq_1_to_20;
INSERT INTO t14 select seq, seq FROM seq_1_to_40;
ANALYZE TABLE t10, t11, t12;
Table Op Msg_type Msg_text
test.t10 analyze status Engine-independent statistics collected
test.t10 analyze status Table is already up to date
test.t11 analyze status Engine-independent statistics collected
test.t11 analyze status OK
test.t12 analyze status Engine-independent statistics collected
test.t12 analyze status Table is already up to date
EXPLAIN SELECT *
FROM
t10 LEFT JOIN
(
t11 JOIN t12 ON t11.b=t12.b
left join (t13 join t14 on t13.b=t14.b) on t13.a=t11.a
) ON t10.a=t11.a
ORDER BY t10.b LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t10 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
1 SIMPLE t11 ALL NULL NULL NULL NULL 20 Using where
1 SIMPLE t12 ref b b 5 test.t11.b 1
1 SIMPLE t13 ALL NULL NULL NULL NULL 20 Using where
1 SIMPLE t14 ref b b 5 test.t13.b 1
DROP TABLE t10, t11, t12, t13, t14;
SET optimizer_join_limit_pref_ratio= @save_optimizer_join_limit_pref_ratio;
SET optimizer_search_depth= @save_optimizer_search_depth;
# end of 10.11 tests

View File

@@ -2,6 +2,7 @@
--disable_warnings
DROP TABLE IF EXISTS t0,t1,t2,t3,t4,t5,t6,t7,t8,t9;
--enable_warnings
--source include/have_sequence.inc
SET @save_optimizer_switch=@@optimizer_switch;
SET optimizer_switch=ifnull(@optimizer_switch_for_join_nested_test,'outer_join_with_cache=off');
@@ -1471,4 +1472,69 @@ SELECT 1 FROM t1 WHERE i IN
LEFT JOIN (t1 a LEFT JOIN t1 b ON t1.i = b.i) ON c.i = t1.i);
DROP TABLE t1;
--echo #
--echo # MDEV-35206: Assertion in JOIN::dbug_verify_sj_inner_tables
--echo #
SET @save_optimizer_join_limit_pref_ratio= @@optimizer_join_limit_pref_ratio;
SET @save_optimizer_search_depth= @@optimizer_search_depth;
CREATE TABLE t1 (c1 VARCHAR(64) DEFAULT NULL, c2 VARCHAR(8) DEFAULT NULL);
INSERT INTO t1 (c1) values ('one');
INSERT INTO t1 (c2) values ('2');
SET optimizer_join_limit_pref_ratio=10;
SET optimizer_search_depth=1;
SELECT
c1
FROM
t1
WHERE
c2 IN (SELECT c2
FROM t1
WHERE c1 IN (SELECT c1
FROM t1
WHERE c1 IN (NULL)
)
)
ORDER BY c1 LIMIT 1;
DROP TABLE t1;
--echo #
--echo # similar issue with join::cur_embedding_map
--echo #
CREATE TABLE t10 (a int, b int, index(b));
INSERT INTO t10 SELECT seq, seq FROM seq_1_to_10;
CREATE TABLE t11(a int, b int);
CREATE TABLE t12(a int, b int, index(b));
INSERT INTO t11 select seq, seq FROM seq_1_to_20;
INSERT INTO t12 select seq, seq FROM seq_1_to_40;
CREATE TABLE t13(a int, b int);
CREATE TABLE t14(a int, b int, index(b));
INSERT INTO t13 select seq, seq FROM seq_1_to_20;
INSERT INTO t14 select seq, seq FROM seq_1_to_40;
ANALYZE TABLE t10, t11, t12;
EXPLAIN SELECT *
FROM
t10 LEFT JOIN
(
t11 JOIN t12 ON t11.b=t12.b
left join (t13 join t14 on t13.b=t14.b) on t13.a=t11.a
) ON t10.a=t11.a
ORDER BY t10.b LIMIT 1;
DROP TABLE t10, t11, t12, t13, t14;
SET optimizer_join_limit_pref_ratio= @save_optimizer_join_limit_pref_ratio;
SET optimizer_search_depth= @save_optimizer_search_depth;
--echo # end of 10.11 tests

View File

@@ -2071,6 +2071,69 @@ LEFT JOIN (t1 a LEFT JOIN t1 b ON t1.i = b.i) ON c.i = t1.i);
1
1
DROP TABLE t1;
#
# MDEV-35206: Assertion in JOIN::dbug_verify_sj_inner_tables
#
SET @save_optimizer_join_limit_pref_ratio= @@optimizer_join_limit_pref_ratio;
SET @save_optimizer_search_depth= @@optimizer_search_depth;
CREATE TABLE t1 (c1 VARCHAR(64) DEFAULT NULL, c2 VARCHAR(8) DEFAULT NULL);
INSERT INTO t1 (c1) values ('one');
INSERT INTO t1 (c2) values ('2');
SET optimizer_join_limit_pref_ratio=10;
SET optimizer_search_depth=1;
SELECT
c1
FROM
t1
WHERE
c2 IN (SELECT c2
FROM t1
WHERE c1 IN (SELECT c1
FROM t1
WHERE c1 IN (NULL)
)
)
ORDER BY c1 LIMIT 1;
c1
DROP TABLE t1;
#
# similar issue with join::cur_embedding_map
#
CREATE TABLE t10 (a int, b int, index(b));
INSERT INTO t10 SELECT seq, seq FROM seq_1_to_10;
CREATE TABLE t11(a int, b int);
CREATE TABLE t12(a int, b int, index(b));
INSERT INTO t11 select seq, seq FROM seq_1_to_20;
INSERT INTO t12 select seq, seq FROM seq_1_to_40;
CREATE TABLE t13(a int, b int);
CREATE TABLE t14(a int, b int, index(b));
INSERT INTO t13 select seq, seq FROM seq_1_to_20;
INSERT INTO t14 select seq, seq FROM seq_1_to_40;
ANALYZE TABLE t10, t11, t12;
Table Op Msg_type Msg_text
test.t10 analyze status Engine-independent statistics collected
test.t10 analyze status Table is already up to date
test.t11 analyze status Engine-independent statistics collected
test.t11 analyze status OK
test.t12 analyze status Engine-independent statistics collected
test.t12 analyze status Table is already up to date
EXPLAIN SELECT *
FROM
t10 LEFT JOIN
(
t11 JOIN t12 ON t11.b=t12.b
left join (t13 join t14 on t13.b=t14.b) on t13.a=t11.a
) ON t10.a=t11.a
ORDER BY t10.b LIMIT 1;
id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t10 ALL NULL NULL NULL NULL 10 Using temporary; Using filesort
1 SIMPLE t11 hash_ALL NULL #hash#$hj 5 test.t10.a 20 Using where; Using join buffer (flat, BNLH join)
1 SIMPLE t12 ref b b 5 test.t11.b 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
1 SIMPLE t13 hash_ALL NULL #hash#$hj 5 test.t10.a 20 Using where; Using join buffer (incremental, BNLH join)
1 SIMPLE t14 ref b b 5 test.t13.b 1 Using join buffer (incremental, BKA join); Key-ordered Rowid-ordered scan
DROP TABLE t10, t11, t12, t13, t14;
SET optimizer_join_limit_pref_ratio= @save_optimizer_join_limit_pref_ratio;
SET optimizer_search_depth= @save_optimizer_search_depth;
# end of 10.11 tests
CREATE TABLE t5 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));
CREATE TABLE t6 (a int, b int, c int, PRIMARY KEY(a), KEY b_i (b));

View File

@@ -2911,4 +2911,22 @@ Z 1 Y 1 X 1
drop view v0, v1, v2, v3;
drop table t1, t2, t3;
# end of 10.3 tests
#
# MDEV-37653 Unexpected result of prepared statement when use boolean value as parameters
#
create table t0(c0 real);
create table t1 like t0;
insert into t1 values (1);
insert into t0 values (1);
select t1.c0, t0.c0 from t1 left join t0 on 0 where not (t0.c0 is true);
c0 c0
1 NULL
select t1.c0, t0.c0 from t1 left join t0 on 0 where not (t0.c0 is false);
c0 c0
1 NULL
select t1.c0, t0.c0 from t1 left join t0 on false where (false or ((t0.c0 is true) in (false)));
c0 c0
1 NULL
drop table t0, t1;
# end of 10.11 tests
SET optimizer_switch=@org_optimizer_switch;

View File

@@ -2434,4 +2434,18 @@ drop table t1, t2, t3;
--echo # end of 10.3 tests
--echo #
--echo # MDEV-37653 Unexpected result of prepared statement when use boolean value as parameters
--echo #
create table t0(c0 real);
create table t1 like t0;
insert into t1 values (1);
insert into t0 values (1);
select t1.c0, t0.c0 from t1 left join t0 on 0 where not (t0.c0 is true);
select t1.c0, t0.c0 from t1 left join t0 on 0 where not (t0.c0 is false);
select t1.c0, t0.c0 from t1 left join t0 on false where (false or ((t0.c0 is true) in (false)));
drop table t0, t1;
--echo # end of 10.11 tests
SET optimizer_switch=@org_optimizer_switch;

View File

@@ -2918,4 +2918,22 @@ Z 1 Y 1 X 1
drop view v0, v1, v2, v3;
drop table t1, t2, t3;
# end of 10.3 tests
#
# MDEV-37653 Unexpected result of prepared statement when use boolean value as parameters
#
create table t0(c0 real);
create table t1 like t0;
insert into t1 values (1);
insert into t0 values (1);
select t1.c0, t0.c0 from t1 left join t0 on 0 where not (t0.c0 is true);
c0 c0
1 NULL
select t1.c0, t0.c0 from t1 left join t0 on 0 where not (t0.c0 is false);
c0 c0
1 NULL
select t1.c0, t0.c0 from t1 left join t0 on false where (false or ((t0.c0 is true) in (false)));
c0 c0
1 NULL
drop table t0, t1;
# end of 10.11 tests
SET optimizer_switch=@org_optimizer_switch;

View File

@@ -2889,6 +2889,37 @@ a
1
DROP TABLE t1;
#
# MDEV-20498 Assertion `table_share->tmp_table != NO_TMP_TABLE || m_lock_type == 1' failed upon REBUILD PARTITION.
# ALTER TABLE DROP COLUMN, ADD COLUMN misplacing rows.
#
CREATE TABLE t1 (a INT)
PARTITION BY LIST (a) (
PARTITION p0 VALUES IN (8),
PARTITION p1 VALUES IN (4,7,0),
PARTITION p2 VALUES IN (9,2,5),
PARTITION p3 VALUES IN (3,1,6));
INSERT INTO t1 VALUES (8),(5),(4),(0);
ALTER TABLE t1 DROP a, ADD a INT NOT NULL DEFAULT 0, algorithm=inplace;
ERROR 0A000: ALGORITHM=INPLACE is not supported for this operation. Try ALGORITHM=COPY
ALTER TABLE t1 DROP a, ADD a INT NOT NULL DEFAULT 0;
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
DROP TABLE t1;
#
# MDEV-20498 Assertion `table_share->tmp_table != NO_TMP_TABLE || m_lock_type == 1' failed upon REBUILD PARTITION.
# ALTER TABLE REBUILD PARTITION crashed on misplaced rows.
#
CREATE TABLE mdev20498 (a INT) ENGINE=myisam PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0), PARTITION p1 VALUES IN (1));
INSERT INTO mdev20498 values (0), (0), (1), (1);
FLUSH TABLES;
CALL mtr.add_suppression("corrupted: row in wrong partition:");
ALTER TABLE mdev20498 REBUILD PARTITION p0;
ERROR HY000: Found a row in wrong partition (0 != 1) a:1
ALTER TABLE mdev20498 REBUILD PARTITION p0, p1;
DROP TABLE mdev20498;
#
# End of 10.6 tests
#
#

View File

@@ -3099,6 +3099,50 @@ INSERT INTO t1 VALUES (1),(2);
SELECT * FROM t1 WHERE a LIKE '1';
DROP TABLE t1;
--echo #
--echo # MDEV-20498 Assertion `table_share->tmp_table != NO_TMP_TABLE || m_lock_type == 1' failed upon REBUILD PARTITION.
--echo # ALTER TABLE DROP COLUMN, ADD COLUMN misplacing rows.
--echo #
CREATE TABLE t1 (a INT)
PARTITION BY LIST (a) (
PARTITION p0 VALUES IN (8),
PARTITION p1 VALUES IN (4,7,0),
PARTITION p2 VALUES IN (9,2,5),
PARTITION p3 VALUES IN (3,1,6));
INSERT INTO t1 VALUES (8),(5),(4),(0);
--error ER_ALTER_OPERATION_NOT_SUPPORTED
ALTER TABLE t1 DROP a, ADD a INT NOT NULL DEFAULT 0, algorithm=inplace;
ALTER TABLE t1 DROP a, ADD a INT NOT NULL DEFAULT 0;
CHECK TABLE t1;
DROP TABLE t1;
--echo #
--echo # MDEV-20498 Assertion `table_share->tmp_table != NO_TMP_TABLE || m_lock_type == 1' failed upon REBUILD PARTITION.
--echo # ALTER TABLE REBUILD PARTITION crashed on misplaced rows.
--echo #
CREATE TABLE mdev20498 (a INT) ENGINE=myisam PARTITION BY LIST (a)
(PARTITION p0 VALUES IN (0), PARTITION p1 VALUES IN (1));
INSERT INTO mdev20498 values (0), (0), (1), (1);
FLUSH TABLES;
let $datadir=`select @@datadir`;
CALL mtr.add_suppression("corrupted: row in wrong partition:");
move_file $datadir/test/mdev20498#P#p0.MYD $datadir/test//tmp.MYD;
move_file $datadir/test/mdev20498#P#p1.MYD $datadir/test/mdev20498#P#p0.MYD;
move_file $datadir/test/tmp.MYD $datadir/test/mdev20498#P#p1.MYD;
move_file $datadir/test/mdev20498#P#p0.MYI $datadir/test/tmp.MYI;
move_file $datadir/test/mdev20498#P#p1.MYI $datadir/test/mdev20498#P#p0.MYI;
move_file $datadir/test/tmp.MYI $datadir/test/mdev20498#P#p1.MYI;
--error ER_ROW_IN_WRONG_PARTITION
ALTER TABLE mdev20498 REBUILD PARTITION p0;
ALTER TABLE mdev20498 REBUILD PARTITION p0, p1;
DROP TABLE mdev20498;
--echo #
--echo # End of 10.6 tests
--echo #

View File

@@ -3,6 +3,6 @@ Win32 error code 150: System trace information was not specified in your CONFIG.
OS error code 23: Too many open files in system
Win32 error code 23: Data error (cyclic redundancy check).
MariaDB error code 1062 (ER_DUP_ENTRY): Duplicate entry '%-.192sT' for key %d
Learn more: https://mariadb.com/kb/en/e1062/
Learn more: https://mariadb.com/docs/server/reference/error-codes/mariadb-error-codes-1000-to-1099/e1062
Win32 error code 1062: The service has not been started.
Illegal error code: 30000

View File

@@ -1,10 +1,10 @@
Illegal error code: 10000
MariaDB error code 1062 (ER_DUP_ENTRY): Duplicate entry '%-.192sT' for key %d
Learn more: https://mariadb.com/kb/en/e1062/
Learn more: https://mariadb.com/docs/server/reference/error-codes/mariadb-error-codes-1000-to-1099/e1062
MariaDB error code 1408 (ER_STARTUP): %s: ready for connections.
Version: '%s' socket: '%s' port: %d %s
Learn more: https://mariadb.com/kb/en/e1408/
Learn more: https://mariadb.com/docs/server/reference/error-codes/mariadb-error-codes-1400-to-1499/e1408
MariaDB error code 1459 (ER_TABLE_NEEDS_UPGRADE): Upgrade required. Please do "REPAIR %s %sQ" or dump/reload to fix it!
Learn more: https://mariadb.com/kb/en/e1459/
Learn more: https://mariadb.com/docs/server/reference/error-codes/mariadb-error-codes-1400-to-1499/e1459
MariaDB error code 1461 (ER_MAX_PREPARED_STMT_COUNT_REACHED): Can't create more than max_prepared_stmt_count statements (current value: %u)
Learn more: https://mariadb.com/kb/en/e1461/
Learn more: https://mariadb.com/docs/server/reference/error-codes/mariadb-error-codes-1400-to-1499/e1461

View File

@@ -84,7 +84,7 @@ create database testdb;
use testdb;
create procedure p1 () select 1;
connect testuser,localhost,testuser,,;
select user,db from information_schema.processlist where user='root';
select user,db from information_schema.processlist where id=$default_id;
user db
call testdb.p1();
ERROR 42000: execute command denied to user 'testuser'@'%' for routine 'testdb.p1'
@@ -93,7 +93,7 @@ GRANT PROCESS ON *.* to PUBLIC;
GRANT EXECUTE ON testdb.* to PUBLIC;
disconnect testuser;
connect testuser,localhost,testuser,,;
select user,db from information_schema.processlist where user='root';
select user,db from information_schema.processlist where id=$default_id;
user db
root testdb
call testdb.p1();
@@ -104,7 +104,7 @@ disconnect testuser;
# check that the privileges are correctly read by acl_load
flush privileges;
connect testuser,localhost,testuser,,;
select user,db from information_schema.processlist where user='root';
select user,db from information_schema.processlist where id=$default_id;
user db
root testdb
call testdb.p1();

View File

@@ -78,10 +78,11 @@ create user testuser;
create database testdb;
use testdb;
create procedure p1 () select 1;
let $default_id= `select connection_id()`;
connect (testuser,localhost,testuser,,);
select user,db from information_schema.processlist where user='root';
evalp select user,db from information_schema.processlist where id=$default_id;
--error ER_PROCACCESS_DENIED_ERROR
call testdb.p1();
@@ -94,7 +95,7 @@ GRANT EXECUTE ON testdb.* to PUBLIC;
disconnect testuser;
connect (testuser,localhost,testuser,,);
select user,db from information_schema.processlist where user='root';
evalp select user,db from information_schema.processlist where id=$default_id;
call testdb.p1();
connection default;
@@ -105,7 +106,7 @@ flush privileges;
connect (testuser,localhost,testuser,,);
select user,db from information_schema.processlist where user='root';
evalp select user,db from information_schema.processlist where id=$default_id;
call testdb.p1();
connection default;

View File

@@ -10,9 +10,9 @@ eval create table t1 (a int) engine=myisam data directory='$MYSQL_TMP_DIR';
insert t1 values (1);
--system ln -s $MYSQL_TMP_DIR/foobar5543 $MYSQL_TMP_DIR/t1.TMD
# Some systems fail with errcode 31 (FreeBSD), 40 (Linux), 85 (AIX),
# or 90 (MIPS) when doing openat,
# 62 (macOS), or 90 (MIPS) when doing openat,
# while others don't have openat and fail with errcode 20.
--replace_regex / '.*\/t1/ 'MYSQL_TMP_DIR\/t1/ /[49]0|31|85/20/ /".*"/"<errmsg>"/
--replace_regex / '.*\/t1/ 'MYSQL_TMP_DIR\/t1/ /[49]0|31|85|62/20/ /".*"/"<errmsg>"/
repair table t1;
drop table t1;
@@ -20,7 +20,7 @@ drop table t1;
eval create table t2 (a int) engine=aria data directory='$MYSQL_TMP_DIR';
insert t2 values (1);
--system ln -s $MYSQL_TMP_DIR/foobar5543 $MYSQL_TMP_DIR/t2.TMD
--replace_regex / '.*\/t2/ 'MYSQL_TMP_DIR\/t2/ /[49]0|31|85/20/ /".*"/"<errmsg>"/
--replace_regex / '.*\/t2/ 'MYSQL_TMP_DIR\/t2/ /[49]0|31|85|62/20/ /".*"/"<errmsg>"/
repair table t2;
drop table t2;

View File

@@ -348,7 +348,6 @@ drop table t1;
--error ER_DBACCESS_DENIED_ERROR
drop database mysqltest;
disconnect con1;
--source include/wait_until_disconnected.inc
connect (con2,localhost,mysqltest_2,,"*NO-ONE*");
connection con2;
@@ -361,7 +360,6 @@ drop table mysqltest.t1;
--error ER_DBACCESS_DENIED_ERROR
drop database mysqltest;
disconnect con2;
--source include/wait_until_disconnected.inc
connect (con3,localhost,mysqltest_3,,"*NO-ONE*");
connection con3;
@@ -371,7 +369,6 @@ show create database mysqltest;
drop table mysqltest.t1;
drop database mysqltest;
disconnect con3;
--source include/wait_until_disconnected.inc
connection default;
set names binary;
@@ -1032,7 +1029,6 @@ connect (con4,localhost,mysqltest_4,,mysqltest);
connection con4;
show create database mysqltest;
disconnect con4;
--source include/wait_until_disconnected.inc
connection default;
delete from mysql.user where user='mysqltest_4';
delete from mysql.db where user='mysqltest_4';
@@ -1268,7 +1264,6 @@ SHOW ENGINE MYISAM STATUS;
--enable_result_log
disconnect conn1;
--source include/wait_until_disconnected.inc
connection default;
DROP USER test_u@localhost;
@@ -1292,7 +1287,6 @@ connection con1;
SHOW CREATE TABLE t1;
disconnect con1;
--source include/wait_until_disconnected.inc
connection default;
UNLOCK TABLES;
@@ -1364,7 +1358,6 @@ connection con1;
ALTER TABLE t1 CHARACTER SET = utf8;
disconnect con1;
--source include/wait_until_disconnected.inc
connection default;
COMMIT;
@@ -1376,6 +1369,9 @@ DROP TABLE t1;
--echo # Bug#57306 SHOW PROCESSLIST does not display string literals well.
--echo #
let $count_sessions= 1;
source include/wait_until_count_sessions.inc;
SET NAMES latin1;
SELECT GET_LOCK('t', 1000);
--connect (con1,localhost,root,,)
@@ -1398,7 +1394,6 @@ SELECT RELEASE_LOCK('t');
--connection con1
--reap
--disconnect con1
--source include/wait_until_disconnected.inc
--connection default
SET NAMES latin1;

View File

@@ -2745,6 +2745,17 @@ ORDER BY l_orderkey, l_linenumber;
l_orderkey l_linenumber l_quantity
DROP TABLE t1;
# end of 10.6 tests
#
# MDEV-30721: Assertion `fixed()' failed in Item_cond_and::val_bool()
# with degenerate JTBM semi-join
#
CREATE TABLE t1 (c INT KEY);
INSERT INTO t1 (c) VALUES (0);
SELECT * FROM t1 WHERE (0,0) IN (SELECT MAX(c),MIN(c) FROM t1);
c
0
DROP TABLE t1;
# end of 10.11 tests
set @subselect_mat_test_optimizer_switch_value=null;
set @@optimizer_switch='materialization=on,in_to_exists=off,semijoin=off';
set optimizer_switch='mrr=on,mrr_sort_keys=on,index_condition_pushdown=on';

View File

@@ -2771,3 +2771,14 @@ ORDER BY l_orderkey, l_linenumber;
l_orderkey l_linenumber l_quantity
DROP TABLE t1;
# end of 10.6 tests
#
# MDEV-30721: Assertion `fixed()' failed in Item_cond_and::val_bool()
# with degenerate JTBM semi-join
#
CREATE TABLE t1 (c INT KEY);
INSERT INTO t1 (c) VALUES (0);
SELECT * FROM t1 WHERE (0,0) IN (SELECT MAX(c),MIN(c) FROM t1);
c
0
DROP TABLE t1;
# end of 10.11 tests

View File

@@ -2463,3 +2463,20 @@ ORDER BY l_orderkey, l_linenumber;
DROP TABLE t1;
--echo # end of 10.6 tests
--echo #
--echo # MDEV-30721: Assertion `fixed()' failed in Item_cond_and::val_bool()
--echo # with degenerate JTBM semi-join
--echo #
CREATE TABLE t1 (c INT KEY);
INSERT INTO t1 (c) VALUES (0);
# This query triggered an assertion because the Item_cond_and created
# during condition merging in and_new_conditions_to_optimized_cond()
# was evaluated before being fixed.
SELECT * FROM t1 WHERE (0,0) IN (SELECT MAX(c),MIN(c) FROM t1);
DROP TABLE t1;
--echo # end of 10.11 tests

View File

@@ -0,0 +1,13 @@
CREATE TABLE t1 ( a double, key (a)) ;
INSERT INTO t1 VALUES (1),(2),(-3);
SELECT t1.a FROM ( SELECT a AS a1 FROM t1 ) dt
JOIN t1 ON a1 LIKE EXISTS ( SELECT a + RAND () FROM t1 UNION SELECT a FROM t1) ;
DROP TABLE t1;
CREATE TABLE t1 ( a VARCHAR(100), b bool) ;
INSERT INTO t1 VALUES ('-101',-87),('-95',59),(NULL,48);
SELECT
(SELECT 1 FROM (SELECT 1 HAVING rand() ) dt1
UNION
SELECT a FROM t1 WHERE b IN (SELECT a FROM t1) LIMIT 1)
FROM t1;
DROP TABLE t1;

View File

@@ -0,0 +1,28 @@
#
# MDEV-32397 join_read_first, keyread SEGV crash
#
CREATE TABLE t1 ( a double, key (a)) ;
INSERT INTO t1 VALUES (1),(2),(-3);
# We disable the result log because RAND() is unpredictable and seeding RAND
# doesn't make it stable when using the PS protocol.
--disable_result_log
SELECT t1.a FROM ( SELECT a AS a1 FROM t1 ) dt
JOIN t1 ON a1 LIKE EXISTS ( SELECT a + RAND () FROM t1 UNION SELECT a FROM t1) ;
--enable_result_log
DROP TABLE t1;
#
# MDEV-32403 test_if_quick_select: Segv
#
CREATE TABLE t1 ( a VARCHAR(100), b bool) ;
INSERT INTO t1 VALUES ('-101',-87),('-95',59),(NULL,48);
# We disable the result log because RAND() is unpredictable and seeding RAND
# doesn't make it stable when using the PS protocol.
--disable_result_log
SELECT
(SELECT 1 FROM (SELECT 1 HAVING rand() ) dt1
UNION
SELECT a FROM t1 WHERE b IN (SELECT a FROM t1) LIMIT 1)
FROM t1;
--enable_result_log
DROP TABLE t1;

View File

@@ -97,5 +97,13 @@ ERROR HY000: The value of gtid domain being deleted ('4294967296') exceeds its m
FLUSH BINARY LOGS DELETE_DOMAIN_ID = (4294967295);
Warnings:
Warning 1076 The gtid domain being deleted ('4294967295') is not in the current binlog state
RESET MASTER;
SET @@GLOBAL.gtid_domain_id=0;
SET @@SESSION.gtid_domain_id=0;
CREATE USER u;
FLUSH BINARY LOGS;
PURGE BINARY LOGS TO 'master-bin.000002';
FLUSH BINARY LOGS DELETE_DOMAIN_ID = (0), BINARY LOGS DELETE_DOMAIN_ID = (0);
DROP USER u;
DROP TABLE t;
RESET MASTER;

View File

@@ -0,0 +1,4 @@
[thread]
--thread-handling=one-thread-per-connection
[pool]
--loose-thread-handling=pool-of-threads

View File

@@ -7,6 +7,9 @@
--source include/have_log_bin.inc
--source include/have_binlog_format_row.inc
call mtr.add_suppression("Write to binary log failed: Multi-statement transaction required more than .max_binlog_cache_size.*");
if ($MTR_COMBINATION_POOL) {
--source include/have_pool_of_threads.inc
}
SET AUTOCOMMIT=0;
# Create 1st table

View File

@@ -160,6 +160,22 @@ RESET MASTER;
--error 0
--eval FLUSH BINARY LOGS DELETE_DOMAIN_ID = ($d_max)
#
# MDEV-37885
# Server crash or ASAN errors in rpl_binlog_state::drop_domain
#
RESET MASTER;
SET @@GLOBAL.gtid_domain_id=0;
SET @@SESSION.gtid_domain_id=0;
CREATE USER u;
FLUSH BINARY LOGS;
--let $purge_to_binlog= query_get_value(SHOW MASTER STATUS, File, 1)
--eval PURGE BINARY LOGS TO '$purge_to_binlog'
--error 0
FLUSH BINARY LOGS DELETE_DOMAIN_ID = (0), BINARY LOGS DELETE_DOMAIN_ID = (0);
DROP USER u;
#
# Cleanup
#

View File

@@ -437,3 +437,27 @@ Warnings:
Note 1544 Event execution time is in the past. Event has been disabled
drop event event_35981;
drop database events_test;
#
# MDEV-37744 Table Charset Mismatch (Primary/Replica) via Event
#
set global event_scheduler=1;
create schema andre default charset utf8mb4 collate utf8mb4_general_ci;
use andre;
create event daily_table_creation
on schedule every 1 day starts now() + interval 5 second do
create table andre_table (
id int(11) not null auto_increment primary key,
col_1 varchar(50) not null
);
show create table andre_table;
Table Create Table
andre_table CREATE TABLE `andre_table` (
`id` int(11) NOT NULL AUTO_INCREMENT,
`col_1` varchar(50) NOT NULL,
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci
set global event_scheduler=0;
drop event daily_table_creation;
drop schema andre;
use test;
# End of 10.11 tests

View File

@@ -526,3 +526,26 @@ let $wait_condition=
drop database events_test;
--enable_service_connection
--echo #
--echo # MDEV-37744 Table Charset Mismatch (Primary/Replica) via Event
--echo #
set global event_scheduler=1;
create schema andre default charset utf8mb4 collate utf8mb4_general_ci;
use andre;
create event daily_table_creation
on schedule every 1 day starts now() + interval 5 second do
create table andre_table (
id int(11) not null auto_increment primary key,
col_1 varchar(50) not null
);
let $wait_condition= select count(*)= 1 from information_schema.tables where table_name = 'andre_table';
source include/wait_condition.inc;
show create table andre_table;
set global event_scheduler=0;
drop event daily_table_creation;
drop schema andre;
use test;
--echo # End of 10.11 tests

View File

@@ -0,0 +1,66 @@
connection node_2;
connection node_1;
connection node_1;
connection node_2;
connection node_1;
SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true;pc.weight=2';
connection node_2;
SET GLOBAL read_only=1;
SET SESSION wsrep_trx_fragment_size = 64;
set default_storage_engine=SEQUENCE;
SET AUTOCOMMIT = OFF;
CREATE TABLE t1(c1 NUMERIC NOT NULL);
ERROR HY000: Can't create table `test`.`t1` (errno: 131 "Command not supported by the engine")
CREATE TABLE t1 (id INT PRIMARY KEY) engine=innodb;
SET SESSION SQL_MODE='HIGH_NOT_PRECEDENCE';
INSERT INTO t1 VALUES (1),(2),(3);
SET GLOBAL SQL_MODE='NO_ENGINE_SUBSTITUTION';
DROP TABLE dummy;
ERROR 42S02: Unknown table 'test.dummy'
INSERT INTO t1 VALUES('a');
Warnings:
Warning 1366 Incorrect integer value: 'a' for column `test`.`t1`.`id` at row 1
# In this point there should be one fragment
select count(*) AS EXPECT_1 from mysql.wsrep_streaming_log;
EXPECT_1
1
SET @@global.wsrep_cluster_address='gcomm://';
SET SESSION SQL_MODE='TRADITIONAL ';
# Killing cluster because we have messed with wsrep_cluster_address
connection node_2;
SET SESSION wsrep_sync_wait = 0;
Killing server ...
connection node_1;
SET SESSION wsrep_sync_wait = 0;
Killing server ...
connection node_2;
call mtr.add_suppression("WSREP: BF applier thread");
call mtr.add_suppression("WSREP: Event");
call mtr.add_suppression("WSREP: SR trx recovery applying returned 1290");
call mtr.add_suppression("WSREP: Adopting a transaction");
call mtr.add_suppression("WSREP: Could not find applier context for");
call mtr.add_suppression("WSREP: It may not be safe to bootstrap the cluster");
call mtr.add_suppression("WSREP: Failed to vote on request for");
select count(*) AS EXPECT_0 from mysql.wsrep_streaming_log;
EXPECT_0
0
SELECT * FROM t1;
c1
connection node_1;
select count(*) AS EXPECT_0 from mysql.wsrep_streaming_log;
EXPECT_0
0
SELECT * FROM t1;
c1
DROP TABLE t1;
SET GLOBAL wsrep_provider_options ='pc.ignore_sb=false;pc.weight=1';
connection node_2;
connection node_1;
call mtr.add_suppression("WSREP: Event");
call mtr.add_suppression("WSREP: Inconsistency detected: Inconsistent by consensus on");
call mtr.add_suppression("WSREP: Failed to apply write set: gtid");
call mtr.add_suppression("WSREP: It may not be safe to bootstrap the cluster");
call mtr.add_suppression("WSREP: Could not find applier context for");
call mtr.add_suppression("WSREP: Failed to vote on request for");
disconnect node_2;
disconnect node_1;

View File

@@ -0,0 +1,16 @@
connection node_2;
connection node_1;
#
# wsrep_provider_options should be already > 2k length
#
AS_EXPECT_1
1
#
# Setting single value should pass
#
SET GLOBAL wsrep_provider_options='pc.ignore_sb=false;pc.weight=2';
#
# This failed before change with ER_WRONG_STRING_LENGTH
#
disconnect node_2;
disconnect node_1;

View File

@@ -0,0 +1,100 @@
--source include/galera_cluster.inc
--source include/have_sequence.inc
# Save original auto_increment_offset values.
--let $node_1=node_1
--let $node_2=node_2
--source include/auto_increment_offset_save.inc
--connection node_1
SET GLOBAL wsrep_provider_options ='pc.ignore_sb=true;pc.weight=2';
--connection node_2
--let $wsrep_cluster_address_saved = `SELECT @@global.wsrep_cluster_address`
SET GLOBAL read_only=1;
SET SESSION wsrep_trx_fragment_size = 64;
set default_storage_engine=SEQUENCE;
SET AUTOCOMMIT = OFF;
--error 1005
CREATE TABLE t1(c1 NUMERIC NOT NULL);
CREATE TABLE t1 (id INT PRIMARY KEY) engine=innodb;
SET SESSION SQL_MODE='HIGH_NOT_PRECEDENCE';
INSERT INTO t1 VALUES (1),(2),(3);
SET GLOBAL SQL_MODE='NO_ENGINE_SUBSTITUTION';
--error 1051
DROP TABLE dummy;
INSERT INTO t1 VALUES('a');
--echo # In this point there should be one fragment
select count(*) AS EXPECT_1 from mysql.wsrep_streaming_log;
SET @@global.wsrep_cluster_address='gcomm://';
SET SESSION SQL_MODE='TRADITIONAL ';
#
# Kill the entire cluster and restart
#
--echo # Killing cluster because we have messed with wsrep_cluster_address
--connection node_2
SET SESSION wsrep_sync_wait = 0;
--source include/kill_galera.inc
--connection node_1
SET SESSION wsrep_sync_wait = 0;
--source include/kill_galera.inc
--remove_file $MYSQLTEST_VARDIR/mysqld.1/data/grastate.dat
--let $start_mysqld_params =--wsrep-new-cluster
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.1.expect
--source include/start_mysqld.inc
--source include/wait_until_ready.inc
--connection node_2
--remove_file $MYSQLTEST_VARDIR/mysqld.2/data/grastate.dat
--let $start_mysqld_params =
--let $_expect_file_name= $MYSQLTEST_VARDIR/tmp/mysqld.2.expect
--source include/start_mysqld.inc
--source include/wait_until_ready.inc
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
call mtr.add_suppression("WSREP: BF applier thread");
call mtr.add_suppression("WSREP: Event");
call mtr.add_suppression("WSREP: SR trx recovery applying returned 1290");
call mtr.add_suppression("WSREP: Adopting a transaction");
call mtr.add_suppression("WSREP: Could not find applier context for");
call mtr.add_suppression("WSREP: It may not be safe to bootstrap the cluster");
call mtr.add_suppression("WSREP: Failed to vote on request for");
select count(*) AS EXPECT_0 from mysql.wsrep_streaming_log;
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
SELECT * FROM t1;
--connection node_1
--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'
--source include/wait_condition.inc
--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
select count(*) AS EXPECT_0 from mysql.wsrep_streaming_log;
SELECT * FROM t1;
DROP TABLE t1;
SET GLOBAL wsrep_provider_options ='pc.ignore_sb=false;pc.weight=1';
--connection node_2
--let $wait_condition = SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 't1'
--source include/wait_condition.inc
--connection node_1
call mtr.add_suppression("WSREP: Event");
call mtr.add_suppression("WSREP: Inconsistency detected: Inconsistent by consensus on");
call mtr.add_suppression("WSREP: Failed to apply write set: gtid");
call mtr.add_suppression("WSREP: It may not be safe to bootstrap the cluster");
call mtr.add_suppression("WSREP: Could not find applier context for");
call mtr.add_suppression("WSREP: Failed to vote on request for");
# Restore original auto_increment_offset values.
--source include/auto_increment_offset_restore.inc
--source include/galera_end.inc

View File

@@ -0,0 +1,11 @@
!include ../galera_2nodes.cnf
[mysqld]
loose-galera-ssl-upgrade=1
wsrep-debug=1
[mysqld.1]
wsrep_provider_options='socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem;repl.causal_read_timeout=PT90S;base_port=@mysqld.1.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M'
[mysqld.2]
wsrep_provider_options='socket.ssl=yes;socket.ssl_cert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem;socket.ssl_key=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem;repl.causal_read_timeout=PT90S;base_port=@mysqld.2.#galera_port;evs.suspect_timeout=PT10S;evs.inactive_timeout=PT30S;evs.install_timeout=PT15S;pc.wait_prim_timeout=PT60S;gcache.size=10M'

View File

@@ -0,0 +1,28 @@
--source include/galera_cluster.inc
--source include/have_ssl_communication.inc
--source include/have_openssl.inc
--let $wsrep_provider_options_orig = `SELECT @@wsrep_provider_options`
--echo #
--echo # wsrep_provider_options should be already > 2k length
--echo #
--disable_query_log
--eval SELECT LENGTH('$wsrep_provider_options_orig') > 2000 AS_EXPECT_1;
--enable_query_log
--echo #
--echo # Setting single value should pass
--echo #
SET GLOBAL wsrep_provider_options='pc.ignore_sb=false;pc.weight=2';
--echo #
--echo # This failed before change with ER_WRONG_STRING_LENGTH
--echo #
--disable_query_log
--eval SET GLOBAL wsrep_provider_options = "$wsrep_provider_options_orig"
--enable_query_log
--source include/galera_end.inc

View File

@@ -0,0 +1,31 @@
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=0;
SET GLOBAL innodb_log_checkpoint_now=ON;
FLUSH TABLE t1 FOR EXPORT;
UNLOCK TABLES;
INSERT INTO t1 SET a=1;
FLUSH TABLE t1 FOR EXPORT;
UNLOCK TABLES;
# Skip InnoDB Doublewrite Buffer
Should not exist when summary excludes dblwr pages
Should exist when summary includes dblwr pages
# restart
CREATE TABLE ibd_1(f1 INT PRIMARY KEY)ENGINE=InnoDB;
INSERT INTO ibd_1 VALUES(1), (2), (3), (4);
SET GLOBAL innodb_file_per_table=0;
CREATE TABLE ibd_2(f1 INT PRIMARY KEY)ENGINE=InnoDB;
INSERT INTO ibd_2 SELECT seq FROM seq_1_to_8451;
# Pass wrong tablespace flag for ibdata2
FOUND 1 /Error: Page 0 checksum mismatch/ in result.log
# Pass wrong tablespace flag for ibdata1
FOUND 1 /Error: Mismatch between provided tablespace flags/ in result.log
# Pass invalid tablespace flag for ibdata1
FOUND 1 /Error: Provided --tablespace-flags is not valid/ in result.log
# innochecksum should be successful
NOT FOUND /Fail/ in result.log
# Create a tablespace with page number > 2^31
# Test innochecksum with the modified ibdata3
FOUND 1 /Error: First page of the tablespace file should be 0, but encountered page number 2147483649/ in result.log
# Test innochecksum with the modified ibdata3 with tablespace flags
FOUND 1 /Exceeded the maximum allowed checksum mismatch/ in result.log
# restart
DROP TABLE t1, ibd_1, ibd_2;

View File

@@ -44,9 +44,10 @@ innodb_system 77594624
# restart: --debug_dbug=d,unused_undo_free_fail_1
# Fail to free the segment due to previous shutdown
FOUND 1 /InnoDB: Cannot free the unused segments in system tablespace because a previous shutdown was not with innodb_fast_shutdown=0.*/ in mysqld.1.err
SELECT NAME, FILE_SIZE FROM information_schema.innodb_sys_tablespaces WHERE SPACE = 0;
NAME FILE_SIZE
innodb_system 15728640
SELECT NAME, IF(file_size>=14680064,'ok',file_size)
FROM information_schema.innodb_sys_tablespaces WHERE SPACE = 0;
NAME IF(file_size>=14680064,'ok',file_size)
innodb_system ok
SET GLOBAL innodb_fast_shutdown= 0;
# Fail to free the segment while finding the used segments
# restart: --debug_dbug=d,unused_undo_free_fail_2

View File

@@ -0,0 +1,2 @@
--innodb_data_file_path=ibdata1:3M;ibdata2:1M:autoextend
--innodb_sys_tablespaces

View File

@@ -0,0 +1,179 @@
--source include/have_innodb.inc
--source include/not_embedded.inc
--source include/have_debug.inc
--source include/have_sequence.inc
let MYSQLD_DATADIR= `SELECT @@datadir`;
CREATE TABLE t1(a INT PRIMARY KEY) ENGINE=InnoDB STATS_PERSISTENT=0;
SET GLOBAL innodb_log_checkpoint_now=ON;
FLUSH TABLE t1 FOR EXPORT;
UNLOCK TABLES;
INSERT INTO t1 SET a=1;
FLUSH TABLE t1 FOR EXPORT;
UNLOCK TABLES;
let INDEX_ID= `select INDEX_ID FROM INFORMATION_SCHEMA.INNODB_SYS_INDEXES WHERE TABLE_ID = (SELECT TABLE_ID FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME="test/t1")`;
let FLAG= `SELECT flag FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES WHERE NAME="innodb_system"`;
let $shutdown_timeout=0;
--source include/shutdown_mysqld.inc
let $resultlog=$MYSQLTEST_VARDIR/tmp/result.log;
let $resultlog_1=$MYSQLTEST_VARDIR/tmp/result_1.log;
exec $INNOCHECKSUM -S -r $MYSQLD_DATADIR/ibdata1 > $resultlog;
--echo # Skip InnoDB Doublewrite Buffer
exec $INNOCHECKSUM -S $MYSQLD_DATADIR/ibdata1 > $resultlog_1;
perl;
use strict;
use warnings;
use File::Spec::Functions qw(catfile);
sub check_index_in_file {
my ($filename, $search_id, $file_desc, $expect_found) = @_;
my $found = 0;
open(my $fh, '<', $filename) or die "Could not open file '$filename': $!\n";
my $line_count = 0;
while (my $line = <$fh>) {
$line_count++;
chomp $line;
# Skip empty lines and header lines
next if $line =~ /^\s*$/;
next if $line =~ /index_id.*#pages/; # Skip header line
# Split the line by whitespace
my @fields = grep { $_ ne '' } split(/\s+/, $line);
next unless @fields; # Skip if no fields
# The first field is the index_id
my $current_id = $fields[0];
# Check if this is the ID we're looking for
if (defined $current_id && $current_id eq $search_id) {
$found = 1;
last;
}
}
close $fh;
if ($found != $expect_found) {
print "\n=== UNEXPECTED RESULT - SHOWING FILE CONTENTS ($file_desc) ===\n";
open($fh, '<', $filename) or die "Cannot reopen file: $!";
print while <$fh>;
close $fh;
print "=== END OF FILE ===\n\n";
}
}
my $search_id = $ENV{'INDEX_ID'};
# Define file paths
my $resultlog = catfile($ENV{'MYSQLTEST_VARDIR'}, 'tmp', 'result.log');
my $resultlog_1 = catfile($ENV{'MYSQLTEST_VARDIR'}, 'tmp', 'result_1.log');
# Check both files
print "Should not exist when summary excludes dblwr pages\n";
check_index_in_file($resultlog, $search_id, 'result.log', 0);
print "Should exist when summary includes dblwr pages\n";
check_index_in_file($resultlog_1, $search_id, 'result_1.log', 1);
EOF
remove_file $resultlog;
remove_file $resultlog_1;
# We expect a deterministic checksum error when we pass
# incorrect flags to innochecksum. The restart below is needed
# to make sure that all the pages are flushed.
--source include/start_mysqld.inc
CREATE TABLE ibd_1(f1 INT PRIMARY KEY)ENGINE=InnoDB;
INSERT INTO ibd_1 VALUES(1), (2), (3), (4);
disable_warnings;
SET GLOBAL innodb_file_per_table=0;
enable_warnings;
CREATE TABLE ibd_2(f1 INT PRIMARY KEY)ENGINE=InnoDB;
INSERT INTO ibd_2 SELECT seq FROM seq_1_to_8451;
let $shutdown_timeout=;
--source include/shutdown_mysqld.inc
--echo # Pass wrong tablespace flag for ibdata2
--error 1
exec $INNOCHECKSUM -S --tablespace_flags=39 $MYSQLD_DATADIR/ibdata2 > $resultlog 2>&1;
let SEARCH_FILE=$resultlog;
let SEARCH_PATTERN=Error: Page 0 checksum mismatch;
--source include/search_pattern_in_file.inc
remove_file $resultlog;
--echo # Pass wrong tablespace flag for ibdata1
--error 1
exec $INNOCHECKSUM -S --tablespace_flags=39 $MYSQLD_DATADIR/ibdata1 > $resultlog 2>&1;
let SEARCH_PATTERN=Error: Mismatch between provided tablespace flags;
--source include/search_pattern_in_file.inc
remove_file $resultlog;
--echo # Pass invalid tablespace flag for ibdata1
--error 1
exec $INNOCHECKSUM -S --tablespace_flags=89 $MYSQLD_DATADIR/ibdata1 > $resultlog 2>&1;
let SEARCH_PATTERN=Error: Provided --tablespace-flags is not valid;
--source include/search_pattern_in_file.inc
remove_file $resultlog;
--echo # innochecksum should be successful
exec $INNOCHECKSUM -S --tablespace_flags=$FLAG $MYSQLD_DATADIR/ibdata2 > $resultlog 2>&1;
let SEARCH_PATTERN=Fail;
--source include/search_pattern_in_file.inc
--echo # Create a tablespace with page number > 2^31
--let $ibdata3 = $MYSQLD_DATADIR/ibdata3
perl;
do "$ENV{MTR_SUITE_DIR}/include/crc32.pl";
my $polynomial = 0x82f63b78; # CRC-32C
my $ps = 16384; # Default InnoDB page size
my $page_num = 2147483649; # 2^31 + 1
my $filename = "$ENV{MYSQLD_DATADIR}/ibdata3";
# Create a zero-filled page
my $page = "\0" x $ps;
# Set page number (FIL_PAGE_OFFSET)
substr($page, 4, 4) = pack('NN', $page_num);
# Set a valid LSN (log sequence number)
my $lsn = 0x12345678;
substr($page, 16, 8) = pack('NN', $lsn & 0xFFFFFFFF, $lsn >> 32);
# Calculate the checksum
my $checksum = pack('N',
mycrc32(substr($page, 4, 22), 0, $polynomial) ^
mycrc32(substr($page, 38, $ps - 38 - 8), 0, $polynomial)
);
# Write the checksum at the beginning (FIL_PAGE_SPACE_OR_CHKSUM)
# and at the end (FIL_PAGE_END_LSN_OLD_CHKSUM) of the page
substr($page, 0, 4) = $checksum;
substr($page, $ps - 8, 4) = $checksum;
# Write the page to file
open(my $fh, '>', $filename) or die "Could not open file $filename: $!";
binmode $fh;
print $fh $page;
close $fh;
EOF
--echo # Test innochecksum with the modified ibdata3
--error 1
exec $INNOCHECKSUM -S $ibdata3 > $resultlog 2>&1;
let SEARCH_PATTERN=Error: First page of the tablespace file should be 0, but encountered page number 2147483649;
--source include/search_pattern_in_file.inc
remove_file $resultlog;
--echo # Test innochecksum with the modified ibdata3 with tablespace flags
--error 1
exec $INNOCHECKSUM -S --tablespace_flags=$FLAG $ibdata3 > $resultlog 2>&1;
let SEARCH_PATTERN=Exceeded the maximum allowed checksum mismatch;
--source include/search_pattern_in_file.inc
remove_file $ibdata3;
--source include/start_mysqld.inc
DROP TABLE t1, ibd_1, ibd_2;
remove_file $resultlog;
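The Perl block above hard-codes the FIL page header offsets it touches (0 = FIL_PAGE_SPACE_OR_CHKSUM, 4 = FIL_PAGE_OFFSET, 16 = FIL_PAGE_LSN, 38 = FIL_PAGE_DATA, page size minus 8 = FIL_PAGE_END_LSN_OLD_CHKSUM). As a reading aid only, not part of the test, here is a minimal Python sketch of the same crc32 page checksum it computes:

# Reading aid, not part of the test: the classic InnoDB crc32 page checksum
# as computed by the Perl block above (two CRC-32C values XORed together).
PAGE_SIZE = 16384

def crc32c(data, crc=0):
    # Bitwise CRC-32C (Castagnoli), reflected polynomial 0x82F63B78.
    crc ^= 0xFFFFFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = (crc >> 1) ^ (0x82F63B78 if crc & 1 else 0)
    return crc ^ 0xFFFFFFFF

def page_checksum(page):
    # Bytes 4..25 cover FIL_PAGE_OFFSET up to and including FIL_PAGE_TYPE;
    # bytes 38..PAGE_SIZE-8 cover the page body up to the 8-byte trailer.
    return crc32c(page[4:26]) ^ crc32c(page[38:PAGE_SIZE - 8])

The resulting value is what the Perl block stores both at offset 0 and in the first four bytes of the page trailer.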

View File

@@ -54,7 +54,8 @@ let $shutdown_timeout=;
let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err;
--source include/search_pattern_in_file.inc
SELECT NAME, FILE_SIZE FROM information_schema.innodb_sys_tablespaces WHERE SPACE = 0;
SELECT NAME, IF(file_size>=14680064,'ok',file_size)
FROM information_schema.innodb_sys_tablespaces WHERE SPACE = 0;
SET GLOBAL innodb_fast_shutdown= 0;
--echo # Fail to free the segment while finding the used segments

View File

@@ -1,10 +1,14 @@
call mtr.add_suppression("InnoDB: Table '.*' tablespace is set as discarded.");
call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded.");
call mtr.add_suppression("\\[Warning\\] option 'innodb-ft-min-token-size': unsigned value 0 adjusted to 1");
CREATE TABLE mdev21563(f1 VARCHAR(100), FULLTEXT idx(f1))ENGINE=InnoDB;
set debug_dbug="+d,fts_instrument_sync_request";
INSERT INTO mdev21563 VALUES('This is a test');
ALTER TABLE mdev21563 DISCARD TABLESPACE;
# restart
# restart: --innodb_ft_min_token_size=0
SELECT @@global.innodb_ft_min_token_size;
@@global.innodb_ft_min_token_size
1
DROP TABLE mdev21563;
#
# MDEV-29342 Assertion failure in file que0que.cc line 728

View File

@@ -3,12 +3,15 @@
--source include/not_embedded.inc
call mtr.add_suppression("InnoDB: Table '.*' tablespace is set as discarded.");
call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded.");
call mtr.add_suppression("\\[Warning\\] option 'innodb-ft-min-token-size': unsigned value 0 adjusted to 1");
CREATE TABLE mdev21563(f1 VARCHAR(100), FULLTEXT idx(f1))ENGINE=InnoDB;
set debug_dbug="+d,fts_instrument_sync_request";
INSERT INTO mdev21563 VALUES('This is a test');
ALTER TABLE mdev21563 DISCARD TABLESPACE;
--let $restart_parameters=--innodb_ft_min_token_size=0
--source include/restart_mysqld.inc
SELECT @@global.innodb_ft_min_token_size;
DROP TABLE mdev21563;
--echo #
@@ -21,6 +24,7 @@ set debug_dbug="+d,fts_instrument_sync_request";
INSERT INTO t1 VALUES('test');
set debug_dbug=@save_dbug;
INSERT INTO t1 VALUES('This is a fts issue');
let $restart_parameters=;
--source include/restart_mysqld.inc
set debug_dbug="+d,fts_instrument_sync_request";
UPDATE t1 SET f1="mariadb";

View File

@@ -1655,3 +1655,8 @@ DROP TABLE t1;
#
# End of 10.1 tests
#
CREATE TABLE t1 (c POINT NOT NULL,SPATIAL (c));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT ('POINT(1 0)'));
UPDATE t1 SET c='';
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
DROP TABLE t1;

View File

@@ -14,6 +14,12 @@ INSERT INTO t1 SELECT * from t1;
INSERT INTO t1 SELECT * from t1;
INSERT INTO t1 SELECT * from t1;
INSERT INTO t1 SELECT * from t1;
CREATE TABLE t2 (j LONGBLOB) ENGINE = InnoDB
ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
CREATE TABLE t3 (j LONGBLOB) ENGINE = InnoDB
ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
CREATE TABLE t4(c1 INT PRIMARY KEY,c2 VARCHAR(20),
INDEX(c2(10))) ENGINE=InnoDB;
# stop the server
Variables (--variable-name=value)
@@ -35,6 +41,7 @@ log (No default value)
leaf FALSE
merge 0
skip-freed-pages FALSE
tablespace-flags 4294967295
[1]:# check the both short and long options for "help"
[2]:# Run the innochecksum when file isn't provided.
# It will print the innochecksum usage similar to --help option.
@@ -69,6 +76,9 @@ See https://mariadb.com/kb/en/library/innochecksum/ for usage hints.
pages
-r, --skip-freed-pages
skip freed pages for the tablespace
--tablespace-flags=#
InnoDB tablespace flags (default: 4294967295 = read from
page 0)
Variables (--variable-name=value)
and boolean options {FALSE|TRUE} Value (after reading options)
@@ -88,10 +98,12 @@ log (No default value)
leaf FALSE
merge 0
skip-freed-pages FALSE
tablespace-flags 4294967295
[3]:# check the both short and long options for "count" and exit
Number of pages:#
Number of pages:#
[4]:# Print the version of innochecksum and exit
innochecksum from #.#.#-MariaDB, client #.# for OS (ARCH)
[5]:# check t1.ibd, t2.ibd, t3.ibd and t4.ibd summary with and without tablespace_flags
# Restart the DB server
DROP TABLE t1;
DROP TABLE t4, t3, t2, t1;

View File

@@ -140,6 +140,7 @@ log (No default value)
leaf FALSE
merge 0
skip-freed-pages FALSE
tablespace-flags 4294967295
[5]: Page type dump for with shortform for tab1.ibd

View File

@@ -1 +1,2 @@
--skip-innodb-doublewrite
--innodb_sys_tablespaces

View File

@@ -25,6 +25,23 @@ while ($i > 0) {
dec $i;
}
CREATE TABLE t2 (j LONGBLOB) ENGINE = InnoDB
ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=2;
CREATE TABLE t3 (j LONGBLOB) ENGINE = InnoDB
ROW_FORMAT=COMPRESSED KEY_BLOCK_SIZE=4;
CREATE TABLE t4(c1 INT PRIMARY KEY,c2 VARCHAR(20),
INDEX(c2(10))) ENGINE=InnoDB;
let FLAG= `SELECT flag FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES WHERE NAME="test/t1"`;
let FLAG2= `SELECT flag FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES WHERE NAME="test/t2"`;
let FLAG3= `SELECT flag FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES WHERE NAME="test/t3"`;
let FLAG4= `SELECT flag FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESPACES WHERE NAME="test/t4"`;
--echo # stop the server
--source include/shutdown_mysqld.inc
@@ -81,9 +98,23 @@ EOF
--echo [4]:# Print the version of innochecksum and exit
--replace_regex /for \S+/for OS/ /\d+/#/ /#[-_A-Za-z0-9]*-MariaDB,/#-MariaDB,/ /\(.*\)/(ARCH)/ /^.*innochecksum(\.exe)?/innochecksum/
--exec $INNOCHECKSUM -V $MYSQLD_DATADIR/test/t1.ibd
--exec $INNOCHECKSUM -V
--echo [5]:# check t1.ibd, t2.ibd, t3.ibd and t4.ibd summary with and without tablespace_flags
let $resultlog=$MYSQLTEST_VARDIR/tmp/result.log;
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t1.ibd -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t2.ibd -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t3.ibd -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t4.ibd -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t1.ibd --tablespace_flags=$FLAG -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t2.ibd --tablespace_flags=$FLAG2 -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t3.ibd --tablespace_flags=$FLAG3 -s 3 -e 5 > $resultlog
--exec $INNOCHECKSUM -S $MYSQLD_DATADIR/test/t4.ibd --tablespace_flags=$FLAG4 -s 3 -e 5 > $resultlog
remove_file $resultlog;
--echo # Restart the DB server
--source include/start_mysqld.inc
DROP TABLE t1;
DROP TABLE t4, t3, t2, t1;

View File

@@ -373,7 +373,7 @@ id intcol
3 5678
4 5678
Warnings:
Warning 1366 Incorrect integer value: 'asd' for column ``.`(temporary)`.`intcol` at row 1
Warning 1366 Incorrect integer value: 'asd' for column `*any*`.`json_table`.`intcol` at row 1
SELECT COUNT(*) FROM JSON_TABLE('[1, 2]', '$[*]' COLUMNS( I INT PATH '$')) tt;
COUNT(*)
2
@@ -931,7 +931,7 @@ id intcol
3 5678
4 5678
Warnings:
Warning 1366 Incorrect integer value: 'asd' for column ``.`(temporary)`.`intcol` at row 1
Warning 1366 Incorrect integer value: 'asd' for column `*any*`.`json_table`.`intcol` at row 1
#
# MDEV-25377 JSON_TABLE: Wrong value with implicit conversion.
#
@@ -941,7 +941,7 @@ converted original
1 1
127 1000
Warnings:
Warning 1366 Incorrect integer value: 'foo' for column ``.`(temporary)`.`converted` at row 1
Warning 1366 Incorrect integer value: 'foo' for column `*any*`.`json_table`.`converted` at row 1
Warning 1264 Out of range value for column 'converted' at row 3
select * from json_table('{"a":"foo", "b":1, "c":1000}', '$.*' columns(converted tinyint path '$', original text path '$')) as jt order by converted;
converted original
@@ -949,9 +949,9 @@ converted original
1 1
127 1000
Warnings:
Warning 1366 Incorrect integer value: 'foo' for column ``.`(temporary)`.`converted` at row 1
Warning 1366 Incorrect integer value: 'foo' for column `*any*`.`json_table`.`converted` at row 1
Warning 1264 Out of range value for column 'converted' at row 1
Warning 1366 Incorrect integer value: 'foo' for column ``.`(temporary)`.`converted` at row 1
Warning 1366 Incorrect integer value: 'foo' for column `*any*`.`json_table`.`converted` at row 1
Warning 1264 Out of range value for column 'converted' at row 3
select * from json_table('{"a":"foo", "b":1, "c":1000}', '$.*' columns(converted tinyint path '$', original text path '$')) as jt order by original;
converted original
@@ -960,7 +960,7 @@ converted original
0 foo
Warnings:
Warning 1264 Out of range value for column 'converted' at row 2
Warning 1366 Incorrect integer value: 'foo' for column ``.`(temporary)`.`converted` at row 3
Warning 1366 Incorrect integer value: 'foo' for column `*any*`.`json_table`.`converted` at row 3
select * from
json_table('[{"color": "blue", "price": { "high": 10, "low": 5}},
{"color": "white", "price": "pretty low"},
@@ -1218,6 +1218,22 @@ ERROR 42000: You have an error in your SQL syntax; check the manual that corresp
# End of 10.9 tests
#
#
# MDEV-27898 CREATE VIEW AS SELECT FROM JSON_TABLE column requires global privileges
#
create view v1 as (select * from
json_table('[{"a":"1"}]', '$[*]' columns(a int path '$.a') ) as jt);
create user u1@localhost;
grant ALL on test.* to u1@localhost;
connect con1,localhost,u1,,test;
create view v2 as
(select * from json_table('[{"a":"1"}]', '$[*]' columns(a int path '$.a') ) as jt);
disconnect con1;
connection default;
DROP VIEW v2;
DROP VIEW v1;
DROP USER u1@localhost;
# End of 10.11 tests
#
# MDEV-29390: Improve coverage for UPDATE and DELETE statements in MTR test suites
#
# Multi-update with JSON_TABLE

View File

@@ -62,8 +62,8 @@ id jpath_i jpath_r jsn_path jexst
4 0 0.33 0.33 0
5 0 0 "asd" 0
Warnings:
Warning 1366 Incorrect integer value: 'asd' for column ``.`(temporary)`.`jpath_i` at row 5
Warning 1366 Incorrect double value: 'asd' for column ``.`(temporary)`.`jpath_r` at row 5
Warning 1366 Incorrect integer value: 'asd' for column `*any*`.`json_table`.`jpath_i` at row 5
Warning 1366 Incorrect double value: 'asd' for column `*any*`.`json_table`.`jpath_r` at row 5
select * from
json_table(
'[{"x":"3"},{"a":2},{"b":1},{"a":0},{"a":[1,2]}]',
@@ -410,7 +410,7 @@ SELECT * FROM JSON_TABLE('"asdf"',
a
0
Warnings:
Warning 1366 Incorrect integer value: 'asdf' for column ``.`(temporary)`.`a` at row 1
Warning 1366 Incorrect integer value: 'asdf' for column `*any*`.`json_table`.`a` at row 1
SELECT * FROM
JSON_TABLE('[{"a":1},{"a":2}]',
'$' COLUMNS (a INT PATH '$[*].a' ERROR ON ERROR)) AS jt;
@@ -547,9 +547,9 @@ tm dt i f d
Warnings:
Warning 1265 Data truncated for column 'tm' at row 1
Warning 1265 Data truncated for column 'dt' at row 1
Warning 1366 Incorrect integer value: 'asdf' for column ``.`(temporary)`.`i` at row 1
Warning 1366 Incorrect double value: 'asdf' for column ``.`(temporary)`.`f` at row 1
Warning 1366 Incorrect decimal value: 'asdf' for column ``.`(temporary)`.`d` at row 1
Warning 1366 Incorrect integer value: 'asdf' for column `*any*`.`json_table`.`i` at row 1
Warning 1366 Incorrect double value: 'asdf' for column `*any*`.`json_table`.`f` at row 1
Warning 1366 Incorrect decimal value: 'asdf' for column `*any*`.`json_table`.`d` at row 1
SELECT * FROM
JSON_TABLE('{}', '$' COLUMNS (x INT PATH '$.x' DEFAULT NULL ON EMPTY)) jt;
x
@@ -865,8 +865,8 @@ col18
0.000
0.000
Warnings:
Warning 1366 Incorrect decimal value: 'asdf' for column ``.`(temporary)`.`col18` at row 1
Warning 1366 Incorrect decimal value: 'ghjk' for column ``.`(temporary)`.`col18` at row 2
Warning 1366 Incorrect decimal value: 'asdf' for column `*any*`.`json_table`.`col18` at row 1
Warning 1366 Incorrect decimal value: 'ghjk' for column `*any*`.`json_table`.`col18` at row 2
CREATE TABLE t1(jd JSON);
INSERT INTO t1 VALUES('["asdf"]'),('["ghjk"]');
SELECT * FROM t1,
@@ -878,8 +878,8 @@ jd col18
["asdf"] 0.000
["ghjk"] 0.000
Warnings:
Warning 1366 Incorrect decimal value: 'asdf' for column ``.`(temporary)`.`col18` at row 1
Warning 1366 Incorrect decimal value: 'ghjk' for column ``.`(temporary)`.`col18` at row 1
Warning 1366 Incorrect decimal value: 'asdf' for column `*any*`.`json_table`.`col18` at row 1
Warning 1366 Incorrect decimal value: 'ghjk' for column `*any*`.`json_table`.`col18` at row 1
DROP TABLE t1;
#
# Bug#25540027: SIG 11 IN FIND_FIELD_IN_TABLE | SQL/SQL_BASE.CC

View File

@@ -1045,6 +1045,29 @@ COLUMNS
--echo # End of 10.9 tests
--echo #
--echo #
--echo # MDEV-27898 CREATE VIEW AS SELECT FROM JSON_TABLE column requires global privileges
--echo #
create view v1 as (select * from
json_table('[{"a":"1"}]', '$[*]' columns(a int path '$.a') ) as jt);
create user u1@localhost;
grant ALL on test.* to u1@localhost;
--connect (con1,localhost,u1,,test)
create view v2 as
(select * from json_table('[{"a":"1"}]', '$[*]' columns(a int path '$.a') ) as jt);
disconnect con1;
connection default;
DROP VIEW v2;
DROP VIEW v1;
DROP USER u1@localhost;
--echo # End of 10.11 tests
--echo #
--echo # MDEV-29390: Improve coverage for UPDATE and DELETE statements in MTR test suites
--echo #

View File

@@ -1484,3 +1484,8 @@ COUNT(*)
2
DROP TABLE t1;
End of 5.0 tests.
CREATE TABLE t1 (c POINT NOT NULL,SPATIAL (c));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT ('POINT(1 0)'));
UPDATE t1 SET c='';
ERROR 22003: Cannot get geometry object from data you send to the GEOMETRY field
DROP TABLE t1;

View File

@@ -886,3 +886,13 @@ SELECT COUNT(*) FROM t1 IGNORE INDEX (b) WHERE
DROP TABLE t1;
--echo End of 5.0 tests.
#
# Bug #31766 SIGSEGV in maria_rtree_split_page | maria_rtree_add_key
#
CREATE TABLE t1 (c POINT NOT NULL,SPATIAL (c));
INSERT INTO t1 VALUES (ST_GEOMFROMTEXT ('POINT(1 0)'));
--error ER_CANT_CREATE_GEOMETRY_OBJECT
UPDATE t1 SET c='';
DROP TABLE t1;

View File

@@ -2,3 +2,18 @@ CREATE TABLE t (a INT, KEY(a)) ENGINE=Aria PARTITION BY KEY(a) PARTITIONS 2;
SELECT * FROM t PARTITION (p1);
a
DROP TABLE t;
CREATE TABLE t (a INT KEY) ENGINE=Aria PARTITION BY LIST (a) (
PARTITION p0 VALUES IN (1,2),
PARTITION p1 VALUES IN (3,4)
);
CREATE TABLE t1 (a INT KEY) ENGINE=Aria;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (3);
Swap table t1 and partition p0 of table t.
FLUSH TABLES;
REPAIR TABLE t;
Table Op Msg_type Msg_text
test.t repair warning Moved 1 misplaced rows
test.t repair status OK
DROP TABLE t;
DROP TABLE t1;

View File

@@ -9,3 +9,37 @@
CREATE TABLE t (a INT, KEY(a)) ENGINE=Aria PARTITION BY KEY(a) PARTITIONS 2;
SELECT * FROM t PARTITION (p1);
DROP TABLE t;
#
# MDEV-33190
# Errors or assertion failure upon REPAIR on partitioned Aria table with misplaced rows
#
CREATE TABLE t (a INT KEY) ENGINE=Aria PARTITION BY LIST (a) (
PARTITION p0 VALUES IN (1,2),
PARTITION p1 VALUES IN (3,4)
);
CREATE TABLE t1 (a INT KEY) ENGINE=Aria;
INSERT INTO t1 VALUES (1);
INSERT INTO t1 VALUES (3);
--echo Swap table t1 and partition p0 of table t.
FLUSH TABLES;
let $datadir=`select @@datadir`;
move_file $datadir/test/t1.MAD $datadir/test/tmp.MAD;
move_file $datadir/test/t1.MAI $datadir/test/tmp.MAI;
move_file $datadir/test/t#P#p0.MAD $datadir/test/t1.MAD;
move_file $datadir/test/t#P#p0.MAI $datadir/test/t1.MAI;
move_file $datadir/test/tmp.MAD $datadir/test/t#P#p0.MAD;
move_file $datadir/test/tmp.MAI $datadir/test/t#P#p0.MAI;
REPAIR TABLE t;
DROP TABLE t;
DROP TABLE t1;

View File

@@ -3928,6 +3928,46 @@ drop table tm, t;
# End of 10.8 tests
#
#
# MDEV-35816 ASAN use-after-poison in st_select_lex::print
#
CREATE TABLE t1 (a INT);
INSERT INTO t1 (a) VALUES (1),(2),(3),(4),(5);
SET SESSION optimizer_trace = 'enabled=on';
PREPARE stmt FROM 'SELECT STRAIGHT_JOIN * FROM t1 WHERE a IN (WITH cte AS (SELECT a FROM t1) SELECT * FROM cte)';
EXECUTE stmt;
a
1
2
3
4
5
EXECUTE stmt;
a
1
2
3
4
5
PREPARE nested FROM 'SELECT STRAIGHT_JOIN * FROM t1 WHERE a IN (WITH cte AS (WITH cte2 AS (SELECT a FROM t1) SELECT * from cte2) SELECT * FROM cte)';
EXECUTE nested;
a
1
2
3
4
5
EXECUTE nested;
a
1
2
3
4
5
DROP TABLE t1;
#
# End of 10.11 tests
#
#
# MDEV-30088 Assertion `cond_selectivity <= 1.0' failed in get_range_limit_read_cost
#
CREATE TABLE t1 (a TIMESTAMP, KEY(a)) ENGINE=MRG_MyISAM;
@@ -3992,4 +4032,3 @@ UPDATE v1 SET a=0;
DROP VIEW v1;
DROP TABLE t1;
# End of 11.1 tests
ALTER DATABASE test CHARACTER SET utf8mb4 COLLATE utf8mb4_uca1400_ai_ci;

View File

@@ -2890,6 +2890,24 @@ drop table tm, t;
--echo # End of 10.8 tests
--echo #
--echo #
--echo # MDEV-35816 ASAN use-after-poison in st_select_lex::print
--echo #
CREATE TABLE t1 (a INT);
INSERT INTO t1 (a) VALUES (1),(2),(3),(4),(5);
SET SESSION optimizer_trace = 'enabled=on';
PREPARE stmt FROM 'SELECT STRAIGHT_JOIN * FROM t1 WHERE a IN (WITH cte AS (SELECT a FROM t1) SELECT * FROM cte)';
EXECUTE stmt;
EXECUTE stmt;
PREPARE nested FROM 'SELECT STRAIGHT_JOIN * FROM t1 WHERE a IN (WITH cte AS (WITH cte2 AS (SELECT a FROM t1) SELECT * from cte2) SELECT * FROM cte)';
EXECUTE nested;
EXECUTE nested;
DROP TABLE t1;
--echo #
--echo # End of 10.11 tests
--echo #
--echo #
--echo # MDEV-30088 Assertion `cond_selectivity <= 1.0' failed in get_range_limit_read_cost
--echo #
@@ -2949,5 +2967,3 @@ DROP VIEW v1;
DROP TABLE t1;
--echo # End of 11.1 tests
--source include/test_db_charset_restore.inc

View File

@@ -0,0 +1,49 @@
include/master-slave.inc
[connection master]
#
# MDEV-37453 Parallel Replication Crash During Backup
#
connection master;
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE = innodb;
INSERT INTO t1 VALUES (1, 0);
INSERT INTO t1 VALUES (2, 0);
connection slave;
include/stop_slave.inc
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
SET @old_parallel_threads = @@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode;
SET @@global.slave_parallel_threads= 2;
SET @@global.slave_parallel_mode = 'optimistic';
connection master;
begin /* trx1 */;
delete from t1 where a = 1;
update t1 set b = 1 where a = 2;
commit;
begin /* trx2 */;
delete from t1 where a = 2;
commit;
connect aux_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
BEGIN;
DELETE FROM t1 WHERE a = 1;
connection slave;
include/start_slave.inc
connection aux_slave;
connect backup_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,;
BACKUP STAGE START;
BACKUP STAGE BLOCK_COMMIT;
connection aux_slave;
ROLLBACK;
connection aux_slave;
connection backup_slave;
BACKUP STAGE END;
connection slave;
include/diff_tables.inc [master:t1,slave:t1]
connection slave;
include/stop_slave.inc
SET @@global.slave_parallel_threads= @old_parallel_threads;
SET @@global.slave_parallel_mode = @old_parallel_mode;
include/start_slave.inc
connection server_1;
DROP TABLE t1;
include/rpl_end.inc
# End of the tests

View File

@@ -0,0 +1,99 @@
--source include/have_innodb.inc
--source include/have_binlog_format_mixed.inc
--source include/master-slave.inc
--echo #
--echo # MDEV-37453 Parallel Replication Crash During Backup
--echo #
# A transaction that retries after a parallel-replication conflict must be
# able to do so cleanly despite a possible "hard" BACKUP MDL lock in the way.
# The retrying transaction will complete successfully.
# The plot:
# Two transactions are run by two parallel workers. The 2nd (in binlog order)
# transaction depends on the 1st.
# 1. Block the 1st and let the 2nd reach
#    Waiting-for-Prior-Transaction-to-Commit (WfPTtC).
# 2. At this point issue the BACKUP commands; BLOCK_COMMIT will
#    later force the 2nd transaction to wait for the BACKUP MDL lock.
# 3. Release the locks to the 1st transaction, which kicks the 2nd out
#    of waiting-for-prior-commit only to fail the BACKUP MDL acquisition
#    attempt (it finds itself killed).
# 4. Finally, the proof of safety: the 2nd transaction retries successfully.
--connection master
CREATE TABLE t1 (a INT PRIMARY KEY, b INT) ENGINE = innodb;
INSERT INTO t1 VALUES (1, 0);
INSERT INTO t1 VALUES (2, 0);
--sync_slave_with_master
--source include/stop_slave.inc
ALTER TABLE mysql.gtid_slave_pos ENGINE=InnoDB;
SET @old_parallel_threads = @@GLOBAL.slave_parallel_threads;
SET @old_parallel_mode = @@GLOBAL.slave_parallel_mode;
SET @@global.slave_parallel_threads= 2;
SET @@global.slave_parallel_mode = 'optimistic';
--connection master
begin /* trx1 */;
delete from t1 where a = 1;
update t1 set b = 1 where a = 2;
commit;
begin /* trx2 */;
delete from t1 where a = 2;
commit;
--save_master_pos
--connect (aux_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,)
BEGIN;
# block the 1st worker and wait until the 2nd is ready to commit
DELETE FROM t1 WHERE a = 1;
--connection slave
--source include/start_slave.inc
--connection aux_slave
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
--source include/wait_condition.inc
# While the 1st worker is locked out, run the backup
--connect (backup_slave,127.0.0.1,root,,test,$SLAVE_MYPORT,)
BACKUP STAGE START;
BACKUP STAGE BLOCK_COMMIT;
# release the 1st worker
--connection aux_slave
let $status_var= Slave_retried_transactions;
let $status_var_value= query_get_value(SHOW STATUS LIKE '$status_var', Value, 1);
--sleep 1
ROLLBACK;
# that will kick the 2nd out of the current WfPTtC wait and into the next retry
--connection aux_slave
--let $wait_condition= SELECT COUNT(*) = 1 FROM information_schema.processlist WHERE state = "Waiting for prior transaction to commit"
--source include/wait_condition.inc
let $status_var_comparsion= >;
--source include/wait_for_status_var.inc
--connection backup_slave
BACKUP STAGE END;
--connection slave
--sync_with_master
--let $diff_tables= master:t1,slave:t1
--source include/diff_tables.inc
# Clean up.
--connection slave
--source include/stop_slave.inc
SET @@global.slave_parallel_threads= @old_parallel_threads;
SET @@global.slave_parallel_mode = @old_parallel_mode;
--source include/start_slave.inc
--connection server_1
DROP TABLE t1;
--source include/rpl_end.inc
--echo # End of the tests

View File

@@ -1,10 +1,10 @@
#
# test cleanup of sys_var classes
#
set global init_connect=".................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................";
ERROR HY000: String '......................................................................' is too long for init_connect (should be no longer than 2000)
set global ft_boolean_syntax=".................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................";
ERROR HY000: String '......................................................................' is too long for ft_boolean_syntax (should be no longer than 2000)
set global init_connect="..........................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
.......................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................";
ERROR HY000: String '......................................................................' is too long for init_connect (should be no longer than 4096)
set global ft_boolean_syntax=".....................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................
............................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................................";
ERROR HY000: String '......................................................................' is too long for ft_boolean_syntax (should be no longer than 4096)
#
# end of test mdev_15935
#

View File

@@ -794,7 +794,7 @@ DEFAULT_VALUE 3
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BIGINT UNSIGNED
VARIABLE_COMMENT InnoDB Fulltext search minimum token size in characters
NUMERIC_MIN_VALUE 0
NUMERIC_MIN_VALUE 1
NUMERIC_MAX_VALUE 16
NUMERIC_BLOCK_SIZE 0
ENUM_VALUE_LIST NULL

View File

@@ -1,25 +1,24 @@
SET @global_thread_cache_size = @@GLOBAL.thread_cache_size;
FLUSH STATUS;
'# Test1#'
SET @@GLOBAL.thread_cache_size=3;
SHOW STATUS LIKE 'Threads_cached';
Variable_name Value
Threads_cached 0
0 Expected
FLUSH THREADS;
select variable_value<=@@global.thread_cache_size from information_schema.global_status where variable_name="Threads_cached";
variable_value<=@@global.thread_cache_size
1
CONNECT conn1,localhost,root,,;
CONNECT conn2,localhost,root,,;
CONNECT conn3,localhost,root,,;
CONNECT conn4,localhost,root,,;
connection default;
SHOW STATUS LIKE 'Threads_cached';
Variable_name Value
Threads_cached 0
0 Expected
select variable_value<=@@global.thread_cache_size from information_schema.global_status where variable_name="Threads_cached";
variable_value<=@@global.thread_cache_size
1
disconnect conn1;
disconnect conn2;
disconnect conn3;
disconnect conn4;
SET @@GLOBAL.thread_cache_size= 1;
FLUSH THREADS;
CONNECT conn1,localhost,root,,;
CONNECT conn2,localhost,root,,;
connection default;

View File

@@ -2,7 +2,7 @@
--echo # test cleanup of sys_var classes
--echo #
--let $long_string=`select repeat('.', 2001)`
--let $long_string=`select repeat('.', 4097)`
--error ER_WRONG_STRING_LENGTH
eval set global init_connect="$long_string";
--error ER_WRONG_STRING_LENGTH

View File

@@ -31,12 +31,10 @@
--source include/one_thread_per_connection.inc
SET @global_thread_cache_size = @@GLOBAL.thread_cache_size;
FLUSH STATUS;
-- ECHO '# Test1#'
SET @@GLOBAL.thread_cache_size=3;
SHOW STATUS LIKE 'Threads_cached';
--echo 0 Expected
FLUSH THREADS;
select variable_value<=@@global.thread_cache_size from information_schema.global_status where variable_name="Threads_cached";
##################################
# Make 4 connections #
@@ -53,8 +51,7 @@ CONNECTION default;
let $wait_condition= SELECT COUNT(*)= 5 FROM INFORMATION_SCHEMA.PROCESSLIST;
--source include/wait_condition.inc
SHOW STATUS LIKE 'Threads_cached';
--echo 0 Expected
select variable_value<=@@global.thread_cache_size from information_schema.global_status where variable_name="Threads_cached";
####################################
#Disconnecting all the connections #
@@ -81,6 +78,7 @@ let $wait_condition= SELECT variable_value = 3 FROM INFORMATION_SCHEMA.global_st
# Decreasing cache size to 1
#
SET @@GLOBAL.thread_cache_size= 1;
FLUSH THREADS;
CONNECT (conn1,localhost,root,,);
CONNECT (conn2,localhost,root,,);

View File

@@ -12,7 +12,7 @@ SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIK
'wsrep_provider_repl_proto_max',
'wsrep_provider_gmcast_listen_addr');
COUNT(*)
83
84
SELECT * FROM INFORMATION_SCHEMA.SYSTEM_VARIABLES
WHERE VARIABLE_NAME LIKE 'wsrep_provider_%' AND VARIABLE_NAME NOT IN (
'wsrep_provider',
@@ -507,6 +507,21 @@ ENUM_VALUE_LIST NULL
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
GLOBAL_VALUE_PATH NULL
VARIABLE_NAME WSREP_PROVIDER_GCS_CHECK_APPL_PROTO
SESSION_VALUE NULL
GLOBAL_VALUE ON
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE ON
VARIABLE_SCOPE GLOBAL
VARIABLE_TYPE BOOLEAN
VARIABLE_COMMENT Wsrep provider option
NUMERIC_MIN_VALUE NULL
NUMERIC_MAX_VALUE NULL
NUMERIC_BLOCK_SIZE NULL
ENUM_VALUE_LIST OFF,ON
READ_ONLY NO
COMMAND_LINE_ARGUMENT REQUIRED
GLOBAL_VALUE_PATH NULL
VARIABLE_NAME WSREP_PROVIDER_GCS_FC_DEBUG
SESSION_VALUE NULL
GLOBAL_VALUE 0

View File

@@ -1,7 +1,7 @@
--source include/have_wsrep.inc
--source include/have_innodb.inc
--let $galera_version=26.4.21
--let $galera_version=26.4.24
source include/check_galera_version.inc;
SELECT COUNT(*) FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_provider%' AND VARIABLE_NAME NOT IN (

View File

@@ -24,8 +24,8 @@ PLUGIN_TYPE DATA TYPE
PLUGIN_AUTHOR MariaDB Corporation
PLUGIN_DESCRIPTION Data type INET4
PLUGIN_LICENSE GPL
PLUGIN_MATURITY Gamma
PLUGIN_AUTH_VERSION 1.0
PLUGIN_MATURITY Stable
PLUGIN_AUTH_VERSION 1.0.1
#
# End of 10.10 tests
#

View File

@@ -188,8 +188,8 @@ maria_declare_plugin(type_inet)
0x0100, // Numeric version 0xAABB means AA.BB version
NULL, // Status variables
NULL, // System variables
"1.0", // String version representation
MariaDB_PLUGIN_MATURITY_GAMMA // Maturity(see include/mysql/plugin.h)*/
"1.0.1", // String version representation
MariaDB_PLUGIN_MATURITY_STABLE// Maturity(see include/mysql/plugin.h)*/
},
{
MariaDB_DATA_TYPE_PLUGIN, // the plugin type (see include/mysql/plugin.h)

View File

@@ -1,6 +1,6 @@
#!/usr/bin/env perl
#
# $Id: mytop,v 1.99-maria6 2019/10/22 14:53:51 jweisbuch Exp $
# $Id: mytop,v 1.99-maria8 2025/07/16 17:59:26 jweisbuch Exp $
=pod
@@ -21,7 +21,7 @@ use Socket;
use List::Util qw(min max);
use File::Basename;
$main::VERSION = "1.99-maria6";
$main::VERSION = "1.99-maria8";
my $path_for_script = dirname($0);
$| = 1;
@@ -258,6 +258,10 @@ if ($config{socket} and -S $config{socket})
{
$dsn .= "${prefix}_socket=$config{socket}";
}
elsif($config{host} eq "localhost")
{
$dsn .= "host=$config{host}";
}
else
{
$dsn .= "host=$config{host};port=$config{port}";

View File

@@ -1267,6 +1267,13 @@ check_sockets_utils()
lsof_available=0
sockstat_available=0
ss_available=0
raw_socket_check=0
if [ -n "$(commandex selinuxenabled)" ] && selinuxenabled; then
raw_socket_check=1
wsrep_log_info "/proc/net/tcp{,6} is being used directly to avoid excessive selinux AVC notices"
return 0
fi
socket_utility="$(commandex ss)"
if [ -n "$socket_utility" ]; then
@@ -1335,7 +1342,11 @@ check_port()
local rc=2 # ENOENT
if [ $ss_available -ne 0 ]; then
if [ $raw_socket_check -ne 0 ]; then
for key in $(awk -v p="$port" 'BEGIN { hex_port = sprintf(":%04X", p) } $2 ~ hex_port && $4 == "0A" { print $10 }' /proc/net/tcp /proc/net/tcp6); do
return 0
done
elif [ $ss_available -ne 0 ]; then
$socket_utility $ss_opts -t "( sport = :$port )" 2>/dev/null | \
grep -q -E "[[:space:]]users:[[:space:]]?\\(.*\\(\"($utils)[^[:space:]]*\"[^)]*,pid=$pid(,[^)]*)?\\)" && rc=0
elif [ $sockstat_available -ne 0 ]; then
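For context on the raw check above: in /proc/net/tcp and /proc/net/tcp6 the second column is the local address as HEXIP:HEXPORT, the fourth column is the socket state (0A means TCP_LISTEN) and the tenth is the socket inode, which is what the awk one-liner prints. A rough Python equivalent of the same test, shown only as an illustration and not used by the script:

# Illustration only: does any socket listen on the given TCP port,
# judged from /proc/net/tcp{,6} the same way as the awk filter above?
def port_is_listening(port):
    suffix = ":%04X" % port          # local port, upper-case hex, zero-padded
    for path in ("/proc/net/tcp", "/proc/net/tcp6"):
        try:
            with open(path) as f:
                f.readline()         # skip the header line
                for line in f:
                    fields = line.split()
                    # fields[1] = local_address, fields[3] = state (0A == LISTEN)
                    if fields[1].endswith(suffix) and fields[3] == "0A":
                        return True
        except OSError:
            continue
    return False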

View File

@@ -105,7 +105,9 @@ check_pid_and_port()
local final
if ! check_port $pid "$port" "$utils"; then
if [ $ss_available -ne 0 -o $sockstat_available -ne 0 ]; then
if [ $raw_socket_check -ne 0 ]; then
return 1
elif [ $ss_available -ne 0 -o $sockstat_available -ne 0 ]; then
if [ $ss_available -ne 0 ]; then
port_info=$($socket_utility $ss_opts -t "( sport = :$port )" 2>/dev/null | \
grep -E '[[:space:]]users:[[:space:]]?\(' | \
@@ -163,7 +165,10 @@ check_pid_and_port()
fi
fi
check_pid "$pid_file" && [ $CHECK_PID -eq $pid ]
if [ $raw_socket_check -ne 0 ]; then
return 0
fi
check_pid "$pid_file" && [ "$CHECK_PID" -eq "$pid" ]
}
get_binlog

View File

@@ -1371,30 +1371,13 @@ Event_job_data::execute(THD *thd, bool drop)
wsrep_open(thd);
wsrep_before_command(thd);
#endif /* WITH_WSREP */
/*
MySQL parser currently assumes that current database is either
present in THD or all names in all statements are fully specified.
And yet not fully specified names inside stored programs must be
be supported, even if the current database is not set:
CREATE PROCEDURE db1.p1() BEGIN CREATE TABLE t1; END//
-- in this example t1 should be always created in db1 and the statement
must parse even if there is no current database.
To support this feature and still address the parser limitation,
we need to set the current database here.
We don't have to call mysql_change_db, since the checks performed
in it are unnecessary for the purpose of parsing, and
mysql_change_db will be invoked anyway later, to activate the
procedure database before it's executed.
*/
thd->set_db(&dbname);
lex_start(thd);
#ifndef NO_EMBEDDED_ACCESS_CHECKS
if (event_sctx.change_security_context(thd,
&definer_user, &definer_host,
&dbname, &save_sctx))
if (event_sctx.change_security_context(thd, &definer_user, &definer_host,
&dbname, &save_sctx) ||
mysql_change_db(thd, &dbname, false))
{
sql_print_error("Event Scheduler: "
"[%s].[%s.%s] execution failed, "

View File

@@ -926,6 +926,7 @@ int Gcalc_operation_reducer::count_slice(Gcalc_scan_iterator *si)
{
add_poly_border(1, cur_t, prev_state, events);
prev_state^= 1;
prev_range= prev_state ? cur_t : 0;
}
if (!events->is_bottom())
{

View File

@@ -2253,6 +2253,13 @@ int ha_partition::copy_partitions(ulonglong * const copied,
}
else
{
if (m_new_file[new_part]->m_lock_type != F_WRLCK)
{
m_last_part= reorg_part;
m_err_rec= table->record[0];
result= HA_ERR_ROW_IN_WRONG_PARTITION;
goto error;
}
/* Copy record to new handler */
(*copied)++;
DBUG_ASSERT(!m_new_file[new_part]->row_logging);
@@ -10415,11 +10422,12 @@ void ha_partition::print_error(int error, myf errflag)
}
else if (error == HA_ERR_ROW_IN_WRONG_PARTITION)
{
/* Should only happen on DELETE or UPDATE! */
/* Should only happen on DELETE, UPDATE or REBUILD PARTITION! */
DBUG_ASSERT(thd_sql_command(thd) == SQLCOM_DELETE ||
thd_sql_command(thd) == SQLCOM_DELETE_MULTI ||
thd_sql_command(thd) == SQLCOM_UPDATE ||
thd_sql_command(thd) == SQLCOM_UPDATE_MULTI);
thd_sql_command(thd) == SQLCOM_UPDATE_MULTI ||
thd_sql_command(thd) == SQLCOM_ALTER_TABLE);
DBUG_ASSERT(m_err_rec);
if (m_err_rec)
{
@@ -12244,11 +12252,14 @@ int ha_partition::direct_delete_rows(ha_rows *delete_rows_result)
if ((error= (m_pre_calling ?
file->pre_direct_delete_rows() :
file->ha_direct_delete_rows(&delete_rows))))
{
if (rnd_seq)
{
if (m_pre_calling)
file->ha_pre_rnd_end();
else
file->ha_rnd_end();
}
DBUG_RETURN(error);
}
delete_rows_result+= delete_rows;

View File

@@ -2126,7 +2126,8 @@ err:
thd->rgi_slave->is_parallel_exec);
}
end:
if (mdl_backup.ticket)
// reset the pointer only when it refers to the stack-instantiated request
if (thd->backup_commit_lock == &mdl_backup)
{
/*
We do not always immediately release transactional locks
@@ -2134,6 +2135,7 @@ end:
thus we release the commit blocker lock as soon as it's
not needed.
*/
if (mdl_backup.ticket)
thd->mdl_context.release_lock(mdl_backup.ticket);
thd->backup_commit_lock= 0;
}

View File

@@ -4938,7 +4938,7 @@ public:
}
int save_in_field(Field *field, bool no_conversions) override;
const Type_handler *type_handler() const override
{ return &type_handler_varchar; }
{ return Type_handler::string_type_handler(max_length); }
Item *clone_item(THD *thd) const override;
Item *safe_charset_converter(THD *thd, CHARSET_INFO *tocs) override
{

View File

@@ -264,6 +264,8 @@ public:
bool fix_length_and_dec(THD *thd) override;
void print(String *str, enum_query_type query_type) override;
enum precedence precedence() const override { return CMP_PRECEDENCE; }
table_map not_null_tables() const override
{ return is_top_level_item() ? not_null_tables_cache : 0; }
bool count_sargable_conds(void *arg) override;
SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param, Item **cond_ptr) override;
SEL_ARG *get_mm_leaf(RANGE_OPT_PARAM *param, Field *field,

View File

@@ -3245,7 +3245,10 @@ longlong Item_func_locate::val_int()
start0= start= args[2]->val_int();
if ((start <= 0) || (start > a->length()))
{
null_value= args[2]->is_null();
return 0;
}
start0--; start--;
/* start is now sufficiently valid to pass to charpos function */
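The hunk above makes LOCATE()'s optional position argument NULL-aware: a NULL position now yields SQL NULL, while a merely out-of-range position still yields 0. A rough Python model of the logic shown in the hunk, for illustration only (1-based positions, character-set handling ignored):

# Illustration only: rough model of LOCATE(substr, str, pos) per the hunk above.
def locate(substr, s, pos=1):
    if substr is None or s is None or pos is None:
        return None                  # SQL NULL
    if pos <= 0 or pos > len(s):
        return 0                     # out-of-range position: 0, not NULL
    idx = s.find(substr, pos - 1)
    return idx + 1 if idx >= 0 else 0

For example, locate('b', 'abc', None) is None, whereas locate('b', 'abc', 99) is 0.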

View File

@@ -5241,14 +5241,14 @@ bool Item_func_json_key_value::fix_length_and_dec(THD *thd)
}
static bool create_hash(json_engine_t *value, HASH *items, bool &hash_inited,
static bool create_hash(json_engine_t *value, HASH *items, bool &item_hash_inited,
MEM_ROOT *hash_root)
{
int level= value->stack_p;
if (my_hash_init(PSI_INSTRUMENT_ME, items, value->s.cs, 0, 0, 0,
get_key_name, NULL, 0))
return true;
hash_inited= true;
item_hash_inited= true;
while (json_scan_next(value) == 0 && value->stack_p >= level)
{
@@ -5317,6 +5317,11 @@ static bool get_current_value(json_engine_t *js, const uchar *&value_start,
return false;
}
static my_bool restore_entry(void *element, void *arg)
{
HASH *items = (HASH*) arg;
return my_hash_insert(items, (const uchar*) element);
}
/*
If the outermost layer of JSON is an array,
@@ -5329,14 +5334,16 @@ static bool get_current_value(json_engine_t *js, const uchar *&value_start,
FALSE - if two array documents have intersection
TRUE - If two array documents do not have intersection
*/
static bool get_intersect_between_arrays(String *str, json_engine_t *value,
HASH items)
bool Item_func_json_array_intersect::
get_intersect_between_arrays(String *str, json_engine_t *value,
HASH *items, HASH *seen)
{
bool res= true, has_value= false;
int level= value->stack_p;
String temp_str(0);
temp_str.length(0);
temp_str.append('[');
while (json_scan_next(value) == 0 && value->stack_p >= level)
{
const uchar *value_start= NULL;
@@ -5372,14 +5379,14 @@ static bool get_intersect_between_arrays(String *str, json_engine_t *value,
of times the value appears in the hash table.
*/
uchar * found= NULL;
if ((found= my_hash_search(&items,
if ((found= my_hash_search(items,
(const uchar *) new_entry,
strlen(new_entry))))
{
has_value= true;
temp_str.append( (const char*) value_start, value_len);
temp_str.append(',');
if (my_hash_delete(&items, found))
if (my_hash_delete(items, found) || my_hash_insert(seen, (const uchar *)found))
{
free(new_entry);
goto error;
@@ -5398,6 +5405,8 @@ static bool get_intersect_between_arrays(String *str, json_engine_t *value,
}
error:
my_hash_iterate(seen, restore_entry, items);
my_hash_reset(seen);
return res;
}
@@ -5416,12 +5425,14 @@ String* Item_func_json_array_intersect::val_str(String *str)
{
if (args[0]->null_value)
goto null_return;
if (hash_inited)
if (item_hash_inited)
my_hash_free(&items);
if (seen_hash_inited)
my_hash_free(&seen);
if (root_inited)
free_root(&hash_root, MYF(0));
root_inited= false;
hash_inited= false;
item_hash_inited= false;
prepare_json_and_create_hash(&je1, js1);
}
@@ -5437,7 +5448,7 @@ String* Item_func_json_array_intersect::val_str(String *str)
if (json_read_value(&je2) || je2.value_type != JSON_VALUE_ARRAY)
goto error_return;
if (get_intersect_between_arrays(str, &je2, items))
if (get_intersect_between_arrays(str, &je2, &items, &seen))
goto error_return;
if (str->length())
@@ -5464,7 +5475,7 @@ null_return:
return NULL;
}
void Item_func_json_array_intersect::prepare_json_and_create_hash(json_engine_t *je1, String *js)
bool Item_func_json_array_intersect::prepare_json_and_create_hash(json_engine_t *je1, String *js)
{
json_scan_start(je1, js->charset(), (const uchar *) js->ptr(),
(const uchar *) js->ptr() + js->length());
@@ -5472,20 +5483,27 @@ void Item_func_json_array_intersect::prepare_json_and_create_hash(json_engine_t
Scan value uses the hash table to get the intersection of two arrays.
*/
if (my_hash_init(PSI_INSTRUMENT_ME, &seen, je1->s.cs, 0, 0, 0,
get_key_name, NULL, 0))
return true;
seen_hash_inited= true;
if (!root_inited)
init_alloc_root(PSI_NOT_INSTRUMENTED, &hash_root, 1024, 0, MYF(0));
root_inited= true;
if (json_read_value(je1) || je1->value_type != JSON_VALUE_ARRAY ||
create_hash(je1, &items, hash_inited, &hash_root))
create_hash(je1, &items, item_hash_inited, &hash_root))
{
if (je1->s.error)
report_json_error(js, je1, 0);
null_value= 1;
}
max_length= (args[0]->max_length < args[1]->max_length) ?
args[0]->max_length : args[1]->max_length;
max_length= 2*(args[0]->max_length < args[1]->max_length ?
args[0]->max_length : args[1]->max_length);
return false;
}
bool Item_func_json_array_intersect::fix_length_and_dec(THD *thd)
@@ -5508,8 +5526,10 @@ bool Item_func_json_array_intersect::fix_length_and_dec(THD *thd)
js1= args[0]->val_json(&tmp_js1);
if (js1)
prepare_json_and_create_hash(&je1, js1);
if (js1 && prepare_json_and_create_hash(&je1, js1))
{
return TRUE;
}
end:
set_maybe_null();

View File

@@ -881,14 +881,15 @@ public:
class Item_func_json_array_intersect: public Item_str_func
{
protected:
String tmp_js1, tmp_js2;
bool hash_inited, root_inited;
HASH items;
String tmp_js1, tmp_js2, temp_str;
bool item_hash_inited, seen_hash_inited, root_inited;
HASH items, seen;
MEM_ROOT hash_root;
bool parse_for_each_row;
public:
Item_func_json_array_intersect(THD *thd, Item *a, Item *b):
Item_str_func(thd, a, b) { hash_inited= root_inited= parse_for_each_row= false; }
Item_str_func(thd, a, b)
{ item_hash_inited= seen_hash_inited= root_inited= parse_for_each_row= false; }
String *val_str(String *) override;
bool fix_length_and_dec(THD *thd) override;
LEX_CSTRING func_name_cstring() const override
@@ -901,12 +902,16 @@ public:
void cleanup() override
{
Item_str_func::cleanup();
if (hash_inited)
if (item_hash_inited)
my_hash_free(&items);
if (seen_hash_inited)
my_hash_free(&seen);
if (root_inited)
free_root(&hash_root, MYF(0));
}
void prepare_json_and_create_hash(json_engine_t *je1, String *js);
bool prepare_json_and_create_hash(json_engine_t *je1, String *js);
bool get_intersect_between_arrays(String *str, json_engine_t *value,
HASH *items, HASH *seen);
};
class Item_func_json_object_filter_keys: public Item_str_func

View File

@@ -716,6 +716,8 @@ TABLE *Create_json_table::start(THD *thd,
if (!(table= Create_tmp_table::start(thd, param, table_alias)))
DBUG_RETURN(0);
share= table->s;
share->db= any_db;
share->table_name= { STRING_WITH_LEN("json_table") };
share->not_usable_by_query_cache= FALSE;
share->db_plugin= NULL;
if (!(table->file= new (&table->mem_root) ha_json_table(share, jt)))

View File

@@ -5354,7 +5354,11 @@ int Rows_log_event::do_apply_event(rpl_group_info *rgi)
slave_rows_error_report(ERROR_LEVEL, thd->is_error() ? 0 : error,
rgi, thd, table, get_type_str(),
RPL_LOG_NAME, log_pos);
if (thd->slave_thread)
if (thd->slave_thread
#ifdef WITH_WSREP
|| (WSREP(thd) && wsrep_thd_is_applying(thd))
#endif /* WITH_WSREP */
)
free_root(thd->mem_root, MYF(MY_KEEP_PREALLOC));
}

View File

@@ -6335,7 +6335,7 @@ Item *and_new_conditions_to_optimized_cond(THD *thd, Item *cond,
}
}
if (!cond)
if (!cond || cond->fix_fields_if_needed(thd, &cond))
return NULL;
if (*cond_eq)
@@ -6368,9 +6368,6 @@ Item *and_new_conditions_to_optimized_cond(THD *thd, Item *cond,
if (cond && is_simplified_cond)
cond= cond->remove_eq_conds(thd, cond_value, true);
if (cond && cond->fix_fields_if_needed(thd, NULL))
return NULL;
return cond;
}
View File

@@ -2420,7 +2420,7 @@ rpl_binlog_state::drop_domain(DYNAMIC_ARRAY *ids,
// compose a sequence of unique pointers to domain object
for (k= 0; k < domain_unique.elements; k++)
{
if ((rpl_binlog_state::element*) dynamic_array_ptr(&domain_unique, k)
if (*(rpl_binlog_state::element**) dynamic_array_ptr(&domain_unique, k)
== elem)
break; // domain_id's elem is already in domain_unique
}
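
The corrected comparison matters because domain_unique stores element pointers, so dynamic_array_ptr() returns the address of the stored slot (an element **), not the element itself; the old cast compared a slot address against elem and could never match. A standalone analogue of the mistake and its fix, with a plain vector standing in for DYNAMIC_ARRAY (all names illustrative):

#include <cassert>
#include <vector>

struct element { unsigned domain_id; };

int main()
{
  element e1{1}, e2{2};
  std::vector<element *> domain_unique{&e1, &e2};  // array of pointers

  element *elem= &e2;
  void *slot= &domain_unique[1];        // what dynamic_array_ptr() hands back

  assert((element *) slot != elem);     // old comparison: never true
  assert(*(element **) slot == elem);   // fixed comparison: dereference first
  return 0;
}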
View File

@@ -13229,6 +13229,13 @@ void fill_effective_table_privileges(THD *thd, GRANT_INFO *grant,
DBUG_VOID_RETURN;
}
/* JSON_TABLE and other db-detached tables */
if (db == any_db.str)
{
grant->privilege= SELECT_ACL;
DBUG_VOID_RETURN;
}
/* global privileges */
grant->privilege= sctx->master_access;
@@ -14436,22 +14443,9 @@ static int server_mpvio_write_packet(MYSQL_PLUGIN_VIO *param,
res= send_server_handshake_packet(mpvio, (char*) packet, packet_len);
else if (mpvio->status == MPVIO_EXT::RESTART)
res= send_plugin_request_packet(mpvio, packet, packet_len);
else if (packet_len > 0 && (*packet < 2 || *packet > 253))
{
/*
we cannot allow a plugin data packet to start with 0, 255 or 254 -
the client would treat it as an OK, ERROR or "change plugin" packet.
We'll escape these bytes with \1. Consequently, we
have to escape the \1 byte too.
*/
else /* plugin data, prefixed with 1 */
res= net_write_command(&mpvio->auth_info.thd->net, 1, (uchar*)"", 0,
packet, packet_len);
}
else
{
res= my_net_write(&mpvio->auth_info.thd->net, packet, packet_len) ||
net_flush(&mpvio->auth_info.thd->net);
}
mpvio->cached_client_reply.plugin= ""_LEX_CSTRING;
mpvio->status= MPVIO_EXT::FAILURE; // the status is no longer RESTART
mpvio->packets_written++;
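
The escaping branch can go away because the reply is now always sent through net_write_command() with a leading 0x01 command byte, so the first byte the client sees is never 0x00 (OK), 0xFE ("change plugin") or 0xFF (error) regardless of what the plugin data contains. A minimal sketch of the framing idea only, outside the server's net layer (frame_plugin_data is an illustrative helper, not server code):

#include <cstddef>
#include <cstdint>
#include <vector>

static std::vector<uint8_t> frame_plugin_data(const uint8_t *data, size_t len)
{
  std::vector<uint8_t> packet;
  packet.reserve(len + 1);
  packet.push_back(0x01);                    // leading marker byte, as above
  packet.insert(packet.end(), data, data + len);
  return packet;                             // first byte is always 0x01
}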
View File

@@ -6541,6 +6541,8 @@ start_new_trans::start_new_trans(THD *thd)
thd->server_status&= ~(SERVER_STATUS_IN_TRANS |
SERVER_STATUS_IN_TRANS_READONLY);
thd->server_status|= SERVER_STATUS_AUTOCOMMIT;
org_rgi_slave= thd->rgi_slave;
thd->rgi_slave= NULL;
}
@@ -6557,6 +6559,7 @@ void start_new_trans::restore_old_transaction()
MYSQL_COMMIT_TRANSACTION(org_thd->m_transaction_psi);
org_thd->m_transaction_psi= m_transaction_psi;
org_thd->variables.wsrep_on= wsrep_on;
org_thd->rgi_slave= org_rgi_slave;
org_thd= 0;
}
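
Saving org_rgi_slave in the constructor and handing it back in restore_old_transaction() mirrors how the class already saves and restores server_status and wsrep_on. A standalone analogue of that save/clear/restore pattern (Session, ReplicationGroupInfo and ScopedNewTrans are illustrative stand-ins, not server types):

struct ReplicationGroupInfo;                 // opaque for the sketch

struct Session { ReplicationGroupInfo *rgi_slave= nullptr; };

class ScopedNewTrans
{
  Session *session;
  ReplicationGroupInfo *org_rgi_slave;       // context of the outer transaction
public:
  explicit ScopedNewTrans(Session *s)
    : session(s), org_rgi_slave(s->rgi_slave)
  {
    session->rgi_slave= nullptr;             // the nested transaction must not reuse it
  }
  void restore_old_transaction()
  {
    session->rgi_slave= org_rgi_slave;       // hand the context back, as above
  }
};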
View File

@@ -6176,6 +6176,11 @@ class start_new_trans
uint in_sub_stmt;
uint server_status;
my_bool wsrep_on;
/*
THD::rgi_slave may hold a part of the replicated "old" transaction's
execution context. Therefore it has to be reset/restored too.
*/
rpl_group_info* org_rgi_slave;
public:
start_new_trans(THD *thd);
@@ -7919,27 +7924,23 @@ public:
If command creates or drops a database
*/
#define CF_DB_CHANGE (1U << 23)
#ifdef WITH_WSREP
/**
DDL statement that may be subject to error filtering.
*/
#define CF_WSREP_MAY_IGNORE_ERRORS (1U << 24)
/**
Basic DML statements that create writeset.
*/
#define CF_WSREP_BASIC_DML (1u << 25)
#endif /* WITH_WSREP */
/* Bits in server_command_flags */
/**
Statement that deletes existing rows (DELETE, DELETE_MULTI)
*/
#define CF_DELETES_DATA (1U << 24)
#ifdef WITH_WSREP
/**
DDL statement that may be subject to error filtering.
*/
#define CF_WSREP_MAY_IGNORE_ERRORS (1U << 25)
/**
Basic DML statements that create writeset.
*/
#define CF_WSREP_BASIC_DML (1u << 26)
#endif /* WITH_WSREP */
/* Bits in server_command_flags */
/**
Skip the increase of the global query id counter. Commonly set for
commands that are stateless (won't cause any change on the server
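
The WSREP flags move up one bit because CF_DELETES_DATA now occupies bit 24 and would otherwise alias CF_WSREP_MAY_IGNORE_ERRORS. A compile-time restatement of the new layout (the constexpr/static_assert form is illustrative, not part of the tree):

#include <cstdint>

constexpr uint32_t CF_DB_CHANGE=               1U << 23;
constexpr uint32_t CF_DELETES_DATA=            1U << 24;
constexpr uint32_t CF_WSREP_MAY_IGNORE_ERRORS= 1U << 25;  // was 1U << 24
constexpr uint32_t CF_WSREP_BASIC_DML=         1U << 26;  // was 1U << 25

static_assert((CF_DELETES_DATA &
               (CF_WSREP_MAY_IGNORE_ERRORS | CF_WSREP_BASIC_DML)) == 0,
              "statement flags must occupy distinct bits");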
View File

@@ -360,7 +360,7 @@ Diagnostics_area::set_ok_status(ulonglong affected_rows,
with an OK packet.
*/
if (unlikely(is_error() || is_disabled()))
return;
DBUG_VOID_RETURN;
/*
When running a bulk operation, m_status will be DA_OK for the first
operation and set to DA_OK_BULK for all following operations.
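
Replacing the bare return with DBUG_VOID_RETURN matters because the early exit sits in a function that is evidently bracketed by DBUG_ENTER, and dbug requires every exit from such a function to go through DBUG_RETURN / DBUG_VOID_RETURN to keep its call-stack bookkeeping balanced. A minimal sketch of that pairing rule, assuming the in-tree dbug headers (do_work is an illustrative name):

#include <my_global.h>
#include <my_dbug.h>

static void do_work(bool nothing_to_do)
{
  DBUG_ENTER("do_work");
  if (nothing_to_do)
    DBUG_VOID_RETURN;          /* early exit still pops the dbug frame */
  /* ... real work ... */
  DBUG_VOID_RETURN;
}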
Some files were not shown because too many files have changed in this diff.