
Merge 10.9 into 10.10

Marko Mäkelä
2022-08-29 14:04:25 +03:00
46 changed files with 752 additions and 245 deletions

View File

@@ -1,16 +0,0 @@
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA
# This file includes OSX specific options and quirks, related to system checks

View File

@@ -23,7 +23,6 @@ IF(MSVC)
SET(BFD_H_EXISTS 0 CACHE INTERNAL "")
SET(HAVE_ACCESS 1 CACHE INTERNAL "")
SET(HAVE_ALARM CACHE INTERNAL "")
SET(HAVE_ALIGNED_ALLOC CACHE INTERNAL "")
SET(HAVE_ALLOCA_H CACHE INTERNAL "")
SET(HAVE_ARPA_INET_H CACHE INTERNAL "")
SET(HAVE_BACKTRACE CACHE INTERNAL "")

View File

@@ -19,7 +19,6 @@
/* Headers we may want to use. */
#cmakedefine STDC_HEADERS 1
#cmakedefine _GNU_SOURCE 1
#cmakedefine HAVE_ALIGNED_ALLOC 1
#cmakedefine HAVE_ALLOCA_H 1
#cmakedefine HAVE_ARPA_INET_H 1
#cmakedefine HAVE_ASM_TERMBITS_H 1

View File

@@ -326,14 +326,6 @@ ENDIF()
CHECK_FUNCTION_EXISTS (accept4 HAVE_ACCEPT4)
CHECK_FUNCTION_EXISTS (access HAVE_ACCESS)
CHECK_FUNCTION_EXISTS (alarm HAVE_ALARM)
IF (CMAKE_SYSTEM_NAME MATCHES "Linux" AND NOT WITH_ASAN)
# When an old custom memory allocator library is used, aligned_alloc()
# could invoke the built-in allocator in libc, not matching
# the overridden free() in the custom memory allocator.
SET(HAVE_ALIGNED_ALLOC 0)
ELSE()
CHECK_FUNCTION_EXISTS (aligned_alloc HAVE_ALIGNED_ALLOC)
ENDIF()
SET(HAVE_ALLOCA 1)
CHECK_FUNCTION_EXISTS (backtrace HAVE_BACKTRACE)
CHECK_FUNCTION_EXISTS (backtrace_symbols HAVE_BACKTRACE_SYMBOLS)

View File

@@ -34,9 +34,10 @@ typedef struct {
pthread_t id;
uint num;
pthread_mutex_t data_mutex;
pthread_cond_t avail_cond;
pthread_cond_t data_cond;
pthread_cond_t done_cond;
my_bool data_avail;
pthread_t data_avail;
my_bool cancelled;
const char *from;
size_t from_len;
@@ -195,9 +196,13 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
threads = comp_ctxt->threads;
nthreads = comp_ctxt->nthreads;
const pthread_t self = pthread_self();
ptr = (const char *) buf;
while (len > 0) {
uint max_thread;
bool wait = nthreads == 1;
retry:
bool submitted = false;
/* Send data to worker threads for compression */
for (i = 0; i < nthreads; i++) {
@@ -206,16 +211,33 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
thd = threads + i;
pthread_mutex_lock(&thd->data_mutex);
if (thd->data_avail == pthread_t(~0UL)) {
} else if (!wait) {
skip:
pthread_mutex_unlock(&thd->data_mutex);
continue;
} else {
for (;;) {
pthread_cond_wait(&thd->avail_cond,
&thd->data_mutex);
if (thd->data_avail
== pthread_t(~0UL)) {
break;
}
goto skip;
}
}
chunk_len = (len > COMPRESS_CHUNK_SIZE) ?
COMPRESS_CHUNK_SIZE : len;
thd->from = ptr;
thd->from_len = chunk_len;
thd->data_avail = TRUE;
thd->data_avail = self;
pthread_cond_signal(&thd->data_cond);
pthread_mutex_unlock(&thd->data_mutex);
submitted = true;
len -= chunk_len;
if (len == 0) {
break;
@@ -223,13 +245,20 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
ptr += chunk_len;
}
max_thread = (i < nthreads) ? i : nthreads - 1;
if (!submitted) {
wait = true;
goto retry;
}
/* Reap and stream the compressed data */
for (i = 0; i <= max_thread; i++) {
for (i = 0; i < nthreads; i++) {
thd = threads + i;
pthread_mutex_lock(&thd->data_mutex);
if (thd->data_avail != self) {
pthread_mutex_unlock(&thd->data_mutex);
continue;
}
while (!thd->to_len) {
pthread_cond_wait(&thd->done_cond,
&thd->data_mutex);
@@ -247,6 +276,8 @@ compress_write(ds_file_t *file, const uchar *buf, size_t len)
}
thd->to_len = 0;
thd->data_avail = pthread_t(~0UL);
pthread_cond_signal(&thd->avail_cond);
pthread_mutex_unlock(&thd->data_mutex);
if (fail) {
@@ -334,6 +365,7 @@ destroy_worker_thread(comp_thread_ctxt_t *thd)
pthread_join(thd->id, NULL);
pthread_cond_destroy(&thd->avail_cond);
pthread_cond_destroy(&thd->data_cond);
pthread_cond_destroy(&thd->done_cond);
pthread_mutex_destroy(&thd->data_mutex);
@@ -364,11 +396,14 @@ create_worker_threads(uint n)
/* Initialize the data mutex and condition variables */
if (pthread_mutex_init(&thd->data_mutex, NULL) ||
pthread_cond_init(&thd->avail_cond, NULL) ||
pthread_cond_init(&thd->data_cond, NULL) ||
pthread_cond_init(&thd->done_cond, NULL)) {
goto err;
}
thd->data_avail = pthread_t(~0UL);
if (pthread_create(&thd->id, NULL, compress_worker_thread_func,
thd)) {
msg("compress: pthread_create() failed: "
@@ -410,13 +445,13 @@ compress_worker_thread_func(void *arg)
pthread_mutex_lock(&thd->data_mutex);
while (1) {
while (!thd->data_avail && !thd->cancelled) {
while (!thd->cancelled
&& (thd->to_len || thd->data_avail == pthread_t(~0UL))) {
pthread_cond_wait(&thd->data_cond, &thd->data_mutex);
}
if (thd->cancelled)
break;
thd->data_avail = FALSE;
thd->to_len = qlz_compress(thd->from, thd->to, thd->from_len,
&thd->state);
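
The rewritten protocol above replaces the boolean data_avail flag with a pthread_t tag: a worker slot whose tag equals pthread_t(~0UL) is free, a producer claims it by storing its own thread id, and it later reaps only the slots carrying that id, so concurrent writer threads can share one pool of compression workers without stealing each other's chunks. A minimal standalone sketch of the claim/submit step, assuming a simplified slot_t in place of the real comp_thread_ctxt_t:

#include <pthread.h>

// Sentinel tag meaning "slot is free" (like the code above, this
// assumes pthread_t is an unsigned long, as on Linux).
static const pthread_t FREE_SLOT = pthread_t(~0UL);

struct slot_t {
  pthread_mutex_t mutex;
  pthread_cond_t  avail_cond; // signalled when the slot becomes free
  pthread_cond_t  data_cond;  // signalled when work is submitted
  pthread_t       owner;      // FREE_SLOT, or the id of the submitter
};

// Producer side: claim a free slot, optionally waiting for one.
static bool try_submit(slot_t *s, bool wait)
{
  pthread_mutex_lock(&s->mutex);
  while (s->owner != FREE_SLOT) {
    if (!wait) {
      pthread_mutex_unlock(&s->mutex);
      return false;               // busy: caller tries the next slot
    }
    pthread_cond_wait(&s->avail_cond, &s->mutex);
  }
  s->owner = pthread_self();      // tag the slot with our own id
  pthread_cond_signal(&s->data_cond);
  pthread_mutex_unlock(&s->mutex);
  return true;                    // reap later only slots tagged with us
}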

View File

@@ -2533,11 +2533,11 @@ xb_write_delta_metadata(const char *filename, const xb_delta_info_t *info)
/* ================= backup ================= */
void xtrabackup_io_throttling()
{
if (!xtrabackup_backup)
if (!xtrabackup_backup || !xtrabackup_throttle)
return;
mysql_mutex_lock(&recv_sys.mutex);
if (xtrabackup_throttle && (io_ticket--) < 0)
if (io_ticket-- < 0)
mysql_cond_wait(&wait_throttle, &recv_sys.mutex);
mysql_mutex_unlock(&recv_sys.mutex);
}
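
For reference, the throttling scheme here is a plain ticket counter: each I/O batch decrements io_ticket, and once it goes negative the thread blocks on wait_throttle until the tickets are refilled and the condition is broadcast. A self-contained C++11 sketch of the same idea (hypothetical io_throttle type, assuming a periodic refiller thread, not the xtrabackup code):

#include <condition_variable>
#include <mutex>

struct io_throttle {
  std::mutex m;
  std::condition_variable cv;
  long tickets = 0;

  void on_io()                      // called before each I/O batch
  {
    std::unique_lock<std::mutex> lk(m);
    if (tickets-- < 0)
      cv.wait(lk);                  // sleep until the next refill
  }
  void refill(long n)               // called once per throttle interval
  {
    std::lock_guard<std::mutex> lk(m);
    tickets = n;
    cv.notify_all();
  }
};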

View File

@@ -14,8 +14,7 @@
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */
#ifdef HAVE_ALIGNED_ALLOC
#elif defined __linux__
#if defined __linux__
# include <malloc.h>
#endif
@@ -23,8 +22,6 @@ inline void *aligned_malloc(size_t size, size_t alignment)
{
#ifdef _WIN32
return _aligned_malloc(size, alignment);
#elif defined HAVE_ALIGNED_ALLOC
return aligned_alloc(alignment, size);
#elif defined __linux__
return memalign(alignment, size);
#else
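
The configure.cmake comment earlier explains the motivation for dropping the HAVE_ALIGNED_ALLOC branch: with an old custom allocator preloaded, libc's aligned_alloc() may hand out memory that the interposed free() cannot handle, so the fallback order is now _aligned_malloc() on Windows and memalign() on Linux. Whichever branch allocates, the matching deallocator must come from the same family; a hedged sketch of the counterpart (the real header has its own aligned_free(), this only shows the shape of the pairing rule):

#include <cstdlib>
#ifdef _WIN32
# include <malloc.h>
#endif

// Sketch only: memory from _aligned_malloc() must go back through
// _aligned_free(), while memalign()/posix_memalign() memory is
// ordinary free()-able memory.
inline void aligned_free_sketch(void *ptr)
{
#ifdef _WIN32
  _aligned_free(ptr);
#else
  free(ptr);
#endif
}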

View File

@@ -822,7 +822,7 @@ CREATE TABLE t2 SELECT JSON_ARRAY_INSERT(fld, '$.[0]', '0') FROM t1;
SHOW CREATE TABLE t2;
Table Create Table
t2 CREATE TABLE `t2` (
`JSON_ARRAY_INSERT(fld, '$.[0]', '0')` varchar(25) DEFAULT NULL
`JSON_ARRAY_INSERT(fld, '$.[0]', '0')` varchar(21) DEFAULT NULL
) ENGINE=MyISAM DEFAULT CHARSET=latin1
DROP TABLE t1, t2;
SET sql_mode=default;
@@ -1016,6 +1016,33 @@ j
{"ID": "4", "Name": "Betty", "Age": 19}
drop table t1;
#
# MDEV-27151: JSON_VALUE() does not parse NULL properties properly
#
#
# It is correct for JSON_EXTRACT() to give null instead of "NULL" because
# it returns the JSON literal that is stored inside the JSON.
# Hence it should return null as the 'null' string and not SQL NULL.
# JSON_VALUE() returns the "VALUE", so it is correct for it to return SQL NULL
#
SELECT NULL;
NULL
NULL
SELECT JSON_VALUE('{"nulltest": null}', '$.nulltest');
JSON_VALUE('{"nulltest": null}', '$.nulltest')
NULL
SELECT 1 + NULL;
1 + NULL
NULL
SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest');
1 + JSON_VALUE('{"nulltest": null}', '$.nulltest')
NULL
SELECT NULL;
NULL
NULL
SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a');
JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a')
null
#
# End of 10.3 tests
#
#
@@ -1437,6 +1464,21 @@ f
DROP VIEW v;
DROP TABLE t;
#
# MDEV-29264 JSON functions overflow error based ON LONGTEXT field
#
CREATE TABLE t(l1 LONGTEXT, l2 LONGTEXT, l3 LONGTEXT, l4 LONGTEXT);
INSERT INTO t VALUES('k1', 'v1', 'k2', 'v2');
SELECT JSON_ARRAY(l1, l2, l3, l4), JSON_OBJECT(l1, l2, l3, l4) from t;
JSON_ARRAY(l1, l2, l3, l4) JSON_OBJECT(l1, l2, l3, l4)
["k1", "v1", "k2", "v2"] {"k1": "v1", "k2": "v2"}
SELECT JSON_ARRAY_APPEND(JSON_ARRAY(l1, l2, l3, l4), '$[0]', 'k3'), JSON_ARRAY_INSERT(JSON_ARRAY(l1, l2, l3, l4), '$[0]', 'k3') from t;
JSON_ARRAY_APPEND(JSON_ARRAY(l1, l2, l3, l4), '$[0]', 'k3') JSON_ARRAY_INSERT(JSON_ARRAY(l1, l2, l3, l4), '$[0]', 'k3')
[["k1", "k3"], "v1", "k2", "v2"] ["k3", "k1", "v1", "k2", "v2"]
SELECT JSON_INSERT(JSON_OBJECT(l1, l2, l3, l4), '$.k3', 'v3'),JSON_SET(JSON_OBJECT(l1, l2, l3, l4), '$.k2', 'new v2'),JSON_REPLACE(JSON_OBJECT(l1, l2, l3, l4), '$.k2', 'new v2') from t;
JSON_INSERT(JSON_OBJECT(l1, l2, l3, l4), '$.k3', 'v3') JSON_SET(JSON_OBJECT(l1, l2, l3, l4), '$.k2', 'new v2') JSON_REPLACE(JSON_OBJECT(l1, l2, l3, l4), '$.k2', 'new v2')
{"k1": "v1", "k2": "v2", "k3": "v3"} {"k1": "v1", "k2": "new v2"} {"k1": "v1", "k2": "new v2"}
DROP TABLE t;
#
# End of 10.5 tests
#
#
@@ -2295,5 +2337,16 @@ SELECT * FROM JSON_TABLE('{"foo":["bar","qux"]}','$**.*[0]' COLUMNS(col1 CHAR(8)
col1
bar
#
# MDEV-29212: json_overlaps() does not check nested key-value pair correctly
#
SET @json1 = '{"kk":{"k1":"v1","k2":"v2"}}';
SET @json2 = '{"kk":{"k1":"v1","k2":"v2","k3":"v3"}}';
SELECT JSON_OVERLAPS(@json2, @json1);
JSON_OVERLAPS(@json2, @json1)
0
SELECT JSON_OVERLAPS(@json1, @json2);
JSON_OVERLAPS(@json1, @json2)
0
#
# End of 10.9 tests
#

View File

@@ -627,6 +627,25 @@ SELECT * FROM t1 WHERE JSON_EXTRACT(j, '$.Age')=19;
drop table t1;
--echo #
--echo # MDEV-27151: JSON_VALUE() does not parse NULL properties properly
--echo #
--echo #
--echo # It is correct for JSON_EXTRACT() to give null instead of "NULL" because
--echo # it returns the JSON literal that is stored inside the JSON.
--echo # Hence it should return null as the 'null' string and not SQL NULL.
--echo # JSON_VALUE() returns the "VALUE", so it is correct for it to return SQL NULL
--echo #
SELECT NULL;
SELECT JSON_VALUE('{"nulltest": null}', '$.nulltest');
SELECT 1 + NULL;
SELECT 1 + JSON_VALUE('{"nulltest": null}', '$.nulltest');
SELECT NULL;
SELECT JSON_EXTRACT('{"a":null, "b":10, "c":"null"}', '$.a');
--echo #
--echo # End of 10.3 tests
--echo #
@@ -927,6 +946,17 @@ SELECT JSON_ARRAYAGG(a) AS f FROM v;
DROP VIEW v;
DROP TABLE t;
--echo #
--echo # MDEV-29264 JSON functions overflow error based ON LONGTEXT field
--echo #
CREATE TABLE t(l1 LONGTEXT, l2 LONGTEXT, l3 LONGTEXT, l4 LONGTEXT);
INSERT INTO t VALUES('k1', 'v1', 'k2', 'v2');
SELECT JSON_ARRAY(l1, l2, l3, l4), JSON_OBJECT(l1, l2, l3, l4) from t;
SELECT JSON_ARRAY_APPEND(JSON_ARRAY(l1, l2, l3, l4), '$[0]', 'k3'), JSON_ARRAY_INSERT(JSON_ARRAY(l1, l2, l3, l4), '$[0]', 'k3') from t;
SELECT JSON_INSERT(JSON_OBJECT(l1, l2, l3, l4), '$.k3', 'v3'),JSON_SET(JSON_OBJECT(l1, l2, l3, l4), '$.k2', 'new v2'),JSON_REPLACE(JSON_OBJECT(l1, l2, l3, l4), '$.k2', 'new v2') from t;
DROP TABLE t;
--echo #
--echo # End of 10.5 tests
--echo #
@@ -1547,6 +1577,15 @@ SELECT JSON_EXISTS(@json, '$[2][2][1 to 4]');
SELECT * FROM JSON_TABLE('{"foo":["bar","qux"]}','$**.*[0]' COLUMNS(col1 CHAR(8) PATH '$[0]')) AS jt;
--echo #
--echo # MDEV-29212: json_overlaps() does not check nested key-value pair correctly
--echo #
SET @json1 = '{"kk":{"k1":"v1","k2":"v2"}}';
SET @json2 = '{"kk":{"k1":"v1","k2":"v2","k3":"v3"}}';
SELECT JSON_OVERLAPS(@json2, @json1);
SELECT JSON_OVERLAPS(@json1, @json2);
--echo #
--echo # End of 10.9 tests
--echo #

View File

@@ -654,3 +654,25 @@ SET time_zone=DEFAULT;
#
# End of 10.4 tests
#
#
# MDEV-27101 Subquery using the ALL keyword on TIMESTAMP columns produces a wrong result
#
SET time_zone='Europe/Moscow';
CREATE TABLE t1 (a TIMESTAMP NULL);
SET timestamp=1288477526;
/* this is summer time, earlier */
INSERT INTO t1 VALUES (NOW());
SET timestamp=1288477526+3599;
/* this is winter time, later */
INSERT INTO t1 VALUES (NOW());
SELECT a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
a UNIX_TIMESTAMP(a)
2010-10-31 02:25:26 1288477526
2010-10-31 02:25:25 1288481125
SELECT a, UNIX_TIMESTAMP(a) FROM t1 WHERE a <= ALL (SELECT * FROM t1);
a UNIX_TIMESTAMP(a)
2010-10-31 02:25:26 1288477526
SELECT a, UNIX_TIMESTAMP(a) FROM t1 WHERE a >= ALL (SELECT * FROM t1);
a UNIX_TIMESTAMP(a)
2010-10-31 02:25:25 1288481125
DROP TABLE t1;
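
The two inserts above straddle the Europe/Moscow DST fall-back: the epoch values differ by 3599 seconds, yet their wall-clock renderings are nearly equal and sort in the opposite order, which is exactly why MDEV-27101 makes the ALL/ANY comparison use the underlying native values instead of the displayed ones. A standalone illustration (assuming a POSIX system with the tzdata Europe/Moscow zone installed):

#include <cstdio>
#include <cstdlib>
#include <ctime>

int main()
{
  setenv("TZ", "Europe/Moscow", 1);      // POSIX; needs tzdata
  tzset();
  const time_t times[] = { 1288477526,   // summer time, earlier epoch
                           1288477526 + 3599 };  // winter time, later epoch
  char buf[64];
  for (time_t t : times) {
    strftime(buf, sizeof buf, "%F %T", localtime(&t));
    std::printf("%lld -> %s\n", (long long) t, buf);
  }
  // Later epoch prints the *earlier* wall-clock string:
  // 1288477526 -> 2010-10-31 02:25:26
  // 1288481125 -> 2010-10-31 02:25:25
  return 0;
}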

View File

@@ -598,3 +598,18 @@ SET time_zone=DEFAULT;
--echo #
--echo # End of 10.4 tests
--echo #
--echo #
--echo # MDEV-27101 Subquery using the ALL keyword on TIMESTAMP columns produces a wrong result
--echo #
SET time_zone='Europe/Moscow';
CREATE TABLE t1 (a TIMESTAMP NULL);
SET timestamp=1288477526; /* this is summer time, earlier */
INSERT INTO t1 VALUES (NOW());
SET timestamp=1288477526+3599; /* this is winter time, later */
INSERT INTO t1 VALUES (NOW());
SELECT a, UNIX_TIMESTAMP(a) FROM t1 ORDER BY a;
SELECT a, UNIX_TIMESTAMP(a) FROM t1 WHERE a <= ALL (SELECT * FROM t1);
SELECT a, UNIX_TIMESTAMP(a) FROM t1 WHERE a >= ALL (SELECT * FROM t1);
DROP TABLE t1;

View File

@@ -95,3 +95,17 @@ ALTER TABLE t1 MODIFY msg_2 VARCHAR(100) CHARACTER SET utf8
COLLATE utf8_unicode_ci, ALGORITHM=inplace;
ERROR 0A000: ALGORITHM=INPLACE is not supported. Reason: Cannot change column type. Try ALGORITHM=COPY
DROP TABLE t1;
#
# MDEV-29314 Assertion `n_fields > n_cols' failed
# in dict_index_t::init_change_cols
#
CREATE TABLE t (a VARCHAR(16) COLLATE utf8_bin,
FULLTEXT (a)) ENGINE=InnoDB COLLATE utf8_unicode_520_ci;
ALTER TABLE t MODIFY COLUMN a VARCHAR(512);
SHOW CREATE TABLE t;
Table Create Table
t CREATE TABLE `t` (
`a` varchar(512) COLLATE utf8mb3_unicode_520_ci DEFAULT NULL,
FULLTEXT KEY `a` (`a`)
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb3 COLLATE=utf8mb3_unicode_520_ci
DROP TABLE t;

View File

@@ -1,18 +1,18 @@
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,32k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:29:25.129637040 +0530
@@ -3,18 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 196608
-# bytes: 65536
+# bytes: 131072
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
-# bytes: 9437184
+# bytes: 786432
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 196608
-# bytes: 65536
+# bytes: 131072
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
-# bytes: 4194304
-DROP TABLE t1;

View File

@@ -1,17 +1,17 @@
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,4k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:39.288769153 +0530
@@ -3,18 +3,18 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 24576
-# bytes: 65536
+# bytes: 16384
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
# bytes: 9437184
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 24576
-# bytes: 65536
+# bytes: 16384
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
# bytes: 4194304
DROP TABLE t1;

View File

@@ -1,18 +1,18 @@
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,64k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:30:28.957174270 +0530
@@ -3,18 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 393216
-# bytes: 65536
+# bytes: 262144
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
-# bytes: 9437184
+# bytes: 983040
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 393216
-# bytes: 65536
+# bytes: 262144
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
-# bytes: 4194304
-DROP TABLE t1;

View File

@@ -1,17 +1,17 @@
--- check_ibd_filesize.result
+++ check_ibd_filesize.result,8k
--- mysql-test/suite/innodb/r/check_ibd_filesize.result 2022-08-16 17:28:06.462350465 +0530
+++ mysql-test/suite/innodb/r/check_ibd_filesize.reject 2022-08-16 17:31:03.516962339 +0530
@@ -3,18 +3,18 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 49152
-# bytes: 65536
+# bytes: 32768
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
# bytes: 9437184
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
-# bytes: 98304
+# bytes: 49152
-# bytes: 65536
+# bytes: 32768
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
# bytes: 4194304
DROP TABLE t1;

View File

@@ -3,12 +3,12 @@
# SPACE IN 5.7 THAN IN 5.6
#
CREATE TABLE t1 (a INT PRIMARY KEY) ENGINE=InnoDB;
# bytes: 98304
# bytes: 65536
INSERT INTO t1 SELECT * FROM seq_1_to_25000;
# bytes: 9437184
DROP TABLE t1;
CREATE TABLE t1 (a INT PRIMARY KEY, b BLOB) ENGINE=InnoDB;
# bytes: 98304
# bytes: 65536
INSERT INTO t1 SELECT seq,REPEAT('a',30000) FROM seq_1_to_20;
# bytes: 4194304
DROP TABLE t1;

View File

@@ -122,4 +122,14 @@ ALTER TABLE t1 MODIFY msg_2 VARCHAR(100) CHARACTER SET utf8
COLLATE utf8_unicode_ci, ALGORITHM=inplace;
DROP TABLE t1;
--echo #
--echo # MDEV-29314 Assertion `n_fields > n_cols' failed
--echo # in dict_index_t::init_change_cols
--echo #
CREATE TABLE t (a VARCHAR(16) COLLATE utf8_bin,
FULLTEXT (a)) ENGINE=InnoDB COLLATE utf8_unicode_520_ci;
ALTER TABLE t MODIFY COLUMN a VARCHAR(512);
SHOW CREATE TABLE t;
DROP TABLE t;
--source include/wait_until_count_sessions.inc

View File

@@ -5,6 +5,7 @@
#
--source include/have_innodb.inc
--source include/innodb_page_size.inc
# Embedded server does not support restarting the server
--source include/not_embedded.inc
--source include/no_valgrind_without_big.inc

View File

@@ -1024,7 +1024,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -1331,7 +1331,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -2637,7 +2637,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -2946,7 +2946,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -4151,7 +4151,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -4439,7 +4439,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -5697,7 +5697,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -6004,7 +6004,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -7288,7 +7288,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 122880
The size of the tab5.ibd file: 106496
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data
@@ -7595,7 +7595,7 @@ AND compress_ops BETWEEN @inl_val AND 1000
AND table_name='tab5' AND database_name='test'
AND index_name like 'idx%' ;
compress_stat 1
The size of the tab5.ibd file: 245760
The size of the tab5.ibd file: 212992
# for deterministic reasons simple data should be inserted.
# insert some 100 records
# Load the data

View File

@@ -12,6 +12,10 @@ INSERT INTO t VALUES(2);
echo # xtrabackup prepare;
--disable_result_log
# Because MDEV-24626 in 10.6 optimized file creation, we could end up with
# t.new.qp instead of t.ibd.qp unless a log checkpoint happened to be
# triggered between CREATE TABLE and the backup run.
--replace_result t.new t.ibd
list_files $targetdir/test *.qp;
exec $XTRABACKUP --decompress --remove-original --target-dir=$targetdir;
list_files $targetdir/test *.qp;

View File

@@ -31,13 +31,13 @@ FOUND 1 /Database page corruption detected.*/ in backup.log
FOUND 1 /completed OK!/ in backup.log
--- "innodb_corrupted_pages" file content: ---
test/t1_corrupted
6 8 9
4 6 7
test/t2_corrupted
7 8 10
5 6 8
test/t4_corrupted_new
1
test/t5_corrupted_to_rename_renamed
6
4
test/t7_corrupted_to_alter
3
------
@@ -48,19 +48,19 @@ INSERT INTO t3_inc VALUES (3), (4), (5), (6), (7), (8), (9);
# Backup must not fail, but "innodb_corrupted_pages" file must be created due to --log-innodb-page-corruption option
--- "innodb_corrupted_pages" file content: ---
test/t1_corrupted
6 8 9
4 6 7
test/t1_inc_corrupted
6 8 9
4 6 7
test/t2_corrupted
7 8 10
5 6 8
test/t2_inc_corrupted
7 8 10
5 6 8
test/t4_inc_corrupted_new
1
test/t5_corrupted_to_rename_renamed
6
4
test/t5_inc_corrupted_to_rename_renamed
6
4
test/t7_inc_corrupted_to_alter
3
------
@@ -85,16 +85,16 @@ DROP TABLE t7_inc_corrupted_to_alter;
# Full backup with --log-innodb-page-corruption
--- "innodb_corrupted_pages" file content: ---
test/t3
6 8
4 6
------
# Extend some tablespace and corrupt extended pages for incremental backup
# restart
# Incremental backup --log-innodb-page-corruption
--- "innodb_corrupted_pages" file content: ---
test/t3
6 8
4 6
test/t3_inc
6 8
4 6
------
# Full backup prepare
# "innodb_corrupted_pages" file must not exist after successful prepare

View File

@@ -2214,6 +2214,22 @@ id name
DROP TABLE divisions;
DROP TABLE companies;
#
# MDEV-27099 Subquery using the ALL keyword on INET6 columns produces a wrong result
#
CREATE TABLE t1 (d INET6);
INSERT INTO t1 VALUES ('1::0'), ('12::0');
SELECT * FROM t1 ORDER BY d;
d
1::
12::
SELECT * FROM t1 WHERE d <= ALL (SELECT * FROM t1);
d
1::
SELECT * FROM t1 WHERE d >= ALL (SELECT * FROM t1);
d
12::
DROP TABLE t1;
#
# MDEV-27015 Assertion `!is_null()' failed in FixedBinTypeBundle<FbtImpl>::Fbt FixedBinTypeBundle<FbtImpl>::Field_fbt::to_fbt()
#
CREATE TABLE t1 (id int NOT NULL PRIMARY KEY, a INET6(6) DEFAULT '::10');

View File

@@ -1630,6 +1630,17 @@ SELECT * FROM companies;
DROP TABLE divisions;
DROP TABLE companies;
--echo #
--echo # MDEV-27099 Subquery using the ALL keyword on INET6 columns produces a wrong result
--echo #
CREATE TABLE t1 (d INET6);
INSERT INTO t1 VALUES ('1::0'), ('12::0');
SELECT * FROM t1 ORDER BY d;
SELECT * FROM t1 WHERE d <= ALL (SELECT * FROM t1);
SELECT * FROM t1 WHERE d >= ALL (SELECT * FROM t1);
DROP TABLE t1;
--echo #
--echo # MDEV-27015 Assertion `!is_null()' failed in FixedBinTypeBundle<FbtImpl>::Fbt FixedBinTypeBundle<FbtImpl>::Field_fbt::to_fbt()
--echo #

View File

@@ -677,10 +677,6 @@ bool Item_func_json_query::fix_length_and_dec(THD *thd)
}
/*
Returns NULL, not an error if the found value
is not a scalar.
*/
bool Json_path_extractor::extract(String *str, Item *item_js, Item *item_jp,
CHARSET_INFO *cs)
{
@@ -713,6 +709,9 @@ continue_search:
if (json_read_value(&je))
return true;
if (je.value_type == JSON_VALUE_NULL)
return true;
if (unlikely(check_and_get_value(&je, str, &error)))
{
if (error)
@@ -1209,7 +1208,6 @@ my_decimal *Item_func_json_extract::val_decimal(my_decimal *to)
case JSON_VALUE_ARRAY:
case JSON_VALUE_FALSE:
case JSON_VALUE_UNINITIALIZED:
// TODO: fix: NULL should be NULL
case JSON_VALUE_NULL:
int2my_decimal(E_DEC_FATAL_ERROR, 0, false/*unsigned_flag*/, to);
return to;
@@ -1854,7 +1852,7 @@ bool Item_func_json_array::fix_length_and_dec(THD *thd)
return TRUE;
for (n_arg=0 ; n_arg < arg_count ; n_arg++)
char_length+= args[n_arg]->max_char_length() + 4;
char_length+= static_cast<ulonglong>(args[n_arg]->max_char_length()) + 4;
fix_char_length_ulonglong(char_length);
tmp_val.set_charset(collation.collation);
@@ -1913,7 +1911,8 @@ bool Item_func_json_array_append::fix_length_and_dec(THD *thd)
for (n_arg= 1; n_arg < arg_count; n_arg+= 2)
{
paths[n_arg/2].set_constant_flag(args[n_arg]->const_item());
char_length+= args[n_arg/2+1]->max_char_length() + 4;
char_length+=
static_cast<ulonglong>(args[n_arg+1]->max_char_length()) + 4;
}
fix_char_length_ulonglong(char_length);
@@ -3082,7 +3081,8 @@ bool Item_func_json_insert::fix_length_and_dec(THD *thd)
for (n_arg= 1; n_arg < arg_count; n_arg+= 2)
{
paths[n_arg/2].set_constant_flag(args[n_arg]->const_item());
char_length+= args[n_arg/2+1]->max_char_length() + 4;
char_length+=
static_cast<ulonglong>(args[n_arg+1]->max_char_length()) + 4;
}
fix_char_length_ulonglong(char_length);
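
MDEV-29264 is a plain 32-bit overflow: max_char_length() of a LONGTEXT argument can be as large as 4294967295, so summing several of them in a 32-bit char_length wraps around before fix_char_length_ulonglong() ever gets to clamp the result (the hunks also fix the argument indexing from args[n_arg/2+1] to args[n_arg+1]). Widening the first operand, as above, keeps the sum exact; a tiny standalone demonstration of the wrap (not MariaDB code):

#include <cstdint>
#include <cstdio>

int main()
{
  const uint32_t longtext_max = 4294967295U; // max_char_length() of LONGTEXT
  uint32_t narrow = 0;
  uint64_t wide = 0;
  for (int i = 0; i < 4; i++) {              // four LONGTEXT arguments
    narrow += longtext_max + 4;              // wraps modulo 2^32
    wide   += uint64_t(longtext_max) + 4;    // exact, as in the fix
  }
  std::printf("%u vs %llu\n", narrow, (unsigned long long) wide);
  // prints: 12 vs 17179869196
  return 0;
}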
@@ -4360,7 +4360,7 @@ bool json_compare_arr_and_obj(json_engine_t *js, json_engine_t *value)
return TRUE;
*value= loc_val;
}
if (!json_value_scalar(js))
if (js->value_type == JSON_VALUE_ARRAY)
json_skip_level(js);
}
return FALSE;
@@ -4446,10 +4446,49 @@ int json_find_overlap_with_array(json_engine_t *js, json_engine_t *value,
}
int compare_nested_object(json_engine_t *js, json_engine_t *value)
{
int result= 0;
const char *value_begin= (const char*)value->s.c_str-1;
const char *js_begin= (const char*)js->s.c_str-1;
json_skip_level(value);
json_skip_level(js);
const char *value_end= (const char*)value->s.c_str;
const char *js_end= (const char*)js->s.c_str;
String a(value_begin, value_end-value_begin,value->s.cs);
String b(js_begin, js_end-js_begin, js->s.cs);
DYNAMIC_STRING a_res, b_res;
if (init_dynamic_string(&a_res, NULL, 4096, 1024) ||
init_dynamic_string(&b_res, NULL, 4096, 1024))
{
goto error;
}
if (json_normalize(&a_res, a.ptr(), a.length(), value->s.cs) ||
json_normalize(&b_res, b.ptr(), b.length(), value->s.cs))
{
goto error;
}
result= strcmp(a_res.str, b_res.str) ? 0 : 1;
error:
dynstr_free(&a_res);
dynstr_free(&b_res);
return MY_TEST(result);
}
int json_find_overlap_with_object(json_engine_t *js, json_engine_t *value,
bool compare_whole)
{
if (value->value_type == JSON_VALUE_OBJECT)
{
if (compare_whole)
{
return compare_nested_object(js, value);
}
else
{
/* Find at least one common key-value pair */
json_string_t key_name;
@@ -4489,33 +4528,49 @@ int json_find_overlap_with_object(json_engine_t *js, json_engine_t *value,
found_value= check_overlaps(js, value, true);
if (found_value)
{
if (!compare_whole)
/*
We have found at least one common key-value pair now.
No need to check for more key-value pairs. So skip the
remaining JSON documents and return TRUE.
*/
json_skip_current_level(js, value);
return TRUE;
*js= loc_js;
}
else
{
if (compare_whole)
{
json_skip_current_level(js, value);
return FALSE;
}
/*
Key is found but value is not found. We have already
exhausted both values for current key. Hence "reset"
only js (first argument i.e json document) and
continue.
*/
*js= loc_js;
continue;
}
}
else
{
if (compare_whole)
{
json_skip_current_level(js, value);
/*
The key is not found, so there is no need to check the value for
that key. Read the value anyway so we get the "type" of the JSON
value. If it is non-scalar then skip the entire value (scalar
values get exhausted while reading, so there is no need to skip
them). Then reset the JSON document again.
*/
if (json_read_value(value))
return FALSE;
}
json_skip_key(value);
if (!json_value_scalar(value))
json_skip_level(value);
*js= loc_js;
}
}
/*
At this point we have already returned true if any intersection exists.
So skip the JSON documents if not exhausted and return false.
*/
json_skip_current_level(js, value);
return compare_whole ? TRUE : FALSE;
return FALSE;
}
}
else if (value->value_type == JSON_VALUE_ARRAY)
{
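
For MDEV-29212, compare_nested_object() above decides whole-object overlap by normalizing both objects with json_normalize() and comparing the results with strcmp(), so key order and whitespace no longer matter. The same idea in a self-contained sketch, with a hypothetical normalize() over a flat string map standing in for the real JSON normalizer:

#include <map>
#include <sstream>
#include <string>

// Hypothetical normalizer for a flat string->string object: std::map
// already orders keys, so serializing it yields a canonical form.
static std::string normalize(const std::map<std::string, std::string> &obj)
{
  std::ostringstream out;
  out << '{';
  bool first = true;
  for (const auto &kv : obj) {
    if (!first) out << ',';
    first = false;
    out << '"' << kv.first << "\":\"" << kv.second << '"';
  }
  out << '}';
  return out.str();
}

static bool whole_object_overlap(const std::map<std::string, std::string> &a,
                                 const std::map<std::string, std::string> &b)
{
  return normalize(a) == normalize(b);  // strcmp() == 0 in the real code
}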

View File

@@ -3662,6 +3662,41 @@ void select_max_min_finder_subselect::cleanup()
}
void select_max_min_finder_subselect::set_op(const Type_handler *th)
{
if (th->is_val_native_ready())
{
op= &select_max_min_finder_subselect::cmp_native;
return;
}
switch (th->cmp_type()) {
case REAL_RESULT:
op= &select_max_min_finder_subselect::cmp_real;
break;
case INT_RESULT:
op= &select_max_min_finder_subselect::cmp_int;
break;
case STRING_RESULT:
op= &select_max_min_finder_subselect::cmp_str;
break;
case DECIMAL_RESULT:
op= &select_max_min_finder_subselect::cmp_decimal;
break;
case TIME_RESULT:
if (th->field_type() == MYSQL_TYPE_TIME)
op= &select_max_min_finder_subselect::cmp_time;
else
op= &select_max_min_finder_subselect::cmp_str;
break;
case ROW_RESULT:
// This case should never be chosen
DBUG_ASSERT(0);
op= 0;
}
}
int select_max_min_finder_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_max_min_finder_subselect::send_data");
@@ -3680,30 +3715,7 @@ int select_max_min_finder_subselect::send_data(List<Item> &items)
if (!cache)
{
cache= val_item->get_cache(thd);
switch (val_item->cmp_type()) {
case REAL_RESULT:
op= &select_max_min_finder_subselect::cmp_real;
break;
case INT_RESULT:
op= &select_max_min_finder_subselect::cmp_int;
break;
case STRING_RESULT:
op= &select_max_min_finder_subselect::cmp_str;
break;
case DECIMAL_RESULT:
op= &select_max_min_finder_subselect::cmp_decimal;
break;
case TIME_RESULT:
if (val_item->field_type() == MYSQL_TYPE_TIME)
op= &select_max_min_finder_subselect::cmp_time;
else
op= &select_max_min_finder_subselect::cmp_str;
break;
case ROW_RESULT:
// This case should never be chosen
DBUG_ASSERT(0);
op= 0;
}
set_op(val_item->type_handler());
}
cache->store(val_item);
it->store(0, cache);
@@ -3797,6 +3809,26 @@ bool select_max_min_finder_subselect::cmp_str()
return (sortcmp(val1, val2, cache->collation.collation) < 0);
}
bool select_max_min_finder_subselect::cmp_native()
{
NativeBuffer<STRING_BUFFER_USUAL_SIZE> cvalue, mvalue;
Item *maxmin= ((Item_singlerow_subselect *)item)->element_index(0);
bool cvalue_is_null= cache->val_native(thd, &cvalue);
bool mvalue_is_null= maxmin->val_native(thd, &mvalue);
/* Ignore NULLs for ANY and keep them for ALL subqueries */
if (cvalue_is_null)
return (is_all && !mvalue_is_null) || (!is_all && mvalue_is_null);
if (mvalue_is_null)
return !is_all;
const Type_handler *th= cache->type_handler();
return fmax ? th->cmp_native(cvalue, mvalue) > 0 :
th->cmp_native(cvalue, mvalue) < 0;
}
int select_exists_subselect::send_data(List<Item> &items)
{
DBUG_ENTER("select_exists_subselect::send_data");

View File

@@ -6721,6 +6721,7 @@ class select_max_min_finder_subselect :public select_subselect
bool (select_max_min_finder_subselect::*op)();
bool fmax;
bool is_all;
void set_op(const Type_handler *ha);
public:
select_max_min_finder_subselect(THD *thd_arg, Item_subselect *item_arg,
bool mx, bool all):
@@ -6733,6 +6734,7 @@ public:
bool cmp_decimal();
bool cmp_str();
bool cmp_time();
bool cmp_native();
};
/* EXISTS subselect interface class */

View File

@@ -513,9 +513,28 @@ btr_page_alloc_low(
page should be initialized. */
dberr_t* err) /*!< out: error code */
{
buf_block_t *root= btr_root_block_get(index, RW_SX_LATCH, mtr, err);
const auto savepoint= mtr->get_savepoint();
buf_block_t *root= btr_root_block_get(index, RW_NO_LATCH, mtr, err);
if (UNIV_UNLIKELY(!root))
return root;
if (mtr->have_u_or_x_latch(*root))
{
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!root->index || !root->index->freed());
#endif
mtr->release_block_at_savepoint(savepoint, root);
}
else
{
mtr->u_lock_register(savepoint);
root->page.lock.u_lock();
#ifdef BTR_CUR_HASH_ADAPT
if (root->index)
mtr_t::defer_drop_ahi(root, MTR_MEMO_PAGE_SX_FIX);
#endif
}
fseg_header_t *seg_header= root->page.frame +
(level ? PAGE_HEADER + PAGE_BTR_SEG_TOP : PAGE_HEADER + PAGE_BTR_SEG_LEAF);
return fseg_alloc_free_page_general(seg_header, hint_page_no, file_direction,
@@ -584,8 +603,8 @@ dberr_t btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
bool blob, bool space_latched)
{
ut_ad(mtr->memo_contains_flagged(block, MTR_MEMO_PAGE_X_FIX));
#ifdef BTR_CUR_HASH_ADAPT
if (block->index && !block->index->freed())
#if defined BTR_CUR_HASH_ADAPT && defined UNIV_DEBUG
if (btr_search_check_marked_free_index(block))
{
ut_ad(!blob);
ut_ad(page_is_leaf(block->page.frame));
@@ -610,9 +629,28 @@ dberr_t btr_page_free(dict_index_t* index, buf_block_t* block, mtr_t* mtr,
fil_space_t *space= index->table->space;
dberr_t err;
if (page_t* root = btr_root_get(index, mtr, &err))
const auto savepoint= mtr->get_savepoint();
if (buf_block_t *root= btr_root_block_get(index, RW_NO_LATCH, mtr, &err))
{
err= fseg_free_page(&root[blob || page_is_leaf(block->page.frame)
if (mtr->have_u_or_x_latch(*root))
{
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!root->index || !root->index->freed());
#endif
mtr->release_block_at_savepoint(savepoint, root);
}
else
{
mtr->u_lock_register(savepoint);
root->page.lock.u_lock();
#ifdef BTR_CUR_HASH_ADAPT
if (root->index)
mtr_t::defer_drop_ahi(root, MTR_MEMO_PAGE_SX_FIX);
#endif
}
err= fseg_free_page(&root->page.frame[blob ||
page_is_leaf(block->page.frame)
? PAGE_HEADER + PAGE_BTR_SEG_LEAF
: PAGE_HEADER + PAGE_BTR_SEG_TOP],
space, page, mtr, space_latched);

View File

@@ -283,7 +283,7 @@ latch_block:
block->page.fix();
block->page.lock.x_lock();
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->index || !block->index->freed());
ut_ad(!btr_search_check_marked_free_index(block));
#endif
if (UNIV_LIKELY_NULL(rtr_info)) {
@@ -7020,7 +7020,7 @@ btr_store_big_rec_extern_fields(
rec_block->page.fix();
rec_block->page.lock.x_lock();
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!rec_block->index || !rec_block->index->freed());
ut_ad(!btr_search_check_marked_free_index(rec_block));
#endif
uint32_t hint_prev = prev_page_no;
@@ -7398,7 +7398,7 @@ skip_free:
block->fix();
block->page.lock.x_lock();
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!block->index || !block->index->freed());
ut_ad(!btr_search_check_marked_free_index(block));
#endif
const page_t* page = buf_block_get_frame(ext_block);

View File

@@ -1280,8 +1280,11 @@ fail_and_release_page:
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block)
i.e.: it is in state BUF_BLOCK_REMOVE_HASH
@param[in] garbage_collect drop ahi only if the index is marked
as freed */
void btr_search_drop_page_hash_index(buf_block_t* block,
bool garbage_collect)
{
ulint n_fields;
ulint n_bytes;
@@ -1317,13 +1320,21 @@ retry:
auto part = btr_search_sys.get_part(index_id,
block->page.id().space());
part->latch.rd_lock(SRW_LOCK_CALL);
dict_index_t* index = block->index;
bool is_freed = index && index->freed();
if (is_freed) {
part->latch.rd_unlock();
part->latch.wr_lock(SRW_LOCK_CALL);
} else {
part->latch.rd_lock(SRW_LOCK_CALL);
if (index != block->index) {
part->latch.wr_unlock();
goto retry;
}
} else if (garbage_collect) {
part->latch.rd_unlock();
return;
}
assert_block_ahi_valid(block);
@@ -1798,12 +1809,13 @@ drop_exit:
return;
}
ahi_latch->rd_lock(SRW_LOCK_CALL);
if (index->freed()) {
ahi_latch->rd_unlock();
goto drop_exit;
}
ahi_latch->rd_lock(SRW_LOCK_CALL);
if (block->index) {
uint16_t n_fields = block->curr_n_fields;
uint16_t n_bytes = block->curr_n_bytes;
@@ -2395,5 +2407,20 @@ btr_search_validate()
return(true);
}
#ifdef UNIV_DEBUG
bool btr_search_check_marked_free_index(const buf_block_t *block)
{
const index_id_t index_id= btr_page_get_index_id(block->page.frame);
auto part= btr_search_sys.get_part(index_id, block->page.id().space());
part->latch.rd_lock(SRW_LOCK_CALL);
bool is_freed= block->index && block->index->freed();
part->latch.rd_unlock();
return is_freed;
}
#endif /* UNIV_DEBUG */
#endif /* defined UNIV_AHI_DEBUG || defined UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */

View File

@@ -2750,11 +2750,7 @@ re_evict:
&& state < buf_page_t::WRITE_FIX));
#ifdef BTR_CUR_HASH_ADAPT
if (dict_index_t* index = block->index) {
if (index->freed()) {
btr_search_drop_page_hash_index(block);
}
}
btr_search_drop_page_hash_index(block, true);
#endif /* BTR_CUR_HASH_ADAPT */
dberr_t e;
@@ -2812,11 +2808,9 @@ get_latch:
}
get_latch_valid:
#ifdef BTR_CUR_HASH_ADAPT
if (dict_index_t* index = block->index) {
if (index->freed()) {
if (block->index) {
mtr_t::defer_drop_ahi(block, fix_type);
}
}
#endif /* BTR_CUR_HASH_ADAPT */
mtr->memo_push(block, fix_type);
break;

View File

@@ -544,19 +544,9 @@ static void buf_tmp_reserve_compression_buf(buf_tmp_buffer_t* slot)
buffer be bigger than input buffer. Adjust the allocated size. */
ulint size= srv_page_size;
if (provider_service_lzo->is_loaded)
{
size= LZO1X_1_15_MEM_COMPRESS;
#ifdef HAVE_ALIGNED_ALLOC
size= MY_ALIGN(size, srv_page_size);
#endif
}
else if (provider_service_snappy->is_loaded)
{
size= snappy_max_compressed_length(size);
#ifdef HAVE_ALIGNED_ALLOC
size= MY_ALIGN(size, srv_page_size);
#endif
}
slot->comp_buf= static_cast<byte*>(aligned_malloc(size, srv_page_size));
}

View File

@@ -662,7 +662,7 @@ dict_table_t::parse_name<>(char(&)[NAME_LEN + 1], char(&)[NAME_LEN + 1],
@param[in] table_op operation to perform when opening
@return table object after locking MDL shared
@retval nullptr if the table is not readable, or if trylock && MDL blocked */
template<bool trylock>
template<bool trylock, bool purge_thd>
dict_table_t*
dict_acquire_mdl_shared(dict_table_t *table,
THD *thd,
@@ -674,9 +674,11 @@ dict_acquire_mdl_shared(dict_table_t *table,
MDL_context *mdl_context= static_cast<MDL_context*>(thd_mdl_context(thd));
size_t db_len;
dict_table_t *not_found= nullptr;
if (trylock)
{
static_assert(!trylock || !purge_thd, "usage");
dict_sys.freeze(SRW_LOCK_CALL);
db_len= dict_get_db_name_len(table->name.m_name);
dict_sys.unfreeze();
@@ -748,7 +750,13 @@ retry:
}
}
retry_table_open:
dict_sys.freeze(SRW_LOCK_CALL);
if (purge_thd && purge_sys.must_wait_FTS())
{
not_found= reinterpret_cast<dict_table_t*>(-1);
goto return_without_mdl;
}
table= dict_sys.find_table(table_id);
if (table)
table->acquire();
@@ -756,6 +764,11 @@ retry:
{
dict_sys.unfreeze();
dict_sys.lock(SRW_LOCK_CALL);
if (purge_thd && purge_sys.must_wait_FTS())
{
dict_sys.unlock();
goto retry_table_open;
}
table= dict_load_table_on_id(table_id,
table_op == DICT_TABLE_OP_LOAD_TABLESPACE
? DICT_ERR_IGNORE_RECOVER_LOCK
@@ -777,7 +790,7 @@ return_without_mdl:
mdl_context->release_lock(*mdl);
*mdl= nullptr;
}
return nullptr;
return not_found;
}
size_t db1_len, tbl1_len;
@@ -814,9 +827,9 @@ return_without_mdl:
goto retry;
}
template dict_table_t* dict_acquire_mdl_shared<false>
template dict_table_t* dict_acquire_mdl_shared<false, false>
(dict_table_t*,THD*,MDL_ticket**,dict_table_op_t);
template dict_table_t* dict_acquire_mdl_shared<true>
template dict_table_t* dict_acquire_mdl_shared<true, false>
(dict_table_t*,THD*,MDL_ticket**,dict_table_op_t);
/** Look up a table by numeric identifier.
@@ -842,13 +855,14 @@ dict_table_open_on_id(table_id_t table_id, bool dict_locked,
{
if (purge_thd && purge_sys.must_wait_FTS())
{
table= nullptr;
table= reinterpret_cast<dict_table_t*>(-1);
goto func_exit;
}
table->acquire();
if (thd && !dict_locked)
table= dict_acquire_mdl_shared<false>(table, thd, mdl, table_op);
table= dict_acquire_mdl_shared<false, purge_thd>(
table, thd, mdl, table_op);
}
else if (table_op != DICT_TABLE_OP_OPEN_ONLY_IF_CACHED)
{
@@ -866,7 +880,7 @@ dict_table_open_on_id(table_id_t table_id, bool dict_locked,
if (purge_thd && purge_sys.must_wait_FTS())
{
dict_sys.unlock();
return nullptr;
return reinterpret_cast<dict_table_t*>(-1);
}
table->acquire();
}
@@ -876,7 +890,8 @@ dict_table_open_on_id(table_id_t table_id, bool dict_locked,
if (table && thd)
{
dict_sys.freeze(SRW_LOCK_CALL);
table= dict_acquire_mdl_shared<false>(table, thd, mdl, table_op);
table= dict_acquire_mdl_shared<false, purge_thd>(
table, thd, mdl, table_op);
dict_sys.unfreeze();
}
return table;
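
Note the convention these hunks introduce: dict_table_open_on_id() can now return three distinct things — a real table pointer, nullptr for "not found", and reinterpret_cast<dict_table_t*>(-1) for "purge must wait for FTS cleanup, retry". A minimal sketch of the sentinel test that callers such as the purge coordinator perform (hypothetical helper name, not the InnoDB API):

struct dict_table_t;  // opaque for the sketch

// nullptr = "not found"; all-ones pointer = "purge must wait, retry".
static dict_table_t *const PURGE_MUST_WAIT =
    reinterpret_cast<dict_table_t*>(-1);

static inline bool purge_must_retry(const dict_table_t *table)
{
  return table == PURGE_MUST_WAIT;
}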

View File

@@ -1662,6 +1662,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
fseg_inode_t* inode;
ib_id_t seg_id;
uint32_t n_reserved;
bool reserved_extent = false;
DBUG_ENTER("fseg_create");
@@ -1675,14 +1676,6 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
ut_ad(!block || block->page.id().space() == space->id);
if (!has_done_reservation) {
*err = fsp_reserve_free_extents(&n_reserved, space, 2,
FSP_NORMAL, mtr);
if (UNIV_UNLIKELY(*err != DB_SUCCESS)) {
DBUG_RETURN(nullptr);
}
}
buf_block_t* header = fsp_get_header(space, mtr, err);
if (!header) {
block = nullptr;
@@ -1691,10 +1684,32 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
buf_block_t* iblock;
inode_alloc:
inode = fsp_alloc_seg_inode(space, header, &iblock, mtr, err);
if (inode == NULL) {
if (!inode) {
block = nullptr;
reserve_extent:
if (!has_done_reservation && !reserved_extent) {
*err = fsp_reserve_free_extents(&n_reserved, space, 2,
FSP_NORMAL, mtr);
if (UNIV_UNLIKELY(*err != DB_SUCCESS)) {
DBUG_RETURN(nullptr);
}
/* Extents reserved successfully. So
try allocating the page or inode */
reserved_extent = true;
if (inode) {
goto page_alloc;
}
goto inode_alloc;
}
if (inode) {
fsp_free_seg_inode(space, inode, iblock, mtr);
}
goto funct_exit;
}
@@ -1722,6 +1737,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
FSEG_FRAG_SLOT_SIZE * FSEG_FRAG_ARR_N_SLOTS, 0xff);
if (!block) {
page_alloc:
block = fseg_alloc_free_page_low(space,
inode, iblock, 0, FSP_UP,
#ifdef UNIV_DEBUG
@@ -1729,13 +1745,9 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
#endif /* UNIV_DEBUG */
mtr, mtr, err);
/* The allocation cannot fail if we have already reserved a
space for the page. */
ut_ad(!has_done_reservation || block != NULL);
if (!block) {
fsp_free_seg_inode(space, inode, iblock, mtr);
goto funct_exit;
ut_ad(!has_done_reservation);
goto reserve_extent;
}
ut_d(const auto x = block->page.lock.x_lock_count());
@@ -1757,7 +1769,7 @@ fseg_create(fil_space_t *space, ulint byte_offset, mtr_t *mtr, dberr_t *err,
+ block->page.frame, space->id);
funct_exit:
if (!has_done_reservation) {
if (!has_done_reservation && reserved_extent) {
space->release_free_extents(n_reserved);
}
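
The restructuring above makes the extent reservation lazy: fseg_create() first tries the inode and page allocation optimistically, calls fsp_reserve_free_extents() only when an allocation fails, retries at most once, and releases the reservation on exit. The control flow reduced to a sketch with hypothetical alloc/reserve/release callbacks (not the InnoDB API):

// Sketch of the deferred-reservation pattern.
template <typename Alloc, typename Reserve, typename Release>
bool alloc_with_lazy_reservation(Alloc alloc, Reserve reserve,
                                 Release release)
{
  bool reserved = false;
  bool ok;
  for (;;) {
    ok = alloc();
    if (ok || reserved)
      break;                 // success, or we already retried once
    if (!(reserved = reserve()))
      break;                 // reservation itself failed: give up
  }
  if (reserved)
    release();               // mirrors the funct_exit cleanup above
  return ok;
}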

View File

@@ -1,7 +1,7 @@
/*****************************************************************************
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2017, 2021, MariaDB Corporation.
Copyright (c) 2017, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -100,8 +100,11 @@ btr_search_move_or_delete_hash_entries(
index page for which we know that
block->buf_fix_count == 0 or it is an index page which
has already been removed from the buf_pool.page_hash
i.e.: it is in state BUF_BLOCK_REMOVE_HASH */
void btr_search_drop_page_hash_index(buf_block_t* block);
i.e.: it is in state BUF_BLOCK_REMOVE_HASH
@param[in] garbage_collect drop ahi only if the index is marked
as freed */
void btr_search_drop_page_hash_index(buf_block_t* block,
bool garbage_collect= false);
/** Drop possible adaptive hash index entries when a page is evicted
from the buffer pool or freed in a file, or the index is being dropped.
@@ -146,16 +149,23 @@ static inline void btr_search_s_lock_all();
/** Unlock all search latches from shared mode. */
static inline void btr_search_s_unlock_all();
# ifdef UNIV_DEBUG
/** @return if the index is marked as freed */
bool btr_search_check_marked_free_index(const buf_block_t *block);
# endif /* UNIV_DEBUG */
#else /* BTR_CUR_HASH_ADAPT */
# define btr_search_sys_create()
# define btr_search_sys_free()
# define btr_search_drop_page_hash_index(block)
# define btr_search_drop_page_hash_index(block, garbage_collect)
# define btr_search_s_lock_all(index)
# define btr_search_s_unlock_all(index)
# define btr_search_info_update(index, cursor)
# define btr_search_move_or_delete_hash_entries(new_block, block)
# define btr_search_update_hash_on_insert(cursor, ahi_latch)
# define btr_search_update_hash_on_delete(cursor)
# ifdef UNIV_DEBUG
# define btr_search_check_marked_free_index(block)
# endif /* UNIV_DEBUG */
#endif /* BTR_CUR_HASH_ADAPT */
#ifdef BTR_CUR_ADAPT

View File

@@ -132,7 +132,7 @@ enum dict_table_op_t {
@param[in] table_op operation to perform when opening
@return table object after locking MDL shared
@retval NULL if the table is not readable, or if trylock && MDL blocked */
template<bool trylock>
template<bool trylock, bool purge_thd= false>
dict_table_t*
dict_acquire_mdl_shared(dict_table_t *table,
THD *thd,

View File

@@ -1336,7 +1336,7 @@ public:
@param n_cols number of columns whose collation is changing */
void init_change_cols(unsigned n_cols)
{
ut_ad(n_fields > n_cols);
ut_ad(n_fields > n_cols || type & DICT_FTS);
change_col_info= static_cast<col_info*>
(mem_heap_zalloc(heap, sizeof(col_info)));
change_col_info->n_cols= n_cols;

View File

@@ -178,6 +178,10 @@ struct mtr_t {
@param block buffer pool block to search for */
bool have_x_latch(const buf_block_t &block) const;
/** Check if we are holding a block latch in U or X mode
@param block buffer pool block to search for */
bool have_u_or_x_latch(const buf_block_t &block) const;
/** Copy the tablespaces associated with the mini-transaction
(needed for generating FILE_MODIFY records)
@param[in] mtr mini-transaction that may modify
@@ -334,6 +338,15 @@ public:
@param rw_latch RW_S_LATCH, RW_SX_LATCH, RW_X_LATCH, RW_NO_LATCH */
void page_lock(buf_block_t *block, ulint rw_latch);
/** Register a U latch on a block that was buffer-fixed.
@param savepoint memo savepoint of the buffer-fixed block */
void u_lock_register(ulint savepoint)
{
mtr_memo_slot_t *slot= m_memo.at<mtr_memo_slot_t*>(savepoint);
ut_ad(slot->type == MTR_MEMO_BUF_FIX);
slot->type= MTR_MEMO_PAGE_SX_FIX;
}
/** Upgrade U locks on a block to X */
void page_lock_upgrade(const buf_block_t &block);
/** Upgrade X lock to X */

View File

@@ -829,7 +829,29 @@ processed:
fil_space_t *space= fil_space_t::create(it->first, flags,
FIL_TYPE_TABLESPACE, crypt_data);
ut_ad(space);
space->add(name.c_str(), OS_FILE_CLOSED, size, false, false);
const char *filename= name.c_str();
if (srv_operation == SRV_OPERATION_RESTORE)
{
const char* tbl_name = strrchr(filename, '/');
#ifdef _WIN32
if (const char *last = strrchr(filename, '\\'))
{
if (last > tbl_name)
tbl_name = last;
}
#endif
if (tbl_name)
{
while (--tbl_name > filename &&
#ifdef _WIN32
*tbl_name != '\\' &&
#endif
*tbl_name != '/');
if (tbl_name > filename)
filename= tbl_name + 1;
}
}
space->add(filename, OS_FILE_CLOSED, size, false, false);
space->recv_size= it->second.size;
space->size_in_header= size;
return space;
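
For SRV_OPERATION_RESTORE, the hunk above keeps only the trailing database/table.ibd component of the recovered data file path before registering the tablespace. Its pointer walk in isolation (a POSIX-path-only sketch; the real code also handles backslashes on Windows):

#include <cstdio>
#include <cstring>

static const char *trim_to_db_and_table(const char *filename)
{
  const char *tbl_name = strrchr(filename, '/');
  if (!tbl_name)
    return filename;                 // no directory component at all
  // Walk back past the database directory component.
  while (--tbl_name > filename && *tbl_name != '/') {}
  return tbl_name > filename ? tbl_name + 1 : filename;
}

int main()
{
  // prints "test/t1.ibd"
  std::printf("%s\n", trim_to_db_and_table("./backup/test/t1.ibd"));
  return 0;
}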
@@ -1213,9 +1235,6 @@ static void fil_name_process(const char *name, ulint len, uint32_t space_id,
d->deleted = true;
goto got_deleted;
}
if (ftype == FILE_RENAME) {
d->file_name= fname.name;
}
goto reload;
}

View File

@@ -1612,6 +1612,21 @@ struct FindBlockX
}
};
/** Find out whether a block was not X or U latched by the mini-transaction */
struct FindBlockUX
{
const buf_block_t &block;
FindBlockUX(const buf_block_t &block): block(block) {}
/** @return whether the block was not found U or X latched */
bool operator()(const mtr_memo_slot_t *slot) const
{
return slot->object != &block ||
!(slot->type & (MTR_MEMO_PAGE_X_FIX | MTR_MEMO_PAGE_SX_FIX));
}
};
#ifdef UNIV_DEBUG
/** Assert that the block is not present in the mini-transaction */
struct FindNoBlock
@@ -1642,6 +1657,14 @@ bool mtr_t::have_x_latch(const buf_block_t &block) const
return true;
}
bool mtr_t::have_u_or_x_latch(const buf_block_t &block) const
{
if (m_memo.for_each_block(CIterate<FindBlockUX>(FindBlockUX(block))))
return false;
ut_ad(block.page.lock.have_u_or_x());
return true;
}
/** Check if we are holding exclusive tablespace latch
@param space tablespace to search for
@param shared whether to look for shared latch, instead of exclusive

View File

@@ -941,7 +941,8 @@ try_again:
table_id, false, DICT_TABLE_OP_NORMAL, node->purge_thd,
&node->mdl_ticket);
if (!node->table && purge_sys.must_wait_FTS()) {
if (node->table == reinterpret_cast<dict_table_t*>(-1)) {
/* purge stop signal */
goto try_again;
}

View File

@@ -290,6 +290,11 @@ bool trx_rseg_read_wsrep_checkpoint(XID& xid)
buf_block_t *trx_rseg_t::get(mtr_t *mtr, dberr_t *err) const
{
if (!space)
{
if (err) *err= DB_TABLESPACE_NOT_FOUND;
return nullptr;
}
return buf_page_get_gen(page_id(), 0, RW_X_LATCH, nullptr,
BUF_GET, mtr, err);
}
@@ -435,6 +440,8 @@ static dberr_t trx_undo_lists_init(trx_rseg_t *rseg, trx_id_t &max_trx_id,
static dberr_t trx_rseg_mem_restore(trx_rseg_t *rseg, trx_id_t &max_trx_id,
mtr_t *mtr)
{
if (!rseg->space)
return DB_TABLESPACE_NOT_FOUND;
dberr_t err;
const buf_block_t *rseg_hdr=
buf_page_get_gen(rseg->page_id(), 0, RW_S_LATCH, nullptr, BUF_GET, mtr,

View File

@@ -0,0 +1,38 @@
#
# MDEV-29008 Server crash or assertion `field' failed in spider_db_open_item_ident / group by handler
#
for master_1
for child2
child2_1
child2_2
child2_3
for child3
connection child2_1;
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
CREATE TABLE tbl_a (
a INT,
b INT
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
INSERT INTO tbl_a VALUES (1,2),(3,4);
connection master_1;
CREATE DATABASE auto_test_local;
USE auto_test_local;
CREATE TABLE tbl_a (
a INT,
b INT
) ENGINE=Spider DEFAULT CHARSET=utf8 COMMENT='table "tbl_a", srv "s_2_1"';
SELECT MIN(t2.a) AS f1, t1.b AS f2 FROM tbl_a AS t1 JOIN tbl_a AS t2 GROUP BY f2 ORDER BY f1, f2;
f1 f2
1 2
1 4
connection master_1;
DROP DATABASE IF EXISTS auto_test_local;
connection child2_1;
DROP DATABASE IF EXISTS auto_test_remote;
for master_1
for child2
child2_1
child2_2
child2_3
for child3

View File

@@ -0,0 +1,3 @@
!include include/default_mysqld.cnf
!include ../my_1_1.cnf
!include ../my_2_1.cnf

View File

@@ -0,0 +1,39 @@
--echo #
--echo # MDEV-29008 Server crash or assertion `field' failed in spider_db_open_item_ident / group by handler
--echo #
--disable_query_log
--disable_result_log
--source ../../t/test_init.inc
--enable_result_log
--enable_query_log
--connection child2_1
CREATE DATABASE auto_test_remote;
USE auto_test_remote;
eval CREATE TABLE tbl_a (
a INT,
b INT
) $CHILD2_1_ENGINE $CHILD2_1_CHARSET;
INSERT INTO tbl_a VALUES (1,2),(3,4);
--connection master_1
CREATE DATABASE auto_test_local;
USE auto_test_local;
eval CREATE TABLE tbl_a (
a INT,
b INT
) $MASTER_1_ENGINE $MASTER_1_CHARSET COMMENT='table "tbl_a", srv "s_2_1"';
SELECT MIN(t2.a) AS f1, t1.b AS f2 FROM tbl_a AS t1 JOIN tbl_a AS t2 GROUP BY f2 ORDER BY f1, f2;
--connection master_1
DROP DATABASE IF EXISTS auto_test_local;
--connection child2_1
DROP DATABASE IF EXISTS auto_test_remote;
--disable_query_log
--disable_result_log
--source ../t/test_deinit.inc
--enable_query_log
--enable_result_log

View File

@@ -1,5 +1,5 @@
/* Copyright (C) 2008-2019 Kentoku Shiba
Copyright (C) 2019, 2020, MariaDB Corporation.
Copyright (C) 2019, 2022, MariaDB Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -7625,8 +7625,6 @@ int spider_db_open_item_ident(
SPIDER_FIELD_HOLDER *field_holder = field_chain->field_holder;
spider = field_holder->spider;
share = spider->share;
field = spider->field_exchange(field);
DBUG_ASSERT(field);
if ((error_num = share->dbton_share[dbton_id]->
append_column_name_with_alias(str, field->field_index,
field_holder->alias->ptr(), field_holder->alias->length())))