
Merge 10.9 into 10.10

Author: Nayuta Yanagisawa
Date: 2022-09-09 20:10:23 +09:00
25 changed files with 346 additions and 201 deletions


@@ -33,7 +33,7 @@ PROJECT(MySQL)
 # in RPM's:
 #set(CPACK_RPM_SPEC_MORE_DEFINE "%define __spec_install_post /bin/true")
-FOREACH(p CMP0022 CMP0046 CMP0040 CMP0048 CMP0054 CMP0075 CMP0069)
+FOREACH(p CMP0022 CMP0046 CMP0040 CMP0048 CMP0054 CMP0075 CMP0069 CMP0135)
 IF(POLICY ${p})
 CMAKE_POLICY(SET ${p} NEW)
 ENDIF()


@@ -125,7 +125,7 @@ case "${LSBNAME}" in
 replace_uring_with_aio
 disable_libfmt
 ;&
-impish|jammy)
+impish|jammy|kinetic)
 # mariadb-plugin-rocksdb s390x not supported by us (yet)
 # ubuntu doesn't support mips64el yet, so keep this just
 # in case something changes.

debian/salsa-ci.yml

@@ -494,7 +494,73 @@ mysql-8.0 Jammy to mariadb upgrade:
 variables:
 - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/
-mariadb.org-10.7 to mariadb-10.10 upgrade:
+mariadb.org 10.9 to mariadb upgrade:
+stage: upgrade extras
+needs:
+- job: build
+image: debian:${RELEASE}
+artifacts:
+when: always
+name: "$CI_BUILD_NAME"
+paths:
+- ${WORKING_DIR}/debug
+script:
+- *test-prepare-container
+- apt install -y curl
+- curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc
+- echo "deb https://deb.mariadb.org/10.9/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list
+- apt-get update
+- apt-get install -y mariadb-server-10.9
+- *test-verify-initial
+# Install MariaDB built in this commit
+# Force downgrades so our version installs on top of upstream revision, e.g. 1:10.9.1-1 vs 1:10.9.1+mariadb~sid
+- apt-get install -y --allow-downgrades ./*.deb
+# Verify installation of MariaDB built in this commit
+- dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed
+- mariadb --version # Client version
+- service mariadb status # There is no init.d/mysql in MariaDB 10.5+
+- *test-verify-final
+variables:
+GIT_STRATEGY: none
+except:
+variables:
+- $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/
+allow_failure: true
+mariadb.org-10.8 to mariadb upgrade:
+stage: upgrade extras
+needs:
+- job: build
+image: debian:${RELEASE}
+artifacts:
+when: always
+name: "$CI_BUILD_NAME"
+paths:
+- ${WORKING_DIR}/debug
+script:
+- *test-prepare-container
+- apt install -y curl
+- curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc
+- echo "deb https://deb.mariadb.org/10.8/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list
+- apt-get update
+- apt-get install -y mariadb-server-10.8
+- *test-verify-initial
+# Install MariaDB built in this commit
+# Force downgrades so our version installs on top of upstream revision, e.g. 1:10.9.1-1 vs 1:10.9.1+mariadb~sid
+- apt-get install -y --allow-downgrades ./*.deb
+# Verify installation of MariaDB built in this commit
+- dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed
+- mariadb --version # Client version
+- service mariadb status # There is no init.d/mysql in MariaDB 10.5+
+- *test-verify-final
+variables:
+GIT_STRATEGY: none
+except:
+variables:
+- $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/
+allow_failure: true
+mariadb.org-10.7 to mariadb upgrade:
 stage: upgrade extras
 needs:
 - job: build
@@ -510,13 +576,10 @@ mariadb.org-10.7 to mariadb-10.10 upgrade:
 - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc
 - echo "deb https://deb.mariadb.org/10.7/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list
 - apt-get update
-# Package libmariadbclient-dev from mariadb.org conflicts with libmariadb-dev in Sid, so cannot use wildcard that would include it
-# Enable this line when there is a way to install them only from the mariadb.org repo
-# - apt-get install -y 'mariadb*' libmariadb3 'libmariadb-*' 'libmariadbd*'
 - apt-get install -y mariadb-server-10.7
 - *test-verify-initial
 # Install MariaDB built in this commit
-# Force downgrades so our version installs on top of upstream revision, e.g. 1:10.5.5-1 vs 1:10.5.5+mariadb~sid
+# Force downgrades so our version installs on top of upstream revision, e.g. 1:10.9.1-1 vs 1:10.9.1+mariadb~sid
 - apt-get install -y --allow-downgrades ./*.deb
 # Verify installation of MariaDB built in this commit
 - dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed
@@ -529,12 +592,8 @@ mariadb.org-10.7 to mariadb-10.10 upgrade:
 variables:
 - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/
 allow_failure: true
-# Installation on Sid fails on missing liburing1 because upstream 10.8
-# MariaDB.org buildbot has not run 'apt upgrade' for a long time.
-# Remove this allow_failure once buildbot has built a new 10.8
-# release using latest liburing-dev in Debian Sid.
-mariadb.org-10.6 to mariadb-10.8 upgrade:
+mariadb.org-10.6 to mariadb upgrade:
 stage: upgrade extras
 needs:
 - job: build
@@ -550,13 +609,10 @@ mariadb.org-10.6 to mariadb-10.8 upgrade:
 - curl -sS https://mariadb.org/mariadb_release_signing_key.asc -o /etc/apt/trusted.gpg.d/mariadb.asc
 - echo "deb https://deb.mariadb.org/10.6/debian ${RELEASE} main" > /etc/apt/sources.list.d/mariadb.list
 - apt-get update
-# Package libmariadbclient-dev from mariadb.org conflicts with libmariadb-dev in Sid, so cannot use wildcard that would include it
-# Enable this line when there is a way to install them only from the mariadb.org repo
-# - apt-get install -y 'mariadb*' libmariadb3 'libmariadb-*' 'libmariadbd*'
 - apt-get install -y mariadb-server-10.6
 - *test-verify-initial
 # Install MariaDB built in this commit
-# Force downgrades so our version installs on top of upstream revision, e.g. 1:10.5.5-1 vs 1:10.5.5+mariadb~sid
+# Force downgrades so our version installs on top of upstream revision, e.g. 1:10.9.1-1 vs 1:10.9.1+mariadb~sid
 - apt-get install -y --allow-downgrades ./*.deb
 # Verify installation of MariaDB built in this commit
 - dpkg -l | grep -iE 'maria|mysql|galera' || true # List installed
@@ -569,12 +625,8 @@ mariadb.org-10.6 to mariadb-10.8 upgrade:
 variables:
 - $CI_COMMIT_TAG != null && $SALSA_CI_ENABLE_PIPELINE_ON_TAGS !~ /^(1|yes|true)$/
 allow_failure: true
-# Installation on Sid fails on missing liburing1 because upstream 10.10
-# MariaDB.org buildbot has not run 'apt upgrade' for a long time.
-# Remove this allow_failure once buildbot has built a new 10.10
-# release using latest liburing-dev in Debian Sid.
-mariadb.org-10.5 to mariadb-10.10 upgrade:
+mariadb.org-10.5 to mariadb upgrade:
 stage: upgrade extras
 needs:
 - job: build


@@ -44,13 +44,13 @@ drop table t0,t1,t2;
 create table t1 (
 pk int, a int, b int,
 primary key (pk), index idx1(b), index idx2(b)
-) engine=innodb;
+) engine=innodb STATS_AUTO_RECALC=0;
 Warnings:
 Note 1831 Duplicate index `idx2`. This is deprecated and will be disallowed in a future release
 insert into t1 values (1,6,0),(2,1,0),(3,5,2),(4,8,0);
-create table t2 (c int) engine=innodb;
+create table t2 (c int) engine=innodb STATS_AUTO_RECALC=0;
 insert into t2 values (1),(2);
-create table t3 (d int) engine=innodb;
+create table t3 (d int) engine=innodb STATS_AUTO_RECALC=0;
 insert into t3 values (3),(-1),(4);
 set @save_optimizer_switch=@@optimizer_switch;
 set optimizer_switch='extended_keys=on';


@@ -54,11 +54,11 @@ drop table t0,t1,t2;
 create table t1 (
 pk int, a int, b int,
 primary key (pk), index idx1(b), index idx2(b)
-) engine=innodb;
+) engine=innodb STATS_AUTO_RECALC=0;
 insert into t1 values (1,6,0),(2,1,0),(3,5,2),(4,8,0);
-create table t2 (c int) engine=innodb;
+create table t2 (c int) engine=innodb STATS_AUTO_RECALC=0;
 insert into t2 values (1),(2);
-create table t3 (d int) engine=innodb;
+create table t3 (d int) engine=innodb STATS_AUTO_RECALC=0;
 insert into t3 values (3),(-1),(4);
 set @save_optimizer_switch=@@optimizer_switch;


@@ -1,4 +1,3 @@
-source include/not_windows.inc;
 source include/not_embedded.inc;
 source include/have_debug.inc;
 --echo #
@@ -8,7 +7,7 @@ call mtr.add_suppression('Thread .* did not exit');
 set @old_dbug=@@global.debug_dbug;
 set global debug_dbug='+d,CONNECT_wait';
 select variable_value into @cons from information_schema.global_status where variable_name='connections';
-exec $MYSQL -e 'select sleep(3600)' >/dev/null 2>&1 &;
+exec $MYSQL -e "select sleep(3600)" >/dev/null 2>&1 &;
 let $wait_condition= select variable_value>@cons from information_schema.global_status where variable_name='connections';
 source include/wait_condition.inc;
 source include/restart_mysqld.inc;


@@ -176,5 +176,25 @@ t3 CREATE TABLE `t3` (
 PRIMARY KEY (`id`),
 UNIQUE KEY `v2` (`v2`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1
-DROP TABLE t1,t2,t3;
+DROP TABLE t2,t3;
+#
+# MDEV-29440 InnoDB instant ALTER TABLE recovery wrongly uses
+# READ COMMITTED isolation level instead of READ UNCOMMITTED
+#
+CREATE TABLE t2(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5),(6);
+connect ddl, localhost, root;
+SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
+ALTER TABLE t2 ADD COLUMN b TINYINT UNSIGNED NOT NULL DEFAULT 42 FIRST;
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR ddl';
+SET GLOBAL innodb_flush_log_at_trx_commit=1;
+DELETE FROM t1;
+# Kill the server
+disconnect ddl;
+# restart
+CHECK TABLE t2;
+Table Op Msg_type Msg_text
+test.t2 check status OK
+DROP TABLE t1,t2;
 db.opt


@@ -1,11 +1,12 @@
 CREATE TABLE t(a INT PRIMARY KEY) ENGINE=InnoDB;
 INSERT INTO t VALUES (3);
 BEGIN;
+connection default;
 UPDATE t SET a = 2;
 connect con1,localhost,root;
+SET DEBUG_SYNC="lock_wait_start SIGNAL del_locked";
 DELETE FROM t;
 connection default;
+SET DEBUG_SYNC="now WAIT_FOR del_locked";
 UPDATE t SET a = 1;
 COMMIT;
 connection con1;
@@ -17,4 +18,5 @@ connection default;
 SELECT count(*) FROM t;
 count(*)
 1
+SET DEBUG_SYNC="reset";
 DROP TABLE t;


@@ -198,6 +198,30 @@ disconnect ddl;
 SHOW CREATE TABLE t1;
 SHOW CREATE TABLE t2;
 SHOW CREATE TABLE t3;
-DROP TABLE t1,t2,t3;
+DROP TABLE t2,t3;
+--echo #
+--echo # MDEV-29440 InnoDB instant ALTER TABLE recovery wrongly uses
+--echo # READ COMMITTED isolation level instead of READ UNCOMMITTED
+--echo #
+CREATE TABLE t2(a INT UNSIGNED PRIMARY KEY) ENGINE=InnoDB;
+INSERT INTO t2 VALUES (1),(2),(3),(4),(5),(6);
+connect ddl, localhost, root;
+SET DEBUG_SYNC='innodb_alter_inplace_before_commit SIGNAL ddl WAIT_FOR ever';
+--send
+ALTER TABLE t2 ADD COLUMN b TINYINT UNSIGNED NOT NULL DEFAULT 42 FIRST;
+connection default;
+SET DEBUG_SYNC='now WAIT_FOR ddl';
+SET GLOBAL innodb_flush_log_at_trx_commit=1;
+DELETE FROM t1;
+--source include/kill_mysqld.inc
+disconnect ddl;
+--source include/start_mysqld.inc
+CHECK TABLE t2;
+DROP TABLE t1,t2;
 --list_files $MYSQLD_DATADIR/test


@@ -1,23 +1,20 @@
 --source include/have_innodb.inc
 --source include/count_sessions.inc
+--source include/have_debug.inc
+--source include/have_debug_sync.inc
 CREATE TABLE t(a INT PRIMARY KEY) ENGINE=InnoDB;
 INSERT INTO t VALUES (3);
 BEGIN;
+connection default;
 UPDATE t SET a = 2;
 connect con1,localhost,root;
+SET DEBUG_SYNC="lock_wait_start SIGNAL del_locked";
 send DELETE FROM t;
 connection default;
-let $wait_condition=
-select count(*) = 1 from information_schema.processlist
-where state = "Updating" and info = "DELETE FROM t";
---source include/wait_condition.inc
+SET DEBUG_SYNC="now WAIT_FOR del_locked";
 UPDATE t SET a = 1;
 COMMIT;
@@ -30,5 +27,6 @@ connection default;
 --echo # The above DELETE must delete all the rows in the table, so the
 --echo # following SELECT must show 0 rows.
 SELECT count(*) FROM t;
+SET DEBUG_SYNC="reset";
 DROP TABLE t;
 --source include/wait_until_count_sessions.inc


@@ -11,12 +11,12 @@ DROP TABLE mdev21563;
 #
 CREATE TABLE t1(f1 CHAR(100), FULLTEXT idx(f1))ENGINE=InnoDB;
 INSERT INTO t1 VALUES('mysql'), ('innodb');
-set debug_dbug="+d,fts_instrument_sync_debug";
+set debug_dbug="+d,fts_instrument_sync_request";
 INSERT INTO t1 VALUES('test');
-set debug_dbug="-d,fts_instrument_sync_debug";
+set debug_dbug="-d,fts_instrument_sync_request";
 INSERT INTO t1 VALUES('This is a fts issue');
 # restart
-set debug_dbug="+d,fts_instrument_sync_debug";
+set debug_dbug="+d,fts_instrument_sync_request";
 UPDATE t1 SET f1="mariadb";
-set debug_dbug="-d,fts_instrument_sync_debug";
+set debug_dbug="-d,fts_instrument_sync_request";
 DROP TABLE t1;


@@ -16,12 +16,12 @@ DROP TABLE mdev21563;
 --echo #
 CREATE TABLE t1(f1 CHAR(100), FULLTEXT idx(f1))ENGINE=InnoDB;
 INSERT INTO t1 VALUES('mysql'), ('innodb');
-set debug_dbug="+d,fts_instrument_sync_debug";
+set debug_dbug="+d,fts_instrument_sync_request";
 INSERT INTO t1 VALUES('test');
-set debug_dbug="-d,fts_instrument_sync_debug";
+set debug_dbug="-d,fts_instrument_sync_request";
 INSERT INTO t1 VALUES('This is a fts issue');
 --source include/restart_mysqld.inc
-set debug_dbug="+d,fts_instrument_sync_debug";
+set debug_dbug="+d,fts_instrument_sync_request";
 UPDATE t1 SET f1="mariadb";
-set debug_dbug="-d,fts_instrument_sync_debug";
+set debug_dbug="-d,fts_instrument_sync_request";
 DROP TABLE t1;


@@ -1108,6 +1108,12 @@ static int check_connection(THD *thd)
 void setup_connection_thread_globals(THD *thd)
 {
+DBUG_EXECUTE_IF("CONNECT_wait", {
+extern Dynamic_array<MYSQL_SOCKET> listen_sockets;
+DBUG_ASSERT(listen_sockets.size());
+while (listen_sockets.size())
+my_sleep(1000);
+});
 thd->store_globals();
 }
@@ -1359,14 +1365,6 @@ void do_handle_one_connection(CONNECT *connect, bool put_in_cache)
 return;
 }
-DBUG_EXECUTE_IF("CONNECT_wait",
-{
-extern Dynamic_array<MYSQL_SOCKET> listen_sockets;
-DBUG_ASSERT(listen_sockets.size());
-while (listen_sockets.size())
-my_sleep(1000);
-});
 /*
 If a thread was created to handle this connection:
 increment slow_launch_threads counter if it took more than


@@ -250,14 +250,6 @@ static THD *threadpool_add_connection(CONNECT *connect, TP_connection *c)
 {
 THD *thd= NULL;
-DBUG_EXECUTE_IF("CONNECT_wait",
-{
-extern Dynamic_array<MYSQL_SOCKET> listen_sockets;
-DBUG_ASSERT(listen_sockets.size());
-while (listen_sockets.size())
-my_sleep(1000);
-});
 /*
 Create a new connection context: mysys_thread_var and PSI thread
 Store them in THD.


@@ -74,6 +74,8 @@ dict_load_index_low(
 byte* table_id, /*!< in/out: table id (8 bytes),
 an "in" value if mtr
 and "out" when !mtr */
+bool uncommitted, /*!< in: false=READ COMMITTED,
+true=READ UNCOMMITTED */
 mem_heap_t* heap, /*!< in/out: temporary memory heap */
 const rec_t* rec, /*!< in: SYS_INDEXES record */
 mtr_t* mtr, /*!< in/out: mini-transaction,
@@ -83,30 +85,30 @@ dict_load_index_low(
 dict_index_t** index); /*!< out,own: index, or NULL */
 /** Load a table column definition from a SYS_COLUMNS record to dict_table_t.
-@return error message
-@retval NULL on success */
-static
-const char*
-dict_load_column_low(
-dict_table_t* table, /*!< in/out: table, could be NULL
-if we just populate a dict_column_t
-struct with information from
-a SYS_COLUMNS record */
-mem_heap_t* heap, /*!< in/out: memory heap
-for temporary storage */
-dict_col_t* column, /*!< out: dict_column_t to fill,
-or NULL if table != NULL */
-table_id_t* table_id, /*!< out: table id */
-const char** col_name, /*!< out: column name */
-const rec_t* rec, /*!< in: SYS_COLUMNS record */
-mtr_t* mtr, /*!< in/out: mini-transaction */
-ulint* nth_v_col); /*!< out: if not NULL, this
-records the "n" of "nth" virtual
-column */
+@param table table, or nullptr if the output will be in column
+@param use_uncommitted 0=READ COMMITTED, 1=detect, 2=READ UNCOMMITTED
+@param heap memory heap for temporary storage
+@param column pointer to output buffer, or nullptr if table!=nullptr
+@param table_id table identifier
+@param col_name column name
+@param rec SYS_COLUMNS record
+@param mtr mini-transaction
+@param nth_v_col nullptr, or pointer to a counter of virtual columns
+@return error message
+@retval nullptr on success */
+static const char *dict_load_column_low(dict_table_t *table,
+unsigned use_uncommitted,
+mem_heap_t *heap, dict_col_t *column,
+table_id_t *table_id,
+const char **col_name,
+const rec_t *rec,
+mtr_t *mtr,
+ulint *nth_v_col);
 /** Load a virtual column "mapping" (to base columns) information
 from a SYS_VIRTUAL record
 @param[in,out] table table
+@param[in] uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
 @param[in,out] column mapped base column's dict_column_t
 @param[in,out] table_id table id
 @param[in,out] pos virtual column position
@@ -118,6 +120,7 @@ static
 const char*
 dict_load_virtual_low(
 dict_table_t* table,
+bool uncommitted,
 dict_col_t** column,
 table_id_t* table_id,
 ulint* pos,
@@ -133,6 +136,8 @@ dict_load_field_low(
 byte* index_id, /*!< in/out: index id (8 bytes)
 an "in" value if index != NULL
 and "out" if index == NULL */
+bool uncommitted, /*!< in: false=READ COMMITTED,
+true=READ UNCOMMITTED */
 dict_index_t* index, /*!< in/out: index, could be NULL
 if we just populate a dict_field_t
 struct with information from
@@ -251,18 +256,16 @@ dict_process_sys_indexes_rec(
 dict_index_t* index, /*!< out: index to be filled */
 table_id_t* table_id) /*!< out: index table id */
 {
-const char* err_msg;
-byte buf[8];
+byte buf[8];
 ut_d(index->is_dummy = true);
 ut_d(index->in_instant_init = false);
 /* Parse the record, and get "dict_index_t" struct filled */
-err_msg = dict_load_index_low(buf, heap, rec, nullptr, nullptr, &index);
-*table_id = mach_read_from_8(buf);
-return(err_msg);
+const char *err_msg= dict_load_index_low(buf, false, heap, rec,
+nullptr, nullptr, &index);
+*table_id= mach_read_from_8(buf);
+return err_msg;
 }
 /********************************************************************//**
@@ -283,7 +286,7 @@ dict_process_sys_columns_rec(
 const char* err_msg;
 /* Parse the record, and get "dict_col_t" struct filled */
-err_msg = dict_load_column_low(NULL, heap, column,
+err_msg = dict_load_column_low(NULL, 0, heap, column,
 table_id, col_name, rec, nullptr,
 nth_v_col);
@@ -304,7 +307,8 @@ dict_process_sys_virtual_rec(
 ulint* pos,
 ulint* base_pos)
 {
-return dict_load_virtual_low(nullptr, nullptr, table_id, pos, base_pos, rec);
+return dict_load_virtual_low(nullptr, false, nullptr, table_id,
+pos, base_pos, rec);
 }
 /********************************************************************//**
@@ -328,7 +332,7 @@ dict_process_sys_fields_rec(
 mach_write_to_8(last_index_id, last_id);
-err_msg = dict_load_field_low(buf, NULL, sys_field,
+err_msg = dict_load_field_low(buf, false, nullptr, sys_field,
 pos, last_index_id, heap, nullptr, rec);
 *index_id = mach_read_from_8(buf);
@@ -631,6 +635,7 @@ enum table_read_status { READ_OK= 0, READ_ERROR, READ_NOT_FOUND };
 /** Read and return 5 integer fields from a SYS_TABLES record.
 @param[in] rec A record of SYS_TABLES
+@param[in] uncommitted true=use READ UNCOMMITTED, false=READ COMMITTED
 @param[in] mtr mini-transaction
 @param[out] table_id Pointer to the table_id for this table
 @param[out] space_id Pointer to the space_id for this table
@@ -645,6 +650,7 @@ static
 table_read_status
 dict_sys_tables_rec_read(
 const rec_t* rec,
+bool uncommitted,
 mtr_t* mtr,
 table_id_t* table_id,
 uint32_t* space_id,
@@ -661,7 +667,7 @@ dict_sys_tables_rec_read(
 rec, DICT_FLD__SYS_TABLES__DB_TRX_ID, &len);
 ut_ad(len == 6 || len == UNIV_SQL_NULL);
 trx_id_t id = len == 6 ? trx_read_trx_id(field) : 0;
-if (id && trx_sys.find(nullptr, id, false)) {
+if (id && !uncommitted && trx_sys.find(nullptr, id, false)) {
 const auto savepoint = mtr->get_savepoint();
 heap = mem_heap_create(1024);
 dict_index_t* index = UT_LIST_GET_FIRST(
@@ -904,7 +910,8 @@ void dict_check_tablespaces_and_store_max_id()
 DBUG_PRINT("dict_check_sys_tables",
 ("name: %*.s", static_cast<int>(len), field));
-if (dict_sys_tables_rec_read(rec, &mtr, &table_id, &space_id,
+if (dict_sys_tables_rec_read(rec, false,
+&mtr, &table_id, &space_id,
 &n_cols, &flags, &flags2, nullptr)
 != READ_OK
 || space_id == TRX_SYS_SPACE) {
@@ -974,29 +981,31 @@ void dict_check_tablespaces_and_store_max_id()
 /** Error message for a delete-marked record in dict_load_column_low() */
 static const char *dict_load_column_del= "delete-marked record in SYS_COLUMNS";
+/** Error message for a missing record in dict_load_column_low() */
 static const char *dict_load_column_none= "SYS_COLUMNS record not found";
+/** Message for incomplete instant ADD/DROP in dict_load_column_low() */
+static const char *dict_load_column_instant= "incomplete instant ADD/DROP";
 /** Load a table column definition from a SYS_COLUMNS record to dict_table_t.
-@return error message
-@retval NULL on success */
-static
-const char*
-dict_load_column_low(
-dict_table_t* table, /*!< in/out: table, could be NULL
-if we just populate a dict_column_t
-struct with information from
-a SYS_COLUMNS record */
-mem_heap_t* heap, /*!< in/out: memory heap
-for temporary storage */
-dict_col_t* column, /*!< out: dict_column_t to fill,
-or NULL if table != NULL */
-table_id_t* table_id, /*!< out: table id */
-const char** col_name, /*!< out: column name */
-const rec_t* rec, /*!< in: SYS_COLUMNS record */
-mtr_t* mtr, /*!< in/out: mini-transaction */
-ulint* nth_v_col) /*!< out: if not NULL, this
-records the "n" of "nth" virtual
-column */
+@param table table, or nullptr if the output will be in column
+@param use_uncommitted 0=READ COMMITTED, 1=detect, 2=READ UNCOMMITTED
+@param heap memory heap for temporary storage
+@param column pointer to output buffer, or nullptr if table!=nullptr
+@param table_id table identifier
+@param col_name column name
+@param rec SYS_COLUMNS record
+@param mtr mini-transaction
+@param nth_v_col nullptr, or pointer to a counter of virtual columns
+@return error message
+@retval nullptr on success */
+static const char *dict_load_column_low(dict_table_t *table,
+unsigned use_uncommitted,
+mem_heap_t *heap, dict_col_t *column,
+table_id_t *table_id,
+const char **col_name,
+const rec_t *rec,
+mtr_t *mtr,
+ulint *nth_v_col)
 {
 char* name;
 const byte* field;
@@ -1042,7 +1051,11 @@ err_len:
 const trx_id_t trx_id = trx_read_trx_id(field);
-if (trx_id && mtr && trx_sys.find(nullptr, trx_id, false)) {
+if (trx_id && mtr && use_uncommitted < 2
+&& trx_sys.find(nullptr, trx_id, false)) {
+if (use_uncommitted) {
+return dict_load_column_instant;
+}
 const auto savepoint = mtr->get_savepoint();
 dict_index_t* index = UT_LIST_GET_FIRST(
 dict_sys.sys_columns->indexes);
@@ -1171,6 +1184,7 @@ static const char *dict_load_virtual_none= "SYS_VIRTUAL record not found";
 /** Load a virtual column "mapping" (to base columns) information
 from a SYS_VIRTUAL record
 @param[in,out] table table
+@param[in] uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
 @param[in,out] column mapped base column's dict_column_t
 @param[in,out] table_id table id
 @param[in,out] pos virtual column position
@@ -1182,6 +1196,7 @@ static
 const char*
 dict_load_virtual_low(
 dict_table_t* table,
+bool uncommitted,
 dict_col_t** column,
 table_id_t* table_id,
 ulint* pos,
@@ -1245,10 +1260,11 @@ err_len:
 const trx_id_t trx_id = trx_read_trx_id(field);
-if (trx_id && column && trx_sys.find(nullptr, trx_id, false)) {
+if (trx_id && column && !uncommitted
+&& trx_sys.find(nullptr, trx_id, false)) {
 if (!rec_get_deleted_flag(rec, 0)) {
 return dict_load_virtual_none;
 }
 } else if (rec_get_deleted_flag(rec, 0)) {
 ut_ad(trx_id != 0);
 return dict_load_virtual_del;
@@ -1261,16 +1277,17 @@ err_len:
 return(NULL);
 }
-/********************************************************************//**
-Loads definitions for table columns. */
+/** Load the definitions for table columns.
+@param table table
+@param use_uncommitted 0=READ COMMITTED, 1=detect, 2=READ UNCOMMITTED
+@param heap memory heap for temporary storage
+@return error code
+@retval DB_SUCCESS on success
+@retval DB_SUCCESS_LOCKED_REC on success if use_uncommitted=1
+and instant ADD/DROP/reorder was detected */
 MY_ATTRIBUTE((nonnull, warn_unused_result))
-static
-dberr_t
-dict_load_columns(
-/*==============*/
-dict_table_t* table, /*!< in/out: table */
-mem_heap_t* heap) /*!< in/out: memory heap
-for temporary storage */
+static dberr_t dict_load_columns(dict_table_t *table, unsigned use_uncommitted,
+mem_heap_t *heap)
 {
 btr_pcur_t pcur;
 mtr_t mtr;
@@ -1318,7 +1335,8 @@ dict_load_columns(
 const rec_t* rec = btr_pcur_get_rec(&pcur);
 err_msg = btr_pcur_is_on_user_rec(&pcur)
-? dict_load_column_low(table, heap, NULL, NULL,
+? dict_load_column_low(table, use_uncommitted,
+heap, NULL, NULL,
 &name, rec, &mtr, &nth_v_col)
 : dict_load_column_none;
@@ -1326,6 +1344,9 @@ dict_load_columns(
 } else if (err_msg == dict_load_column_del) {
 n_skipped++;
 goto next_rec;
+} else if (err_msg == dict_load_column_instant) {
+err = DB_SUCCESS_LOCKED_REC;
+goto func_exit;
 } else if (err_msg == dict_load_column_none
 && strstr(table->name.m_name,
 "/" TEMP_FILE_PREFIX_INNODB)) {
@@ -1382,13 +1403,13 @@ func_exit:
 }
 /** Loads SYS_VIRTUAL info for one virtual column
-@param[in,out] table table
-@param[in] nth_v_col virtual column sequence num
-*/
+@param table table definition
+@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
+@param nth_v_col virtual column position */
 MY_ATTRIBUTE((nonnull, warn_unused_result))
 static
 dberr_t
-dict_load_virtual_col(dict_table_t *table, ulint nth_v_col)
+dict_load_virtual_col(dict_table_t *table, bool uncommitted, ulint nth_v_col)
 {
 const dict_v_col_t* v_col = dict_table_get_nth_v_col(table, nth_v_col);
@@ -1438,7 +1459,7 @@ dict_load_virtual_col(dict_table_t *table, ulint nth_v_col)
 ulint pos;
 const char* err_msg
 = btr_pcur_is_on_user_rec(&pcur)
-? dict_load_virtual_low(table,
+? dict_load_virtual_low(table, uncommitted,
 &v_col->base_col[i - skipped],
 NULL,
 &pos, NULL,
@@ -1468,12 +1489,13 @@ func_exit:
 }
 /** Loads info from SYS_VIRTUAL for virtual columns.
-@param[in,out] table table */
+@param table table definition
+@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED */
 MY_ATTRIBUTE((nonnull, warn_unused_result))
-static dberr_t dict_load_virtual(dict_table_t *table)
+static dberr_t dict_load_virtual(dict_table_t *table, bool uncommitted)
 {
 for (ulint i= 0; i < table->n_v_cols; i++)
-if (dberr_t err= dict_load_virtual_col(table, i))
+if (dberr_t err= dict_load_virtual_col(table, uncommitted, i))
 return err;
 return DB_SUCCESS;
 }
@@ -1492,6 +1514,8 @@ dict_load_field_low(
 byte* index_id, /*!< in/out: index id (8 bytes)
 an "in" value if index != NULL
 and "out" if index == NULL */
+bool uncommitted, /*!< in: false=READ COMMITTED,
+true=READ UNCOMMITTED */
 dict_index_t* index, /*!< in/out: index, could be NULL
 if we just populate a dict_field_t
 struct with information from
@@ -1586,7 +1610,8 @@ err_len:
 if (!trx_id) {
 ut_ad(!rec_get_deleted_flag(rec, 0));
-} else if (mtr && trx_sys.find(nullptr, trx_id, false)) {
+} else if (!mtr || uncommitted) {
+} else if (trx_sys.find(nullptr, trx_id, false)) {
 const auto savepoint = mtr->get_savepoint();
 dict_index_t* sys_field = UT_LIST_GET_FIRST(
 dict_sys.sys_fields->indexes);
@@ -1628,15 +1653,15 @@ err_len:
 return(NULL);
 }
-/********************************************************************//**
-Loads definitions for index fields.
-@return DB_SUCCESS if ok, DB_CORRUPTION if corruption */
-static
-ulint
-dict_load_fields(
-/*=============*/
-dict_index_t* index, /*!< in/out: index whose fields to load */
-mem_heap_t* heap) /*!< in: memory heap for temporary storage */
+/**
+Load definitions for index fields.
+@param index index whose fields are to be loaded
+@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
+@param heap memory heap for temporary storage
+@return error code
+@return DB_SUCCESS if the fields were loaded successfully */
+static dberr_t dict_load_fields(dict_index_t *index, bool uncommitted,
+mem_heap_t *heap)
 {
 btr_pcur_t pcur;
 mtr_t mtr;
@@ -1671,8 +1696,8 @@ dict_load_fields(
 for (ulint i = 0; i < index->n_fields; i++) {
 const char *err_msg = btr_pcur_is_on_user_rec(&pcur)
-? dict_load_field_low(index_id, index,
-NULL, NULL, NULL,
+? dict_load_field_low(index_id, uncommitted, index,
+nullptr, nullptr, nullptr,
 heap, &mtr,
 btr_pcur_get_rec(&pcur))
 : dict_load_field_none;
@@ -1719,6 +1744,8 @@ dict_load_index_low(
 byte* table_id, /*!< in/out: table id (8 bytes),
 an "in" value if mtr
 and "out" when !mtr */
+bool uncommitted, /*!< in: false=READ COMMITTED,
+true=READ UNCOMMITTED */
 mem_heap_t* heap, /*!< in/out: temporary memory heap */
 const rec_t* rec, /*!< in: SYS_INDEXES record */
 mtr_t* mtr, /*!< in/out: mini-transaction,
@@ -1800,7 +1827,7 @@ err_len:
 const trx_id_t trx_id = trx_read_trx_id(field);
 if (!trx_id) {
 ut_ad(!rec_get_deleted_flag(rec, 0));
-} else if (!mtr) {
+} else if (!mtr || uncommitted) {
 } else if (trx_sys.find(nullptr, trx_id, false)) {
 const auto savepoint = mtr->get_savepoint();
 dict_index_t* sys_index = UT_LIST_GET_FIRST(
@@ -1878,20 +1905,18 @@ err_len:
 return(NULL);
 }
-/********************************************************************//**
-Loads definitions for table indexes. Adds them to the data dictionary
-cache.
-@return DB_SUCCESS if ok, DB_CORRUPTION if corruption of dictionary
-table or DB_UNSUPPORTED if table has unknown index type */
+/** Load definitions for table indexes. Adds them to the data dictionary cache.
+@param table table definition
+@param uncommitted false=READ COMMITTED, true=READ UNCOMMITTED
+@param heap memory heap for temporary storage
+@param ignore_err errors to be ignored when loading the index definition
+@return error code
+@retval DB_SUCCESS if all indexes were successfully loaded
+@retval DB_CORRUPTION if corruption of dictionary table
+@retval DB_UNSUPPORTED if table has unknown index type */
 static MY_ATTRIBUTE((nonnull))
-dberr_t
-dict_load_indexes(
-/*==============*/
-dict_table_t* table, /*!< in/out: table */
-mem_heap_t* heap, /*!< in: memory heap for temporary storage */
-dict_err_ignore_t ignore_err)
-/*!< in: error to be ignored when
-loading the index definition */
+dberr_t dict_load_indexes(dict_table_t *table, bool uncommitted,
+mem_heap_t *heap, dict_err_ignore_t ignore_err)
 {
 dict_index_t* sys_index;
 btr_pcur_t pcur;
@@ -1954,8 +1979,8 @@ dict_load_indexes(
 }
 }
-err_msg = dict_load_index_low(table_id, heap, rec, &mtr, table,
-&index);
+err_msg = dict_load_index_low(table_id, uncommitted, heap, rec,
+&mtr, table, &index);
 ut_ad(!index == !!err_msg);
 if (err_msg == dict_load_index_none) {
@@ -2018,7 +2043,8 @@ dict_load_indexes(
 } else if (index->page == FIL_NULL
 && table->is_readable()
 && (!(index->type & DICT_FTS))) {
-if (ignore_err != DICT_ERR_IGNORE_DROP) {
+if (!uncommitted
+&& ignore_err != DICT_ERR_IGNORE_DROP) {
 ib::error_or_warn(!(ignore_err
 & DICT_ERR_IGNORE_INDEX))
 << "Index " << index->name
@@ -2062,7 +2088,10 @@ corrupted:
 of the database server */
 dict_mem_index_free(index);
 } else {
-dict_load_fields(index, heap);
+error = dict_load_fields(index, uncommitted, heap);
+if (error != DB_SUCCESS) {
+goto func_exit;
+}
 /* The data dictionary tables should never contain
 invalid index definitions. If we ignored this error
@@ -2125,11 +2154,12 @@ func_exit:
 /** Load a table definition from a SYS_TABLES record to dict_table_t.
 Do not load any columns or indexes.
 @param[in,out] mtr mini-transaction
+@param[in] uncommitted whether to use READ UNCOMMITTED isolation level
 @param[in] rec SYS_TABLES record
 @param[out,own] table table, or nullptr
 @return error message
 @retval nullptr on success */
-const char *dict_load_table_low(mtr_t *mtr,
+const char *dict_load_table_low(mtr_t *mtr, bool uncommitted,
 const rec_t *rec, dict_table_t **table)
 {
 table_id_t table_id;
@@ -2142,7 +2172,8 @@ const char *dict_load_table_low(mtr_t *mtr,
 return(error_text);
 }
-if (auto r = dict_sys_tables_rec_read(rec, mtr, &table_id, &space_id,
+if (auto r = dict_sys_tables_rec_read(rec, uncommitted, mtr,
+&table_id, &space_id,
 &t_num, &flags, &flags2,
 &trx_id)) {
 *table = NULL;
@@ -2285,8 +2316,6 @@ static dict_table_t *dict_load_table_one(const span<const char> &name,
 ut_ad(dict_sys.locked());
-mtr.start();
 dict_index_t *sys_index = dict_sys.sys_tables->indexes.start;
 ut_ad(!dict_sys.sys_tables->not_redundant());
 ut_ad(name_of_col_is(dict_sys.sys_tables, sys_index,
@@ -2310,6 +2339,9 @@ static dict_table_t *dict_load_table_one(const span<const char> &name,
 dfield_set_data(&dfield, name.data(), name.size());
 dict_index_copy_types(&tuple, sys_index, 1);
+bool uncommitted = false;
+reload:
+mtr.start();
 dberr_t err = btr_pcur_open_on_user_rec(sys_index, &tuple, PAGE_CUR_GE,
 BTR_SEARCH_LEAF, &pcur, &mtr);
@@ -2329,7 +2361,8 @@ err_exit:
 }
 dict_table_t* table;
-if (const char* err_msg = dict_load_table_low(&mtr, rec, &table)) {
+if (const char* err_msg =
+dict_load_table_low(&mtr, uncommitted, rec, &table)) {
 if (err_msg != dict_load_table_flags) {
 ib::error() << err_msg;
 }
@@ -2339,15 +2372,32 @@ err_exit:
 goto err_exit;
 }
+const unsigned use_uncommitted = uncommitted
+? 2
+: table->id == mach_read_from_8(
+rec + rec_get_field_start_offs(
+rec, DICT_FLD__SYS_TABLES__ID));
 mtr.commit();
 mem_heap_t* heap = mem_heap_create(32000);
 dict_load_tablespace(table, ignore_err);
-if (dict_load_columns(table, heap) || dict_load_virtual(table)) {
-evict:
-dict_sys.remove(table);
+switch (dict_load_columns(table, use_uncommitted, heap)) {
+case DB_SUCCESS_LOCKED_REC:
+ut_ad(!uncommitted);
+uncommitted = true;
+dict_mem_table_free(table);
+mem_heap_free(heap);
+goto reload;
+case DB_SUCCESS:
+if (!dict_load_virtual(table, uncommitted)) {
+break;
+}
+/* fall through */
+default:
+dict_mem_table_free(table);
 mem_heap_free(heap);
 DBUG_RETURN(nullptr);
 }
@@ -2372,7 +2422,7 @@ evict:
 ? DICT_ERR_IGNORE_ALL
 : ignore_err;
-err = dict_load_indexes(table, heap, index_load_err);
+err = dict_load_indexes(table, uncommitted, heap, index_load_err);
 if (err == DB_TABLE_CORRUPT) {
 /* Refuse to load the table if the table has a corrupted
@@ -2380,7 +2430,10 @@ evict:
 ut_ad(index_load_err != DICT_ERR_IGNORE_DROP);
 ib::error() << "Refusing to load corrupted table "
 << table->name;
-goto evict;
+evict:
+dict_sys.remove(table);
+mem_heap_free(heap);
+DBUG_RETURN(nullptr);
 }
 if (err != DB_SUCCESS || !table->is_readable()) {
@@ -2435,14 +2488,12 @@ corrupted:
 changed when dict_load_foreigns() is called below */
 table->fk_max_recusive_level = 0;
-/* If the force recovery flag is set, we open the table irrespective
-of the error condition, since the user may want to dump data from the
-clustered index. However we load the foreign key information only if
-all indexes were loaded. */
+/* We will load the foreign key information only if
+all indexes were loaded. */
 if (!table->is_readable()) {
 /* Don't attempt to load the indexes from disk. */
 } else if (err == DB_SUCCESS) {
-err = dict_load_foreigns(table->name.m_name, nullptr,
+err = dict_load_foreigns(table->name.m_name, nullptr, false,
 0, true, ignore_err, fk_tables);
 if (err != DB_SUCCESS) {
@@ -2603,7 +2654,7 @@ dict_load_sys_table(
 heap = mem_heap_create(1000);
-dict_load_indexes(table, heap, DICT_ERR_IGNORE_NONE);
+dict_load_indexes(table, false, heap, DICT_ERR_IGNORE_NONE);
 mem_heap_free(heap);
 }
@@ -2773,6 +2824,8 @@ dberr_t
 dict_load_foreign(
 /*==============*/
 const char* table_name, /*!< in: table name */
+bool uncommitted, /*!< in: use READ UNCOMMITTED
+transaction isolation level */
 const char** col_names,
 /*!< in: column names, or NULL
 to use foreign->foreign_table->col_names */
@@ -2860,7 +2913,8 @@ err_exit:
 const trx_id_t tid = trx_read_trx_id(field);
-if (tid && tid != trx_id && trx_sys.find(nullptr, tid, false)) {
+if (tid && tid != trx_id && !uncommitted
+&& trx_sys.find(nullptr, tid, false)) {
 const auto savepoint = mtr.get_savepoint();
 rec_offs* offsets = rec_get_offsets(
 rec, sys_index, nullptr, true, ULINT_UNDEFINED, &heap);
@@ -2989,6 +3043,8 @@ dict_load_foreigns(
 const char* table_name, /*!< in: table name */
 const char** col_names, /*!< in: column names, or NULL
 to use table->col_names */
+bool uncommitted, /*!< in: use READ UNCOMMITTED
+transaction isolation level */
 trx_id_t trx_id, /*!< in: DDL transaction id,
 or 0 to check
 recursive load of tables
@@ -3103,7 +3159,7 @@ loop:
 /* Load the foreign constraint definition to the dictionary cache */
 err = len < sizeof fk_id
-? dict_load_foreign(table_name, col_names, trx_id,
+? dict_load_foreign(table_name, uncommitted, col_names, trx_id,
 check_recursive, check_charsets,
 {fk_id, len}, ignore_err, fk_tables)
 : DB_CORRUPTION;
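
The dict0load.cc changes above boil down to a retry protocol: the table definition is first loaded at READ COMMITTED, and only when an incomplete instant ADD/DROP is detected (the new dict_load_column_instant / DB_SUCCESS_LOCKED_REC path) is the whole load restarted with READ UNCOMMITTED. What follows is a minimal, self-contained C++ sketch of that control flow only; load_status and load_columns are hypothetical stand-ins, not the InnoDB API.

#include <cstdio>

// Hypothetical stand-ins for the InnoDB pieces involved; the real logic
// lives in dict_load_table_one() and dict_load_columns() in dict0load.cc.
enum class load_status { ok, incomplete_instant, corrupted };

// Pretend column loader: under READ COMMITTED it may notice a half-committed
// instant ALTER TABLE (DB_SUCCESS_LOCKED_REC in the patch); under
// READ UNCOMMITTED it simply returns the uncommitted definition.
static load_status load_columns(bool read_uncommitted)
{
  return read_uncommitted ? load_status::ok : load_status::incomplete_instant;
}

int main()
{
  bool read_uncommitted = false;          // start at READ COMMITTED
  for (;;)                                // mirrors the "goto reload" retry
  {
    switch (load_columns(read_uncommitted))
    {
    case load_status::incomplete_instant: // concurrent instant DDL detected
      read_uncommitted = true;            // redo the whole load, uncommitted
      continue;
    case load_status::ok:
      std::puts(read_uncommitted ? "loaded uncommitted definition"
                                 : "loaded committed definition");
      return 0;
    case load_status::corrupted:
      return 1;
    }
  }
}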


@@ -3314,7 +3314,7 @@ fts_add_doc_by_id(
 dict_index_t* fts_id_index;
 ibool is_id_cluster;
 fts_cache_t* cache = ftt->table->fts->cache;
+bool need_sync= false;
 ut_ad(cache->get_docs);
 /* If Doc ID has been supplied by the user, then the table
@@ -3443,7 +3443,7 @@ fts_add_doc_by_id(
 shouldn't hold the cache lock for
 longer time. So cache should sync
 whenever cache size exceeds 512 KB */
-bool need_sync =
+need_sync =
 cache->total_size > 512*1024;
 mysql_mutex_unlock(&table->fts->cache->lock);
@@ -3464,10 +3464,6 @@ fts_add_doc_by_id(
 need_sync= true;
 );
-if (need_sync) {
-fts_sync_table(table);
-}
 mtr_start(&mtr);
 if (i < num_idx - 1) {
@@ -3493,6 +3489,10 @@ func_exit:
 ut_free(pcur.old_rec_buf);
 mem_heap_free(heap);
+if (need_sync) {
+fts_sync_table(table);
+}
 }
@@ -3898,6 +3898,7 @@ err_exit:
 ib::error() << "(" << error << ") writing"
 " word node to FTS auxiliary index table "
 << table->name;
+break;
 }
 }
@@ -3999,6 +4000,7 @@ fts_sync_commit(
 mysql_mutex_unlock(&cache->lock);
 fts_sql_commit(trx);
 } else {
+mysql_mutex_unlock(&cache->lock);
 fts_sql_rollback(trx);
 ib::error() << "(" << error << ") during SYNC of "
 "table " << sync->table->name;


@@ -12748,7 +12748,7 @@ int create_table_info_t::create_table(bool create_fk)
 if (err == DB_SUCCESS) {
 /* Check that also referencing constraints are ok */
 dict_names_t fk_tables;
-err = dict_load_foreigns(m_table_name, nullptr,
+err = dict_load_foreigns(m_table_name, nullptr, false,
 m_trx->id, true,
 DICT_ERR_IGNORE_NONE, fk_tables);
 while (err == DB_SUCCESS && !fk_tables.empty()) {


@@ -9900,7 +9900,7 @@ innobase_update_foreign_cache(
 dict_names_t fk_tables;
 err = dict_load_foreigns(user_table->name.m_name,
-ctx->col_names, 1, true,
+ctx->col_names, false, 1, true,
 DICT_ERR_IGNORE_NONE,
 fk_tables);
@@ -9911,7 +9911,7 @@ innobase_update_foreign_cache(
 loaded with "foreign_key checks" off,
 so let's retry the loading with charset_check is off */
 err = dict_load_foreigns(user_table->name.m_name,
-ctx->col_names, 1, false,
+ctx->col_names, false, 1, false,
 DICT_ERR_IGNORE_NONE,
 fk_tables);


@@ -4566,7 +4566,7 @@ static const char *i_s_sys_tables_rec(const btr_pcur_t &pcur, mtr_t *mtr,
 }
 if (rec)
-return dict_load_table_low(mtr, rec, table);
+return dict_load_table_low(mtr, false, rec, table);
 *table= dict_sys.load_table
 (span<const char>{reinterpret_cast<const char*>(pcur.old_rec), len});


@@ -89,6 +89,8 @@ dict_load_foreigns(
 const char* table_name, /*!< in: table name */
 const char** col_names, /*!< in: column names, or NULL
 to use table->col_names */
+bool uncommitted, /*!< in: use READ UNCOMMITTED
+transaction isolation level */
 trx_id_t trx_id, /*!< in: DDL transaction id,
 or 0 to check
 recursive load of tables
@@ -125,11 +127,12 @@ dict_getnext_system(
 /** Load a table definition from a SYS_TABLES record to dict_table_t.
 Do not load any columns or indexes.
 @param[in,out] mtr mini-transaction
+@param[in] uncommitted whether to use READ UNCOMMITTED isolation level
 @param[in] rec SYS_TABLES record
 @param[out,own] table table, or nullptr
 @return error message
 @retval nullptr on success */
-const char *dict_load_table_low(mtr_t *mtr,
+const char *dict_load_table_low(mtr_t *mtr, bool uncommitted,
 const rec_t *rec, dict_table_t **table)
 MY_ATTRIBUTE((nonnull, warn_unused_result));


@@ -2847,7 +2847,7 @@ row_rename_table_for_mysql(
 dict_names_t fk_tables;
 err = dict_load_foreigns(
-new_name, nullptr, trx->id,
+new_name, nullptr, false, trx->id,
 !old_is_tmp || trx->check_foreigns,
 use_fk
 ? DICT_ERR_IGNORE_NONE


@@ -64,12 +64,11 @@ SELECT * FROM tbl_c WHERE greeting = "Aloha!"
 id greeting
 2 Aloha!
 connection child2_1;
-SELECT argument FROM mysql.general_log WHERE argument LIKE 'select %';
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'select `id`,`greeting` from %';
 argument
 select `id`,`greeting` from `auto_test_remote`.`tbl_a` where `greeting` = 'Aloha!' and ((`greeting` = 'Aloha!'))
 select `id`,`greeting` from `auto_test_remote`.`tbl_b` where `greeting` like 'Aloha%' and ((`greeting` = 'Aloha!'))
 select `id`,`greeting` from `auto_test_remote`.`tbl_c` where `greeting` like 'Aloha%' and ((`greeting` = 'Aloha!'))
-SELECT argument FROM mysql.general_log WHERE argument LIKE 'select %'
 connection child2_1;
 SET @@global.general_log = @general_log_backup;
 SET @@global.log_output = @log_output_backup;


@@ -75,7 +75,7 @@ SELECT * FROM tbl_c WHERE greeting = "Aloha!"
 AND CASE greeting WHEN "Aloha!" THEN "one" ELSE 'more' END = "one"; # hack to disable GBH
 --connection child2_1
-SELECT argument FROM mysql.general_log WHERE argument LIKE 'select %';
+SELECT argument FROM mysql.general_log WHERE argument LIKE 'select `id`,`greeting` from %';
 --connection child2_1
 SET @@global.general_log = @general_log_backup;