
Merge 10.6 into 10.7

Marko Mäkelä
2022-06-14 10:17:36 +03:00
24 changed files with 271 additions and 434 deletions

View File

@ -1,6 +1,13 @@
# These should be moved, see https://jira.mariadb.org/browse/MDEV-21654
arch-dependent-file-in-usr-share usr/share/mysql/mysql-test/suite/plugins/pam/pam_mariadb_mtr.so
arch-independent-package-contains-binary-or-object usr/share/mysql/mysql-test/suite/plugins/pam/pam_mariadb_mtr.so
# Mainly to support the *BSD family. Not the right way to do it, but this is a test package and not for production
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/std_data/checkDBI_DBD-MariaDB.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/suite/engines/rr_trx/run_stress_tx_rr.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/suite/funcs_1/lib/DataGen_local.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/suite/funcs_1/lib/DataGen_modify.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/suite/funcs_2/lib/gen_charset_utf8.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/suite/rpl/extension/checksum.pl]
# Intentional for test files
national-encoding usr/share/mysql/mysql-test/*
# Extra test documentation files that really need to be kept in context in test directory

View File

@ -1,3 +1,8 @@
# These should be moved, see https://jira.mariadb.org/browse/MDEV-21653
arch-dependent-file-in-usr-share usr/share/mysql/mysql-test/lib/My/SafeProcess/my_safe_process
arch-dependent-file-in-usr-share usr/share/mysql/mysql-test/lib/My/SafeProcess/wsrep_check_version
# Mainly to support the *BSD family. Not the right way to do it, but this is a test package and not for production
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/lib/process-purecov-annotations.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/lib/v1/mysql-test-run.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/mysql-stress-test.pl]
incorrect-path-for-interpreter /usr/bin/env perl != /usr/bin/perl [usr/share/mysql/mysql-test/mysql-test-run.pl]

View File

@ -4,23 +4,82 @@ version-substvar-for-external-package mariadb-client-core-10.7 -> mysql-client-5
version-substvar-for-external-package mariadb-server-10.7 -> mysql-server
version-substvar-for-external-package libmariadb-dev -> libmysqlclient-dev
version-substvar-for-external-package libmariadb-dev -> libmysqld-dev
version-substvar-for-external-package mariadb-server-10.7 -> mysql-client-5.5
version-substvar-for-external-package mariadb-server-10.7 -> mysql-client-5.6
version-substvar-for-external-package mariadb-server-10.7 -> mysql-client-5.7
version-substvar-for-external-package mariadb-server-10.7 -> mysql-client-8.0
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-5.1
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-5.5
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-5.6
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-5.7
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-8.0
version-substvar-for-external-package mariadb-server-10.7 -> mysql-client-5.*
version-substvar-for-external-package mariadb-server-10.7 -> mysql-client-8.*
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-5.*
version-substvar-for-external-package mariadb-client-10.7 -> mysql-client-core-8.*
version-substvar-for-external-package libmariadbd-dev -> libmariadbclient-dev
# ColumnStore not used in Debian, safe to ignore. Reported upstream in https://jira.mariadb.org/browse/MDEV-24124
source-is-missing storage/columnstore/columnstore/utils/jemalloc/libjemalloc.so.2
# Must be fixed upstream
source-is-missing storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-ui-1.8.18.custom.js*
source-is-missing storage/mroonga/vendor/groonga/examples/dictionary/html/js/jquery-ui-*.custom.js
# Intentional control relationships
version-substvar-for-external-package Replaces * ${source:Version} libmariadbd-dev -> libmariadbclient-dev
version-substvar-for-external-package Replaces * ${source:Version} libmariadb-dev -> libmysqlclient-dev
version-substvar-for-external-package Replaces * ${source:Version} libmariadb-dev -> libmysqld-dev
version-substvar-for-external-package Replaces * libmariadbd-dev -> libmariadbclient-dev
version-substvar-for-external-package Replaces * libmariadb-dev -> libmysqlclient-dev
version-substvar-for-external-package Replaces * libmariadb-dev -> libmysqld-dev
# We can't change build dependencies on a stable branch (10.5..10.8) so just override this
missing-build-dependency-for-dh-addon systemd *
# Data or test files where long lines are justified
very-long-line-length-in-source-file *.test *
very-long-line-length-in-source-file *.result *
very-long-line-length-in-source-file BUILD/compile-*
very-long-line-length-in-source-file *COPYING.rtf *
# These are mainly found under extra/wolfssl
very-long-line-length-in-source-file *.cproject *
very-long-line-length-in-source-file *.md *
very-long-line-length-in-source-file *.scfg *
very-long-line-length-in-source-file *.launch *
very-long-line-length-in-source-file extra/wolfssl/wolfssl/IDE/Espressif/ESP-IDF/test/test_wolfssl.c *
very-long-line-length-in-source-file extra/wolfssl/wolfssl/configure.ac *
very-long-line-length-in-source-file extra/wolfssl/wolfssl/doc/formats/html/html_changes/tabs.css *
# Preprocessed C files which have long lines
very-long-line-length-in-source-file extra/wolfssl/wolfssl/wolfcrypt/src/*.i *
# These are mostly results for test cases and similar files, so they can be
# deliberately formatted with overly long lines
very-long-line-length-in-source-file mysql-test/*.dump *
very-long-line-length-in-source-file mysql-test/*.inc *
very-long-line-length-in-source-file mysql-test/*.rdiff *
very-long-line-length-in-source-file mysql-test/*.txt *
very-long-line-length-in-source-file mysql-test/*.weekly *
# Test file
very-long-line-length-in-source-file plugin/handler_socket/regtest/test_01_lib/test19.expected *
# SQL source files that have very long inserts/selects
very-long-line-length-in-source-file mysql-test/std_data/init_file_longline_3816.sql *
very-long-line-length-in-source-file scripts/fill_help_tables.sql *
very-long-line-length-in-source-file scripts/mysql_system_tables.sql *
very-long-line-length-in-source-file scripts/mysql_test_data_timezone.sql *
# Machine-formatted HTML
very-long-line-length-in-source-file sql/share/charsets/languages.html *
very-long-line-length-in-source-file sql/share/errmsg-utf8.txt *
# Very long test string
very-long-line-length-in-source-file storage/archive/archive_test.c line 30 is 1051 characters long (>512)
# autogenerated thrift file
very-long-line-length-in-source-file storage/cassandra/gen-cpp/cassandra_types.h *
# ColumnStore ignores
# The mysql-test directory contains some long test includes
very-long-line-length-in-source-file storage/columnstore/columnstore/.drone.jsonnet *
very-long-line-length-in-source-file storage/columnstore/columnstore/CMakeLists.txt *
very-long-line-length-in-source-file storage/columnstore/columnstore/mysql-test/columnstore/csinternal/include/autopilot_create_datatypetestm_tables.inc *
very-long-line-length-in-source-file storage/columnstore/columnstore/mysql-test/columnstore/csinternal/include/autopilot_create_datatypeupdate_table.inc *
very-long-line-length-in-source-file storage/columnstore/columnstore/*.xmi *
very-long-line-length-in-source-file storage/columnstore/columnstore/dbcon/doc/q19_plan.txt *
very-long-line-length-in-source-file storage/columnstore/columnstore/utils/udfsdk/docs/source/reference/mcsv1Context.rst *
very-long-line-length-in-source-file storage/columnstore/columnstore/utils/winport/win_setup_mysql_part1.sql *
# Minified CSS files. These appear in several places
very-long-line-length-in-source-file *badge_only.css *
very-long-line-length-in-source-file *theme.css line *
# General storage ignores
very-long-line-length-in-source-file storage/mroonga/vendor/groonga/examples/dictionary/html/css/smoothness/jquery-ui-1.8.12.custom.css *
very-long-line-length-in-source-file storage/rocksdb/mysql-test/rocksdb/t/bypass_select_basic_bloom-master.opt *
very-long-line-length-in-source-file storage/rocksdb/mysql-test/rocksdb/t/type_enum.inc *
very-long-line-length-in-source-file storage/rocksdb/mysql-test/rocksdb/t/type_set.inc *
very-long-line-length-in-source-file storage/rocksdb/rocksdb/docs/_includes/footer.html *
very-long-line-length-in-source-file storage/rocksdb/rocksdb/docs/_posts/*.markdown line *
very-long-line-length-in-source-file storage/spider/mysql-test/spider/bugfix/include/sql_mode_init.inc *
very-long-line-length-in-source-file storage/tokudb/PerconaFT/cmake_modules/TokuBuildTagDatabases.cmake *
very-long-line-length-in-source-file storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/m4/po.m4 *
# These are generated files which should not cause any harm
source-contains-autogenerated-visual-c++-file storage/columnstore/columnstore/*.rc
source-contains-autogenerated-visual-c++-file storage/columnstore/columnstore/*.h
source-contains-autogenerated-visual-c++-file win/upgrade_wizard/resource.h
source-contains-autogenerated-visual-c++-file win/upgrade_wizard/upgrade.rc

View File

@ -0,0 +1,16 @@
#
# MDEV-28802 DROP DATABASE in InnoDB still is case-insensitive
#
SET @save_fpt=@@GLOBAL.innodb_file_per_table;
SET GLOBAL innodb_file_per_table=0;
CREATE DATABASE Db;
CREATE TABLE Db.t1 (c1 INT KEY) ENGINE=InnoDB;
CREATE DATABASE DB;
DROP DATABASE DB;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'D%';
NAME
Db/t1
DROP DATABASE Db;
SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'D%';
TABLE_ID NAME FLAG N_COLS SPACE ROW_FORMAT ZIP_PAGE_SIZE SPACE_TYPE
SET GLOBAL innodb_file_per_table=@save_fpt;

View File

@ -0,0 +1,17 @@
--source include/have_innodb.inc
--source include/have_case_sensitive_file_system.inc
--echo #
--echo # MDEV-28802 DROP DATABASE in InnoDB still is case-insensitive
--echo #
SET @save_fpt=@@GLOBAL.innodb_file_per_table;
SET GLOBAL innodb_file_per_table=0;
CREATE DATABASE Db;
CREATE TABLE Db.t1 (c1 INT KEY) ENGINE=InnoDB;
CREATE DATABASE DB;
DROP DATABASE DB;
SELECT NAME FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'D%';
DROP DATABASE Db;
SELECT * FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'D%';
SET GLOBAL innodb_file_per_table=@save_fpt;
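
For context, the hunk later in this commit that modifies innodb_drop_database (the TO_BINARY change) appears to be the fix this test exercises: InnoDB's internal SQL procedure compares the "db/table" prefix of SYS_TABLES.NAME with the database being dropped, and without a binary comparison that match is case-insensitive, so DROP DATABASE DB could also delete the dictionary rows for Db.t1. A minimal standalone sketch of the difference, in plain C++ rather than InnoDB's internal SQL (illustration only; the helper names are made up):

// Illustration only (not InnoDB code): case-insensitive vs. byte-wise prefix
// matching of a SYS_TABLES.NAME-style string ("Db/t1") against the prefix of
// the database being dropped ("DB/").
#include <cstdio>
#include <cstring>
#include <strings.h>  // strncasecmp() is POSIX, not standard C++

static bool prefix_match_ci(const char *name, const char *db)
{
  return strncasecmp(name, db, std::strlen(db)) == 0;  // old, case-insensitive match
}

static bool prefix_match_bin(const char *name, const char *db)
{
  return std::strncmp(name, db, std::strlen(db)) == 0; // TO_BINARY()-style byte match
}

int main()
{
  const char *name = "Db/t1";  // dictionary entry that must survive
  const char *db   = "DB/";    // database actually being dropped
  std::printf("case-insensitive: %d\n", prefix_match_ci(name, db));  // 1: Db/t1 would be dropped too
  std::printf("byte-wise:        %d\n", prefix_match_bin(name, db)); // 0: Db/t1 is kept
  return 0;
}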

View File

@ -19,7 +19,7 @@ INSERT INTO t2 VALUES('mariadb');
connection default;
SET @saved_dbug = @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug ='+d,fts_instrument_sync_request,ib_optimize_wq_hang';
SET DEBUG_SYNC= 'fts_instrument_sync_request
SET DEBUG_SYNC= 'fts_sync_end
SIGNAL drop_index_start WAIT_FOR sync_op';
INSERT INTO t1 VALUES('Keyword');
connect con1,localhost,root,,,;

View File

@ -11,19 +11,19 @@ INSERT INTO t1(title) VALUES('database');
connection con1;
SET @old_dbug = @@SESSION.debug_dbug;
SET debug_dbug = '+d,fts_instrument_sync_debug';
SET DEBUG_SYNC= 'fts_write_node SIGNAL written WAIT_FOR selected';
SET DEBUG_SYNC= 'fts_sync_end SIGNAL written WAIT_FOR selected';
INSERT INTO t1(title) VALUES('mysql database');
connection default;
SET DEBUG_SYNC= 'now WAIT_FOR written';
SET GLOBAL innodb_ft_aux_table="test/t1";
SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE;
WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
database 2 3 2 2 0
database 2 3 2 3 6
mysql 1 3 2 1 0
mysql 1 3 2 3 0
SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
SET GLOBAL innodb_ft_aux_table=default;
SELECT * FROM t1 WHERE MATCH(title) AGAINST('mysql database');
FTS_DOC_ID title
@ -59,7 +59,7 @@ INSERT INTO t1(title) VALUES('mysql');
INSERT INTO t1(title) VALUES('database');
connection con1;
SET debug_dbug = '+d,fts_instrument_sync_debug';
SET DEBUG_SYNC= 'fts_write_node SIGNAL written WAIT_FOR inserted';
SET DEBUG_SYNC= 'fts_sync_end SIGNAL written WAIT_FOR inserted';
INSERT INTO t1(title) VALUES('mysql database');
connection default;
SET DEBUG_SYNC= 'now WAIT_FOR written';
@ -70,14 +70,14 @@ SET debug_dbug = @old_dbug;
SET GLOBAL innodb_ft_aux_table="test/t1";
SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_CACHE;
WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
database 4 4 1 4 6
mysql 4 4 1 4 0
SELECT * FROM INFORMATION_SCHEMA.INNODB_FT_INDEX_TABLE;
WORD FIRST_DOC_ID LAST_DOC_ID DOC_COUNT DOC_ID POSITION
database 2 3 2 2 0
database 2 3 2 3 6
database 4 4 1 4 6
mysql 1 4 3 1 0
mysql 1 4 3 3 0
mysql 1 4 3 4 0
mysql 1 3 2 1 0
mysql 1 3 2 3 0
SET GLOBAL innodb_ft_aux_table=default;
SELECT * FROM t1 WHERE MATCH(title) AGAINST('mysql database');
FTS_DOC_ID title

View File

@ -30,6 +30,7 @@ connection con1;
connection con2;
/* connection con2 */ SELECT * FROM t1 WHERE MATCH(title) AGAINST('mysql database');
FTS_DOC_ID title
1 mysql database
connection default;
# make con1 & con2 show up in mysql.slow_log
SELECT SLEEP(2);
@ -39,41 +40,11 @@ SLEEP(2)
SELECT sql_text FROM mysql.slow_log WHERE query_time >= '00:00:02';
sql_text
INSERT INTO t1(title) VALUES('mysql database')
SELECT * FROM t1 WHERE MATCH(title) AGAINST('mysql database')
SET GLOBAL debug_dbug = @old_debug;
TRUNCATE TABLE mysql.slow_log;
DROP TABLE t1;
# Case 2: Sync blocks DML(insert) on other tables.
CREATE TABLE t1 (
FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
title VARCHAR(200),
FULLTEXT(title)
) ENGINE = InnoDB;
CREATE TABLE t2(id INT);
connection con1;
SET GLOBAL debug_dbug='+d,fts_instrument_sync_request,fts_instrument_sync_sleep';
SET DEBUG_SYNC= 'fts_instrument_sync_request SIGNAL begin WAIT_FOR continue';
INSERT INTO t1(title) VALUES('mysql database');
connection con2;
SET DEBUG_SYNC= 'now WAIT_FOR begin';
INSERT INTO t2 VALUES(1);
connection default;
SET DEBUG_SYNC= 'now SIGNAL continue';
connection con1;
/* connection con1 */ INSERT INTO t1(title) VALUES('mysql database');
connection con2;
/* connection con2 */ INSERT INTO t2 VALUES(1);
connection default;
SET DEBUG_SYNC = 'RESET';
# make con1 & con2 show up in mysql.slow_log
SELECT SLEEP(2);
SLEEP(2)
0
# slow log results should be empty here.
SELECT sql_text FROM mysql.slow_log WHERE query_time >= '00:00:02';
sql_text
SET GLOBAL debug_dbug = @old_debug;
TRUNCATE TABLE mysql.slow_log;
DROP TABLE t1,t2;
SET DEBUG_SYNC=RESET;
disconnect con1;
disconnect con2;
# Restore slow log settings.

View File

@ -31,7 +31,7 @@ INSERT INTO t2 VALUES('mariadb');
connection default;
SET @saved_dbug = @@GLOBAL.debug_dbug;
SET GLOBAL debug_dbug ='+d,fts_instrument_sync_request,ib_optimize_wq_hang';
SET DEBUG_SYNC= 'fts_instrument_sync_request
SET DEBUG_SYNC= 'fts_sync_end
SIGNAL drop_index_start WAIT_FOR sync_op';
send INSERT INTO t1 VALUES('Keyword');

View File

@ -26,7 +26,7 @@ connection con1;
SET @old_dbug = @@SESSION.debug_dbug;
SET debug_dbug = '+d,fts_instrument_sync_debug';
SET DEBUG_SYNC= 'fts_write_node SIGNAL written WAIT_FOR selected';
SET DEBUG_SYNC= 'fts_sync_end SIGNAL written WAIT_FOR selected';
send INSERT INTO t1(title) VALUES('mysql database');
@ -73,7 +73,7 @@ connection con1;
SET debug_dbug = '+d,fts_instrument_sync_debug';
SET DEBUG_SYNC= 'fts_write_node SIGNAL written WAIT_FOR inserted';
SET DEBUG_SYNC= 'fts_sync_end SIGNAL written WAIT_FOR inserted';
send INSERT INTO t1(title) VALUES('mysql database');

View File

@ -65,53 +65,7 @@ SET GLOBAL debug_dbug = @old_debug;
TRUNCATE TABLE mysql.slow_log;
DROP TABLE t1;
--echo # Case 2: Sync blocks DML(insert) on other tables.
CREATE TABLE t1 (
FTS_DOC_ID BIGINT UNSIGNED AUTO_INCREMENT NOT NULL PRIMARY KEY,
title VARCHAR(200),
FULLTEXT(title)
) ENGINE = InnoDB;
CREATE TABLE t2(id INT);
connection con1;
SET GLOBAL debug_dbug='+d,fts_instrument_sync_request,fts_instrument_sync_sleep';
SET DEBUG_SYNC= 'fts_instrument_sync_request SIGNAL begin WAIT_FOR continue';
send INSERT INTO t1(title) VALUES('mysql database');
connection con2;
SET DEBUG_SYNC= 'now WAIT_FOR begin';
send INSERT INTO t2 VALUES(1);
connection default;
SET DEBUG_SYNC= 'now SIGNAL continue';
connection con1;
--echo /* connection con1 */ INSERT INTO t1(title) VALUES('mysql database');
--reap
connection con2;
--echo /* connection con2 */ INSERT INTO t2 VALUES(1);
--reap
connection default;
SET DEBUG_SYNC = 'RESET';
-- echo # make con1 & con2 show up in mysql.slow_log
SELECT SLEEP(2);
-- echo # slow log results should be empty here.
SELECT sql_text FROM mysql.slow_log WHERE query_time >= '00:00:02';
SET GLOBAL debug_dbug = @old_debug;
TRUNCATE TABLE mysql.slow_log;
DROP TABLE t1,t2;
SET DEBUG_SYNC=RESET;
disconnect con1;
disconnect con2;

View File

@ -78,3 +78,11 @@ if (`SELECT IF('$engine' != 'InnoDB', 1, 0)`)
--remove_files_wildcard $MYSQLTEST_VARDIR/tmp/mdev_27065 *
--rmdir $MYSQLTEST_VARDIR/tmp/mdev_27065
--echo #
--echo # MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... REBUILD PARTITION
--echo #
--eval CREATE TABLE t1 (c INT) ENGINE=$engine PARTITION BY KEY(c) PARTITIONS 4;
LOCK TABLES t1 WRITE, t1 AS a READ;
ALTER TABLE t1 REBUILD PARTITION p0;
DROP TABLE t1;

View File

@ -62,6 +62,13 @@ Warnings:
Warning 1618 <DATA DIRECTORY> table option of old schema is ignored
DROP TABLE t1;
#
# MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... REBUILD PARTITION
#
CREATE TABLE t1 (c INT) ENGINE=InnoDB PARTITION BY KEY(c) PARTITIONS 4;;
LOCK TABLES t1 WRITE, t1 AS a READ;
ALTER TABLE t1 REBUILD PARTITION p0;
DROP TABLE t1;
#
# MDEV-28079 Shutdown hangs after altering innodb partition fts table
#
CREATE TABLE t1(f1 INT, f2 CHAR(100))ENGINE=InnoDB PARTITION BY HASH(f1) PARTITIONS 2;

View File

@ -95,3 +95,10 @@ PARTITION p1 VALUES LESS THAN MAXVALUE
Warnings:
Warning 1618 <INDEX DIRECTORY> table option of old schema is ignored
DROP TABLE t2;
#
# MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... REBUILD PARTITION
#
CREATE TABLE t1 (c INT) ENGINE=Aria PARTITION BY KEY(c) PARTITIONS 4;;
LOCK TABLES t1 WRITE, t1 AS a READ;
ALTER TABLE t1 REBUILD PARTITION p0;
DROP TABLE t1;

View File

@ -68,6 +68,13 @@ PARTITION p1 VALUES LESS THAN MAXVALUE
Warnings:
Warning 1618 <INDEX DIRECTORY> table option of old schema is ignored
DROP TABLE t2;
#
# MDEV-26127 Assertion `err != DB_DUPLICATE_KEY' failed or InnoDB: Failing assertion: id != 0 on ALTER ... REBUILD PARTITION
#
CREATE TABLE t1 (c INT) ENGINE=MyISAM PARTITION BY KEY(c) PARTITIONS 4;;
LOCK TABLES t1 WRITE, t1 AS a READ;
ALTER TABLE t1 REBUILD PARTITION p0;
DROP TABLE t1;
create table t1 ( c1 int, c2 int, c3 varchar(100)) delay_key_write=1
partition by key(c1) (
partition p01 data directory = 'MYSQL_TMP_DIR'

View File

@ -1,5 +1,5 @@
/* Copyright (c) 2005, 2017, Oracle and/or its affiliates.
Copyright (c) 2009, 2020, MariaDB
Copyright (c) 2009, 2022, MariaDB
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -6969,17 +6969,34 @@ static bool alter_partition_lock_handling(ALTER_PARTITION_PARAM_TYPE *lpt)
static int alter_close_table(ALTER_PARTITION_PARAM_TYPE *lpt)
{
int error= 0;
THD *thd= lpt->thd;
TABLE_SHARE *share= lpt->table->s;
DBUG_ENTER("alter_close_table");
if (lpt->table->db_stat)
TABLE *table= thd->open_tables;
do {
table= find_locked_table(table, share->db.str, share->table_name.str);
if (!table)
{
error= mysql_lock_remove(lpt->thd, lpt->thd->lock, lpt->table);
error= lpt->table->file->ha_close();
lpt->table->db_stat= 0; // Mark file closed
DBUG_RETURN(0);
}
if (table->db_stat)
{
if (int error= mysql_lock_remove(thd, thd->lock, table))
{
DBUG_RETURN(error);
}
if (int error= table->file->ha_close())
{
DBUG_RETURN(error);
}
table->db_stat= 0; // Mark file closed
}
} while ((table= table->next));
DBUG_RETURN(0);
}
/**

View File

@ -1236,7 +1236,6 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n)
n->flushed + n->evicted < max) ||
recv_recovery_is_on()); ++scanned)
{
retry:
buf_page_t *prev= UT_LIST_GET_PREV(LRU, bpage);
const lsn_t oldest_modification= bpage->oldest_modification();
buf_pool.lru_hp.set(prev);
@ -1272,7 +1271,6 @@ static void buf_flush_LRU_list_batch(ulint max, flush_counters_t *n)
mysql_mutex_lock(&buf_pool.mutex);
if (p.second)
buf_pool.stat.n_pages_written+= p.second;
bpage= buf_pool.lru_hp.get();
goto retry;
}
else
@ -1304,6 +1302,7 @@ reacquire_mutex:
must_skip:
/* Can't evict or dispatch this block. Go to previous. */
ut_ad(buf_pool.lru_hp.is_hp(prev));
retry:
bpage= buf_pool.lru_hp.get();
}

View File

@ -38,6 +38,22 @@ Full Text Search interface
#include "dict0stats.h"
#include "btr0pcur.h"
/** The SYNC state of the cache. There is one instance of this struct
associated with each ADD thread. */
struct fts_sync_t {
/** Transaction used for SYNCing the cache to disk */
trx_t *trx;
/** Table with FTS index(es) */
dict_table_t *table;
/** Max size in bytes of the cache */
ulint max_cache_size;
/** The doc id at which the cache was noted as being
full, we use this to set the upper_limit field */
doc_id_t max_doc_id;
/** SYNC start time; only used if fts_enable_diag_print */
time_t start_time;
};
static const ulint FTS_MAX_ID_LEN = 32;
/** Column name from the FTS config table */
@ -185,15 +201,8 @@ struct fts_tokenize_param_t {
/** Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
@param[in,out] sync sync state
@param[in] unlock_cache whether unlock cache lock when write node
@param[in] wait whether wait when a sync is in progress
@return DB_SUCCESS if all OK */
static
dberr_t
fts_sync(
fts_sync_t* sync,
bool unlock_cache,
bool wait);
static dberr_t fts_sync(fts_sync_t *sync);
/****************************************************************//**
Release all resources held by the words rb tree, e.g., the node ilist.
@ -266,7 +275,6 @@ fts_cache_destroy(fts_cache_t* cache)
mysql_mutex_destroy(&cache->init_lock);
mysql_mutex_destroy(&cache->deleted_lock);
mysql_mutex_destroy(&cache->doc_id_lock);
pthread_cond_destroy(&cache->sync->cond);
if (cache->stopword_info.cached_stopword) {
rbt_free(cache->stopword_info.cached_stopword);
@ -540,7 +548,6 @@ fts_index_cache_init(
for (i = 0; i < FTS_NUM_AUX_INDEX; ++i) {
ut_a(index_cache->ins_graph[i] == NULL);
ut_a(index_cache->sel_graph[i] == NULL);
}
}
@ -610,7 +617,6 @@ fts_cache_create(
mem_heap_zalloc(heap, sizeof(fts_sync_t)));
cache->sync->table = table;
pthread_cond_init(&cache->sync->cond, nullptr);
/* Create the index cache vector that will hold the inverted indexes. */
cache->indexes = ib_vector_create(
@ -935,10 +941,6 @@ fts_cache_index_cache_create(
mem_heap_zalloc(static_cast<mem_heap_t*>(
cache->self_heap->arg), n_bytes));
index_cache->sel_graph = static_cast<que_t**>(
mem_heap_zalloc(static_cast<mem_heap_t*>(
cache->self_heap->arg), n_bytes));
fts_index_cache_init(cache->sync_heap, index_cache);
if (cache->get_docs) {
@ -1012,13 +1014,6 @@ fts_cache_clear(
index_cache->ins_graph[j] = NULL;
}
if (index_cache->sel_graph[j] != NULL) {
que_graph_free(index_cache->sel_graph[j]);
index_cache->sel_graph[j] = NULL;
}
}
index_cache->doc_stats = NULL;
@ -1311,8 +1306,7 @@ fts_cache_add_doc(
ib_vector_last(word->nodes));
}
if (fts_node == NULL || fts_node->synced
|| fts_node->ilist_size > FTS_ILIST_MAX_SIZE
if (!fts_node || fts_node->ilist_size > FTS_ILIST_MAX_SIZE
|| doc_id < fts_node->last_doc_id) {
fts_node = static_cast<fts_node_t*>(
@ -3284,7 +3278,7 @@ fts_add_doc_from_tuple(
if (cache->total_size > fts_max_cache_size / 5
|| fts_need_sync) {
fts_sync(cache->sync, true, false);
fts_sync(cache->sync);
}
mtr_start(&mtr);
@ -3444,42 +3438,34 @@ fts_add_doc_by_id(
get_doc->index_cache,
doc_id, doc.tokens);
bool need_sync = !cache->sync->in_progress
&& (fts_need_sync
|| (cache->total_size
- cache->total_size_at_sync)
> fts_max_cache_size / 10);
if (need_sync) {
cache->total_size_at_sync =
cache->total_size;
}
/** FTS cache sync should happen
frequently, because a user thread
should not hold the cache lock for
a long time. So the cache is synced
whenever its size exceeds 512 KB */
bool need_sync =
cache->total_size > 512*1024;
mysql_mutex_unlock(&table->fts->cache->lock);
DBUG_EXECUTE_IF(
"fts_instrument_sync",
fts_optimize_request_sync_table(table);
mysql_mutex_lock(&cache->lock);
if (cache->sync->in_progress)
my_cond_wait(
&cache->sync->cond,
&cache->lock.m_mutex);
mysql_mutex_unlock(&cache->lock);
fts_sync_table(table);
);
DBUG_EXECUTE_IF(
"fts_instrument_sync_debug",
fts_sync(cache->sync, true, true);
fts_sync(cache->sync);
);
DEBUG_SYNC_C("fts_instrument_sync_request");
DBUG_EXECUTE_IF(
"fts_instrument_sync_request",
fts_optimize_request_sync_table(table);
need_sync= true;
);
if (need_sync) {
fts_optimize_request_sync_table(table);
fts_sync_table(table);
}
mtr_start(&mtr);
@ -3846,15 +3832,13 @@ static MY_ATTRIBUTE((nonnull, warn_unused_result))
dberr_t
fts_sync_write_words(
trx_t* trx,
fts_index_cache_t* index_cache,
bool unlock_cache)
fts_index_cache_t* index_cache)
{
fts_table_t fts_table;
ulint n_nodes = 0;
ulint n_words = 0;
const ib_rbt_node_t* rbt_node;
dberr_t error = DB_SUCCESS;
ibool print_error = FALSE;
dict_table_t* table = index_cache->index->table;
FTS_INIT_INDEX_TABLE(
@ -3885,53 +3869,35 @@ fts_sync_write_words(
fts_table.suffix = fts_get_suffix(selected);
/* We iterate over all the nodes even if there was an error */
for (i = 0; i < ib_vector_size(word->nodes); ++i) {
fts_node_t* fts_node = static_cast<fts_node_t*>(
ib_vector_get(word->nodes, i));
if (fts_node->synced) {
continue;
} else {
fts_node->synced = true;
}
/*FIXME: we need to handle the error properly. */
if (error == DB_SUCCESS) {
if (unlock_cache) {
mysql_mutex_unlock(
&table->fts->cache->lock);
}
error = fts_write_node(
trx,
&index_cache->ins_graph[selected],
trx, &index_cache->ins_graph[selected],
&fts_table, &word->text, fts_node);
DEBUG_SYNC_C("fts_write_node");
DBUG_EXECUTE_IF("fts_write_node_crash",
DBUG_SUICIDE(););
DBUG_EXECUTE_IF(
"fts_instrument_sync_sleep",
DBUG_EXECUTE_IF("fts_instrument_sync_sleep",
std::this_thread::sleep_for(
std::chrono::seconds(1)););
if (unlock_cache) {
mysql_mutex_lock(
&table->fts->cache->lock);
}
if (error != DB_SUCCESS) {
goto err_exit;
}
}
n_nodes += ib_vector_size(word->nodes);
if (UNIV_UNLIKELY(error != DB_SUCCESS) && !print_error) {
if (UNIV_UNLIKELY(error != DB_SUCCESS)) {
err_exit:
ib::error() << "(" << error << ") writing"
" word node to FTS auxiliary index table "
<< table->name;
print_error = TRUE;
}
}
@ -3990,58 +3956,7 @@ fts_sync_index(
ut_ad(rbt_validate(index_cache->words));
return(fts_sync_write_words(trx, index_cache, sync->unlock_cache));
}
/** Check if index cache has been synced completely
@param[in,out] index_cache index cache
@return true if index is synced, otherwise false. */
static
bool
fts_sync_index_check(
fts_index_cache_t* index_cache)
{
const ib_rbt_node_t* rbt_node;
for (rbt_node = rbt_first(index_cache->words);
rbt_node != NULL;
rbt_node = rbt_next(index_cache->words, rbt_node)) {
fts_tokenizer_word_t* word;
word = rbt_value(fts_tokenizer_word_t, rbt_node);
fts_node_t* fts_node;
fts_node = static_cast<fts_node_t*>(ib_vector_last(word->nodes));
if (!fts_node->synced) {
return(false);
}
}
return(true);
}
/** Reset synced flag in index cache when rollback
@param[in,out] index_cache index cache */
static
void
fts_sync_index_reset(
fts_index_cache_t* index_cache)
{
const ib_rbt_node_t* rbt_node;
for (rbt_node = rbt_first(index_cache->words);
rbt_node != NULL;
rbt_node = rbt_next(index_cache->words, rbt_node)) {
fts_tokenizer_word_t* word;
word = rbt_value(fts_tokenizer_word_t, rbt_node);
fts_node_t* fts_node;
fts_node = static_cast<fts_node_t*>(ib_vector_last(word->nodes));
fts_node->synced = false;
}
return(fts_sync_write_words(trx, index_cache));
}
/** Commit the SYNC, change state of processed doc ids etc.
@ -4074,14 +3989,14 @@ fts_sync_commit(
sync, cache->deleted_doc_ids);
}
/* We need to do this within the deleted lock since fts_delete() can
attempt to add a deleted doc id to the cache deleted id array. */
if (UNIV_LIKELY(error == DB_SUCCESS)) {
/* We need to do this within the deleted lock
since fts_delete() can attempt to add a deleted
doc id to the cache deleted id array. */
fts_cache_clear(cache);
DEBUG_SYNC_C("fts_deleted_doc_ids_clear");
fts_cache_init(cache);
mysql_mutex_unlock(&cache->lock);
if (UNIV_LIKELY(error == DB_SUCCESS)) {
fts_sql_commit(trx);
} else {
fts_sql_rollback(trx);
@ -4123,10 +4038,6 @@ fts_sync_rollback(
index_cache = static_cast<fts_index_cache_t*>(
ib_vector_get(cache->indexes, i));
/* Reset synced flag so nodes will not be skipped
in the next sync, see fts_sync_write_words(). */
fts_sync_index_reset(index_cache);
for (j = 0; fts_index_selector[j].value; ++j) {
if (index_cache->ins_graph[j] != NULL) {
@ -4135,13 +4046,6 @@ fts_sync_rollback(
index_cache->ins_graph[j] = NULL;
}
if (index_cache->sel_graph[j] != NULL) {
que_graph_free(index_cache->sel_graph[j]);
index_cache->sel_graph[j] = NULL;
}
}
}
@ -4160,12 +4064,7 @@ FTS auxiliary INDEX table and clear the cache at the end.
@param[in] unlock_cache whether unlock cache lock when write node
@param[in] wait whether wait when a sync is in progress
@return DB_SUCCESS if all OK */
static
dberr_t
fts_sync(
fts_sync_t* sync,
bool unlock_cache,
bool wait)
static dberr_t fts_sync(fts_sync_t *sync)
{
if (srv_read_only_mode) {
return DB_READ_ONLY;
@ -4176,33 +4075,13 @@ fts_sync(
fts_cache_t* cache = sync->table->fts->cache;
mysql_mutex_lock(&cache->lock);
/* Check if cache is being synced.
Note: we release cache lock in fts_sync_write_words() to
avoid long wait for the lock by other threads. */
if (sync->in_progress) {
if (!wait) {
mysql_mutex_unlock(&cache->lock);
return(DB_SUCCESS);
}
do {
my_cond_wait(&sync->cond, &cache->lock.m_mutex);
} while (sync->in_progress);
}
sync->unlock_cache = unlock_cache;
sync->in_progress = true;
DEBUG_SYNC_C("fts_sync_begin");
fts_sync_begin(sync);
begin_sync:
const size_t fts_cache_size= fts_max_cache_size;
if (cache->total_size > fts_cache_size) {
/* Avoid the case where sync never finishes
when insert/update keeps coming. */
ut_ad(sync->unlock_cache);
sync->unlock_cache = false;
ib::warn() << "Total InnoDB FTS size "
<< cache->total_size << " for the table "
<< cache->sync->table->name
@ -4226,52 +4105,23 @@ begin_sync:
error = fts_sync_index(sync, index_cache);
if (error != DB_SUCCESS) {
goto end_sync;
}
if (!sync->unlock_cache
&& cache->total_size < fts_max_cache_size) {
/* Reset the unlock cache if the value
is less than innodb_ft_cache_size */
sync->unlock_cache = true;
goto err_exit;
}
}
DBUG_EXECUTE_IF("fts_instrument_sync_interrupted",
sync->interrupted = true;
error = DB_INTERRUPTED;
goto end_sync;
goto err_exit;
);
/* Make sure all the caches are synced. */
for (i = 0; i < ib_vector_size(cache->indexes); ++i) {
fts_index_cache_t* index_cache;
index_cache = static_cast<fts_index_cache_t*>(
ib_vector_get(cache->indexes, i));
if (index_cache->index->to_be_dropped
|| fts_sync_index_check(index_cache)) {
continue;
}
goto begin_sync;
}
end_sync:
if (error == DB_SUCCESS && !sync->interrupted) {
if (error == DB_SUCCESS) {
error = fts_sync_commit(sync);
} else {
err_exit:
fts_sync_rollback(sync);
return error;
}
mysql_mutex_lock(&cache->lock);
ut_ad(sync->in_progress);
sync->interrupted = false;
sync->in_progress = false;
pthread_cond_broadcast(&sync->cond);
mysql_mutex_unlock(&cache->lock);
/* We need to check whether an optimize is required, for that
we make copies of the two variables that control the trigger. These
variables can change behind our back and we don't want to hold the
@ -4283,6 +4133,7 @@ end_sync:
mysql_mutex_unlock(&cache->deleted_lock);
DEBUG_SYNC_C("fts_sync_end");
return(error);
}
@ -4291,12 +4142,12 @@ FTS auxiliary INDEX table and clear the cache at the end.
@param[in,out] table fts table
@param[in] wait whether wait for existing sync to finish
@return DB_SUCCESS on success, error code on failure. */
dberr_t fts_sync_table(dict_table_t* table, bool wait)
dberr_t fts_sync_table(dict_table_t* table)
{
ut_ad(table->fts);
return table->space && !table->corrupted && table->fts->cache
? fts_sync(table->fts->cache->sync, !wait, wait)
? fts_sync(table->fts->cache->sync)
: DB_SUCCESS;
}

View File

@ -83,9 +83,8 @@ enum fts_msg_type_t {
FTS_MSG_ADD_TABLE, /*!< Add table to the optimize thread's
work queue */
FTS_MSG_DEL_TABLE, /*!< Remove a table from the optimize
FTS_MSG_DEL_TABLE /*!< Remove a table from the optimize
threads work queue */
FTS_MSG_SYNC_TABLE /*!< Sync fts cache of a table */
};
/** Compressed list of words that have been read from FTS INDEX
@ -2625,36 +2624,6 @@ fts_optimize_remove_table(
mysql_mutex_unlock(&fts_optimize_wq->mutex);
}
/** Send sync fts cache for the table.
@param[in] table table to sync */
void
fts_optimize_request_sync_table(
dict_table_t* table)
{
/* if the optimize system not yet initialized, return */
if (!fts_optimize_wq) {
return;
}
mysql_mutex_lock(&fts_optimize_wq->mutex);
/* FTS optimizer thread is already exited */
if (fts_opt_start_shutdown) {
ib::info() << "Try to sync table " << table->name
<< " after FTS optimize thread exiting.";
} else if (table->fts->sync_message) {
/* If the table already has SYNC message in
fts_optimize_wq queue then ignore it */
} else {
add_msg(fts_optimize_create_msg(FTS_MSG_SYNC_TABLE, table));
table->fts->sync_message = true;
DBUG_EXECUTE_IF("fts_optimize_wq_count_check",
DBUG_ASSERT(fts_optimize_wq->length <= 1000););
}
mysql_mutex_unlock(&fts_optimize_wq->mutex);
}
/** Add a table to fts_slots if it doesn't already exist. */
static bool fts_optimize_new_table(dict_table_t* table)
{
@ -2796,7 +2765,8 @@ static void fts_optimize_sync_table(dict_table_t *table,
if (sync_table->fts && sync_table->fts->cache && sync_table->is_accessible())
{
fts_sync_table(sync_table, false);
fts_sync_table(sync_table);
if (process_message)
{
mysql_mutex_lock(&fts_optimize_wq->mutex);
@ -2896,24 +2866,6 @@ retry_later:
--n_tables;
}
break;
case FTS_MSG_SYNC_TABLE:
if (UNIV_UNLIKELY(wsrep_sst_disable_writes)) {
add_msg(msg);
goto retry_later;
}
DBUG_EXECUTE_IF(
"fts_instrument_msg_sync_sleep",
std::this_thread::sleep_for(
std::chrono::milliseconds(
300)););
fts_optimize_sync_table(
static_cast<dict_table_t*>(msg->ptr),
true);
break;
default:
ut_error;
}
@ -3046,7 +2998,7 @@ void fts_sync_during_ddl(dict_table_t* table)
if (!sync_message)
return;
fts_sync_table(table, false);
fts_sync_table(table);
mysql_mutex_lock(&fts_optimize_wq->mutex);
table->fts->sync_message = false;

View File

@ -1476,7 +1476,8 @@ static void innodb_drop_database(handlerton*, char *path)
"WHILE 1 = 1 LOOP\n"
" FETCH tab INTO tid,name;\n"
" IF (SQL % NOTFOUND) THEN EXIT; END IF;\n"
" IF SUBSTR(name, 0, LENGTH(:db)) <> :db THEN EXIT; END IF;\n"
" IF TO_BINARY(SUBSTR(name, 0, LENGTH(:db))) <> TO_BINARY(:db)"
" THEN EXIT; END IF;\n"
" DELETE FROM SYS_COLUMNS WHERE TABLE_ID=tid;\n"
" DELETE FROM SYS_TABLES WHERE ID=tid;\n"
" OPEN idx;\n"

View File

@ -11415,12 +11415,8 @@ foreign_fail:
ut_d(dict_table_check_for_dup_indexes(
ctx->new_table, CHECK_ABORTED_OK));
#ifdef UNIV_DEBUG
if (!(ctx->new_table->fts != NULL
&& ctx->new_table->fts->cache->sync->in_progress)) {
ut_a(fts_check_cached_index(ctx->new_table));
}
#endif
ut_ad(!ctx->new_table->fts
|| fts_check_cached_index(ctx->new_table));
}
unlock_and_close_files(deleted, trx);

View File

@ -656,12 +656,6 @@ fts_optimize_remove_table(
void
fts_optimize_shutdown();
/** Send sync fts cache for the table.
@param[in] table table to sync */
void
fts_optimize_request_sync_table(
dict_table_t* table);
/**********************************************************************//**
Take a FTS savepoint. */
void
@ -716,9 +710,8 @@ fts_savepoint_rollback_last_stmt(
/** Run SYNC on the table, i.e., write out data from the cache to the
FTS auxiliary INDEX table and clear the cache at the end.
@param[in,out] table fts table
@param[in] wait whether to wait for existing sync to finish
@return DB_SUCCESS on success, error code on failure. */
dberr_t fts_sync_table(dict_table_t* table, bool wait = true);
dberr_t fts_sync_table(dict_table_t* table);
/****************************************************************//**
Create an FTS index cache. */

View File

@ -75,7 +75,6 @@ struct fts_index_cache_t {
que_t** ins_graph; /*!< Insert query graphs */
que_t** sel_graph; /*!< Select query graphs */
CHARSET_INFO* charset; /*!< charset */
};
@ -87,35 +86,7 @@ struct fts_stopword_t {
CHARSET_INFO* charset; /*!< charset for stopword */
};
/** The SYNC state of the cache. There is one instance of this struct
associated with each ADD thread. */
struct fts_sync_t {
trx_t* trx; /*!< The transaction used for SYNCing
the cache to disk */
dict_table_t* table; /*!< Table with FTS index(es) */
ulint max_cache_size; /*!< Max size in bytes of the cache */
ibool cache_full; /*!< flag, when true it indicates that
we need to sync the cache to disk */
ulint lower_index; /*!< the start index of the doc id
vector from where to start adding
documents to the FTS cache */
ulint upper_index; /*!< max index of the doc id vector to
add to the FTS cache */
ibool interrupted; /*!< TRUE if SYNC was interrupted */
doc_id_t min_doc_id; /*!< The smallest doc id added to the
cache. It should equal to
doc_ids[lower_index] */
doc_id_t max_doc_id; /*!< The doc id at which the cache was
noted as being full, we use this to
set the upper_limit field */
time_t start_time; /*!< SYNC start time; only used if
fts_enable_diag_print */
bool in_progress; /*!< flag whether sync is in progress.*/
bool unlock_cache; /*!< flag whether unlock cache when
write fts node */
/** condition variable for in_progress; used with table->fts->cache->lock */
pthread_cond_t cond;
};
struct fts_sync_t;
/** The cache for the FTS system. It is a memory-based inverted index
that new entries are added to, until it grows over the configured maximum
@ -204,7 +175,6 @@ struct fts_node_t {
ulint ilist_size_alloc;
/*!< Allocated size of ilist in
bytes */
bool synced; /*!< flag whether the node is synced */
};
/** A tokenizer word. Contains information about one word. */

View File

@ -1,26 +1,26 @@
#
# Add the following to the end of your /etc/magic file to get the 'file'
# command to recognize some MySQL files.
# command to recognize some MariaDB / MySQL files.
#
0 beshort 0xfe01 MySQL table definition file
0 beshort 0xfe01 MariaDB/MySQL table definition file
>2 byte x Version %d
0 belong&0xffffff00 0xfefe0700 MySQL MyISAM index file
0 belong&0xffffff00 0xfefe0700 MariaDB/MySQL MyISAM index file
>3 byte x Version %d
0 belong&0xffffff00 0xfefe0800 MySQL MyISAM compressed data file
0 belong&0xffffff00 0xfefe0800 MariaDB/MySQL MyISAM compressed data file
>3 byte x Version %d
0 belong&0xffffff00 0xfefe0900 MySQL Maria index file
0 belong&0xffffff00 0xfefe0900 MariaDB Aria index file
>3 byte x Version %d
0 belong&0xffffff00 0xfefe0A00 MySQL Maria compressed data file
0 belong&0xffffff00 0xfefe0A00 MariaDB Aria compressed data file
>3 byte x Version %d
0 belong&0xffffff00 0xfefe0500 MySQL ISAM index file
0 belong&0xffffff00 0xfefe0500 MariaDB/MySQL ISAM index file
>3 byte x Version %d
0 belong&0xffffff00 0xfefe0600 MySQL ISAM compressed data file
0 belong&0xffffff00 0xfefe0600 MariaDB/MySQL ISAM compressed data file
>3 byte x Version %d
0 string \376bin MySQL replication log
0 string \376bin MariaDB/MySQL replication log
0 belong&0xffffff00 0xfefe0b00
>4 string MARIALOG MySQL Maria transaction log file
>4 string MARIALOG MariaDB Aria transaction log file
>>3 byte x Version %d
0 belong&0xffffff00 0xfefe0c00
>4 string MACF MySQL Maria control file
>>3 byte x Version %d
0 belong&0xffffff00 0xfefe0c00 MariaDB Aria control file
>3 byte x Version %d
0 belong&0xffffff00 0xfefe0b00 MariaDB DDL recovery log
>3 byte x Version %d
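
The header comment of this magic snippet is effectively a how-to: appended to /etc/magic (or passed to file(1) via -m), these rules identify MariaDB data files from their leading bytes. As a rough self-contained illustration of what the rules test, here is the same logic written out directly; this is a sketch only, not file(1), and the build/run lines in the comments (file names, paths) are assumptions, not taken from the commit.

// Sketch: apply the byte tests from the magic rules above to the first bytes of
// a file and print a matching description. Not a replacement for file(1).
// Build (assumed): g++ -std=c++11 -o mariadb_magic mariadb_magic.cc
// Run   (assumed): ./mariadb_magic /var/lib/mysql/aria_log_control
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <string>

static std::string classify(const char *path)
{
  std::ifstream f(path, std::ios::binary);
  unsigned char h[12] = {0};
  f.read(reinterpret_cast<char *>(h), sizeof h);
  if (f.gcount() < 4)
    return "unknown (unreadable or too short)";

  // "0 beshort 0xfe01", ">2 byte x Version %d"
  if (h[0] == 0xfe && h[1] == 0x01)
    return "MariaDB/MySQL table definition file, Version " + std::to_string(h[2]);

  // "0 string \376bin"
  if (std::memcmp(h, "\xfe" "bin", 4) == 0)
    return "MariaDB/MySQL replication log";

  // "0 belong&0xffffff00 0x........": big-endian 32-bit word, low byte masked off
  const std::uint32_t masked = std::uint32_t(h[0]) << 24 |
                               std::uint32_t(h[1]) << 16 |
                               std::uint32_t(h[2]) << 8;

  struct Entry { std::uint32_t magic; const char *name; };
  static const Entry simple[] = {
    {0xfefe0700, "MariaDB/MySQL MyISAM index file"},
    {0xfefe0800, "MariaDB/MySQL MyISAM compressed data file"},
    {0xfefe0900, "MariaDB Aria index file"},
    {0xfefe0a00, "MariaDB Aria compressed data file"},
    {0xfefe0500, "MariaDB/MySQL ISAM index file"},
    {0xfefe0600, "MariaDB/MySQL ISAM compressed data file"},
    {0xfefe0c00, "MariaDB Aria control file"},
  };
  for (const Entry &e : simple)
    if (masked == e.magic)
      return std::string(e.name) + ", Version " + std::to_string(h[3]);

  if (masked == 0xfefe0b00)  // ">4 string MARIALOG" separates the two 0x0b formats
    return (f.gcount() >= 12 && std::memcmp(h + 4, "MARIALOG", 8) == 0)
               ? "MariaDB Aria transaction log file, Version " + std::to_string(h[3])
               : "MariaDB DDL recovery log, Version " + std::to_string(h[3]);

  return "unknown";
}

int main(int argc, char **argv)
{
  if (argc != 2)
  {
    std::fprintf(stderr, "usage: %s <file>\n", argv[0]);
    return 1;
  }
  std::printf("%s: %s\n", argv[1], classify(argv[1]).c_str());
  return 0;
}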