mirror of
https://github.com/MariaDB/server.git
synced 2026-01-06 05:22:24 +03:00
Merge InnoDB 5.7 from mysql-5.7.14.
Contains also:
MDEV-10549 mysqld: sql/handler.cc:2692: int handler::ha_index_first(uchar*): Assertion `table_share->tmp_table != NO_TMP_TABLE || m_lock_type != 2' failed. (branch bb-10.2-jan)
Unlike MySQL, InnoDB still uses THR_LOCK in MariaDB
MDEV-10548 Some of the debug sync waits do not work with InnoDB 5.7 (branch bb-10.2-jan)
enable tests that were fixed in MDEV-10549
MDEV-10548 Some of the debug sync waits do not work with InnoDB 5.7 (branch bb-10.2-jan)
fix main.innodb_mysql_sync - re-enable online alter for partitioned innodb tables
This commit is contained in:
@@ -499,7 +499,18 @@ enum ha_base_keytype {
|
||||
#define HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE 191 /* Too many words in a phrase */
|
||||
#define HA_ERR_DECRYPTION_FAILED 192 /* Table encrypted but
|
||||
decypt failed */
|
||||
#define HA_ERR_LAST 192 /* Copy of last error nr */
|
||||
#define HA_ERR_FK_DEPTH_EXCEEDED 193 /* FK cascade depth exceeded */
|
||||
#define HA_MISSING_CREATE_OPTION 194 /* Option Missing during Create */
|
||||
#define HA_ERR_SE_OUT_OF_MEMORY 195 /* Out of memory in storage engine */
|
||||
#define HA_ERR_TABLE_CORRUPT 196 /* Table/Clustered index is corrupted. */
|
||||
#define HA_ERR_QUERY_INTERRUPTED 197 /* The query was interrupted */
|
||||
#define HA_ERR_TABLESPACE_MISSING 198 /* Missing Tablespace */
|
||||
#define HA_ERR_TABLESPACE_IS_NOT_EMPTY 199 /* Tablespace is not empty */
|
||||
#define HA_ERR_WRONG_FILE_NAME 200 /* Invalid Filename */
|
||||
#define HA_ERR_NOT_ALLOWED_COMMAND 201 /* Operation is not allowed */
|
||||
#define HA_ERR_COMPUTE_FAILED 202 /* Compute generated column value failed */
|
||||
#define HA_ERR_INNODB_READ_ONLY 203 /* InnoDB is in read only mode */
|
||||
#define HA_ERR_LAST 203 /* Copy of last error nr * */
|
||||
|
||||
/* Number of different errors */
|
||||
#define HA_ERR_ERRORS (HA_ERR_LAST - HA_ERR_FIRST + 1)
|
||||
|
||||
@@ -95,7 +95,18 @@ static const char *handler_error_messages[]=
|
||||
"Disk full",
|
||||
"Incompatible key or row definition between the MariaDB .frm file and the information in the storage engine. You have to dump and restore the table to fix this",
|
||||
"Too many words in a FTS phrase or proximity search",
|
||||
"Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match."
|
||||
"Table encrypted but decryption failed. This could be because correct encryption management plugin is not loaded, used encryption key is not available or encryption method does not match.",
|
||||
"Foreign key cascade delete/update exceeds max depth",
|
||||
"Table storage engine found required create option missing",
|
||||
"Out of memory in storage engine",
|
||||
"Operation cannot be performed. The table is missing, corrupt or contains bad data.",
|
||||
"Query execution was interrupted",
|
||||
"Tablespace is missing for table",
|
||||
"Tablespace is not empty",
|
||||
"Incorrect File Name",
|
||||
"Table storage engine found required create option missing",
|
||||
"Compute virtual column value failed",
|
||||
"InnoDB is in read only mode"
|
||||
};
|
||||
|
||||
#endif /* MYSYS_MY_HANDLER_ERRORS_INCLUDED */
|
||||
|
||||
@@ -22,7 +22,3 @@ innodb-wl5522-debug-zip : broken upstream
|
||||
innodb_bug12902967 : broken upstream
|
||||
file_contents : MDEV-6526 these files are not installed anymore
|
||||
max_statement_time : cannot possibly work, depends on timing
|
||||
implicit_commit : MDEV-10549
|
||||
lock_sync : MDEV-10548
|
||||
innodb_mysql_sync : MDEV-10548
|
||||
partition_debug_sync : MDEV-10548
|
||||
|
||||
4
mysql-test/include/have_no_undo_tablespaces.inc
Normal file
4
mysql-test/include/have_no_undo_tablespaces.inc
Normal file
@@ -0,0 +1,4 @@
|
||||
if (`select count(*) = 0 from information_schema.global_variables where variable_name like 'innodb_undo_tablespaces' and variable_value = 0`)
|
||||
{
|
||||
--skip Test requires innodb_undo_tablespaces=0
|
||||
}
|
||||
@@ -624,6 +624,11 @@ DROP TABLE t1,t2,t3;
|
||||
# Test bug when trying to drop data file which no InnoDB directory entry
|
||||
#
|
||||
|
||||
--disable_query_log
|
||||
call mtr.add_suppression("InnoDB: Table .*bug29807.*");
|
||||
call mtr.add_suppression("InnoDB: Cannot open table test/bug29807 from");
|
||||
--enable_query_log
|
||||
|
||||
create table t1 (a int) engine=innodb;
|
||||
let $MYSQLD_DATADIR= `select @@datadir`;
|
||||
copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/bug29807.frm;
|
||||
@@ -631,10 +636,6 @@ copy_file $MYSQLD_DATADIR/test/t1.frm $MYSQLD_DATADIR/test/bug29807.frm;
|
||||
select * from bug29807;
|
||||
drop table t1;
|
||||
drop table bug29807;
|
||||
--disable_query_log
|
||||
call mtr.add_suppression("InnoDB: Error: table .test...bug29807. does not exist in the InnoDB internal");
|
||||
call mtr.add_suppression("InnoDB: Cannot open table test/bug29807 from");
|
||||
--enable_query_log
|
||||
|
||||
|
||||
#
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
call mtr.add_suppression("InnoDB: Table .* does not exist in the InnoDB internal data dictionary .*");
|
||||
#
|
||||
# Bug#11766879/Bug#60106: DIFF BETWEEN # OF INDEXES IN MYSQL VS INNODB,
|
||||
# PARTITONING, ON INDEX CREATE
|
||||
|
||||
@@ -76,13 +76,17 @@ DROP TABLE t1, t2;
|
||||
# MDEV-10235: Deadlock in CREATE TABLE ... AS SELECT .. if result set
|
||||
# is empty in Galera
|
||||
#
|
||||
connection node_1;
|
||||
CREATE TABLE t1(c1 INT) ENGINE=INNODB;
|
||||
INSERT INTO t1 VALUES(1);
|
||||
CREATE TABLE t2 AS SELECT * FROM t1 WHERE c1=2;
|
||||
connection node_2;
|
||||
SELECT * FROM t1;
|
||||
c1
|
||||
1
|
||||
SELECT * FROM t2;
|
||||
c1
|
||||
DROP TABLE t1, t2;
|
||||
disconnect node_2;
|
||||
disconnect node_1;
|
||||
# End of tests
|
||||
|
||||
@@ -9,5 +9,3 @@
|
||||
# Do not use any TAB characters for whitespace.
|
||||
#
|
||||
##############################################################################
|
||||
|
||||
innodb : MDEV-10549
|
||||
@@ -545,6 +545,7 @@ optimize table t1;
|
||||
connection default;
|
||||
handler t1 read next;
|
||||
c1
|
||||
1
|
||||
handler t1 close;
|
||||
connection con2;
|
||||
Table Op Msg_type Msg_text
|
||||
|
||||
@@ -12,5 +12,5 @@
|
||||
|
||||
innodb.auto_increment_dup : MDEV-10548
|
||||
innodb_skip_innodb_is_tables : MDEV-10200
|
||||
innodb.innodb_bug13510739: MDEV-10549
|
||||
innodb.defrag_mdl-9155 : MDEV-10551
|
||||
innodb_defragment_fill_factor : MDEV-10771
|
||||
38
mysql-test/suite/innodb/include/show_i_s_tablespaces.inc
Normal file
38
mysql-test/suite/innodb/include/show_i_s_tablespaces.inc
Normal file
@@ -0,0 +1,38 @@
|
||||
# This script assumes that the caller did the following;
|
||||
# LET $MYSQLD_DATADIR = `select @@datadir`;
|
||||
# LET $INNODB_PAGE_SIZE = `select @@innodb_page_size`;
|
||||
--echo === information_schema.innodb_sys_tablespaces and innodb_sys_datafiles ===
|
||||
--disable_query_log
|
||||
--replace_regex /#P#/#p#/ /#SP#/#sp#/
|
||||
--replace_result ./ MYSQLD_DATADIR/ $MYSQLD_DATADIR/ MYSQLD_DATADIR/ $MYSQLD_DATADIR MYSQLD_DATADIR/ $MYSQL_TMP_DIR MYSQL_TMP_DIR $INNODB_PAGE_SIZE DEFAULT
|
||||
SELECT s.name 'Space_Name',
|
||||
s.space_type 'Space_Type',
|
||||
s.page_size 'Page_Size',
|
||||
s.zip_page_size 'Zip_Size',
|
||||
s.row_format 'Formats_Permitted',
|
||||
d.path 'Path'
|
||||
FROM information_schema.innodb_sys_tablespaces s,
|
||||
information_schema.innodb_sys_datafiles d
|
||||
WHERE s.space = d.space
|
||||
AND s.name NOT LIKE 'mysql/%'
|
||||
AND s.name NOT LIKE 'sys/%'
|
||||
ORDER BY s.space;
|
||||
|
||||
# This SELECT will not show UNDO or TEMPORARY tablespaces since
|
||||
# they are only in FILES, not SYS_TABLESPACES.
|
||||
--echo === information_schema.files ===
|
||||
--replace_regex /innodb_file_per_table.[0-9]+/innodb_file_per_table.##/ /#P#/#p#/ /#SP#/#sp#/
|
||||
--replace_result ./ MYSQLD_DATADIR/ $MYSQLD_DATADIR/ MYSQLD_DATADIR/ $MYSQLD_DATADIR MYSQLD_DATADIR/ $MYSQL_TMP_DIR MYSQL_TMP_DIR $INNODB_PAGE_SIZE DEFAULT
|
||||
SELECT s.name 'Space_Name',
|
||||
f.file_type 'File_Type',
|
||||
f.engine 'Engine',
|
||||
f.status 'Status',
|
||||
f.tablespace_name 'Tablespace_Name',
|
||||
f.file_name 'Path'
|
||||
FROM information_schema.files f,
|
||||
information_schema.innodb_sys_tablespaces s
|
||||
WHERE f.file_id = s.space
|
||||
AND s.name NOT LIKE 'mysql/%'
|
||||
AND s.name NOT LIKE 'sys/%'
|
||||
ORDER BY f.file_id;
|
||||
--enable_query_log
|
||||
@@ -1,6 +1,7 @@
|
||||
call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded.");
|
||||
call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue.");
|
||||
call mtr.add_suppression("InnoDB: Error: Tablespace flags .* corrupted unused .*");
|
||||
call mtr.add_suppression("InnoDB: Tablespace flags: .* corrupted in file: .* ");
|
||||
SET GLOBAL innodb_file_per_table = 1;
|
||||
SELECT @@innodb_file_per_table;
|
||||
@@innodb_file_per_table
|
||||
|
||||
@@ -7,10 +7,10 @@ X RECORD `test`.```t'\"_str` PRIMARY 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a
|
||||
X RECORD `test`.```t'\"_str` PRIMARY 4 '3', 'abc', '\\abc', 'abc\\', 'a\\bc', 'a\\bc\\', '\\abc\\\\'
|
||||
X RECORD `test`.```t'\"_str` PRIMARY 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0'
|
||||
X RECORD `test`.```t'\"_str` PRIMARY 5 '4', 'abc', '\0abc', 'abc\0', 'a\0bc', 'a\0bc\0', 'a\0bc\0\0'
|
||||
X RECORD `test`.`t_max` PRIMARY 2 127.000000, 140517642401116, 32767.000000, 140517642401147, 8388607.000000, 140517642401180, 2147483647.000000, 140517642401216, 9223372036854775808.000000, 140517642401261
|
||||
X RECORD `test`.`t_max` PRIMARY 2 127.000000, 140517642401116, 32767.000000, 140517642401147, 8388607.000000, 140517642401180, 2147483647.000000, 140517642401216, 9223372036854775808.000000, 140517642401261
|
||||
X RECORD `test`.`t_min` PRIMARY 2 18446744073709551616.000000, 140517642401133, 18446744073709518848.000000, 140517642401179, 18446744073701163008.000000, 140517642401225, 18446744071562067968.000000, 140517642401271, 9223372036854775808.000000, 140517642401316
|
||||
X RECORD `test`.`t_min` PRIMARY 2 18446744073709551616.000000, 140517642401133, 18446744073709518848.000000, 140517642401179, 18446744073701163008.000000, 140517642401225, 18446744071562067968.000000, 140517642401271, 9223372036854775808.000000, 140517642401316
|
||||
X RECORD `test`.`t_min` PRIMARY 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0
|
||||
X RECORD `test`.`t_min` PRIMARY 2 -128, 0, -32768, 0, -8388608, 0, -2147483648, 0, -9223372036854775808, 0
|
||||
X RECORD `test`.`t_max` PRIMARY 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615
|
||||
X RECORD `test`.`t_max` PRIMARY 2 127, 255, 32767, 65535, 8388607, 16777215, 2147483647, 4294967295, 9223372036854775807, 18446744073709551615
|
||||
X RECORD `test`.```t'\"_str` PRIMARY 1 supremum pseudo-record
|
||||
X RECORD `test`.```t'\"_str` PRIMARY 1 supremum pseudo-record
|
||||
lock_table COUNT(*)
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
set global innodb_support_xa=default;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
|
||||
set session innodb_support_xa=default;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
|
||||
SET SESSION DEFAULT_STORAGE_ENGINE = InnoDB;
|
||||
drop table if exists t1,t2,t3,t1m,t1i,t2m,t2i,t4;
|
||||
drop procedure if exists p1;
|
||||
|
||||
@@ -20,6 +20,7 @@
|
||||
call mtr.add_suppression("InnoDB: Tablespace for table .* is set as discarded.");
|
||||
call mtr.add_suppression("InnoDB: Cannot calculate statistics for table .* because the .ibd file is missing. Please refer to .* for how to resolve the issue.");
|
||||
call mtr.add_suppression("InnoDB: Error: Tablespace flags .* corrupted unused .*");
|
||||
call mtr.add_suppression("InnoDB: Tablespace flags: .* corrupted in file: .* ");
|
||||
|
||||
let MYSQLD_DATADIR =`SELECT @@datadir`;
|
||||
let $innodb_file_per_table = `SELECT @@innodb_file_per_table`;
|
||||
|
||||
@@ -1141,7 +1141,7 @@ insert into t1 values
|
||||
(244, 243), (245, 244), (246, 245), (247, 246),
|
||||
(248, 247), (249, 248), (250, 249), (251, 250),
|
||||
(252, 251), (253, 252), (254, 253), (255, 254);
|
||||
--error 1296,1451
|
||||
--error 4029,1451
|
||||
delete from t1 where id=0;
|
||||
delete from t1 where id=255;
|
||||
--error 0,1451
|
||||
|
||||
@@ -1 +1,3 @@
|
||||
--loose-innodb-lock-wait-timeout=2 --default-storage-engine=MyISAM
|
||||
--loose-innodb-lock-wait-timeout=2
|
||||
--default-storage-engine=MyISAM
|
||||
--loose-innodb-large-prefix=off
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
--source include/big_test.inc
|
||||
# test takes too long with valgrind
|
||||
--source include/not_valgrind.inc
|
||||
--source include/have_debug.inc
|
||||
--let $num_inserts = 1500
|
||||
--let $num_ops = 3500
|
||||
--source suite/innodb/include/innodb_simulate_comp_failures.inc
|
||||
|
||||
@@ -8,7 +8,7 @@ variable_value
|
||||
SELECT variable_value FROM information_schema.global_status
|
||||
WHERE LOWER(variable_name) = 'innodb_buffer_pool_pages_total';
|
||||
variable_value
|
||||
{checked_valid}
|
||||
512
|
||||
# Test 3) Query some information_shema tables that are dependent upon
|
||||
# the page size.
|
||||
SELECT t.name table_name, t.n_cols, t.flag table_flags,
|
||||
@@ -20,28 +20,8 @@ WHERE t.table_id = i.table_id
|
||||
AND t.name LIKE 'mysql%'
|
||||
ORDER BY t.name, i.index_id;
|
||||
table_name n_cols table_flags index_name root_page type n_fields merge_threshold
|
||||
mysql/engine_cost 9 33 PRIMARY 3 3 3 50
|
||||
mysql/gtid_executed 6 33 PRIMARY 3 3 2 50
|
||||
mysql/help_category 7 33 PRIMARY 3 3 1 50
|
||||
mysql/help_category 7 33 name 4 2 1 50
|
||||
mysql/help_keyword 5 33 PRIMARY 3 3 1 50
|
||||
mysql/help_keyword 5 33 name 4 2 1 50
|
||||
mysql/help_relation 5 33 PRIMARY 3 3 2 50
|
||||
mysql/help_topic 9 33 PRIMARY 3 3 1 50
|
||||
mysql/help_topic 9 33 name 4 2 1 50
|
||||
mysql/innodb_index_stats 11 33 PRIMARY 3 3 4 50
|
||||
mysql/innodb_table_stats 9 33 PRIMARY 3 3 2 50
|
||||
mysql/plugin 5 33 PRIMARY 3 3 1 50
|
||||
mysql/servers 12 33 PRIMARY 3 3 1 50
|
||||
mysql/server_cost 7 33 PRIMARY 3 3 1 50
|
||||
mysql/slave_master_info 28 33 PRIMARY 3 3 1 50
|
||||
mysql/slave_relay_log_info 12 33 PRIMARY 3 3 1 50
|
||||
mysql/slave_worker_info 16 33 PRIMARY 3 3 2 50
|
||||
mysql/time_zone 5 33 PRIMARY 3 3 1 50
|
||||
mysql/time_zone_leap_second 5 33 PRIMARY 3 3 1 50
|
||||
mysql/time_zone_name 5 33 PRIMARY 3 3 1 50
|
||||
mysql/time_zone_transition 6 33 PRIMARY 3 3 2 50
|
||||
mysql/time_zone_transition_type 8 33 PRIMARY 3 3 2 50
|
||||
CREATE TABLE t1 (a INT KEY, b TEXT) ROW_FORMAT=REDUNDANT ENGINE=innodb;
|
||||
CREATE TABLE t2 (a INT KEY, b TEXT) ROW_FORMAT=COMPACT ENGINE=innodb;
|
||||
CREATE TABLE t3 (a INT KEY, b TEXT) ROW_FORMAT=COMPRESSED ENGINE=innodb;
|
||||
@@ -209,35 +189,35 @@ Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=16
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=16
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=8;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=8
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=8
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=4
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=2
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=1;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=1
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=0;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
@@ -253,35 +233,35 @@ Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=16
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=16
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=8;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=8
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=8
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=4;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=4
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=4
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=2;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=2
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=2
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=1;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
SELECT table_name, row_format, create_options
|
||||
FROM information_schema.tables WHERE table_name = 't1';
|
||||
table_name row_format create_options
|
||||
t1 Compressed row_format=COMPRESSED KEY_BLOCK_SIZE=1
|
||||
t1 Compressed row_format=COMPRESSED key_block_size=1
|
||||
ALTER TABLE t1 KEY_BLOCK_SIZE=0;
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
@@ -298,33 +278,37 @@ SHOW VARIABLES LIKE 'innodb_file_per_table';
|
||||
Variable_name Value
|
||||
innodb_file_per_table OFF
|
||||
CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8;
|
||||
ERROR HY000: Table storage engine for 't4' doesn't have this option
|
||||
Got one of the listed errors
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
|
||||
Error 1031 Table storage engine for 't4' doesn't have this option
|
||||
Error 1005 Can't create table `test`.`t4` (errno: 140 "Wrong create options")
|
||||
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
|
||||
CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16;
|
||||
ERROR HY000: Table storage engine for 't5' doesn't have this option
|
||||
Got one of the listed errors
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
|
||||
Error 1031 Table storage engine for 't5' doesn't have this option
|
||||
Error 1005 Can't create table `test`.`t5` (errno: 140 "Wrong create options")
|
||||
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
|
||||
SET GLOBAL innodb_file_per_table = ON;
|
||||
SET GLOBAL innodb_file_format = `Antelope`;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
|
||||
CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8;
|
||||
ERROR HY000: Table storage engine for 't4' doesn't have this option
|
||||
Got one of the listed errors
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
|
||||
Error 1031 Table storage engine for 't4' doesn't have this option
|
||||
Error 1005 Can't create table `test`.`t4` (errno: 140 "Wrong create options")
|
||||
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
|
||||
CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16;
|
||||
ERROR HY000: Table storage engine for 't5' doesn't have this option
|
||||
Got one of the listed errors
|
||||
SHOW WARNINGS;
|
||||
Level Code Message
|
||||
Warning 1478 InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
|
||||
Error 1031 Table storage engine for 't5' doesn't have this option
|
||||
Error 1005 Can't create table `test`.`t5` (errno: 140 "Wrong create options")
|
||||
Warning 1030 Got error 140 "Wrong create options" from storage engine InnoDB
|
||||
SET GLOBAL innodb_file_format = `Barracuda`;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_file_format is deprecated and the parameter may be removed in future releases. See http://dev.mysql.com/doc/refman/5.7/en/innodb-file-format.html
|
||||
@@ -420,10 +404,8 @@ CHECK TABLE t1;
|
||||
Table Op Msg_type Msg_text
|
||||
test.t1 check status OK
|
||||
EXPLAIN SELECT * FROM t1 WHERE b LIKE 'adfd%';
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL range b b 769 NULL 12 100.00 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c`,`test`.`t1`.`d` AS `d` from `test`.`t1` where (`test`.`t1`.`b` like 'adfd%')
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 SIMPLE t1 ALL b NULL NULL NULL 15 Using where
|
||||
DROP TABLE t1;
|
||||
# Test 8) Test creating a table that could lead to undo log overflow.
|
||||
CREATE TABLE t1(a blob,b blob,c blob,d blob,e blob,f blob,g blob,
|
||||
@@ -489,27 +471,27 @@ CREATE INDEX t1st ON t1 (s(767), t(767));
|
||||
SHOW CREATE TABLE t1;
|
||||
Table Create Table
|
||||
t1 CREATE TABLE `t1` (
|
||||
`a` blob,
|
||||
`b` blob,
|
||||
`c` blob,
|
||||
`d` blob,
|
||||
`e` blob,
|
||||
`f` blob,
|
||||
`g` blob,
|
||||
`h` blob,
|
||||
`i` blob,
|
||||
`j` blob,
|
||||
`k` blob,
|
||||
`l` blob,
|
||||
`m` blob,
|
||||
`n` blob,
|
||||
`o` blob,
|
||||
`p` blob,
|
||||
`q` blob,
|
||||
`r` blob,
|
||||
`s` blob,
|
||||
`t` blob,
|
||||
`u` blob,
|
||||
`a` blob DEFAULT NULL,
|
||||
`b` blob DEFAULT NULL,
|
||||
`c` blob DEFAULT NULL,
|
||||
`d` blob DEFAULT NULL,
|
||||
`e` blob DEFAULT NULL,
|
||||
`f` blob DEFAULT NULL,
|
||||
`g` blob DEFAULT NULL,
|
||||
`h` blob DEFAULT NULL,
|
||||
`i` blob DEFAULT NULL,
|
||||
`j` blob DEFAULT NULL,
|
||||
`k` blob DEFAULT NULL,
|
||||
`l` blob DEFAULT NULL,
|
||||
`m` blob DEFAULT NULL,
|
||||
`n` blob DEFAULT NULL,
|
||||
`o` blob DEFAULT NULL,
|
||||
`p` blob DEFAULT NULL,
|
||||
`q` blob DEFAULT NULL,
|
||||
`r` blob DEFAULT NULL,
|
||||
`s` blob DEFAULT NULL,
|
||||
`t` blob DEFAULT NULL,
|
||||
`u` blob DEFAULT NULL,
|
||||
KEY `t1a` (`a`(767)),
|
||||
KEY `t1b` (`b`(767)),
|
||||
KEY `t1c` (`c`(767)),
|
||||
@@ -620,22 +602,22 @@ CREATE INDEX ndx_p ON t12963823 (p(500));
|
||||
SHOW CREATE TABLE t12963823;
|
||||
Table Create Table
|
||||
t12963823 CREATE TABLE `t12963823` (
|
||||
`a` blob,
|
||||
`b` blob,
|
||||
`c` blob,
|
||||
`d` blob,
|
||||
`e` blob,
|
||||
`f` blob,
|
||||
`g` blob,
|
||||
`h` blob,
|
||||
`i` blob,
|
||||
`j` blob,
|
||||
`k` blob,
|
||||
`l` blob,
|
||||
`m` blob,
|
||||
`n` blob,
|
||||
`o` blob,
|
||||
`p` blob,
|
||||
`a` blob DEFAULT NULL,
|
||||
`b` blob DEFAULT NULL,
|
||||
`c` blob DEFAULT NULL,
|
||||
`d` blob DEFAULT NULL,
|
||||
`e` blob DEFAULT NULL,
|
||||
`f` blob DEFAULT NULL,
|
||||
`g` blob DEFAULT NULL,
|
||||
`h` blob DEFAULT NULL,
|
||||
`i` blob DEFAULT NULL,
|
||||
`j` blob DEFAULT NULL,
|
||||
`k` blob DEFAULT NULL,
|
||||
`l` blob DEFAULT NULL,
|
||||
`m` blob DEFAULT NULL,
|
||||
`n` blob DEFAULT NULL,
|
||||
`o` blob DEFAULT NULL,
|
||||
`p` blob DEFAULT NULL,
|
||||
KEY `ndx_c` (`c`(500)),
|
||||
KEY `ndx_d` (`d`(500)),
|
||||
KEY `ndx_e` (`e`(500)),
|
||||
@@ -727,11 +709,9 @@ EXPLAIN
|
||||
SELECT COUNT(*) FROM
|
||||
(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
|
||||
WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 PRIMARY <derived2> NULL ALL NULL NULL NULL NULL 1537 100.00 NULL
|
||||
2 DERIVED t1 NULL index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 1537 100.00 Using sort_union(idx,PRIMARY); Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select count(0) AS `COUNT(*)` from (/* select#2 */ select `test`.`t1`.`pk` AS `pk`,`test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` FORCE INDEX (PRIMARY) FORCE INDEX (`idx`) where ((`test`.`t1`.`a` between 2 and 7) or (`test`.`t1`.`pk` = 1000000))) `t`
|
||||
id select_type table type possible_keys key key_len ref rows Extra
|
||||
1 PRIMARY <derived2> ALL NULL NULL NULL NULL 1537
|
||||
2 DERIVED t1 index_merge PRIMARY,idx idx,PRIMARY 5,4 NULL 1537 Using sort_union(idx,PRIMARY); Using where
|
||||
SELECT COUNT(*) FROM
|
||||
(SELECT * FROM t1 FORCE INDEX (idx,PRIMARY)
|
||||
WHERE a BETWEEN 2 AND 7 OR pk=1000000) AS t;
|
||||
|
||||
@@ -320,7 +320,6 @@ AND table_name='tab5' AND database_name='test'
|
||||
AND index_name like 'idx%' ;
|
||||
compress_stat 1
|
||||
The size of the tab5.ibd file: 5242880
|
||||
# restart
|
||||
# set the flag on (default off)
|
||||
SET GLOBAL innodb_cmp_per_index_enabled=ON;
|
||||
# set the flags
|
||||
@@ -666,7 +665,6 @@ AND table_name='tab5' AND database_name='test'
|
||||
AND index_name like 'idx%' ;
|
||||
compress_stat 1
|
||||
The size of the tab5.ibd file: 2097152
|
||||
# restart
|
||||
# set the flag on (default off)
|
||||
SET GLOBAL innodb_cmp_per_index_enabled=ON;
|
||||
# set the flags
|
||||
@@ -1964,7 +1962,6 @@ AND table_name='tab5' AND database_name='test'
|
||||
AND index_name like 'idx%' ;
|
||||
compress_stat 1
|
||||
The size of the tab5.ibd file: 65536
|
||||
# restart
|
||||
# set the flag on (default off)
|
||||
SET GLOBAL innodb_cmp_per_index_enabled=ON;
|
||||
# set the flags
|
||||
@@ -2312,7 +2309,6 @@ AND table_name='tab5' AND database_name='test'
|
||||
AND index_name like 'idx%' ;
|
||||
compress_stat 1
|
||||
The size of the tab5.ibd file: 65536
|
||||
# restart
|
||||
# set the flag on (default off)
|
||||
SET GLOBAL innodb_cmp_per_index_enabled=ON;
|
||||
# set the flags
|
||||
@@ -5113,7 +5109,6 @@ AND table_name='tab5' AND database_name='test'
|
||||
AND index_name like 'idx%' ;
|
||||
compress_stat 1
|
||||
The size of the tab5.ibd file: 65536
|
||||
# restart
|
||||
# set the flag on (default off)
|
||||
SET GLOBAL innodb_cmp_per_index_enabled=ON;
|
||||
# set the flags
|
||||
@@ -6734,7 +6729,6 @@ AND table_name='tab5' AND database_name='test'
|
||||
AND index_name like 'idx%' ;
|
||||
compress_stat 1
|
||||
The size of the tab5.ibd file: 65536
|
||||
# restart
|
||||
# set the flag on (default off)
|
||||
SET GLOBAL innodb_cmp_per_index_enabled=ON;
|
||||
# set the flags
|
||||
|
||||
@@ -1,418 +0,0 @@
|
||||
set global innodb_file_per_table = off;
|
||||
# files in MYSQL_DATA_DIR
|
||||
ibtmp1
|
||||
select @@global.innodb_file_per_table;
|
||||
@@global.innodb_file_per_table
|
||||
0
|
||||
create temporary table t1 (i int, f float, c char(100)) engine=innodb;
|
||||
insert into t1 values (100, 1.1, 'pune');
|
||||
insert into t1 values (99, 1.2, 'mumbai');
|
||||
insert into t1 values (98, 1.3, 'jaipur');
|
||||
insert into t1 values (97, 1.4, 'delhi');
|
||||
insert into t1 values (96, 1.5, 'ahmedabad');
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
select * from t1 where i = 98;
|
||||
i f c
|
||||
98 1.3 jaipur
|
||||
select * from t1 where i < 100;
|
||||
i f c
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
explain select * from t1 where f > 1.29999;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 33.33 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999)
|
||||
alter table t1 add index sec_index(f);
|
||||
explain select * from t1 where f > 1.29999;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL ALL sec_index NULL NULL NULL 5 60.00 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999)
|
||||
select * from t1 where f > 1.29999;
|
||||
i f c
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
explain select * from t1 where i = 100;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 20.00 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`i` = 100)
|
||||
alter table t1 add unique index pri_index(i);
|
||||
explain select * from t1 where i = 100;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL const pri_index pri_index 5 const 1 100.00 NULL
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select '100' AS `i`,'1.1' AS `f`,'pune' AS `c` from `test`.`t1` where 1
|
||||
select * from t1 where i = 100;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
delete from t1 where i < 97;
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
insert into t1 values (96, 1.5, 'kolkata');
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 kolkata
|
||||
update t1 set f = 1.44 where c = 'delhi';
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.44 delhi
|
||||
96 1.5 kolkata
|
||||
truncate table t1;
|
||||
insert into t1 values (100, 1.1, 'pune');
|
||||
insert into t1 values (99, 1.2, 'mumbai');
|
||||
insert into t1 values (98, 1.3, 'jaipur');
|
||||
insert into t1 values (97, 1.4, 'delhi');
|
||||
insert into t1 values (96, 1.5, 'ahmedabad');
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
alter table t1 discard tablespace;
|
||||
ERROR HY000: Cannot DISCARD/IMPORT tablespace associated with temporary table
|
||||
alter table t1 import tablespace;
|
||||
ERROR HY000: Cannot DISCARD/IMPORT tablespace associated with temporary table
|
||||
drop table t1;
|
||||
#files in MYSQL_TMP_DIR
|
||||
set global innodb_file_per_table = 1;
|
||||
select @@global.innodb_file_per_table;
|
||||
@@global.innodb_file_per_table
|
||||
1
|
||||
create temporary table t1
|
||||
(i int, f float, c char(100)) engine = innodb key_block_size = 4;
|
||||
show create table t1;
|
||||
Table Create Table
|
||||
t1 CREATE TEMPORARY TABLE `t1` (
|
||||
`i` int(11) DEFAULT NULL,
|
||||
`f` float DEFAULT NULL,
|
||||
`c` char(100) DEFAULT NULL
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=4
|
||||
#files in MYSQL_TMP_DIR
|
||||
#sql<temporary>.ibd
|
||||
insert into t1 values (100, 1.1, 'pune');
|
||||
insert into t1 values (99, 1.2, 'mumbai');
|
||||
insert into t1 values (98, 1.3, 'jaipur');
|
||||
insert into t1 values (97, 1.4, 'delhi');
|
||||
insert into t1 values (96, 1.5, 'ahmedabad');
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
select * from t1 where i = 98;
|
||||
i f c
|
||||
98 1.3 jaipur
|
||||
select * from t1 where i < 100;
|
||||
i f c
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
explain select * from t1 where f > 1.29999;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 33.33 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999)
|
||||
alter table t1 add index sec_index(f);
|
||||
explain select * from t1 where f > 1.29999;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL ALL sec_index NULL NULL NULL 5 60.00 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`f` > 1.29999)
|
||||
select * from t1 where f > 1.29999;
|
||||
i f c
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
explain select * from t1 where i = 100;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL ALL NULL NULL NULL NULL 5 20.00 Using where
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select `test`.`t1`.`i` AS `i`,`test`.`t1`.`f` AS `f`,`test`.`t1`.`c` AS `c` from `test`.`t1` where (`test`.`t1`.`i` = 100)
|
||||
alter table t1 add unique index pri_index(i);
|
||||
explain select * from t1 where i = 100;
|
||||
id select_type table partitions type possible_keys key key_len ref rows filtered Extra
|
||||
1 SIMPLE t1 NULL const pri_index pri_index 5 const 1 100.00 NULL
|
||||
Warnings:
|
||||
Note 1003 /* select#1 */ select '100' AS `i`,'1.1' AS `f`,'pune' AS `c` from `test`.`t1` where 1
|
||||
select * from t1 where i = 100;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
delete from t1 where i < 97;
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
insert into t1 values (96, 1.5, 'kolkata');
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 kolkata
|
||||
update t1 set f = 1.44 where c = 'delhi';
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.44 delhi
|
||||
96 1.5 kolkata
|
||||
truncate table t1;
|
||||
insert into t1 values (100, 1.1, 'pune');
|
||||
insert into t1 values (99, 1.2, 'mumbai');
|
||||
insert into t1 values (98, 1.3, 'jaipur');
|
||||
insert into t1 values (97, 1.4, 'delhi');
|
||||
insert into t1 values (96, 1.5, 'ahmedabad');
|
||||
select * from t1;
|
||||
i f c
|
||||
100 1.1 pune
|
||||
99 1.2 mumbai
|
||||
98 1.3 jaipur
|
||||
97 1.4 delhi
|
||||
96 1.5 ahmedabad
|
||||
alter table t1 discard tablespace;
|
||||
ERROR HY000: Cannot DISCARD/IMPORT tablespace associated with temporary table
|
||||
drop table t1;
|
||||
set global innodb_file_per_table = off;
|
||||
create temporary table t1
|
||||
(keyc int, c1 char(100), c2 char(100),
|
||||
primary key(keyc)) engine = innodb;
|
||||
CREATE PROCEDURE populate_t1()
|
||||
BEGIN
|
||||
DECLARE i INT DEFAULT 1;
|
||||
while (i <= 20000) DO
|
||||
insert into t1 values (i, 'a', 'b');
|
||||
SET i = i + 1;
|
||||
END WHILE;
|
||||
END|
|
||||
set autocommit=0;
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
0
|
||||
call populate_t1();
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
20000
|
||||
select * from t1 limit 10;
|
||||
keyc c1 c2
|
||||
1 a b
|
||||
2 a b
|
||||
3 a b
|
||||
4 a b
|
||||
5 a b
|
||||
6 a b
|
||||
7 a b
|
||||
8 a b
|
||||
9 a b
|
||||
10 a b
|
||||
set autocommit=1;
|
||||
truncate table t1;
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
0
|
||||
drop procedure populate_t1;
|
||||
drop table t1;
|
||||
create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb;
|
||||
insert into t1 values (1, 'c', 'b');
|
||||
select * from t1;
|
||||
keyc c1 c2
|
||||
1 c b
|
||||
# restart
|
||||
# files in MYSQL_DATA_DIR
|
||||
ibtmp1
|
||||
use test;
|
||||
select * from t1;
|
||||
ERROR 42S02: Table 'test.t1' doesn't exist
|
||||
"testing temp-table creation in --innodb_read_only mode"
|
||||
# restart: --innodb-read-only
|
||||
use test;
|
||||
show tables;
|
||||
Tables_in_test
|
||||
create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb;
|
||||
ERROR HY000: InnoDB is in read only mode.
|
||||
"testing system and temp tablespace name conflict"
|
||||
"restarting server in normal mode"
|
||||
# restart
|
||||
show tables;
|
||||
Tables_in_test
|
||||
create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb;
|
||||
drop table t1;
|
||||
# test condition of full-temp-tablespace
|
||||
# restart: --innodb_temp_data_file_path=ibtmp1:12M
|
||||
create temporary table t1
|
||||
(keyc int, c1 char(100), c2 char(100),
|
||||
primary key(keyc)) engine = innodb;
|
||||
CREATE PROCEDURE populate_t1()
|
||||
BEGIN
|
||||
DECLARE i INT DEFAULT 1;
|
||||
while (i <= 20000) DO
|
||||
insert into t1 values (i, 'a', 'b');
|
||||
SET i = i + 1;
|
||||
END WHILE;
|
||||
END|
|
||||
set autocommit=0;
|
||||
select count(*) from t1;
|
||||
count(*)
|
||||
0
|
||||
call populate_t1();
|
||||
ERROR HY000: The table 't1' is full
|
||||
drop procedure populate_t1;
|
||||
drop table t1;
|
||||
set innodb_strict_mode = off;
|
||||
set global innodb_file_per_table = 0;
|
||||
set global innodb_file_format = 'Antelope';
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = compressed;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Warning NUMBER InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
|
||||
Warning NUMBER InnoDB: assuming ROW_FORMAT=DYNAMIC.
|
||||
drop table t;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = compressed key_block_size = 8;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_per_table.
|
||||
Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
|
||||
Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER.
|
||||
Warning NUMBER InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_per_table.
|
||||
Warning NUMBER InnoDB: assuming ROW_FORMAT=DYNAMIC.
|
||||
drop table t;
|
||||
set global innodb_file_per_table = 1;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = compressed key_block_size = 8;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Warning NUMBER InnoDB: KEY_BLOCK_SIZE requires innodb_file_format > Antelope.
|
||||
Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER.
|
||||
Warning NUMBER InnoDB: ROW_FORMAT=COMPRESSED requires innodb_file_format > Antelope.
|
||||
Warning NUMBER InnoDB: assuming ROW_FORMAT=DYNAMIC.
|
||||
drop table t;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = dynamic;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
#files in MYSQL_TMP_DIR
|
||||
drop table t;
|
||||
set innodb_strict_mode = on;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = dynamic;
|
||||
drop table t;
|
||||
set global innodb_file_format = 'Barracuda';
|
||||
set innodb_strict_mode = off;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = compressed key_block_size = 8;
|
||||
set innodb_strict_mode = default;
|
||||
#files in MYSQL_TMP_DIR
|
||||
#sql<temporary>.ibd
|
||||
drop table t;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = compressed;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
#files in MYSQL_TMP_DIR
|
||||
#sql<temporary>.ibd
|
||||
drop table t;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = dynamic;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
#files in MYSQL_TMP_DIR
|
||||
drop table t;
|
||||
set innodb_strict_mode = on;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = dynamic;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
drop table t;
|
||||
set innodb_strict_mode = off;
|
||||
#files in MYSQL_TMP_DIR
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = dynamic key_block_size = 4;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
Warning NUMBER InnoDB: ignoring KEY_BLOCK_SIZE=NUMBER unless ROW_FORMAT=COMPRESSED.
|
||||
#files in MYSQL_TMP_DIR
|
||||
#sql<temporary>.ibd
|
||||
drop table t;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb row_format = compact;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
#files in MYSQL_TMP_DIR
|
||||
drop table t;
|
||||
create temporary table t (
|
||||
i int)
|
||||
engine = innodb key_block_size = 4;
|
||||
show warnings;
|
||||
Level Code Message
|
||||
#files in MYSQL_TMP_DIR
|
||||
#sql<temporary>.ibd
|
||||
drop table t;
|
||||
"testing temp tablespace non-support for raw device"
|
||||
"testing temp tablespace non-support for raw device"
|
||||
# restart
|
||||
show tables;
|
||||
Tables_in_test
|
||||
create temporary table t1 (
|
||||
keyc int, c1 char(100), c2 char(100)
|
||||
) engine = innodb;
|
||||
drop table t1;
|
||||
"try starting server with temp-tablespace size < min. threshold"
|
||||
"try starting server with sys-tablespace size < min. threshold"
|
||||
# restart
|
||||
show tables;
|
||||
Tables_in_test
|
||||
create temporary table t1 (
|
||||
keyc int, c1 char(100), c2 char(100)
|
||||
) engine = innodb;
|
||||
drop table t1;
|
||||
"try starting server with no file specified for temp-tablespace"
|
||||
# restart
|
||||
show tables;
|
||||
Tables_in_test
|
||||
create temporary table t1 (
|
||||
keyc int, c1 char(100), c2 char(100)
|
||||
) engine = innodb;
|
||||
drop table t1;
|
||||
@@ -270,9 +270,14 @@ rollback;
|
||||
set n = n - 1;
|
||||
end while;
|
||||
end|
|
||||
connect con1,localhost,root,,;
|
||||
connect con2,localhost,root,,;
|
||||
#---client 1 : dml operation ---"
|
||||
connection con1;
|
||||
#---client 2 : dml operation ---"
|
||||
connection con2;
|
||||
# In connection 1
|
||||
connection con1;
|
||||
SELECT count(*) FROM t1_1;
|
||||
count(*)
|
||||
36
|
||||
@@ -550,6 +555,7 @@ c1
|
||||
138
|
||||
140
|
||||
# In connection 2
|
||||
connection con2;
|
||||
SELECT count(*) FROM t1_2;
|
||||
count(*)
|
||||
36
|
||||
@@ -827,6 +833,7 @@ c1
|
||||
138
|
||||
140
|
||||
# In connection 1
|
||||
connection con1;
|
||||
set AUTOCOMMIT = 0;
|
||||
ALTER TABLE t1_temp DROP PRIMARY KEY;
|
||||
ALTER TABLE t1_temp ADD PRIMARY KEY (c1);
|
||||
@@ -898,17 +905,9 @@ SELECT c1,c2,c3,c4,c5,c6,c7,c9 FROM t4_temp WHERE c1 = 20;
|
||||
c1 c2 c3 c4 c5 c6 c7 c9
|
||||
20 1 a a a a a 100.550
|
||||
update ignore t1_temp set c1 = 20 WHERE c1 = 140 ;
|
||||
Warnings:
|
||||
Warning 1062 Duplicate entry '20' for key 'PRIMARY'
|
||||
update ignore t2_temp set c1 = 20 WHERE c1 = 140 ;
|
||||
Warnings:
|
||||
Warning 1062 Duplicate entry '20' for key 'PRIMARY'
|
||||
update ignore t3_temp set c1 = 20 WHERE c1 = 140 ;
|
||||
Warnings:
|
||||
Warning 1062 Duplicate entry '20' for key 'PRIMARY'
|
||||
update ignore t4_temp set c1 = 20 WHERE c1 = 140 ;
|
||||
Warnings:
|
||||
Warning 1062 Duplicate entry '20' for key 'PRIMARY'
|
||||
SELECT count(*) FROM t1_temp WHERE c1 = 140;
|
||||
count(*)
|
||||
1
|
||||
@@ -938,11 +937,19 @@ SELECT c1,c2,c3,c4,c5,c6,c7,c9,c10,c11 FROM t1_temp WHERE c1 < 0;
|
||||
c1 c2 c3 c4 c5 c6 c7 c9 c10 c11
|
||||
-2 -2 a a a a a 100.550 99 test
|
||||
DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1;
|
||||
disconnect con1;
|
||||
connection con2;
|
||||
DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2;
|
||||
# restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=20 --innodb_undo_logs=20 --innodb_log_files_in_group=4
|
||||
disconnect con2;
|
||||
connection default;
|
||||
connect con1,localhost,root,,;
|
||||
connect con2,localhost,root,,;
|
||||
connection con1;
|
||||
call populate_tables('_1');;
|
||||
connection con2;
|
||||
call populate_tables('_2');;
|
||||
"#connection 1 - verify tables"
|
||||
connection con1;
|
||||
SELECT count(*) FROM t1_1;
|
||||
count(*)
|
||||
36
|
||||
@@ -1220,7 +1227,9 @@ c1
|
||||
138
|
||||
140
|
||||
DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1;
|
||||
disconnect con1;
|
||||
"#connection 2 - verify tables"
|
||||
connection con2;
|
||||
SELECT count(*) FROM t1_2;
|
||||
count(*)
|
||||
36
|
||||
@@ -1498,10 +1507,16 @@ c1
|
||||
138
|
||||
140
|
||||
DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2;
|
||||
# restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=30 --innodb_undo_logs=20 --innodb_log_files_in_group=4
|
||||
disconnect con2;
|
||||
connection default;
|
||||
connect con1,localhost,root,,;
|
||||
connect con2,localhost,root,,;
|
||||
connection con1;
|
||||
call populate_tables('_1');;
|
||||
connection con2;
|
||||
call populate_tables('_2');;
|
||||
"#connection 1 - verify tables"
|
||||
connection con1;
|
||||
SELECT count(*) FROM t1_1;
|
||||
count(*)
|
||||
36
|
||||
@@ -1779,7 +1794,9 @@ c1
|
||||
138
|
||||
140
|
||||
DROP TABLE t1_1 ,t2_1 ,t3_1,t4_1;
|
||||
disconnect con1;
|
||||
"#connection 2 - verify tables"
|
||||
connection con2;
|
||||
SELECT count(*) FROM t1_2;
|
||||
count(*)
|
||||
36
|
||||
@@ -2057,4 +2074,6 @@ c1
|
||||
138
|
||||
140
|
||||
DROP TABLE t1_2 ,t2_2 ,t3_2,t4_2;
|
||||
disconnect con2;
|
||||
connection default;
|
||||
DROP PROCEDURE populate_tables;
|
||||
|
||||
3
mysql-test/suite/innodb_zip/t/16k-master.opt
Normal file
3
mysql-test/suite/innodb_zip/t/16k-master.opt
Normal file
@@ -0,0 +1,3 @@
|
||||
--loose-innodb-sys-indexes
|
||||
--loose-innodb-sys-tablespaces
|
||||
--loose-innodb-sys-datafiles
|
||||
@@ -288,18 +288,18 @@ DROP TABLE t1;
|
||||
SET SESSION innodb_strict_mode = ON;
|
||||
SET GLOBAL innodb_file_per_table = OFF;
|
||||
SHOW VARIABLES LIKE 'innodb_file_per_table';
|
||||
--error ER_ILLEGAL_HA
|
||||
--error ER_ILLEGAL_HA,1005
|
||||
CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8;
|
||||
SHOW WARNINGS;
|
||||
--error ER_ILLEGAL_HA
|
||||
--error ER_ILLEGAL_HA,1005
|
||||
CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16;
|
||||
SHOW WARNINGS;
|
||||
SET GLOBAL innodb_file_per_table = ON;
|
||||
SET GLOBAL innodb_file_format = `Antelope`;
|
||||
--error ER_ILLEGAL_HA
|
||||
--error ER_ILLEGAL_HA,1005
|
||||
CREATE TABLE t4 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=8;
|
||||
SHOW WARNINGS;
|
||||
--error ER_ILLEGAL_HA
|
||||
--error ER_ILLEGAL_HA,1005
|
||||
CREATE TABLE t5 (id int PRIMARY KEY) ENGINE=innodb KEY_BLOCK_SIZE=16;
|
||||
SHOW WARNINGS;
|
||||
SET GLOBAL innodb_file_format = `Barracuda`;
|
||||
|
||||
@@ -14,4 +14,4 @@ restart : Not supported by MariaDB 10.2 2/9/2016 jplindst
|
||||
innochecksum : MDEV-10727 2/9/2016 jplindst
|
||||
innochecksum_2 : MDEV-10727 2/9/2016 jplindst
|
||||
innochecksum_3 : MDEV-10727 2/9/2016 jplindst
|
||||
|
||||
wl6560 : MDEV_10727
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
--echo #
|
||||
|
||||
--source include/have_innodb.inc
|
||||
--source include/have_nodebug.inc
|
||||
--source include/not_debug.inc
|
||||
--source include/big_test.inc
|
||||
|
||||
--disable_query_log
|
||||
|
||||
@@ -164,12 +164,12 @@ select * from t1;
|
||||
#
|
||||
#
|
||||
--echo "testing temp-table creation in --innodb_read_only mode"
|
||||
let $restart_parameters = restart: --innodb-read-only;
|
||||
let $restart_parameters=--innodb-read-only;
|
||||
--source include/restart_mysqld.inc
|
||||
#
|
||||
use test;
|
||||
show tables;
|
||||
--error ER_INNODB_READ_ONLY
|
||||
--error ER_INNODB_READ_ONLY, 1005
|
||||
create temporary table t1 (keyc int, c1 char(100), c2 char(100)) engine = innodb;
|
||||
|
||||
#-----------------------------------------------------------------------------
|
||||
@@ -198,7 +198,7 @@ drop table t1;
|
||||
# and insert enough data to make it full.
|
||||
#
|
||||
--echo # test condition of full-temp-tablespace
|
||||
let $restart_parameters = restart: --innodb_temp_data_file_path=ibtmp1:12M;
|
||||
let $restart_parameters=--innodb_temp_data_file_path=ibtmp1:12M;
|
||||
--source include/restart_mysqld.inc
|
||||
#
|
||||
create temporary table t1
|
||||
@@ -366,7 +366,6 @@ let SEARCH_PATTERN = support raw device;
|
||||
--source include/search_pattern_in_file.inc
|
||||
--remove_file $SEARCH_FILE
|
||||
|
||||
let $restart_parameters = restart;
|
||||
--source include/start_mysqld.inc
|
||||
|
||||
show tables;
|
||||
|
||||
@@ -517,7 +517,7 @@ connection default;
|
||||
#
|
||||
## trying with VALUES innodb_undo_tablespaces, innodb_undo_logs ,innodb_log_files_in_group
|
||||
##
|
||||
let $restart_parameters = restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=20 --innodb_undo_logs=20 --innodb_log_files_in_group=4;
|
||||
let $restart_parameters=--innodb_undo_tablespaces=0 --innodb_rollback_segments=20 --innodb_undo_logs=20 --innodb_log_files_in_group=4;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
# Create two client for concurrent execution
|
||||
@@ -574,7 +574,7 @@ disconnect con2;
|
||||
|
||||
connection default;
|
||||
# innodb_undo_logs > non redo rsegment
|
||||
let $restart_parameters = restart: --innodb_undo_tablespaces=0 --innodb_rollback_segments=30 --innodb_undo_logs=20 --innodb_log_files_in_group=4;
|
||||
let $restart_parameters=--innodb_undo_tablespaces=0 --innodb_rollback_segments=30 --innodb_undo_logs=20 --innodb_log_files_in_group=4;
|
||||
--source include/restart_mysqld.inc
|
||||
|
||||
connect (con1,localhost,root,,);
|
||||
|
||||
@@ -8,17 +8,27 @@ SELECT @global_start_value;
|
||||
1
|
||||
'#--------------------FN_DYNVARS_046_01------------------------#'
|
||||
SET @@session.innodb_support_xa = 0;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
|
||||
SET @@session.innodb_support_xa = DEFAULT;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
|
||||
SELECT @@session.innodb_support_xa;
|
||||
@@session.innodb_support_xa
|
||||
1
|
||||
SET @@global.innodb_support_xa = 0;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
|
||||
SET @@global.innodb_support_xa = DEFAULT;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
|
||||
SELECT @@global.innodb_support_xa;
|
||||
@@global.innodb_support_xa
|
||||
1
|
||||
'#---------------------FN_DYNVARS_046_02-------------------------#'
|
||||
SET innodb_support_xa = 1;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
|
||||
SELECT @@innodb_support_xa;
|
||||
@@innodb_support_xa
|
||||
1
|
||||
@@ -29,27 +39,39 @@ ERROR 42S02: Unknown table 'local' in field list
|
||||
SELECT global.innodb_support_xa;
|
||||
ERROR 42S02: Unknown table 'global' in field list
|
||||
SET session innodb_support_xa = 0;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
|
||||
SELECT @@session.innodb_support_xa;
|
||||
@@session.innodb_support_xa
|
||||
0
|
||||
1
|
||||
SET global innodb_support_xa = 0;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
|
||||
SELECT @@global.innodb_support_xa;
|
||||
@@global.innodb_support_xa
|
||||
0
|
||||
1
|
||||
'#--------------------FN_DYNVARS_046_03------------------------#'
|
||||
SET @@session.innodb_support_xa = 0;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
|
||||
SELECT @@session.innodb_support_xa;
|
||||
@@session.innodb_support_xa
|
||||
0
|
||||
1
|
||||
SET @@session.innodb_support_xa = 1;
|
||||
Warnings:
|
||||
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
1
SET @@global.innodb_support_xa = 0;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
0
1
SET @@global.innodb_support_xa = 1;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
1
@@ -67,9 +89,11 @@ ERROR 42000: Variable 'innodb_support_xa' can't be set to the value of 'TR
SET @@session.innodb_support_xa = <20>N;
ERROR 42000: Variable 'innodb_support_xa' can't be set to the value of '<27>N'
SET @@session.innodb_support_xa = OF;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
0
1
SET @@session.innodb_support_xa = <20>FF;
ERROR 42000: Variable 'innodb_support_xa' can't be set to the value of '<27>FF'
SET @@global.innodb_support_xa = -1;
@@ -88,18 +112,26 @@ ERROR 42000: Variable 'innodb_support_xa' can't be set to the value of 'TR
SET @@global.innodb_support_xa = <20>N;
ERROR 42000: Variable 'innodb_support_xa' can't be set to the value of '<27>N'
SET @@global.innodb_support_xa = OF;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
0
1
SET @@global.innodb_support_xa = <20>FF;
ERROR 42000: Variable 'innodb_support_xa' can't be set to the value of '<27>FF'
'#-------------------FN_DYNVARS_046_05----------------------------#'
SET @@global.innodb_support_xa = 0;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SET @@session.innodb_support_xa = 1;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@global.innodb_support_xa AS res_is_0;
res_is_0
0
1
SET @@global.innodb_support_xa = 0;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@session.innodb_support_xa AS res_is_1;
res_is_1
1
@@ -112,11 +144,11 @@ VARIABLE_VALUE
1
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
0
1
SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES
WHERE VARIABLE_NAME='innodb_support_xa';
VARIABLE_VALUE
OFF
ON
'#----------------------FN_DYNVARS_046_07------------------------#'
SELECT IF(@@session.innodb_support_xa, "ON", "OFF") =
VARIABLE_VALUE FROM INFORMATION_SCHEMA.SESSION_VARIABLES
@@ -133,43 +165,63 @@ VARIABLE_VALUE
ON
'#---------------------FN_DYNVARS_046_08-------------------------#'
SET @@session.innodb_support_xa = OFF;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
0
1
SET @@session.innodb_support_xa = ON;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
1
SET @@global.innodb_support_xa = OFF;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
0
1
SET @@global.innodb_support_xa = ON;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
1
'#---------------------FN_DYNVARS_046_09----------------------#'
SET @@session.innodb_support_xa = TRUE;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
1
SET @@session.innodb_support_xa = FALSE;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
0
1
SET @@global.innodb_support_xa = TRUE;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
1
SET @@global.innodb_support_xa = FALSE;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
0
1
SET @@session.innodb_support_xa = @session_start_value;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
1
SET @@global.innodb_support_xa = @global_start_value;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
1

@@ -1,21 +1,27 @@
'#--------------------FN_DYNVARS_046_01-------------------------#'
SET @@global.innodb_support_xa = OFF;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
connect con1,localhost,root,,,,;
connection con1;
SELECT @@global.innodb_support_xa;
@@global.innodb_support_xa
0
1
SELECT @@session.innodb_support_xa;
@@session.innodb_support_xa
0
1
disconnect con1;
'#--------------------FN_DYNVARS_046_01-------------------------#'
connection default;
SET @@global.innodb_support_xa = 1;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
drop table if exists t1, t2;
create table t1 (a int) engine=innodb;
'---check when innodb_support_xa is 1---'
SET @@innodb_support_xa = 1;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
xa start 'test1';
INSERT t1 values (10);
xa end 'test1';
@@ -25,6 +31,8 @@ SELECT * from t1;
a
'---check when innodb_support_xa is 0---'
SET @@innodb_support_xa = 0;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases. Only innodb_support_xa=ON is allowed.
xa start 'test1';
INSERT t1 values (10);
xa end 'test1';
@@ -34,7 +42,11 @@ SELECT * from t1;
a
'------general xa testing--------'
SET @@global.innodb_support_xa = 1;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
SET @@innodb_support_xa = 1;
Warnings:
Warning 131 Using innodb_support_xa is deprecated and the parameter may be removed in future releases.
xa start 'testa','testb';
INSERT t1 values (30);
COMMIT;

@@ -3,6 +3,8 @@

let $MYSQLD_DATADIR= `SELECT @@datadir`;

call mtr.add_suppression("InnoDB: Table .* does not exist in the InnoDB internal data dictionary .*");

--echo #
--echo # Bug#11766879/Bug#60106: DIFF BETWEEN # OF INDEXES IN MYSQL VS INNODB,
--echo # PARTITONING, ON INDEX CREATE

1
mysql-test/t/row-checksum.opt
Normal file
@@ -0,0 +1 @@
--loose-innodb-strict-mode=off
@@ -359,7 +359,7 @@ int ha_init_errors(void)
SETMSG(HA_ERR_NO_CONNECTION, "Could not connect to storage engine");
SETMSG(HA_ERR_TABLE_DEF_CHANGED, ER_DEFAULT(ER_TABLE_DEF_CHANGED));
SETMSG(HA_ERR_FOREIGN_DUPLICATE_KEY, "FK constraint would lead to duplicate key");
SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, "Table upgrade required. Please do \"REPAIR TABLE %`\" or dump/reload to fix it");
SETMSG(HA_ERR_TABLE_NEEDS_UPGRADE, ER_DEFAULT(ER_TABLE_NEEDS_UPGRADE));
SETMSG(HA_ERR_TABLE_READONLY, ER_DEFAULT(ER_OPEN_AS_READONLY));
SETMSG(HA_ERR_AUTOINC_READ_FAILED, ER_DEFAULT(ER_AUTOINC_READ_FAILED));
SETMSG(HA_ERR_AUTOINC_ERANGE, ER_DEFAULT(ER_WARN_DATA_OUT_OF_RANGE));
@@ -370,6 +370,17 @@ int ha_init_errors(void)
SETMSG(HA_ERR_TABLE_IN_FK_CHECK, ER_DEFAULT(ER_TABLE_IN_FK_CHECK));
SETMSG(HA_ERR_DISK_FULL, ER_DEFAULT(ER_DISK_FULL));
SETMSG(HA_ERR_FTS_TOO_MANY_WORDS_IN_PHRASE, "Too many words in a FTS phrase or proximity search");
SETMSG(HA_ERR_FK_DEPTH_EXCEEDED, ER_DEFAULT(ER_FK_DEPTH_EXCEEDED));
SETMSG(HA_MISSING_CREATE_OPTION, ER_DEFAULT(ER_MISSING_HA_CREATE_OPTION));
SETMSG(HA_ERR_SE_OUT_OF_MEMORY, ER_DEFAULT(ER_ENGINE_OUT_OF_MEMORY));
SETMSG(HA_ERR_TABLE_CORRUPT, ER_DEFAULT(ER_TABLE_CORRUPT));
SETMSG(HA_ERR_QUERY_INTERRUPTED, ER_DEFAULT(ER_QUERY_INTERRUPTED));
SETMSG(HA_ERR_TABLESPACE_MISSING, ER_DEFAULT(ER_TABLESPACE_MISSING));
SETMSG(HA_ERR_TABLESPACE_IS_NOT_EMPTY,ER_DEFAULT(ER_TABLESPACE_IS_NOT_EMPTY));
SETMSG(HA_ERR_WRONG_FILE_NAME, ER_DEFAULT(ER_WRONG_FILE_NAME));
SETMSG(HA_ERR_NOT_ALLOWED_COMMAND, ER_DEFAULT(ER_NOT_ALLOWED_COMMAND));
SETMSG(HA_ERR_COMPUTE_FAILED, "Compute virtual column value failed");
SETMSG(HA_ERR_INNODB_READ_ONLY, ER_DEFAULT(ER_INNODB_READ_ONLY));

/* Register the error messages for use with my_error(). */
return my_error_register(get_handler_errmsgs, HA_ERR_FIRST, HA_ERR_LAST);
@@ -3525,9 +3536,10 @@ void handler::print_error(int error, myf errflag)
DBUG_VOID_RETURN;
}
case HA_ERR_TABLE_NEEDS_UPGRADE:
textno= ER_TABLE_NEEDS_UPGRADE;
my_error(ER_TABLE_NEEDS_UPGRADE, errflag,
"TABLE", table_share->table_name.str);
break;
DBUG_VOID_RETURN;
case HA_ERR_NO_PARTITION_FOUND:
textno=ER_WRONG_PARTITION_NAME;
break;

@@ -7214,3 +7214,21 @@ ER_CALCULATING_DEFAULT_VALUE
eng "Got an error when calculating default value for %`s"
ER_EXPRESSION_REFERS_TO_UNINIT_FIELD 01000
eng "Expression for field %`-.64s is refering to uninitialized field %`s"
ER_BUFPOOL_RESIZE_INPROGRESS
eng "Another buffer pool resize is already in progress."
ER_CANNOT_DISCARD_TEMPORARY_TABLE
eng "Cannot DISCARD/IMPORT tablespace associated with temporary table"
ER_FK_DEPTH_EXCEEDED
eng "Foreign key cascade delete/update exceeds max depth of %d."
ER_INNODB_FORCED_RECOVERY
eng "Operation not allowed when innodb_forced_recovery > 0."
ER_TABLE_REFERENCED
eng "Cannot complete the operation because table is referenced by another connection."
ER_TABLESPACE_IS_NOT_EMPTY
eng "Tablespace `%-.192s` is not empty."
ER_MISSING_HA_CREATE_OPTION
eng "Table storage engine '%-.64s' found required create option missing"
ER_ENGINE_OUT_OF_MEMORY
eng "Out of memory in storage engine '%-.64s'."
ER_WRONG_FILE_NAME
eng "Incorrect File Name '%s'."

@@ -1,4 +1,5 @@
|
||||
# Copyright (c) 2006, 2015, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2006, 2016, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2014, 2016, MariaDB Corporation
|
||||
#
|
||||
# This program is free software; you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -30,351 +31,7 @@ MYSQL_CHECK_LZMA()
|
||||
MYSQL_CHECK_BZIP2()
|
||||
MYSQL_CHECK_SNAPPY()
|
||||
|
||||
IF(CMAKE_CROSSCOMPILING)
|
||||
# Use CHECK_C_SOURCE_COMPILES instead of CHECK_C_SOURCE_RUNS when
|
||||
# cross-compiling. Not as precise, but usually good enough.
|
||||
# This only make sense for atomic tests in this file, this trick doesn't
|
||||
# work in a general case.
|
||||
MACRO(CHECK_C_SOURCE SOURCE VAR)
|
||||
CHECK_C_SOURCE_COMPILES("${SOURCE}" "${VAR}")
|
||||
ENDMACRO()
|
||||
ELSE()
|
||||
MACRO(CHECK_C_SOURCE SOURCE VAR)
|
||||
CHECK_C_SOURCE_RUNS("${SOURCE}" "${VAR}")
|
||||
ENDMACRO()
|
||||
ENDIF()
|
||||
|
||||
# OS tests
|
||||
IF(UNIX)
|
||||
IF(CMAKE_SYSTEM_NAME STREQUAL "Linux")
|
||||
CHECK_INCLUDE_FILES (libaio.h HAVE_LIBAIO_H)
|
||||
CHECK_LIBRARY_EXISTS(aio io_queue_init "" HAVE_LIBAIO)
|
||||
ADD_DEFINITIONS("-DUNIV_LINUX -D_GNU_SOURCE=1")
|
||||
IF(HAVE_LIBAIO_H AND HAVE_LIBAIO)
|
||||
ADD_DEFINITIONS(-DLINUX_NATIVE_AIO=1)
|
||||
LINK_LIBRARIES(aio)
|
||||
ENDIF()
|
||||
IF(HAVE_LIBNUMA)
|
||||
LINK_LIBRARIES(numa)
|
||||
ENDIF()
|
||||
ELSEIF(CMAKE_SYSTEM_NAME MATCHES "HP*")
|
||||
ADD_DEFINITIONS("-DUNIV_HPUX")
|
||||
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "AIX")
|
||||
ADD_DEFINITIONS("-DUNIV_AIX")
|
||||
ELSEIF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
|
||||
ADD_DEFINITIONS("-DUNIV_SOLARIS")
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
|
||||
IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
|
||||
# After: WL#5825 Using C++ Standard Library with MySQL code
|
||||
# we no longer use -fno-exceptions
|
||||
# SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
|
||||
ENDIF()
|
||||
|
||||
# Enable InnoDB's UNIV_DEBUG and UNIV_SYNC_DEBUG in debug builds
|
||||
SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -DUNIV_DEBUG -DUNIV_SYNC_DEBUG")
|
||||
|
||||
# Add -Wconversion if compiling with GCC
|
||||
## As of Mar 15 2011 this flag causes 3573+ warnings. If you are reading this
|
||||
## please fix them and enable the following code:
|
||||
#IF(CMAKE_CXX_COMPILER_ID MATCHES "GNU")
|
||||
#SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wconversion")
|
||||
#ENDIF()
|
||||
|
||||
CHECK_FUNCTION_EXISTS(sched_getcpu HAVE_SCHED_GETCPU)
|
||||
IF(HAVE_SCHED_GETCPU)
|
||||
ADD_DEFINITIONS(-DHAVE_SCHED_GETCPU)
|
||||
ENDIF()
|
||||
|
||||
IF(NOT MSVC)
|
||||
# either define HAVE_IB_GCC_ATOMIC_BUILTINS or not
|
||||
# workaround for gcc 4.1.2 RHEL5/x86, gcc atomic ops only work under -march=i686
|
||||
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "i686" AND CMAKE_COMPILER_IS_GNUCC AND
|
||||
CMAKE_C_COMPILER_VERSION VERSION_LESS "4.1.3")
|
||||
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=i686")
|
||||
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=i686")
|
||||
ENDIF()
|
||||
CHECK_C_SOURCE(
|
||||
"
|
||||
int main()
|
||||
{
|
||||
long x;
|
||||
long y;
|
||||
long res;
|
||||
|
||||
x = 10;
|
||||
y = 123;
|
||||
res = __sync_bool_compare_and_swap(&x, x, y);
|
||||
if (!res || x != y) {
|
||||
return(1);
|
||||
}
|
||||
|
||||
x = 10;
|
||||
y = 123;
|
||||
res = __sync_bool_compare_and_swap(&x, x + 1, y);
|
||||
if (res || x != 10) {
|
||||
return(1);
|
||||
}
|
||||
x = 10;
|
||||
y = 123;
|
||||
res = __sync_add_and_fetch(&x, y);
|
||||
if (res != 123 + 10 || x != 123 + 10) {
|
||||
return(1);
|
||||
}
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_GCC_ATOMIC_BUILTINS
|
||||
)
|
||||
CHECK_C_SOURCE(
|
||||
"
|
||||
int main()
|
||||
{
|
||||
long res;
|
||||
char c;
|
||||
|
||||
c = 10;
|
||||
res = __sync_lock_test_and_set(&c, 123);
|
||||
if (res != 10 || c != 123) {
|
||||
return(1);
|
||||
}
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
|
||||
)
|
||||
CHECK_C_SOURCE(
|
||||
"#include<stdint.h>
|
||||
int main()
|
||||
{
|
||||
int64_t x,y,res;
|
||||
|
||||
x = 10;
|
||||
y = 123;
|
||||
res = __sync_sub_and_fetch(&y, x);
|
||||
if (res != y || y != 113) {
|
||||
return(1);
|
||||
}
|
||||
res = __sync_add_and_fetch(&y, x);
|
||||
if (res != y || y != 123) {
|
||||
return(1);
|
||||
}
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_GCC_ATOMIC_BUILTINS_64
|
||||
)
|
||||
CHECK_C_SOURCE(
|
||||
"#include<stdint.h>
|
||||
int main()
|
||||
{
|
||||
__sync_synchronize();
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_GCC_SYNC_SYNCHRONISE
|
||||
)
|
||||
CHECK_C_SOURCE(
|
||||
"#include<stdint.h>
|
||||
int main()
|
||||
{
|
||||
__atomic_thread_fence(__ATOMIC_ACQUIRE);
|
||||
__atomic_thread_fence(__ATOMIC_RELEASE);
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_GCC_ATOMIC_THREAD_FENCE
|
||||
)
|
||||
CHECK_C_SOURCE(
|
||||
"#include<stdint.h>
|
||||
int main()
|
||||
{
|
||||
unsigned char c;
|
||||
|
||||
__atomic_test_and_set(&c, __ATOMIC_ACQUIRE);
|
||||
__atomic_clear(&c, __ATOMIC_RELEASE);
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_GCC_ATOMIC_TEST_AND_SET
|
||||
)
|
||||
|
||||
IF(HAVE_IB_GCC_ATOMIC_BUILTINS)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS=1)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_BYTE=1)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_IB_GCC_SYNC_SYNCHRONISE)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_GCC_SYNC_SYNCHRONISE=1)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_THREAD_FENCE=1)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_TEST_AND_SET=1)
|
||||
ENDIF()
|
||||
|
||||
# either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not
|
||||
CHECK_C_SOURCE(
|
||||
"
|
||||
#include <pthread.h>
|
||||
#include <string.h>
|
||||
|
||||
int main() {
|
||||
pthread_t x1;
|
||||
pthread_t x2;
|
||||
pthread_t x3;
|
||||
|
||||
memset(&x1, 0x0, sizeof(x1));
|
||||
memset(&x2, 0x0, sizeof(x2));
|
||||
memset(&x3, 0x0, sizeof(x3));
|
||||
|
||||
__sync_bool_compare_and_swap(&x1, x2, x3);
|
||||
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_ATOMIC_PTHREAD_T_GCC)
|
||||
|
||||
IF(HAVE_IB_ATOMIC_PTHREAD_T_GCC)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_GCC=1)
|
||||
ENDIF()
|
||||
|
||||
CHECK_CXX_SOURCE_COMPILES("struct t1{ int a; char *b; }; struct t1 c= { .a=1, .b=0 }; main() { }" HAVE_C99_INITIALIZERS)
|
||||
IF(HAVE_C99_INITIALIZERS)
|
||||
ADD_DEFINITIONS(-DHAVE_C99_INITIALIZERS)
|
||||
ENDIF()
|
||||
|
||||
ENDIF(NOT MSVC)
|
||||
|
||||
CHECK_FUNCTION_EXISTS(vasprintf HAVE_VASPRINTF)
|
||||
|
||||
# Solaris atomics
|
||||
IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
|
||||
CHECK_FUNCTION_EXISTS(atomic_cas_ulong HAVE_ATOMIC_CAS_ULONG)
|
||||
CHECK_FUNCTION_EXISTS(atomic_cas_32 HAVE_ATOMIC_CAS_32)
|
||||
CHECK_FUNCTION_EXISTS(atomic_cas_64 HAVE_ATOMIC_CAS_64)
|
||||
CHECK_FUNCTION_EXISTS(atomic_add_long_nv HAVE_ATOMIC_ADD_LONG_NV)
|
||||
CHECK_FUNCTION_EXISTS(atomic_swap_uchar HAVE_ATOMIC_SWAP_UCHAR)
|
||||
IF(HAVE_ATOMIC_CAS_ULONG AND
|
||||
HAVE_ATOMIC_CAS_32 AND
|
||||
HAVE_ATOMIC_CAS_64 AND
|
||||
HAVE_ATOMIC_ADD_LONG_NV AND
|
||||
HAVE_ATOMIC_SWAP_UCHAR)
|
||||
SET(HAVE_IB_SOLARIS_ATOMICS 1)
|
||||
ENDIF()
|
||||
|
||||
IF(HAVE_IB_SOLARIS_ATOMICS)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_SOLARIS_ATOMICS=1)
|
||||
ENDIF()
|
||||
|
||||
# either define HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS or not
|
||||
CHECK_C_SOURCE_COMPILES(
|
||||
" #include <pthread.h>
|
||||
#include <string.h>
|
||||
|
||||
int main(int argc, char** argv) {
|
||||
pthread_t x1;
|
||||
pthread_t x2;
|
||||
pthread_t x3;
|
||||
|
||||
memset(&x1, 0x0, sizeof(x1));
|
||||
memset(&x2, 0x0, sizeof(x2));
|
||||
memset(&x3, 0x0, sizeof(x3));
|
||||
|
||||
if (sizeof(pthread_t) == 4) {
|
||||
|
||||
atomic_cas_32(&x1, x2, x3);
|
||||
|
||||
} else if (sizeof(pthread_t) == 8) {
|
||||
|
||||
atomic_cas_64(&x1, x2, x3);
|
||||
|
||||
} else {
|
||||
|
||||
return(1);
|
||||
}
|
||||
|
||||
return(0);
|
||||
}
|
||||
" HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
|
||||
CHECK_C_SOURCE_COMPILES(
|
||||
"#include <mbarrier.h>
|
||||
int main() {
|
||||
__machine_r_barrier();
|
||||
__machine_w_barrier();
|
||||
return(0);
|
||||
}"
|
||||
HAVE_IB_MACHINE_BARRIER_SOLARIS)
|
||||
|
||||
IF(HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_SOLARIS=1)
|
||||
ENDIF()
|
||||
IF(HAVE_IB_MACHINE_BARRIER_SOLARIS)
|
||||
ADD_DEFINITIONS(-DHAVE_IB_MACHINE_BARRIER_SOLARIS=1)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
|
||||
|
||||
IF(UNIX)
|
||||
# this is needed to know which one of atomic_cas_32() or atomic_cas_64()
|
||||
# to use in the source
|
||||
SET(CMAKE_EXTRA_INCLUDE_FILES pthread.h)
|
||||
CHECK_TYPE_SIZE(pthread_t SIZEOF_PTHREAD_T)
|
||||
SET(CMAKE_EXTRA_INCLUDE_FILES)
|
||||
ENDIF()
|
||||
|
||||
IF(SIZEOF_PTHREAD_T)
|
||||
ADD_DEFINITIONS(-DSIZEOF_PTHREAD_T=${SIZEOF_PTHREAD_T})
|
||||
ENDIF()
|
||||
|
||||
IF(MSVC)
|
||||
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS)
|
||||
ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE)
|
||||
ENDIF()
|
||||
|
||||
SET(MUTEXTYPE "event" CACHE STRING "Mutex type: event, sys or futex")
|
||||
|
||||
IF(MUTEXTYPE MATCHES "event")
|
||||
ADD_DEFINITIONS(-DMUTEX_EVENT)
|
||||
ELSEIF(MUTEXTYPE MATCHES "futex" AND DEFINED HAVE_IB_LINUX_FUTEX)
|
||||
ADD_DEFINITIONS(-DMUTEX_FUTEX)
|
||||
ELSE()
|
||||
ADD_DEFINITIONS(-DMUTEX_SYS)
|
||||
ENDIF()
|
||||
|
||||
# Include directories under innobase
|
||||
INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/storage/innobase/include
|
||||
${CMAKE_SOURCE_DIR}/storage/innobase/handler)
|
||||
|
||||
# Sun Studio bug with -xO2
|
||||
IF(CMAKE_CXX_COMPILER_ID MATCHES "SunPro"
|
||||
AND CMAKE_CXX_FLAGS_RELEASE MATCHES "O2"
|
||||
AND NOT CMAKE_BUILD_TYPE STREQUAL "Debug")
|
||||
# Sun Studio 12 crashes with -xO2 flag, but not with higher optimization
|
||||
# -xO3
|
||||
SET_SOURCE_FILES_PROPERTIES(${CMAKE_CURRENT_SOURCE_DIR}/rem/rem0rec.cc
|
||||
PROPERTIES COMPILE_FLAGS -xO3)
|
||||
ENDIF()
|
||||
|
||||
# Removing compiler optimizations for innodb/mem/* files on 64-bit Windows
|
||||
# due to 64-bit compiler error, See MySQL Bug #19424, #36366, #34297
|
||||
IF (MSVC AND CMAKE_SIZEOF_VOID_P EQUAL 8)
|
||||
SET_SOURCE_FILES_PROPERTIES(mem/mem0mem.cc mem/mem0pool.cc
|
||||
PROPERTIES COMPILE_FLAGS -Od)
|
||||
ENDIF()
|
||||
|
||||
IF(MSVC)
|
||||
# Avoid "unreferenced label" warning in generated file
|
||||
GET_FILENAME_COMPONENT(_SRC_DIR ${CMAKE_CURRENT_LIST_FILE} PATH)
|
||||
SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/pars0grm.c
|
||||
PROPERTIES COMPILE_FLAGS "/wd4102")
|
||||
SET_SOURCE_FILES_PROPERTIES(${_SRC_DIR}/pars/lexyy.c
|
||||
PROPERTIES COMPILE_FLAGS "/wd4003")
|
||||
ENDIF()
|
||||
INCLUDE(innodb.cmake)
|
||||
|
||||
SET(INNOBASE_SOURCES
|
||||
api/api0api.cc
|
||||
@@ -404,7 +61,7 @@ SET(INNOBASE_SOURCES
|
||||
dict/dict0mem.cc
|
||||
dict/dict0stats.cc
|
||||
dict/dict0stats_bg.cc
|
||||
# dyn/dyn0dyn.cc
|
||||
dict/dict0defrag_bg.cc
|
||||
eval/eval0eval.cc
|
||||
eval/eval0proc.cc
|
||||
fil/fil0fil.cc
|
||||
@@ -425,34 +82,32 @@ SET(INNOBASE_SOURCES
|
||||
fts/fts0config.cc
|
||||
fts/fts0opt.cc
|
||||
fts/fts0pars.cc
|
||||
fts/fts0plugin.cc
|
||||
fts/fts0que.cc
|
||||
fts/fts0sql.cc
|
||||
fts/fts0tlex.cc
|
||||
gis/gis0geo.cc
|
||||
gis/gis0rtree.cc
|
||||
gis/gis0sea.cc
|
||||
fts/fts0plugin.cc
|
||||
handler/ha_innodb.cc
|
||||
# handler/ha_innopart.cc
|
||||
handler/handler0alter.cc
|
||||
handler/i_s.cc
|
||||
ibuf/ibuf0ibuf.cc
|
||||
lock/lock0iter.cc
|
||||
lock/lock0lock.cc
|
||||
lock/lock0prdt.cc
|
||||
lock/lock0lock.cc
|
||||
lock/lock0wait.cc
|
||||
log/log0log.cc
|
||||
log/log0recv.cc
|
||||
log/log0crypt.cc
|
||||
mach/mach0data.cc
|
||||
mem/mem0mem.cc
|
||||
# mem/mem0pool.cc
|
||||
mtr/mtr0log.cc
|
||||
mtr/mtr0mtr.cc
|
||||
os/os0event.cc
|
||||
os/os0file.cc
|
||||
os/os0proc.cc
|
||||
# os/os0sync.cc
|
||||
os/os0event.cc
|
||||
os/os0thread.cc
|
||||
page/page0cur.cc
|
||||
page/page0page.cc
|
||||
@@ -488,8 +143,8 @@ SET(INNOBASE_SOURCES
|
||||
srv/srv0srv.cc
|
||||
srv/srv0start.cc
|
||||
sync/sync0arr.cc
|
||||
sync/sync0debug.cc
|
||||
sync/sync0rw.cc
|
||||
sync/sync0debug.cc
|
||||
sync/sync0sync.cc
|
||||
trx/trx0i_s.cc
|
||||
trx/trx0purge.cc
|
||||
@@ -500,7 +155,6 @@ SET(INNOBASE_SOURCES
|
||||
trx/trx0trx.cc
|
||||
trx/trx0undo.cc
|
||||
usr/usr0sess.cc
|
||||
# ut/ut0bh.cc
|
||||
ut/ut0byte.cc
|
||||
ut/ut0crc32.cc
|
||||
ut/ut0dbg.cc
|
||||
@@ -527,19 +181,42 @@ IF(WITH_INNODB)
|
||||
SET(WITH_INNOBASE_STORAGE_ENGINE TRUE)
|
||||
ENDIF()
|
||||
|
||||
# On solaris, reduce symbol visibility, so loader does not mix
|
||||
# the same symbols from builtin innodb and from shared one.
|
||||
# Only required for old GCC (3.4.3) that does not support hidden visibility
|
||||
IF(CMAKE_SYSTEM_NAME MATCHES "SunOS" AND CMAKE_COMPILER_IS_GNUCC
|
||||
AND NOT HAVE_VISIBILITY_HIDDEN)
|
||||
SET(LINKER_SCRIPT "-Wl,-M${CMAKE_CURRENT_SOURCE_DIR}/plugin_exports")
|
||||
ELSE()
|
||||
SET(LINKER_SCRIPT)
|
||||
ENDIF()
|
||||
|
||||
MYSQL_ADD_PLUGIN(innobase ${INNOBASE_SOURCES} STORAGE_ENGINE
|
||||
# MODULE_ONLY
|
||||
# MODULE_OUTPUT_NAME ha_innodb
|
||||
DEFAULT RECOMPILE_FOR_EMBEDDED
|
||||
LINK_LIBRARIES ${ZLIB_LIBRARY} ${LINKER_SCRIPT})
|
||||
|
||||
IF(WITH_INNOBASE_STORAGE_ENGINE)
|
||||
ADD_DEPENDENCIES(innobase GenError)
|
||||
ENDIF()
|
||||
|
||||
# Avoid generating Hardware Capabilities due to crc32 instructions
|
||||
IF(CMAKE_SYSTEM_NAME MATCHES "SunOS" AND CMAKE_SYSTEM_PROCESSOR MATCHES "i386")
|
||||
INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/compile_flags.cmake)
|
||||
MY_CHECK_CXX_COMPILER_FLAG("-Wa,-nH" HAVE_WA_NH)
|
||||
IF(HAVE_WA_NH)
|
||||
ADD_COMPILE_FLAGS(
|
||||
ut/ut0crc32.cc
|
||||
COMPILE_FLAGS "-Wa,-nH"
|
||||
)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
|
||||
# A GCC bug causes crash when compiling these files on ARM64 with -O1+
|
||||
# Compile them with -O0 as a workaround.
|
||||
IF(CMAKE_COMPILER_IS_GNUCXX AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
|
||||
# Bug was fixed in GCC 5.2, so workaround only needed < 5.2
|
||||
EXECUTE_PROCESS(COMMAND ${CMAKE_C_COMPILER} -dumpversion
|
||||
OUTPUT_VARIABLE GCC_VERSION)
|
||||
IF(GCC_VERSION VERSION_LESS 5.2)
|
||||
INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/compile_flags.cmake)
|
||||
ADD_COMPILE_FLAGS(
|
||||
btr/btr0btr.cc
|
||||
btr/btr0cur.cc
|
||||
buf/buf0buf.cc
|
||||
gis/gis0sea.cc
|
||||
COMPILE_FLAGS "-O0"
|
||||
)
|
||||
ENDIF()
|
||||
ENDIF()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2008, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2008, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -1006,6 +1006,16 @@ ib_cursor_open_table(
|
||||
return(err);
|
||||
}
|
||||
|
||||
/** Check the table whether it contains virtual columns.
|
||||
@param[in] crsr InnoDB Cursor
|
||||
@return true if table contains virtual column else false. */
|
||||
ib_bool_t
|
||||
ib_is_virtual_table(
|
||||
ib_crsr_t crsr)
|
||||
{
|
||||
return(crsr->prebuilt->table->n_v_cols > 0);
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
Free a context struct for a table handle. */
|
||||
static
|
||||
@@ -1262,12 +1272,14 @@ ib_insert_query_graph_create(
|
||||
row = dtuple_create(heap, dict_table_get_n_cols(table));
|
||||
dict_table_copy_types(row, table);
|
||||
|
||||
ut_ad(!dict_table_have_virtual_index(table));
|
||||
|
||||
ins_node_set_new_row(node->ins, row);
|
||||
|
||||
grph->ins = static_cast<que_fork_t*>(
|
||||
que_node_get_parent(
|
||||
pars_complete_graph_for_exec(node->ins, trx,
|
||||
heap)));
|
||||
heap, NULL)));
|
||||
|
||||
grph->ins->state = QUE_FORK_ACTIVE;
|
||||
}
|
||||
@@ -1376,9 +1388,12 @@ ib_update_vector_create(
|
||||
row_create_update_node_for_mysql(table, heap));
|
||||
}
|
||||
|
||||
ut_ad(!dict_table_have_virtual_index(table));
|
||||
|
||||
grph->upd = static_cast<que_fork_t*>(
|
||||
que_node_get_parent(
|
||||
pars_complete_graph_for_exec(node->upd, trx, heap)));
|
||||
pars_complete_graph_for_exec(node->upd, trx,
|
||||
heap, NULL)));
|
||||
|
||||
grph->upd->state = QUE_FORK_ACTIVE;
|
||||
|
||||
@@ -3009,12 +3024,13 @@ ib_table_lock(
|
||||
}
|
||||
|
||||
ut_a(ib_lck_mode <= static_cast<ib_lck_mode_t>(LOCK_NUM));
|
||||
ut_ad(!dict_table_have_virtual_index(table));
|
||||
|
||||
heap = mem_heap_create(128);
|
||||
|
||||
q_proc.node.sel = sel_node_create(heap);
|
||||
|
||||
thr = pars_complete_graph_for_exec(q_proc.node.sel, trx, heap);
|
||||
thr = pars_complete_graph_for_exec(q_proc.node.sel, trx, heap, NULL);
|
||||
|
||||
q_proc.grph.sel = static_cast<que_fork_t*>(que_node_get_parent(thr));
|
||||
q_proc.grph.sel->state = QUE_FORK_ACTIVE;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2008, 2014, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2008, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -45,69 +45,9 @@ ib_trx_lock_table_with_retry(
|
||||
dict_table_t* table, /*!< in: table to lock */
|
||||
enum lock_mode mode) /*!< in: LOCK_X or LOCK_S */
|
||||
{
|
||||
que_thr_t* thr;
|
||||
dberr_t err;
|
||||
mem_heap_t* heap;
|
||||
sel_node_t* node;
|
||||
|
||||
heap = mem_heap_create(512);
|
||||
|
||||
trx->op_info = "setting table lock";
|
||||
|
||||
node = sel_node_create(heap);
|
||||
thr = pars_complete_graph_for_exec(node, trx, heap);
|
||||
thr->graph->state = QUE_FORK_ACTIVE;
|
||||
|
||||
/* We use the select query graph as the dummy graph needed
|
||||
in the lock module call */
|
||||
|
||||
thr = que_fork_get_first_thr(static_cast<que_fork_t*>(
|
||||
que_node_get_parent(thr)));
|
||||
que_thr_move_to_run_state_for_mysql(thr, trx);
|
||||
|
||||
run_again:
|
||||
thr->run_node = thr;
|
||||
thr->prev_node = thr->common.parent;
|
||||
|
||||
err = lock_table(0, table, mode, thr);
|
||||
|
||||
trx->error_state = err;
|
||||
|
||||
if (UNIV_LIKELY(err == DB_SUCCESS)) {
|
||||
que_thr_stop_for_mysql_no_error(thr, trx);
|
||||
} else {
|
||||
que_thr_stop_for_mysql(thr);
|
||||
|
||||
if (err != DB_QUE_THR_SUSPENDED) {
|
||||
ibool was_lock_wait;
|
||||
|
||||
was_lock_wait = ib_handle_errors(&err, trx, thr, NULL);
|
||||
|
||||
if (was_lock_wait) {
|
||||
goto run_again;
|
||||
}
|
||||
} else {
|
||||
que_thr_t* run_thr;
|
||||
que_node_t* parent;
|
||||
|
||||
parent = que_node_get_parent(thr);
|
||||
run_thr = que_fork_start_command(
|
||||
static_cast<que_fork_t*>(parent));
|
||||
|
||||
ut_a(run_thr == thr);
|
||||
|
||||
/* There was a lock wait but the thread was not
|
||||
in a ready to run or running state. */
|
||||
trx->error_state = DB_LOCK_WAIT;
|
||||
|
||||
goto run_again;
|
||||
}
|
||||
}
|
||||
|
||||
que_graph_free(thr->graph);
|
||||
trx->op_info = "";
|
||||
|
||||
return(err);
|
||||
return(lock_table_for_trx(table, trx, mode));
|
||||
}
|
||||
/****************************************************************//**
|
||||
Handles user errors and lock waits detected by the database engine.
|
||||
|
||||
@@ -338,8 +338,10 @@ btr_root_adjust_on_import(
|
||||
} else {
|
||||
/* Check that the table flags and the tablespace
|
||||
flags match. */
|
||||
ulint flags = dict_tf_to_fsp_flags(table->flags,
|
||||
false);
|
||||
ulint flags = dict_tf_to_fsp_flags(
|
||||
table->flags,
|
||||
false,
|
||||
dict_table_is_encrypted(table));
|
||||
ulint fsp_flags = fil_space_get_flags(table->space);
|
||||
err = fsp_flags_are_equal(flags, fsp_flags)
|
||||
? DB_SUCCESS : DB_CORRUPTION;
|
||||
@@ -910,6 +912,7 @@ btr_page_get_father_node_ptr_func(
|
||||
|
||||
page_no = btr_cur_get_block(cursor)->page.id.page_no();
|
||||
index = btr_cur_get_index(cursor);
|
||||
ut_ad(!dict_index_is_spatial(index));
|
||||
|
||||
ut_ad(srv_read_only_mode
|
||||
|| mtr_memo_contains_flagged(mtr, dict_index_get_lock(index),
|
||||
@@ -923,44 +926,33 @@ btr_page_get_father_node_ptr_func(
|
||||
|
||||
user_rec = btr_cur_get_rec(cursor);
|
||||
ut_a(page_rec_is_user_rec(user_rec));
|
||||
|
||||
tuple = dict_index_build_node_ptr(index, user_rec, 0, heap, level);
|
||||
|
||||
if (!dict_index_is_spatial(index)) {
|
||||
dberr_t err = DB_SUCCESS;
|
||||
if (dict_table_is_intrinsic(index->table)) {
|
||||
err = btr_cur_search_to_nth_level_with_no_latch(
|
||||
index, level + 1, tuple, PAGE_CUR_LE, cursor,
|
||||
file, line, mtr);
|
||||
} else {
|
||||
err = btr_cur_search_to_nth_level(
|
||||
index, level + 1, tuple,
|
||||
PAGE_CUR_LE, latch_mode, cursor, 0,
|
||||
file, line, mtr);
|
||||
}
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::warn() << " Error code: " << err
|
||||
<< " btr_page_get_father_node_ptr_func "
|
||||
<< " level: " << level + 1
|
||||
<< " called from file: "
|
||||
<< file << " line: " << line
|
||||
<< " table: " << index->table->name
|
||||
<< " index: " << index->name;
|
||||
}
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
if (dict_table_is_intrinsic(index->table)) {
|
||||
err = btr_cur_search_to_nth_level_with_no_latch(
|
||||
index, level + 1, tuple, PAGE_CUR_LE, cursor,
|
||||
file, line, mtr);
|
||||
} else {
|
||||
/* For R-tree, only latch mode from caller would be
|
||||
BTR_CONT_MODIFY_TREE */
|
||||
ut_ad(latch_mode == BTR_CONT_MODIFY_TREE);
|
||||
err = btr_cur_search_to_nth_level(
|
||||
index, level + 1, tuple,
|
||||
PAGE_CUR_LE, latch_mode, cursor, 0,
|
||||
file, line, mtr);
|
||||
}
|
||||
|
||||
/* Try to avoid traverse from the root, and get the
|
||||
father node from parent_path vector */
|
||||
rtr_get_father_node(index, level + 1, tuple,
|
||||
NULL, cursor, page_no, mtr);
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::warn() << " Error code: " << err
|
||||
<< " btr_page_get_father_node_ptr_func "
|
||||
<< " level: " << level + 1
|
||||
<< " called from file: "
|
||||
<< file << " line: " << line
|
||||
<< " table: " << index->table->name
|
||||
<< " index: " << index->name();
|
||||
}
|
||||
|
||||
node_ptr = btr_cur_get_rec(cursor);
|
||||
ut_ad(!page_rec_is_comp(node_ptr)
|
||||
|| rec_get_status(node_ptr) == REC_STATUS_NODE_PTR);
|
||||
|
||||
offsets = rec_get_offsets(node_ptr, index, offsets,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
@@ -1106,7 +1098,7 @@ btr_free_root_invalidate(
|
||||
@param[in,out] mtr mini-transaction
|
||||
@return root block, to invoke btr_free_but_not_root() and btr_free_root()
|
||||
@retval NULL if the page is no longer a matching B-tree page */
|
||||
static __attribute__((warn_unused_result))
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
buf_block_t*
|
||||
btr_free_root_check(
|
||||
const page_id_t& page_id,
|
||||
@@ -1654,7 +1646,6 @@ IBUF_BITMAP_FREE is unaffected by reorganization.
|
||||
|
||||
@retval true if the operation was successful
|
||||
@retval false if it is a compressed page, and recompression failed */
|
||||
UNIV_INTERN
|
||||
bool
|
||||
btr_page_reorganize_block(
|
||||
/*======================*/
|
||||
@@ -1782,7 +1773,7 @@ the tuple. It is assumed that mtr contains an x-latch on the tree.
|
||||
NOTE that the operation of this function must always succeed,
|
||||
we cannot reverse it: therefore enough free disk space must be
|
||||
guaranteed to be available before this function is called.
|
||||
@return inserted record or NULL if run out of space */
|
||||
@return inserted record */
|
||||
rec_t*
|
||||
btr_root_raise_and_insert(
|
||||
/*======================*/
|
||||
@@ -1848,7 +1839,7 @@ btr_root_raise_and_insert(
|
||||
|
||||
if (new_block == NULL && os_has_said_disk_full) {
|
||||
return(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
new_page = buf_block_get_frame(new_block);
|
||||
new_page_zip = buf_block_get_page_zip(new_block);
|
||||
@@ -2719,7 +2710,6 @@ released within this function! NOTE that the operation of this
|
||||
function must always succeed, we cannot reverse it: therefore enough
|
||||
free disk space (2 pages) must be guaranteed to be available before
|
||||
this function is called.
|
||||
|
||||
NOTE: jonaso added support for calling function with tuple == NULL
|
||||
which cause it to only split a page.
|
||||
|
||||
@@ -2850,7 +2840,7 @@ func_start:
|
||||
|
||||
DBUG_EXECUTE_IF("disk_is_full",
|
||||
os_has_said_disk_full = true;
|
||||
return(NULL););
|
||||
return(NULL););
|
||||
|
||||
/* 2. Allocate a new page to the index */
|
||||
new_block = btr_page_alloc(cursor->index, hint_page_no, direction,
|
||||
@@ -2858,7 +2848,7 @@ func_start:
|
||||
|
||||
if (new_block == NULL && os_has_said_disk_full) {
|
||||
return(NULL);
|
||||
}
|
||||
}
|
||||
|
||||
new_page = buf_block_get_frame(new_block);
|
||||
new_page_zip = buf_block_get_page_zip(new_block);
|
||||
@@ -3164,7 +3154,6 @@ func_exit:
|
||||
@param[in,out] page page to remove
|
||||
@param[in] index index tree
|
||||
@param[in,out] mtr mini-transaction */
|
||||
UNIV_INTERN
|
||||
void
|
||||
btr_level_list_remove_func(
|
||||
ulint space,
|
||||
@@ -3369,8 +3358,15 @@ btr_lift_page_up(
|
||||
* (REC_OFFS_HEADER_SIZE + 1 + 1 + index->n_fields));
|
||||
buf_block_t* b;
|
||||
|
||||
offsets = btr_page_get_father_block(offsets, heap, index,
|
||||
block, mtr, &cursor);
|
||||
if (dict_index_is_spatial(index)) {
|
||||
offsets = rtr_page_get_father_block(
|
||||
NULL, heap, index, block, mtr,
|
||||
NULL, &cursor);
|
||||
} else {
|
||||
offsets = btr_page_get_father_block(offsets, heap,
|
||||
index, block,
|
||||
mtr, &cursor);
|
||||
}
|
||||
father_block = btr_cur_get_block(&cursor);
|
||||
father_page_zip = buf_block_get_page_zip(father_block);
|
||||
father_page = buf_block_get_frame(father_block);
|
||||
@@ -3386,9 +3382,17 @@ btr_lift_page_up(
|
||||
b->page.id.page_no() != root_page_no; ) {
|
||||
ut_a(n_blocks < BTR_MAX_LEVELS);
|
||||
|
||||
offsets = btr_page_get_father_block(offsets, heap,
|
||||
index, b,
|
||||
mtr, &cursor);
|
||||
if (dict_index_is_spatial(index)) {
|
||||
offsets = rtr_page_get_father_block(
|
||||
NULL, heap, index, b, mtr,
|
||||
NULL, &cursor);
|
||||
} else {
|
||||
offsets = btr_page_get_father_block(offsets,
|
||||
heap,
|
||||
index, b,
|
||||
mtr,
|
||||
&cursor);
|
||||
}
|
||||
|
||||
blocks[n_blocks++] = b = btr_cur_get_block(&cursor);
|
||||
}
|
||||
@@ -3460,6 +3464,13 @@ btr_lift_page_up(
|
||||
}
|
||||
|
||||
if (!dict_table_is_locking_disabled(index->table)) {
|
||||
/* Free predicate page locks on the block */
|
||||
if (dict_index_is_spatial(index)) {
|
||||
lock_mutex_enter();
|
||||
lock_prdt_page_free_from_discard(
|
||||
block, lock_sys->prdt_page_hash);
|
||||
lock_mutex_exit();
|
||||
}
|
||||
lock_update_copy_and_discard(father_block, block);
|
||||
}
|
||||
|
||||
@@ -3686,6 +3697,11 @@ retry:
|
||||
goto retry;
|
||||
}
|
||||
|
||||
/* Set rtr_info for cursor2, since it is
|
||||
necessary in recursive page merge. */
|
||||
cursor2.rtr_info = cursor->rtr_info;
|
||||
cursor2.tree_height = cursor->tree_height;
|
||||
|
||||
offsets2 = rec_get_offsets(
|
||||
btr_cur_get_rec(&cursor2), index,
|
||||
NULL, ULINT_UNDEFINED, &heap);
|
||||
@@ -3744,6 +3760,8 @@ retry:
|
||||
|
||||
/* No GAP lock needs to be worrying about */
|
||||
lock_mutex_enter();
|
||||
lock_prdt_page_free_from_discard(
|
||||
block, lock_sys->prdt_page_hash);
|
||||
lock_rec_free_all_from_discard_page(block);
|
||||
lock_mutex_exit();
|
||||
} else {
|
||||
@@ -3781,6 +3799,11 @@ retry:
|
||||
merge_block, heap)) {
|
||||
goto err_exit;
|
||||
}
|
||||
|
||||
/* Set rtr_info for cursor2, since it is
|
||||
necessary in recursive page merge. */
|
||||
cursor2.rtr_info = cursor->rtr_info;
|
||||
cursor2.tree_height = cursor->tree_height;
|
||||
} else {
|
||||
btr_page_get_father(index, merge_block, mtr, &cursor2);
|
||||
}
|
||||
@@ -3858,6 +3881,8 @@ retry:
|
||||
/* For rtree, we need to update father's mbr. */
|
||||
if (dict_index_is_spatial(index)) {
|
||||
ulint* offsets2;
|
||||
ulint rec_info;
|
||||
|
||||
offsets2 = rec_get_offsets(
|
||||
btr_cur_get_rec(&cursor2),
|
||||
index, NULL, ULINT_UNDEFINED, &heap);
|
||||
@@ -3866,12 +3891,34 @@ retry:
|
||||
btr_cur_get_rec(&cursor2), offsets2)
|
||||
== right_page_no);
|
||||
|
||||
rtr_merge_and_update_mbr(&father_cursor,
|
||||
&cursor2,
|
||||
offsets, offsets2,
|
||||
merge_page, merge_block,
|
||||
block, index, mtr);
|
||||
rec_info = rec_get_info_bits(
|
||||
btr_cur_get_rec(&father_cursor),
|
||||
rec_offs_comp(offsets));
|
||||
if (rec_info & REC_INFO_MIN_REC_FLAG) {
|
||||
/* When the father node ptr is minimal rec,
|
||||
we will keep it and delete the node ptr of
|
||||
merge page. */
|
||||
rtr_merge_and_update_mbr(&father_cursor,
|
||||
&cursor2,
|
||||
offsets, offsets2,
|
||||
merge_page,
|
||||
merge_block,
|
||||
block, index, mtr);
|
||||
} else {
|
||||
/* Otherwise, we will keep the node ptr of
|
||||
merge page and delete the father node ptr.
|
||||
This is for keeping the rec order in upper
|
||||
level. */
|
||||
rtr_merge_and_update_mbr(&cursor2,
|
||||
&father_cursor,
|
||||
offsets2, offsets,
|
||||
merge_page,
|
||||
merge_block,
|
||||
block, index, mtr);
|
||||
}
|
||||
lock_mutex_enter();
|
||||
lock_prdt_page_free_from_discard(
|
||||
block, lock_sys->prdt_page_hash);
|
||||
lock_rec_free_all_from_discard_page(block);
|
||||
lock_mutex_exit();
|
||||
} else {
|
||||
@@ -4028,9 +4075,10 @@ btr_discard_only_page_on_level(
|
||||
if (dict_index_is_spatial(index)) {
|
||||
/* Check any concurrent search having this page */
|
||||
rtr_check_discard_page(index, NULL, block);
|
||||
rtr_page_get_father(index, block, mtr, NULL, &cursor);
|
||||
} else {
|
||||
btr_page_get_father(index, block, mtr, &cursor);
|
||||
}
|
||||
|
||||
btr_page_get_father(index, block, mtr, &cursor);
|
||||
father = btr_cur_get_block(&cursor);
|
||||
|
||||
if (!dict_table_is_locking_disabled(index->table)) {
|
||||
@@ -4114,7 +4162,11 @@ btr_discard_page(
|
||||
MONITOR_INC(MONITOR_INDEX_DISCARD);
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
btr_page_get_father(index, block, mtr, &parent_cursor);
|
||||
if (dict_index_is_spatial(index)) {
|
||||
rtr_page_get_father(index, block, mtr, cursor, &parent_cursor);
|
||||
} else {
|
||||
btr_page_get_father(index, block, mtr, &parent_cursor);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Decide the page which will inherit the locks */
|
||||
@@ -4178,10 +4230,20 @@ btr_discard_page(
|
||||
btr_set_min_rec_mark(node_ptr, mtr);
|
||||
}
|
||||
|
||||
btr_node_ptr_delete(index, block, mtr);
|
||||
if (dict_index_is_spatial(index)) {
|
||||
btr_cur_t father_cursor;
|
||||
|
||||
/* Since rtr_node_ptr_delete doesn't contain get father
|
||||
node ptr, so, we need to get father node ptr first and then
|
||||
delete it. */
|
||||
rtr_page_get_father(index, block, mtr, cursor, &father_cursor);
|
||||
rtr_node_ptr_delete(index, &father_cursor, block, mtr);
|
||||
} else {
|
||||
btr_node_ptr_delete(index, block, mtr);
|
||||
}
|
||||
|
||||
/* Remove the page from the level list */
|
||||
btr_level_list_remove(space, page_size, (page_t*)page, index, mtr);
|
||||
btr_level_list_remove(space, page_size, page, index, mtr);
|
||||
|
||||
#ifdef UNIV_ZIP_DEBUG
|
||||
{
|
||||
@@ -4377,8 +4439,14 @@ btr_check_node_ptr(
|
||||
}
|
||||
|
||||
heap = mem_heap_create(256);
|
||||
offsets = btr_page_get_father_block(NULL, heap, index, block, mtr,
|
||||
&cursor);
|
||||
|
||||
if (dict_index_is_spatial(index)) {
|
||||
offsets = rtr_page_get_father_block(NULL, heap, index, block, mtr,
|
||||
NULL, &cursor);
|
||||
} else {
|
||||
offsets = btr_page_get_father_block(NULL, heap, index, block, mtr,
|
||||
&cursor);
|
||||
}
|
||||
|
||||
if (page_is_leaf(page)) {
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -641,7 +641,7 @@ BtrBulk::pageSplit(
|
||||
|
||||
/* 1. Check if we have only one user record on the page. */
|
||||
if (page_bulk->getRecNo() <= 1) {
|
||||
return DB_TOO_BIG_RECORD;
|
||||
return(DB_TOO_BIG_RECORD);
|
||||
}
|
||||
|
||||
/* 2. create a new page. */
|
||||
@@ -771,6 +771,7 @@ BtrBulk::insert(
|
||||
ulint level)
|
||||
{
|
||||
bool is_left_most = false;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
ut_ad(m_heap != NULL);
|
||||
|
||||
@@ -779,7 +780,7 @@ BtrBulk::insert(
|
||||
PageBulk* new_page_bulk
|
||||
= UT_NEW_NOKEY(PageBulk(m_index, m_trx_id, FIL_NULL,
|
||||
level, m_flush_observer));
|
||||
dberr_t err = new_page_bulk->init();
|
||||
err = new_page_bulk->init();
|
||||
if (err != DB_SUCCESS) {
|
||||
return(err);
|
||||
}
|
||||
@@ -806,29 +807,37 @@ BtrBulk::insert(
|
||||
ulint n_ext = 0;
|
||||
ulint rec_size = rec_get_converted_size(m_index, tuple, n_ext);
|
||||
big_rec_t* big_rec = NULL;
|
||||
rec_t* rec = NULL;
|
||||
ulint* offsets = NULL;
|
||||
|
||||
if (page_bulk->needExt(tuple, rec_size)) {
|
||||
/* The record is so big that we have to store some fields
|
||||
externally on separate database pages */
|
||||
big_rec = dtuple_convert_big_rec(m_index, 0, tuple, &n_ext);
|
||||
|
||||
if (UNIV_UNLIKELY(big_rec == NULL)) {
|
||||
if (big_rec == NULL) {
|
||||
return(DB_TOO_BIG_RECORD);
|
||||
}
|
||||
|
||||
rec_size = rec_get_converted_size(m_index, tuple, n_ext);
|
||||
}
|
||||
|
||||
if (page_bulk->getPageZip() != NULL
|
||||
&& page_zip_is_too_big(m_index, tuple)) {
|
||||
err = DB_TOO_BIG_RECORD;
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
if (!page_bulk->isSpaceAvailable(rec_size)) {
|
||||
/* Create a sibling page_bulk. */
|
||||
PageBulk* sibling_page_bulk;
|
||||
sibling_page_bulk = UT_NEW_NOKEY(PageBulk(m_index, m_trx_id,
|
||||
FIL_NULL, level,
|
||||
m_flush_observer));
|
||||
dberr_t err = sibling_page_bulk->init();
|
||||
err = sibling_page_bulk->init();
|
||||
if (err != DB_SUCCESS) {
|
||||
UT_DELETE(sibling_page_bulk);
|
||||
return(err);
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
/* Commit page bulk. */
|
||||
@@ -836,7 +845,7 @@ BtrBulk::insert(
|
||||
if (err != DB_SUCCESS) {
|
||||
pageAbort(sibling_page_bulk);
|
||||
UT_DELETE(sibling_page_bulk);
|
||||
return(err);
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
/* Set new page bulk to page_bulks. */
|
||||
@@ -850,7 +859,8 @@ BtrBulk::insert(
|
||||
if (page_is_leaf(sibling_page_bulk->getPage())) {
|
||||
/* Check whether trx is interrupted */
|
||||
if (m_flush_observer->check_interrupted()) {
|
||||
return(DB_INTERRUPTED);
|
||||
err = DB_INTERRUPTED;
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
/* Wake up page cleaner to flush dirty pages. */
|
||||
@@ -862,8 +872,6 @@ BtrBulk::insert(
|
||||
|
||||
}
|
||||
|
||||
rec_t* rec;
|
||||
ulint* offsets = NULL;
|
||||
/* Convert tuple to rec. */
|
||||
rec = rec_convert_dtuple_to_rec(static_cast<byte*>(mem_heap_alloc(
|
||||
page_bulk->m_heap, rec_size)), m_index, tuple, n_ext);
|
||||
@@ -873,8 +881,6 @@ BtrBulk::insert(
|
||||
page_bulk->insert(rec, offsets);
|
||||
|
||||
if (big_rec != NULL) {
|
||||
dberr_t err;
|
||||
|
||||
ut_ad(dict_index_is_clust(m_index));
|
||||
ut_ad(page_bulk->getLevel() == 0);
|
||||
ut_ad(page_bulk == m_page_bulks->at(0));
|
||||
@@ -893,13 +899,14 @@ BtrBulk::insert(
|
||||
PageBulk* page_bulk = m_page_bulks->at(level);
|
||||
page_bulk->latch();
|
||||
}
|
||||
|
||||
dtuple_convert_back_big_rec(m_index, tuple, big_rec);
|
||||
|
||||
return(err);
|
||||
}
|
||||
|
||||
return(DB_SUCCESS);
|
||||
func_exit:
|
||||
if (big_rec != NULL) {
|
||||
dtuple_convert_back_big_rec(m_index, tuple, big_rec);
|
||||
}
|
||||
|
||||
return(err);
|
||||
}
|
||||
|
||||
/** Btree bulk load finish. We commit the last page in each level
|
||||
|
||||
@@ -723,7 +723,6 @@ If mode is PAGE_CUR_LE , cursor is left at the place where an insert of the
|
||||
search tuple should be performed in the B-tree. InnoDB does an insert
|
||||
immediately after the cursor. Thus, the cursor may end up on a user record,
|
||||
or on a page infimum record. */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
btr_cur_search_to_nth_level(
|
||||
/*========================*/
|
||||
@@ -1412,7 +1411,11 @@ retry_page_get:
|
||||
level, the search becomes PAGE_CUR_LE */
|
||||
if (page_mode == PAGE_CUR_RTREE_LOCATE
|
||||
&& level == height) {
|
||||
page_mode = PAGE_CUR_LE;
|
||||
if (level == 0) {
|
||||
page_mode = PAGE_CUR_LE;
|
||||
} else {
|
||||
page_mode = PAGE_CUR_RTREE_GET_FATHER;
|
||||
}
|
||||
}
|
||||
|
||||
if (page_mode == PAGE_CUR_RTREE_INSERT) {
|
||||
@@ -1463,6 +1466,11 @@ retry_page_get:
|
||||
ut_ad(0);
|
||||
}
|
||||
}
|
||||
|
||||
if (found && page_mode == PAGE_CUR_RTREE_GET_FATHER) {
|
||||
cursor->low_match =
|
||||
DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1;
|
||||
}
|
||||
} else if (height == 0 && btr_search_enabled
|
||||
&& !dict_index_is_spatial(index)) {
|
||||
/* The adaptive hash index is only used when searching
|
||||
@@ -2102,7 +2110,7 @@ btr_cur_search_to_nth_level_with_no_latch(
|
||||
ut_ad(n_blocks < BTR_MAX_LEVELS);
|
||||
|
||||
block = buf_page_get_gen(page_id, page_size, rw_latch, NULL,
|
||||
buf_mode, file, line, mtr, &err, mark_dirty);
|
||||
buf_mode, file, line, mtr, &err, mark_dirty);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
if (err == DB_DECRYPTION_FAILED) {
|
||||
@@ -2115,7 +2123,7 @@ btr_cur_search_to_nth_level_with_no_latch(
|
||||
index->table->is_encrypted = true;
|
||||
}
|
||||
|
||||
return (err);
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
|
||||
page = buf_block_get_frame(block);
|
||||
@@ -2171,7 +2179,7 @@ btr_cur_search_to_nth_level_with_no_latch(
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
return err;
|
||||
DBUG_RETURN(err);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
@@ -2578,7 +2586,6 @@ btr_cur_open_at_index_side_with_no_latch_func(
|
||||
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets = offsets_;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
rec_offs_init(offsets_);
|
||||
|
||||
ut_ad(level != ULINT_UNDEFINED);
|
||||
@@ -2599,7 +2606,7 @@ btr_cur_open_at_index_side_with_no_latch_func(
|
||||
ut_ad(n_blocks < BTR_MAX_LEVELS);
|
||||
|
||||
block = buf_page_get_gen(page_id, page_size, rw_latch, NULL,
|
||||
BUF_GET, file, line, mtr, &err);
|
||||
BUF_GET, file, line, mtr, &err);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
if (err == DB_DECRYPTION_FAILED) {
|
||||
@@ -2665,7 +2672,7 @@ btr_cur_open_at_index_side_with_no_latch_func(
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
return (err);
|
||||
return(err);
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
@@ -2759,6 +2766,7 @@ btr_cur_open_at_rnd_pos_func(
|
||||
page_id_t page_id(dict_index_get_space(index),
|
||||
dict_index_get_page(index));
|
||||
const page_size_t& page_size = dict_table_page_size(index->table);
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
if (root_leaf_rw_latch == RW_X_LATCH) {
|
||||
node_ptr_max_size = dict_index_node_ptr_max_size(index);
|
||||
@@ -2769,7 +2777,6 @@ btr_cur_open_at_rnd_pos_func(
|
||||
for (;;) {
|
||||
buf_block_t* block;
|
||||
page_t* page;
|
||||
dberr_t err=DB_SUCCESS;
|
||||
ulint rw_latch;
|
||||
|
||||
ut_ad(n_blocks < BTR_MAX_LEVELS);
|
||||
@@ -2783,10 +2790,9 @@ btr_cur_open_at_rnd_pos_func(
|
||||
|
||||
tree_savepoints[n_blocks] = mtr_set_savepoint(mtr);
|
||||
block = buf_page_get_gen(page_id, page_size, rw_latch, NULL,
|
||||
BUF_GET, file, line, mtr, &err);
|
||||
BUF_GET, file, line, mtr, &err);
|
||||
tree_blocks[n_blocks] = block;
|
||||
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
if (err == DB_DECRYPTION_FAILED) {
|
||||
ib_push_warning((void *)NULL,
|
||||
@@ -3222,46 +3228,12 @@ btr_cur_optimistic_insert(
|
||||
rec_size = rec_get_converted_size(index, entry, n_ext);
|
||||
}
|
||||
|
||||
if (page_size.is_compressed()) {
|
||||
/* Estimate the free space of an empty compressed page.
|
||||
Subtract one byte for the encoded heap_no in the
|
||||
modification log. */
|
||||
ulint free_space_zip = page_zip_empty_size(
|
||||
cursor->index->n_fields, page_size.physical());
|
||||
ulint n_uniq = dict_index_get_n_unique_in_tree(index);
|
||||
|
||||
ut_ad(dict_table_is_comp(index->table));
|
||||
|
||||
if (free_space_zip == 0) {
|
||||
too_big:
|
||||
if (big_rec_vec) {
|
||||
dtuple_convert_back_big_rec(
|
||||
index, entry, big_rec_vec);
|
||||
}
|
||||
|
||||
return(DB_TOO_BIG_RECORD);
|
||||
if (page_size.is_compressed() && page_zip_is_too_big(index, entry)) {
|
||||
if (big_rec_vec != NULL) {
|
||||
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
|
||||
}
|
||||
|
||||
/* Subtract one byte for the encoded heap_no in the
|
||||
modification log. */
|
||||
free_space_zip--;
|
||||
|
||||
/* There should be enough room for two node pointer
|
||||
records on an empty non-leaf page. This prevents
|
||||
infinite page splits. */
|
||||
|
||||
if (entry->n_fields >= n_uniq
|
||||
&& (REC_NODE_PTR_SIZE
|
||||
+ rec_get_converted_size_comp_prefix(
|
||||
index, entry->fields, n_uniq, NULL)
|
||||
/* On a compressed page, there is
|
||||
a two-byte entry in the dense
|
||||
page directory for every record.
|
||||
But there is no record header. */
|
||||
- (REC_N_NEW_EXTRA_BYTES - 2)
|
||||
> free_space_zip / 2)) {
|
||||
goto too_big;
|
||||
}
|
||||
return(DB_TOO_BIG_RECORD);
|
||||
}
|
||||
|
||||
LIMIT_OPTIMISTIC_INSERT_DEBUG(page_get_n_recs(page),
|
||||
@@ -3323,12 +3295,12 @@ fail_err:
|
||||
#ifdef UNIV_DEBUG
|
||||
{
|
||||
rec_printer p(entry);
|
||||
DBUG_PRINT("ib_cur", ("insert %s (%llu) by %lu %s",
|
||||
index->name(), index->id,
|
||||
thr != NULL
|
||||
? trx_get_id_for_print(thr_get_trx(thr))
|
||||
: 0,
|
||||
p.str().c_str()));
|
||||
DBUG_PRINT("ib_cur", ("insert %s (" IB_ID_FMT ") by " IB_ID_FMT " %s",
|
||||
index->name(), index->id,
|
||||
thr != NULL
|
||||
? trx_get_id_for_print(thr_get_trx(thr))
|
||||
: 0,
|
||||
p.str().c_str()));
|
||||
}
|
||||
#endif
|
||||
|
||||
@@ -3350,7 +3322,6 @@ fail_err:
|
||||
if specified */
|
||||
err = btr_cur_ins_lock_and_undo(flags, cursor, entry,
|
||||
thr, mtr, &inherit);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
goto fail_err;
|
||||
}
|
||||
@@ -3592,6 +3563,7 @@ btr_cur_pessimistic_insert(
|
||||
btr_cur_get_page_zip(cursor),
|
||||
thr_get_trx(thr)->id, mtr);
|
||||
}
|
||||
|
||||
if (!page_rec_is_infimum(btr_cur_get_rec(cursor))
|
||||
|| btr_page_get_prev(
|
||||
buf_block_get_frame(
|
||||
@@ -3601,18 +3573,6 @@ btr_cur_pessimistic_insert(
|
||||
lock_update_insert() always. */
|
||||
inherit = TRUE;
|
||||
}
|
||||
|
||||
buf_block_t* block = btr_cur_get_block(cursor);
|
||||
buf_frame_t* frame = NULL;
|
||||
|
||||
if (block) {
|
||||
frame = buf_block_get_frame(block);
|
||||
}
|
||||
/* split and inserted need to call
|
||||
lock_update_insert() always. */
|
||||
if (frame && btr_page_get_prev(frame, mtr) == FIL_NULL) {
|
||||
inherit = TRUE;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3640,7 +3600,7 @@ btr_cur_pessimistic_insert(
|
||||
/*************************************************************//**
|
||||
For an update, checks the locks and does the undo logging.
|
||||
@return DB_SUCCESS, DB_WAIT_LOCK, or error number */
|
||||
UNIV_INLINE MY_ATTRIBUTE((warn_unused_result, nonnull(2,3,6,7)))
|
||||
UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
|
||||
dberr_t
|
||||
btr_cur_upd_lock_and_undo(
|
||||
/*======================*/
|
||||
@@ -3978,7 +3938,7 @@ btr_cur_update_in_place(
|
||||
#ifdef UNIV_DEBUG
|
||||
{
|
||||
rec_printer p(rec, offsets);
|
||||
DBUG_PRINT("ib_cur", ("update-in-place %s (%llu) by %lu: %s",
|
||||
DBUG_PRINT("ib_cur", ("update-in-place %s (" IB_ID_FMT ") by "IB_ID_FMT ": %s",
|
||||
index->name(), index->id, trx_id,
|
||||
p.str().c_str()));
|
||||
}
|
||||
@@ -4186,8 +4146,8 @@ any_extern:
|
||||
#ifdef UNIV_DEBUG
|
||||
{
|
||||
rec_printer p(rec, *offsets);
|
||||
DBUG_PRINT("ib_cur", ("update %s (%llu) by %lu: %s",
|
||||
index->name(), index->id, trx_id,
|
||||
DBUG_PRINT("ib_cur", ("update %s (" IB_ID_FMT ") by " IB_ID_FMT ": %s",
|
||||
index->name(), index->id, trx_id,
|
||||
p.str().c_str()));
|
||||
}
|
||||
#endif
|
||||
@@ -5026,7 +4986,7 @@ btr_cur_del_mark_set_clust_rec(
|
||||
#ifdef UNIV_DEBUG
|
||||
{
|
||||
rec_printer p(rec, offsets);
|
||||
DBUG_PRINT("ib_cur", ("delete-mark clust %s (%llu) by %lu: %s",
|
||||
DBUG_PRINT("ib_cur", ("delete-mark clust %s (" IB_ID_FMT ") by " IB_ID_FMT ": %s",
|
||||
index->table_name, index->id,
|
||||
trx_get_id_for_print(trx),
|
||||
p.str().c_str()));
|
||||
@@ -5154,7 +5114,7 @@ btr_cur_del_mark_set_sec_rec(
|
||||
== dict_table_is_comp(cursor->index->table));
|
||||
|
||||
DBUG_PRINT("ib_cur", ("delete-mark=%u sec %u:%u:%u in %s("
|
||||
UINT32PF ") by " TRX_ID_FMT,
|
||||
IB_ID_FMT ") by " TRX_ID_FMT,
|
||||
unsigned(val),
|
||||
block->page.id.space(), block->page.id.page_no(),
|
||||
unsigned(page_rec_get_heap_no(rec)),
|
||||
@@ -5231,10 +5191,15 @@ btr_cur_compress_if_useful(
|
||||
|
||||
if (dict_index_is_spatial(cursor->index)) {
|
||||
const page_t* page = btr_cur_get_page(cursor);
|
||||
const trx_t* trx = NULL;
|
||||
|
||||
if (cursor->rtr_info->thr != NULL) {
|
||||
trx = thr_get_trx(cursor->rtr_info->thr);
|
||||
}
|
||||
|
||||
/* Check whether page lock prevents the compression */
|
||||
if (!lock_test_prdt_page_lock(
|
||||
page_get_space_id(page), page_get_page_no(page))) {
|
||||
if (!lock_test_prdt_page_lock(trx, page_get_space_id(page),
|
||||
page_get_page_no(page))) {
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
@@ -5840,7 +5805,6 @@ btr_estimate_n_rows_in_range_low(
|
||||
|
||||
table_n_rows = dict_table_get_n_rows(index->table);
|
||||
|
||||
mtr_start(&mtr);
|
||||
/* Below we dive to the two records specified by tuple1 and tuple2 and
|
||||
we remember the entire dive paths from the tree root. The place where
|
||||
the tuple1 path ends on the leaf level we call "left border" of our
|
||||
@@ -5850,6 +5814,7 @@ btr_estimate_n_rows_in_range_low(
|
||||
example if "5 < x AND x <= 10" then we should not include the left
|
||||
boundary, but should include the right one. */
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
cursor.path_arr = path1;
|
||||
|
||||
@@ -6910,6 +6875,8 @@ struct btr_blob_log_check_t {
|
||||
|
||||
log_free_check();
|
||||
|
||||
DEBUG_SYNC_C("blob_write_middle_after_check");
|
||||
|
||||
const mtr_log_t log_mode = m_mtr->get_log_mode();
|
||||
m_mtr->start();
|
||||
m_mtr->set_log_mode(log_mode);
|
||||
@@ -7065,10 +7032,7 @@ btr_store_big_rec_extern_fields(
}
#endif /* UNIV_DEBUG || UNIV_BLOB_LIGHT_DEBUG */

/* Calculate the total number of pages for blob data */
ulint total_blob_pages = 0;
const page_size_t page_size(dict_table_page_size(index->table));
const ulint pages_in_extent = dict_table_extent_size(index->table);

/* Space available in compressed page to carry blob data */
const ulint payload_size_zip = page_size.physical()
@@ -7078,55 +7042,6 @@ btr_store_big_rec_extern_fields(
const ulint payload_size = page_size.physical()
- FIL_PAGE_DATA - BTR_BLOB_HDR_SIZE - FIL_PAGE_DATA_END;

if (page_size.is_compressed()) {
for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
total_blob_pages
+= static_cast<ulint>
(compressBound(static_cast<uLong>
(big_rec_vec->fields[i].len))
+ payload_size_zip - 1)
/ payload_size_zip;
}
} else {
for (ulint i = 0; i < big_rec_vec->n_fields; i++) {
total_blob_pages += (big_rec_vec->fields[i].len
+ payload_size - 1)
/ payload_size;
}
}

const ulint n_extents = (total_blob_pages + pages_in_extent - 1)
/ pages_in_extent;
ulint n_reserved = 0;
#ifdef UNIV_DEBUG
ulint n_used = 0; /* number of pages used */
#endif /* UNIV_DEBUG */

if (op == BTR_STORE_INSERT_BULK) {
mtr_t alloc_mtr;

mtr_start(&alloc_mtr);
alloc_mtr.set_named_space(index->space);

if (!fsp_reserve_free_extents(&n_reserved, space_id, n_extents,
FSP_BLOB, &alloc_mtr)) {
mtr_commit(&alloc_mtr);
error = DB_OUT_OF_FILE_SPACE;
goto func_exit;
}

mtr_commit(&alloc_mtr);
} else {
if (!fsp_reserve_free_extents(&n_reserved, space_id, n_extents,
FSP_BLOB, btr_mtr)) {
error = DB_OUT_OF_FILE_SPACE;
goto func_exit;
}
}

ut_ad(n_reserved > 0);
ut_ad(n_reserved == n_extents);

/* We have to create a file segment to the tablespace
for each field and put the pointer to the field in rec */

@@ -7160,6 +7075,7 @@ btr_store_big_rec_extern_fields(
buf_block_t* block;
page_t* page;
const ulint commit_freq = 4;
ulint r_extents;

ut_ad(page_align(field_ref) == page_align(rec));

@@ -7188,23 +7104,35 @@ btr_store_big_rec_extern_fields(
hint_page_no = prev_page_no + 1;
}

mtr_t *alloc_mtr;

if (op == BTR_STORE_INSERT_BULK) {
mtr_t alloc_mtr;

mtr_start(&alloc_mtr);
alloc_mtr.set_named_space(index->space);

block = btr_page_alloc(index, hint_page_no,
FSP_NO_DIR, 0, &alloc_mtr, &mtr);
mtr_commit(&alloc_mtr);

mtr_start(&mtr_bulk);
mtr_bulk.set_spaces(mtr);
alloc_mtr = &mtr_bulk;
} else {
block = btr_page_alloc(index, hint_page_no,
FSP_NO_DIR, 0, &mtr, &mtr);
alloc_mtr = &mtr;
}

if (!fsp_reserve_free_extents(&r_extents, space_id, 1,
FSP_BLOB, alloc_mtr,
1)) {

mtr_commit(alloc_mtr);
error = DB_OUT_OF_FILE_SPACE;
goto func_exit;
}

block = btr_page_alloc(index, hint_page_no, FSP_NO_DIR,
0, alloc_mtr, &mtr);

alloc_mtr->release_free_extents(r_extents);

if (op == BTR_STORE_INSERT_BULK) {
mtr_commit(&mtr_bulk);
}

ut_a(block != NULL);
ut_ad(++n_used <= (n_reserved * pages_in_extent));

page_no = block->page.id.page_no();
page = buf_block_get_frame(block);
@@ -7443,13 +7371,6 @@ next_zip_page:
rec_offs_make_nth_extern(offsets, field_no);
}

/* Verify that the number of extents used is the same as the number
of extents reserved. */
ut_ad(page_zip != NULL
|| ((n_used + pages_in_extent - 1) / pages_in_extent
== n_reserved));
ut_ad((n_used + pages_in_extent - 1) / pages_in_extent <= n_reserved);

func_exit:
if (page_zip) {
deflateEnd(&c_stream);
@@ -7459,8 +7380,6 @@ func_exit:
mem_heap_free(heap);
}

fil_space_release_free_extents(space_id, n_reserved);

#if defined UNIV_DEBUG || defined UNIV_BLOB_LIGHT_DEBUG
/* All pointers to externally stored columns in the record
must be valid. */
@@ -7545,7 +7464,7 @@ btr_free_externally_stored_field(
ulint i, /*!< in: field number of field_ref;
ignored if rec == NULL */
bool rollback, /*!< in: performing rollback? */
mtr_t* local_mtr MY_ATTRIBUTE((unused))) /*!< in: mtr
mtr_t* local_mtr) /*!< in: mtr
containing the latch to data and an
X-latch to the index tree */
{
@@ -7659,7 +7578,7 @@ btr_free_externally_stored_field(
next_page_no = mach_read_from_4(page + FIL_PAGE_NEXT);

btr_page_free_low(index, ext_block, 0,
true, &mtr);
true, &mtr);

if (page_zip != NULL) {
mach_write_to_4(field_ref + BTR_EXTERN_PAGE_NO,
@@ -7686,8 +7605,11 @@ btr_free_externally_stored_field(
page + FIL_PAGE_DATA
+ BTR_BLOB_HDR_NEXT_PAGE_NO);

/* We must supply the page level (= 0) as an argument
because we did not store it on the page (we save the
space overhead from an index page header. */
btr_page_free_low(index, ext_block, 0,
true, &mtr);
true, &mtr);

mlog_write_ulint(field_ref + BTR_EXTERN_PAGE_NO,
next_page_no,

@@ -1,7 +1,7 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (C) 2013, 2014 Facebook, Inc. All Rights Reserved.
|
||||
Copyright (C) 2014, 2015, MariaDB Corporation. All Rights Reserved.
|
||||
Copyright (C) 2014, 2016, MariaDB Corporation. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -33,6 +33,7 @@ Modified 30/07/2014 Jan Lindström jan.lindstrom@mariadb.com
|
||||
#include "btr0pcur.h"
|
||||
#include "dict0stats.h"
|
||||
#include "dict0stats_bg.h"
|
||||
#include "dict0defrag_bg.h"
|
||||
#include "ibuf0ibuf.h"
|
||||
#include "lock0lock.h"
|
||||
#include "srv0start.h"
|
||||
@@ -153,7 +154,7 @@ btr_defragment_init()
|
||||
{
|
||||
srv_defragment_interval = ut_microseconds_to_timer(
|
||||
(ulonglong) (1000000.0 / srv_defragment_frequency));
|
||||
mutex_create(LATCH_ID_DEFRAGMENT_MUTEX, &btr_defragment_mutex);
|
||||
mutex_create(LATCH_ID_BTR_DEFRAGMENT_MUTEX, &btr_defragment_mutex);
|
||||
os_thread_create(btr_defragment_thread, NULL, NULL);
|
||||
}
|
||||
|
||||
@@ -800,6 +801,9 @@ DECLARE_THREAD(btr_defragment_thread)(
|
||||
cursor = btr_pcur_get_btr_cur(pcur);
|
||||
index = btr_cur_get_index(cursor);
|
||||
first_block = btr_cur_get_block(cursor);
|
||||
|
||||
mtr_x_lock(dict_index_get_lock(index), &mtr);
|
||||
mtr.set_named_space(index->space);
|
||||
last_block = btr_defragment_n_pages(first_block, index,
|
||||
srv_defragment_n_pages,
|
||||
&mtr);
|
||||
@@ -818,16 +822,32 @@ DECLARE_THREAD(btr_defragment_thread)(
|
||||
/* Update the last_processed time of this index. */
|
||||
item->last_processed = now;
|
||||
} else {
|
||||
dberr_t err = DB_SUCCESS;
|
||||
mtr_commit(&mtr);
|
||||
/* Reaching the end of the index. */
|
||||
dict_stats_empty_defrag_stats(index);
|
||||
dict_stats_save_defrag_stats(index);
|
||||
dict_stats_save_defrag_summary(index);
|
||||
err = dict_stats_save_defrag_stats(index);
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::error() << "Saving defragmentation stats for table "
|
||||
<< index->table->name.m_name
|
||||
<< " index " << index->name()
|
||||
<< " failed with error " << err;
|
||||
} else {
|
||||
err = dict_stats_save_defrag_summary(index);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::error() << "Saving defragmentation summary for table "
|
||||
<< index->table->name.m_name
|
||||
<< " index " << index->name()
|
||||
<< " failed with error " << err;
|
||||
}
|
||||
}
|
||||
|
||||
btr_defragment_remove_item(item);
|
||||
}
|
||||
}
|
||||
btr_defragment_shutdown();
|
||||
os_thread_exit(NULL);
|
||||
os_thread_exit();
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
|
||||
|
||||
@@ -412,7 +412,6 @@ btr_pessimistic_scrub(
|
||||
}
|
||||
|
||||
/* read block variables */
|
||||
const ulint space_id = mach_read_from_4(page + FIL_PAGE_SPACE_ID);
|
||||
const ulint page_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
|
||||
const page_id_t page_id(dict_index_get_space(index), page_no);
|
||||
const ulint left_page_no = btr_page_get_prev(page, mtr);
|
||||
@@ -434,7 +433,7 @@ btr_pessimistic_scrub(
|
||||
*/
|
||||
mtr->release_block_at_savepoint(scrub_data->savepoint, block);
|
||||
|
||||
buf_block_t* get_block = btr_block_get(
|
||||
buf_block_t* get_block __attribute__((unused)) = btr_block_get(
|
||||
lpage_id, page_size,
|
||||
RW_X_LATCH, index, mtr);
|
||||
|
||||
@@ -455,7 +454,7 @@ btr_pessimistic_scrub(
|
||||
}
|
||||
|
||||
if (right_page_no != FIL_NULL) {
|
||||
buf_block_t* get_block = btr_block_get(
|
||||
buf_block_t* get_block __attribute__((unused))= btr_block_get(
|
||||
rpage_id, page_size,
|
||||
RW_X_LATCH, index, mtr);
|
||||
}
|
||||
|
||||
@@ -52,8 +52,13 @@ char btr_search_enabled = true;
|
||||
/** Number of adaptive hash index partition. */
|
||||
ulong btr_ahi_parts = 8;
|
||||
|
||||
#ifdef UNIV_SEARCH_PERF_STAT
|
||||
/** Number of successful adaptive hash index lookups */
|
||||
ulint btr_search_n_succ = 0;
|
||||
/** Number of failed adaptive hash index lookups */
|
||||
ulint btr_search_n_hash_fail = 0;
|
||||
#endif /* UNIV_SEARCH_PERF_STAT */
|
||||
|
||||
/** padding to prevent other memory update
|
||||
hotspots from residing on the same memory
|
||||
cache line as btr_search_latches */
|
||||
@@ -87,7 +92,7 @@ before hash index building is started */
|
||||
@param[in] n_fields number of complete fields
|
||||
@param[in] n_bytes number of bytes in an incomplete last field
|
||||
@return number of complete or incomplete fields */
|
||||
inline __attribute__((warn_unused_result))
|
||||
inline MY_ATTRIBUTE((warn_unused_result))
|
||||
ulint
|
||||
btr_search_get_n_fields(
|
||||
ulint n_fields,
|
||||
@@ -99,7 +104,7 @@ btr_search_get_n_fields(
|
||||
/** Determine the number of accessed key fields.
|
||||
@param[in] cursor b-tree cursor
|
||||
@return number of complete or incomplete fields */
|
||||
inline __attribute__((warn_unused_result))
|
||||
inline MY_ATTRIBUTE((warn_unused_result))
|
||||
ulint
|
||||
btr_search_get_n_fields(
|
||||
const btr_cur_t* cursor)
|
||||
@@ -561,7 +566,7 @@ ibool
|
||||
btr_search_update_block_hash_info(
|
||||
btr_search_t* info,
|
||||
buf_block_t* block,
|
||||
const btr_cur_t* cursor MY_ATTRIBUTE((unused)))
|
||||
const btr_cur_t* cursor)
|
||||
{
|
||||
ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_S));
|
||||
ut_ad(!rw_lock_own(btr_get_search_latch(cursor->index), RW_LOCK_X));
|
||||
@@ -728,6 +733,10 @@ btr_search_info_update_slow(
|
||||
if (cursor->flag == BTR_CUR_HASH_FAIL) {
|
||||
/* Update the hash node reference, if appropriate */
|
||||
|
||||
#ifdef UNIV_SEARCH_PERF_STAT
|
||||
btr_search_n_hash_fail++;
|
||||
#endif /* UNIV_SEARCH_PERF_STAT */
|
||||
|
||||
btr_search_x_lock(cursor->index);
|
||||
|
||||
btr_search_update_hash_ref(info, block, cursor);
|
||||
@@ -1011,7 +1020,7 @@ btr_search_guess_on_hash(
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
buf_block_t* block = buf_block_align(rec);
|
||||
buf_block_t* block = buf_block_from_ahi(rec);
|
||||
|
||||
if (!has_search_latch) {
|
||||
|
||||
@@ -1114,6 +1123,9 @@ btr_search_guess_on_hash(
|
||||
#endif
|
||||
info->last_hash_succ = TRUE;
|
||||
|
||||
#ifdef UNIV_SEARCH_PERF_STAT
|
||||
btr_search_n_succ++;
|
||||
#endif
|
||||
if (!has_search_latch && buf_page_peek_if_too_old(&block->page)) {
|
||||
|
||||
buf_page_make_young(&block->page);
|
||||
@@ -1121,7 +1133,6 @@ btr_search_guess_on_hash(
|
||||
|
||||
/* Increment the page get statistics though we did not really
|
||||
fix the page: for user info only */
|
||||
|
||||
{
|
||||
buf_pool_t* buf_pool = buf_pool_from_bpage(&block->page);
|
||||
|
||||
@@ -1181,7 +1192,8 @@ retry:
|
||||
const index_id_t index_id
|
||||
= btr_page_get_index_id(block->frame);
|
||||
const ulint ahi_slot
|
||||
= ut_fold_ulint_pair(index_id, block->page.id.space())
|
||||
= ut_fold_ulint_pair(static_cast<ulint>(index_id),
|
||||
static_cast<ulint>(block->page.id.space()))
|
||||
% btr_ahi_parts;
|
||||
latch = btr_search_latches[ahi_slot];
|
||||
|
||||
@@ -2006,7 +2018,7 @@ btr_search_hash_table_validate(ulint hash_table_id)
|
||||
|
||||
for (; node != NULL; node = node->next) {
|
||||
const buf_block_t* block
|
||||
= buf_block_align((byte*) node->data);
|
||||
= buf_block_from_ahi((byte*) node->data);
|
||||
const buf_block_t* hash_block;
|
||||
buf_pool_t* buf_pool;
|
||||
index_id_t page_index_id;
|
||||
|
||||
@@ -72,14 +72,9 @@ Created 11/5/1995 Heikki Tuuri
|
||||
#include "sync0sync.h"
|
||||
#include "buf0dump.h"
|
||||
#include "ut0new.h"
|
||||
|
||||
#include <new>
|
||||
#include <map>
|
||||
#include <sstream>
|
||||
#ifdef HAVE_LIBNUMA
|
||||
#include <numa.h>
|
||||
#include <numaif.h>
|
||||
#endif // HAVE_LIBNUMA
|
||||
#ifndef UNIV_INNOCHECKSUM
|
||||
#include "fil0pagecompress.h"
|
||||
#include "fsp0pagecompress.h"
|
||||
@@ -92,6 +87,48 @@ Created 11/5/1995 Heikki Tuuri
|
||||
#include "lzo/lzo1x.h"
|
||||
#endif
|
||||
|
||||
#if defined(HAVE_LIBNUMA) && defined(WITH_NUMA)
|
||||
#include <numa.h>
|
||||
#include <numaif.h>
|
||||
struct set_numa_interleave_t
|
||||
{
|
||||
set_numa_interleave_t()
|
||||
{
|
||||
if (srv_numa_interleave) {
|
||||
|
||||
ib::info() << "Setting NUMA memory policy to"
|
||||
" MPOL_INTERLEAVE";
|
||||
if (set_mempolicy(MPOL_INTERLEAVE,
|
||||
numa_all_nodes_ptr->maskp,
|
||||
numa_all_nodes_ptr->size) != 0) {
|
||||
|
||||
ib::warn() << "Failed to set NUMA memory"
|
||||
" policy to MPOL_INTERLEAVE: "
|
||||
<< strerror(errno);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
~set_numa_interleave_t()
|
||||
{
|
||||
if (srv_numa_interleave) {
|
||||
|
||||
ib::info() << "Setting NUMA memory policy to"
|
||||
" MPOL_DEFAULT";
|
||||
if (set_mempolicy(MPOL_DEFAULT, NULL, 0) != 0) {
|
||||
ib::warn() << "Failed to set NUMA memory"
|
||||
" policy to MPOL_DEFAULT: "
|
||||
<< strerror(errno);
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#define NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE set_numa_interleave_t scoped_numa
|
||||
#else
|
||||
#define NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE
|
||||
#endif /* HAVE_LIBNUMA && WITH_NUMA */
|
||||
|
||||
/*
|
||||
IMPLEMENTATION OF THE BUFFER POOL
|
||||
=================================
|
||||
@@ -323,10 +360,6 @@ The map pointed by this should not be updated */
|
||||
static buf_pool_chunk_map_t* buf_chunk_map_ref = NULL;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/** Protect reference for buf_chunk_map_ref from deleting map,
|
||||
because the reference can be caused by debug assertion code. */
|
||||
static rw_lock_t* buf_chunk_map_latch;
|
||||
|
||||
/** Disable resizing buffer pool to make assertion code not expensive. */
|
||||
my_bool buf_disable_resize_buffer_pool_debug = TRUE;
|
||||
#endif /* UNIV_DEBUG */
|
||||
@@ -569,29 +602,31 @@ buf_page_is_zeroes(
|
||||
}
|
||||
|
||||
/** Checks if the page is in crc32 checksum format.
|
||||
@param[in] read_buf database page
|
||||
@param[in] checksum_field1 new checksum field
|
||||
@param[in] checksum_field2 old checksum field
|
||||
@param[in] page_no page number of given read_buf
|
||||
@param[in] is_log_enabled true if log option is enabled
|
||||
@param[in] log_file file pointer to log_file
|
||||
@param[in] curr_algo current checksum algorithm
|
||||
@param[in] read_buf database page
|
||||
@param[in] checksum_field1 new checksum field
|
||||
@param[in] checksum_field2 old checksum field
|
||||
@param[in] page_no page number of given read_buf
|
||||
@param[in] is_log_enabled true if log option is enabled
|
||||
@param[in] log_file file pointer to log_file
|
||||
@param[in] curr_algo current checksum algorithm
|
||||
@param[in] use_legacy_big_endian use legacy big endian algorithm
|
||||
@return true if the page is in crc32 checksum format. */
|
||||
UNIV_INLINE
|
||||
bool
|
||||
buf_page_is_checksum_valid_crc32(
|
||||
const byte* read_buf,
|
||||
ulint checksum_field1,
|
||||
ulint checksum_field2
|
||||
ulint checksum_field2,
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
,uintmax_t page_no,
|
||||
uintmax_t page_no,
|
||||
bool is_log_enabled,
|
||||
FILE* log_file,
|
||||
const srv_checksum_algorithm_t curr_algo
|
||||
const srv_checksum_algorithm_t curr_algo,
|
||||
#endif /* UNIV_INNOCHECKSUM */
|
||||
)
|
||||
bool use_legacy_big_endian)
|
||||
{
|
||||
const uint32_t crc32 = buf_calc_page_crc32(read_buf);
|
||||
const uint32_t crc32 = buf_calc_page_crc32(read_buf,
|
||||
use_legacy_big_endian);
|
||||
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
if (is_log_enabled
|
||||
@@ -740,6 +775,7 @@ buf_page_is_checksum_valid_none(
|
||||
}
|
||||
#endif /* UNIV_INNOCHECKSUM */
|
||||
|
||||
|
||||
return(checksum_field1 == checksum_field2
|
||||
&& checksum_field1 == BUF_NO_CHECKSUM_MAGIC);
|
||||
}
|
||||
@@ -896,16 +932,18 @@ buf_page_is_corrupted(
|
||||
const srv_checksum_algorithm_t curr_algo =
|
||||
static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm);
|
||||
|
||||
bool legacy_checksum_checked = false;
|
||||
|
||||
switch (curr_algo) {
|
||||
case SRV_CHECKSUM_ALGORITHM_CRC32:
|
||||
case SRV_CHECKSUM_ALGORITHM_STRICT_CRC32:
|
||||
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2
|
||||
checksum_field1, checksum_field2,
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
, page_no, is_log_enabled, log_file, curr_algo
|
||||
page_no, is_log_enabled, log_file, curr_algo,
|
||||
#endif /* UNIV_INNOCHECKSUM */
|
||||
)) {
|
||||
false)) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
@@ -928,13 +966,13 @@ buf_page_is_corrupted(
|
||||
if (is_log_enabled) {
|
||||
|
||||
fprintf(log_file, "page::%lu;"
|
||||
" old style: calculated = %lu;"
|
||||
" recorded = %lu\n", page_no,
|
||||
" old style: calculated = " ULINTPF ";"
|
||||
" recorded = " ULINTPF "\n", page_no,
|
||||
buf_calc_page_old_checksum(read_buf),
|
||||
checksum_field2);
|
||||
fprintf(log_file, "page::%lu;"
|
||||
" new style: calculated = %lu;"
|
||||
" crc32 = %u; recorded = %lu\n",
|
||||
" new style: calculated = " ULINTPF ";"
|
||||
" crc32 = %u; recorded = " ULINTPF "\n",
|
||||
page_no,
|
||||
buf_calc_page_new_checksum(read_buf),
|
||||
buf_calc_page_crc32(read_buf),
|
||||
@@ -944,6 +982,24 @@ buf_page_is_corrupted(
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/* We need to check whether the stored checksum matches legacy
|
||||
big endian checksum or Innodb checksum. We optimize the order
|
||||
based on earlier results. if earlier we have found pages
|
||||
matching legacy big endian checksum, we try to match it first.
|
||||
Otherwise we check innodb checksum first. */
|
||||
if (legacy_big_endian_checksum) {
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2,
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
page_no, is_log_enabled, log_file, curr_algo,
|
||||
#endif /* UNIV_INNOCHECKSUM */
|
||||
true)) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
legacy_checksum_checked = true;
|
||||
}
|
||||
|
||||
if (buf_page_is_checksum_valid_innodb(read_buf,
|
||||
checksum_field1, checksum_field2
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
@@ -961,6 +1017,18 @@ buf_page_is_corrupted(
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
/* If legacy checksum is not checked, do it now. */
|
||||
if (!legacy_checksum_checked && buf_page_is_checksum_valid_crc32(
|
||||
read_buf, checksum_field1, checksum_field2,
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
page_no, is_log_enabled, log_file, curr_algo,
|
||||
#endif /* UNIV_INNOCHECKSUM */
|
||||
true)) {
|
||||
|
||||
legacy_big_endian_checksum = true;
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
if (is_log_enabled) {
|
||||
fprintf(log_file, "Fail; page %lu"
|
||||
@@ -1016,12 +1084,19 @@ buf_page_is_corrupted(
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
, page_no, is_log_enabled, log_file, curr_algo)) {
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2,
|
||||
page_no, is_log_enabled, log_file, curr_algo, false)
|
||||
|| buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2,
|
||||
page_no, is_log_enabled, log_file, curr_algo, true)) {
|
||||
#else /* UNIV_INNOCHECKSUM */
|
||||
)) {
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2, false)
|
||||
|| buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2, true)) {
|
||||
|
||||
if (curr_algo
|
||||
== SRV_CHECKSUM_ALGORITHM_STRICT_INNODB) {
|
||||
page_warn_strict_checksum(
|
||||
@@ -1054,12 +1129,19 @@ buf_page_is_corrupted(
|
||||
return(false);
|
||||
}
|
||||
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2
|
||||
#ifdef UNIV_INNOCHECKSUM
|
||||
, page_no, is_log_enabled, log_file, curr_algo)) {
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2,
|
||||
page_no, is_log_enabled, log_file, curr_algo, false)
|
||||
|| buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2,
|
||||
page_no, is_log_enabled, log_file, curr_algo, true)) {
|
||||
#else /* UNIV_INNOCHECKSUM */
|
||||
)) {
|
||||
if (buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2, false)
|
||||
|| buf_page_is_checksum_valid_crc32(read_buf,
|
||||
checksum_field1, checksum_field2, true)) {
|
||||
|
||||
page_warn_strict_checksum(
|
||||
curr_algo,
|
||||
SRV_CHECKSUM_ALGORITHM_CRC32,
|
||||
@@ -1469,9 +1551,9 @@ buf_chunk_init(
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
#ifdef HAVE_LIBNUMA
|
||||
#if defined(HAVE_LIBNUMA) && defined(WITH_NUMA)
|
||||
if (srv_numa_interleave) {
|
||||
int st = mbind(chunk->mem, mem_size,
|
||||
int st = mbind(chunk->mem, chunk->mem_size(),
|
||||
MPOL_INTERLEAVE,
|
||||
numa_all_nodes_ptr->maskp,
|
||||
numa_all_nodes_ptr->size,
|
||||
@@ -1482,7 +1564,8 @@ buf_chunk_init(
|
||||
" (error: " << strerror(errno) << ").";
|
||||
}
|
||||
}
|
||||
#endif // HAVE_LIBNUMA
|
||||
#endif /* HAVE_LIBNUMA && WITH_NUMA */
|
||||
|
||||
|
||||
/* Allocate the block descriptors from
|
||||
the start of the memory block. */
|
||||
@@ -1891,7 +1974,6 @@ buf_pool_free_instance(
|
||||
hash_table_free(buf_pool->page_hash);
|
||||
hash_table_free(buf_pool->zip_hash);
|
||||
|
||||
buf_pool->allocator.~ut_allocator();
|
||||
/* Free all used temporary slots */
|
||||
if (buf_pool->tmp_arr) {
|
||||
for(ulint i = 0; i < buf_pool->tmp_arr->n_slots; i++) {
|
||||
@@ -1912,11 +1994,13 @@ buf_pool_free_instance(
|
||||
slot->comp_buf_free = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
ut_free(buf_pool->tmp_arr->slots);
|
||||
ut_free(buf_pool->tmp_arr);
|
||||
buf_pool->tmp_arr = NULL;
|
||||
}
|
||||
|
||||
ut_free(buf_pool->tmp_arr->slots);
|
||||
ut_free(buf_pool->tmp_arr);
|
||||
buf_pool->tmp_arr = NULL;
|
||||
buf_pool->allocator.~ut_allocator();
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
@@ -1935,33 +2019,17 @@ buf_pool_init(
|
||||
ut_ad(n_instances <= MAX_BUFFER_POOLS);
|
||||
ut_ad(n_instances == srv_buf_pool_instances);
|
||||
|
||||
NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
|
||||
|
||||
buf_pool_resizing = false;
|
||||
buf_pool_withdrawing = false;
|
||||
buf_withdraw_clock = 0;
|
||||
|
||||
#ifdef HAVE_LIBNUMA
|
||||
if (srv_numa_interleave) {
|
||||
ib::info() << "Setting NUMA memory policy to MPOL_INTERLEAVE";
|
||||
if (set_mempolicy(MPOL_INTERLEAVE,
|
||||
numa_all_nodes_ptr->maskp,
|
||||
numa_all_nodes_ptr->size) != 0) {
|
||||
ib::warn() << "Failed to set NUMA memory policy to"
|
||||
" MPOL_INTERLEAVE: " << strerror(errno);
|
||||
}
|
||||
}
|
||||
#endif // HAVE_LIBNUMA
|
||||
|
||||
buf_pool_ptr = (buf_pool_t*) ut_zalloc_nokey(
|
||||
n_instances * sizeof *buf_pool_ptr);
|
||||
|
||||
buf_chunk_map_reg = UT_NEW_NOKEY(buf_pool_chunk_map_t());
|
||||
|
||||
ut_d(buf_chunk_map_latch = static_cast<rw_lock_t*>(
|
||||
ut_zalloc_nokey(sizeof(*buf_chunk_map_latch))));
|
||||
|
||||
ut_d(rw_lock_create(
|
||||
buf_chunk_map_latch_key, buf_chunk_map_latch, SYNC_ANY_LATCH));
|
||||
|
||||
for (i = 0; i < n_instances; i++) {
|
||||
buf_pool_t* ptr = &buf_pool_ptr[i];
|
||||
|
||||
@@ -1981,18 +2049,6 @@ buf_pool_init(
|
||||
|
||||
btr_search_sys_create(buf_pool_get_curr_size() / sizeof(void*) / 64);
|
||||
|
||||
#ifdef HAVE_LIBNUMA
|
||||
if (srv_numa_interleave) {
|
||||
ib::info() << "Setting NUMA memory policy to MPOL_DEFAULT";
|
||||
if (set_mempolicy(MPOL_DEFAULT, NULL, 0) != 0) {
|
||||
ib::warn() << "Failed to set NUMA memory policy to"
|
||||
" MPOL_DEFAULT: " << strerror(errno);
|
||||
}
|
||||
}
|
||||
#endif // HAVE_LIBNUMA
|
||||
|
||||
buf_flush_event = os_event_create(0);
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
@@ -2008,10 +2064,6 @@ buf_pool_free(
|
||||
buf_pool_free_instance(buf_pool_from_array(i));
|
||||
}
|
||||
|
||||
ut_d(rw_lock_free(buf_chunk_map_latch));
|
||||
ut_d(ut_free(buf_chunk_map_latch));
|
||||
ut_d(buf_chunk_map_latch = NULL);
|
||||
|
||||
UT_DELETE(buf_chunk_map_reg);
|
||||
buf_chunk_map_reg = buf_chunk_map_ref = NULL;
|
||||
|
||||
@@ -2561,6 +2613,8 @@ buf_pool_resize()
|
||||
ulint new_instance_size;
|
||||
bool warning = false;
|
||||
|
||||
NUMA_MEMPOLICY_INTERLEAVE_IN_SCOPE;
|
||||
|
||||
ut_ad(!buf_pool_resizing);
|
||||
ut_ad(!buf_pool_withdrawing);
|
||||
ut_ad(srv_buf_pool_chunk_unit > 0);
|
||||
@@ -2958,9 +3012,7 @@ calc_buf_pool_size:
|
||||
}
|
||||
}
|
||||
|
||||
ut_d(rw_lock_x_lock(buf_chunk_map_latch));
|
||||
UT_DELETE(chunk_map_old);
|
||||
ut_d(rw_lock_x_unlock(buf_chunk_map_latch));
|
||||
|
||||
buf_pool_resizing = false;
|
||||
|
||||
@@ -3028,14 +3080,12 @@ when waked up either performs a resizing and sleeps again.
|
||||
extern "C"
|
||||
os_thread_ret_t
|
||||
DECLARE_THREAD(buf_resize_thread)(
|
||||
void* arg __attribute__((unused)))
|
||||
void* arg MY_ATTRIBUTE((unused)))
|
||||
{
|
||||
my_thread_init();
|
||||
|
||||
srv_buf_resize_thread_active = true;
|
||||
|
||||
buf_resize_status("not started");
|
||||
|
||||
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
|
||||
os_event_wait(srv_buf_resize_event);
|
||||
os_event_reset(srv_buf_resize_event);
|
||||
@@ -3063,7 +3113,7 @@ DECLARE_THREAD(buf_resize_thread)(
|
||||
srv_buf_resize_thread_active = false;
|
||||
|
||||
my_thread_end();
|
||||
os_thread_exit(NULL);
|
||||
os_thread_exit();
|
||||
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
@@ -3666,6 +3716,7 @@ buf_page_get_zip(
|
||||
ibool discard_attempted = FALSE;
|
||||
ibool must_read;
|
||||
buf_pool_t* buf_pool = buf_pool_get(page_id);
|
||||
buf_page_t* rpage = NULL;
|
||||
|
||||
buf_pool->stat.n_page_gets++;
|
||||
|
||||
@@ -3684,7 +3735,7 @@ lookup:
|
||||
/* Page not in buf_pool: needs to be read from file */
|
||||
|
||||
ut_ad(!hash_lock);
|
||||
buf_read_page(page_id, page_size, NULL);
|
||||
buf_read_page(page_id, page_size, &rpage);
|
||||
|
||||
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
|
||||
ut_a(++buf_dbg_counter % 5771 || buf_validate());
|
||||
@@ -3873,150 +3924,44 @@ buf_zip_decompress(
|
||||
}
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/*******************************************************************//**
|
||||
Gets the block to whose frame the pointer is pointing to.
|
||||
/** Get a buffer block from an adaptive hash index pointer.
|
||||
This function does not return if the block is not identified.
|
||||
@param[in] ptr pointer to within a page frame
|
||||
@return pointer to block, never NULL */
|
||||
buf_block_t*
|
||||
buf_block_align(
|
||||
/*============*/
|
||||
const byte* ptr) /*!< in: pointer to a frame */
|
||||
buf_block_from_ahi(const byte* ptr)
|
||||
{
|
||||
buf_pool_chunk_map_t::iterator it;
|
||||
|
||||
ut_ad(srv_buf_pool_chunk_unit > 0);
|
||||
|
||||
/* TODO: This might still be an optimistic treatment.
buf_pool_resize() needs all buf_pool_mutex and all
buf_pool->page_hash x-latched until actual modification.
It should block the other user threads and should take a while,
which is enough time to complete the buf_pool_chunk_map access. */
while (buf_pool_resizing) {
|
||||
/* buf_pool_chunk_map is being modified */
|
||||
os_thread_sleep(100000); /* 0.1 sec */
|
||||
}
|
||||
|
||||
ulint counter = 0;
|
||||
retry:
|
||||
#ifdef UNIV_DEBUG
|
||||
bool resize_disabled = (buf_disable_resize_buffer_pool_debug != FALSE);
|
||||
if (!resize_disabled) {
|
||||
rw_lock_s_lock(buf_chunk_map_latch);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
buf_pool_chunk_map_t* chunk_map = buf_chunk_map_ref;
|
||||
ut_ad(buf_chunk_map_ref == buf_chunk_map_reg);
|
||||
ut_ad(!buf_pool_resizing);
|
||||
|
||||
if (ptr < reinterpret_cast<byte*>(srv_buf_pool_chunk_unit)) {
|
||||
it = chunk_map->upper_bound(0);
|
||||
} else {
|
||||
it = chunk_map->upper_bound(
|
||||
ptr - srv_buf_pool_chunk_unit);
|
||||
}
|
||||
const byte* bound = reinterpret_cast<uintptr_t>(ptr)
|
||||
> srv_buf_pool_chunk_unit
|
||||
? ptr - srv_buf_pool_chunk_unit : 0;
|
||||
it = chunk_map->upper_bound(bound);
|
||||
|
||||
if (it == chunk_map->end()) {
|
||||
#ifdef UNIV_DEBUG
|
||||
if (!resize_disabled) {
|
||||
rw_lock_s_unlock(buf_chunk_map_latch);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
/* The block should always be found. */
|
||||
++counter;
|
||||
ut_a(counter < 10);
|
||||
os_thread_sleep(100000); /* 0.1 sec */
|
||||
goto retry;
|
||||
}
|
||||
ut_a(it != chunk_map->end());
|
||||
|
||||
buf_chunk_t* chunk = it->second;
|
||||
#ifdef UNIV_DEBUG
|
||||
if (!resize_disabled) {
|
||||
rw_lock_s_unlock(buf_chunk_map_latch);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
ulint offs = ptr - chunk->blocks->frame;
|
||||
|
||||
offs >>= UNIV_PAGE_SIZE_SHIFT;
|
||||
|
||||
if (offs < chunk->size) {
|
||||
buf_block_t* block = &chunk->blocks[offs];
|
||||
ut_a(offs < chunk->size);
|
||||
|
||||
/* The function buf_chunk_init() invokes
|
||||
buf_block_init() so that block[n].frame ==
|
||||
block->frame + n * UNIV_PAGE_SIZE. Check it. */
|
||||
ut_ad(block->frame == page_align(ptr));
|
||||
#ifdef UNIV_DEBUG
|
||||
/* A thread that updates these fields must
|
||||
hold buf_pool->mutex and block->mutex. Acquire
|
||||
only the latter. */
|
||||
buf_page_mutex_enter(block);
|
||||
buf_block_t* block = &chunk->blocks[offs];
|
||||
|
||||
switch (buf_block_get_state(block)) {
|
||||
case BUF_BLOCK_POOL_WATCH:
|
||||
case BUF_BLOCK_ZIP_PAGE:
|
||||
case BUF_BLOCK_ZIP_DIRTY:
|
||||
/* These types should only be used in
|
||||
the compressed buffer pool, whose
|
||||
memory is allocated from
|
||||
buf_pool->chunks, in UNIV_PAGE_SIZE
|
||||
blocks flagged as BUF_BLOCK_MEMORY. */
|
||||
ut_error;
|
||||
break;
|
||||
case BUF_BLOCK_NOT_USED:
|
||||
case BUF_BLOCK_READY_FOR_USE:
|
||||
case BUF_BLOCK_MEMORY:
|
||||
/* Some data structures contain
|
||||
"guess" pointers to file pages. The
|
||||
file pages may have been freed and
|
||||
reused. Do not complain. */
|
||||
break;
|
||||
case BUF_BLOCK_REMOVE_HASH:
|
||||
/* buf_LRU_block_remove_hashed_page()
|
||||
will overwrite the FIL_PAGE_OFFSET and
|
||||
FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID with
|
||||
0xff and set the state to
|
||||
BUF_BLOCK_REMOVE_HASH. */
|
||||
# ifndef UNIV_DEBUG_VALGRIND
|
||||
/* In buf_LRU_block_remove_hashed() we
|
||||
explicitly set those values to 0xff and
|
||||
declare them uninitialized with
|
||||
UNIV_MEM_INVALID() after that. */
|
||||
ut_ad(page_get_space_id(page_align(ptr))
|
||||
== 0xffffffff);
|
||||
ut_ad(page_get_page_no(page_align(ptr))
|
||||
== 0xffffffff);
|
||||
# endif /* UNIV_DEBUG_VALGRIND */
|
||||
break;
|
||||
case BUF_BLOCK_FILE_PAGE:
|
||||
const ulint space_id1 = block->page.id.space();
|
||||
const ulint page_no1 = block->page.id.page_no();
|
||||
const ulint space_id2 = page_get_space_id(
|
||||
page_align(ptr));
|
||||
const ulint page_no2= page_get_page_no(
|
||||
page_align(ptr));
|
||||
|
||||
if (space_id1 != space_id2 || page_no1 != page_no2) {
|
||||
|
||||
ib::error() << "Found a mismatch page,"
|
||||
<< " expect page "
|
||||
<< page_id_t(space_id1, page_no1)
|
||||
<< " but found "
|
||||
<< page_id_t(space_id2, page_no2);
|
||||
|
||||
ut_ad(0);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
buf_page_mutex_exit(block);
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
return(block);
|
||||
}
|
||||
|
||||
/* The block should always be found. */
|
||||
++counter;
|
||||
ut_a(counter < 10);
|
||||
os_thread_sleep(100000); /* 0.1 sec */
|
||||
goto retry;
|
||||
/* The function buf_chunk_init() invokes buf_block_init() so that
|
||||
block[n].frame == block->frame + n * UNIV_PAGE_SIZE. Check it. */
|
||||
ut_ad(block->frame == page_align(ptr));
|
||||
/* Read the state of the block without holding a mutex.
|
||||
A state transition from BUF_BLOCK_FILE_PAGE to
|
||||
BUF_BLOCK_REMOVE_HASH is possible during this execution. */
|
||||
ut_d(const buf_page_state state = buf_block_get_state(block));
|
||||
ut_ad(state == BUF_BLOCK_FILE_PAGE || state == BUF_BLOCK_REMOVE_HASH);
|
||||
return(block);
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
@@ -5657,6 +5602,7 @@ buf_page_create(
|
||||
/* These 8 bytes are also repurposed for PageIO compression and must
|
||||
be reset when the frame is assigned to a new page id. See fil0fil.h.
|
||||
|
||||
|
||||
FIL_PAGE_FILE_FLUSH_LSN is used on the following pages:
|
||||
(1) The first page of the InnoDB system tablespace (page 0:0)
|
||||
(2) FIL_RTREE_SPLIT_SEQ_NUM on R-tree pages .
|
||||
@@ -5948,7 +5894,7 @@ buf_page_io_complete(
|
||||
ulint read_page_no;
|
||||
ulint read_space_id;
|
||||
byte* frame;
|
||||
bool compressed_page;
|
||||
bool compressed_page=false;
|
||||
|
||||
ut_ad(bpage->zip.data != NULL || ((buf_block_t*)bpage)->frame != NULL);
|
||||
|
||||
@@ -6009,6 +5955,7 @@ buf_page_io_complete(
|
||||
<< ", should be " << bpage->id;
|
||||
}
|
||||
|
||||
#ifdef MYSQL_COMPRESSION
|
||||
compressed_page = Compression::is_compressed_page(frame);
|
||||
|
||||
/* If the decompress failed then the most likely case is
|
||||
@@ -6026,6 +5973,7 @@ buf_page_io_complete(
|
||||
<< Compression::to_string(meta) << " "
|
||||
<< "that is not supported by this instance";
|
||||
}
|
||||
#endif /* MYSQL_COMPRESSION */
|
||||
|
||||
/* From version 3.23.38 up we store the page checksum
|
||||
to the 4 first bytes of the page end lsn field */
|
||||
@@ -6064,7 +6012,7 @@ corrupt:
|
||||
|
||||
ib::info()
|
||||
<< "It is also possible that your"
|
||||
" operating system has corrupted"
|
||||
" operating system has corrupted"
|
||||
" its own file cache and rebooting"
|
||||
" your computer removes the error."
|
||||
" If the corrupt page is an index page."
|
||||
@@ -6125,7 +6073,9 @@ corrupt:
|
||||
/* If space is being truncated then avoid ibuf operation.
|
||||
During re-init we have already freed ibuf entries. */
|
||||
if (uncompressed
|
||||
#ifdef MYSQL_COMPRESSION
|
||||
&& !Compression::is_compressed_page(frame)
|
||||
#endif /* MYSQL_COMPRESSION */
|
||||
&& !recv_no_ibuf_operations
|
||||
&& !Tablespace::is_undo_tablespace(bpage->id.space())
|
||||
&& bpage->id.space() != srv_tmp_space.space_id()
|
||||
@@ -7143,8 +7093,9 @@ buf_print_io_instance(
|
||||
/* Print some values to help us with visualizing what is
|
||||
happening with LRU eviction. */
|
||||
fprintf(file,
|
||||
"LRU len: %lu, unzip_LRU len: %lu\n"
|
||||
"I/O sum[%lu]:cur[%lu], unzip sum[%lu]:cur[%lu]\n",
|
||||
"LRU len: " ULINTPF ", unzip_LRU len: " ULINTPF "\n"
|
||||
"I/O sum[" ULINTPF "]:cur[" ULINTPF "], "
|
||||
"unzip sum[" ULINTPF "]:cur[" ULINTPF "]\n",
|
||||
pool_info->lru_len, pool_info->unzip_lru_len,
|
||||
pool_info->io_sum, pool_info->io_cur,
|
||||
pool_info->unzip_sum, pool_info->unzip_cur);
|
||||
@@ -7205,7 +7156,7 @@ buf_print_io(
|
||||
"----------------------\n", file);
|
||||
|
||||
for (i = 0; i < srv_buf_pool_instances; i++) {
|
||||
fprintf(file, "---BUFFER POOL %lu\n", i);
|
||||
fprintf(file, "---BUFFER POOL " ULINTPF "\n", i);
|
||||
buf_print_io_instance(&pool_info[i], file);
|
||||
}
|
||||
}
|
||||
@@ -7683,3 +7634,4 @@ buf_page_decrypt_after_read(
|
||||
return (success);
|
||||
}
|
||||
#endif /* !UNIV_INNOCHECKSUM */
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -30,9 +30,9 @@ Created Aug 11, 2011 Vasil Dimov
|
||||
#include "buf0checksum.h"
|
||||
|
||||
#ifndef UNIV_INNOCHECKSUM
|
||||
|
||||
#include "srv0srv.h"
|
||||
#endif /* !UNIV_INNOCHECKSUM */
|
||||
|
||||
#include "buf0types.h"
|
||||
|
||||
/** the macro MYSQL_SYSVAR_ENUM() requires "long unsigned int" and if we
|
||||
@@ -41,6 +41,8 @@ ha_innodb.cc:12251: error: cannot convert 'srv_checksum_algorithm_t*' to
|
||||
'long unsigned int*' in initialization */
|
||||
ulong srv_checksum_algorithm = SRV_CHECKSUM_ALGORITHM_INNODB;
|
||||
|
||||
/** set if we have found pages matching legacy big endian checksum */
|
||||
bool legacy_big_endian_checksum = false;
|
||||
/** Calculates the CRC32 checksum of a page. The value is stored to the page
|
||||
when it is written to a file and also checked for a match when reading from
|
||||
the file. When reading we allow both normal CRC32 and CRC-legacy-big-endian
|
||||
|
||||
@@ -173,7 +173,7 @@ buf_dblwr_init(
|
||||
Creates the doublewrite buffer to a new InnoDB installation. The header of the
|
||||
doublewrite buffer is placed on the trx system header page.
|
||||
@return true if successful, false if not. */
|
||||
__attribute__((warn_unused_result))
|
||||
MY_ATTRIBUTE((warn_unused_result))
|
||||
bool
|
||||
buf_dblwr_create(void)
|
||||
/*==================*/
|
||||
@@ -576,7 +576,7 @@ buf_dblwr_process(void)
|
||||
MLOG_TRUNCATE record in redo. */
|
||||
bool skip_warning =
|
||||
srv_is_tablespace_truncated(space_id)
|
||||
|| srv_was_tablespace_truncated(space_id);
|
||||
|| srv_was_tablespace_truncated(space);
|
||||
|
||||
if (!skip_warning) {
|
||||
ib::warn() << "Page " << page_no_dblwr
|
||||
@@ -602,6 +602,14 @@ buf_dblwr_process(void)
|
||||
page_id, page_size,
|
||||
0, page_size.physical(), read_buf, NULL, NULL);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
|
||||
ib::warn()
|
||||
<< "Double write buffer recovery: "
|
||||
<< page_id << " read failed with "
|
||||
<< "error: " << ut_strerr(err);
|
||||
}
|
||||
|
||||
/* Is page compressed ? */
|
||||
is_compressed = fil_page_is_compressed_encrypted(read_buf) |
|
||||
fil_page_is_compressed(read_buf);
|
||||
@@ -1187,6 +1195,7 @@ try_again:
|
||||
} else {
|
||||
ut_a(buf_page_get_state(bpage) == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
|
||||
UNIV_MEM_ASSERT_RW(frame,
|
||||
bpage->size.logical());
|
||||
|
||||
|
||||
@@ -184,6 +184,25 @@ buf_load_status(
|
||||
va_end(ap);
|
||||
}
|
||||
|
||||
/** Returns the directory path where the buffer pool dump file will be created.
|
||||
@return directory path */
|
||||
static
|
||||
const char*
|
||||
get_buf_dump_dir()
|
||||
{
|
||||
const char* dump_dir;
|
||||
|
||||
/* The dump file should be created in the default data directory if
|
||||
innodb_data_home_dir is set as an empty string. */
|
||||
if (strcmp(srv_data_home, "") == 0) {
|
||||
dump_dir = fil_path_to_mysql_datadir;
|
||||
} else {
|
||||
dump_dir = srv_data_home;
|
||||
}
|
||||
|
||||
return(dump_dir);
|
||||
}
|
||||
|
||||
/** Generate the path to the buffer pool dump/load file.
|
||||
@param[out] path generated path
|
||||
@param[in] path_size size of 'path', used as in snprintf(3). */
|
||||
@@ -195,7 +214,7 @@ buf_dump_generate_path(
|
||||
{
|
||||
char buf[FN_REFLEN];
|
||||
|
||||
ut_snprintf(buf, sizeof(buf), "%s%c%s", srv_data_home,
|
||||
ut_snprintf(buf, sizeof(buf), "%s%c%s", get_buf_dump_dir(),
|
||||
OS_PATH_SEPARATOR, srv_buf_dump_filename);
|
||||
|
||||
os_file_type_t type;
|
||||
@@ -217,7 +236,7 @@ buf_dump_generate_path(
|
||||
and append srv_buf_dump_filename to it. */
|
||||
char srv_data_home_full[FN_REFLEN];
|
||||
|
||||
my_realpath(srv_data_home_full, srv_data_home, 0);
|
||||
my_realpath(srv_data_home_full, get_buf_dump_dir(), 0);
|
||||
|
||||
if (srv_data_home_full[strlen(srv_data_home_full) - 1]
|
||||
== OS_PATH_SEPARATOR) {
|
||||
@@ -549,13 +568,22 @@ buf_load()
|
||||
dump_n = total_buffer_pools_pages;
|
||||
}
|
||||
|
||||
dump = static_cast<buf_dump_t*>(ut_malloc_nokey(dump_n
|
||||
* sizeof(*dump)));
|
||||
if(dump_n != 0) {
|
||||
dump = static_cast<buf_dump_t*>(ut_malloc_nokey(
|
||||
dump_n * sizeof(*dump)));
|
||||
} else {
|
||||
fclose(f);
|
||||
ut_sprintf_timestamp(now);
|
||||
buf_load_status(STATUS_INFO,
|
||||
"Buffer pool(s) load completed at %s"
|
||||
" (%s was empty)", now, full_filename);
|
||||
return;
|
||||
}
|
||||
|
||||
if (dump == NULL) {
|
||||
fclose(f);
|
||||
buf_load_status(STATUS_ERR,
|
||||
"Cannot allocate " ULINTPF " bytes: %s",
|
||||
"Cannot allocate %lu bytes: %s",
|
||||
(ulint) (dump_n * sizeof(*dump)),
|
||||
strerror(errno));
|
||||
return;
|
||||
@@ -767,8 +795,8 @@ DECLARE_THREAD(buf_dump_thread)(
|
||||
|
||||
srv_buf_dump_thread_active = TRUE;
|
||||
|
||||
buf_dump_status(STATUS_VERBOSE, "not started");
|
||||
buf_load_status(STATUS_VERBOSE, "not started");
|
||||
buf_dump_status(STATUS_VERBOSE, "Dumping of buffer pool not started");
|
||||
buf_load_status(STATUS_VERBOSE, "Loading of buffer pool not started");
|
||||
|
||||
if (srv_buffer_pool_load_at_startup) {
|
||||
buf_load();
|
||||
@@ -800,7 +828,7 @@ DECLARE_THREAD(buf_dump_thread)(
|
||||
|
||||
/* We count the number of threads in os_thread_exit(). A created
|
||||
thread should always use that to exit and not use return() to exit. */
|
||||
os_thread_exit(NULL);
|
||||
os_thread_exit();
|
||||
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2013, 2016, MariaDB Corporation
|
||||
Copyright (c) 2013, 2014, Fusion-io
|
||||
|
||||
@@ -27,6 +27,7 @@ Created 11/11/1995 Heikki Tuuri
|
||||
|
||||
#include "ha_prototypes.h"
|
||||
#include <mysql/service_thd_wait.h>
|
||||
#include <my_dbug.h>
|
||||
|
||||
#include "buf0flu.h"
|
||||
|
||||
@@ -86,6 +87,7 @@ static lsn_t lsn_avg_rate = 0;
|
||||
|
||||
/** Target oldest LSN for the requested flush_sync */
|
||||
static lsn_t buf_flush_sync_lsn = 0;
|
||||
|
||||
#ifdef UNIV_PFS_THREAD
|
||||
mysql_pfs_key_t page_cleaner_thread_key;
|
||||
#endif /* UNIV_PFS_THREAD */
|
||||
@@ -180,10 +182,20 @@ struct page_cleaner_t {
|
||||
page_cleaner_slot_t* slots; /*!< pointer to the slots */
|
||||
bool is_running; /*!< false if attempt
|
||||
to shutdown */
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
ulint n_disabled_debug;
|
||||
/*<! how many of pc threads
|
||||
have been disabled */
|
||||
#endif /* UNIV_DEBUG */
|
||||
};
|
||||
|
||||
static page_cleaner_t* page_cleaner = NULL;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
my_bool innodb_page_cleaner_disabled_debug;
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/** If LRU list of a buf_pool is less than this size then LRU eviction
|
||||
should not happen. This is because when we do LRU flushing we also put
|
||||
the blocks on free list. If LRU list is very small then we can end up
|
||||
@@ -998,7 +1010,7 @@ buf_flush_write_block_low(
|
||||
bool sync) /*!< in: true if sync IO request */
|
||||
{
|
||||
page_t* frame = NULL;
|
||||
ulint space_id = bpage->id.space();
|
||||
ulint space_id = bpage->id.space();
|
||||
atomic_writes_t awrites = fil_space_get_atomic_writes(space_id);
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
@@ -1603,7 +1615,7 @@ The calling thread is not allowed to own any latches on pages!
|
||||
It attempts to make 'max' blocks available in the free list. Note that
|
||||
it is a best effort attempt and it is not guaranteed that after a call
|
||||
to this function there will be 'max' blocks in the free list.*/
|
||||
__attribute__((nonnull))
|
||||
|
||||
void
|
||||
buf_flush_LRU_list_batch(
|
||||
/*=====================*/
|
||||
@@ -1624,9 +1636,7 @@ buf_flush_LRU_list_batch(
|
||||
n->flushed = 0;
|
||||
n->evicted = 0;
|
||||
n->unzip_LRU_evicted = 0;
|
||||
|
||||
ut_ad(buf_pool_mutex_own(buf_pool));
|
||||
|
||||
if (buf_pool->curr_size < buf_pool->old_size
|
||||
&& buf_pool->withdraw_target > 0) {
|
||||
withdraw_depth = buf_pool->withdraw_target
|
||||
@@ -1704,7 +1714,7 @@ buf_flush_LRU_list_batch(
|
||||
/*******************************************************************//**
|
||||
Flush and move pages from LRU or unzip_LRU list to the free list.
|
||||
Whether LRU or unzip_LRU is used depends on the state of the system.*/
|
||||
__attribute__((nonnull))
|
||||
|
||||
static
|
||||
void
|
||||
buf_do_LRU_batch(
|
||||
@@ -1830,7 +1840,7 @@ BUF_FLUSH_LIST, then the caller must not own any latches on pages
|
||||
not guaranteed that the actual number is that big, though)
|
||||
@param[in] lsn_limit in the case of BUF_FLUSH_LIST all blocks whose
|
||||
oldest_modification is smaller than this should be flushed (if their number
|
||||
does not exceed min_n), otherwise ignored*/
|
||||
does not exceed min_n), otherwise ignored */
|
||||
void
|
||||
buf_flush_batch(
|
||||
buf_pool_t* buf_pool,
|
||||
@@ -2003,6 +2013,7 @@ buf_flush_wait_batch_end(
|
||||
}
|
||||
|
||||
/** Do flushing batch of a given type.
|
||||
NOTE: The calling thread is not allowed to own any latches on pages!
|
||||
@param[in,out] buf_pool buffer pool instance
|
||||
@param[in] type flush type
|
||||
@param[in] min_n wished minimum number of blocks flushed
|
||||
@@ -2010,7 +2021,7 @@ buf_flush_wait_batch_end(
|
||||
@param[in] lsn_limit in the case BUF_FLUSH_LIST all blocks whose
|
||||
oldest_modification is smaller than this should be flushed (if their number
|
||||
does not exceed min_n), otherwise ignored
|
||||
@param[out] n the number of pages which were processed is
|
||||
@param[out] n_processed the number of pages which were processed is
|
||||
passed back to caller. Ignored if NULL
|
||||
@retval true if a batch was queued successfully.
|
||||
@retval false if another batch of same type was already running. */
|
||||
@@ -2038,7 +2049,6 @@ buf_flush_do_batch(
|
||||
|
||||
return(true);
|
||||
}
|
||||
|
||||
/**
|
||||
Waits until a flush batch of the given lsn ends
|
||||
@param[in] new_oldest target oldest_modified_lsn to wait for */
|
||||
@@ -2136,6 +2146,7 @@ buf_flush_lists(
|
||||
buf_pool_t* buf_pool;
|
||||
flush_counters_t n;
|
||||
|
||||
memset(&n, 0, sizeof(flush_counters_t));
|
||||
buf_pool = buf_pool_from_array(i);
|
||||
|
||||
if (!buf_flush_do_batch(buf_pool,
|
||||
@@ -2237,10 +2248,8 @@ buf_flush_single_page_from_LRU(
|
||||
} else {
|
||||
mutex_exit(block_mutex);
|
||||
}
|
||||
|
||||
ut_ad(!mutex_own(block_mutex));
|
||||
}
|
||||
|
||||
if (!freed) {
|
||||
/* Can't find a single flushable page. */
|
||||
ut_ad(!bpage);
|
||||
@@ -2255,6 +2264,8 @@ buf_flush_single_page_from_LRU(
|
||||
scanned);
|
||||
}
|
||||
|
||||
|
||||
|
||||
ut_ad(!buf_pool_mutex_own(buf_pool));
|
||||
return(freed);
|
||||
}
|
||||
@@ -2283,12 +2294,10 @@ buf_flush_LRU_list(
|
||||
}
|
||||
|
||||
ut_ad(buf_pool);
|
||||
|
||||
/* srv_LRU_scan_depth can be arbitrarily large value.
|
||||
We cap it with current LRU size. */
|
||||
buf_pool_mutex_enter(buf_pool);
|
||||
scan_depth = UT_LIST_GET_LEN(buf_pool->LRU);
|
||||
|
||||
if (buf_pool->curr_size < buf_pool->old_size
|
||||
&& buf_pool->withdraw_target > 0) {
|
||||
withdraw_depth = buf_pool->withdraw_target
|
||||
@@ -2296,16 +2305,13 @@ buf_flush_LRU_list(
|
||||
} else {
|
||||
withdraw_depth = 0;
|
||||
}
|
||||
|
||||
buf_pool_mutex_exit(buf_pool);
|
||||
|
||||
if (withdraw_depth > srv_LRU_scan_depth) {
|
||||
scan_depth = ut_min(withdraw_depth, scan_depth);
|
||||
} else {
|
||||
scan_depth = ut_min(static_cast<ulint>(srv_LRU_scan_depth),
|
||||
scan_depth);
|
||||
}
|
||||
|
||||
/* Currently one of page_cleaners is the only thread
|
||||
that can trigger an LRU flush at the same time.
|
||||
So, it is not possible that a batch triggered during
|
||||
@@ -2328,6 +2334,7 @@ buf_flush_LRU_lists(void)
|
||||
{
|
||||
ulint n_flushed = 0;
|
||||
for (ulint i = 0; i < srv_buf_pool_instances; i++) {
|
||||
|
||||
n_flushed += buf_flush_LRU_list(buf_pool_from_array(i));
|
||||
}
|
||||
|
||||
@@ -2718,8 +2725,6 @@ pc_sleep_if_needed(
|
||||
return(OS_SYNC_TIME_EXCEEDED);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/******************************************************************//**
|
||||
Initialize page_cleaner. */
|
||||
void
|
||||
@@ -2742,6 +2747,8 @@ buf_flush_page_cleaner_init(void)
|
||||
ut_zalloc_nokey(page_cleaner->n_slots
|
||||
* sizeof(*page_cleaner->slots)));
|
||||
|
||||
ut_d(page_cleaner->n_disabled_debug = 0);
|
||||
|
||||
page_cleaner->is_running = true;
|
||||
}
|
||||
|
||||
@@ -2997,6 +3004,122 @@ buf_flush_page_cleaner_set_priority(
|
||||
}
|
||||
#endif /* UNIV_LINUX */
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/** Loop used to disable page cleaner threads. */
|
||||
static
|
||||
void
|
||||
buf_flush_page_cleaner_disabled_loop(void)
|
||||
{
|
||||
ut_ad(page_cleaner != NULL);
|
||||
|
||||
if (!innodb_page_cleaner_disabled_debug) {
|
||||
/* We return to avoid entering and exiting mutex. */
|
||||
return;
|
||||
}
|
||||
|
||||
mutex_enter(&page_cleaner->mutex);
|
||||
page_cleaner->n_disabled_debug++;
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
|
||||
while (innodb_page_cleaner_disabled_debug
|
||||
&& srv_shutdown_state == SRV_SHUTDOWN_NONE
|
||||
&& page_cleaner->is_running) {
|
||||
|
||||
os_thread_sleep(100000); /* [A] */
|
||||
}
|
||||
|
||||
/* We need to wait for threads exiting here, otherwise we would
|
||||
encounter problem when we quickly perform following steps:
|
||||
1) SET GLOBAL innodb_page_cleaner_disabled_debug = 1;
|
||||
2) SET GLOBAL innodb_page_cleaner_disabled_debug = 0;
|
||||
3) SET GLOBAL innodb_page_cleaner_disabled_debug = 1;
|
||||
That's because after step 1 this thread could still be sleeping
|
||||
inside the loop above at [A] and steps 2, 3 could happen before
|
||||
this thread wakes up from [A]. In such case this thread would
|
||||
not re-increment n_disabled_debug and we would be waiting for
|
||||
it forever in buf_flush_page_cleaner_disabled_debug_update(...).
|
||||
|
||||
Therefore we are waiting in step 2 for this thread exiting here. */
|
||||
|
||||
mutex_enter(&page_cleaner->mutex);
|
||||
page_cleaner->n_disabled_debug--;
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
}
|
||||
|
||||
/** Disables page cleaner threads (coordinator and workers).
|
||||
It's used by: SET GLOBAL innodb_page_cleaner_disabled_debug = 1 (0).
|
||||
@param[in] thd thread handle
|
||||
@param[in] var pointer to system variable
|
||||
@param[out] var_ptr where the formal string goes
|
||||
@param[in] save immediate result from check function */
|
||||
void
|
||||
buf_flush_page_cleaner_disabled_debug_update(
|
||||
THD* thd,
|
||||
struct st_mysql_sys_var* var,
|
||||
void* var_ptr,
|
||||
const void* save)
|
||||
{
|
||||
if (page_cleaner == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (!*static_cast<const my_bool*>(save)) {
|
||||
if (!innodb_page_cleaner_disabled_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
innodb_page_cleaner_disabled_debug = false;
|
||||
|
||||
/* Enable page cleaner threads. */
|
||||
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
|
||||
mutex_enter(&page_cleaner->mutex);
|
||||
const ulint n = page_cleaner->n_disabled_debug;
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
/* Check if all threads have been enabled, to avoid
|
||||
problem when we decide to re-disable them soon. */
|
||||
if (n == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (innodb_page_cleaner_disabled_debug) {
|
||||
return;
|
||||
}
|
||||
|
||||
innodb_page_cleaner_disabled_debug = true;
|
||||
|
||||
while (srv_shutdown_state == SRV_SHUTDOWN_NONE) {
|
||||
/* Workers are possibly sleeping on is_requested.
|
||||
|
||||
We have to wake them, otherwise they could possibly
|
||||
have never noticed, that they should be disabled,
|
||||
and we would wait for them here forever.
|
||||
|
||||
That's why we have sleep-loop instead of simply
|
||||
waiting on some disabled_debug_event. */
|
||||
os_event_set(page_cleaner->is_requested);
|
||||
|
||||
mutex_enter(&page_cleaner->mutex);
|
||||
|
||||
ut_ad(page_cleaner->n_disabled_debug
|
||||
<= srv_n_page_cleaners);
|
||||
|
||||
if (page_cleaner->n_disabled_debug
|
||||
== srv_n_page_cleaners) {
|
||||
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
|
||||
os_thread_sleep(100000);
|
||||
}
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/******************************************************************//**
|
||||
page_cleaner thread tasked with flushing dirty pages from the buffer
|
||||
pools. As of now we'll have only one coordinator.
|
||||
@@ -3014,6 +3137,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(
|
||||
ulint last_activity = srv_get_activity_count();
|
||||
ulint last_pages = 0;
|
||||
|
||||
my_thread_init();
|
||||
#ifdef UNIV_PFS_THREAD
|
||||
/* JAN: TODO: MySQL 5.7 PSI
|
||||
pfs_register_thread(page_cleaner_thread_key);
|
||||
@@ -3132,6 +3256,7 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(
|
||||
} else {
|
||||
warn_interval *= 2;
|
||||
}
|
||||
|
||||
warn_count = warn_interval;
|
||||
} else {
|
||||
--warn_count;
|
||||
@@ -3268,6 +3393,8 @@ DECLARE_THREAD(buf_flush_page_cleaner_coordinator)(
|
||||
/* no activity, but woken up by event */
|
||||
n_flushed = 0;
|
||||
}
|
||||
|
||||
ut_d(buf_flush_page_cleaner_disabled_loop());
|
||||
}
|
||||
|
||||
ut_ad(srv_shutdown_state > 0);
|
||||
@@ -3367,7 +3494,7 @@ thread_exit:
|
||||
|
||||
/* We count the number of threads in os_thread_exit(). A created
|
||||
thread should always use that to exit and not use return() to exit. */
|
||||
os_thread_exit(NULL);
|
||||
os_thread_exit();
|
||||
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
@@ -3379,10 +3506,12 @@ extern "C"
|
||||
os_thread_ret_t
|
||||
DECLARE_THREAD(buf_flush_page_cleaner_worker)(
|
||||
/*==========================================*/
|
||||
void* arg __attribute__((unused)))
|
||||
void* arg MY_ATTRIBUTE((unused)))
|
||||
/*!< in: a dummy parameter required by
|
||||
os_thread_create */
|
||||
{
|
||||
my_thread_init();
|
||||
|
||||
mutex_enter(&page_cleaner->mutex);
|
||||
page_cleaner->n_workers++;
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
@@ -3401,6 +3530,8 @@ DECLARE_THREAD(buf_flush_page_cleaner_worker)(
|
||||
while (true) {
|
||||
os_event_wait(page_cleaner->is_requested);
|
||||
|
||||
ut_d(buf_flush_page_cleaner_disabled_loop());
|
||||
|
||||
if (!page_cleaner->is_running) {
|
||||
break;
|
||||
}
|
||||
@@ -3412,7 +3543,9 @@ DECLARE_THREAD(buf_flush_page_cleaner_worker)(
|
||||
page_cleaner->n_workers--;
|
||||
mutex_exit(&page_cleaner->mutex);
|
||||
|
||||
os_thread_exit(NULL);
|
||||
my_thread_end();
|
||||
|
||||
os_thread_exit();
|
||||
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
@@ -3549,7 +3682,6 @@ buf_flush_validate(
|
||||
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
|
||||
/******************************************************************//**
|
||||
Check if there are any dirty pages that belong to a space id in the flush
|
||||
list in a particular buffer pool.
|
||||
|
||||
@@ -145,7 +145,7 @@ If a compressed page is freed other compressed pages may be relocated.
caller needs to free the page to the free list
@retval false if BUF_BLOCK_ZIP_PAGE was removed from page_hash. In
this case the block is already returned to the buddy allocator. */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
static MY_ATTRIBUTE((warn_unused_result))
bool
buf_LRU_block_remove_hashed(
/*========================*/
@@ -376,7 +376,7 @@ want to hog the CPU and resources. Release the buffer pool and block
mutex and try to force a context switch. Then reacquire the same mutexes.
The current page is "fixed" before the release of the mutexes and then
"unfixed" again once we have reacquired the mutexes. */
static MY_ATTRIBUTE((nonnull))
static
void
buf_flush_yield(
/*============*/
@@ -419,7 +419,7 @@ If we have hogged the resources for too long then release the buffer
pool and flush list mutex and do a thread yield. Set the current page
to "sticky" so that it is not relocated during the yield.
@return true if yielded */
static MY_ATTRIBUTE((nonnull(1), warn_unused_result))
static MY_ATTRIBUTE((warn_unused_result))
bool
buf_flush_try_yield(
/*================*/
@@ -462,7 +462,7 @@ buf_flush_try_yield(
Removes a single page from a given tablespace inside a specific
buffer pool instance.
@return true if page was removed. */
static MY_ATTRIBUTE((nonnull, warn_unused_result))
static MY_ATTRIBUTE((warn_unused_result))
bool
buf_flush_or_remove_page(
/*=====================*/
@@ -548,7 +548,7 @@ the list as they age towards the tail of the LRU.
@retval DB_SUCCESS if all freed
@retval DB_FAIL if not all freed
@retval DB_INTERRUPTED if the transaction was interrupted */
static MY_ATTRIBUTE((nonnull(1), warn_unused_result))
static MY_ATTRIBUTE((warn_unused_result))
dberr_t
buf_flush_or_remove_pages(
/*======================*/
@@ -668,7 +668,7 @@ Remove or flush all the dirty pages that belong to a given tablespace
inside a specific buffer pool instance. The pages will remain in the LRU
list and will be evicted from the LRU list as they age and move towards
the tail of the LRU list. */
static MY_ATTRIBUTE((nonnull(1)))
static
void
buf_flush_dirty_pages(
/*==================*/
@@ -717,7 +717,7 @@ buf_flush_dirty_pages(
/******************************************************************//**
Remove all pages that belong to a given tablespace inside a specific
buffer pool instance when we are DISCARDing the tablespace. */
static MY_ATTRIBUTE((nonnull))
static
void
buf_LRU_remove_all_pages(
/*=====================*/
@@ -856,7 +856,7 @@ buffer pool instance when we are deleting the data file(s) of that
tablespace. The pages still remain a part of LRU and are evicted from
the list as they age towards the tail of the LRU only if buf_remove
is BUF_REMOVE_FLUSH_NO_WRITE. */
static MY_ATTRIBUTE((nonnull(1)))
static
void
buf_LRU_remove_pages(
/*=================*/
|
||||
@@ -1902,7 +1902,7 @@ func_exit:
|
||||
DBUG_PRINT("ib_buf", ("free page %u:%u",
|
||||
bpage->id.space(), bpage->id.page_no()));
|
||||
|
||||
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
|
||||
ut_ad(rw_lock_own(hash_lock, RW_LOCK_X));
|
||||
ut_ad(buf_page_can_relocate(bpage));
|
||||
|
||||
if (!buf_LRU_block_remove_hashed(bpage, zip)) {
|
||||
@@ -2102,8 +2102,9 @@ buf_LRU_block_free_non_file_page(
|
||||
case BUF_BLOCK_READY_FOR_USE:
|
||||
break;
|
||||
default:
|
||||
fprintf(stderr, "InnoDB: Error: Block %p incorrect state %s in buf_LRU_block_free_non_file_page()\n",
|
||||
block, buf_get_state_name(block));
|
||||
ib::error() << "Block:" << block
|
||||
<< " incorrect state:" << buf_get_state_name(block)
|
||||
<< " in buf_LRU_block_free_non_file_page";
|
||||
return; /* Continue */
|
||||
}
|
||||
|
||||
@@ -2280,27 +2281,27 @@ buf_LRU_block_remove_hashed(
|
||||
}
|
||||
|
||||
hashed_bpage = buf_page_hash_get_low(buf_pool, bpage->id);
|
||||
|
||||
if (bpage != hashed_bpage) {
|
||||
ib::error() << "Page " << bpage->id
|
||||
<< " not found in the hash table";
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
fprintf(stderr,
|
||||
"InnoDB: in_page_hash %lu in_zip_hash %lu\n"
|
||||
" in_free_list %lu in_flush_list %lu in_LRU_list %lu\n"
|
||||
" zip.data %p zip_size %lu page_state %d\n",
|
||||
bpage->in_page_hash, bpage->in_zip_hash,
|
||||
bpage->in_free_list, bpage->in_flush_list,
|
||||
bpage->in_LRU_list, bpage->zip.data,
|
||||
bpage->size.logical(),
|
||||
buf_page_get_state(bpage));
|
||||
|
||||
|
||||
ib::error()
|
||||
<< "in_page_hash:" << bpage->in_page_hash
|
||||
<< " in_zip_hash:" << bpage->in_zip_hash
|
||||
// << " in_free_list:"<< bpage->in_fee_list
|
||||
<< " in_flush_list:" << bpage->in_flush_list
|
||||
<< " in_LRU_list:" << bpage->in_LRU_list
|
||||
<< " zip.data:" << bpage->zip.data
|
||||
<< " zip_size:" << bpage->size.logical()
|
||||
<< " page_state:" << buf_page_get_state(bpage);
|
||||
#else
|
||||
fprintf(stderr,
|
||||
"InnoDB: zip.data %p zip_size %lu page_state %d\n",
|
||||
bpage->zip.data,
|
||||
bpage->size.logical(),
|
||||
buf_page_get_state(bpage));
|
||||
ib::error()
|
||||
<< " zip.data:" << bpage->zip.data
|
||||
<< " zip_size:" << bpage->size.logical()
|
||||
<< " page_state:" << buf_page_get_state(bpage);
|
||||
#endif
|
||||
|
||||
if (hashed_bpage) {
|
||||
@@ -2751,14 +2752,14 @@ buf_LRU_print_instance(
|
||||
case BUF_BLOCK_FILE_PAGE:
|
||||
frame = buf_block_get_frame((buf_block_t*) bpage);
|
||||
fprintf(stderr, "\ntype %lu"
|
||||
" index id " UINT32PF "\n",
|
||||
" index id " IB_ID_FMT "\n",
|
||||
(ulong) fil_page_get_type(frame),
|
||||
btr_page_get_index_id(frame));
|
||||
break;
|
||||
case BUF_BLOCK_ZIP_PAGE:
|
||||
frame = bpage->zip.data;
|
||||
fprintf(stderr, "\ntype %lu size %lu"
|
||||
" index id " UINT32PF "\n",
|
||||
" index id " IB_ID_FMT "\n",
|
||||
(ulong) fil_page_get_type(frame),
|
||||
(ulong) bpage->size.physical(),
|
||||
btr_page_get_index_id(frame));
|
||||
|
||||
@@ -350,7 +350,7 @@ DECLARE_THREAD(mtflush_io_thread)(
|
||||
}
|
||||
}
|
||||
|
||||
os_thread_exit(NULL);
|
||||
os_thread_exit();
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
|
||||
|
||||
@@ -746,10 +746,10 @@ buf_read_ahead_linear(
|
||||
|
||||
if (count) {
|
||||
DBUG_PRINT("ib_buf", ("linear read-ahead %lu pages, "
|
||||
UINT32PF ":" UINT32PF,
|
||||
"%lu:%lu",
|
||||
count,
|
||||
page_id.space(),
|
||||
page_id.page_no()));
|
||||
(ulint)page_id.space(),
|
||||
(ulint)page_id.page_no()));
|
||||
}
|
||||
|
||||
/* Read ahead is considered one I/O operation for the purpose of
|
||||
@@ -773,13 +773,6 @@ buf_read_ibuf_merge_pages(
|
||||
to get read in, before this
|
||||
function returns */
|
||||
const ulint* space_ids, /*!< in: array of space ids */
|
||||
const ib_uint64_t* space_versions,/*!< in: the spaces must have
|
||||
this version number
|
||||
(timestamp), otherwise we
|
||||
discard the read; we use this
|
||||
to cancel reads if DISCARD +
|
||||
IMPORT may have changed the
|
||||
tablespace size */
|
||||
const ulint* page_nos, /*!< in: array of page numbers
|
||||
to read, with the highest page
|
||||
number the last in the
|
||||
|
||||
@@ -447,7 +447,7 @@ print_hex:
|
||||
fputs(" Hex: ",stderr);
|
||||
|
||||
for (i = 0; i < len; i++) {
|
||||
fprintf(stderr, "%02lx", (ulint) *data++);
|
||||
fprintf(stderr, "%02lx", static_cast<ulong>(*data++));
|
||||
}
|
||||
|
||||
if (dfield_is_ext(dfield)) {
|
||||
@@ -837,6 +837,7 @@ dfield_t::clone(
|
||||
obj->ext = ext;
|
||||
obj->len = len;
|
||||
obj->type = type;
|
||||
obj->spatial_status = spatial_status;
|
||||
|
||||
if (len != UNIV_SQL_NULL) {
|
||||
obj->data = obj + 1;
|
||||
|
||||
@@ -516,9 +516,9 @@ dict_boot(void)
|
||||
dict_load_sys_table(dict_sys->sys_indexes);
|
||||
dict_load_sys_table(dict_sys->sys_fields);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
}
|
||||
|
||||
return(err);
|
||||
}
|
||||
|
||||
@@ -494,8 +494,11 @@ dict_build_tablespace_for_table(
|
||||
|
||||
/* Determine the tablespace flags. */
|
||||
bool is_temp = dict_table_is_temporary(table);
|
||||
bool is_encrypted = dict_table_is_encrypted(table);
|
||||
bool has_data_dir = DICT_TF_HAS_DATA_DIR(table->flags);
|
||||
ulint fsp_flags = dict_tf_to_fsp_flags(table->flags, is_temp);
|
||||
ulint fsp_flags = dict_tf_to_fsp_flags(table->flags,
|
||||
is_temp,
|
||||
is_encrypted);
|
||||
|
||||
/* Determine the full filepath */
|
||||
if (is_temp) {
|
||||
@@ -544,9 +547,14 @@ dict_build_tablespace_for_table(
|
||||
mtr.set_named_space(table->space);
|
||||
dict_disable_redo_if_temporary(table, &mtr);
|
||||
|
||||
fsp_header_init(table->space, FIL_IBD_FILE_INITIAL_SIZE, &mtr);
|
||||
bool ret = fsp_header_init(table->space,
|
||||
FIL_IBD_FILE_INITIAL_SIZE,
|
||||
&mtr);
|
||||
|
||||
mtr_commit(&mtr);
|
||||
if (!ret) {
|
||||
return(DB_ERROR);
|
||||
}
|
||||
} else {
|
||||
/* We do not need to build a tablespace for this table. It
|
||||
is already built. Just find the correct tablespace ID. */
|
||||
@@ -2290,6 +2298,197 @@ dict_create_add_foreign_to_dictionary(
|
||||
DBUG_RETURN(error);
|
||||
}
|
||||
|
||||
/** Check whether a column is in an index by the column name
|
||||
@param[in] col_name column name for the column to be checked
|
||||
@param[in] index the index to be searched
|
||||
@return true if this column is in the index, otherwise, false */
|
||||
static
|
||||
bool
|
||||
dict_index_has_col_by_name(
|
||||
/*=======================*/
|
||||
const char* col_name,
|
||||
const dict_index_t* index)
|
||||
{
|
||||
for (ulint i = 0; i < index->n_fields; i++) {
|
||||
dict_field_t* field = dict_index_get_nth_field(index, i);
|
||||
|
||||
if (strcmp(field->name, col_name) == 0) {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
return(false);
|
||||
}
|
||||
|
||||
/** Check whether the foreign constraint could be on a column that is
|
||||
part of a virtual index (index contains virtual column) in the table
|
||||
@param[in] fk_col_name FK column name to be checked
|
||||
@param[in] table the table
|
||||
@return true if this column is indexed with other virtual columns */
|
||||
bool
|
||||
dict_foreign_has_col_in_v_index(
|
||||
const char* fk_col_name,
|
||||
const dict_table_t* table)
|
||||
{
|
||||
/* virtual column can't be Primary Key, so start with secondary index */
|
||||
for (dict_index_t* index = dict_table_get_next_index(
|
||||
dict_table_get_first_index(table));
|
||||
index;
|
||||
index = dict_table_get_next_index(index)) {
|
||||
|
||||
if (dict_index_has_virtual(index)) {
|
||||
if (dict_index_has_col_by_name(fk_col_name, index)) {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return(false);
|
||||
}
|
||||
|
||||
|
||||
/** Check whether the foreign constraint could be on a column that is
|
||||
a base column of some indexed virtual columns.
|
||||
@param[in] col_name column name for the column to be checked
|
||||
@param[in] table the table
|
||||
@return true if this column is a base column, otherwise, false */
|
||||
bool
|
||||
dict_foreign_has_col_as_base_col(
|
||||
const char* col_name,
|
||||
const dict_table_t* table)
|
||||
{
|
||||
/* Loop through each virtual column and check if its base column has
|
||||
the same name as the column name being checked */
|
||||
for (ulint i = 0; i < table->n_v_cols; i++) {
|
||||
dict_v_col_t* v_col = dict_table_get_nth_v_col(table, i);
|
||||
|
||||
/* Only check if the virtual column is indexed */
|
||||
if (!v_col->m_col.ord_part) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (ulint j = 0; j < v_col->num_base; j++) {
|
||||
if (strcmp(col_name, dict_table_get_col_name(
|
||||
table,
|
||||
v_col->base_col[j]->ind)) == 0) {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return(false);
|
||||
}
|
||||
|
||||
/** Check if a foreign constraint is on the given column name.
|
||||
@param[in] col_name column name to be searched for fk constraint
|
||||
@param[in] table table to which foreign key constraint belongs
|
||||
@return true if fk constraint is present on the table, false otherwise. */
|
||||
static
|
||||
bool
|
||||
dict_foreign_base_for_stored(
|
||||
const char* col_name,
|
||||
const dict_table_t* table)
|
||||
{
|
||||
/* Loop through each stored column and check if its base column has
|
||||
the same name as the column name being checked */
|
||||
dict_s_col_list::const_iterator it;
|
||||
for (it = table->s_cols->begin();
|
||||
it != table->s_cols->end(); ++it) {
|
||||
dict_s_col_t s_col = *it;
|
||||
|
||||
for (ulint j = 0; j < s_col.num_base; j++) {
|
||||
if (strcmp(col_name, dict_table_get_col_name(
|
||||
table,
|
||||
s_col.base_col[j]->ind)) == 0) {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return(false);
|
||||
}
|
||||
|
||||
/** Check if a foreign constraint is on columns served as base columns
|
||||
of any stored column. This is to prevent creating SET NULL or CASCADE
|
||||
constraint on such columns
|
||||
@param[in] local_fk_set set of foreign key objects, to be added to
|
||||
the dictionary tables
|
||||
@param[in] table table to which the foreign key objects in
|
||||
local_fk_set belong to
|
||||
@return true if yes, otherwise, false */
|
||||
bool
|
||||
dict_foreigns_has_s_base_col(
|
||||
const dict_foreign_set& local_fk_set,
|
||||
const dict_table_t* table)
|
||||
{
|
||||
dict_foreign_t* foreign;
|
||||
|
||||
if (table->s_cols == NULL) {
|
||||
return (false);
|
||||
}
|
||||
|
||||
for (dict_foreign_set::const_iterator it = local_fk_set.begin();
|
||||
it != local_fk_set.end(); ++it) {
|
||||
|
||||
foreign = *it;
|
||||
ulint type = foreign->type;
|
||||
|
||||
type &= ~(DICT_FOREIGN_ON_DELETE_NO_ACTION
|
||||
| DICT_FOREIGN_ON_UPDATE_NO_ACTION);
|
||||
|
||||
if (type == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (ulint i = 0; i < foreign->n_fields; i++) {
|
||||
/* Check if the constraint is on a column that
|
||||
is a base column of any stored column */
|
||||
if (dict_foreign_base_for_stored(
|
||||
foreign->foreign_col_names[i], table)) {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return(false);
|
||||
}
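
The loop above strips the NO ACTION bits from foreign->type; anything that remains corresponds to a CASCADE or SET NULL action, and only those constraints need the stored/base-column check. A small standalone sketch of that test (flag names and values below are illustrative placeholders, not the DICT_FOREIGN_* constants):

// Standalone sketch (not InnoDB code) of the type test above: clear the
// NO ACTION bits and anything left means a CASCADE or SET NULL action.
typedef unsigned long fk_type_t;

static const fk_type_t FK_ON_DELETE_CASCADE   = 1;
static const fk_type_t FK_ON_DELETE_SET_NULL  = 2;
static const fk_type_t FK_ON_UPDATE_CASCADE   = 4;
static const fk_type_t FK_ON_UPDATE_SET_NULL  = 8;
static const fk_type_t FK_ON_DELETE_NO_ACTION = 16;
static const fk_type_t FK_ON_UPDATE_NO_ACTION = 32;

static bool fk_has_cascading_action(fk_type_t type)
{
	type &= ~(FK_ON_DELETE_NO_ACTION | FK_ON_UPDATE_NO_ACTION);
	return type != 0;   // only CASCADE / SET NULL bits can remain
}
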
|
||||
|
||||
/** Check if a column is in foreign constraint with CASCADE properties or
|
||||
SET NULL
|
||||
@param[in] table table
|
||||
@param[in] fk_col_name name for the column to be checked
|
||||
@return true if the column is in foreign constraint, otherwise, false */
|
||||
bool
|
||||
dict_foreigns_has_this_col(
|
||||
const dict_table_t* table,
|
||||
const char* col_name)
|
||||
{
|
||||
dict_foreign_t* foreign;
|
||||
const dict_foreign_set* local_fk_set = &table->foreign_set;
|
||||
|
||||
for (dict_foreign_set::const_iterator it = local_fk_set->begin();
|
||||
it != local_fk_set->end();
|
||||
++it) {
|
||||
foreign = *it;
|
||||
ut_ad(foreign->id != NULL);
|
||||
ulint type = foreign->type;
|
||||
|
||||
type &= ~(DICT_FOREIGN_ON_DELETE_NO_ACTION
|
||||
| DICT_FOREIGN_ON_UPDATE_NO_ACTION);
|
||||
|
||||
if (type == 0) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (ulint i = 0; i < foreign->n_fields; i++) {
|
||||
if (strcmp(foreign->foreign_col_names[i],
|
||||
col_name) == 0) {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
}
|
||||
return(false);
|
||||
}
|
||||
|
||||
/** Adds the given set of foreign key objects to the dictionary tables
|
||||
in the database. This function does not modify the dictionary cache. The
|
||||
caller must ensure that all foreign key objects contain a valid constraint
|
||||
|
||||
403	storage/innobase/dict/dict0defrag_bg.cc	Normal file
@@ -0,0 +1,403 @@
|
||||
/*****************************************************************************

Copyright (c) 2016, MariaDB Corporation. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA

*****************************************************************************/

/**************************************************//**
@file dict/dict0defrag_bg.cc
Defragmentation routines.

Created 25/08/2016 Jan Lindström
*******************************************************/

#include "dict0dict.h"
#include "dict0stats.h"
#include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "row0mysql.h"
#include "srv0start.h"
#include "ut0new.h"

#ifdef UNIV_NONINL
# include "dict0stats_bg.ic"
#endif

#include <vector>

static ib_mutex_t		defrag_pool_mutex;

#ifdef MYSQL_PFS
static mysql_pfs_key_t		defrag_pool_mutex_key;
#endif

/** The number of tables that can be added to "defrag_pool" before
it is enlarged */
static const ulint DEFRAG_POOL_INITIAL_SLOTS = 128;

/** Indices whose defrag stats need to be saved to persistent storage.*/
struct defrag_pool_item_t {
	table_id_t	table_id;
	index_id_t	index_id;
};

/** Allocator type, used by std::vector */
typedef ut_allocator<defrag_pool_item_t>
	defrag_pool_allocator_t;

/** The multitude of tables to be defragmented- an STL vector */
typedef std::vector<defrag_pool_item_t, defrag_pool_allocator_t>
	defrag_pool_t;

/** Iterator type for iterating over the elements of objects of type
defrag_pool_t. */
typedef defrag_pool_t::iterator		defrag_pool_iterator_t;

/** Pool where we store information on which tables are to be processed
by background defragmentation. */
static defrag_pool_t*			defrag_pool;

extern bool dict_stats_start_shutdown;
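
defrag_pool is a plain vector guarded by its own mutex; entries are added with duplicate suppression and consumed LIFO from the back, as the dict_stats_defrag_pool_add()/get() functions below show. A standalone sketch of the same container pattern with standard-library types (illustrative names, not InnoDB's):

// Standalone sketch (not InnoDB code) of the defrag_pool container pattern:
// a mutex-protected vector of (table id, index id) pairs with duplicate
// suppression on add and LIFO removal on get.
#include <cstdint>
#include <mutex>
#include <vector>

struct defrag_item { uint64_t table_id; uint64_t index_id; };

static std::vector<defrag_item> pool;
static std::mutex               pool_mutex;

static void pool_add(uint64_t table_id, uint64_t index_id)
{
	std::lock_guard<std::mutex> g(pool_mutex);
	for (const defrag_item& it : pool) {           // quit if already queued
		if (it.table_id == table_id && it.index_id == index_id) {
			return;
		}
	}
	pool.push_back({table_id, index_id});
}

static bool pool_get(uint64_t& table_id, uint64_t& index_id)
{
	std::lock_guard<std::mutex> g(pool_mutex);
	if (pool.empty()) {
		return false;
	}
	table_id = pool.back().table_id;               // LIFO, like back()/pop_back()
	index_id = pool.back().index_id;
	pool.pop_back();
	return true;
}

Only the table id and index id are stored, so the table can be closed after being queued and reopened when the background thread gets to it.
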
|
||||
|
||||
/*****************************************************************//**
|
||||
Initialize the defrag pool, called once during thread initialization. */
|
||||
void
|
||||
dict_defrag_pool_init(void)
|
||||
/*=======================*/
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
/* JAN: TODO: MySQL 5.7 PSI
|
||||
const PSI_memory_key key2 = mem_key_dict_defrag_pool_t;
|
||||
|
||||
defrag_pool = UT_NEW(defrag_pool_t(defrag_pool_allocator_t(key2)), key2);
|
||||
|
||||
recalc_pool->reserve(RECALC_POOL_INITIAL_SLOTS);
|
||||
*/
|
||||
defrag_pool = new std::vector<defrag_pool_item_t, defrag_pool_allocator_t>();
|
||||
|
||||
/* We choose SYNC_STATS_DEFRAG to be below SYNC_FSP_PAGE. */
|
||||
mutex_create(LATCH_ID_DEFRAGMENT_MUTEX, &defrag_pool_mutex);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Free the resources occupied by the defrag pool, called once during
|
||||
thread de-initialization. */
|
||||
void
|
||||
dict_defrag_pool_deinit(void)
|
||||
/*=========================*/
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
defrag_pool->clear();
|
||||
mutex_free(&defrag_pool_mutex);
|
||||
|
||||
UT_DELETE(defrag_pool);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Get an index from the auto defrag pool. The returned index id is removed
|
||||
from the pool.
|
||||
@return true if the pool was non-empty and "id" was set, false otherwise */
|
||||
static
|
||||
bool
|
||||
dict_stats_defrag_pool_get(
|
||||
/*=======================*/
|
||||
table_id_t* table_id, /*!< out: table id, or unmodified if
|
||||
list is empty */
|
||||
index_id_t* index_id) /*!< out: index id, or unmodified if
|
||||
list is empty */
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
mutex_enter(&defrag_pool_mutex);
|
||||
|
||||
if (defrag_pool->empty()) {
|
||||
mutex_exit(&defrag_pool_mutex);
|
||||
return(false);
|
||||
}
|
||||
|
||||
defrag_pool_item_t& item = defrag_pool->back();
|
||||
*table_id = item.table_id;
|
||||
*index_id = item.index_id;
|
||||
|
||||
defrag_pool->pop_back();
|
||||
|
||||
mutex_exit(&defrag_pool_mutex);
|
||||
|
||||
return(true);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Add an index in a table to the defrag pool, which is processed by the
|
||||
background stats gathering thread. Only the table id and index id are
|
||||
added to the list, so the table can be closed after being enqueued and
|
||||
it will be opened when needed. If the table or index does not exist later
|
||||
(has been DROPped), then it will be removed from the pool and skipped. */
|
||||
void
|
||||
dict_stats_defrag_pool_add(
|
||||
/*=======================*/
|
||||
const dict_index_t* index) /*!< in: table to add */
|
||||
{
|
||||
defrag_pool_item_t item;
|
||||
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
mutex_enter(&defrag_pool_mutex);
|
||||
|
||||
/* quit if already in the list */
|
||||
for (defrag_pool_iterator_t iter = defrag_pool->begin();
|
||||
iter != defrag_pool->end();
|
||||
++iter) {
|
||||
if ((*iter).table_id == index->table->id
|
||||
&& (*iter).index_id == index->id) {
|
||||
mutex_exit(&defrag_pool_mutex);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
item.table_id = index->table->id;
|
||||
item.index_id = index->id;
|
||||
defrag_pool->push_back(item);
|
||||
|
||||
mutex_exit(&defrag_pool_mutex);
|
||||
|
||||
os_event_set(dict_stats_event);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Delete a given index from the auto defrag pool. */
|
||||
void
|
||||
dict_stats_defrag_pool_del(
|
||||
/*=======================*/
|
||||
const dict_table_t* table, /*!<in: if given, remove
|
||||
all entries for the table */
|
||||
const dict_index_t* index) /*!< in: if given, remove this index */
|
||||
{
|
||||
ut_a((table && !index) || (!table && index));
|
||||
ut_ad(!srv_read_only_mode);
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
|
||||
mutex_enter(&defrag_pool_mutex);
|
||||
|
||||
defrag_pool_iterator_t iter = defrag_pool->begin();
|
||||
while (iter != defrag_pool->end()) {
|
||||
if ((table && (*iter).table_id == table->id)
|
||||
|| (index
|
||||
&& (*iter).table_id == index->table->id
|
||||
&& (*iter).index_id == index->id)) {
|
||||
/* erase() invalidates the iterator */
|
||||
iter = defrag_pool->erase(iter);
|
||||
if (index)
|
||||
break;
|
||||
} else {
|
||||
iter++;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_exit(&defrag_pool_mutex);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats. */
|
||||
static
|
||||
void
|
||||
dict_stats_process_entry_from_defrag_pool()
|
||||
/*=======================================*/
|
||||
{
|
||||
table_id_t table_id;
|
||||
index_id_t index_id;
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
/* pop the first index from the auto defrag pool */
|
||||
if (!dict_stats_defrag_pool_get(&table_id, &index_id)) {
|
||||
/* no index in defrag pool */
|
||||
return;
|
||||
}
|
||||
|
||||
dict_table_t* table;
|
||||
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
/* If the table is no longer cached, we've already lost the in
|
||||
memory stats so there's nothing really to write to disk. */
|
||||
table = dict_table_open_on_id(table_id, TRUE,
|
||||
DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);
|
||||
|
||||
if (table == NULL) {
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check whether table is corrupted */
|
||||
if (table->corrupted) {
|
||||
dict_table_close(table, TRUE, FALSE);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
return;
|
||||
}
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
dict_index_t* index = dict_table_find_index_on_id(table, index_id);
|
||||
|
||||
if (index == NULL) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check whether index is corrupted */
|
||||
if (dict_index_is_corrupted(index)) {
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
return;
|
||||
}
|
||||
|
||||
err = dict_stats_save_defrag_stats(index);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::error() << "Saving defragmentation status for table "
|
||||
<< index->table->name.m_name
|
||||
<< " index " << index->name()
|
||||
<< " failed " << err;
|
||||
}
|
||||
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
}
|
||||
|
||||
/*****************************************************************//**
|
||||
Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats. */
|
||||
void
|
||||
dict_defrag_process_entries_from_defrag_pool()
|
||||
/*==========================================*/
|
||||
{
|
||||
while (defrag_pool->size() && !dict_stats_start_shutdown) {
|
||||
dict_stats_process_entry_from_defrag_pool();
|
||||
}
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation result.
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_summary(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
{
|
||||
dberr_t ret=DB_SUCCESS;
|
||||
lint now = (lint) ut_time();
|
||||
|
||||
if (dict_index_is_univ(index)) {
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
|
||||
ret = dict_stats_save_index_stat(index, now, "n_pages_freed",
|
||||
index->stat_defrag_n_pages_freed,
|
||||
NULL,
|
||||
"Number of pages freed during"
|
||||
" last defragmentation run.",
|
||||
NULL);
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation stats for a given index.
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_stats(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
{
|
||||
dberr_t ret;
|
||||
|
||||
if (index->table->ibd_file_missing) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Cannot save defragment stats because "
|
||||
".ibd file is missing.\n");
|
||||
return (DB_TABLESPACE_DELETED);
|
||||
}
|
||||
if (dict_index_is_corrupted(index)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Cannot save defragment stats because "
|
||||
"index is corrupted.\n");
|
||||
return(DB_CORRUPTION);
|
||||
}
|
||||
|
||||
if (dict_index_is_univ(index)) {
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
lint now = (lint) ut_time();
|
||||
mtr_t mtr;
|
||||
ulint n_leaf_pages;
|
||||
ulint n_leaf_reserved;
|
||||
mtr_start(&mtr);
|
||||
mtr_s_lock(dict_index_get_lock(index), &mtr);
|
||||
n_leaf_reserved = btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
|
||||
&n_leaf_pages, &mtr);
|
||||
mtr_commit(&mtr);
|
||||
|
||||
if (n_leaf_reserved == ULINT_UNDEFINED) {
|
||||
// The index name is different during fast index creation,
|
||||
// so the stats won't be associated with the right index
|
||||
// for later use. We just return without saving.
|
||||
return DB_SUCCESS;
|
||||
}
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
ret = dict_stats_save_index_stat(index, now, "n_page_split",
|
||||
index->stat_defrag_n_page_split,
|
||||
NULL,
|
||||
"Number of new page splits on leaves"
|
||||
" since last defragmentation.",
|
||||
NULL);
|
||||
if (ret != DB_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = dict_stats_save_index_stat(
|
||||
index, now, "n_leaf_pages_defrag",
|
||||
n_leaf_pages,
|
||||
NULL,
|
||||
"Number of leaf pages when this stat is saved to disk",
|
||||
NULL);
|
||||
if (ret != DB_SUCCESS) {
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = dict_stats_save_index_stat(
|
||||
index, now, "n_leaf_pages_reserved",
|
||||
n_leaf_reserved,
|
||||
NULL,
|
||||
"Number of pages reserved for this index leaves when this stat "
|
||||
"is saved to disk",
|
||||
NULL);
|
||||
|
||||
end:
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
return (ret);
|
||||
}
|
||||
@@ -25,6 +25,9 @@ Data dictionary system
|
||||
Created 1/8/1996 Heikki Tuuri
|
||||
***********************************************************************/
|
||||
|
||||
#include <my_config.h>
|
||||
#include <string>
|
||||
|
||||
#include "ha_prototypes.h"
|
||||
#include <mysqld.h>
|
||||
#include <strfunc.h>
|
||||
@@ -33,7 +36,6 @@ Created 1/8/1996 Heikki Tuuri
|
||||
#include "fts0fts.h"
|
||||
#include "fil0fil.h"
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
|
||||
#ifdef UNIV_NONINL
|
||||
#include "dict0dict.ic"
|
||||
@@ -569,6 +571,8 @@ dict_table_close_and_drop(
|
||||
trx_t* trx, /*!< in: data dictionary transaction */
|
||||
dict_table_t* table) /*!< in/out: table */
|
||||
{
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
ut_ad(rw_lock_own(dict_operation_lock, RW_LOCK_X));
|
||||
ut_ad(trx->dict_operation != TRX_DICT_OP_NONE);
|
||||
@@ -583,7 +587,13 @@ dict_table_close_and_drop(
|
||||
ut_a(!table->stat_initialized);
|
||||
#endif /* UNIV_DEBUG || UNIV_DDL_DEBUG */
|
||||
|
||||
row_merge_drop_table(trx, table);
|
||||
err = row_merge_drop_table(trx, table);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
ib::error() << "At " << __FILE__ << ":" << __LINE__
|
||||
<< " row_merge_drop_table returned error: " << err
|
||||
<< " table: " << table->name.m_name;
|
||||
}
|
||||
}
|
||||
|
||||
/** Check if the table has a given (non_virtual) column.
|
||||
@@ -684,6 +694,7 @@ dict_table_get_col_name_for_mysql(
|
||||
|
||||
return(s);
|
||||
}
|
||||
|
||||
/** Returns a virtual column's name.
|
||||
@param[in] table target table
|
||||
@param[in] col_nr virtual column number (nth virtual column)
|
||||
@@ -1237,7 +1248,7 @@ dict_init(void)
|
||||
dict_operation_lock, SYNC_DICT_OPERATION);
|
||||
|
||||
if (!srv_read_only_mode) {
|
||||
dict_foreign_err_file = os_file_create_tmpfile();
|
||||
dict_foreign_err_file = os_file_create_tmpfile(NULL);
|
||||
ut_a(dict_foreign_err_file);
|
||||
}
|
||||
|
||||
@@ -1309,9 +1320,7 @@ dict_table_open_on_name(
|
||||
if (ignore_err == DICT_ERR_IGNORE_NONE
|
||||
&& table->is_encrypted) {
|
||||
/* Make life easy for drop table. */
|
||||
if (table->can_be_evicted) {
|
||||
dict_table_move_from_lru_to_non_lru(table);
|
||||
}
|
||||
dict_table_prevent_eviction(table);
|
||||
|
||||
if (table->can_be_evicted) {
|
||||
dict_move_to_mru(table);
|
||||
@@ -1328,10 +1337,8 @@ dict_table_open_on_name(
|
||||
/* If table is corrupted, return NULL */
|
||||
else if (ignore_err == DICT_ERR_IGNORE_NONE
|
||||
&& table->corrupted) {
|
||||
|
||||
/* Make life easy for drop table. */
|
||||
dict_table_prevent_eviction(table);
|
||||
|
||||
if (!dict_locked) {
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
}
|
||||
@@ -1668,7 +1675,6 @@ dict_table_move_from_lru_to_non_lru(
|
||||
@param[in] table table instance
|
||||
@param[in] id index id
|
||||
@return index or NULL */
|
||||
UNIV_INTERN
|
||||
dict_index_t*
|
||||
dict_table_find_index_on_id(
|
||||
const dict_table_t* table,
|
||||
@@ -1764,6 +1770,7 @@ dict_table_rename_in_cache(
|
||||
dict_index_t* index;
|
||||
ulint fold;
|
||||
char old_name[MAX_FULL_NAME_LEN + 1];
|
||||
os_file_type_t ftype;
|
||||
|
||||
ut_ad(mutex_own(&dict_sys->mutex));
|
||||
|
||||
@@ -1798,7 +1805,6 @@ dict_table_rename_in_cache(
|
||||
.ibd file and rebuild the .isl file if needed. */
|
||||
|
||||
if (dict_table_is_discarded(table)) {
|
||||
os_file_type_t type;
|
||||
bool exists;
|
||||
char* filepath;
|
||||
|
||||
@@ -1826,7 +1832,7 @@ dict_table_rename_in_cache(
|
||||
fil_delete_tablespace(table->space, BUF_REMOVE_ALL_NO_WRITE);
|
||||
|
||||
/* Delete any temp file hanging around. */
|
||||
if (os_file_status(filepath, &exists, &type)
|
||||
if (os_file_status(filepath, &exists, &ftype)
|
||||
&& exists
|
||||
&& !os_file_delete_if_exists(innodb_temp_file_key,
|
||||
filepath, NULL)) {
|
||||
@@ -1860,19 +1866,31 @@ dict_table_rename_in_cache(
|
||||
ut_free(old_path);
|
||||
return(DB_TABLESPACE_EXISTS);
|
||||
}
|
||||
} else {
|
||||
new_path = fil_make_filepath(
|
||||
NULL, new_name, IBD, false);
|
||||
}
|
||||
|
||||
/* New filepath must not exist. */
|
||||
err = fil_rename_tablespace_check(
|
||||
table->space, old_path, new_path, false);
|
||||
if (err != DB_SUCCESS) {
|
||||
ut_free(old_path);
|
||||
ut_free(new_path);
|
||||
return(err);
|
||||
}
|
||||
|
||||
bool success = fil_rename_tablespace(
|
||||
table->space, old_path, new_name, new_path);
|
||||
|
||||
ut_free(old_path);
|
||||
ut_free(new_path);
|
||||
|
||||
/* If the tablespace is remote, a new .isl file was created
|
||||
If success, delete the old one. If not, delete the new one. */
|
||||
if (new_path) {
|
||||
|
||||
ut_free(new_path);
|
||||
RemoteDatafile::delete_link_file(success ? old_name : new_name);
|
||||
If success, delete the old one. If not, delete the new one. */
|
||||
if (DICT_TF_HAS_DATA_DIR(table->flags)) {
|
||||
RemoteDatafile::delete_link_file(
|
||||
success ? old_name : new_name);
|
||||
}
|
||||
|
||||
if (!success) {
|
||||
@@ -2271,6 +2289,12 @@ dict_table_remove_from_cache_low(
|
||||
trx_free_for_background(trx);
|
||||
}
|
||||
|
||||
/* Free virtual column template if any */
|
||||
if (table->vc_templ != NULL) {
|
||||
dict_free_vc_templ(table->vc_templ);
|
||||
UT_DELETE(table->vc_templ);
|
||||
}
|
||||
|
||||
size = mem_heap_get_size(table->heap) + strlen(table->name.m_name) + 1;
|
||||
|
||||
ut_ad(dict_sys->size >= size);
|
||||
@@ -2516,7 +2540,7 @@ dict_index_too_big_for_tree(
|
||||
REC_STATUS_ORDINARY records. */
|
||||
|
||||
field_max_size = dict_col_get_fixed_size(col, comp);
|
||||
if (field_max_size) {
|
||||
if (field_max_size && field->fixed_len != 0) {
|
||||
/* dict_index_add_col() should guarantee this */
|
||||
ut_ad(!field->prefix_len
|
||||
|| field->fixed_len == field->prefix_len);
|
||||
@@ -2681,18 +2705,31 @@ dict_index_add_to_cache_w_vcol(
|
||||
}
|
||||
|
||||
n_ord = new_index->n_uniq;
|
||||
|
||||
/* Flag the ordering columns and also set column max_prefix */
|
||||
|
||||
for (i = 0; i < n_ord; i++) {
|
||||
const dict_field_t* field
|
||||
= dict_index_get_nth_field(new_index, i);
|
||||
|
||||
field->col->ord_part = 1;
|
||||
|
||||
if (field->prefix_len > field->col->max_prefix) {
|
||||
/* Check the column being added in the index for
|
||||
the first time and flag the ordering column. */
|
||||
if (field->col->ord_part == 0 ) {
|
||||
field->col->max_prefix = field->prefix_len;
|
||||
field->col->ord_part = 1;
|
||||
} else if (field->prefix_len == 0) {
|
||||
/* Set the max_prefix for a column to 0 if
|
||||
its prefix length is 0 (for this index)
|
||||
even if it was a part of any other index
|
||||
with some prefix length. */
|
||||
field->col->max_prefix = 0;
|
||||
} else if (field->col->max_prefix != 0
|
||||
&& field->prefix_len
|
||||
> field->col->max_prefix) {
|
||||
/* Set the max_prefix value based on the
|
||||
prefix_len. */
|
||||
field->col->max_prefix = field->prefix_len;
|
||||
}
|
||||
ut_ad(field->col->ord_part == 1);
|
||||
}
|
||||
|
||||
new_index->stat_n_diff_key_vals =
|
||||
@@ -3051,7 +3088,6 @@ dict_index_add_col(
|
||||
field = dict_index_get_nth_field(index, index->n_def - 1);
|
||||
|
||||
field->col = col;
|
||||
|
||||
/* DATA_POINT is a special type, whose fixed_len should be:
|
||||
1) DATA_MBR_LEN, when it's indexed in R-TREE. In this case,
|
||||
it must be the first col to be added.
|
||||
@@ -3663,7 +3699,7 @@ dict_foreign_find_index(
|
||||
/*!< out: column number where
|
||||
error happened */
|
||||
dict_index_t** err_index)
|
||||
/*!< out: index where error
|
||||
/*!< out: index where error
|
||||
happened */
|
||||
{
|
||||
dict_index_t* index;
|
||||
@@ -4645,6 +4681,11 @@ dict_foreign_push_index_error(
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Scans a table create SQL string and adds to the data dictionary the foreign key
|
||||
constraints declared in the string. This function should be called after the
|
||||
indexes for a table have been created. Each foreign key constraint must be
|
||||
accompanied with indexes in both participating tables. The indexes are allowed
|
||||
to contain more fields than mentioned in the constraint.
|
||||
@return error code or DB_SUCCESS */
|
||||
static
|
||||
dberr_t
|
||||
@@ -4879,6 +4920,10 @@ loop:
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
if (dict_foreigns_has_s_base_col(local_fk_set, table)) {
|
||||
return(DB_NO_FK_ON_S_BASE_COL);
|
||||
}
|
||||
|
||||
/**********************************************************/
|
||||
/* The following call adds the foreign key constraints
|
||||
to the data dictionary system tables on disk */
|
||||
@@ -4894,6 +4939,8 @@ loop:
|
||||
local_fk_set.end(),
|
||||
dict_foreign_add_to_referenced_table());
|
||||
local_fk_set.clear();
|
||||
|
||||
dict_mem_table_fill_foreign_vcol_set(table);
|
||||
}
|
||||
return(error);
|
||||
}
|
||||
@@ -4919,53 +4966,52 @@ loop:
|
||||
}
|
||||
|
||||
if (my_isspace(cs, *ptr)) {
|
||||
ptr1 = dict_accept(cs, ptr, "IF", &success);
|
||||
ptr1 = dict_accept(cs, ptr, "IF", &success);
|
||||
|
||||
if (success) {
|
||||
if (!my_isspace(cs, *ptr1)) {
|
||||
goto loop;
|
||||
}
|
||||
ptr1 = dict_accept(cs, ptr1, "NOT", &success);
|
||||
if (!success) {
|
||||
goto loop;
|
||||
}
|
||||
ptr1 = dict_accept(cs, ptr1, "EXISTS", &success);
|
||||
if (!success) {
|
||||
goto loop;
|
||||
}
|
||||
ptr = ptr1;
|
||||
}
|
||||
if (success) {
|
||||
if (!my_isspace(cs, *ptr1)) {
|
||||
goto loop;
|
||||
}
|
||||
ptr1 = dict_accept(cs, ptr1, "NOT", &success);
|
||||
if (!success) {
|
||||
goto loop;
|
||||
}
|
||||
ptr1 = dict_accept(cs, ptr1, "EXISTS", &success);
|
||||
if (!success) {
|
||||
goto loop;
|
||||
}
|
||||
ptr = ptr1;
|
||||
}
|
||||
}
|
||||
|
||||
orig = ptr;
|
||||
ptr = dict_accept(cs, ptr, "(", &success);
|
||||
|
||||
if (!success) {
|
||||
if (constraint_name) {
|
||||
/* MySQL allows also an index id before the '('; we
|
||||
skip it */
|
||||
ptr = dict_skip_word(cs, ptr, &success);
|
||||
if (!success) {
|
||||
dict_foreign_report_syntax_err(
|
||||
"%s table %s with foreign key constraint"
|
||||
" failed. Parse error in '%s'"
|
||||
" near '%s'.\n",
|
||||
operation, create_name, start_of_latest_foreign, orig);
|
||||
if (constraint_name) {
|
||||
/* MySQL allows also an index id before the '('; we
|
||||
skip it */
|
||||
ptr = dict_skip_word(cs, ptr, &success);
|
||||
if (!success) {
|
||||
dict_foreign_report_syntax_err(
|
||||
"%s table %s with foreign key constraint"
|
||||
" failed. Parse error in '%s'"
|
||||
" near '%s'.\n",
|
||||
operation, create_name, start_of_latest_foreign, orig);
|
||||
|
||||
ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT,
|
||||
"%s table %s with foreign key constraint"
|
||||
" failed. Parse error in '%s'"
|
||||
" near '%s'.",
|
||||
operation, create_name, start_of_latest_foreign, orig);
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
}
|
||||
else {
|
||||
while (my_isspace(cs, *ptr)) {
|
||||
ptr++;
|
||||
}
|
||||
ib_push_warning(trx, DB_CANNOT_ADD_CONSTRAINT,
|
||||
"%s table %s with foreign key constraint"
|
||||
" failed. Parse error in '%s'"
|
||||
" near '%s'.",
|
||||
operation, create_name, start_of_latest_foreign, orig);
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
} else {
|
||||
while (my_isspace(cs, *ptr)) {
|
||||
ptr++;
|
||||
}
|
||||
|
||||
ptr = dict_scan_id(cs, ptr, heap,
|
||||
ptr = dict_scan_id(cs, ptr, heap,
|
||||
&constraint_name, FALSE, FALSE);
|
||||
}
|
||||
|
||||
@@ -5095,6 +5141,23 @@ col_loop1:
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
/* Don't allow foreign keys on partitioned tables yet. */
|
||||
ptr1 = dict_scan_to(ptr, "PARTITION");
|
||||
if (ptr1) {
|
||||
ptr1 = dict_accept(cs, ptr1, "PARTITION", &success);
|
||||
if (success && my_isspace(cs, *ptr1)) {
|
||||
ptr2 = dict_accept(cs, ptr1, "BY", &success);
|
||||
if (success) {
|
||||
my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0));
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
}
|
||||
}
|
||||
if (dict_table_is_partition(table)) {
|
||||
my_error(ER_FOREIGN_KEY_ON_PARTITIONED,MYF(0));
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
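
The new check above relies on the dict_scan_to()/dict_accept() helpers to spot a PARTITION BY clause in the CREATE TABLE string before rejecting the foreign key. A standalone sketch of an accept-style helper of that kind (a simplified stand-in, not the actual dict_accept() implementation, which also takes the connection charset):

// Standalone sketch (not InnoDB code): skip leading whitespace, then consume
// the keyword if it matches case-insensitively, reporting success through an
// out parameter and returning the position after the consumed token.
#include <cctype>
#include <cstddef>

static const char* scan_accept(const char* ptr, const char* keyword, bool* success)
{
	while (*ptr != '\0' && std::isspace(static_cast<unsigned char>(*ptr))) {
		ptr++;                                   // skip leading whitespace
	}
	std::size_t i = 0;
	while (keyword[i] != '\0'
	       && std::tolower(static_cast<unsigned char>(ptr[i]))
		  == std::tolower(static_cast<unsigned char>(keyword[i]))) {
		i++;
	}
	*success = (keyword[i] == '\0');                 // whole keyword matched
	return *success ? ptr + i : ptr;
}
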
|
||||
|
||||
/* Let us create a constraint struct */
|
||||
|
||||
foreign = dict_mem_foreign_create();
|
||||
@@ -5602,7 +5665,7 @@ dict_foreign_parse_drop_constraints(
|
||||
char* str;
|
||||
size_t len;
|
||||
const char* ptr;
|
||||
const char* ptr1;
|
||||
const char* ptr1;
|
||||
const char* id;
|
||||
CHARSET_INFO* cs;
|
||||
|
||||
@@ -5656,11 +5719,10 @@ loop:
|
||||
ptr1 = dict_accept(cs, ptr, "IF", &success);
|
||||
|
||||
if (success && my_isspace(cs, *ptr1)) {
|
||||
ptr1 = dict_accept(cs, ptr1, "EXISTS", &success);
|
||||
if (success) {
|
||||
|
||||
ptr = ptr1;
|
||||
}
|
||||
ptr1 = dict_accept(cs, ptr1, "EXISTS", &success);
|
||||
if (success) {
|
||||
ptr = ptr1;
|
||||
}
|
||||
}
|
||||
|
||||
ptr = dict_scan_id(cs, ptr, heap, &id, FALSE, TRUE);
|
||||
@@ -5875,6 +5937,12 @@ dict_index_copy_rec_order_prefix(
|
||||
n = dict_index_get_n_unique_in_tree(index);
|
||||
} else {
|
||||
n = dict_index_get_n_unique_in_tree_nonleaf(index);
|
||||
/* For internal node of R-tree, since we need to
|
||||
compare the page no field, so, we need to copy this
|
||||
field as well. */
|
||||
if (dict_index_is_spatial(index)) {
|
||||
n++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5992,11 +6060,11 @@ dict_print_info_on_foreign_key_in_create_format(
|
||||
|
||||
str.append(" CONSTRAINT ");
|
||||
|
||||
str.append(ut_get_name(trx, FALSE, stripped_id));
|
||||
str.append(innobase_quote_identifier(trx, stripped_id));
|
||||
str.append(" FOREIGN KEY (");
|
||||
|
||||
for (i = 0;;) {
|
||||
str.append(ut_get_name(trx, FALSE, foreign->foreign_col_names[i]));
|
||||
str.append(innobase_quote_identifier(trx, foreign->foreign_col_names[i]));
|
||||
|
||||
if (++i < foreign->n_fields) {
|
||||
str.append(", ");
|
||||
@@ -6010,18 +6078,18 @@ dict_print_info_on_foreign_key_in_create_format(
|
||||
if (dict_tables_have_same_db(foreign->foreign_table_name_lookup,
|
||||
foreign->referenced_table_name_lookup)) {
|
||||
/* Do not print the database name of the referenced table */
|
||||
str.append(ut_get_name(trx, TRUE,
|
||||
str.append(ut_get_name(trx,
|
||||
dict_remove_db_name(
|
||||
foreign->referenced_table_name)));
|
||||
} else {
|
||||
str.append(ut_get_name(trx, TRUE,
|
||||
str.append(ut_get_name(trx,
|
||||
foreign->referenced_table_name));
|
||||
}
|
||||
|
||||
str.append(" (");
|
||||
|
||||
for (i = 0;;) {
|
||||
str.append(ut_get_name(trx, FALSE,
|
||||
str.append(innobase_quote_identifier(trx,
|
||||
foreign->referenced_col_names[i]));
|
||||
|
||||
if (++i < foreign->n_fields) {
|
||||
@@ -6096,12 +6164,12 @@ dict_print_info_on_foreign_keys(
|
||||
str.append(" ");
|
||||
}
|
||||
|
||||
str.append(ut_get_name(trx, FALSE,
|
||||
str.append(innobase_quote_identifier(trx,
|
||||
foreign->foreign_col_names[i]));
|
||||
}
|
||||
|
||||
str.append(") REFER ");
|
||||
str.append(ut_get_name(trx, TRUE,
|
||||
str.append(ut_get_name(trx,
|
||||
foreign->referenced_table_name));
|
||||
str.append(")");
|
||||
|
||||
@@ -6109,8 +6177,8 @@ dict_print_info_on_foreign_keys(
|
||||
if (i) {
|
||||
str.append(" ");
|
||||
}
|
||||
str.append(ut_get_name(
|
||||
trx, FALSE,
|
||||
str.append(innobase_quote_identifier(
|
||||
trx,
|
||||
foreign->referenced_col_names[i]));
|
||||
}
|
||||
|
||||
@@ -6143,7 +6211,6 @@ dict_print_info_on_foreign_keys(
|
||||
}
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
return str;
|
||||
}
|
||||
|
||||
@@ -6262,6 +6329,13 @@ dict_set_corrupted(
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
/* If this is read only mode, do not update SYS_INDEXES, just
|
||||
mark it as corrupted in memory */
|
||||
if (srv_read_only_mode) {
|
||||
index->type |= DICT_CORRUPT;
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
heap = mem_heap_create(sizeof(dtuple_t) + 2 * (sizeof(dfield_t)
|
||||
+ sizeof(que_fork_t) + sizeof(upd_node_t)
|
||||
+ sizeof(upd_t) + 12));
|
||||
@@ -6451,6 +6525,7 @@ dict_set_merge_threshold_all_debug(
|
||||
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
}
|
||||
|
||||
#endif /* UNIV_DEBUG */
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
@@ -6914,7 +6989,8 @@ dict_fs2utf8(
|
||||
|
||||
errors = 0;
|
||||
strconvert(
|
||||
&my_charset_filename, buf, (uint) (buf_p - buf), system_charset_info,
|
||||
&my_charset_filename, buf, (uint) (buf_p - buf),
|
||||
system_charset_info,
|
||||
table_utf8, table_utf8_size,
|
||||
&errors);
|
||||
|
||||
@@ -7380,11 +7456,13 @@ dict_table_t::flags | 0 | 1 | 1 | 1
|
||||
fil_space_t::flags | 0 | 0 | 1 | 1
|
||||
@param[in] table_flags dict_table_t::flags
|
||||
@param[in] is_temp whether the tablespace is temporary
|
||||
@param[in] is_encrypted whether the tablespace is encrypted
|
||||
@return tablespace flags (fil_space_t::flags) */
|
||||
ulint
|
||||
dict_tf_to_fsp_flags(
|
||||
ulint table_flags,
|
||||
bool is_temp)
|
||||
bool is_temp,
|
||||
bool is_encrypted)
|
||||
{
|
||||
DBUG_EXECUTE_IF("dict_tf_to_fsp_flags_failure",
|
||||
return(ULINT_UNDEFINED););
|
||||
@@ -7411,9 +7489,30 @@ dict_tf_to_fsp_flags(
|
||||
has_data_dir,
|
||||
is_shared,
|
||||
is_temp,
|
||||
page_compression,
|
||||
page_compression_level,
|
||||
atomic_writes);
|
||||
0,
|
||||
0,
|
||||
0,
|
||||
is_encrypted);
|
||||
|
||||
/* In addition, tablespace flags also contain if the page
|
||||
compression is used for this table. */
|
||||
if (page_compression) {
|
||||
fsp_flags |= FSP_FLAGS_SET_PAGE_COMPRESSION(fsp_flags, page_compression);
|
||||
}
|
||||
|
||||
/* In addition, tablespace flags also contain page compression level
|
||||
if page compression is used for this table. */
|
||||
if (page_compression && page_compression_level) {
|
||||
fsp_flags |= FSP_FLAGS_SET_PAGE_COMPRESSION_LEVEL(fsp_flags, page_compression_level);
|
||||
}
|
||||
|
||||
/* In addition, tablespace flags also contain flag if atomic writes
|
||||
is used for this table */
|
||||
if (atomic_writes) {
|
||||
fsp_flags |= FSP_FLAGS_SET_ATOMIC_WRITES(fsp_flags, atomic_writes);
|
||||
}
|
||||
|
||||
ut_ad(fsp_flags_is_valid(fsp_flags));
|
||||
|
||||
return(fsp_flags);
|
||||
}
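
After the base tablespace flags word is built, the updated code above ORs in page compression, the compression level, and atomic writes through the FSP_FLAGS_SET_* macros. A standalone sketch of that kind of bit packing (the bit positions below are purely illustrative; the real layout is defined by the FSP_FLAGS_* macros):

// Standalone sketch (not InnoDB code): fold optional attributes into the
// tablespace flags word only when they are actually in use.
typedef unsigned long flags_t;

static const unsigned POS_PAGE_COMPRESSION       = 16;
static const unsigned POS_PAGE_COMPRESSION_LEVEL = 17;
static const unsigned POS_ATOMIC_WRITES          = 21;

static flags_t add_optional_fsp_flags(
	flags_t  fsp_flags,
	bool     page_compression,
	unsigned page_compression_level,   /* 0 if unused */
	unsigned atomic_writes)            /* 0 if unused */
{
	if (page_compression) {
		fsp_flags |= flags_t(1) << POS_PAGE_COMPRESSION;
	}
	if (page_compression && page_compression_level) {
		fsp_flags |= flags_t(page_compression_level)
			<< POS_PAGE_COMPRESSION_LEVEL;
	}
	if (atomic_writes) {
		fsp_flags |= flags_t(atomic_writes) << POS_ATOMIC_WRITES;
	}
	return fsp_flags;
}
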
|
||||
@@ -7442,10 +7541,10 @@ dict_tf_to_row_format_string(
|
||||
}
|
||||
|
||||
/** Look for any dictionary objects that are found in the given tablespace.
|
||||
@param[in] space Tablespace ID to search for.
|
||||
@param[in] space_id Tablespace ID to search for.
|
||||
@return true if tablespace is empty. */
|
||||
bool
|
||||
dict_tablespace_is_empty(
|
||||
dict_space_is_empty(
|
||||
ulint space_id)
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
@@ -7480,6 +7579,55 @@ dict_tablespace_is_empty(
|
||||
|
||||
return(!found);
|
||||
}
|
||||
|
||||
/** Find the space_id for the given name in sys_tablespaces.
|
||||
@param[in] name Tablespace name to search for.
|
||||
@return the tablespace ID. */
|
||||
ulint
|
||||
dict_space_get_id(
|
||||
const char* name)
|
||||
{
|
||||
btr_pcur_t pcur;
|
||||
const rec_t* rec;
|
||||
mtr_t mtr;
|
||||
ulint name_len = strlen(name);
|
||||
ulint id = ULINT_UNDEFINED;
|
||||
|
||||
rw_lock_x_lock(dict_operation_lock);
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
mtr_start(&mtr);
|
||||
|
||||
for (rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES);
|
||||
rec != NULL;
|
||||
rec = dict_getnext_system(&pcur, &mtr)) {
|
||||
const byte* field;
|
||||
ulint len;
|
||||
|
||||
field = rec_get_nth_field_old(
|
||||
rec, DICT_FLD__SYS_TABLESPACES__NAME, &len);
|
||||
ut_ad(len > 0);
|
||||
ut_ad(len < OS_FILE_MAX_PATH);
|
||||
|
||||
if (len == name_len && ut_memcmp(name, field, len) == 0) {
|
||||
|
||||
field = rec_get_nth_field_old(
|
||||
rec, DICT_FLD__SYS_TABLESPACES__SPACE, &len);
|
||||
ut_ad(len == 4);
|
||||
id = mach_read_from_4(field);
|
||||
|
||||
/* This is normally called by dict_getnext_system()
|
||||
at the end of the index. */
|
||||
btr_pcur_close(&pcur);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mtr_commit(&mtr);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
rw_lock_x_unlock(dict_operation_lock);
|
||||
|
||||
return(id);
|
||||
}
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
/** Determine the extent size (in pages) for the given table
|
||||
|
||||
@@ -733,7 +733,6 @@ err_len:
|
||||
@param[in] space_id Tablespace ID
|
||||
@return First filepath (caller must invoke ut_free() on it)
|
||||
@retval NULL if no SYS_DATAFILES entry was found. */
|
||||
static
|
||||
char*
|
||||
dict_get_first_path(
|
||||
ulint space_id)
|
||||
@@ -819,7 +818,7 @@ dict_get_first_path(
|
||||
@retval NULL if no dictionary entry was found. */
|
||||
static
|
||||
char*
|
||||
dict_get_space_name(
|
||||
dict_space_get_name(
|
||||
ulint space_id,
|
||||
mem_heap_t* callers_heap)
|
||||
{
|
||||
@@ -1127,7 +1126,7 @@ dict_sys_tablespaces_rec_read(
|
||||
rec, DICT_FLD__SYS_TABLESPACES__NAME, &len);
|
||||
if (len == 0 || len == UNIV_SQL_NULL) {
|
||||
ib::error() << "Wrong field length in SYS_TABLESPACES.NAME: "
|
||||
<< len;
|
||||
<< len;
|
||||
return(false);
|
||||
}
|
||||
strncpy(name, reinterpret_cast<const char*>(field), NAME_LEN);
|
||||
@@ -1137,7 +1136,7 @@ dict_sys_tablespaces_rec_read(
|
||||
rec, DICT_FLD__SYS_TABLESPACES__FLAGS, &len);
|
||||
if (len != 4) {
|
||||
ib::error() << "Wrong field length in SYS_TABLESPACES.FLAGS: "
|
||||
<< len;
|
||||
<< len;
|
||||
return(false);
|
||||
}
|
||||
*flags = mach_read_from_4(field);
|
||||
@@ -1313,32 +1312,16 @@ dict_sys_tables_rec_read(
|
||||
|
||||
*flags = dict_sys_tables_type_to_tf(type, *n_cols);
|
||||
|
||||
/* For tables created with old versions of InnoDB, there may be
|
||||
garbage in SYS_TABLES.MIX_LEN where flags2 are found. Such tables
|
||||
would always be in ROW_FORMAT=REDUNDANT which do not have the
|
||||
high bit set in n_cols, and flags would be zero. */
|
||||
if (*flags != 0 || *n_cols & DICT_N_COLS_COMPACT) {
|
||||
/* Get flags2 from SYS_TABLES.MIX_LEN */
|
||||
field = rec_get_nth_field_old(
|
||||
rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len);
|
||||
*flags2 = mach_read_from_4(field);
|
||||
|
||||
/* Get flags2 from SYS_TABLES.MIX_LEN */
|
||||
field = rec_get_nth_field_old(
|
||||
rec, DICT_FLD__SYS_TABLES__MIX_LEN, &len);
|
||||
*flags2 = mach_read_from_4(field);
|
||||
|
||||
if (!dict_tf2_is_valid(*flags, *flags2)) {
|
||||
ib::error() << "Table " << table_name << " in InnoDB"
|
||||
" data dictionary contains invalid flags."
|
||||
" SYS_TABLES.MIX_LEN=" << *flags2;
|
||||
*flags2 = ULINT_UNDEFINED;
|
||||
return(false);
|
||||
}
|
||||
|
||||
/* DICT_TF2_FTS will be set when indexes are being loaded */
|
||||
*flags2 &= ~DICT_TF2_FTS;
|
||||
|
||||
/* Now that we have used this bit, unset it. */
|
||||
*n_cols &= ~DICT_N_COLS_COMPACT;
|
||||
}
|
||||
/* DICT_TF2_FTS will be set when indexes are being loaded */
|
||||
*flags2 &= ~DICT_TF2_FTS;
|
||||
|
||||
/* Now that we have used this bit, unset it. */
|
||||
*n_cols &= ~DICT_N_COLS_COMPACT;
|
||||
return(true);
|
||||
}
|
||||
|
||||
@@ -1431,10 +1414,10 @@ dict_check_sys_tables(
|
||||
and the tablespace_name are the same.
|
||||
Some hidden tables like FTS AUX tables may not be found in
|
||||
the dictionary since they can always be found in the default
|
||||
location. If so, then dict_get_space_name() will return NULL,
|
||||
location. If so, then dict_space_get_name() will return NULL,
|
||||
the space name must be the table_name, and the filepath can be
|
||||
discovered in the default location.*/
|
||||
char* shared_space_name = dict_get_space_name(space_id, NULL);
|
||||
char* shared_space_name = dict_space_get_name(space_id, NULL);
|
||||
space_name = shared_space_name == NULL
|
||||
? table_name.m_name
|
||||
: shared_space_name;
|
||||
@@ -1468,17 +1451,13 @@ dict_check_sys_tables(
|
||||
opened. */
|
||||
char* filepath = dict_get_first_path(space_id);
|
||||
|
||||
/* We need to read page 0 to get (optional) IV
|
||||
regardless if encryptions is turned on or not,
|
||||
since if it's off we should decrypt a potentially
|
||||
already encrypted table */
|
||||
bool read_page_0 = true;
|
||||
|
||||
/* Check that the .ibd file exists. */
|
||||
bool is_temp = flags2 & DICT_TF2_TEMPORARY;
|
||||
ulint fsp_flags = dict_tf_to_fsp_flags(flags, is_temp);
|
||||
|
||||
validate = true;
|
||||
bool is_encrypted = flags2 & DICT_TF2_ENCRYPTION;
|
||||
ulint fsp_flags = dict_tf_to_fsp_flags(flags,
|
||||
is_temp,
|
||||
is_encrypted);
|
validate = true; /* Encryption */

dberr_t err = fil_ibd_open(
validate,
@@ -2601,7 +2580,7 @@ dict_load_indexes(
dictionary cache for such metadata corruption,
since we would always be able to set it
when loading the dictionary cache */
ut_ad(index->table == table);
index->table = table;
dict_set_corrupted_index_cache_only(index);

ib::info() << "Index is corrupt but forcing"
@@ -2848,7 +2827,7 @@ dict_get_and_save_space_name(
dict_mutex_enter_for_mysql();
}

table->tablespace = dict_get_space_name(
table->tablespace = dict_space_get_name(
table->space, table->heap);

if (!dict_mutex_own) {
@@ -2948,7 +2927,7 @@ dict_load_tablespace(
if (DICT_TF_HAS_SHARED_SPACE(table->flags)) {
if (srv_sys_tablespaces_open) {
shared_space_name =
dict_get_space_name(table->space, NULL);
dict_space_get_name(table->space, NULL);

} else {
/* Make the temporary tablespace name. */
@@ -3012,7 +2991,9 @@ dict_load_tablespace(

/* Try to open the tablespace. We set the 2nd param (fix_dict) to
false because we do not have an x-lock on dict_operation_lock */
ulint fsp_flags = dict_tf_to_fsp_flags(table->flags, false);
ulint fsp_flags = dict_tf_to_fsp_flags(table->flags,
false,
dict_table_is_encrypted(table));
dberr_t err = fil_ibd_open(
true, false, FIL_TYPE_TABLESPACE, table->space,
fsp_flags, space_name, filepath, table);
@@ -3179,6 +3160,32 @@ err_exit:
}
}

/* We don't trust the table->flags2(retrieved from SYS_TABLES.MIX_LEN
field) if the datafiles are from 3.23.52 version. To identify this
version, we do the below check and reset the flags. */
if (!DICT_TF2_FLAG_IS_SET(table, DICT_TF2_FTS_HAS_DOC_ID)
&& table->space == srv_sys_space.space_id()
&& table->flags == 0) {
table->flags2 = 0;
}

DBUG_EXECUTE_IF("ib_table_invalid_flags",
if(strcmp(table->name.m_name, "test/t1") == 0) {
table->flags2 = 255;
table->flags = 255;
});

if (!dict_tf2_is_valid(table->flags, table->flags2)) {
ib::error() << "Table " << table->name << " in InnoDB"
" data dictionary contains invalid flags."
" SYS_TABLES.MIX_LEN=" << table->flags2;
table->flags2 &= ~(DICT_TF2_TEMPORARY|DICT_TF2_INTRINSIC);
dict_table_remove_from_cache(table);
table = NULL;
err = DB_FAIL;
goto func_exit;
}

/* Initialize table foreign_child value. Its value could be
changed when dict_load_foreigns() is called below */
table->fk_max_recusive_level = 0;
@@ -3203,6 +3210,7 @@ err_exit:
dict_table_remove_from_cache(table);
table = NULL;
} else {
dict_mem_table_fill_foreign_vcol_set(table);
table->fk_max_recusive_level = 0;
}
} else {
@@ -3353,99 +3361,6 @@ check_rec:
return(table);
}

/***********************************************************************//**
Loads a table id based on the index id.
@return true if found */
static
bool
dict_load_table_id_on_index_id(
/*==================*/
index_id_t index_id, /*!< in: index id */
table_id_t* table_id) /*!< out: table id */
{
/* check hard coded indexes */
switch(index_id) {
case DICT_TABLES_ID:
case DICT_COLUMNS_ID:
case DICT_INDEXES_ID:
case DICT_FIELDS_ID:
*table_id = index_id;
return true;
case DICT_TABLE_IDS_ID:
/* The following is a secondary index on SYS_TABLES */
*table_id = DICT_TABLES_ID;
return true;
}

bool found = false;
mtr_t mtr;

ut_ad(mutex_own(&(dict_sys->mutex)));

/* NOTE that the operation of this function is protected by
the dictionary mutex, and therefore no deadlocks can occur
with other dictionary operations. */

mtr_start(&mtr);

btr_pcur_t pcur;
const rec_t* rec = dict_startscan_system(&pcur, &mtr, SYS_INDEXES);

while (rec) {
ulint len;
const byte* field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_INDEXES__ID, &len);
ut_ad(len == 8);

/* Check if the index id is the one searched for */
if (index_id == mach_read_from_8(field)) {
found = true;
/* Now we get the table id */
const byte* field = rec_get_nth_field_old(
rec,
DICT_FLD__SYS_INDEXES__TABLE_ID,
&len);
*table_id = mach_read_from_8(field);
break;
}
mtr_commit(&mtr);
mtr_start(&mtr);
rec = dict_getnext_system(&pcur, &mtr);
}

btr_pcur_close(&pcur);
mtr_commit(&mtr);

return(found);
}

UNIV_INTERN
dict_table_t*
dict_table_open_on_index_id(
/*==================*/
index_id_t index_id, /*!< in: index id */
bool dict_locked) /*!< in: dict locked */
{
if (!dict_locked) {
mutex_enter(&dict_sys->mutex);
}

ut_ad(mutex_own(&dict_sys->mutex));
table_id_t table_id;
dict_table_t * table = NULL;
if (dict_load_table_id_on_index_id(index_id, &table_id)) {
bool local_dict_locked = true;
table = dict_table_open_on_id(table_id,
local_dict_locked,
DICT_TABLE_OP_LOAD_TABLESPACE);
}

if (!dict_locked) {
mutex_exit(&dict_sys->mutex);
}
return table;
}

/********************************************************************//**
This function is called when the database is booted. Loads system table
index definitions except for the clustered index which is added to the
@@ -3953,3 +3868,96 @@ load_next_index:

DBUG_RETURN(DB_SUCCESS);
}

/***********************************************************************//**
Loads a table id based on the index id.
@return true if found */
static
bool
dict_load_table_id_on_index_id(
/*===========================*/
index_id_t index_id, /*!< in: index id */
table_id_t* table_id) /*!< out: table id */
{
/* check hard coded indexes */
switch(index_id) {
case DICT_TABLES_ID:
case DICT_COLUMNS_ID:
case DICT_INDEXES_ID:
case DICT_FIELDS_ID:
*table_id = index_id;
return true;
case DICT_TABLE_IDS_ID:
/* The following is a secondary index on SYS_TABLES */
*table_id = DICT_TABLES_ID;
return true;
}

bool found = false;
mtr_t mtr;

ut_ad(mutex_own(&(dict_sys->mutex)));

/* NOTE that the operation of this function is protected by
the dictionary mutex, and therefore no deadlocks can occur
with other dictionary operations. */

mtr_start(&mtr);

btr_pcur_t pcur;
const rec_t* rec = dict_startscan_system(&pcur, &mtr, SYS_INDEXES);

while (rec) {
ulint len;
const byte* field = rec_get_nth_field_old(
rec, DICT_FLD__SYS_INDEXES__ID, &len);
ut_ad(len == 8);

/* Check if the index id is the one searched for */
if (index_id == mach_read_from_8(field)) {
found = true;
/* Now we get the table id */
const byte* field = rec_get_nth_field_old(
rec,
DICT_FLD__SYS_INDEXES__TABLE_ID,
&len);
*table_id = mach_read_from_8(field);
break;
}
mtr_commit(&mtr);
mtr_start(&mtr);
rec = dict_getnext_system(&pcur, &mtr);
}

btr_pcur_close(&pcur);
mtr_commit(&mtr);

return(found);
}

UNIV_INTERN
dict_table_t*
dict_table_open_on_index_id(
/*========================*/
index_id_t index_id, /*!< in: index id */
bool dict_locked) /*!< in: dict locked */
{
if (!dict_locked) {
mutex_enter(&dict_sys->mutex);
}

ut_ad(mutex_own(&dict_sys->mutex));
table_id_t table_id;
dict_table_t * table = NULL;
if (dict_load_table_id_on_index_id(index_id, &table_id)) {
bool local_dict_locked = true;
table = dict_table_open_on_id(table_id,
local_dict_locked,
DICT_TABLE_OP_LOAD_TABLESPACE);
}

if (!dict_locked) {
mutex_exit(&dict_sys->mutex);
}
return table;
}

@@ -189,36 +189,6 @@ dict_mem_table_create(
return(table);
}

/****************************************************************//**
Determines if a table belongs to a system database
@return */
UNIV_INTERN
bool
dict_mem_table_is_system(
/*================*/
char *name) /*!< in: table name */
{
ut_ad(name);

/* table has the following format: database/table
and some system table are of the form SYS_* */
if (strchr(name, '/')) {
int table_len = strlen(name);
const char *system_db;
int i = 0;
while ((system_db = innobase_system_databases[i++])
&& (system_db != NullS)) {
int len = strlen(system_db);
if (table_len > len && !strncmp(name, system_db, len)) {
return true;
}
}
return false;
} else {
return true;
}
}

/****************************************************************//**
Free a table memory object. */
void
@@ -243,6 +213,7 @@ dict_mem_table_free(
dict_table_autoinc_destroy(table);
#endif /* UNIV_HOTBACKUP */

dict_mem_table_free_foreign_vcol_set(table);
dict_table_stats_latch_destroy(table);

table->foreign_set.~dict_foreign_set();
@@ -260,6 +231,10 @@ dict_mem_table_free(
UT_DELETE(vcol->v_indexes);
}

if (table->s_cols != NULL) {
UT_DELETE(table->s_cols);
}

mem_heap_free(table->heap);
}

@@ -433,6 +408,39 @@ dict_mem_table_add_v_col(
return(v_col);
}

/** Adds a stored column definition to a table.
@param[in] table table
@param[in] num_base number of base columns. */
void
dict_mem_table_add_s_col(
dict_table_t* table,
ulint num_base)
{
ulint i = table->n_def - 1;
dict_col_t* col = dict_table_get_nth_col(table, i);
dict_s_col_t s_col;

ut_ad(col != NULL);

if (table->s_cols == NULL) {
table->s_cols = UT_NEW_NOKEY(dict_s_col_list());
}

s_col.m_col = col;
s_col.s_pos = i + table->n_v_def;

if (num_base != 0) {
s_col.base_col = static_cast<dict_col_t**>(mem_heap_zalloc(
table->heap, num_base * sizeof(dict_col_t*)));
} else {
s_col.base_col = NULL;
}

s_col.num_base = num_base;
table->s_cols->push_back(s_col);
}


/**********************************************************************//**
Renames a column of a table in the data dictionary cache. */
static MY_ATTRIBUTE((nonnull))
@@ -452,7 +460,9 @@ dict_mem_table_col_rename_low(

size_t from_len = strlen(s), to_len = strlen(to);

ut_ad(i < table->n_def);
ut_ad(i < table->n_def || is_virtual);
ut_ad(i < table->n_v_def || !is_virtual);

ut_ad(from_len <= NAME_LEN);
ut_ad(to_len <= NAME_LEN);

@@ -592,7 +602,7 @@ void
dict_mem_table_col_rename(
/*======================*/
dict_table_t* table, /*!< in/out: table */
unsigned nth_col,/*!< in: column index */
ulint nth_col,/*!< in: column index */
const char* from, /*!< in: old column name */
const char* to, /*!< in: new column name */
bool is_virtual)
@@ -603,7 +613,7 @@ dict_mem_table_col_rename(
ut_ad((!is_virtual && nth_col < table->n_def)
|| (is_virtual && nth_col < table->n_v_def));

for (unsigned i = 0; i < nth_col; i++) {
for (ulint i = 0; i < nth_col; i++) {
size_t len = strlen(s);
ut_ad(len > 0);
s += len + 1;
@@ -613,7 +623,8 @@ dict_mem_table_col_rename(
Proceed with the renaming anyway. */
ut_ad(!strcmp(from, s));

dict_mem_table_col_rename_low(table, nth_col, to, s, is_virtual);
dict_mem_table_col_rename_low(table, static_cast<unsigned>(nth_col),
to, s, is_virtual);
}

/**********************************************************************//**
@@ -709,6 +720,8 @@ dict_mem_foreign_create(void)

foreign->heap = heap;

foreign->v_cols = NULL;

DBUG_PRINT("dict_mem_foreign_create", ("heap: %p", heap));

DBUG_RETURN(foreign);
@@ -773,6 +786,181 @@ dict_mem_referenced_table_name_lookup_set(
= foreign->referenced_table_name;
}
}

/** Fill the virtual column set with virtual column information
present in the given virtual index.
@param[in] index virtual index
@param[out] v_cols virtual column set. */
static
void
dict_mem_fill_vcol_has_index(
const dict_index_t* index,
dict_vcol_set** v_cols)
{
for (ulint i = 0; i < index->table->n_v_cols; i++) {
dict_v_col_t* v_col = dict_table_get_nth_v_col(
index->table, i);
if (!v_col->m_col.ord_part) {
continue;
}

dict_v_idx_list::iterator it;
for (it = v_col->v_indexes->begin();
it != v_col->v_indexes->end(); ++it) {
dict_v_idx_t v_idx = *it;

if (v_idx.index != index) {
continue;
}

if (*v_cols == NULL) {
*v_cols = UT_NEW_NOKEY(dict_vcol_set());
}

(*v_cols)->insert(v_col);
}
}
}

/** Fill the virtual column set with the virtual column of the index
if the index contains given column name.
@param[in] col_name column name
@param[in] table innodb table object
@param[out] v_cols set of virtual column information. */
static
void
dict_mem_fill_vcol_from_v_indexes(
const char* col_name,
const dict_table_t* table,
dict_vcol_set** v_cols)
{
/* virtual column can't be Primary Key, so start with
secondary index */
for (dict_index_t* index = dict_table_get_next_index(
dict_table_get_first_index(table));
index;
index = dict_table_get_next_index(index)) {

if (!dict_index_has_virtual(index)) {
continue;
}

for (ulint i = 0; i < index->n_fields; i++) {
dict_field_t* field =
dict_index_get_nth_field(index, i);

if (strcmp(field->name, col_name) == 0) {
dict_mem_fill_vcol_has_index(
index, v_cols);
}
}
}
}

/** Fill the virtual column set with virtual columns which have base columns
as the given col_name
@param[in] col_name column name
@param[in] table table object
@param[out] v_cols set of virtual columns. */
static
void
dict_mem_fill_vcol_set_for_base_col(
const char* col_name,
const dict_table_t* table,
dict_vcol_set** v_cols)
{
for (ulint i = 0; i < table->n_v_cols; i++) {
dict_v_col_t* v_col = dict_table_get_nth_v_col(table, i);

if (!v_col->m_col.ord_part) {
continue;
}

for (ulint j = 0; j < v_col->num_base; j++) {
if (strcmp(col_name, dict_table_get_col_name(
table,
v_col->base_col[j]->ind)) == 0) {

if (*v_cols == NULL) {
*v_cols = UT_NEW_NOKEY(dict_vcol_set());
}

(*v_cols)->insert(v_col);
}
}
}
}

/** Fills the dependent virtual columns in a set.
Reason for being dependent are
1) FK can be present on base column of virtual columns
2) FK can be present on column which is a part of virtual index
@param[in,out] foreign foreign key information. */
void
dict_mem_foreign_fill_vcol_set(
dict_foreign_t* foreign)
{
ulint type = foreign->type;

if (type == 0) {
return;
}

for (ulint i = 0; i < foreign->n_fields; i++) {
/** FK can be present on base columns
of virtual columns. */
dict_mem_fill_vcol_set_for_base_col(
foreign->foreign_col_names[i],
foreign->foreign_table,
&foreign->v_cols);

/** FK can be present on the columns
which can be a part of virtual index. */
dict_mem_fill_vcol_from_v_indexes(
foreign->foreign_col_names[i],
foreign->foreign_table,
&foreign->v_cols);
}
}

/** Fill virtual columns set in each fk constraint present in the table.
@param[in,out] table innodb table object. */
void
dict_mem_table_fill_foreign_vcol_set(
dict_table_t* table)
{
dict_foreign_set fk_set = table->foreign_set;
dict_foreign_t* foreign;

dict_foreign_set::iterator it;
for (it = fk_set.begin(); it != fk_set.end(); ++it) {
foreign = *it;

dict_mem_foreign_fill_vcol_set(foreign);
}
}

/** Free the vcol_set from all foreign key constraint on the table.
@param[in,out] table innodb table object. */
void
dict_mem_table_free_foreign_vcol_set(
dict_table_t* table)
{
dict_foreign_set fk_set = table->foreign_set;
dict_foreign_t* foreign;

dict_foreign_set::iterator it;
for (it = fk_set.begin(); it != fk_set.end(); ++it) {

foreign = *it;

if (foreign->v_cols != NULL) {
UT_DELETE(foreign->v_cols);
foreign->v_cols = NULL;
}
}
}

#endif /* !UNIV_HOTBACKUP */

/**********************************************************************//**
@@ -942,3 +1130,32 @@ operator<< (std::ostream& out, const dict_foreign_set& fk_set)
return(out);
}

/****************************************************************//**
Determines if a table belongs to a system database
@return */
bool
dict_mem_table_is_system(
/*================*/
char *name) /*!< in: table name */
{
ut_ad(name);

/* table has the following format: database/table
and some system table are of the form SYS_* */
if (strchr(name, '/')) {
int table_len = strlen(name);
const char *system_db;
int i = 0;
while ((system_db = innobase_system_databases[i++])
&& (system_db != NullS)) {
int len = strlen(system_db);
if (table_len > len && !strncmp(name, system_db, len)) {
return true;
}
}
return false;
} else {
return true;
}
}


@@ -930,14 +930,14 @@ dict_stats_update_transient(

if (dict_table_is_discarded(table)) {
/* Nothing to do. */
dict_stats_empty_table(table, false);
dict_stats_empty_table(table, true);
return;
} else if (index == NULL) {
/* Table definition is corrupt */

ib::warn() << "Table " << table->name
<< " has no indexes. Cannot calculate statistics.";
dict_stats_empty_table(table, false);
dict_stats_empty_table(table, true);
return;
}

@@ -2316,7 +2316,6 @@ storage.
allocate and free the trx object. If it is not NULL then it will be
rolled back only in the case of error, but not freed.
@return DB_SUCCESS or error code */
static
dberr_t
dict_stats_save_index_stat(
dict_index_t* index,
@@ -3257,15 +3256,15 @@ dict_stats_update(

if (innodb_table_stats_not_found == false &&
table->stats_error_printed == false) {
ib::error() << "Fetch of persistent statistics"
" requested for table "
<< table->name
<< " but the required system tables "
<< TABLE_STATS_NAME_PRINT
<< " and " << INDEX_STATS_NAME_PRINT
<< " are not present or have unexpected"
" structure. Using transient stats instead.";
table->stats_error_printed = true;
ib::error() << "Fetch of persistent statistics"
" requested for table "
<< table->name
<< " but the required system tables "
<< TABLE_STATS_NAME_PRINT
<< " and " << INDEX_STATS_NAME_PRINT
<< " are not present or have unexpected"
" structure. Using transient stats instead.";
table->stats_error_printed = true;
}

goto transient;
@@ -3337,12 +3336,12 @@ dict_stats_update(

if (innodb_table_stats_not_found == false &&
table->stats_error_printed == false) {
ib::error() << "Error fetching persistent statistics"
" for table "
<< table->name
<< " from " TABLE_STATS_NAME_PRINT " and "
INDEX_STATS_NAME_PRINT ": " << ut_strerr(err)
<< ". Using transient stats method instead.";
ib::error() << "Error fetching persistent statistics"
" for table "
<< table->name
<< " from " TABLE_STATS_NAME_PRINT " and "
INDEX_STATS_NAME_PRINT ": " << ut_strerr(err)
<< ". Using transient stats method instead.";
}

goto transient;
@@ -3841,120 +3840,6 @@ dict_stats_rename_table(
return(ret);
}

/*********************************************************************//**
Save defragmentation result.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
dict_stats_save_defrag_summary(
dict_index_t* index) /*!< in: index */
{
dberr_t ret;
lint now = (lint) ut_time();

if (dict_stats_should_ignore_index(index)) {
return DB_SUCCESS;
}

rw_lock_x_lock(dict_operation_lock);
mutex_enter(&dict_sys->mutex);
ret = dict_stats_save_index_stat(index, now, "n_pages_freed",
index->stat_defrag_n_pages_freed,
NULL,
"Number of pages freed during"
" last defragmentation run.",
NULL);

mutex_exit(&dict_sys->mutex);
rw_lock_x_unlock(dict_operation_lock);
return (ret);
}

/*********************************************************************//**
Save defragmentation stats for a given index.
@return DB_SUCCESS or error code */
UNIV_INTERN
dberr_t
dict_stats_save_defrag_stats(
dict_index_t* index) /*!< in: index */
{
dberr_t ret;

if (index->table->ibd_file_missing) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Cannot save defragment stats because "
".ibd file is missing.\n");
return (DB_TABLESPACE_DELETED);
}
if (dict_index_is_corrupted(index)) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Cannot save defragment stats because "
"index is corrupted.\n");
return(DB_CORRUPTION);
}

if (dict_stats_should_ignore_index(index)) {
return DB_SUCCESS;
}

lint now = (lint) ut_time();
mtr_t mtr;
ulint n_leaf_pages=0;
ulint n_leaf_reserved=0;
mtr_start(&mtr);
mtr_s_lock(dict_index_get_lock(index), &mtr);

n_leaf_reserved = btr_get_size_and_reserved(index, BTR_N_LEAF_PAGES,
&n_leaf_pages, &mtr);
mtr_commit(&mtr);

if (n_leaf_reserved == ULINT_UNDEFINED) {
// The index name is different during fast index creation,
// so the stats won't be associated with the right index
// for later use. We just return without saving.
return DB_SUCCESS;
}

rw_lock_x_lock(dict_operation_lock);

mutex_enter(&dict_sys->mutex);
ret = dict_stats_save_index_stat(index, now, "n_page_split",
index->stat_defrag_n_page_split,
NULL,
"Number of new page splits on leaves"
" since last defragmentation.",
NULL);
if (ret != DB_SUCCESS) {
goto end;
}

ret = dict_stats_save_index_stat(
index, now, "n_leaf_pages_defrag",
n_leaf_pages,
NULL,
"Number of leaf pages when this stat is saved to disk",
NULL);
if (ret != DB_SUCCESS) {
goto end;
}

ret = dict_stats_save_index_stat(
index, now, "n_leaf_pages_reserved",
n_leaf_reserved,
NULL,
"Number of pages reserved for this index leaves when this stat "
"is saved to disk",
NULL);

end:
mutex_exit(&dict_sys->mutex);
rw_lock_x_unlock(dict_operation_lock);

return (ret);
}

/*********************************************************************//**
Renames an index in InnoDB persistent stats storage.
This function creates its own transaction and commits it.

@@ -23,10 +23,10 @@ Code used for background table and index stats gathering.
Created Apr 25, 2012 Vasil Dimov
*******************************************************/

#include "dict0dict.h"
#include "dict0dict.h"
#include "dict0stats.h"
#include "dict0stats_bg.h"
#include "dict0defrag_bg.h"
#include "row0mysql.h"
#include "srv0start.h"
#include "ut0new.h"
@@ -45,15 +45,27 @@ Created Apr 25, 2012 Vasil Dimov

/** Event to wake up the stats thread */
os_event_t dict_stats_event = NULL;

/** Variable to initiate shutdown the dict stats thread. Note we don't
use 'srv_shutdown_state' because we want to shutdown dict stats thread
before purge thread. */
bool dict_stats_start_shutdown = false;

/** Event to wait for shutdown of the dict stats thread */
os_event_t dict_stats_shutdown_event = NULL;

#ifdef UNIV_DEBUG
/** Used by SET GLOBAL innodb_dict_stats_disabled_debug = 1; */
my_bool innodb_dict_stats_disabled_debug;

static os_event_t dict_stats_disabled_event;
#endif /* UNIV_DEBUG */

/** This mutex protects the "recalc_pool" variable. */
static ib_mutex_t recalc_pool_mutex;
static ib_mutex_t defrag_pool_mutex;
static mysql_pfs_key_t defrag_pool_mutex_key;

/** The number of tables that can be added to "recalc_pool" before
it is enlarged */
static const ulint RECALC_POOL_INITIAL_SLOTS = 128;
static const ulint DEFRAG_POOL_INITIAL_SLOTS = 128;

/** Allocator type, used by std::vector */
typedef ut_allocator<table_id_t>
@@ -73,39 +85,23 @@ typedef recalc_pool_t::iterator
by background statistics gathering. */
static recalc_pool_t* recalc_pool;

/** Indices whose defrag stats need to be saved to persistent storage.*/
struct defrag_pool_item_t {
table_id_t table_id;
index_id_t index_id;
};

typedef ut_allocator<defrag_pool_item_t>
defrag_pool_allocator_t;
typedef std::vector<defrag_pool_item_t, defrag_pool_allocator_t>
defrag_pool_t;
static defrag_pool_t* defrag_pool;
typedef defrag_pool_t::iterator defrag_pool_iterator_t;

/*****************************************************************//**
Initialize the recalc pool, called once during thread initialization. */
static
void
dict_stats_pool_init()
dict_stats_recalc_pool_init()
/*=========================*/
{
ut_ad(!srv_read_only_mode);
/* JAN: TODO: MySQL 5.7 PSI
const PSI_memory_key key = mem_key_dict_stats_bg_recalc_pool_t;
const PSI_memory_key key2 = mem_key_dict_defrag_pool_t;

recalc_pool = UT_NEW(recalc_pool_t(recalc_pool_allocator_t(key)), key);
defrag_pool = UT_NEW(defrag_pool_t(defrag_pool_allocator_t(key2)), key2);

defrag_pool->reserve(DEFRAG_POOL_INITIAL_SLOTS);
recalc_pool->reserve(RECALC_POOL_INITIAL_SLOTS);
*/
recalc_pool = new std::vector<table_id_t, recalc_pool_allocator_t>();
defrag_pool = new std::vector<defrag_pool_item_t, defrag_pool_allocator_t>();
}

/*****************************************************************//**
@@ -113,16 +109,14 @@ Free the resources occupied by the recalc pool, called once during
thread de-initialization. */
static
void
dict_stats_pool_deinit()
/*====================*/
dict_stats_recalc_pool_deinit()
/*===========================*/
{
ut_ad(!srv_read_only_mode);

recalc_pool->clear();
defrag_pool->clear();

UT_DELETE(recalc_pool);
UT_DELETE(defrag_pool);
}

/*****************************************************************//**
@@ -216,111 +210,6 @@ dict_stats_recalc_pool_del(
mutex_exit(&recalc_pool_mutex);
}

/*****************************************************************//**
Add an index in a table to the defrag pool, which is processed by the
background stats gathering thread. Only the table id and index id are
added to the list, so the table can be closed after being enqueued and
it will be opened when needed. If the table or index does not exist later
(has been DROPped), then it will be removed from the pool and skipped. */
UNIV_INTERN
void
dict_stats_defrag_pool_add(
/*=======================*/
const dict_index_t* index) /*!< in: table to add */
{
defrag_pool_item_t item;

ut_ad(!srv_read_only_mode);

mutex_enter(&defrag_pool_mutex);

/* quit if already in the list */
for (defrag_pool_iterator_t iter = defrag_pool->begin();
iter != defrag_pool->end();
++iter) {
if ((*iter).table_id == index->table->id
&& (*iter).index_id == index->id) {
mutex_exit(&defrag_pool_mutex);
return;
}
}

item.table_id = index->table->id;
item.index_id = index->id;
defrag_pool->push_back(item);

mutex_exit(&defrag_pool_mutex);

os_event_set(dict_stats_event);
}

/*****************************************************************//**
Get an index from the auto defrag pool. The returned index id is removed
from the pool.
@return true if the pool was non-empty and "id" was set, false otherwise */
static
bool
dict_stats_defrag_pool_get(
/*=======================*/
table_id_t* table_id, /*!< out: table id, or unmodified if
list is empty */
index_id_t* index_id) /*!< out: index id, or unmodified if
list is empty */
{
ut_ad(!srv_read_only_mode);

mutex_enter(&defrag_pool_mutex);

if (defrag_pool->empty()) {
mutex_exit(&defrag_pool_mutex);
return(false);
}

defrag_pool_item_t& item = defrag_pool->back();
*table_id = item.table_id;
*index_id = item.index_id;

defrag_pool->pop_back();

mutex_exit(&defrag_pool_mutex);

return(true);
}

/*****************************************************************//**
Delete a given index from the auto defrag pool. */
UNIV_INTERN
void
dict_stats_defrag_pool_del(
/*=======================*/
const dict_table_t* table, /*!<in: if given, remove
all entries for the table */
const dict_index_t* index) /*!< in: if given, remove this index */
{
ut_a((table && !index) || (!table && index));
ut_ad(!srv_read_only_mode);
ut_ad(mutex_own(&dict_sys->mutex));

mutex_enter(&defrag_pool_mutex);

defrag_pool_iterator_t iter = defrag_pool->begin();
while (iter != defrag_pool->end()) {
if ((table && (*iter).table_id == table->id)
|| (index
&& (*iter).table_id == index->table->id
&& (*iter).index_id == index->id)) {
/* erase() invalidates the iterator */
iter = defrag_pool->erase(iter);
if (index)
break;
} else {
iter++;
}
}

mutex_exit(&defrag_pool_mutex);
}

/*****************************************************************//**
Wait until background stats thread has stopped using the specified table.
The caller must have locked the data dictionary using
@@ -352,6 +241,9 @@ dict_stats_thread_init()
ut_a(!srv_read_only_mode);

dict_stats_event = os_event_create(0);
dict_stats_shutdown_event = os_event_create(0);

ut_d(dict_stats_disabled_event = os_event_create(0));

/* The recalc_pool_mutex is acquired from:
1) the background stats gathering thread before any other latch
@@ -369,10 +261,9 @@ dict_stats_thread_init()

mutex_create(LATCH_ID_RECALC_POOL, &recalc_pool_mutex);

/* We choose SYNC_STATS_DEFRAG to be below SYNC_FSP_PAGE. */
mutex_create(LATCH_ID_DEFRAGMENT_MUTEX, &defrag_pool_mutex);
dict_stats_recalc_pool_init();
dict_defrag_pool_init();

dict_stats_pool_init();
}

/*****************************************************************//**
@@ -385,13 +276,21 @@ dict_stats_thread_deinit()
ut_a(!srv_read_only_mode);
ut_ad(!srv_dict_stats_thread_active);

dict_stats_pool_deinit();
dict_stats_recalc_pool_deinit();
dict_defrag_pool_deinit();

mutex_free(&recalc_pool_mutex);
mutex_free(&defrag_pool_mutex);

#ifdef UNIV_DEBUG
os_event_destroy(dict_stats_disabled_event);
dict_stats_disabled_event = NULL;
#endif /* UNIV_DEBUG */

os_event_destroy(dict_stats_event);
os_event_destroy(dict_stats_shutdown_event);
dict_stats_event = NULL;
dict_stats_shutdown_event = NULL;
dict_stats_start_shutdown = false;
}

/*****************************************************************//**
@@ -459,69 +358,43 @@ dict_stats_process_entry_from_recalc_pool()

mutex_enter(&dict_sys->mutex);

table->stats_bg_flag &= ~BG_STAT_IN_PROGRESS;
table->stats_bg_flag = BG_STAT_NONE;

dict_table_close(table, TRUE, FALSE);

mutex_exit(&dict_sys->mutex);
}

/*****************************************************************//**
Get the first index that has been added for updating persistent defrag
stats and eventually save its stats. */
static
#ifdef UNIV_DEBUG
/** Disables dict stats thread. It's used by:
SET GLOBAL innodb_dict_stats_disabled_debug = 1 (0).
@param[in] thd thread handle
@param[in] var pointer to system variable
@param[out] var_ptr where the formal string goes
@param[in] save immediate result from check function */
void
dict_stats_process_entry_from_defrag_pool()
/*=======================================*/
dict_stats_disabled_debug_update(
THD* thd,
struct st_mysql_sys_var* var,
void* var_ptr,
const void* save)
{
table_id_t table_id;
index_id_t index_id;
/* This method is protected by mutex, as every SET GLOBAL .. */
ut_ad(dict_stats_disabled_event != NULL);

ut_ad(!srv_read_only_mode);
const bool disable = *static_cast<const my_bool*>(save);

/* pop the first index from the auto defrag pool */
if (!dict_stats_defrag_pool_get(&table_id, &index_id)) {
/* no index in defrag pool */
return;
const int64_t sig_count = os_event_reset(dict_stats_disabled_event);

innodb_dict_stats_disabled_debug = disable;

if (disable) {
os_event_set(dict_stats_event);
os_event_wait_low(dict_stats_disabled_event, sig_count);
}

dict_table_t* table;

mutex_enter(&dict_sys->mutex);

/* If the table is no longer cached, we've already lost the in
memory stats so there's nothing really to write to disk. */
table = dict_table_open_on_id(table_id, TRUE,
DICT_TABLE_OP_OPEN_ONLY_IF_CACHED);

if (table == NULL) {
mutex_exit(&dict_sys->mutex);
return;
}

/* Check whether table is corrupted */
if (table->corrupted) {
dict_table_close(table, TRUE, FALSE);
mutex_exit(&dict_sys->mutex);
return;
}
mutex_exit(&dict_sys->mutex);

dict_index_t* index = dict_table_find_index_on_id(table, index_id);

if (index == NULL) {
return;
}

/* Check whether index is corrupted */
if (dict_index_is_corrupted(index)) {
dict_table_close(table, FALSE, FALSE);
return;
}

dict_stats_save_defrag_stats(index);
dict_table_close(table, FALSE, FALSE);
}
#endif /* UNIV_DEBUG */


/*****************************************************************//**
This is the thread for background stats gathering. It pops tables, from
@@ -545,7 +418,7 @@ DECLARE_THREAD(dict_stats_thread)(

srv_dict_stats_thread_active = TRUE;

while (!SHUTTING_DOWN()) {
while (!dict_stats_start_shutdown) {

/* Wake up periodically even if not signaled. This is
because we may lose an event - if the below call to
@@ -555,23 +428,44 @@ DECLARE_THREAD(dict_stats_thread)(
os_event_wait_time(
dict_stats_event, MIN_RECALC_INTERVAL * 1000000);

if (SHUTTING_DOWN()) {
#ifdef UNIV_DEBUG
while (innodb_dict_stats_disabled_debug) {
os_event_set(dict_stats_disabled_event);
if (dict_stats_start_shutdown) {
break;
}
os_event_wait_time(
dict_stats_event, 100000);
}
#endif /* UNIV_DEBUG */

if (dict_stats_start_shutdown) {
break;
}

dict_stats_process_entry_from_recalc_pool();

while (defrag_pool->size())
dict_stats_process_entry_from_defrag_pool();
dict_defrag_process_entries_from_defrag_pool();

os_event_reset(dict_stats_event);
}

srv_dict_stats_thread_active = FALSE;

os_event_set(dict_stats_shutdown_event);
my_thread_end();

/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit instead of return(). */
os_thread_exit(NULL);
os_thread_exit();

OS_THREAD_DUMMY_RETURN;
}

/** Shutdown the dict stats thread. */
void
dict_stats_shutdown()
{
dict_stats_start_shutdown = true;
os_event_set(dict_stats_event);
os_event_wait(dict_stats_shutdown_event);
}

@@ -2353,7 +2353,7 @@ DECLARE_THREAD(fil_crypt_thread)(
/* We count the number of threads in os_thread_exit(). A created
thread should always use that to exit and not use return() to exit. */

os_thread_exit(NULL);
os_thread_exit();

OS_THREAD_DUMMY_RETURN;
}

File diff suppressed because it is too large
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -33,8 +33,11 @@ Created 2013-7-26 by Kevin Lewis
|
||||
#include "srv0start.h"
|
||||
#include "ut0new.h"
|
||||
#include "fil0crypt.h"
|
||||
#ifdef UNIV_HOTBACKUP
|
||||
#include "my_sys.h"
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
|
||||
/** Initialize the name and flags of this datafile.
|
||||
/** Initialize the name, size and order of this datafile
|
||||
@param[in] name tablespace name, will be copied
|
||||
@param[in] flags tablespace flags */
|
||||
void
|
||||
@@ -47,6 +50,8 @@ Datafile::init(
|
||||
|
||||
m_name = mem_strdup(name);
|
||||
m_flags = flags;
|
||||
m_encryption_key = NULL;
|
||||
m_encryption_iv = NULL;
|
||||
}
|
||||
|
||||
/** Release the resources. */
|
||||
@@ -60,10 +65,20 @@ Datafile::shutdown()
|
||||
|
||||
free_filepath();
|
||||
|
||||
if (m_encryption_key != NULL) {
|
||||
ut_free(m_encryption_key);
|
||||
m_encryption_key = NULL;
|
||||
}
|
||||
|
||||
if (m_crypt_info) {
|
||||
fil_space_destroy_crypt_data(&m_crypt_info);
|
||||
}
|
||||
|
||||
if (m_encryption_iv != NULL) {
|
||||
ut_free(m_encryption_iv);
|
||||
m_encryption_iv = NULL;
|
||||
}
|
||||
|
||||
free_first_page();
|
||||
}
|
||||
|
||||
@@ -380,13 +395,15 @@ Datafile::free_first_page()
|
||||
space ID and flags. The file should exist and be successfully opened
|
||||
in order for this function to validate it.
|
||||
@param[in] space_id The expected tablespace ID.
|
||||
@param[in] flags The expected tablespace flags.
|
||||
@param[in] flags The expected tablespace flags.
|
||||
@param[in] for_import if it is for importing
|
||||
@retval DB_SUCCESS if tablespace is valid, DB_ERROR if not.
|
||||
m_is_valid is also set true on success, else false. */
|
||||
dberr_t
|
||||
Datafile::validate_to_dd(
|
||||
ulint space_id,
|
||||
ulint flags)
|
||||
ulint space_id,
|
||||
ulint flags,
|
||||
bool for_import)
|
||||
{
|
||||
dberr_t err;
|
||||
|
||||
@@ -397,7 +414,7 @@ Datafile::validate_to_dd(
|
||||
/* Validate this single-table-tablespace with the data dictionary,
|
||||
but do not compare the DATA_DIR flag, in case the tablespace was
|
||||
remotely located. */
|
||||
err = validate_first_page();
|
||||
err = validate_first_page(0, for_import);
|
||||
if (err != DB_SUCCESS) {
|
||||
return(err);
|
||||
}
|
||||
@@ -441,14 +458,52 @@ Datafile::validate_for_recovery()
|
||||
ut_ad(is_open());
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
err = validate_first_page();
|
||||
err = validate_first_page(0, false);
|
||||
|
||||
switch (err) {
|
||||
case DB_SUCCESS:
|
||||
case DB_TABLESPACE_EXISTS:
|
||||
#ifdef UNIV_HOTBACKUP
|
||||
err = restore_from_doublewrite(0);
|
||||
if (err != DB_SUCCESS) {
|
||||
return(err);
|
||||
}
|
||||
/* Free the previously read first page and then re-validate. */
|
||||
free_first_page();
|
||||
err = validate_first_page(0, false);
|
||||
if (err == DB_SUCCESS) {
|
||||
std::string filepath = fil_space_get_first_path(
|
||||
m_space_id);
|
||||
if (is_intermediate_file(filepath.c_str())) {
|
||||
/* Existing intermediate file with same space
|
||||
id is obsolete.*/
|
||||
if (fil_space_free(m_space_id, FALSE)) {
|
||||
err = DB_SUCCESS;
|
||||
}
|
||||
} else {
|
||||
filepath.assign(m_filepath);
|
||||
if (is_intermediate_file(filepath.c_str())) {
|
||||
/* New intermediate file with same space id
|
||||
shall be ignored.*/
|
||||
err = DB_TABLESPACE_EXISTS;
|
||||
/* Set all bits of 'flags' as a special
|
||||
indicator for "ignore tablespace". Hopefully
|
||||
InnoDB will never use all bits or at least all
|
||||
bits set will not be a meaningful setting
|
||||
otherwise.*/
|
||||
m_flags = ~0;
|
||||
}
|
||||
}
|
||||
}
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
break;
|
||||
|
||||
default:
|
||||
/* For encryption tablespace, we skip the retry step,
|
||||
since it is only because the keyring is not ready. */
|
||||
if (FSP_FLAGS_GET_ENCRYPTION(m_flags)) {
|
||||
return(err);
|
||||
}
|
||||
/* Re-open the file in read-write mode Attempt to restore
|
||||
page 0 from doublewrite and read the space ID from a survey
|
||||
of the first few pages. */
|
||||
@@ -476,7 +531,7 @@ Datafile::validate_for_recovery()
|
||||
|
||||
/* Free the previously read first page and then re-validate. */
|
||||
free_first_page();
|
||||
err = validate_first_page();
|
||||
err = validate_first_page(0, false);
|
||||
}
|
||||
|
||||
if (err == DB_SUCCESS) {
|
||||
@@ -491,12 +546,14 @@ tablespace is opened. This occurs before the fil_space_t is created
|
||||
so the Space ID found here must not already be open.
|
||||
m_is_valid is set true on success, else false.
|
||||
@param[out] flush_lsn contents of FIL_PAGE_FILE_FLUSH_LSN
|
||||
@param[in] for_import if it is for importing
|
||||
(only valid for the first file of the system tablespace)
|
||||
@retval DB_SUCCESS on if the datafile is valid
|
||||
@retval DB_CORRUPTION if the datafile is not readable
|
||||
@retval DB_TABLESPACE_EXISTS if there is a duplicate space_id */
|
||||
dberr_t
|
||||
Datafile::validate_first_page(lsn_t* flush_lsn)
|
||||
Datafile::validate_first_page(lsn_t* flush_lsn,
|
||||
bool for_import)
|
||||
{
|
||||
char* prev_name;
|
||||
char* prev_filepath;
|
||||
@@ -585,6 +642,51 @@ Datafile::validate_first_page(lsn_t* flush_lsn)
|
||||
|
||||
}
|
||||
|
||||
#ifdef MYSQL_ENCRYPTION
|
||||
/* For encrypted tablespace, check the encryption info in the
|
||||
first page can be decrypt by master key, otherwise, this table
|
||||
can't be open. And for importing, we skip checking it. */
|
||||
if (FSP_FLAGS_GET_ENCRYPTION(m_flags) && !for_import) {
|
||||
m_encryption_key = static_cast<byte*>(
|
||||
ut_zalloc_nokey(ENCRYPTION_KEY_LEN));
|
||||
m_encryption_iv = static_cast<byte*>(
|
||||
ut_zalloc_nokey(ENCRYPTION_KEY_LEN));
|
||||
#ifdef UNIV_ENCRYPT_DEBUG
|
||||
fprintf(stderr, "Got from file %lu:", m_space_id);
|
||||
#endif
|
||||
if (!fsp_header_get_encryption_key(m_flags,
|
||||
m_encryption_key,
|
||||
m_encryption_iv,
|
||||
m_first_page)) {
|
||||
ib::error()
|
||||
<< "Encryption information in"
|
||||
<< " datafile: " << m_filepath
|
||||
<< " can't be decrypted"
|
||||
<< " , please confirm the keyfile"
|
||||
<< " is match and keyring plugin"
|
||||
<< " is loaded.";
|
||||
|
||||
m_is_valid = false;
|
||||
free_first_page();
|
||||
ut_free(m_encryption_key);
|
||||
ut_free(m_encryption_iv);
|
||||
m_encryption_key = NULL;
|
||||
m_encryption_iv = NULL;
|
||||
return(DB_CORRUPTION);
|
||||
}
|
||||
|
||||
if (recv_recovery_is_on()
|
||||
&& memcmp(m_encryption_key,
|
||||
m_encryption_iv,
|
||||
ENCRYPTION_KEY_LEN) == 0) {
|
||||
ut_free(m_encryption_key);
|
||||
ut_free(m_encryption_iv);
|
||||
m_encryption_key = NULL;
|
||||
m_encryption_iv = NULL;
|
||||
}
|
||||
}
|
||||
#endif /* MYSQL_ENCRYPTION */
|
||||
|
||||
if (fil_space_read_name_and_filepath(
|
||||
m_space_id, &prev_name, &prev_filepath)) {
|
||||
|
||||
@@ -988,13 +1090,11 @@ RemoteDatafile::create_link_file(
|
||||
} else {
|
||||
link_filepath = fil_make_filepath(NULL, name, ISL, false);
|
||||
}
|
||||
|
||||
if (link_filepath == NULL) {
|
||||
return(DB_ERROR);
|
||||
}
|
||||
|
||||
prev_filepath = read_link_file(link_filepath);
|
||||
|
||||
if (prev_filepath) {
|
||||
/* Truncate will call this with an existing
|
||||
link file which contains the same filepath. */
|
||||
@@ -1007,14 +1107,15 @@ RemoteDatafile::create_link_file(
|
||||
}
|
||||
|
||||
/** Check if the file already exists. */
|
||||
FILE* file = NULL;
|
||||
bool exists;
|
||||
os_file_type_t ftype;
|
||||
FILE* file = NULL;
|
||||
bool exists;
|
||||
os_file_type_t ftype;
|
||||
|
||||
success = os_file_status(link_filepath, &exists, &ftype);
|
||||
ulint error = 0;
|
||||
|
||||
if (success && !exists) {
|
||||
|
||||
file = fopen(link_filepath, "w");
|
||||
if (file == NULL) {
|
||||
/* This call will print its own error message */
|
||||
@@ -1025,6 +1126,7 @@ RemoteDatafile::create_link_file(
|
||||
}
|
||||
|
||||
if (error != 0) {
|
||||
|
||||
ib::error() << "Cannot create file " << link_filepath << ".";
|
||||
|
||||
if (error == OS_FILE_ALREADY_EXISTS) {
|
||||
@@ -1050,7 +1152,7 @@ RemoteDatafile::create_link_file(
|
||||
error = os_file_get_last_error(true);
|
||||
ib::error() <<
|
||||
"Cannot write link file: "
|
||||
<< filepath;
|
||||
<< link_filepath << " filepath: " << filepath;
|
||||
err = DB_ERROR;
|
||||
}
|
||||
|
||||
|
||||
@@ -31,30 +31,29 @@ Created 11/29/1995 Heikki Tuuri
|
||||
#include "fsp0fsp.ic"
|
||||
#endif
|
||||
|
||||
#ifdef UNIV_HOTBACKUP
|
||||
# include "fut0lst.h"
|
||||
#else /* UNIV_HOTBACKUP */
|
||||
#include "buf0buf.h"
|
||||
#include "fil0fil.h"
|
||||
#include "fil0crypt.h"
|
||||
#include "mtr0log.h"
|
||||
#include "ut0byte.h"
|
||||
#include "page0page.h"
|
||||
#include "page0zip.h"
|
||||
#ifdef UNIV_HOTBACKUP
|
||||
# include "fut0lst.h"
|
||||
#else /* UNIV_HOTBACKUP */
|
||||
# include "fut0fut.h"
|
||||
# include "srv0srv.h"
|
||||
# include "srv0start.h"
|
||||
# include "ibuf0ibuf.h"
|
||||
# include "btr0btr.h"
|
||||
# include "btr0sea.h"
|
||||
# include "dict0boot.h"
|
||||
# include "log0log.h"
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
#include "dict0mem.h"
|
||||
#include "fut0fut.h"
|
||||
#include "srv0srv.h"
|
||||
#include "srv0start.h"
|
||||
#include "ibuf0ibuf.h"
|
||||
#include "btr0btr.h"
|
||||
#include "btr0sea.h"
|
||||
#include "dict0boot.h"
|
||||
#include "log0log.h"
|
||||
#include "fsp0sysspace.h"
|
||||
#include "dict0mem.h"
|
||||
#include "fsp0types.h"
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
// JAN: MySQL 5.7 Encryption
|
||||
// #include <my_aes.h>
|
||||
|
||||
/** Returns an extent to the free list of a space.
|
||||
@param[in] page_id page id in the extent
|
||||
@@ -148,7 +147,7 @@ fseg_alloc_free_page_low(
|
||||
, ibool has_done_reservation
|
||||
#endif /* UNIV_DEBUG */
|
||||
)
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/** Gets a pointer to the space header and x-locks its page.
|
||||
@param[in] id space id
|
||||
@@ -205,6 +204,7 @@ fsp_flags_to_dict_tf(
|
||||
bool page_compressed = FSP_FLAGS_GET_PAGE_COMPRESSION(fsp_flags);
|
||||
ulint comp_level = FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(fsp_flags);
|
||||
bool atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(fsp_flags);
|
||||
|
||||
/* FSP_FLAGS_GET_TEMPORARY(fsp_flags) does not have an equivalent
|
||||
flag position in the table flags. But it would go into flags2 if
|
||||
any code is created where that is needed. */
|
||||
@@ -215,6 +215,7 @@ fsp_flags_to_dict_tf(
|
||||
|
||||
return(flags);
|
||||
}
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
/** Validate the tablespace flags.
|
||||
These flags are stored in the tablespace header at offset FSP_SPACE_FLAGS.
|
||||
@@ -234,11 +235,17 @@ fsp_flags_is_valid(
|
||||
bool has_data_dir = FSP_FLAGS_HAS_DATA_DIR(flags);
|
||||
bool is_shared = FSP_FLAGS_GET_SHARED(flags);
|
||||
bool is_temp = FSP_FLAGS_GET_TEMPORARY(flags);
|
||||
bool is_encryption = FSP_FLAGS_GET_ENCRYPTION(flags);
|
||||
ulint unused = FSP_FLAGS_GET_UNUSED(flags);
|
||||
bool page_compression = FSP_FLAGS_GET_PAGE_COMPRESSION(flags);
|
||||
ulint page_compression_level = FSP_FLAGS_GET_PAGE_COMPRESSION_LEVEL(flags);
|
||||
ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(flags);
|
||||
|
||||
const char *file;
|
||||
ulint line;
|
||||
|
||||
#define GOTO_ERROR file = __FILE__; line = __LINE__; goto err_exit;
|
||||
|
||||
DBUG_EXECUTE_IF("fsp_flags_is_valid_failure", return(false););
|
||||
|
||||
/* The Antelope row formats REDUNDANT and COMPACT did
|
||||
@@ -254,60 +261,55 @@ fsp_flags_is_valid(
|
||||
and externally stored parts. So if it is Post_antelope, it uses
|
||||
Atomic BLOBs. */
|
||||
if (post_antelope != atomic_blobs) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted atomic_blobs %d\n",
|
||||
flags, atomic_blobs);
|
||||
GOTO_ERROR;
|
||||
return(false);
|
||||
}
|
||||
|
||||
/* Make sure there are no bits that we do not know about. */
|
||||
if (unused != 0) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted unused %lu\n",
|
||||
flags, unused);
|
||||
return(false);
|
||||
GOTO_ERROR;
|
||||
}
|
||||
|
||||
/* The zip ssize can be zero if it is other than compressed row format,
|
||||
or it could be from 1 to the max. */
|
||||
if (zip_ssize > PAGE_ZIP_SSIZE_MAX) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted zip_ssize %lu max %d\n",
|
||||
flags, zip_ssize, PAGE_ZIP_SSIZE_MAX);
|
||||
return(false);
|
||||
GOTO_ERROR;
|
||||
}
|
||||
|
||||
/* The actual page size must be within 4k and 16K (3 =< ssize =< 5). */
|
||||
if (page_ssize != 0
|
||||
&& (page_ssize < UNIV_PAGE_SSIZE_MIN
|
||||
|| page_ssize > UNIV_PAGE_SSIZE_MAX)) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_ssize %lu min:%lu:max:%lu\n",
|
||||
flags, page_ssize, UNIV_PAGE_SSIZE_MIN, UNIV_PAGE_SSIZE_MAX);
|
||||
return(false);
|
||||
GOTO_ERROR;
|
||||
}
|
||||
|
||||
/* Only single-table tablespaces use the DATA DIRECTORY clause.
|
||||
It is not compatible with the TABLESPACE clause. Nor is it
|
||||
compatible with the TEMPORARY clause. */
|
||||
if (has_data_dir && (is_shared || is_temp)) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted has_data_dir %d is_shared %d is_temp %d\n",
|
||||
flags, has_data_dir, is_shared, is_temp);
|
||||
GOTO_ERROR;
|
||||
return(false);
|
||||
}
|
||||
|
||||
/* Only single-table and not temp tablespaces use the encryption
|
||||
clause. */
|
||||
if (is_encryption && (is_shared || is_temp)) {
|
||||
GOTO_ERROR;
|
||||
}
|
||||
|
||||
/* Page compression level requires page compression and atomic blobs
|
||||
to be set */
|
||||
if (page_compression_level || page_compression) {
|
||||
if (page_compression_level || page_compression) {
|
||||
if (!page_compression || !atomic_blobs) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted page_compression %d\n"
|
||||
"InnoDB: Error: page_compression_level %lu atomic_blobs %d\n",
|
||||
flags, page_compression, page_compression_level, atomic_blobs);
|
||||
return(false);
|
||||
GOTO_ERROR;
|
||||
}
|
||||
}
|
||||
|
||||
if (atomic_writes > ATOMIC_WRITES_OFF) {
|
||||
fprintf(stderr, "InnoDB: Error: Tablespace flags %lu corrupted atomic_writes %lu\n",
|
||||
flags, atomic_writes);
|
||||
GOTO_ERROR;
|
||||
return (false);
|
||||
}
|
||||
|
||||
#if UNIV_FORMAT_MAX != UNIV_FORMAT_B
|
||||
# error UNIV_FORMAT_MAX != UNIV_FORMAT_B, Add more validations.
|
||||
#endif
|
||||
@@ -316,6 +318,24 @@ fsp_flags_is_valid(
|
||||
#endif
|
||||
|
||||
return(true);
|
||||
|
||||
err_exit:
|
||||
ib::error() << "Tablespace flags: " << flags << " corrupted "
|
||||
<< " in file: " << file << " line: " << line
|
||||
<< " post_antelope: " << post_antelope
|
||||
<< " atomic_blobs: " << atomic_blobs
|
||||
<< " unused: " << unused
|
||||
<< " zip_ssize: " << zip_ssize << " max: " << PAGE_ZIP_SSIZE_MAX
|
||||
<< " page_ssize: " << page_ssize
|
||||
<< " " << UNIV_PAGE_SSIZE_MIN << ":" << UNIV_PAGE_SSIZE_MAX
|
||||
<< " has_data_dir: " << has_data_dir
|
||||
<< " is_shared: " << is_shared
|
||||
<< " is_temp: " << is_temp
|
||||
<< " is_encryption: " << is_encryption
|
||||
<< " page_compressed: " << page_compression
|
||||
<< " page_compression_level: " << page_compression_level
|
||||
<< " atomic_writes: " << atomic_writes;
|
||||
return (false);
|
||||
}
|
||||
|
||||
/** Check if tablespace is system temporary.
|
||||
@@ -351,6 +371,7 @@ fsp_is_file_per_table(
|
||||
&& !fsp_is_shared_tablespace(fsp_flags));
|
||||
}
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
#ifdef UNIV_DEBUG
|
||||
|
||||
/** Skip some of the sanity checks that are time consuming even in debug mode
|
||||
@@ -588,7 +609,7 @@ xdes_init(
|
||||
the same as the tablespace header
|
||||
@return pointer to the extent descriptor, NULL if the page does not
|
||||
exist in the space or if the offset exceeds free limit */
|
||||
UNIV_INLINE MY_ATTRIBUTE((nonnull(1,4), warn_unused_result))
|
||||
UNIV_INLINE MY_ATTRIBUTE((warn_unused_result))
|
||||
xdes_t*
|
||||
xdes_get_descriptor_with_space_hdr(
|
||||
fsp_header_t* sp_header,
|
||||
@@ -883,6 +904,202 @@ fsp_header_init_fields(
|
||||
}
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/** Get the offset of encrytion information in page 0.
|
||||
@param[in] page_size page size.
|
||||
@return offset on success, otherwise 0. */
|
||||
static
|
||||
ulint
|
||||
fsp_header_get_encryption_offset(
|
||||
const page_size_t& page_size)
|
||||
{
|
||||
ulint offset;
|
||||
#ifdef UNIV_DEBUG
|
||||
ulint left_size;
|
||||
#endif
|
||||
|
||||
offset = XDES_ARR_OFFSET + XDES_SIZE * xdes_arr_size(page_size);
|
||||
#ifdef UNIV_DEBUG
|
||||
left_size = page_size.physical() - FSP_HEADER_OFFSET - offset
|
||||
- FIL_PAGE_DATA_END;
|
||||
|
||||
ut_ad(left_size >= ENCRYPTION_INFO_SIZE_V2);
|
||||
#endif
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
#if 0 /* MySQL 5.7 Encryption */
|
||||
/** Fill the encryption info.
|
||||
@param[in] space tablespace
|
||||
@param[in,out] encrypt_info buffer for encrypt key.
|
||||
@return true if success. */
|
||||
bool
|
||||
fsp_header_fill_encryption_info(
|
||||
fil_space_t* space,
|
||||
byte* encrypt_info)
|
||||
{
|
||||
byte* ptr;
|
||||
lint elen;
|
||||
ulint master_key_id;
|
||||
byte* master_key;
|
||||
byte key_info[ENCRYPTION_KEY_LEN * 2];
|
||||
ulint crc;
|
||||
Encryption::Version version;
|
||||
#ifdef UNIV_ENCRYPT_DEBUG
|
||||
const byte* data;
|
||||
ulint i;
|
||||
#endif
|
||||
|
||||
/* Get master key from key ring */
|
||||
Encryption::get_master_key(&master_key_id, &master_key, &version);
|
||||
if (master_key == NULL) {
|
||||
return(false);
|
||||
}
|
||||
|
||||
memset(encrypt_info, 0, ENCRYPTION_INFO_SIZE_V2);
|
||||
memset(key_info, 0, ENCRYPTION_KEY_LEN * 2);
|
||||
|
||||
/* Use the new master key to encrypt the tablespace
|
||||
key. */
|
||||
ut_ad(encrypt_info != NULL);
|
||||
ptr = encrypt_info;
|
||||
|
||||
/* Write magic header. */
|
||||
if (version == Encryption::ENCRYPTION_VERSION_1) {
|
||||
memcpy(ptr, ENCRYPTION_KEY_MAGIC_V1, ENCRYPTION_MAGIC_SIZE);
|
||||
} else {
|
||||
memcpy(ptr, ENCRYPTION_KEY_MAGIC_V2, ENCRYPTION_MAGIC_SIZE);
|
||||
}
|
||||
ptr += ENCRYPTION_MAGIC_SIZE;
|
||||
|
||||
/* Write master key id. */
|
||||
mach_write_to_4(ptr, master_key_id);
|
||||
ptr += sizeof(ulint);
|
||||
|
||||
/* Write server uuid. */
|
||||
if (version == Encryption::ENCRYPTION_VERSION_2) {
|
||||
memcpy(ptr, Encryption::uuid, ENCRYPTION_SERVER_UUID_LEN);
|
||||
ptr += ENCRYPTION_SERVER_UUID_LEN;
|
||||
}
|
||||
|
||||
/* Write tablespace key to temp space. */
|
||||
memcpy(key_info,
|
||||
space->encryption_key,
|
||||
ENCRYPTION_KEY_LEN);
|
||||
|
||||
/* Write tablespace iv to temp space. */
|
||||
memcpy(key_info + ENCRYPTION_KEY_LEN,
|
||||
space->encryption_iv,
|
||||
ENCRYPTION_KEY_LEN);
|
||||
|
||||
#ifdef UNIV_ENCRYPT_DEBUG
|
||||
fprintf(stderr, "Set %lu:%lu ",space->id,
|
||||
Encryption::master_key_id);
|
||||
for (data = (const byte*) master_key, i = 0;
|
||||
i < ENCRYPTION_KEY_LEN; i++)
|
||||
fprintf(stderr, "%02lx", (ulong)*data++);
|
||||
fprintf(stderr, " ");
|
||||
for (data = (const byte*) space->encryption_key,
|
||||
i = 0; i < ENCRYPTION_KEY_LEN; i++)
|
||||
fprintf(stderr, "%02lx", (ulong)*data++);
|
||||
fprintf(stderr, " ");
|
||||
for (data = (const byte*) space->encryption_iv,
|
||||
i = 0; i < ENCRYPTION_KEY_LEN; i++)
|
||||
fprintf(stderr, "%02lx", (ulong)*data++);
|
||||
fprintf(stderr, "\n");
|
||||
#endif
|
||||
/* Encrypt tablespace key and iv. */
|
||||
elen = my_aes_encrypt(
|
||||
key_info,
|
||||
ENCRYPTION_KEY_LEN * 2,
|
||||
ptr,
|
||||
master_key,
|
||||
ENCRYPTION_KEY_LEN,
|
||||
my_aes_256_ecb,
|
||||
NULL, false);
|
||||
|
||||
if (elen == MY_AES_BAD_DATA) {
|
||||
my_free(master_key);
|
||||
return(false);
|
||||
}
|
||||
|
||||
ptr += ENCRYPTION_KEY_LEN * 2;
|
||||
|
||||
/* Write checksum bytes. */
|
||||
crc = ut_crc32(key_info, ENCRYPTION_KEY_LEN * 2);
|
||||
mach_write_to_4(ptr, crc);
|
||||
|
||||
my_free(master_key);
|
||||
return(true);
|
||||
}
|
||||
#endif /* ! */
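For orientation, here is a minimal sketch of the byte layout that fsp_header_fill_encryption_info() produces: magic, master key id, server uuid (V2 only), the AES-ECB-encrypted tablespace key and iv, then a CRC32 of the plaintext key and iv. The field widths, the magic placeholder and the big-endian writer below are illustrative assumptions; the real code uses the Encryption constants and mach_write_to_4(), and it advances the pointer by sizeof(ulint) after writing the 4-byte key id.

#include <cstddef>
#include <cstdint>
#include <cstring>

// Illustrative sizes (assumptions, not the real MySQL 5.7 constants).
enum { MAGIC_SIZE = 3, UUID_LEN = 36, KEY_LEN = 32 };

// Big-endian 4-byte write, in the spirit of mach_write_to_4().
static void write_be4(uint8_t* p, uint32_t v) {
	p[0] = uint8_t(v >> 24); p[1] = uint8_t(v >> 16);
	p[2] = uint8_t(v >> 8);  p[3] = uint8_t(v);
}

// Lay out: magic | master key id | server uuid | encrypted(key || iv) | crc.
std::size_t layout_encryption_info_v2(uint8_t* out, uint32_t master_key_id,
				      const char* server_uuid,
				      const uint8_t* encrypted_key_iv,
				      uint32_t crc) {
	uint8_t* p = out;
	std::memcpy(p, "###", MAGIC_SIZE);             p += MAGIC_SIZE;  // magic (placeholder)
	write_be4(p, master_key_id);                   p += 4;
	std::memcpy(p, server_uuid, UUID_LEN);         p += UUID_LEN;
	std::memcpy(p, encrypted_key_iv, 2 * KEY_LEN); p += 2 * KEY_LEN;
	write_be4(p, crc);                             p += 4;
	return std::size_t(p - out);                   // bytes written
}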
|
||||
|
||||
/** Rotate the encryption info in the space header.
|
||||
@param[in] space tablespace
|
||||
@param[in] encrypt_info buffer for re-encrypt key.
|
||||
@param[in,out] mtr mini-transaction
|
||||
@return true if success. */
|
||||
bool
|
||||
fsp_header_rotate_encryption(
|
||||
fil_space_t* space,
|
||||
byte* encrypt_info,
|
||||
mtr_t* mtr)
|
||||
{
|
||||
buf_block_t* block;
|
||||
ulint offset;
|
||||
|
||||
ut_ad(mtr);
|
||||
|
||||
const page_size_t page_size(space->flags);
|
||||
|
||||
#if MYSQL_ENCRYPTION
|
||||
page_t* page;
|
||||
ulint master_key_id;
|
||||
ut_ad(space->encryption_type != Encryption::NONE);
|
||||
/* Fill encryption info. */
|
||||
if (!fsp_header_fill_encryption_info(space,
|
||||
encrypt_info)) {
|
||||
return(false);
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Save the encryption info to the page 0. */
|
||||
block = buf_page_get(page_id_t(space->id, 0),
|
||||
page_size,
|
||||
RW_SX_LATCH, mtr);
|
||||
buf_block_dbg_add_level(block, SYNC_FSP_PAGE);
|
||||
ut_ad(space->id == page_get_space_id(buf_block_get_frame(block)));
|
||||
|
||||
offset = fsp_header_get_encryption_offset(page_size);
|
||||
ut_ad(offset != 0 && offset < UNIV_PAGE_SIZE);
|
||||
|
||||
|
||||
#if MYSQL_ENCRYPTION
|
||||
page = buf_block_get_frame(block);
|
||||
/* If we are in recovery, skip tablespaces whose master key
has already been rotated. */
|
||||
master_key_id = mach_read_from_4(
|
||||
page + offset + ENCRYPTION_MAGIC_SIZE);
|
||||
if (recv_recovery_is_on()
|
||||
&& master_key_id == Encryption::master_key_id) {
|
||||
ut_ad(memcmp(page + offset,
|
||||
ENCRYPTION_KEY_MAGIC_V1,
|
||||
ENCRYPTION_MAGIC_SIZE) == 0
|
||||
|| memcmp(page + offset,
|
||||
ENCRYPTION_KEY_MAGIC_V2,
|
||||
ENCRYPTION_MAGIC_SIZE) == 0);
|
||||
return(true);
|
||||
}
|
||||
|
||||
mlog_write_string(page + offset,
|
||||
encrypt_info,
|
||||
ENCRYPTION_INFO_SIZE_V2,
|
||||
mtr);
|
||||
#endif /* MYSQL_ENCRYPTION */
|
||||
|
||||
return(true);
|
||||
}
|
||||
|
||||
/** Initializes the space header of a new created space and creates also the
|
||||
insert buffer tree root if space == 0.
|
||||
@param[in] space_id space id
|
||||
@@ -944,8 +1161,33 @@ fsp_header_init(
|
||||
fsp_fill_free_list(!is_system_tablespace(space_id),
|
||||
space, header, mtr);
|
||||
|
||||
#if 0 /* MySQL 5.7 Encryption */
|
||||
/* For an encrypted tablespace, we need to save the
encryption info to page 0. */
|
||||
if (FSP_FLAGS_GET_ENCRYPTION(space->flags)) {
|
||||
ulint offset = fsp_header_get_encryption_offset(page_size);
|
||||
byte encryption_info[ENCRYPTION_INFO_SIZE_V2];
|
||||
|
||||
if (offset == 0)
|
||||
return(false);
|
||||
|
||||
if (!fsp_header_fill_encryption_info(space,
|
||||
encryption_info)) {
|
||||
space->encryption_type = Encryption::NONE;
|
||||
memset(space->encryption_key, 0, ENCRYPTION_KEY_LEN);
|
||||
memset(space->encryption_iv, 0, ENCRYPTION_KEY_LEN);
|
||||
return(false);
|
||||
}
|
||||
|
||||
mlog_write_string(page + offset,
|
||||
encryption_info,
|
||||
ENCRYPTION_INFO_SIZE_V2,
|
||||
mtr);
|
||||
}
|
||||
#endif /* ! */
|
||||
|
||||
if (space_id == srv_sys_space.space_id()) {
|
||||
if (btr_create(DICT_CLUSTERED | DICT_IBUF,
|
||||
if (btr_create(DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,
|
||||
0, univ_page_size, DICT_IBUF_ID_MIN + space_id,
|
||||
dict_ind_redundant, NULL, mtr) == FIL_NULL) {
|
||||
return(false);
|
||||
@@ -997,6 +1239,164 @@ fsp_header_get_page_size(
|
||||
return(page_size_t(fsp_header_get_flags(page)));
|
||||
}
|
||||
|
||||
#if 0 /* MySQL 5.7 Encryption */
|
||||
/** Decoding the encryption info
|
||||
from the first page of a tablespace.
|
||||
@param[in/out] key key
|
||||
@param[in/out] iv iv
|
||||
@param[in] encryption_info encryption info.
|
||||
@return true if success */
|
||||
bool
|
||||
fsp_header_decode_encryption_info(
|
||||
byte* key,
|
||||
byte* iv,
|
||||
byte* encryption_info)
|
||||
{
|
||||
byte* ptr;
|
||||
ulint master_key_id;
|
||||
byte* master_key = NULL;
|
||||
lint elen;
|
||||
byte key_info[ENCRYPTION_KEY_LEN * 2];
|
||||
ulint crc1;
|
||||
ulint crc2;
|
||||
char srv_uuid[ENCRYPTION_SERVER_UUID_LEN + 1];
|
||||
Encryption::Version version;
|
||||
#ifdef UNIV_ENCRYPT_DEBUG
|
||||
const byte* data;
|
||||
ulint i;
|
||||
#endif
|
||||
|
||||
ptr = encryption_info;
|
||||
|
||||
/* For compatibility with 5.7.11, we need to handle the
|
||||
encryption information which was created in that old version. */
|
||||
if (memcmp(ptr, ENCRYPTION_KEY_MAGIC_V1,
|
||||
ENCRYPTION_MAGIC_SIZE) == 0) {
|
||||
version = Encryption::ENCRYPTION_VERSION_1;
|
||||
} else {
|
||||
version = Encryption::ENCRYPTION_VERSION_2;
|
||||
}
|
||||
/* Check magic. */
|
||||
if (version == Encryption::ENCRYPTION_VERSION_2
|
||||
&& memcmp(ptr, ENCRYPTION_KEY_MAGIC_V2, ENCRYPTION_MAGIC_SIZE) != 0) {
|
||||
/* We do not report an error during recovery, since the
encryption info may not have been written to the data file
yet when the table is newly created. */
|
||||
if (!recv_recovery_is_on()) {
|
||||
return(false);
|
||||
} else {
|
||||
return(true);
|
||||
}
|
||||
}
|
||||
ptr += ENCRYPTION_MAGIC_SIZE;
|
||||
|
||||
/* Get master key id. */
|
||||
master_key_id = mach_read_from_4(ptr);
|
||||
ptr += sizeof(ulint);
|
||||
|
||||
/* Get server uuid. */
|
||||
if (version == Encryption::ENCRYPTION_VERSION_2) {
|
||||
memset(srv_uuid, 0, ENCRYPTION_SERVER_UUID_LEN + 1);
|
||||
memcpy(srv_uuid, ptr, ENCRYPTION_SERVER_UUID_LEN);
|
||||
ptr += ENCRYPTION_SERVER_UUID_LEN;
|
||||
}
|
||||
|
||||
/* Get master key by key id. */
|
||||
memset(key_info, 0, ENCRYPTION_KEY_LEN * 2);
|
||||
if (version == Encryption::ENCRYPTION_VERSION_1) {
|
||||
Encryption::get_master_key(master_key_id, NULL, &master_key);
|
||||
} else {
|
||||
Encryption::get_master_key(master_key_id, srv_uuid, &master_key);
|
||||
}
|
||||
if (master_key == NULL) {
|
||||
return(false);
|
||||
}
|
||||
|
||||
#ifdef UNIV_ENCRYPT_DEBUG
|
||||
fprintf(stderr, "%lu ", master_key_id);
|
||||
for (data = (const byte*) master_key, i = 0;
|
||||
i < ENCRYPTION_KEY_LEN; i++)
|
||||
fprintf(stderr, "%02lx", (ulong)*data++);
|
||||
#endif
|
||||
|
||||
/* Decrypt tablespace key and iv. */
|
||||
elen = my_aes_decrypt(
|
||||
ptr,
|
||||
ENCRYPTION_KEY_LEN * 2,
|
||||
key_info,
|
||||
master_key,
|
||||
ENCRYPTION_KEY_LEN,
|
||||
my_aes_256_ecb, NULL, false);
|
||||
|
||||
if (elen == MY_AES_BAD_DATA) {
|
||||
my_free(master_key);
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
/* Check checksum bytes. */
|
||||
ptr += ENCRYPTION_KEY_LEN * 2;
|
||||
|
||||
crc1 = mach_read_from_4(ptr);
|
||||
crc2 = ut_crc32(key_info, ENCRYPTION_KEY_LEN * 2);
|
||||
if (crc1 != crc2) {
|
||||
ib::error() << "Failed to decrypt encryption information,"
<< " please check that the key file has not been changed!";
|
||||
return(false);
|
||||
}
|
||||
|
||||
/* Get tablespace key */
|
||||
memcpy(key, key_info, ENCRYPTION_KEY_LEN);
|
||||
|
||||
/* Get tablespace iv */
|
||||
memcpy(iv, key_info + ENCRYPTION_KEY_LEN,
|
||||
ENCRYPTION_KEY_LEN);
|
||||
|
||||
#ifdef UNIV_ENCRYPT_DEBUG
|
||||
fprintf(stderr, " ");
|
||||
for (data = (const byte*) key,
|
||||
i = 0; i < ENCRYPTION_KEY_LEN; i++)
|
||||
fprintf(stderr, "%02lx", (ulong)*data++);
|
||||
fprintf(stderr, " ");
|
||||
for (data = (const byte*) iv,
|
||||
i = 0; i < ENCRYPTION_KEY_LEN; i++)
|
||||
fprintf(stderr, "%02lx", (ulong)*data++);
|
||||
fprintf(stderr, "\n");
|
||||
#endif
|
||||
|
||||
my_free(master_key);
|
||||
|
||||
if (Encryption::master_key_id < master_key_id) {
|
||||
Encryption::master_key_id = master_key_id;
|
||||
memcpy(Encryption::uuid, srv_uuid, ENCRYPTION_SERVER_UUID_LEN);
|
||||
}
|
||||
|
||||
return(true);
|
||||
}
|
||||
|
||||
/** Reads the encryption key from the first page of a tablespace.
|
||||
@param[in] fsp_flags tablespace flags
|
||||
@param[in/out] key tablespace key
|
||||
@param[in/out] iv tablespace iv
|
||||
@param[in] page first page of a tablespace
|
||||
@return true if success */
|
||||
bool
|
||||
fsp_header_get_encryption_key(
|
||||
ulint fsp_flags,
|
||||
byte* key,
|
||||
byte* iv,
|
||||
page_t* page)
|
||||
{
|
||||
ulint offset;
|
||||
const page_size_t page_size(fsp_flags);
|
||||
offset = fsp_header_get_encryption_offset(page_size);
|
||||
if (offset == 0) {
|
||||
return(false);
|
||||
}
|
||||
|
||||
return(fsp_header_decode_encryption_info(key, iv, page + offset));
|
||||
}
|
||||
#endif /* ! */
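Reading the key back in fsp_header_get_encryption_key()/fsp_header_decode_encryption_info() is the mirror image of the writer: locate the offset, check the magic, decrypt key and iv with the master key, and verify the stored checksum. The sketch below compresses that flow; the decrypt and checksum callbacks are placeholders for my_aes_decrypt() and ut_crc32(), and the sizes are the same illustrative assumptions as in the earlier layout sketch.

#include <cstdint>
#include <cstring>

enum { MAGIC_SIZE = 3, UUID_LEN = 36, KEY_LEN = 32 };

static uint32_t read_be4(const uint8_t* p) {
	return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16)
	     | (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

// decrypt() stands in for my_aes_decrypt(); checksum() for ut_crc32().
bool decode_encryption_info_v2(
	const uint8_t* info, const char* expected_magic,
	bool (*decrypt)(const uint8_t* in, uint8_t* out),      // 2 * KEY_LEN bytes
	uint32_t (*checksum)(const uint8_t* buf, unsigned len),
	uint8_t* key_out, uint8_t* iv_out)
{
	const uint8_t* p = info;
	if (std::memcmp(p, expected_magic, MAGIC_SIZE) != 0) {
		return false;                   // info not written yet or wrong version
	}
	p += MAGIC_SIZE + 4 + UUID_LEN;         // skip magic, master key id, uuid

	uint8_t key_iv[2 * KEY_LEN];
	if (!decrypt(p, key_iv)) {
		return false;                   // master key could not decrypt it
	}
	p += 2 * KEY_LEN;

	if (read_be4(p) != checksum(key_iv, 2 * KEY_LEN)) {
		return false;                   // stored checksum mismatch
	}
	std::memcpy(key_out, key_iv, KEY_LEN);
	std::memcpy(iv_out, key_iv + KEY_LEN, KEY_LEN);
	return true;
}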
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/**********************************************************************//**
|
||||
Increases the space size field of a space. */
|
||||
@@ -1065,7 +1465,7 @@ data file.
|
||||
@param[in,out] header tablespace header
|
||||
@param[in,out] mtr mini-transaction
|
||||
@return true if success */
|
||||
static UNIV_COLD __attribute__((warn_unused_result))
|
||||
static UNIV_COLD MY_ATTRIBUTE((warn_unused_result))
|
||||
bool
|
||||
fsp_try_extend_data_file_with_pages(
|
||||
fil_space_t* space,
|
||||
@@ -1097,6 +1497,7 @@ fsp_try_extend_data_file_with_pages(
|
||||
@param[in,out] header tablespace header
|
||||
@param[in,out] mtr mini-transaction
|
||||
@return whether the tablespace was extended */
|
||||
static UNIV_COLD MY_ATTRIBUTE((nonnull))
|
||||
ulint
|
||||
fsp_try_extend_data_file(
|
||||
fil_space_t* space,
|
||||
@@ -1577,7 +1978,7 @@ initialized (may be the same as mtr)
|
||||
@retval block rw_lock_x_lock_count(&block->lock) == 1 if allocation succeeded
|
||||
(init_mtr == mtr, or the page was not previously freed in mtr)
|
||||
@retval block (not allocated or initialized) otherwise */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
buf_block_t*
|
||||
fsp_alloc_free_page(
|
||||
ulint space,
|
||||
@@ -1971,7 +2372,6 @@ fsp_alloc_seg_inode(
|
||||
&& !fsp_alloc_seg_inode_page(space_header, mtr)) {
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
const page_size_t page_size(
|
||||
mach_read_from_4(FSP_SPACE_FLAGS + space_header));
|
||||
|
||||
@@ -2926,24 +3326,27 @@ fseg_alloc_free_page_general(
|
||||
return(block);
|
||||
}
|
||||
|
||||
/** Check that we have at least 2 frag pages free in the first extent of a
|
||||
single-table tablespace, and they are also physically initialized to the data
|
||||
file. That is we have already extended the data file so that those pages are
|
||||
inside the data file. If not, this function extends the tablespace with
|
||||
pages.
|
||||
/** Check that we have at least n_pages frag pages free in the first extent
|
||||
of a single-table tablespace, and they are also physically initialized to
|
||||
the data file. That is we have already extended the data file so that those
|
||||
pages are inside the data file. If not, this function extends the tablespace
|
||||
with pages.
|
||||
@param[in,out] space tablespace
|
||||
@param[in,out] space_header tablespace header, x-latched
|
||||
@param[in] size size of the tablespace in pages,
|
||||
must be less than FSP_EXTENT_SIZE/2
|
||||
must be less than FSP_EXTENT_SIZE
|
||||
@param[in,out] mtr mini-transaction
|
||||
@return true if there were at least 3 free pages, or we were able to extend */
|
||||
@param[in] n_pages number of pages to reserve
|
||||
@return true if there were at least n_pages free pages, or we were able
|
||||
to extend */
|
||||
static
|
||||
bool
|
||||
fsp_reserve_free_pages(
|
||||
fil_space_t* space,
|
||||
fsp_header_t* space_header,
|
||||
ulint size,
|
||||
mtr_t* mtr)
|
||||
mtr_t* mtr,
|
||||
ulint n_pages)
|
||||
{
|
||||
xdes_t* descr;
|
||||
ulint n_used;
|
||||
@@ -2957,13 +3360,12 @@ fsp_reserve_free_pages(
|
||||
|
||||
ut_a(n_used <= size);
|
||||
|
||||
return(size >= n_used + 2
|
||||
return(size >= n_used + n_pages
|
||||
|| fsp_try_extend_data_file_with_pages(
|
||||
space, n_used + 1, space_header, mtr));
|
||||
space, n_used + n_pages - 1, space_header, mtr));
|
||||
}
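A condensed restatement of the check above: the old code reserved a hard-coded 2 frag pages, while the patched version threads the caller's n_pages through. The callback is a stand-in for fsp_try_extend_data_file_with_pages() and is purely illustrative.

#include <functional>

// True if the first extent already has n_pages free frag pages, or if the
// data file can be extended far enough to make them physically available.
bool can_reserve_small_tablespace(
	unsigned size_in_pages,   // tablespace size, below FSP_EXTENT_SIZE
	unsigned n_used,          // pages already used in the first extent
	unsigned n_pages,         // pages the caller wants reserved
	const std::function<bool(unsigned)>& try_extend_to_page)
{
	return size_in_pages >= n_used + n_pages
		|| try_extend_to_page(n_used + n_pages - 1);
}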
/**********************************************************************//**
|
||||
Reserves free pages from a tablespace. All mini-transactions which may
|
||||
/** Reserves free pages from a tablespace. All mini-transactions which may
|
||||
use several pages from the tablespace should call this function beforehand
|
||||
and reserve enough free extents so that they certainly will be able
|
||||
to do their operation, like a B-tree page split, fully. Reservations
|
||||
@@ -2982,23 +3384,33 @@ The purpose is to avoid dead end where the database is full but the
|
||||
user cannot free any space because these freeing operations temporarily
|
||||
reserve some space.
|
||||
|
||||
Single-table tablespaces whose size is < 32 pages are a special case. In this
|
||||
function we would liberally reserve several 64 page extents for every page
|
||||
split or merge in a B-tree. But we do not want to waste disk space if the table
|
||||
only occupies < 32 pages. That is why we apply different rules in that special
|
||||
case, just ensuring that there are 3 free pages available.
|
||||
@return TRUE if we were able to make the reservation */
|
||||
Single-table tablespaces whose size is < FSP_EXTENT_SIZE pages are a special
|
||||
case. In this function we would liberally reserve several extents for
|
||||
every page split or merge in a B-tree. But we do not want to waste disk space
|
||||
if the table only occupies < FSP_EXTENT_SIZE pages. That is why we apply
|
||||
different rules in that special case, just ensuring that there are n_pages
|
||||
free pages available.
|
||||
|
||||
@param[out] n_reserved number of extents actually reserved; if we
|
||||
return true and the tablespace size is <
|
||||
FSP_EXTENT_SIZE pages, then this can be 0,
|
||||
otherwise it is n_ext
|
||||
@param[in] space_id tablespace identifier
|
||||
@param[in] n_ext number of extents to reserve
|
||||
@param[in] alloc_type page reservation type (FSP_BLOB, etc)
|
||||
@param[in,out] mtr the mini transaction
|
||||
@param[in] n_pages for small tablespaces (tablespace size is
|
||||
less than FSP_EXTENT_SIZE), number of free
|
||||
pages to reserve.
|
||||
@return true if we were able to make the reservation */
|
||||
bool
|
||||
fsp_reserve_free_extents(
|
||||
/*=====================*/
|
||||
ulint* n_reserved,/*!< out: number of extents actually reserved; if we
|
||||
return TRUE and the tablespace size is < 64 pages,
|
||||
then this can be 0, otherwise it is n_ext */
|
||||
ulint space_id,/*!< in: space id */
|
||||
ulint n_ext, /*!< in: number of extents to reserve */
|
||||
ulint* n_reserved,
|
||||
ulint space_id,
|
||||
ulint n_ext,
|
||||
fsp_reserve_t alloc_type,
|
||||
/*!< in: page reservation type */
|
||||
mtr_t* mtr) /*!< in/out: mini-transaction */
|
||||
mtr_t* mtr,
|
||||
ulint n_pages)
|
||||
{
|
||||
fsp_header_t* space_header;
|
||||
ulint n_free_list_ext;
|
||||
@@ -3009,7 +3421,7 @@ fsp_reserve_free_extents(
|
||||
ulint reserve= 0;
|
||||
size_t total_reserved = 0;
|
||||
ulint rounds = 0;
|
||||
ulint n_pages_added;
|
||||
ulint n_pages_added = 0;
|
||||
|
||||
ut_ad(mtr);
|
||||
*n_reserved = n_ext;
|
||||
@@ -3022,10 +3434,11 @@ try_again:
|
||||
size = mach_read_from_4(space_header + FSP_SIZE);
|
||||
ut_ad(size == space->size_in_header);
|
||||
|
||||
if (alloc_type != FSP_BLOB && size < FSP_EXTENT_SIZE) {
|
||||
if (size < FSP_EXTENT_SIZE && n_pages < FSP_EXTENT_SIZE / 2) {
|
||||
/* Use different rules for small single-table tablespaces */
|
||||
*n_reserved = 0;
|
||||
return(fsp_reserve_free_pages(space, space_header, size, mtr));
|
||||
return(fsp_reserve_free_pages(space, space_header, size,
|
||||
mtr, n_pages));
|
||||
}
|
||||
|
||||
n_free_list_ext = flst_get_len(space_header + FSP_FREE);
|
||||
@@ -3105,7 +3518,6 @@ try_to_extend:
|
||||
<< " rounds: " << rounds
|
||||
<< " total_reserved: " << total_reserved << ".";
|
||||
}
|
||||
|
||||
goto try_again;
|
||||
}
|
||||
|
||||
@@ -3419,34 +3831,9 @@ fseg_page_is_free(
|
||||
return(is_free);
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Checks if a single page is free.
|
||||
@return true if free */
|
||||
UNIV_INTERN
|
||||
bool
|
||||
fsp_page_is_free_func(
|
||||
/*==============*/
|
||||
ulint space_id, /*!< in: space id */
|
||||
ulint page_no, /*!< in: page offset */
|
||||
mtr_t* mtr, /*!< in/out: mini-transaction */
|
||||
const char *file,
|
||||
ulint line)
|
||||
{
|
||||
ut_ad(mtr);
|
||||
|
||||
const fil_space_t* space = mtr_x_lock_space(space_id, mtr);
|
||||
const page_size_t page_size(space->flags);
|
||||
|
||||
xdes_t* descr = xdes_get_descriptor(space_id, page_no, page_size, mtr);
|
||||
ut_a(descr);
|
||||
|
||||
return xdes_mtr_get_bit(
|
||||
descr, XDES_FREE_BIT, page_no % FSP_EXTENT_SIZE, mtr);
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Frees an extent of a segment to the space free list. */
|
||||
static __attribute__((nonnull))
|
||||
static MY_ATTRIBUTE((nonnull))
|
||||
void
|
||||
fseg_free_extent(
|
||||
/*=============*/
|
||||
@@ -3929,7 +4316,6 @@ fseg_header::to_stream(std::ostream& out) const
|
||||
{
|
||||
const ulint space = mtr_read_ulint(m_header + FSEG_HDR_SPACE,
|
||||
MLOG_4BYTES, m_mtr);
|
||||
|
||||
const ulint page_no = mtr_read_ulint(m_header + FSEG_HDR_PAGE_NO,
|
||||
MLOG_4BYTES, m_mtr);
|
||||
|
||||
@@ -3943,6 +4329,31 @@ fseg_header::to_stream(std::ostream& out) const
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/**********************************************************************//**
|
||||
Checks if a single page is free.
|
||||
@return true if free */
|
||||
UNIV_INTERN
|
||||
bool
|
||||
fsp_page_is_free_func(
|
||||
/*==============*/
|
||||
ulint space_id, /*!< in: space id */
|
||||
ulint page_no, /*!< in: page offset */
|
||||
mtr_t* mtr, /*!< in/out: mini-transaction */
|
||||
const char *file,
|
||||
ulint line)
|
||||
{
|
||||
ut_ad(mtr);
|
||||
|
||||
const fil_space_t* space = mtr_x_lock_space(space_id, mtr);
|
||||
const page_size_t page_size(space->flags);
|
||||
|
||||
xdes_t* descr = xdes_get_descriptor(space_id, page_no, page_size, mtr);
|
||||
ut_a(descr);
|
||||
|
||||
return xdes_mtr_get_bit(
|
||||
descr, XDES_FREE_BIT, page_no % FSP_EXTENT_SIZE, mtr);
|
||||
}
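The xdes_mtr_get_bit() call above ultimately reads one bit out of the extent descriptor's state bitmap. A hedged sketch of that lookup follows, assuming the usual layout of two bits per page (a free bit and a clean bit) packed after the descriptor header; the bitmap offset and bit ordering used here are assumptions for illustration, not the authoritative fsp0fsp.h values.

#include <cstdint>

enum {
	XDES_BITS_PER_PAGE = 2,   // one FREE bit and one CLEAN bit per page
	XDES_FREE_BIT      = 0,
	XDES_BITMAP_OFFSET = 24   // assumed start of the bitmap in the descriptor
};

// Is page `page_in_extent` (0 .. FSP_EXTENT_SIZE - 1) marked free in `descr`?
bool xdes_page_is_free_sketch(const uint8_t* descr, unsigned page_in_extent) {
	const unsigned index = XDES_BITS_PER_PAGE * page_in_extent + XDES_FREE_BIT;
	const uint8_t  byte  = descr[XDES_BITMAP_OFFSET + index / 8];
	return ((byte >> (index % 8)) & 1) != 0;
}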
|
||||
|
||||
/**********************************************************************//**
|
||||
Compute offset after xdes where crypt data can be stored
|
||||
@return offset */
|
||||
|
||||
@@ -27,8 +27,10 @@ Created 2012-11-16 by Sunny Bains as srv/srv0space.cc
|
||||
|
||||
#include "fsp0space.h"
|
||||
#include "fsp0sysspace.h"
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
#include "fsp0fsp.h"
|
||||
#include "os0file.h"
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
#include "my_sys.h"
|
||||
|
||||
@@ -70,34 +72,6 @@ Tablespace::shutdown()
|
||||
m_space_id = ULINT_UNDEFINED;
|
||||
}
|
||||
|
||||
/** Get the sum of the file sizes of each Datafile in a tablespace
|
||||
@return ULINT_UNDEFINED if the size is invalid else the sum of sizes */
|
||||
ulint
|
||||
Tablespace::get_sum_of_sizes() const
|
||||
{
|
||||
ulint sum = 0;
|
||||
|
||||
files_t::const_iterator end = m_files.end();
|
||||
|
||||
for (files_t::const_iterator it = m_files.begin(); it != end; ++it) {
|
||||
|
||||
#ifndef _WIN32
|
||||
if (sizeof(off_t) < 5
|
||||
&& it->m_size >= (1UL << (32UL - UNIV_PAGE_SIZE_SHIFT))) {
|
||||
|
||||
ib::error() << "File size must be < 4 GB with this"
|
||||
" MySQL binary-operating system combination."
|
||||
" In some OS's < 2 GB";
|
||||
|
||||
return(ULINT_UNDEFINED);
|
||||
}
|
||||
#endif /* _WIN32 */
|
||||
sum += it->m_size;
|
||||
}
|
||||
|
||||
return(sum);
|
||||
}
|
||||
|
||||
/** Note that the data file was found.
|
||||
@param[in,out] file Data file object to set */
|
||||
void
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2013, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -357,8 +357,14 @@ SysTablespace::check_size(
|
||||
os_offset_t size = os_file_get_size(file.m_handle);
|
||||
ut_a(size != (os_offset_t) -1);
|
||||
|
||||
/* Round size downward to megabytes */
|
||||
ulint rounded_size_pages = (ulint) (size >> UNIV_PAGE_SIZE_SHIFT);
|
||||
/* Under some error conditions, such as a disk-full scenario
or the file size reaching the filesystem limit, the data file
could contain an incomplete extent at the end. The same can
happen if a failure occurs while we are extending a data file.
So we need to round the size downward to a megabyte. */
|
||||
|
||||
ulint rounded_size_pages = get_pages_from_size(size);
|
||||
|
||||
/* If last file */
|
||||
if (&file == &m_files.back() && m_auto_extend_last_file) {
|
||||
@@ -531,6 +537,7 @@ SysTablespace::open_file(
|
||||
return(err);
|
||||
}
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/** Check the tablespace header for this tablespace.
|
||||
@param[out] flushed_lsn the value of FIL_PAGE_FILE_FLUSH_LSN
|
||||
@return DB_SUCCESS or error code */
|
||||
@@ -574,7 +581,7 @@ SysTablespace::read_lsn_and_check_flags(lsn_t* flushed_lsn)
|
||||
first datafile. */
|
||||
for (int retry = 0; retry < 2; ++retry) {
|
||||
|
||||
err = it->validate_first_page(flushed_lsn);
|
||||
err = it->validate_first_page(flushed_lsn, false);
|
||||
|
||||
if (err != DB_SUCCESS
|
||||
&& (retry == 1
|
||||
@@ -605,7 +612,7 @@ SysTablespace::read_lsn_and_check_flags(lsn_t* flushed_lsn)
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
/** Check if a file can be opened in the correct mode.
|
||||
@param[in] file data file object
|
||||
@param[out] reason exact reason if file_status check failed.
|
||||
@@ -752,7 +759,7 @@ SysTablespace::file_found(
|
||||
/* Need to create the system tablespace for new raw device. */
|
||||
return(file.m_type == SRV_NEW_RAW);
|
||||
}
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/** Check the data file specification.
|
||||
@param[out] create_new_db true if a new database is to be created
|
||||
@param[in] min_expected_size Minimum expected tablespace size in bytes
|
||||
@@ -772,11 +779,7 @@ SysTablespace::check_file_spec(
|
||||
return(DB_ERROR);
|
||||
}
|
||||
|
||||
ulint tablespace_size = get_sum_of_sizes();
|
||||
if (tablespace_size == ULINT_UNDEFINED) {
|
||||
return(DB_ERROR);
|
||||
} else if (tablespace_size
|
||||
< min_expected_size / UNIV_PAGE_SIZE) {
|
||||
if (get_sum_of_sizes() < min_expected_size / UNIV_PAGE_SIZE) {
|
||||
|
||||
ib::error() << "Tablespace size must be at least "
|
||||
<< min_expected_size / (1024 * 1024) << " MB";
|
||||
@@ -987,7 +990,7 @@ SysTablespace::open_or_create(
|
||||
|
||||
return(err);
|
||||
}
|
||||
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
/** Normalize the file size, convert from megabytes to number of pages. */
|
||||
void
|
||||
SysTablespace::normalize()
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2007, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -161,7 +161,7 @@ fts_ast_create_node_term_for_parser(
|
||||
|
||||
/* '%' as first char is forbidden for LIKE in internal SQL parser;
|
||||
'%' as last char is reserved for wildcard search;*/
|
||||
if (len == 0 || len > fts_max_token_size
|
||||
if (len == 0 || len > FTS_MAX_WORD_LEN
|
||||
|| ptr[0] == '%' || ptr[len - 1] == '%') {
|
||||
return(NULL);
|
||||
}
|
||||
@@ -537,6 +537,36 @@ fts_ast_node_print(
|
||||
fts_ast_node_print_recursive(node, 0);
|
||||
}
|
||||
|
||||
/** Check whether only union operations are involved in the node
@param[in] node ast node to check
@return true if the node contains only union operations, else false. */
|
||||
bool
|
||||
fts_ast_node_check_union(
|
||||
fts_ast_node_t* node)
|
||||
{
|
||||
if (node->type == FTS_AST_LIST
|
||||
|| node->type == FTS_AST_SUBEXP_LIST
|
||||
|| node->type == FTS_AST_PARSER_PHRASE_LIST) {
|
||||
|
||||
for (node = node->list.head; node; node = node->next) {
|
||||
if (!fts_ast_node_check_union(node)) {
|
||||
return(false);
|
||||
}
|
||||
}
|
||||
|
||||
} else if (node->type == FTS_AST_OPER
|
||||
&& (node->oper == FTS_IGNORE
|
||||
|| node->oper == FTS_EXIST)) {
|
||||
|
||||
return(false);
|
||||
} else if (node->type == FTS_AST_TEXT) {
|
||||
/* Distance or phrase search query. */
|
||||
return(false);
|
||||
}
|
||||
|
||||
return(true);
|
||||
}
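This predicate exists so that the LIMIT handling added to fts_query() further down in this patch can be kept only for plain unions of terms. A condensed, standalone restatement of that gating (the sentinel below stands in for ULONG_UNDEFINED; the real code operates on the fts_query_t fields):

#include <cstdint>

const uint64_t NO_LIMIT = UINT64_MAX;   // stands in for ULONG_UNDEFINED

// Keep a pushed-down LIMIT only when the parsed query is a pure union:
// no FTS_EXIST/FTS_IGNORE operators and no phrase or distance terms.
uint64_t effective_limit(uint64_t requested_limit, bool ast_is_pure_union) {
	if (requested_limit != NO_LIMIT && !ast_is_pure_union) {
		return NO_LIMIT;        // fall back to fetching every matching doc
	}
	return requested_limit;
}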
|
||||
|
||||
/******************************************************************//**
|
||||
Traverse the AST - in-order traversal, except for the FTX_EXIST and FTS_IGNORE
|
||||
nodes, which will be ignored in the first pass of each level, and visited in a
|
||||
|
||||
@@ -208,19 +208,20 @@ struct fts_tokenize_param_t {
|
||||
ulint add_pos; /*!< Added position for tokens */
|
||||
};
|
||||
|
||||
/****************************************************************//**
|
||||
Run SYNC on the table, i.e., write out data from the cache to the
|
||||
/** Run SYNC on the table, i.e., write out data from the cache to the
|
||||
FTS auxiliary INDEX table and clear the cache at the end.
|
||||
@param[in,out] sync sync state
|
||||
@param[in] unlock_cache whether unlock cache lock when write node
|
||||
@param[in] wait whether wait when a sync is in progress
|
||||
@param[in] has_dict whether has dict operation lock
|
||||
@return DB_SUCCESS if all OK */
|
||||
static
|
||||
dberr_t
|
||||
fts_sync(
|
||||
fts_sync_t* sync,
|
||||
bool unlock_cache,
|
||||
bool wait);
|
||||
bool wait,
|
||||
bool has_dict);
|
||||
|
||||
/****************************************************************//**
|
||||
Release all resources help by the words rb tree e.g., the node ilist. */
|
||||
@@ -1979,7 +1980,6 @@ fts_create_common_tables(
|
||||
|
||||
func_exit:
|
||||
if (error != DB_SUCCESS) {
|
||||
|
||||
for (it = common_tables.begin(); it != common_tables.end();
|
||||
++it) {
|
||||
row_drop_table_for_mysql(
|
||||
@@ -3648,7 +3648,7 @@ fts_add_doc_by_id(
|
||||
|
||||
DBUG_EXECUTE_IF(
|
||||
"fts_instrument_sync_debug",
|
||||
fts_sync(cache->sync, true, true);
|
||||
fts_sync(cache->sync, true, true, false);
|
||||
);
|
||||
|
||||
DEBUG_SYNC_C("fts_instrument_sync_request");
|
||||
@@ -3929,6 +3929,8 @@ fts_write_node(
|
||||
doc_id_t first_doc_id;
|
||||
char table_name[MAX_FULL_NAME_LEN];
|
||||
|
||||
ut_a(node->ilist != NULL);
|
||||
|
||||
if (*graph) {
|
||||
info = (*graph)->info;
|
||||
} else {
|
||||
@@ -4446,7 +4448,7 @@ fts_sync_index(
|
||||
|
||||
ut_ad(rbt_validate(index_cache->words));
|
||||
|
||||
error = fts_sync_write_words(sync->trx, index_cache, sync->unlock_cache);
|
||||
error = fts_sync_write_words(trx, index_cache, sync->unlock_cache);
|
||||
|
||||
#ifdef FTS_DOC_STATS_DEBUG
|
||||
/* FTS_RESOLVE: the word counter info in auxiliary table "DOC_ID"
|
||||
@@ -4463,13 +4465,11 @@ fts_sync_index(
|
||||
}
|
||||
|
||||
/** Check if index cache has been synced completely
|
||||
@param[in,out] sync sync state
|
||||
@param[in,out] index_cache index cache
|
||||
@return true if index is synced, otherwise false. */
|
||||
static
|
||||
bool
|
||||
fts_sync_index_check(
|
||||
fts_sync_t* sync,
|
||||
fts_index_cache_t* index_cache)
|
||||
{
|
||||
const ib_rbt_node_t* rbt_node;
|
||||
@@ -4492,14 +4492,36 @@ fts_sync_index_check(
|
||||
return(true);
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Commit the SYNC, change state of processed doc ids etc.
|
||||
/** Reset synced flag in index cache when rollback
|
||||
@param[in,out] index_cache index cache */
|
||||
static
|
||||
void
|
||||
fts_sync_index_reset(
|
||||
fts_index_cache_t* index_cache)
|
||||
{
|
||||
const ib_rbt_node_t* rbt_node;
|
||||
|
||||
for (rbt_node = rbt_first(index_cache->words);
|
||||
rbt_node != NULL;
|
||||
rbt_node = rbt_next(index_cache->words, rbt_node)) {
|
||||
|
||||
fts_tokenizer_word_t* word;
|
||||
word = rbt_value(fts_tokenizer_word_t, rbt_node);
|
||||
|
||||
fts_node_t* fts_node;
|
||||
fts_node = static_cast<fts_node_t*>(ib_vector_last(word->nodes));
|
||||
|
||||
fts_node->synced = false;
|
||||
}
|
||||
}
|
||||
|
||||
/** Commit the SYNC, change state of processed doc ids etc.
|
||||
@param[in,out] sync sync state
|
||||
@return DB_SUCCESS if all OK */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
dberr_t
|
||||
fts_sync_commit(
|
||||
/*============*/
|
||||
fts_sync_t* sync) /*!< in: sync state */
|
||||
fts_sync_t* sync)
|
||||
{
|
||||
dberr_t error;
|
||||
trx_t* trx = sync->trx;
|
||||
@@ -4550,6 +4572,8 @@ fts_sync_commit(
|
||||
<< " ins/sec";
|
||||
}
|
||||
|
||||
/* Avoid assertion in trx_free(). */
|
||||
trx->dict_operation_lock_mode = 0;
|
||||
trx_free_for_background(trx);
|
||||
|
||||
return(error);
|
||||
@@ -4572,6 +4596,10 @@ fts_sync_rollback(
|
||||
index_cache = static_cast<fts_index_cache_t*>(
|
||||
ib_vector_get(cache->indexes, i));
|
||||
|
||||
/* Reset synced flag so nodes will not be skipped
|
||||
in the next sync, see fts_sync_write_words(). */
|
||||
fts_sync_index_reset(index_cache);
|
||||
|
||||
for (j = 0; fts_index_selector[j].value; ++j) {
|
||||
|
||||
if (index_cache->ins_graph[j] != NULL) {
|
||||
@@ -4597,6 +4625,9 @@ fts_sync_rollback(
|
||||
rw_lock_x_unlock(&cache->lock);
|
||||
|
||||
fts_sql_rollback(trx);
|
||||
|
||||
/* Avoid assertion in trx_free(). */
|
||||
trx->dict_operation_lock_mode = 0;
|
||||
trx_free_for_background(trx);
|
||||
}
|
||||
|
||||
@@ -4605,13 +4636,15 @@ FTS auxiliary INDEX table and clear the cache at the end.
|
||||
@param[in,out] sync sync state
|
||||
@param[in] unlock_cache whether unlock cache lock when write node
|
||||
@param[in] wait whether wait when a sync is in progress
|
||||
@param[in] has_dict whether has dict operation lock
|
||||
@return DB_SUCCESS if all OK */
|
||||
static
|
||||
dberr_t
|
||||
fts_sync(
|
||||
fts_sync_t* sync,
|
||||
bool unlock_cache,
|
||||
bool wait)
|
||||
bool wait,
|
||||
bool has_dict)
|
||||
{
|
||||
ulint i;
|
||||
dberr_t error = DB_SUCCESS;
|
||||
@@ -4640,6 +4673,12 @@ fts_sync(
|
||||
DEBUG_SYNC_C("fts_sync_begin");
|
||||
fts_sync_begin(sync);
|
||||
|
||||
/* When syncing in the background, we hold the dict operation
lock to prevent DDL such as DROP INDEX. */
|
||||
if (has_dict) {
|
||||
sync->trx->dict_operation_lock_mode = RW_S_LATCH;
|
||||
}
|
||||
|
||||
begin_sync:
|
||||
if (cache->total_size > fts_max_cache_size) {
|
||||
/* Avoid the case: sync never finish when
|
||||
@@ -4676,7 +4715,7 @@ begin_sync:
|
||||
ib_vector_get(cache->indexes, i));
|
||||
|
||||
if (index_cache->index->to_be_dropped
|
||||
|| fts_sync_index_check(sync, index_cache)) {
|
||||
|| fts_sync_index_check(index_cache)) {
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -4691,6 +4730,7 @@ end_sync:
|
||||
}
|
||||
|
||||
rw_lock_x_lock(&cache->lock);
|
||||
sync->interrupted = false;
|
||||
sync->in_progress = false;
|
||||
os_event_set(sync->event);
|
||||
rw_lock_x_unlock(&cache->lock);
|
||||
@@ -4714,19 +4754,23 @@ FTS auxiliary INDEX table and clear the cache at the end.
|
||||
@param[in,out] table fts table
|
||||
@param[in] unlock_cache whether unlock cache when write node
|
||||
@param[in] wait whether wait for existing sync to finish
|
||||
@param[in] has_dict whether has dict operation lock
|
||||
@return DB_SUCCESS on success, error code on failure. */
|
||||
dberr_t
|
||||
fts_sync_table(
|
||||
dict_table_t* table,
|
||||
bool unlock_cache,
|
||||
bool wait)
|
||||
bool wait,
|
||||
bool has_dict)
|
||||
{
|
||||
dberr_t err = DB_SUCCESS;
|
||||
|
||||
ut_ad(table->fts);
|
||||
|
||||
if (!dict_table_is_discarded(table) && table->fts->cache) {
|
||||
err = fts_sync(table->fts->cache->sync, unlock_cache, wait);
|
||||
if (!dict_table_is_discarded(table) && table->fts->cache
|
||||
&& !dict_table_is_corrupted(table)) {
|
||||
err = fts_sync(table->fts->cache->sync,
|
||||
unlock_cache, wait, has_dict);
|
||||
}
|
||||
|
||||
return(err);
|
||||
@@ -5114,7 +5158,7 @@ fts_tokenize_document(
|
||||
ut_a(doc->charset);
|
||||
|
||||
doc->tokens = rbt_create_arg_cmp(
|
||||
sizeof(fts_token_t), innobase_fts_text_cmp, (void*) doc->charset);
|
||||
sizeof(fts_token_t), innobase_fts_text_cmp, (void*) doc->charset);
|
||||
|
||||
if (parser != NULL) {
|
||||
fts_tokenize_param_t fts_param;
|
||||
@@ -5809,6 +5853,8 @@ fts_update_doc_id(
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
dict_index_t* clust_index;
|
||||
dict_col_t* col = dict_table_get_nth_col(
|
||||
table, table->fts->doc_col);
|
||||
|
||||
ufield->exp = NULL;
|
||||
|
||||
@@ -5816,8 +5862,8 @@ fts_update_doc_id(
|
||||
|
||||
clust_index = dict_table_get_first_index(table);
|
||||
|
||||
ufield->field_no = dict_col_get_clust_pos(
|
||||
&table->cols[table->fts->doc_col], clust_index);
|
||||
ufield->field_no = dict_col_get_clust_pos(col, clust_index);
|
||||
dict_col_copy_type(col, dfield_get_type(&ufield->new_val));
|
||||
|
||||
/* It is possible we update record that has
|
||||
not yet be sync-ed from last crash. */
|
||||
@@ -6784,11 +6830,7 @@ fts_fake_hex_to_dec(
|
||||
#ifdef UNIV_DEBUG
|
||||
ret =
|
||||
#endif /* UNIV_DEBUG */
|
||||
#ifdef _WIN32
|
||||
sscanf(tmp_id, "%016llu", &dec_id);
|
||||
#else
|
||||
sscanf(tmp_id, "%016llu", &dec_id);
|
||||
#endif /* _WIN32 */
|
||||
sscanf(tmp_id, "%016" UINT64scan, &dec_id);
|
||||
ut_ad(ret == 1);
|
||||
|
||||
return dec_id;
|
||||
@@ -7995,10 +8037,9 @@ func_exit:
|
||||
consistent state. For now consistency is check only by ensuring
|
||||
index->page_no != FIL_NULL
|
||||
@param[out] base_table table has host fts index
|
||||
@param[in,out] trx trx handler
|
||||
@return true if the check certifies the auxiliary tables are sane, false otherwise. */
|
||||
bool
|
||||
fts_is_corrupt(
|
||||
@param[in,out] trx trx handler */
|
||||
void
|
||||
fts_check_corrupt(
|
||||
dict_table_t* base_table,
|
||||
trx_t* trx)
|
||||
{
|
||||
@@ -8016,7 +8057,7 @@ fts_is_corrupt(
|
||||
fts_get_table_name(&fts_table, table_name);
|
||||
|
||||
dict_table_t* aux_table = dict_table_open_on_name(
|
||||
table_name, FALSE, FALSE, DICT_ERR_IGNORE_NONE);
|
||||
table_name, true, FALSE, DICT_ERR_IGNORE_NONE);
|
||||
|
||||
if (aux_table == NULL) {
|
||||
dict_set_corrupted(
|
||||
@@ -8045,6 +8086,4 @@ fts_is_corrupt(
|
||||
|
||||
dict_table_close(aux_table, FALSE, FALSE);
|
||||
}
|
||||
|
||||
return(sane);
|
||||
}
|
||||
|
||||
@@ -602,7 +602,7 @@ fts_zip_read_word(
|
||||
/* Finished decompressing block. */
|
||||
if (zip->zp->avail_in == 0) {
|
||||
|
||||
/* Free the block that's been decompressed. */
|
||||
/* Free the block thats been decompressed. */
|
||||
if (zip->pos > 0) {
|
||||
ulint prev = zip->pos - 1;
|
||||
|
||||
@@ -1507,6 +1507,12 @@ fts_optimize_write_word(
|
||||
fts_node_t* node = (fts_node_t*) ib_vector_get(nodes, i);
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
/* Skip empty node. */
|
||||
if (node->ilist == NULL) {
|
||||
ut_ad(node->ilist_size == 0);
|
||||
continue;
|
||||
}
|
||||
|
||||
error = fts_write_node(
|
||||
trx, &graph, fts_table, word, node);
|
||||
|
||||
@@ -2635,7 +2641,6 @@ fts_optimize_remove_table(
|
||||
|
||||
/** Send sync fts cache for the table.
|
||||
@param[in] table table to sync */
|
||||
UNIV_INTERN
|
||||
void
|
||||
fts_optimize_request_sync_table(
|
||||
dict_table_t* table)
|
||||
@@ -2650,7 +2655,7 @@ fts_optimize_request_sync_table(
|
||||
|
||||
/* FTS optimizer thread is already exited */
|
||||
if (fts_opt_start_shutdown) {
|
||||
ib::info() << "Try to remove table " << table->name
|
||||
ib::info() << "Try to sync table " << table->name
|
||||
<< " after FTS optimize thread exiting.";
|
||||
return;
|
||||
}
|
||||
@@ -2964,7 +2969,7 @@ fts_optimize_sync_table(
|
||||
|
||||
if (table) {
|
||||
if (dict_table_has_fts_index(table) && table->fts->cache) {
|
||||
fts_sync_table(table, true, false);
|
||||
fts_sync_table(table, true, false, true);
|
||||
}
|
||||
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
@@ -3122,26 +3127,7 @@ fts_optimize_thread(
|
||||
ib_vector_get(tables, i));
|
||||
|
||||
if (slot->state != FTS_STATE_EMPTY) {
|
||||
dict_table_t* table = NULL;
|
||||
|
||||
/*slot->table may be freed, so we try to open
|
||||
table by slot->table_id.*/
|
||||
table = dict_table_open_on_id(
|
||||
slot->table_id, FALSE,
|
||||
DICT_TABLE_OP_NORMAL);
|
||||
|
||||
if (table) {
|
||||
|
||||
if (dict_table_has_fts_index(table)) {
|
||||
fts_sync_table(table, false, true);
|
||||
}
|
||||
|
||||
if (table->fts) {
|
||||
fts_free(table);
|
||||
}
|
||||
|
||||
dict_table_close(table, FALSE, FALSE);
|
||||
}
|
||||
fts_optimize_sync_table(slot->table_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -3155,7 +3141,7 @@ fts_optimize_thread(
|
||||
|
||||
/* We count the number of threads in os_thread_exit(). A created
|
||||
thread should always use that to exit and not use return() to exit. */
|
||||
os_thread_exit(NULL);
|
||||
os_thread_exit();
|
||||
|
||||
OS_THREAD_DUMMY_RETURN;
|
||||
}
|
||||
@@ -3189,11 +3175,9 @@ fts_optimize_is_init(void)
|
||||
return(fts_optimize_wq != NULL);
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Signal the optimize thread to prepare for shutdown. */
|
||||
/** Shutdown fts optimize thread. */
|
||||
void
|
||||
fts_optimize_start_shutdown(void)
|
||||
/*=============================*/
|
||||
fts_optimize_shutdown()
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
@@ -3222,17 +3206,5 @@ fts_optimize_start_shutdown(void)
|
||||
os_event_destroy(fts_opt_shutdown_event);
|
||||
|
||||
ib_wqueue_free(fts_optimize_wq);
|
||||
}
|
||||
|
||||
/**********************************************************************//**
|
||||
Reset the work queue. */
|
||||
void
|
||||
fts_optimize_end(void)
|
||||
/*==================*/
|
||||
{
|
||||
ut_ad(!srv_read_only_mode);
|
||||
|
||||
// FIXME: Potential race condition here: We should wait for
|
||||
// the optimize thread to confirm shutdown.
|
||||
fts_optimize_wq = NULL;
|
||||
}
|
||||
|
||||
@@ -153,6 +153,13 @@ struct fts_query_t {
|
||||
bool multi_exist; /*!< multiple FTS_EXIST oper */
|
||||
|
||||
st_mysql_ftparser* parser; /*!< fts plugin parser */
|
||||
|
||||
/** limit value for the fts query */
|
||||
ulonglong limit;
|
||||
|
||||
/** number of docs fetched by query. This is to restrict the
|
||||
result with limit value */
|
||||
ulonglong n_docs;
|
||||
};
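The two new fields act as a simple counter against a cap while doc ids are decoded (see the fts_query_filter_doc_ids() hunks below): once n_docs reaches limit, further decoding is cut short. A standalone sketch of that early-exit pattern, with ULONG_UNDEFINED again represented by a local sentinel:

#include <cstdint>
#include <vector>

const uint64_t FTS_NO_LIMIT = UINT64_MAX;   // stands in for ULONG_UNDEFINED

// Collect at most `limit` doc ids from a decoded ilist; mirrors the
// query->n_docs / query->limit checks added in fts_query_filter_doc_ids().
std::vector<uint64_t> take_up_to_limit(const std::vector<uint64_t>& doc_ids,
				       uint64_t limit) {
	std::vector<uint64_t> out;
	uint64_t n_docs = 0;
	for (uint64_t id : doc_ids) {
		if (limit != FTS_NO_LIMIT && n_docs >= limit) {
			break;          // the real code returns DB_SUCCESS here
		}
		out.push_back(id);
		++n_docs;
	}
	return out;
}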
|
||||
|
||||
/** For phrase matching, first we collect the documents and the positions
|
||||
@@ -2700,7 +2707,7 @@ fts_query_phrase_split(
|
||||
/*****************************************************************//**
|
||||
Text/Phrase search.
|
||||
@return DB_SUCCESS or error code */
|
||||
static __attribute__((warn_unused_result))
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
dberr_t
|
||||
fts_query_phrase_search(
|
||||
/*====================*/
|
||||
@@ -3209,6 +3216,11 @@ fts_query_filter_doc_ids(
|
||||
ulint decoded = 0;
|
||||
ib_rbt_t* doc_freqs = word_freq->doc_freqs;
|
||||
|
||||
if (query->limit != ULONG_UNDEFINED
|
||||
&& query->n_docs >= query->limit) {
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
/* Decode the ilist and add the doc ids to the query doc_id set. */
|
||||
while (decoded < len) {
|
||||
ulint freq = 0;
|
||||
@@ -3296,11 +3308,17 @@ fts_query_filter_doc_ids(
|
||||
/* Add the word to the document's matched RB tree. */
|
||||
fts_query_add_word_to_document(query, doc_id, word);
|
||||
}
|
||||
|
||||
if (query->limit != ULONG_UNDEFINED
|
||||
&& query->limit <= ++query->n_docs) {
|
||||
goto func_exit;
|
||||
}
|
||||
}
|
||||
|
||||
/* Some sanity checks. */
|
||||
ut_a(doc_id == node->last_doc_id);
|
||||
|
||||
func_exit:
|
||||
if (query->total_size > fts_result_cache_limit) {
|
||||
return(DB_FTS_EXCEED_RESULT_CACHE_LIMIT);
|
||||
} else {
|
||||
@@ -3904,19 +3922,24 @@ fts_query_can_optimize(
|
||||
}
|
||||
}
|
||||
|
||||
/*******************************************************************//**
|
||||
FTS Query entry point.
|
||||
/** FTS Query entry point.
|
||||
@param[in] trx transaction
|
||||
@param[in] index fts index to search
|
||||
@param[in] flags FTS search mode
|
||||
@param[in] query_str FTS query
|
||||
@param[in] query_len FTS query string len in bytes
|
||||
@param[in,out] result result doc ids
|
||||
@param[in] limit limit value
|
||||
@return DB_SUCCESS if successful otherwise error code */
|
||||
dberr_t
|
||||
fts_query(
|
||||
/*======*/
|
||||
trx_t* trx, /*!< in: transaction */
|
||||
dict_index_t* index, /*!< in: The FTS index to search */
|
||||
uint flags, /*!< in: FTS search mode */
|
||||
const byte* query_str, /*!< in: FTS query */
|
||||
ulint query_len, /*!< in: FTS query string len
|
||||
in bytes */
|
||||
fts_result_t** result) /*!< in/out: result doc ids */
|
||||
trx_t* trx,
|
||||
dict_index_t* index,
|
||||
uint flags,
|
||||
const byte* query_str,
|
||||
ulint query_len,
|
||||
fts_result_t** result,
|
||||
ulonglong limit)
|
||||
{
|
||||
fts_query_t query;
|
||||
dberr_t error = DB_SUCCESS;
|
||||
@@ -3971,13 +3994,16 @@ fts_query(
|
||||
|
||||
if (flags & FTS_EXPAND) {
|
||||
query.wildcard_words = rbt_create_arg_cmp(
|
||||
sizeof(fts_string_t), innobase_fts_text_cmp, (void*)charset);
|
||||
sizeof(fts_string_t), innobase_fts_text_cmp, (void *)charset);
|
||||
}
|
||||
|
||||
query.total_size += SIZEOF_RBT_CREATE;
|
||||
|
||||
query.total_docs = dict_table_get_n_rows(index->table);
|
||||
|
||||
query.limit = limit;
|
||||
|
||||
query.n_docs = 0;
|
||||
#ifdef FTS_DOC_STATS_DEBUG
|
||||
if (ft_enable_diag_print) {
|
||||
error = fts_get_total_word_count(
|
||||
@@ -4053,6 +4079,19 @@ fts_query(
|
||||
fts_result_cache_limit = 2048;
|
||||
);
|
||||
|
||||
/* The LIMIT optimisation is only allowed when
i) no ranking is involved, and
ii) only FTS union operations are involved. */
|
||||
if (query.limit != ULONG_UNDEFINED
|
||||
&& !fts_ast_node_check_union(ast)) {
|
||||
query.limit = ULONG_UNDEFINED;
|
||||
}
|
||||
|
||||
DBUG_EXECUTE_IF("fts_union_limit_off",
|
||||
query.limit = ULONG_UNDEFINED;
|
||||
);
|
||||
|
||||
/* Traverse the Abstract Syntax Tree (AST) and execute
|
||||
the query. */
|
||||
query.error = fts_ast_visit(
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -152,7 +152,9 @@ rtr_index_build_node_ptr(
|
||||
|
||||
tuple = dtuple_create(heap, n_unique + 1);
|
||||
|
||||
dtuple_set_n_fields_cmp(tuple, n_unique);
|
||||
/* For rtree internal node, we need to compare page number
|
||||
fields. */
|
||||
dtuple_set_n_fields_cmp(tuple, n_unique + 1);
|
||||
|
||||
dict_index_copy_types(tuple, index, n_unique);
|
||||
|
||||
@@ -621,7 +623,7 @@ update_mbr:
|
||||
|
||||
/**************************************************************//**
|
||||
Update parent page's MBR and Predicate lock information during a split */
|
||||
static __attribute__((nonnull))
|
||||
static MY_ATTRIBUTE((nonnull))
|
||||
void
|
||||
rtr_adjust_upper_level(
|
||||
/*===================*/
|
||||
@@ -723,8 +725,6 @@ rtr_adjust_upper_level(
|
||||
node_ptr_upper, &rec, &dummy_big_rec, 0, NULL, mtr);
|
||||
|
||||
if (err == DB_FAIL) {
|
||||
ut_ad(!cursor.rtr_info);
|
||||
|
||||
cursor.rtr_info = sea_cur->rtr_info;
|
||||
cursor.tree_height = sea_cur->tree_height;
|
||||
|
||||
@@ -1025,6 +1025,7 @@ rtr_page_split_and_insert(
|
||||
lock_prdt_t new_prdt;
|
||||
rec_t* first_rec = NULL;
|
||||
int first_rec_group = 1;
|
||||
ulint n_iterations = 0;
|
||||
|
||||
if (!*heap) {
|
||||
*heap = mem_heap_create(1024);
|
||||
@@ -1229,6 +1230,15 @@ func_start:
|
||||
page_cur_search(insert_block, cursor->index, tuple,
|
||||
PAGE_CUR_LE, page_cursor);
|
||||
|
||||
/* It is possible that the new record is too big to be inserted
into the page, in which case it will need a second round of
splitting. We test this scenario here. */
|
||||
DBUG_EXECUTE_IF("rtr_page_need_second_split",
|
||||
if (n_iterations == 0) {
|
||||
rec = NULL;
|
||||
goto after_insert; }
|
||||
);
|
||||
|
||||
rec = page_cur_tuple_insert(page_cursor, tuple, cursor->index,
|
||||
offsets, heap, n_ext, mtr);
|
||||
|
||||
@@ -1247,6 +1257,9 @@ func_start:
|
||||
again. */
|
||||
}
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
after_insert:
|
||||
#endif
|
||||
/* Calculate the mbr on the upper half-page, and the mbr on
|
||||
original page. */
|
||||
rtr_page_cal_mbr(cursor->index, block, &mbr, *heap);
|
||||
@@ -1279,6 +1292,7 @@ func_start:
|
||||
block, new_block, mtr);
|
||||
}
|
||||
|
||||
|
||||
/* If the new res insert fail, we need to do another split
|
||||
again. */
|
||||
if (!rec) {
|
||||
@@ -1289,9 +1303,12 @@ func_start:
|
||||
ibuf_reset_free_bits(block);
|
||||
}
|
||||
|
||||
*offsets = rtr_page_get_father_block(
|
||||
NULL, *heap, cursor->index, block, mtr,
|
||||
NULL, cursor);
|
||||
/* We need to clean the parent path here and search for the
father node later; otherwise it is possible to find a wrong
parent. */
|
||||
rtr_clean_rtr_info(cursor->rtr_info, true);
|
||||
cursor->rtr_info = NULL;
|
||||
n_iterations++;
|
||||
|
||||
rec_t* i_rec = page_rec_get_next(page_get_infimum_rec(
|
||||
buf_block_get_frame(block)));
|
||||
@@ -1411,19 +1428,19 @@ rtr_page_copy_rec_list_end_no_locks(
|
||||
page_cur_t page_cur;
|
||||
page_cur_t cur1;
|
||||
rec_t* cur_rec;
|
||||
dtuple_t* tuple;
|
||||
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets = offsets_;
|
||||
ulint n_fields = 0;
|
||||
ulint offsets_1[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets1 = offsets_1;
|
||||
ulint offsets_2[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets2 = offsets_2;
|
||||
ulint moved = 0;
|
||||
bool is_leaf = page_is_leaf(new_page);
|
||||
|
||||
rec_offs_init(offsets_);
|
||||
rec_offs_init(offsets_1);
|
||||
rec_offs_init(offsets_2);
|
||||
|
||||
page_cur_position(rec, block, &cur1);
|
||||
|
||||
if (page_cur_is_before_first(&cur1)) {
|
||||
|
||||
page_cur_move_to_next(&cur1);
|
||||
}
|
||||
|
||||
@@ -1436,30 +1453,27 @@ rtr_page_copy_rec_list_end_no_locks(
|
||||
page_get_infimum_rec(buf_block_get_frame(new_block)));
|
||||
page_cur_position(cur_rec, new_block, &page_cur);
|
||||
|
||||
n_fields = dict_index_get_n_fields(index);
|
||||
|
||||
/* Copy records from the original page to the new page */
|
||||
while (!page_cur_is_after_last(&cur1)) {
|
||||
rec_t* cur1_rec = page_cur_get_rec(&cur1);
|
||||
rec_t* ins_rec;
|
||||
|
||||
/* Find the place to insert. */
|
||||
tuple = dict_index_build_data_tuple(index, cur1_rec,
|
||||
n_fields, heap);
|
||||
|
||||
if (page_rec_is_infimum(cur_rec)) {
|
||||
cur_rec = page_rec_get_next(cur_rec);
|
||||
}
|
||||
|
||||
offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
while (!page_rec_is_supremum(cur_rec)) {
|
||||
ulint cur_matched_fields = 0;
|
||||
int cmp;
|
||||
|
||||
offsets = rec_get_offsets(
|
||||
cur_rec, index, offsets,
|
||||
dtuple_get_n_fields_cmp(tuple), &heap);
|
||||
cmp = cmp_dtuple_rec_with_match(tuple, cur_rec, offsets,
|
||||
&cur_matched_fields);
|
||||
offsets2 = rec_get_offsets(cur_rec, index, offsets2,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
cmp = cmp_rec_rec_with_match(cur1_rec, cur_rec,
|
||||
offsets1, offsets2,
|
||||
index, FALSE,
|
||||
&cur_matched_fields);
|
||||
if (cmp < 0) {
|
||||
page_cur_move_to_prev(&page_cur);
|
||||
break;
|
||||
@@ -1490,11 +1504,11 @@ rtr_page_copy_rec_list_end_no_locks(
|
||||
|
||||
cur_rec = page_cur_get_rec(&page_cur);
|
||||
|
||||
offsets = rec_get_offsets(cur1_rec, index, offsets,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
ins_rec = page_cur_insert_rec_low(cur_rec, index,
|
||||
cur1_rec, offsets, mtr);
|
||||
cur1_rec, offsets1, mtr);
|
||||
if (UNIV_UNLIKELY(!ins_rec)) {
|
||||
fprintf(stderr, "page number %ld and %ld\n",
|
||||
(long)new_block->page.id.page_no(),
|
||||
@@ -1540,17 +1554,16 @@ rtr_page_copy_rec_list_start_no_locks(
|
||||
{
|
||||
page_cur_t cur1;
|
||||
rec_t* cur_rec;
|
||||
dtuple_t* tuple;
|
||||
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets = offsets_;
|
||||
ulint n_fields = 0;
|
||||
ulint offsets_1[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets1 = offsets_1;
|
||||
ulint offsets_2[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets2 = offsets_2;
|
||||
page_cur_t page_cur;
|
||||
ulint moved = 0;
|
||||
bool is_leaf = page_is_leaf(buf_block_get_frame(block));
|
||||
|
||||
rec_offs_init(offsets_);
|
||||
|
||||
n_fields = dict_index_get_n_fields(index);
|
||||
rec_offs_init(offsets_1);
|
||||
rec_offs_init(offsets_2);
|
||||
|
||||
page_cur_set_before_first(block, &cur1);
|
||||
page_cur_move_to_next(&cur1);
|
||||
@@ -1563,23 +1576,23 @@ rtr_page_copy_rec_list_start_no_locks(
|
||||
rec_t* cur1_rec = page_cur_get_rec(&cur1);
|
||||
rec_t* ins_rec;
|
||||
|
||||
/* Find the place to insert. */
|
||||
tuple = dict_index_build_data_tuple(index, cur1_rec,
|
||||
n_fields, heap);
|
||||
|
||||
if (page_rec_is_infimum(cur_rec)) {
|
||||
cur_rec = page_rec_get_next(cur_rec);
|
||||
}
|
||||
|
||||
offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
while (!page_rec_is_supremum(cur_rec)) {
|
||||
ulint cur_matched_fields = 0;
|
||||
int cmp;
|
||||
|
||||
offsets = rec_get_offsets(cur_rec, index, offsets,
|
||||
dtuple_get_n_fields_cmp(tuple),
|
||||
&heap);
|
||||
cmp = cmp_dtuple_rec_with_match(tuple, cur_rec, offsets,
|
||||
&cur_matched_fields);
|
||||
offsets2 = rec_get_offsets(cur_rec, index, offsets2,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
cmp = cmp_rec_rec_with_match(cur1_rec, cur_rec,
|
||||
offsets1, offsets2,
|
||||
index, FALSE,
|
||||
&cur_matched_fields);
|
||||
if (cmp < 0) {
|
||||
page_cur_move_to_prev(&page_cur);
|
||||
cur_rec = page_cur_get_rec(&page_cur);
|
||||
@@ -1612,11 +1625,11 @@ rtr_page_copy_rec_list_start_no_locks(
|
||||
|
||||
cur_rec = page_cur_get_rec(&page_cur);
|
||||
|
||||
offsets = rec_get_offsets(cur1_rec, index, offsets,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
offsets1 = rec_get_offsets(cur1_rec, index, offsets1,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
ins_rec = page_cur_insert_rec_low(cur_rec, index,
|
||||
cur1_rec, offsets, mtr);
|
||||
cur1_rec, offsets1, mtr);
|
||||
if (UNIV_UNLIKELY(!ins_rec)) {
|
||||
fprintf(stderr, "page number %ld and %ld\n",
|
||||
(long)new_block->page.id.page_no(),
|
||||
@@ -1689,36 +1702,6 @@ rtr_merge_mbr_changed(
|
||||
mbr++;
|
||||
}
|
||||
|
||||
if (!changed) {
|
||||
rec_t* rec1;
|
||||
rec_t* rec2;
|
||||
ulint* offsets1;
|
||||
ulint* offsets2;
|
||||
mem_heap_t* heap;
|
||||
|
||||
heap = mem_heap_create(100);
|
||||
|
||||
rec1 = page_rec_get_next(
|
||||
page_get_infimum_rec(
|
||||
buf_block_get_frame(merge_block)));
|
||||
|
||||
offsets1 = rec_get_offsets(
|
||||
rec1, index, NULL, ULINT_UNDEFINED, &heap);
|
||||
|
||||
rec2 = page_rec_get_next(
|
||||
page_get_infimum_rec(
|
||||
buf_block_get_frame(block)));
|
||||
offsets2 = rec_get_offsets(
|
||||
rec2, index, NULL, ULINT_UNDEFINED, &heap);
|
||||
|
||||
/* Check any primary key fields have been changed */
|
||||
if (cmp_rec_rec(rec1, rec2, offsets1, offsets2, index) != 0) {
|
||||
changed = true;
|
||||
}
|
||||
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
return(changed);
|
||||
}
|
||||
|
||||
@@ -1887,7 +1870,7 @@ rtr_estimate_n_rows_in_range(
|
||||
|
||||
/* Read mbr from tuple. */
|
||||
const dfield_t* dtuple_field;
|
||||
ulint dtuple_f_len __attribute__((unused));
|
||||
ulint dtuple_f_len MY_ATTRIBUTE((unused));
|
||||
rtr_mbr_t range_mbr;
|
||||
double range_area;
|
||||
byte* range_mbr_ptr;
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -310,7 +310,7 @@ rtr_pcur_getnext_from_path(
|
||||
page_cursor->rec = NULL;
|
||||
|
||||
if (mode == PAGE_CUR_RTREE_LOCATE) {
|
||||
if (level == target_level) {
|
||||
if (level == target_level && level == 0) {
|
||||
ulint low_match;
|
||||
|
||||
found = false;
|
||||
@@ -336,10 +336,15 @@ rtr_pcur_getnext_from_path(
|
||||
}
|
||||
}
|
||||
} else {
|
||||
page_cur_mode_t page_mode = mode;
|
||||
|
||||
if (level == target_level
|
||||
&& target_level != 0) {
|
||||
page_mode = PAGE_CUR_RTREE_GET_FATHER;
|
||||
}
|
||||
found = rtr_cur_search_with_match(
|
||||
block, index, tuple,
|
||||
PAGE_CUR_RTREE_LOCATE, page_cursor,
|
||||
btr_cur->rtr_info);
|
||||
block, index, tuple, page_mode,
|
||||
page_cursor, btr_cur->rtr_info);
|
||||
|
||||
/* Save the position of parent if needed */
|
||||
if (found && need_parent) {
|
||||
@@ -428,6 +433,9 @@ rtr_pcur_getnext_from_path(
|
||||
page_cur_get_block(page_cursor),
|
||||
r_cur);
|
||||
|
||||
btr_cur->low_match = level != 0 ?
|
||||
DICT_INDEX_SPATIAL_NODEPTR_SIZE + 1
|
||||
: btr_cur->low_match;
|
||||
break;
|
||||
}
|
||||
|
||||
@@ -641,6 +649,41 @@ rtr_pcur_open_low(
|
||||
}
|
||||
}
|
||||
|
||||
/* Get the rtree page father.
|
||||
@param[in] index rtree index
|
||||
@param[in] block child page in the index
|
||||
@param[in] mtr mtr
|
||||
@param[in] sea_cur search cursor, contains information
|
||||
about parent nodes in search
|
||||
@param[in] cursor cursor on node pointer record,
|
||||
its page x-latched */
|
||||
void
|
||||
rtr_page_get_father(
|
||||
dict_index_t* index,
|
||||
buf_block_t* block,
|
||||
mtr_t* mtr,
|
||||
btr_cur_t* sea_cur,
|
||||
btr_cur_t* cursor)
|
||||
{
|
||||
mem_heap_t* heap = mem_heap_create(100);
|
||||
#ifdef UNIV_DEBUG
|
||||
ulint* offsets;
|
||||
|
||||
offsets = rtr_page_get_father_block(
|
||||
NULL, heap, index, block, mtr, sea_cur, cursor);
|
||||
|
||||
ulint page_no = btr_node_ptr_get_child_page_no(cursor->page_cur.rec,
|
||||
offsets);
|
||||
|
||||
ut_ad(page_no == block->page.id.page_no());
|
||||
#else
|
||||
rtr_page_get_father_block(
|
||||
NULL, heap, index, block, mtr, sea_cur, cursor);
|
||||
#endif
|
||||
|
||||
mem_heap_free(heap);
|
||||
}
|
||||
|
||||
/************************************************************//**
|
||||
Returns the father block to a page. It is assumed that mtr holds
|
||||
an X or SX latch on the tree.
|
||||
@@ -658,6 +701,7 @@ rtr_page_get_father_block(
|
||||
btr_cur_t* cursor) /*!< out: cursor on node pointer record,
|
||||
its page x-latched */
|
||||
{
|
||||
|
||||
rec_t* rec = page_rec_get_next(
|
||||
page_get_infimum_rec(buf_block_get_frame(block)));
|
||||
btr_cur_position(index, rec, block, cursor);
|
||||
@@ -785,14 +829,13 @@ rtr_get_father_node(
|
||||
ulint n_fields;
|
||||
bool new_rtr = false;
|
||||
|
||||
get_parent:
|
||||
/* Try to optimally locate the parent node. Level should always
be less than sea_cur->tree_height unless the root is splitting. */
|
||||
if (sea_cur && sea_cur->tree_height > level) {
|
||||
|
||||
ut_ad(mtr_memo_contains_flagged(mtr,
|
||||
dict_index_get_lock(index),
|
||||
MTR_MEMO_X_LOCK
|
||||
MTR_MEMO_X_LOCK
|
||||
| MTR_MEMO_SX_LOCK));
|
||||
ret = rtr_cur_restore_position(
|
||||
BTR_CONT_MODIFY_TREE, sea_cur, level, mtr);
|
||||
@@ -812,6 +855,8 @@ get_parent:
|
||||
page_cur_position(rec,
|
||||
btr_pcur_get_block(r_cursor),
|
||||
btr_cur_get_page_cur(btr_cur));
|
||||
btr_cur->rtr_info = sea_cur->rtr_info;
|
||||
btr_cur->tree_height = sea_cur->tree_height;
|
||||
ut_ad(rtr_compare_cursor_rec(
|
||||
index, btr_cur, page_no, &heap));
|
||||
goto func_exit;
|
||||
@@ -838,14 +883,13 @@ get_parent:
|
||||
BTR_CONT_MODIFY_TREE, btr_cur, 0,
|
||||
__FILE__, __LINE__, mtr);
|
||||
|
||||
|
||||
} else {
|
||||
/* btr_validate */
|
||||
ut_ad(level >= 1);
|
||||
ut_ad(!sea_cur);
|
||||
|
||||
btr_cur_search_to_nth_level(
|
||||
index, level - 1, tuple, PAGE_CUR_RTREE_LOCATE,
|
||||
index, level, tuple, PAGE_CUR_RTREE_LOCATE,
|
||||
BTR_CONT_MODIFY_TREE, btr_cur, 0,
|
||||
__FILE__, __LINE__, mtr);
|
||||
|
||||
@@ -856,50 +900,11 @@ get_parent:
|
||||
|| (btr_cur->low_match != n_fields)) {
|
||||
ret = rtr_pcur_getnext_from_path(
|
||||
tuple, PAGE_CUR_RTREE_LOCATE, btr_cur,
|
||||
level - 1, BTR_CONT_MODIFY_TREE,
|
||||
level, BTR_CONT_MODIFY_TREE,
|
||||
true, mtr);
|
||||
|
||||
ut_ad(ret && btr_cur->low_match == n_fields);
|
||||
}
|
||||
|
||||
/* Since there could be some identical recs in different
|
||||
pages, we still need to compare the page_no field to
|
||||
verify we have the right parent. */
|
||||
btr_pcur_t* r_cursor = rtr_get_parent_cursor(btr_cur,
|
||||
level,
|
||||
false);
|
||||
rec = btr_pcur_get_rec(r_cursor);
|
||||
|
||||
ulint* offsets = rec_get_offsets(rec, index, NULL,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
while (page_no != btr_node_ptr_get_child_page_no(rec, offsets)) {
|
||||
ret = rtr_pcur_getnext_from_path(
|
||||
tuple, PAGE_CUR_RTREE_LOCATE, btr_cur,
|
||||
level - 1, BTR_CONT_MODIFY_TREE,
|
||||
true, mtr);
|
||||
|
||||
ut_ad(ret && btr_cur->low_match == n_fields);
|
||||
|
||||
/* There must be a rec in the path, if the path
|
||||
is run out, the spatial index is corrupted. */
|
||||
if (!ret) {
|
||||
mutex_enter(&dict_sys->mutex);
|
||||
dict_set_corrupted_index_cache_only(index);
|
||||
mutex_exit(&dict_sys->mutex);
|
||||
|
||||
ib::info() << "InnoDB: Corruption of a"
|
||||
" spatial index " << index->name
|
||||
<< " of table " << index->table->name;
|
||||
break;
|
||||
}
|
||||
r_cursor = rtr_get_parent_cursor(btr_cur, level, false);
|
||||
rec = btr_pcur_get_rec(r_cursor);
|
||||
offsets = rec_get_offsets(rec, index, NULL,
|
||||
ULINT_UNDEFINED, &heap);
|
||||
}
|
||||
|
||||
sea_cur = btr_cur;
|
||||
goto get_parent;
|
||||
}
|
||||
|
||||
ret = rtr_compare_cursor_rec(
|
||||
@@ -1249,8 +1254,8 @@ rtr_check_discard_page(
|
||||
mutex_exit(&index->rtr_track->rtr_active_mutex);
|
||||
|
||||
lock_mutex_enter();
|
||||
lock_prdt_free_from_discard_page(block, lock_sys->prdt_hash);
|
||||
lock_prdt_free_from_discard_page(block, lock_sys->prdt_page_hash);
|
||||
lock_prdt_page_free_from_discard(block, lock_sys->prdt_hash);
|
||||
lock_prdt_page_free_from_discard(block, lock_sys->prdt_page_hash);
|
||||
lock_mutex_exit();
|
||||
}
|
||||
|
||||
@@ -1784,6 +1789,10 @@ rtr_cur_search_with_match(
|
||||
}
|
||||
}
|
||||
break;
|
||||
case PAGE_CUR_RTREE_GET_FATHER:
|
||||
cmp = cmp_dtuple_rec_with_gis_internal(
|
||||
tuple, rec, offsets);
|
||||
break;
|
||||
default:
|
||||
/* WITHIN etc. */
|
||||
cmp = cmp_dtuple_rec_with_gis(
|
||||
@@ -1807,6 +1816,12 @@ rtr_cur_search_with_match(
|
||||
if (!is_leaf) {
|
||||
ulint page_no;
|
||||
node_seq_t new_seq;
|
||||
bool is_loc;
|
||||
|
||||
is_loc = (orig_mode
|
||||
== PAGE_CUR_RTREE_LOCATE
|
||||
|| orig_mode
|
||||
== PAGE_CUR_RTREE_GET_FATHER);
|
||||
|
||||
offsets = rec_get_offsets(
|
||||
rec, index, offsets,
|
||||
@@ -1827,8 +1842,7 @@ rtr_cur_search_with_match(
|
||||
new_seq, level - 1, 0,
|
||||
NULL, 0);
|
||||
|
||||
if (orig_mode
|
||||
== PAGE_CUR_RTREE_LOCATE) {
|
||||
if (is_loc) {
|
||||
rtr_non_leaf_insert_stack_push(
|
||||
index,
|
||||
rtr_info->parent_path,
|
||||
@@ -1838,8 +1852,7 @@ rtr_cur_search_with_match(
|
||||
|
||||
if (!srv_read_only_mode
|
||||
&& (rtr_info->need_page_lock
|
||||
|| orig_mode
|
||||
!= PAGE_CUR_RTREE_LOCATE)) {
|
||||
|| !is_loc)) {
|
||||
|
||||
/* Lock the page, preventing it
|
||||
from being shrunk */
|
||||
|
||||
File diff suppressed because it is too large
@@ -27,11 +27,15 @@ extern const char innobase_index_reserve_name[];
to explicitly create a file_per_table tablespace for the table. */
extern const char reserved_file_per_table_space_name[];

/* "innodb_system" tablespace name is reserved by InnoDB for the system tablespace
which uses space_id 0 and stores extra types of system pages like UNDO
and doublewrite. */
/* "innodb_system" tablespace name is reserved by InnoDB for the
system tablespace which uses space_id 0 and stores extra types of
system pages like UNDO and doublewrite. */
extern const char reserved_system_space_name[];

/* "innodb_temporary" tablespace name is reserved by InnoDB for the
predefined shared temporary tablespace. */
extern const char reserved_temporary_space_name[];

/* Structure defines translation table between mysql index and InnoDB
index structures */
struct innodb_idx_translate_t {
@@ -46,38 +50,9 @@ struct innodb_idx_translate_t {
				array index */
};


/** Structure defines template related to virtual columns and
their base columns */
struct innodb_col_templ_t {
	/** number of regular columns */
	ulint			n_col;

	/** number of virtual columns */
	ulint			n_v_col;

	/** array of templates for virtual col and their base columns */
	mysql_row_templ_t**	vtempl;

	/** table's database name */
	char			db_name[MAX_DATABASE_NAME_LEN];

	/** table name */
	char			tb_name[MAX_TABLE_NAME_LEN];

	/** share->table_name */
	char			share_name[MAX_DATABASE_NAME_LEN
					   + MAX_TABLE_NAME_LEN];

	/** MySQL record length */
	ulint			rec_len;

	/** default column value if any */
	const byte*		default_rec;
};

/** InnoDB table share */
typedef struct st_innobase_share {
	THR_LOCK	lock;
	const char*	table_name;	/*!< InnoDB table name */
	uint		use_count;	/*!< reference count,
					incremented in get_share()
@@ -88,9 +63,6 @@ typedef struct st_innobase_share {
	innodb_idx_translate_t
			idx_trans_tbl;	/*!< index translation table between
					MySQL and InnoDB */
	innodb_col_templ_t
			s_templ;	/*!< table virtual column template
					info */
} INNOBASE_SHARE;

/** Prebuilt structures in an InnoDB table handle used within MySQL */
@@ -112,39 +84,15 @@ struct ha_table_option_struct
	uint		encryption;		/*!< DEFAULT, ON, OFF */
	ulonglong	encryption_key_id;	/*!< encryption key id */
};

/* JAN: TODO: MySQL 5.7 handler.h */
struct st_handler_tablename
{
	const char *db;
	const char *tablename;
};

/** The class defining a handle to an Innodb table */
class ha_innobase: public handler
{
	ha_statistics*	ha_partition_stats; /*!< stats of the partition owner
					handler (if there is one) */
	uint store_key_val_for_row(uint keynr, char* buff, uint buff_len,
			const uchar* record);
	inline void update_thd(THD* thd);
	void update_thd();
	int change_active_index(uint keynr);
	int general_fetch(uchar* buf, uint direction, uint match_mode);
	dberr_t innobase_lock_autoinc();
	ulonglong innobase_peek_autoinc();
	dberr_t innobase_set_max_autoinc(ulonglong auto_inc);
	dberr_t innobase_reset_autoinc(ulonglong auto_inc);
	dberr_t innobase_get_autoinc(ulonglong* value);
	void innobase_initialize_autoinc();
	dict_index_t* innobase_get_index(uint keynr);

#ifdef WITH_WSREP
	int wsrep_append_keys(THD *thd, bool shared,
			      const uchar* record0, const uchar* record1);
#endif

	/* Init values for the class: */
public:
	ha_innobase(handlerton* hton, TABLE_SHARE* table_arg);
	~ha_innobase();
@@ -154,13 +102,21 @@ public:
	enum row_type get_row_type() const;

	const char* table_type() const;

	const char* index_type(uint key_number);

	const char** bas_ext() const;

	Table_flags table_flags() const;

	ulong index_flags(uint idx, uint part, bool all_parts) const;

	uint max_supported_keys() const;

	uint max_supported_key_length() const;

	uint max_supported_key_part_length() const;

	const key_map* keys_to_use_for_scanning();

	/** Opens dictionary table object using table name. For partition, we need to
@@ -171,136 +127,192 @@ public:
	@param[in]	is_partition	if this is a partition of a table
	@param[in]	ignore_err	error to ignore for loading dictionary object
	@return dictionary table object or NULL if not found */
	static dict_table_t* open_dict_table(
		const char*		table_name,
		const char*		norm_name,
		bool			is_partition,
		dict_err_ignore_t	ignore_err);
	static dict_table_t* open_dict_table(
		const char*		table_name,
		const char*		norm_name,
		bool			is_partition,
		dict_err_ignore_t	ignore_err);

	int open(const char *name, int mode, uint test_if_locked);

	handler* clone(const char *name, MEM_ROOT *mem_root);

	int close(void);

	double scan_time();

	double read_time(uint index, uint ranges, ha_rows rows);

	longlong get_memory_buffer_size() const;

	int delete_all_rows();

	int write_row(uchar * buf);

	int update_row(const uchar * old_data, uchar * new_data);

	int delete_row(const uchar * buf);

	bool was_semi_consistent_read();

	void try_semi_consistent_read(bool yes);

	void unlock_row();

	int index_init(uint index, bool sorted);

	int index_end();
	int index_read(uchar * buf, const uchar * key,
		uint key_len, enum ha_rkey_function find_flag);
	int index_read_idx(uchar * buf, uint index, const uchar * key,
		uint key_len, enum ha_rkey_function find_flag);

	int index_read(
		uchar*			buf,
		const uchar*		key,
		uint			key_len,
		ha_rkey_function	find_flag);

	int index_read_last(uchar * buf, const uchar * key, uint key_len);

	int index_next(uchar * buf);

	int index_next_same(uchar * buf, const uchar *key, uint keylen);

	int index_prev(uchar * buf);

	int index_first(uchar * buf);

	int index_last(uchar * buf);

	int rnd_init(bool scan);

	int rnd_end();

	int rnd_next(uchar *buf);

	int rnd_pos(uchar * buf, uchar *pos);

	int ft_init();
	void ft_end();
	FT_INFO *ft_init_ext(uint flags, uint inx, String* key);
	int ft_read(uchar* buf);

	FT_INFO *ft_init_ext_with_hints(
	void ft_end();

	FT_INFO* ft_init_ext(uint flags, uint inx, String* key);

	FT_INFO* ft_init_ext_with_hints(
		uint			inx,
		String*			key,
		void*			hints);
/* JAN: TODO: MySQL 5.6
		Ft_hints*		hints);
*/
		//Ft_hints*		hints);

	int ft_read(uchar* buf);

	int enable_indexes(uint mode);
	int disable_indexes(uint mode);

	void position(const uchar *record);

	int info(uint);

	int analyze(THD* thd,HA_CHECK_OPT* check_opt);

	int optimize(THD* thd,HA_CHECK_OPT* check_opt);

	int discard_or_import_tablespace(my_bool discard);
	int extra(enum ha_extra_function operation);

	int extra(ha_extra_function operation);

	int reset();

	int external_lock(THD *thd, int lock_type);
	int transactional_table_lock(THD *thd, int lock_type);

	int start_stmt(THD *thd, thr_lock_type lock_type);

	void position(uchar *record);
	ha_rows records_in_range(uint inx, key_range *min_key, key_range
								*max_key);

	// MySQL 5.7 Select count optimization
	// int records(ha_rows* num_rows);

	ha_rows records_in_range(
		uint			inx,
		key_range*		min_key,
		key_range*		max_key);

	ha_rows estimate_rows_upper_bound();

	// JAN: TODO: MySQL 5.7
	// int records(ha_rows* num_rows);

	void update_create_info(HA_CREATE_INFO* create_info);
	int parse_table_name(const char*name,
			     HA_CREATE_INFO* create_info,
			     ulint flags,
			     ulint flags2,
			     char* norm_name,
			     char* temp_path,
			     char* remote_path);

	int create(
		const char*		name,
		TABLE*			form,
		HA_CREATE_INFO*		create_info);

	const char* check_table_options(THD *thd, TABLE* table,
		HA_CREATE_INFO*	create_info, const bool use_tablespace, const ulint file_format);
	int create(const char *name, register TABLE *form,
		   HA_CREATE_INFO *create_info);

	int truncate();

	int delete_table(const char *name);

	int rename_table(const char* from, const char* to);
	int defragment_table(const char* name, const char* index_name,
			     bool async);
	int check(THD* thd, HA_CHECK_OPT* check_opt);
	char* update_table_comment(const char* comment);

	char* get_foreign_key_create_info();

	int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list);
	int get_parent_foreign_key_list(THD *thd,
					List<FOREIGN_KEY_INFO> *f_key_list);

	int get_parent_foreign_key_list(
		THD*			thd,
		List<FOREIGN_KEY_INFO>*	f_key_list);
	int get_cascade_foreign_key_table_list(
		THD*				thd,
		List<st_handler_tablename>*	fk_table_list);


	bool can_switch_engines();

	uint referenced_by_foreign_key();

	void free_foreign_key_create_info(char* str);
	THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
					enum thr_lock_type lock_type);

	//uint lock_count(void) const;

	THR_LOCK_DATA** store_lock(
		THD*			thd,
		THR_LOCK_DATA**		to,
		thr_lock_type		lock_type);

	void init_table_handle_for_HANDLER();
	virtual void get_auto_increment(ulonglong offset, ulonglong increment,
					ulonglong nb_desired_values,
					ulonglong *first_value,
					ulonglong *nb_reserved_values);

	virtual void get_auto_increment(
		ulonglong		offset,
		ulonglong		increment,
		ulonglong		nb_desired_values,
		ulonglong*		first_value,
		ulonglong*		nb_reserved_values);
	int reset_auto_increment(ulonglong value);

	uint lock_count(void) const;

	virtual bool get_error_message(int error, String *buf);

	virtual bool get_foreign_dup_key(char*, uint, char*, uint);

	uint8 table_cache_type();

	/**
	Ask handler about permission to cache table during query registration
	*/
	my_bool register_query_cache_table(THD *thd, char *table_key,
					   uint key_length,
					   qc_engine_callback *call_back,
					   ulonglong *engine_data);
	static const char *get_mysql_bin_log_name();
	static ulonglong get_mysql_bin_log_pos();
	my_bool register_query_cache_table(
		THD*			thd,
		char*			table_key,
		uint			key_length,
		qc_engine_callback*	call_back,
		ulonglong*		engine_data);

	bool primary_key_is_clustered();
	int cmp_ref(const uchar *ref1, const uchar *ref2);

	int cmp_ref(const uchar* ref1, const uchar* ref2);

	/** On-line ALTER TABLE interface @see handler0alter.cc @{ */

@@ -371,87 +383,74 @@ public:
		Alter_inplace_info*	ha_alter_info,
		bool			commit);
	/** @} */
	void set_partition_owner_stats(ha_statistics *stats);

	bool check_if_incompatible_data(HA_CREATE_INFO *info,
					uint table_changes);
	bool check_if_supported_virtual_columns(void) { return TRUE; }
	bool check_if_incompatible_data(
		HA_CREATE_INFO*		info,
		uint			table_changes);

private:
	/** Builds a 'template' to the prebuilt struct.

	The template is used in fast retrieval of just those column
	values MySQL needs in its processing.
	@param whole_row true if access is needed to a whole row,
	false if accessing individual fields is enough */
	void build_template(bool whole_row);
	/** Resets a query execution 'template'.
	@see build_template() */
	inline void reset_template();

	int info_low(uint, bool);

	/** Write Row Interface optimized for Intrinsic table. */
	int intrinsic_table_write_row(uchar* record);

public:
	/** @name Multi Range Read interface @{ */

	/** Initialize multi range read @see DsMrr_impl::dsmrr_init
	* @param seq
	* @param seq_init_param
	* @param n_ranges
	* @param mode
	* @param buf
	*/
	int multi_range_read_init(RANGE_SEQ_IF* seq,
				  void* seq_init_param,
				  uint n_ranges, uint mode,
				  HANDLER_BUFFER* buf);
	@param seq
	@param seq_init_param
	@param n_ranges
	@param mode
	@param buf */
	int multi_range_read_init(
		RANGE_SEQ_IF*		seq,
		void*			seq_init_param,
		uint			n_ranges,
		uint			mode,
		HANDLER_BUFFER*		buf);

	/** Process next multi range read @see DsMrr_impl::dsmrr_next
	* @param range_info
	*/
	int multi_range_read_next(range_id_t *range_info);
	@param range_info */
	int multi_range_read_next(range_id_t *range_info);

	/** Initialize multi range read and get information.
	* @see ha_myisam::multi_range_read_info_const
	* @see DsMrr_impl::dsmrr_info_const
	* @param keyno
	* @param seq
	* @param seq_init_param
	* @param n_ranges
	* @param bufsz
	* @param flags
	* @param cost
	*/
	ha_rows multi_range_read_info_const(uint keyno, RANGE_SEQ_IF* seq,
					    void* seq_init_param,
					    uint n_ranges, uint* bufsz,
					    uint* flags, Cost_estimate* cost);
	@see ha_myisam::multi_range_read_info_const
	@see DsMrr_impl::dsmrr_info_const
	@param keyno
	@param seq
	@param seq_init_param
	@param n_ranges
	@param bufsz
	@param flags
	@param cost */
	ha_rows multi_range_read_info_const(
		uint			keyno,
		RANGE_SEQ_IF*		seq,
		void*			seq_init_param,
		uint			n_ranges,
		uint*			bufsz,
		uint*			flags,
		Cost_estimate*		cost);

	/** Initialize multi range read and get information.
	* @see DsMrr_impl::dsmrr_info
	* @param keyno
	* @param seq
	* @param seq_init_param
	* @param n_ranges
	* @param bufsz
	* @param flags
	* @param cost
	*/
	@see DsMrr_impl::dsmrr_info
	@param keyno
	@param seq
	@param seq_init_param
	@param n_ranges
	@param bufsz
	@param flags
	@param cost */
	ha_rows multi_range_read_info(uint keyno, uint n_ranges, uint keys,
				      uint key_parts, uint* bufsz, uint* flags,
				      Cost_estimate* cost);

	int multi_range_read_explain_info(uint mrr_mode,
					  char *str, size_t size);
	int multi_range_read_explain_info(uint mrr_mode,
					  char *str, size_t size);

	/** Attempt to push down an index condition.
	* @param[in] keyno MySQL key number
	* @param[in] idx_cond Index condition to be checked
	* @return idx_cond if pushed; NULL if not pushed
	*/
	class Item* idx_cond_push(uint keyno, class Item* idx_cond);
	@param[in] keyno MySQL key number
	@param[in] idx_cond Index condition to be checked
	@return idx_cond if pushed; NULL if not pushed */
	Item* idx_cond_push(uint keyno, Item* idx_cond);
	/* @} */

	/* An helper function for index_cond_func_innodb: */
	bool is_thd_killed();
	/* An helper function for index_cond_func_innodb: */
	bool is_thd_killed();

protected:

@@ -461,6 +460,42 @@ protected:
	doesn't give any clue that it is called at the end of a statement. */
	int end_stmt();

	dberr_t innobase_get_autoinc(ulonglong* value);
	void innobase_initialize_autoinc();
	dberr_t innobase_lock_autoinc();
	ulonglong innobase_peek_autoinc();
	dberr_t innobase_set_max_autoinc(ulonglong auto_inc);
	dberr_t innobase_reset_autoinc(ulonglong auto_inc);

	/** Resets a query execution 'template'.
	@see build_template() */
	void reset_template();

	/** Write Row Interface optimized for Intrinsic table. */
	int intrinsic_table_write_row(uchar* record);

protected:
	inline void update_thd(THD* thd);
	void update_thd();

	int general_fetch(uchar* buf, uint direction, uint match_mode);
	int change_active_index(uint keynr);
	dict_index_t* innobase_get_index(uint keynr);

#ifdef WITH_WSREP
	int wsrep_append_keys(THD *thd, bool shared,
			      const uchar* record0, const uchar* record1);
#endif
	/** Builds a 'template' to the prebuilt struct.

	The template is used in fast retrieval of just those column
	values MySQL needs in its processing.
	@param whole_row true if access is needed to a whole row,
	false if accessing individual fields is enough */
	void build_template(bool whole_row);

	virtual int info_low(uint, bool);

	/** The multi range read session object */
	DsMrr_impl	m_ds_mrr;

@@ -475,6 +510,8 @@ protected:
	this is set in external_lock function */
	THD*		m_user_thd;

	THR_LOCK_DATA	lock;

	/** information for MySQL table locking */
	INNOBASE_SHARE*	m_share;

@@ -514,70 +551,51 @@ the definitions are bracketed with #ifdef INNODB_COMPATIBILITY_HOOKS */
#error InnoDB needs MySQL to be built with #define INNODB_COMPATIBILITY_HOOKS
#endif

LEX_STRING* thd_query_string(MYSQL_THD thd);
size_t thd_query_safe(MYSQL_THD thd, char *buf, size_t buflen);

extern "C" {

struct charset_info_st *thd_charset(MYSQL_THD thd);
LEX_STRING* thd_query_string(MYSQL_THD thd);

/**
Check if a user thread is a replication slave thread
@param thd user thread
@retval 0 the user thread is not a replication slave thread
@retval 1 the user thread is a replication slave thread
*/
/** Check if a user thread is a replication slave thread
@param thd user thread
@retval 0 the user thread is not a replication slave thread
@retval 1 the user thread is a replication slave thread */
int thd_slave_thread(const MYSQL_THD thd);

/**
Check if a user thread is running a non-transactional update
@param thd user thread
@retval 0 the user thread is not running a non-transactional update
@retval 1 the user thread is running a non-transactional update
*/
/** Check if a user thread is running a non-transactional update
@param thd user thread
@retval 0 the user thread is not running a non-transactional update
@retval 1 the user thread is running a non-transactional update */
int thd_non_transactional_update(const MYSQL_THD thd);

/**
Get the user thread's binary logging format
@param thd user thread
@return Value to be used as index into the binlog_format_names array
*/
/** Get the user thread's binary logging format
@param thd user thread
@return Value to be used as index into the binlog_format_names array */
int thd_binlog_format(const MYSQL_THD thd);

/**
Mark transaction to rollback and mark error as fatal to a sub-statement.
@param thd Thread handle
@param all TRUE <=> rollback main transaction.
*/
void thd_mark_transaction_to_rollback(MYSQL_THD thd, bool all);

/**
Check if binary logging is filtered for thread's current db.
@param thd Thread handle
@retval 1 the query is not filtered, 0 otherwise.
*/
/** Check if binary logging is filtered for thread's current db.
@param thd Thread handle
@retval 1 the query is not filtered, 0 otherwise. */
bool thd_binlog_filter_ok(const MYSQL_THD thd);

/**
Check if the query may generate row changes which
may end up in the binary.
@param thd Thread handle
@return 1 the query may generate row changes, 0 otherwise.
/** Check if the query may generate row changes which may end up in the binary.
@param thd Thread handle
@retval 1 the query may generate row changes, 0 otherwise.
*/
bool thd_sqlcom_can_generate_row_events(const MYSQL_THD thd);

/**
Gets information on the durability property requested by
a thread.
@param thd Thread handle
@return a durability property.
*/
enum durability_properties thd_get_durability_property(const MYSQL_THD thd);
/** Gets information on the durability property requested by a thread.
@param thd Thread handle
@return a durability property. */
durability_properties thd_get_durability_property(const MYSQL_THD thd);

/** Is strict sql_mode set.
@param thd Thread object
@return True if sql_mode has strict mode (all or trans), false otherwise.
*/
bool thd_is_strict_mode(const MYSQL_THD thd)
__attribute__((nonnull));
@param thd Thread object
@return True if sql_mode has strict mode (all or trans), false otherwise. */
bool thd_is_strict_mode(const MYSQL_THD thd);

} /* extern "C" */

/** Get the file name and position of the MySQL binlog corresponding to the
@@ -657,48 +675,14 @@ innobase_index_name_is_reserved(
					created */
	ulint		num_of_keys)	/*!< in: Number of indexes to
					be created. */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
	MY_ATTRIBUTE((warn_unused_result));

extern const char reserved_file_per_table_space_name[];

#ifdef WITH_WSREP
//extern "C" int wsrep_trx_is_aborting(void *thd_ptr);
#endif

/** Check if the explicit tablespace targeted is file_per_table.
@param[in]	create_info	Metadata for the table to create.
Determines InnoDB table flags.
@retval true if successful, false if error */
UNIV_INTERN
bool
innobase_table_flags(
/*=================*/
	const TABLE*		form,		/*!< in: table */
	const HA_CREATE_INFO*	create_info,	/*!< in: information
						on table columns and indexes */
	THD*			thd,		/*!< in: connection */
	bool			use_tablespace,	/*!< in: whether to create
						outside system tablespace */
	ulint*			flags,		/*!< out: DICT_TF flags */
	ulint*			flags2)		/*!< out: DICT_TF2 flags */
	MY_ATTRIBUTE((nonnull, warn_unused_result));

/*****************************************************************//**
Validates the create options. We may build on this function
in future. For now, it checks two specifiers:
KEY_BLOCK_SIZE and ROW_FORMAT
If innodb_strict_mode is not set then this function is a no-op
@return NULL if valid, string if not. */
UNIV_INTERN
const char*
create_options_are_invalid(
/*=======================*/
	THD*		thd,		/*!< in: connection thread. */
	TABLE*		form,		/*!< in: information on table
					columns and indexes */
	HA_CREATE_INFO*	create_info,	/*!< in: create info. */
	bool		use_tablespace)	/*!< in: srv_file_per_table */
	MY_ATTRIBUTE((nonnull, warn_unused_result));

/** Check if the explicit tablespace targeted is file_per_table.
@param[in]	create_info	Metadata for the table to create.
@return true if the table is intended to use a file_per_table tablespace. */
@@ -712,18 +696,37 @@ tablespace_is_file_per_table(
			reserved_file_per_table_space_name)));
}

/** Check if table will be put in an existing shared general tablespace.
/** Check if table will be explicitly put in an existing shared general
or system tablespace.
@param[in]	create_info	Metadata for the table to create.
@return true if the table will use an existing shared general tablespace. */
@return true if the table will use a shared general or system tablespace. */
UNIV_INLINE
bool
tablespace_is_shared_space(
	const HA_CREATE_INFO*	create_info)
{
	return(create_info->tablespace != NULL
	       && create_info->tablespace[0] != '\0'
	       && (0 != strcmp(create_info->tablespace,
			       reserved_file_per_table_space_name)));
}

/** Check if table will be explicitly put in a general tablespace.
@param[in]	create_info	Metadata for the table to create.
@return true if the table will use a general tablespace. */
UNIV_INLINE
bool
tablespace_is_general_space(
	const HA_CREATE_INFO*	create_info)
{
	return(create_info->tablespace != NULL
	       && create_info->tablespace[0] != '\0'
	       && (0 != strcmp(create_info->tablespace,
			       reserved_file_per_table_space_name)));
	       && create_info->tablespace[0] != '\0'
	       && (0 != strcmp(create_info->tablespace,
			       reserved_file_per_table_space_name))
	       && (0 != strcmp(create_info->tablespace,
			       reserved_temporary_space_name))
	       && (0 != strcmp(create_info->tablespace,
			       reserved_system_space_name)));
}

/** Parse hint for table and its indexes, and update the information
@@ -796,6 +799,9 @@ public:
	/** Validate TABLESPACE option. */
	bool create_option_tablespace_is_valid();

	/** Validate COMPRESSION option. */
	bool create_option_compression_is_valid();

	/** Prepare to create a table. */
	int prepare_create_table(const char* name);

@@ -912,7 +918,8 @@ private:
	/** Table flags2 */
	ulint		m_flags2;
};
/*********************************************************************//**

/**
Retrieve the FTS Relevance Ranking result for doc with doc_id
of prebuilt->fts_doc_id
@return the relevance ranking value */
@@ -934,9 +941,8 @@ innobase_fts_find_ranking(
Free the memory for the FTS handler */
void
innobase_fts_close_ranking(
/*=======================*/
	FT_INFO*	fts_hdl)	/*!< in: FTS handler */
	MY_ATTRIBUTE((nonnull));
	FT_INFO*	fts_hdl);	/*!< in: FTS handler */

/**
Initialize the table FTS stopword list
@return TRUE if success */
@@ -946,7 +952,7 @@ innobase_fts_load_stopword(
	dict_table_t*	table,	/*!< in: Table has the FTS */
	trx_t*		trx,	/*!< in: transaction */
	THD*		thd)	/*!< in: current thread */
	MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
	MY_ATTRIBUTE((warn_unused_result));

/** Some defines for innobase_fts_check_doc_id_index() return value */
enum fts_doc_id_index_enum {
@@ -961,7 +967,6 @@ on the Doc ID column.
@return the status of the FTS_DOC_ID index */
fts_doc_id_index_enum
innobase_fts_check_doc_id_index(
/*============================*/
	const dict_table_t*	table,		/*!< in: table definition */
	const TABLE*		altered_table,	/*!< in: MySQL table
						that is being altered */
@@ -976,22 +981,21 @@ on the Doc ID column in MySQL create index definition.
FTS_INCORRECT_DOC_ID_INDEX if the FTS_DOC_ID index is of wrong format */
fts_doc_id_index_enum
innobase_fts_check_doc_id_index_in_def(
/*===================================*/
	ulint		n_key,		/*!< in: Number of keys */
	const KEY*	key_info)	/*!< in: Key definitions */
	MY_ATTRIBUTE((nonnull, warn_unused_result));
	MY_ATTRIBUTE((warn_unused_result));

/***********************************************************************
/**
@return version of the extended FTS API */
uint
innobase_fts_get_version();

/***********************************************************************
/**
@return Which part of the extended FTS API is supported */
ulonglong
innobase_fts_flags();

/***********************************************************************
/**
Find and Retrieve the FTS doc_id for the current result row
@return the document ID */
ulonglong
@@ -999,7 +1003,7 @@ innobase_fts_retrieve_docid(
/*========================*/
	FT_INFO_EXT*	fts_hdl);	/*!< in: FTS handler */

/***********************************************************************
/**
Find and retrieve the size of the current result
@return number of matching rows */
ulonglong
@@ -1007,50 +1011,54 @@ innobase_fts_count_matches(
/*=======================*/
	FT_INFO_EXT*	fts_hdl);	/*!< in: FTS handler */

/** "GEN_CLUST_INDEX" is the name reserved for InnoDB default
system clustered index when there is no primary key. */
extern const char innobase_index_reserve_name[];

/*********************************************************************//**
/**
Copy table flags from MySQL's HA_CREATE_INFO into an InnoDB table object.
Those flags are stored in .frm file and end up in the MySQL table object,
but are frequently used inside InnoDB so we keep their copies into the
InnoDB table object. */
UNIV_INTERN
void
innobase_copy_frm_flags_from_create_info(
/*=====================================*/
	dict_table_t*		innodb_table,	/*!< in/out: InnoDB table */
	const HA_CREATE_INFO*	create_info);	/*!< in: create info */

/*********************************************************************//**
/**
Copy table flags from MySQL's TABLE_SHARE into an InnoDB table object.
Those flags are stored in .frm file and end up in the MySQL table object,
but are frequently used inside InnoDB so we keep their copies into the
InnoDB table object. */
UNIV_INTERN
void
innobase_copy_frm_flags_from_table_share(
/*=====================================*/
	dict_table_t*		innodb_table,	/*!< in/out: InnoDB table */
	const TABLE_SHARE*	table_share);	/*!< in: table share */

/********************************************************************//**
Helper function to push frm mismatch error to error log and
if needed to sql-layer. */
UNIV_INTERN
/** Set up base columns for virtual column
@param[in]	table	the InnoDB table
@param[in]	field	MySQL field
@param[in,out]	v_col	virtual column to be set up */
void
ib_push_frm_error(
/*==============*/
	THD*		thd,		/*!< in: MySQL thd */
	dict_table_t*	ib_table,	/*!< in: InnoDB table */
	TABLE*		table,		/*!< in: MySQL table */
	ulint		n_keys,		/*!< in: InnoDB #keys */
	bool		push_warning);	/*!< in: print warning ? */
innodb_base_col_setup(
	dict_table_t*	table,
	const Field*	field,
	dict_v_col_t*	v_col);

/** Set up base columns for stored column
@param[in]	table	InnoDB table
@param[in]	field	MySQL field
@param[in,out]	s_col	stored column */
void
innodb_base_col_setup_for_stored(
	const dict_table_t*	table,
	const Field*		field,
	dict_s_col_t*		s_col);

/** whether this is a stored column */
// JAN: TODO: MySQL 5.7 virtual fields
// #define innobase_is_v_fld(field) ((field)->gcol_info && !(field)->stored_in_db)
#define innobase_is_v_fld(field) (false)
//#define innobase_is_s_fld(field) ((field)->gcol_info && (field)->stored_in_db)
#define innobase_is_s_fld(field) (field == NULL)
// JAN: TODO: MySQL 5.7 virtual fields
/** whether this is a computed virtual column */
//#define innobase_is_v_fld(field) ((field)->gcol_info && !(field)->stored_in_db)
#define innobase_is_v_fld(field) (field == NULL)

/** Release temporary latches.
Call this function when mysqld passes control to the client. That is to
@@ -1133,28 +1141,11 @@ void
innobase_build_v_templ(
	const TABLE*		table,
	const dict_table_t*	ib_table,
	innodb_col_templ_t*	s_templ,
	dict_vcol_templ_t*	s_templ,
	const dict_add_v_col_t*	add_v,
	bool			locked,
	const char*		share_tbl_name);

/** Free a virtual template in INNOBASE_SHARE structure
@param[in,out]	share	table share holds the template to free */
void
free_share_vtemp(
	INNOBASE_SHARE*	share);

/** Refresh template for the virtual columns and their base columns if
the share structure exists
@param[in]	table		MySQL TABLE
@param[in]	ib_table	InnoDB dict_table_t
@param[in]	table_name	table_name used to find the share structure */
void
refresh_share_vtempl(
	const TABLE*		mysql_table,
	const dict_table_t*	ib_table,
	const char*		table_name);

/** callback used by MySQL server layer to initialized
the table virtual columns' template
@param[in]	table	MySQL TABLE
@@ -1168,25 +1159,32 @@ innobase_build_v_templ_callback(
the table virtual columns' template */
typedef void (*my_gcolumn_templatecallback_t)(const TABLE*, void*);

/** Get the computed value by supplying the base column values.
@param[in,out]	table	the table whose virtual column template to be built */
/********************************************************************//**
Helper function to push frm mismatch error to error log and
if needed to sql-layer. */
UNIV_INTERN
void
innobase_init_vc_templ(
	dict_table_t*	table);

/** Free the virtual column template
@param[in,out]	vc_templ	virtual column template */
void
free_vc_templ(
	innodb_col_templ_t*	vc_templ);

/** Set up base columns for virtual column
@param[in]	table	InnoDB table
@param[in]	field	MySQL field
@param[in,out]	v_col	virtual column */
void
innodb_base_col_setup(
	dict_table_t*	table,
	const Field*	field,
	dict_v_col_t*	v_col);
ib_push_frm_error(
/*==============*/
	THD*		thd,		/*!< in: MySQL thd */
	dict_table_t*	ib_table,	/*!< in: InnoDB table */
	TABLE*		table,		/*!< in: MySQL table */
	ulint		n_keys,		/*!< in: InnoDB #keys */
	bool		push_warning);	/*!< in: print warning ? */

/*****************************************************************//**
Validates the create options. We may build on this function
in future. For now, it checks two specifiers:
KEY_BLOCK_SIZE and ROW_FORMAT
If innodb_strict_mode is not set then this function is a no-op
@return NULL if valid, string if not. */
UNIV_INTERN
const char*
create_options_are_invalid(
/*=======================*/
	THD*		thd,		/*!< in: connection thread. */
	TABLE*		form,		/*!< in: information on table
					columns and indexes */
	HA_CREATE_INFO*	create_info,	/*!< in: create info. */
	bool		use_tablespace)	/*!< in: srv_file_per_table */
	MY_ATTRIBUTE((nonnull, warn_unused_result));

@@ -1,6 +1,6 @@
/*****************************************************************************

Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -79,8 +79,7 @@ Ha_innopart_share::Ha_innopart_share(
	m_tot_parts(),
	m_index_count(),
	m_ref_count(),
	m_table_share(table_share),
	m_s_templ()
	m_table_share(table_share)
{}

Ha_innopart_share::~Ha_innopart_share()
@@ -94,11 +93,6 @@ Ha_innopart_share::~Ha_innopart_share()
		ut_free(m_index_mapping);
		m_index_mapping = NULL;
	}
	if (m_s_templ != NULL) {
		free_vc_templ(m_s_templ);
		ut_free(m_s_templ);
		m_s_templ = NULL;
	}
}

/** Fold to lower case if windows or lower_case_table_names == 1.
@@ -229,25 +223,27 @@ Ha_innopart_share::set_v_templ(
	dict_table_t*	ib_table,
	const char*	name)
{
#ifndef DBUG_OFF
	if (m_table_share->tmp_table == NO_TMP_TABLE) {
		mysql_mutex_assert_owner(&m_table_share->LOCK_ha_data);
	}
#endif /* DBUG_OFF */
	ut_ad(mutex_own(&dict_sys->mutex));

	if (ib_table->n_v_cols > 0) {
		if (!m_s_templ) {
			m_s_templ = static_cast<innodb_col_templ_t*>(
				ut_zalloc_nokey( sizeof *m_s_templ));
			innobase_build_v_templ(table, ib_table,
					       m_s_templ, NULL, false, name);
		for (ulint i = 0; i < m_tot_parts; i++) {
			if (m_table_parts[i]->vc_templ == NULL) {
				m_table_parts[i]->vc_templ
					= UT_NEW_NOKEY(dict_vcol_templ_t());
				m_table_parts[i]->vc_templ->vtempl = NULL;
			} else if (m_table_parts[i]->get_ref_count() == 1) {
				/* Clean and refresh the template */
				dict_free_vc_templ(m_table_parts[i]->vc_templ);
				m_table_parts[i]->vc_templ->vtempl = NULL;
			}

			for (ulint i = 0; i < m_tot_parts; i++) {
				m_table_parts[i]->vc_templ = m_s_templ;
			if (m_table_parts[i]->vc_templ->vtempl == NULL) {
				innobase_build_v_templ(
					table, ib_table,
					m_table_parts[i]->vc_templ,
					NULL, true, name);
			}
		}
	} else {
		ut_ad(!m_s_templ);
	}
}

@@ -467,12 +463,6 @@ Ha_innopart_share::close_table_parts()
		m_index_mapping = NULL;
	}

	if (m_s_templ != NULL) {
		free_vc_templ(m_s_templ);
		ut_free(m_s_templ);
		m_s_templ = NULL;
	}

	m_tot_parts = 0;
	m_index_count = 0;
}
@@ -1128,10 +1118,13 @@ share_error:
	m_prebuilt->default_rec = table->s->default_values;
	ut_ad(m_prebuilt->default_rec);

	DBUG_ASSERT(table != NULL);
	m_prebuilt->m_mysql_table = table;

	if (ib_table->n_v_cols > 0) {
		lock_shared_ha_data();
		mutex_enter(&dict_sys->mutex);
		m_part_share->set_v_templ(table, ib_table, name);
		unlock_shared_ha_data();
		mutex_exit(&dict_sys->mutex);
	}

	/* Looks like MySQL-3.23 sometimes has primary key number != 0. */
@@ -1763,6 +1756,9 @@ ha_innopart::index_init(
		m_prebuilt->m_no_prefetch = true;
	}

	/* For scan across partitions, the keys needs to be materialized */
	m_prebuilt->m_read_virtual_key = true;

	error = change_active_index(part_id, keynr);
	if (error != 0) {
		destroy_record_priority_queue();
@@ -1787,12 +1783,14 @@ ha_innopart::index_end()

	if (part_id == MY_BIT_NONE) {
		/* Never initialized any index. */
		active_index = MAX_KEY;
		DBUG_RETURN(0);
	}
	if (m_ordered) {
		destroy_record_priority_queue();
		m_prebuilt->m_no_prefetch = false;
	}
	m_prebuilt->m_read_virtual_key = false;

	DBUG_RETURN(ha_innobase::index_end());
}
@@ -2119,7 +2117,7 @@ ha_innopart::index_next_in_part(
	ut_ad(m_ordered_scan_ongoing
	      || m_ordered_rec_buffer == NULL
	      || m_prebuilt->used_in_HANDLER
	      || m_part_info->num_partitions_used() <= 1);
	      || m_part_spec.start_part >= m_part_spec.end_part);

	DBUG_RETURN(error);
}
@@ -2182,7 +2180,7 @@ ha_innopart::index_prev_in_part(
	ut_ad(m_ordered_scan_ongoing
	      || m_ordered_rec_buffer == NULL
	      || m_prebuilt->used_in_HANDLER
	      || m_part_info->num_partitions_used() <= 1);
	      || m_part_spec.start_part >= m_part_spec.end_part);

	return(error);
}
@@ -2553,6 +2551,19 @@ ha_innopart::update_part_elem(
				ib_table->tablespace);
		}
	}
	else {
		ut_ad(part_elem->tablespace_name == NULL
		      || 0 == strcmp(part_elem->tablespace_name,
				     "innodb_file_per_table"));
		if (part_elem->tablespace_name != NULL
		    && 0 != strcmp(part_elem->tablespace_name,
				   "innodb_file_per_table")) {

			/* Update part_elem tablespace to NULL same as in
			innodb data dictionary ib_table. */
			part_elem->tablespace_name = NULL;
		}
	}
}

/** Update create_info.
@@ -3537,14 +3548,31 @@ ha_innopart::info_low(
				checked_sys_tablespace = true;
			}

			ulint	space = static_cast<ulint>(
			uintmax_t	space =
				fsp_get_available_space_in_free_extents(
					ib_table->space));
			if (space == ULINT_UNDEFINED) {
				ut_ad(0);
			avail_space = space;
					ib_table->space);
			if (space == UINTMAX_MAX) {
				THD*	thd = ha_thd();
				const char*	table_name
					= ib_table->name.m_name;

				push_warning_printf(
					thd,
					Sql_condition::SL_WARNING,
					ER_CANT_GET_STAT,
					"InnoDB: Trying to get the"
					" free space for partition %s"
					" but its tablespace has been"
					" discarded or the .ibd file"
					" is missing. Setting the free"
					" space of the partition to"
					" zero.",
					ut_get_name(
						m_prebuilt->trx,
						table_name).c_str());
			} else {
				avail_space += space;
				avail_space +=
					static_cast<ulint>(space);
			}
		}
	}
@@ -3598,35 +3626,7 @@ ha_innopart::info_low(
	if ((flag & HA_STATUS_NO_LOCK) == 0
	    && (flag & HA_STATUS_VARIABLE_EXTRA) != 0
	    && srv_force_recovery < SRV_FORCE_NO_IBUF_MERGE) {

		if (avail_space == ULINT_UNDEFINED) {
			THD*	thd;
			char	errbuf[MYSYS_STRERROR_SIZE];

			thd = ha_thd();

			std::string err_str;
			err_str = "InnoDB: Trying to get"
				" the free space for table ";
			err_str += ut_get_name(m_prebuilt->trx,
					       ib_table->name.m_name);
			err_str += " but its tablespace has been"
				" discarded or the .ibd file is"
				" missing. Setting the free space to"
				" zero.";
			push_warning_printf(
				thd,
				Sql_condition::SL_WARNING,
				ER_CANT_GET_STAT,
				err_str.c_str(),
				errno,
				my_strerror(errbuf, sizeof(errbuf),
					    errno));

			stats.delete_length = 0;
		} else {
			stats.delete_length = avail_space * 1024;
		}
		stats.delete_length = avail_space * 1024;
	}

	stats.check_time = 0;
@@ -4085,6 +4085,39 @@ ha_innopart::start_stmt(
	return(error);
}

/** Function to store lock for all partitions in native partitioned table. Also
look at ha_innobase::store_lock for more details.
@param[in]	thd		user thread handle
@param[in]	to		pointer to the current element in an array of
pointers to lock structs
@param[in]	lock_type	lock type to store in 'lock'; this may also be
TL_IGNORE
@retval	to	pointer to the current element in the 'to' array */
THR_LOCK_DATA**
ha_innopart::store_lock(
	THD*			thd,
	THR_LOCK_DATA**		to,
	thr_lock_type		lock_type)
{
	trx_t*	trx = m_prebuilt->trx;
	const uint sql_command = thd_sql_command(thd);

	ha_innobase::store_lock(thd, to, lock_type);

	if (sql_command == SQLCOM_FLUSH
	    && lock_type == TL_READ_NO_INSERT) {
		for (uint i = 1; i < m_tot_parts; i++) {
			dict_table_t* table = m_part_share->get_table_part(i);

			dberr_t err = row_quiesce_set_state(
				table, QUIESCE_START, trx);
			ut_a(err == DB_SUCCESS || err == DB_UNSUPPORTED);
		}
	}

	return to;
}

/** Lock/prepare to lock table.
As MySQL will execute an external lock for every new table it uses when it
starts to process an SQL statement (an exception is when MySQL calls
@@ -4102,8 +4135,6 @@ ha_innopart::external_lock(
	int	lock_type)
{
	int	error = 0;
	bool	is_quiesce_set = false;
	bool	is_quiesce_start = false;

	if (m_part_info->get_first_used_partition() == MY_BIT_NONE
	    && !(m_mysql_has_locked
@@ -4116,63 +4147,55 @@ ha_innopart::external_lock(
	ut_ad(m_mysql_has_locked || lock_type != F_UNLCK);

	m_prebuilt->table = m_part_share->get_table_part(0);
	switch (m_prebuilt->table->quiesce) {
	case QUIESCE_START:
		/* Check for FLUSH TABLE t WITH READ LOCK; */
		if (!srv_read_only_mode
		    && thd_sql_command(thd) == SQLCOM_FLUSH
		    && lock_type == F_RDLCK) {

			is_quiesce_set = true;
			is_quiesce_start = true;
		}
		break;

	case QUIESCE_COMPLETE:
		/* Check for UNLOCK TABLES; implicit or explicit
		or trx interruption. */
		if (m_prebuilt->trx->flush_tables > 0
		    && (lock_type == F_UNLCK
			|| trx_is_interrupted(m_prebuilt->trx))) {

			is_quiesce_set = true;
		}

		break;

	case QUIESCE_NONE:
		break;
	default:
		ut_ad(0);
	}

	error = ha_innobase::external_lock(thd, lock_type);

	/* FLUSH FOR EXPORT is done above only for the first partition,
	so complete it for all the other partitions. */
	if (is_quiesce_set) {
		for (uint i = 1; i < m_tot_parts; i++) {
			dict_table_t* table = m_part_share->get_table_part(i);
			if (is_quiesce_start) {
				table->quiesce = QUIESCE_START;
				row_quiesce_table_start(table, m_prebuilt->trx);
	for (uint i = 0; i < m_tot_parts; i++) {
		dict_table_t* table = m_part_share->get_table_part(i);

		/* Use the transaction instance to track UNLOCK
		TABLES. It can be done via START TRANSACTION;
		too implicitly. */
		switch (table->quiesce) {
		case QUIESCE_START:
			/* Check for FLUSH TABLE t WITH READ LOCK */
			if (!srv_read_only_mode
			    && thd_sql_command(thd) == SQLCOM_FLUSH
			    && lock_type == F_RDLCK) {

				ut_ad(table->quiesce == QUIESCE_START);

				row_quiesce_table_start(table,
							m_prebuilt->trx);

				/* Use the transaction instance to track
				UNLOCK TABLES. It can be done via START
				TRANSACTION; too implicitly. */

				++m_prebuilt->trx->flush_tables;
			} else {
			}
			break;

		case QUIESCE_COMPLETE:
			/* Check for UNLOCK TABLES; implicit or explicit
			or trx interruption. */
			if (m_prebuilt->trx->flush_tables > 0
			    && (lock_type == F_UNLCK
				|| trx_is_interrupted(m_prebuilt->trx))) {

				ut_ad(table->quiesce == QUIESCE_COMPLETE);
				row_quiesce_table_complete(table,
							   m_prebuilt->trx);
							   m_prebuilt->trx);

				ut_a(m_prebuilt->trx->flush_tables > 0);
				--m_prebuilt->trx->flush_tables;
			}
			break;

		case QUIESCE_NONE:
			break;

		default:
			ut_ad(0);
		}
		m_prebuilt->table = m_part_share->get_table_part(0);
	}

	ut_ad(!m_auto_increment_lock);
	ut_ad(!m_auto_increment_safe_stmt_log_lock);


@@ -1,6 +1,6 @@
/*****************************************************************************

Copyright (c) 2014, 2015, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved.

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -21,9 +21,7 @@ this program; if not, write to the Free Software Foundation, Inc.,
#ifndef ha_innopart_h
#define ha_innopart_h

/* JAN: TODO: MySQL 5.7 */
//#include "partitioning/partition_handler.h"
#include "ha_partition.h"
#include "partitioning/partition_handler.h"

/* Forward declarations */
class Altered_partitions;
@@ -62,8 +60,6 @@ private:
	/** Pointer back to owning TABLE_SHARE. */
	TABLE_SHARE*	m_table_share;

	/** Virtual column template */
	innodb_col_templ_t*	m_s_templ;
public:
	Ha_innopart_share(
		TABLE_SHARE*	table_share);
@@ -188,7 +184,7 @@ truncate_partition.
InnoDB specific functions related to partitioning is implemented here. */
class ha_innopart:
	public ha_innobase,
//	public Partition_helper,
	public Partition_helper,
	public Partition_handler
{
public:
@@ -509,9 +505,7 @@ public:
	ft_init_ext_with_hints(
		uint		inx,
		String*		key,
/* JAN: TODO: MySQL 5.7
		Ft_hints*	hints)*/
		void*		hints)
		Ft_hints*	hints)
	{
		ut_ad(0);
		return(NULL);
@@ -613,7 +607,7 @@ public:

	uint
	alter_flags(
		uint	flags __attribute__((unused))) const
		uint	flags MY_ATTRIBUTE((unused))) const
	{
		return(HA_PARTITION_FUNCTION_SUPPORTED
		       | HA_FAST_CHANGE_PARTITION);
@@ -1124,6 +1118,12 @@ private:
		THD*	thd,
		int	lock_type);

	THR_LOCK_DATA**
	store_lock(
		THD*			thd,
		THR_LOCK_DATA**		to,
		thr_lock_type		lock_type);

	int
	write_row(
		uchar*	record)

File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -566,7 +566,7 @@ ibuf_init_at_db_start(void)

	ibuf->index = dict_mem_index_create(
		"innodb_change_buffer", "CLUST_IND",
		IBUF_SPACE_ID, DICT_CLUSTERED | DICT_IBUF, 1);
		IBUF_SPACE_ID, DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF, 1);
	ibuf->index->id = DICT_IBUF_ID_MIN + IBUF_SPACE_ID;
	ibuf->index->table = dict_mem_table_create(
		"innodb_change_buffer", IBUF_SPACE_ID, 1, 0, 0, 0);
@@ -828,6 +828,7 @@ ibuf_bitmap_get_map_page_func(
		return NULL;
	}


	buf_block_dbg_add_level(block, SYNC_IBUF_BITMAP);

	return(buf_block_get_frame(block));
@@ -2245,11 +2246,11 @@ ibuf_free_excess_pages(void)
}

#ifdef UNIV_DEBUG
# define ibuf_get_merge_page_nos(contract,rec,mtr,ids,vers,pages,n_stored) \
	ibuf_get_merge_page_nos_func(contract,rec,mtr,ids,vers,pages,n_stored)
# define ibuf_get_merge_page_nos(contract,rec,mtr,ids,pages,n_stored) \
	ibuf_get_merge_page_nos_func(contract,rec,mtr,ids,pages,n_stored)
#else /* UNIV_DEBUG */
# define ibuf_get_merge_page_nos(contract,rec,mtr,ids,vers,pages,n_stored) \
	ibuf_get_merge_page_nos_func(contract,rec,ids,vers, pages,n_stored)
# define ibuf_get_merge_page_nos(contract,rec,mtr,ids,pages,n_stored) \
	ibuf_get_merge_page_nos_func(contract,rec,ids,pages,n_stored)
#endif /* UNIV_DEBUG */

/*********************************************************************//**
@@ -2272,9 +2273,6 @@ ibuf_get_merge_page_nos_func(
	ulint*		page_nos,/*!< in/out: buffer for at least
				IBUF_MAX_N_PAGES_MERGED many page numbers;
				the page numbers are in an ascending order */
	ib_uint64_t*	space_versions,/*!< in/out: tablespace version
				timestamps; used to prevent reading in old
				pages after DISCARD + IMPORT tablespace */
	ulint*		n_stored)/*!< out: number of page numbers stored to
				page_nos in this function */
{
@@ -2403,8 +2401,6 @@ ibuf_get_merge_page_nos_func(
			    / IBUF_MERGE_THRESHOLD)) {

				space_ids[*n_stored] = prev_space_id;
				space_versions[*n_stored]
					= fil_space_get_version(prev_space_id);
				page_nos[*n_stored] = prev_page_no;

				(*n_stored)++;
@@ -2482,13 +2478,11 @@ ibuf_get_merge_pages(
	ulint		limit,	/*!< in: max page numbers to read */
	ulint*		pages,	/*!< out: pages read */
	ulint*		spaces,	/*!< out: spaces read */
	ib_uint64_t*	versions,/*!< out: space versions read */
	ulint*		n_pages,/*!< out: number of pages read */
	mtr_t*		mtr)	/*!< in: mini transaction */
{
	const rec_t*	rec;
	ulint		volume = 0;
	ib_uint64_t	version = fil_space_get_version(space);

	ut_a(space != ULINT_UNDEFINED);

@@ -2503,7 +2497,6 @@ ibuf_get_merge_pages(
		if (*n_pages == 0 || pages[*n_pages - 1] != page_no) {
			spaces[*n_pages] = space;
			pages[*n_pages] = page_no;
			versions[*n_pages] = version;
			++*n_pages;
		}

@@ -2534,7 +2527,6 @@ ibuf_merge_pages(
	ulint		sum_sizes;
	ulint		page_nos[IBUF_MAX_N_PAGES_MERGED];
	ulint		space_ids[IBUF_MAX_N_PAGES_MERGED];
	ib_uint64_t	space_versions[IBUF_MAX_N_PAGES_MERGED];

	*n_pages = 0;

@@ -2570,7 +2562,7 @@ ibuf_merge_pages(
	sum_sizes = ibuf_get_merge_page_nos(TRUE,
					    btr_pcur_get_rec(&pcur), &mtr,
					    space_ids,
					    page_nos, space_versions, n_pages);
					    page_nos, n_pages);
#if 0 /* defined UNIV_IBUF_DEBUG */
	fprintf(stderr, "Ibuf contract sync %lu pages %lu volume %lu\n",
		sync, *n_pages, sum_sizes);
@@ -2579,7 +2571,7 @@ ibuf_merge_pages(
	btr_pcur_close(&pcur);

	buf_read_ibuf_merge_pages(
		sync, space_ids, space_versions, page_nos, *n_pages);
		sync, space_ids, page_nos, *n_pages);

	return(sum_sizes + 1);
}
@@ -2588,7 +2580,6 @@ ibuf_merge_pages(
Contracts insert buffer trees by reading pages referring to space_id
to the buffer pool.
@returns number of pages merged.*/
UNIV_INTERN
ulint
ibuf_merge_space(
/*=============*/
@@ -2619,7 +2610,6 @@ ibuf_merge_space(
	ulint		sum_sizes = 0;
	ulint		pages[IBUF_MAX_N_PAGES_MERGED];
	ulint		spaces[IBUF_MAX_N_PAGES_MERGED];
	ib_uint64_t	versions[IBUF_MAX_N_PAGES_MERGED];

	if (page_is_empty(btr_pcur_get_page(&pcur))) {
		/* If a B-tree page is empty, it must be the root page
@@ -2634,11 +2624,10 @@ ibuf_merge_space(
	} else {

		sum_sizes = ibuf_get_merge_pages(
			&pcur, space, IBUF_MAX_N_PAGES_MERGED,
			&pages[0], &spaces[0], &versions[0], &n_pages,
			&mtr);
			&pcur, space, IBUF_MAX_N_PAGES_MERGED,
			&pages[0], &spaces[0], &n_pages,
			&mtr);
		ib::info() << "Size of pages merged " << sum_sizes;

	}

	ibuf_mtr_commit(&mtr);
@@ -2646,18 +2635,16 @@
||||
@@ -2646,18 +2635,16 @@ ibuf_merge_space(
|
||||
btr_pcur_close(&pcur);
|
||||
|
||||
if (n_pages > 0) {
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
ut_ad(n_pages <= UT_ARR_SIZE(pages));
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
for (ulint i = 0; i < n_pages; ++i) {
|
||||
ut_ad(spaces[i] == space);
|
||||
ut_ad(i == 0 || versions[i] == versions[i - 1]);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
buf_read_ibuf_merge_pages(
|
||||
true, spaces, versions, pages, n_pages);
|
||||
true, spaces, pages, n_pages);
|
||||
}
|
||||
|
||||
return(n_pages);
|
||||
@@ -2667,17 +2654,14 @@ ibuf_merge_space(
|
||||
@param[out] n_pages number of pages merged
|
||||
@param[in] sync whether the caller waits for
|
||||
the issued reads to complete
|
||||
@param[in] space_id tablespace for which to merge, or
|
||||
ULINT_UNDEFINED for all tablespaces
|
||||
@return a lower limit for the combined size in bytes of entries which
|
||||
will be merged from ibuf trees to the pages read, 0 if ibuf is
|
||||
empty */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
ulint
|
||||
ibuf_merge(
|
||||
ulint* n_pages,
|
||||
bool sync,
|
||||
ulint space_id)
|
||||
bool sync)
|
||||
{
|
||||
*n_pages = 0;
|
||||
|
||||
@@ -2692,10 +2676,8 @@ ibuf_merge(
|
||||
} else if (ibuf_debug) {
|
||||
return(0);
|
||||
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
|
||||
} else if (space_id == ULINT_UNDEFINED) {
|
||||
return(ibuf_merge_pages(n_pages, sync));
|
||||
} else {
|
||||
return(ibuf_merge_space(space_id));
|
||||
return(ibuf_merge_pages(n_pages, sync));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2718,15 +2700,12 @@ ibuf_contract(
|
||||
@param[in] full If true, do a full contraction based
|
||||
on PCT_IO(100). If false, the size of contract batch is determined
|
||||
based on the current size of the change buffer.
|
||||
@param[in] space_id tablespace for which to contract, or
|
||||
ULINT_UNDEFINED to contract for all tablespaces
|
||||
@return a lower limit for the combined size in bytes of entries which
|
||||
will be merged from ibuf trees to the pages read, 0 if ibuf is
|
||||
empty */
|
||||
ulint
|
||||
ibuf_merge_in_background(
|
||||
bool full,
|
||||
ulint space_id)
|
||||
bool full)
|
||||
{
|
||||
ulint sum_bytes = 0;
|
||||
ulint sum_pages = 0;
|
||||
@@ -2734,7 +2713,7 @@ ibuf_merge_in_background(
|
||||
ulint n_pages;
|
||||
|
||||
#if defined UNIV_DEBUG || defined UNIV_IBUF_DEBUG
|
||||
if (srv_ibuf_disable_background_merge && space_id == ULINT_UNDEFINED) {
|
||||
if (srv_ibuf_disable_background_merge) {
|
||||
return(0);
|
||||
}
|
||||
#endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
|
||||
@@ -2771,7 +2750,7 @@ ibuf_merge_in_background(
|
||||
while (sum_pages < n_pages) {
|
||||
ulint n_bytes;
|
||||
|
||||
n_bytes = ibuf_merge(&n_pag2, false, space_id);
|
||||
n_bytes = ibuf_merge(&n_pag2, false);
|
||||
|
||||
if (n_bytes == 0) {
|
||||
return(sum_bytes);
|
||||
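Note: the hunks above remove the per-tablespace (space_id) merge path, so ibuf_merge() and ibuf_merge_in_background() now always merge across all tablespaces. The following standalone C++ sketch only illustrates the resulting loop shape; merge_some_pages() and the counters are hypothetical stand-ins, not the real InnoDB functions.

#include <cstddef>

/* Hypothetical stand-in for ibuf_merge(&n_pages, sync): merges one small
batch, reports how many pages it touched, and returns a lower bound on the
merged bytes (0 once the change buffer is empty). */
static size_t merge_some_pages(size_t* pages_merged, bool sync)
{
	(void) sync;
	*pages_merged = 0;	/* a real implementation would do work here */
	return 0;
}

/* Mirrors the shape of ibuf_merge_in_background(full): issue small batches
until the requested number of pages has been merged or nothing is left. */
static size_t merge_in_background(size_t target_pages)
{
	size_t	sum_bytes = 0;
	size_t	sum_pages = 0;

	while (sum_pages < target_pages) {
		size_t	batch_pages = 0;
		size_t	n_bytes = merge_some_pages(&batch_pages, false);

		if (n_bytes == 0) {
			return(sum_bytes);	/* change buffer is empty */
		}

		sum_bytes += n_bytes;
		sum_pages += batch_pages;
	}

	return(sum_bytes);
}

int main()
{
	return(merge_in_background(32) == 0 ? 0 : 1);
}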
@@ -3383,7 +3362,7 @@ or clustered
|
||||
@param[in] page_size page size
|
||||
@param[in,out] thr query thread
|
||||
@return DB_SUCCESS, DB_STRONG_FAIL or other error */
|
||||
static MY_ATTRIBUTE((nonnull, warn_unused_result))
|
||||
static MY_ATTRIBUTE((warn_unused_result))
|
||||
dberr_t
|
||||
ibuf_insert_low(
|
||||
ulint mode,
|
||||
@@ -3414,7 +3393,6 @@ ibuf_insert_low(
|
||||
ibool do_merge;
|
||||
ulint space_ids[IBUF_MAX_N_PAGES_MERGED];
|
||||
ulint page_nos[IBUF_MAX_N_PAGES_MERGED];
|
||||
ib_uint64_t space_versions[IBUF_MAX_N_PAGES_MERGED];
|
||||
ulint n_stored;
|
||||
mtr_t mtr;
|
||||
mtr_t bitmap_mtr;
|
||||
@@ -3576,7 +3554,7 @@ fail_exit:
|
||||
ibuf_get_merge_page_nos(FALSE,
|
||||
btr_pcur_get_rec(&pcur), &mtr,
|
||||
space_ids,
|
||||
page_nos, space_versions, &n_stored);
|
||||
page_nos, &n_stored);
|
||||
|
||||
goto fail_exit;
|
||||
}
|
||||
@@ -3711,7 +3689,7 @@ func_exit:
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
ut_a(n_stored <= IBUF_MAX_N_PAGES_MERGED);
|
||||
#endif
|
||||
buf_read_ibuf_merge_pages(false, space_ids, space_versions,
|
||||
buf_read_ibuf_merge_pages(false, space_ids,
|
||||
page_nos, n_stored);
|
||||
}
|
||||
|
||||
@@ -3973,7 +3951,7 @@ ibuf_insert_to_index_page(
|
||||
ut_ad(!dict_index_is_online_ddl(index));// this is an ibuf_dummy index
|
||||
ut_ad(ibuf_inside(mtr));
|
||||
ut_ad(dtuple_check_typed(entry));
|
||||
ut_ad(!buf_block_align(page)->index);
|
||||
ut_ad(!block->index);
|
||||
ut_ad(mtr->is_named_space(block->page.id.space()));
|
||||
|
||||
if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
|
||||
@@ -4813,7 +4791,6 @@ reset_bit:
|
||||
os_atomic_increment_ulint(&ibuf->n_merges, 1);
|
||||
ibuf_add_ops(ibuf->n_merged_ops, mops);
|
||||
ibuf_add_ops(ibuf->n_discarded_ops, dops);
|
||||
|
||||
if (space != NULL) {
|
||||
fil_space_release(space);
|
||||
}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2011, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2012, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -1030,4 +1030,11 @@ ib_ut_strerr(
|
||||
/*=========*/
|
||||
ib_err_t num); /*!< in: error number */
|
||||
|
||||
/** Check the table whether it contains virtual columns.
|
||||
@param[in] crsr InnoDB Cursor
|
||||
@return true if table contains virtual column else false. */
|
||||
ib_bool_t
|
||||
ib_is_virtual_table(
|
||||
ib_crsr_t crsr);
|
||||
|
||||
#endif /* api0api_h */
|
||||
|
||||
@@ -179,7 +179,7 @@ dberr_t
|
||||
btr_root_adjust_on_import(
|
||||
/*======================*/
|
||||
const dict_index_t* index) /*!< in: index tree */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/**************************************************************//**
|
||||
Gets the height of the B-tree (the level of the root, when the leaf
|
||||
@@ -191,7 +191,7 @@ btr_height_get(
|
||||
/*===========*/
|
||||
dict_index_t* index, /*!< in: index tree */
|
||||
mtr_t* mtr) /*!< in/out: mini-transaction */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/** Gets a buffer page and declares its latching order level.
|
||||
@param[in] page_id page id
|
||||
@@ -254,7 +254,7 @@ index_id_t
|
||||
btr_page_get_index_id(
|
||||
/*==================*/
|
||||
const page_t* page) /*!< in: index page */
|
||||
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/********************************************************//**
|
||||
Gets the node level field in an index page.
|
||||
@@ -264,7 +264,7 @@ ulint
|
||||
btr_page_get_level_low(
|
||||
/*===================*/
|
||||
const page_t* page) /*!< in: index page */
|
||||
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#define btr_page_get_level(page, mtr) btr_page_get_level_low(page)
|
||||
/********************************************************//**
|
||||
Gets the next index page number.
|
||||
@@ -275,7 +275,7 @@ btr_page_get_next(
|
||||
/*==============*/
|
||||
const page_t* page, /*!< in: index page */
|
||||
mtr_t* mtr) /*!< in: mini-transaction handle */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/********************************************************//**
|
||||
Gets the previous index page number.
|
||||
@return prev page number */
|
||||
@@ -285,7 +285,7 @@ btr_page_get_prev(
|
||||
/*==============*/
|
||||
const page_t* page, /*!< in: index page */
|
||||
mtr_t* mtr) /*!< in: mini-transaction handle */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/**************************************************************//**
|
||||
Releases the latch on a leaf page and bufferunfixes it. */
|
||||
UNIV_INLINE
|
||||
@@ -310,7 +310,7 @@ btr_node_ptr_get_child_page_no(
|
||||
/*===========================*/
|
||||
const rec_t* rec, /*!< in: node pointer record */
|
||||
const ulint* offsets)/*!< in: array returned by rec_get_offsets() */
|
||||
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/** Create the root node for a new index tree.
|
||||
@param[in] type type of the index
|
||||
@@ -374,7 +374,7 @@ btr_root_raise_and_insert(
|
||||
const dtuple_t* tuple, /*!< in: tuple to insert */
|
||||
ulint n_ext, /*!< in: number of externally stored columns */
|
||||
mtr_t* mtr) /*!< in: mtr */
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*************************************************************//**
|
||||
Reorganizes an index page.
|
||||
|
||||
@@ -399,7 +399,7 @@ btr_page_reorganize_low(
|
||||
page_cur_t* cursor, /*!< in/out: page cursor */
|
||||
dict_index_t* index, /*!< in: the index tree of the page */
|
||||
mtr_t* mtr) /*!< in/out: mini-transaction */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*************************************************************//**
|
||||
Reorganizes an index page.
|
||||
|
||||
@@ -429,7 +429,7 @@ btr_page_get_split_rec_to_left(
|
||||
rec_t** split_rec)/*!< out: if split recommended,
|
||||
the first record on upper half page,
|
||||
or NULL if tuple should be first */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*************************************************************//**
|
||||
Decides if the page should be split at the convergence point of
|
||||
inserts converging to right.
|
||||
@@ -441,7 +441,7 @@ btr_page_get_split_rec_to_right(
|
||||
rec_t** split_rec)/*!< out: if split recommended,
|
||||
the first record on upper half page,
|
||||
or NULL if tuple should be first */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/*************************************************************//**
|
||||
Splits an index page to halves and inserts the tuple. It is assumed
|
||||
@@ -465,7 +465,7 @@ btr_page_split_and_insert(
|
||||
const dtuple_t* tuple, /*!< in: tuple to insert */
|
||||
ulint n_ext, /*!< in: number of externally stored columns */
|
||||
mtr_t* mtr) /*!< in: mtr */
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*******************************************************//**
|
||||
Inserts a data tuple to a tree on a non-leaf level. It is assumed
|
||||
that mtr holds an x-latch on the tree. */
|
||||
@@ -478,8 +478,7 @@ btr_insert_on_non_leaf_level_func(
|
||||
dtuple_t* tuple, /*!< in: the record to be inserted */
|
||||
const char* file, /*!< in: file name */
|
||||
ulint line, /*!< in: line where called */
|
||||
mtr_t* mtr) /*!< in: mtr */
|
||||
MY_ATTRIBUTE((nonnull(4,5)));
|
||||
mtr_t* mtr); /*!< in: mtr */
|
||||
# define btr_insert_on_non_leaf_level(f,i,l,t,m) \
|
||||
btr_insert_on_non_leaf_level_func(f,i,l,t,__FILE__,__LINE__,m)
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
@@ -511,7 +510,7 @@ btr_check_node_ptr(
|
||||
dict_index_t* index, /*!< in: index tree */
|
||||
buf_block_t* block, /*!< in: index page */
|
||||
mtr_t* mtr) /*!< in: mtr */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#endif /* UNIV_DEBUG */
|
||||
/*************************************************************//**
|
||||
Tries to merge the page first to the left immediate brother if such a
|
||||
@@ -543,8 +542,7 @@ btr_discard_page(
|
||||
/*=============*/
|
||||
btr_cur_t* cursor, /*!< in: cursor on the page to discard: not on
|
||||
the root page */
|
||||
mtr_t* mtr) /*!< in: mtr */
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
mtr_t* mtr); /*!< in: mtr */
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
/****************************************************************//**
|
||||
Parses the redo log record for setting an index record as the predefined
|
||||
@@ -571,7 +569,7 @@ btr_parse_page_reorganize(
|
||||
bool compressed,/*!< in: true if compressed page */
|
||||
buf_block_t* block, /*!< in: page to be reorganized, or NULL */
|
||||
mtr_t* mtr) /*!< in: mtr or NULL */
|
||||
MY_ATTRIBUTE((nonnull(1,2,3), warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/**************************************************************//**
|
||||
Gets the number of pages in a B-tree.
|
||||
@@ -583,7 +581,7 @@ btr_get_size(
|
||||
ulint flag, /*!< in: BTR_N_LEAF_PAGES or BTR_TOTAL_SIZE */
|
||||
mtr_t* mtr) /*!< in/out: mini-transaction where index
|
||||
is s-latched */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/**************************************************************//**
|
||||
Gets the number of reserved and used pages in a B-tree.
|
||||
@return number of pages reserved, or ULINT_UNDEFINED if the index
|
||||
@@ -620,7 +618,7 @@ btr_page_alloc(
|
||||
mtr_t* init_mtr) /*!< in/out: mini-transaction
|
||||
for x-latching and initializing
|
||||
the page */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/**************************************************************//**
|
||||
Frees a file page used in an index tree. NOTE: cannot free field external
|
||||
storage pages because the page must contain info on its level. */
|
||||
@@ -653,7 +651,7 @@ btr_page_free_low(
|
||||
ulint level, /*!< in: page level (ULINT_UNDEFINED=BLOB) */
|
||||
bool blob, /*!< in: blob page */
|
||||
mtr_t* mtr) /*!< in: mtr */
|
||||
__attribute__((nonnull));
|
||||
MY_ATTRIBUTE((nonnull(1,2)));
|
||||
/**************************************************************//**
|
||||
Gets the root node of a tree and x- or s-latches it.
|
||||
@return root page, x- or s-latched */
|
||||
@@ -695,7 +693,6 @@ btr_page_reorganize_block(
|
||||
#ifdef UNIV_BTR_PRINT
|
||||
/*************************************************************//**
|
||||
Prints size info of a B-tree. */
|
||||
UNIV_INTERN
|
||||
void
|
||||
btr_print_size(
|
||||
/*===========*/
|
||||
@@ -703,7 +700,6 @@ btr_print_size(
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
/**************************************************************//**
|
||||
Prints directories and other info of all nodes in the index. */
|
||||
UNIV_INTERN
|
||||
void
|
||||
btr_print_index(
|
||||
/*============*/
|
||||
@@ -724,18 +720,17 @@ btr_index_rec_validate(
|
||||
ibool dump_on_error) /*!< in: TRUE if the function
|
||||
should print hex dump of record
|
||||
and page on error */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/**************************************************************//**
|
||||
Checks the consistency of an index tree.
|
||||
@return DB_SUCCESS if ok, error code if not */
|
||||
UNIV_INTERN
|
||||
dberr_t
|
||||
btr_validate_index(
|
||||
/*===============*/
|
||||
dict_index_t* index, /*!< in: index */
|
||||
const trx_t* trx, /*!< in: transaction or 0 */
|
||||
bool lockout)/*!< in: true if X-latch index is intended */
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/*************************************************************//**
|
||||
Removes a page from the level list of pages. */
|
||||
|
||||
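Note: the declarations above consistently replace raw __attribute__ with the MY_ATTRIBUTE wrapper and drop most nonnull/pure qualifiers. MY_ATTRIBUTE is defined by the server headers; the definition below is only an assumed illustration of why call sites prefer the wrapper (it expands to nothing on compilers without GCC-style attributes).

#include <cstdio>

/* Assumed illustration only -- the real MY_ATTRIBUTE lives in the server
headers. On GCC/Clang it forwards to __attribute__, elsewhere it vanishes. */
#if defined(__GNUC__) || defined(__clang__)
# define MY_ATTRIBUTE(A) __attribute__(A)
#else
# define MY_ATTRIBUTE(A)
#endif

/* Example declaration in the style of the hunks above. */
static int compute_something(int x) MY_ATTRIBUTE((warn_unused_result));

static int compute_something(int x)
{
	return(x * 2);
}

int main()
{
	std::printf("%d\n", compute_something(21));
	return(0);
}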
@@ -260,7 +260,7 @@ private:
|
||||
FlushObserver* m_flush_observer;
|
||||
|
||||
/** Operation result DB_SUCCESS or error code */
|
||||
dberr_t m_err;
|
||||
dberr_t m_err;
|
||||
};
|
||||
|
||||
typedef std::vector<PageBulk*, ut_allocator<PageBulk*> >
|
||||
|
||||
@@ -328,6 +328,13 @@ extern rw_lock_t** btr_search_latches;
|
||||
/** The adaptive hash index */
|
||||
extern btr_search_sys_t* btr_search_sys;
|
||||
|
||||
#ifdef UNIV_SEARCH_PERF_STAT
|
||||
/** Number of successful adaptive hash index lookups */
|
||||
extern ulint btr_search_n_succ;
|
||||
/** Number of failed adaptive hash index lookups */
|
||||
extern ulint btr_search_n_hash_fail;
|
||||
#endif /* UNIV_SEARCH_PERF_STAT */
|
||||
|
||||
/** After change in n_fields or n_bytes in info, this many rounds are waited
|
||||
before starting the hash analysis again: this is to save CPU time when there
|
||||
is no hope in building a hash index. */
|
||||
|
||||
@@ -200,7 +200,8 @@ btr_get_search_latch(const dict_index_t* index)
|
||||
{
|
||||
ut_ad(index != NULL);
|
||||
|
||||
ulint ifold = ut_fold_ulint_pair(index->id, index->space);
|
||||
ulint ifold = ut_fold_ulint_pair(static_cast<ulint>(index->id),
|
||||
static_cast<ulint>(index->space));
|
||||
|
||||
return(btr_search_latches[ifold % btr_ahi_parts]);
|
||||
}
|
||||
@@ -215,7 +216,8 @@ btr_get_search_table(const dict_index_t* index)
|
||||
{
|
||||
ut_ad(index != NULL);
|
||||
|
||||
ulint ifold = ut_fold_ulint_pair(index->id, index->space);
|
||||
ulint ifold = ut_fold_ulint_pair(static_cast<ulint>(index->id),
|
||||
static_cast<ulint>(index->space));
|
||||
|
||||
return(btr_search_sys->hash_tables[ifold % btr_ahi_parts]);
|
||||
}
|
||||
|
||||
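Note: the two hunks above cast index->id and index->space down to ulint before folding, and the fold selects one of btr_ahi_parts adaptive-hash-index partitions. The standalone sketch below only illustrates that fold-then-modulo selection; fold_pair() is a stand-in, not the real ut_fold_ulint_pair().

#include <cstdint>
#include <cstdio>

typedef std::uintptr_t ulint;

/* Stand-in for ut_fold_ulint_pair(): any stable mixing of the two values
works for this illustration. */
static ulint fold_pair(ulint a, ulint b)
{
	return((a ^ (b * 2654435761UL)) + (a << 7));
}

int main()
{
	const ulint		ahi_parts = 8;	/* e.g. innodb_adaptive_hash_index_parts */
	const std::uint64_t	index_id = 1234;	/* wider than ulint on 32-bit, hence the casts */
	const std::uint32_t	space_id = 42;

	ulint	ifold = fold_pair(static_cast<ulint>(index_id),
				  static_cast<ulint>(space_id));

	std::printf("partition %lu\n",
		    static_cast<unsigned long>(ifold % ahi_parts));
	return(0);
}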
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
|
||||
@@ -109,6 +109,9 @@ extern buf_block_t* back_block2; /*!< second block, for page reorganize */
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
#endif /* !UNIV_INNOCHECKSUM */
|
||||
|
||||
/** Magic value to use instead of checksums when they are disabled */
|
||||
#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL
|
||||
|
||||
#ifndef UNIV_INNOCHECKSUM
|
||||
/** @brief States of a control block
|
||||
@see buf_page_t
|
||||
@@ -679,7 +682,7 @@ ulint
|
||||
buf_page_get_freed_page_clock(
|
||||
/*==========================*/
|
||||
const buf_page_t* bpage) /*!< in: block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/********************************************************************//**
|
||||
Reads the freed_page_clock of a buffer block.
|
||||
@return freed_page_clock */
|
||||
@@ -688,7 +691,7 @@ ulint
|
||||
buf_block_get_freed_page_clock(
|
||||
/*===========================*/
|
||||
const buf_block_t* block) /*!< in: block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/********************************************************************//**
|
||||
Tells if a block is still close enough to the MRU end of the LRU list
|
||||
@@ -834,8 +837,7 @@ buf_page_is_corrupted(
|
||||
bool is_log_enabled,
|
||||
FILE* log_file
|
||||
#endif /* UNIV_INNOCHECKSUM */
|
||||
) __attribute__((warn_unused_result));
|
||||
|
||||
) MY_ATTRIBUTE((warn_unused_result));
|
||||
#ifndef UNIV_INNOCHECKSUM
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/**********************************************************************//**
|
||||
@@ -857,7 +859,7 @@ ulint
|
||||
buf_block_get_lock_hash_val(
|
||||
/*========================*/
|
||||
const buf_block_t* block) /*!< in: block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#ifdef UNIV_DEBUG
|
||||
/*********************************************************************//**
|
||||
Finds a block in the buffer pool that points to a
|
||||
@@ -1039,7 +1041,7 @@ enum buf_page_state
|
||||
buf_block_get_state(
|
||||
/*================*/
|
||||
const buf_block_t* block) /*!< in: pointer to the control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Sets the state of a block. */
|
||||
UNIV_INLINE
|
||||
@@ -1064,7 +1066,7 @@ ibool
|
||||
buf_page_in_file(
|
||||
/*=============*/
|
||||
const buf_page_t* bpage) /*!< in: pointer to control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/*********************************************************************//**
|
||||
Determines if a block should be on unzip_LRU list.
|
||||
@@ -1074,7 +1076,7 @@ ibool
|
||||
buf_page_belongs_to_unzip_LRU(
|
||||
/*==========================*/
|
||||
const buf_page_t* bpage) /*!< in: pointer to control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/*********************************************************************//**
|
||||
Gets the mutex of a block.
|
||||
@@ -1084,7 +1086,7 @@ BPageMutex*
|
||||
buf_page_get_mutex(
|
||||
/*===============*/
|
||||
const buf_page_t* bpage) /*!< in: pointer to control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/*********************************************************************//**
|
||||
Get the flush type of a page.
|
||||
@@ -1094,7 +1096,7 @@ buf_flush_t
|
||||
buf_page_get_flush_type(
|
||||
/*====================*/
|
||||
const buf_page_t* bpage) /*!< in: buffer page */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Set the flush type of a page. */
|
||||
UNIV_INLINE
|
||||
@@ -1121,7 +1123,7 @@ enum buf_io_fix
|
||||
buf_page_get_io_fix(
|
||||
/*================*/
|
||||
const buf_page_t* bpage) /*!< in: pointer to the control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Gets the io_fix state of a block.
|
||||
@return io_fix state */
|
||||
@@ -1130,7 +1132,7 @@ enum buf_io_fix
|
||||
buf_block_get_io_fix(
|
||||
/*================*/
|
||||
const buf_block_t* block) /*!< in: pointer to the control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Sets the io_fix state of a block. */
|
||||
UNIV_INLINE
|
||||
@@ -1176,7 +1178,7 @@ ibool
|
||||
buf_page_can_relocate(
|
||||
/*==================*/
|
||||
const buf_page_t* bpage) /*!< control block being relocated */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/*********************************************************************//**
|
||||
Determine if a block has been flagged old.
|
||||
@@ -1186,7 +1188,7 @@ ibool
|
||||
buf_page_is_old(
|
||||
/*============*/
|
||||
const buf_page_t* bpage) /*!< in: control block */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Flag a block old. */
|
||||
UNIV_INLINE
|
||||
@@ -1203,7 +1205,7 @@ unsigned
|
||||
buf_page_is_accessed(
|
||||
/*=================*/
|
||||
const buf_page_t* bpage) /*!< in: control block */
|
||||
MY_ATTRIBUTE((nonnull, pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*********************************************************************//**
|
||||
Flag a block accessed. */
|
||||
UNIV_INLINE
|
||||
@@ -1222,7 +1224,7 @@ buf_block_t*
|
||||
buf_page_get_block(
|
||||
/*===============*/
|
||||
buf_page_t* bpage) /*!< in: control block, or NULL */
|
||||
MY_ATTRIBUTE((pure));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
#ifdef UNIV_DEBUG
|
||||
/*********************************************************************//**
|
||||
@@ -1233,7 +1235,7 @@ buf_frame_t*
|
||||
buf_block_get_frame(
|
||||
/*================*/
|
||||
const buf_block_t* block) /*!< in: pointer to the control block */
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#else /* UNIV_DEBUG */
|
||||
# define buf_block_get_frame(block) (block)->frame
|
||||
#endif /* UNIV_DEBUG */
|
||||
@@ -1244,13 +1246,14 @@ if applicable. */
|
||||
#define buf_block_get_page_zip(block) \
|
||||
((block)->page.zip.data ? &(block)->page.zip : NULL)
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/*******************************************************************//**
|
||||
Gets the block to whose frame the pointer is pointing to.
|
||||
|
||||
/** Get a buffer block from an adaptive hash index pointer.
|
||||
This function does not return if the block is not identified.
|
||||
@param[in] ptr pointer to within a page frame
|
||||
@return pointer to block, never NULL */
|
||||
buf_block_t*
|
||||
buf_block_align(
|
||||
/*============*/
|
||||
const byte* ptr); /*!< in: pointer to a frame */
|
||||
buf_block_from_ahi(const byte* ptr);
|
||||
|
||||
/********************************************************************//**
|
||||
Find out if a pointer belongs to a buf_block_t. It can be a pointer to
|
||||
the buf_block_t itself or a member of it
|
||||
@@ -1271,18 +1274,6 @@ buf_pointer_is_block_field(
|
||||
#define buf_pool_is_block_lock(l) \
|
||||
buf_pointer_is_block_field((const void*)(l))
|
||||
|
||||
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
|
||||
/*********************************************************************//**
|
||||
Gets the compressed page descriptor corresponding to an uncompressed page
|
||||
if applicable.
|
||||
@return compressed page descriptor, or NULL */
|
||||
UNIV_INLINE
|
||||
const page_zip_des_t*
|
||||
buf_frame_get_page_zip(
|
||||
/*===================*/
|
||||
const byte* ptr); /*!< in: pointer to the page */
|
||||
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
|
||||
|
||||
/** Inits a page for read to the buffer buf_pool. If the page is
|
||||
(1) already in buf_pool, or
|
||||
(2) if we specify to read only ibuf pages and the page is not an ibuf page, or
|
||||
@@ -1322,7 +1313,7 @@ ulint
|
||||
buf_pool_index(
|
||||
/*===========*/
|
||||
const buf_pool_t* buf_pool) /*!< in: buffer pool */
|
||||
MY_ATTRIBUTE((nonnull, const));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/******************************************************************//**
|
||||
Returns the buffer pool instance given a page instance
|
||||
@return buf_pool */
|
||||
@@ -1465,7 +1456,7 @@ buf_page_t*
|
||||
buf_pool_watch_set(
|
||||
const page_id_t& page_id,
|
||||
rw_lock_t** hash_lock)
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/** Stop watching if the page has been read in.
|
||||
buf_pool_watch_set(space,offset) must have returned NULL before.
|
||||
@@ -1482,7 +1473,7 @@ has returned NULL and before invoking buf_pool_watch_unset(space,offset).
|
||||
ibool
|
||||
buf_pool_watch_occurred(
|
||||
const page_id_t& page_id)
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/********************************************************************//**
|
||||
Get total buffer pool statistics. */
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2008, Google Inc.
|
||||
Copyright (c) 2014, 2015, MariaDB Corporation.
|
||||
Copyright (c) 2014, 2016, MariaDB Corporation.
|
||||
|
||||
Portions of this file contain modifications contributed and copyrighted by
|
||||
Google, Inc. Those modifications are gratefully acknowledged and are described
|
||||
@@ -50,6 +50,11 @@ struct buf_chunk_t{
|
||||
alloc method and later passed to the
|
||||
deallocate method. */
|
||||
buf_block_t* blocks; /*!< array of buffer control blocks */
|
||||
|
||||
/** Get the size of 'mem' in bytes. */
|
||||
size_t mem_size() const {
|
||||
return(mem_pfx.m_size);
|
||||
}
|
||||
};
|
||||
|
||||
/*********************************************************************//**
|
||||
@@ -315,7 +320,8 @@ buf_page_set_state(
|
||||
break;
|
||||
case BUF_BLOCK_FILE_PAGE:
|
||||
if (!(state == BUF_BLOCK_NOT_USED
|
||||
|| state == BUF_BLOCK_REMOVE_HASH)) {
|
||||
|| state == BUF_BLOCK_REMOVE_HASH
|
||||
|| state == BUF_BLOCK_FILE_PAGE)) {
|
||||
const char *old_state_name = buf_get_state_name((buf_block_t*)bpage);
|
||||
bpage->state = state;
|
||||
|
||||
@@ -326,10 +332,11 @@ buf_page_set_state(
|
||||
old_state_name,
|
||||
state,
|
||||
buf_get_state_name((buf_block_t*)bpage));
|
||||
ut_a(state == BUF_BLOCK_NOT_USED
|
||||
|| state == BUF_BLOCK_REMOVE_HASH
|
||||
|| state == BUF_BLOCK_FILE_PAGE);
|
||||
}
|
||||
|
||||
ut_a(state == BUF_BLOCK_NOT_USED
|
||||
|| state == BUF_BLOCK_REMOVE_HASH);
|
||||
break;
|
||||
case BUF_BLOCK_REMOVE_HASH:
|
||||
ut_a(state == BUF_BLOCK_MEMORY);
|
||||
@@ -770,23 +777,6 @@ buf_frame_align(
|
||||
return(frame);
|
||||
}
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
#if defined UNIV_DEBUG || defined UNIV_ZIP_DEBUG
|
||||
/*********************************************************************//**
|
||||
Gets the compressed page descriptor corresponding to an uncompressed page
|
||||
if applicable.
|
||||
@return compressed page descriptor, or NULL */
|
||||
UNIV_INLINE
|
||||
const page_zip_des_t*
|
||||
buf_frame_get_page_zip(
|
||||
/*===================*/
|
||||
const byte* ptr) /*!< in: pointer to the page */
|
||||
{
|
||||
return(buf_block_get_page_zip(buf_block_align(ptr)));
|
||||
}
|
||||
#endif /* UNIV_DEBUG || UNIV_ZIP_DEBUG */
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
/**********************************************************************//**
|
||||
Gets the space id, page offset, and byte offset within page of a
|
||||
pointer pointing to a buffer frame containing a file page. */
|
||||
@@ -1421,6 +1411,25 @@ buf_get_nth_chunk_block(
|
||||
return(chunk->blocks);
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
Get buf frame. */
|
||||
UNIV_INLINE
|
||||
void *
|
||||
buf_page_get_frame(
|
||||
/*===============*/
|
||||
const buf_page_t* bpage) /*!< in: buffer pool page */
|
||||
{
|
||||
/* In encryption/compression buffer pool page may contain extra
|
||||
buffer where result is stored. */
|
||||
if (bpage->slot && bpage->slot->out_buf) {
|
||||
return bpage->slot->out_buf;
|
||||
} else if (bpage->zip.data) {
|
||||
return bpage->zip.data;
|
||||
} else {
|
||||
return ((buf_block_t*) bpage)->frame;
|
||||
}
|
||||
}
|
||||
|
||||
/** Verify the possibility that a stored page is not in buffer pool.
|
||||
@param[in] withdraw_clock withdraw clock when stored the page
|
||||
@retval true if the page might be relocated */
|
||||
@@ -1452,22 +1461,4 @@ buf_pool_size_align(
|
||||
}
|
||||
}
|
||||
|
||||
/********************************************************************//**
|
||||
Get buf frame. */
|
||||
UNIV_INLINE
|
||||
void *
|
||||
buf_page_get_frame(
|
||||
/*===============*/
|
||||
const buf_page_t* bpage) /*!< in: buffer pool page */
|
||||
{
|
||||
/* In encryption/compression buffer pool page may contain extra
|
||||
buffer where result is stored. */
|
||||
if (bpage->slot && bpage->slot->out_buf) {
|
||||
return bpage->slot->out_buf;
|
||||
} else if (bpage->zip.data) {
|
||||
return bpage->zip.data;
|
||||
} else {
|
||||
return ((buf_block_t*) bpage)->frame;
|
||||
}
|
||||
}
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
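Note: buf_page_get_frame() above (moved earlier in buf0buf.ic) picks the page frame in three steps: the encryption/compression output buffer if one was used, else the compressed page data, else the uncompressed frame. The sketch below restates that fallback with simplified stand-in types; it is not the real buf_page_t.

#include <cassert>

struct fake_slot {
	unsigned char*	out_buf;	/* set when decrypt/decompress used a temp buffer */
};

struct fake_page {
	fake_slot*	slot;
	unsigned char*	zip_data;	/* compressed page data, if any */
	unsigned char*	frame;		/* uncompressed frame */
};

static const void* page_get_frame(const fake_page* p)
{
	if (p->slot && p->slot->out_buf) {
		return(p->slot->out_buf);
	} else if (p->zip_data) {
		return(p->zip_data);
	}

	return(p->frame);
}

int main()
{
	static unsigned char	frame[16384];
	fake_page		page = { 0, 0, frame };

	assert(page_get_frame(&page) == frame);
	return(0);
}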
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -30,9 +30,6 @@ Created Aug 11, 2011 Vasil Dimov
|
||||
|
||||
#include "buf0types.h"
|
||||
|
||||
/** Magic value to use instead of checksums when they are disabled */
|
||||
#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL
|
||||
|
||||
/** Calculates the CRC32 checksum of a page. The value is stored to the page
|
||||
when it is written to a file and also checked for a match when reading from
|
||||
the file. When reading we allow both normal CRC32 and CRC-legacy-big-endian
|
||||
@@ -70,6 +67,7 @@ buf_calc_page_old_checksum(
|
||||
/*=======================*/
|
||||
const byte* page); /*!< in: buffer page */
|
||||
|
||||
|
||||
/********************************************************************//**
|
||||
Return a printable string describing the checksum algorithm.
|
||||
@return algorithm name */
|
||||
@@ -79,5 +77,6 @@ buf_checksum_algorithm_name(
|
||||
srv_checksum_algorithm_t algo); /*!< in: algorithm */
|
||||
|
||||
extern ulong srv_checksum_algorithm;
|
||||
extern bool legacy_big_endian_checksum;
|
||||
|
||||
#endif /* buf0checksum_h */
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1995, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1995, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -43,7 +43,7 @@ extern ibool buf_dblwr_being_created;
|
||||
Creates the doublewrite buffer to a new InnoDB installation. The header of the
|
||||
doublewrite buffer is placed on the trx system header page.
|
||||
@return true if successful, false if not. */
|
||||
__attribute__((warn_unused_result))
|
||||
MY_ATTRIBUTE((warn_unused_result))
|
||||
bool
|
||||
buf_dblwr_create(void);
|
||||
/*==================*/
|
||||
|
||||
@@ -36,14 +36,18 @@ Created 11/5/1995 Heikki Tuuri
|
||||
/** Flag indicating if the page_cleaner is in active state. */
|
||||
extern bool buf_page_cleaner_is_active;
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
|
||||
/** Value of MySQL global variable used to disable page cleaner. */
|
||||
extern my_bool innodb_page_cleaner_disabled_debug;
|
||||
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/** Event to synchronise with the flushing. */
|
||||
extern os_event_t buf_flush_event;
|
||||
|
||||
class ut_stage_alter_t;
|
||||
|
||||
/** Event to synchronise with the flushing. */
|
||||
extern os_event_t buf_flush_event;
|
||||
|
||||
/** Handled page counters for a single flush */
|
||||
struct flush_counters_t {
|
||||
ulint flushed; /*!< number of dirty pages flushed */
|
||||
@@ -101,7 +105,7 @@ buf_flush_page_try(
|
||||
/*===============*/
|
||||
buf_pool_t* buf_pool, /*!< in/out: buffer pool instance */
|
||||
buf_block_t* block) /*!< in/out: buffer control block */
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
# endif /* UNIV_DEBUG || UNIV_IBUF_DEBUG */
|
||||
/** Do flushing batch of a given type.
|
||||
NOTE: The calling thread is not allowed to own any latches on pages!
|
||||
@@ -124,6 +128,7 @@ buf_flush_do_batch(
|
||||
lsn_t lsn_limit,
|
||||
flush_counters_t* n);
|
||||
|
||||
|
||||
/** This utility flushes dirty blocks from the end of the flush list of all
|
||||
buffer pool instances.
|
||||
NOTE: The calling thread is not allowed to own any latches on pages!
|
||||
@@ -216,6 +221,22 @@ buf_flush_ready_for_replace(
|
||||
/*========================*/
|
||||
buf_page_t* bpage); /*!< in: buffer control block, must be
|
||||
buf_page_in_file(bpage) and in the LRU list */
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/** Disables page cleaner threads (coordinator and workers).
|
||||
It's used by: SET GLOBAL innodb_page_cleaner_disabled_debug = 1 (0).
|
||||
@param[in] thd thread handle
|
||||
@param[in] var pointer to system variable
|
||||
@param[out] var_ptr where the formal string goes
|
||||
@param[in] save immediate result from check function */
|
||||
void
|
||||
buf_flush_page_cleaner_disabled_debug_update(
|
||||
THD* thd,
|
||||
struct st_mysql_sys_var* var,
|
||||
void* var_ptr,
|
||||
const void* save);
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/******************************************************************//**
|
||||
page_cleaner thread tasked with flushing dirty pages from the buffer
|
||||
pools. As of now we'll have only one coordinator of this thread.
|
||||
|
||||
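Note: the hunks above add innodb_page_cleaner_disabled_debug, a debug-only switch toggled via SET GLOBAL, plus the buf_flush_page_cleaner_disabled_debug_update() hook that reacts to it. The sketch below only shows the general pattern of a worker loop polling such a flag; the flag and functions here are stand-ins, not the server's sys-var plumbing.

#include <atomic>
#include <cstdio>

/* Stand-in for the debug flag behind innodb_page_cleaner_disabled_debug. */
static std::atomic<bool> page_cleaner_disabled(false);

/* Stand-in for the sys-var update hook: store the new value and let the
worker loop notice it on its next iteration. */
static void page_cleaner_disabled_update(bool new_value)
{
	page_cleaner_disabled.store(new_value);
}

/* Shape of a page-cleaner style loop that skips its work while disabled. */
static void page_cleaner_iteration()
{
	if (page_cleaner_disabled.load()) {
		return;	/* pretend to sleep and retry later */
	}

	std::printf("flushing a batch of dirty pages\n");
}

int main()
{
	page_cleaner_iteration();		/* flushes */
	page_cleaner_disabled_update(true);	/* SET GLOBAL ... = 1 */
	page_cleaner_iteration();		/* skipped */
	return(0);
}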
@@ -126,13 +126,6 @@ buf_read_ibuf_merge_pages(
|
||||
to get read in, before this
|
||||
function returns */
|
||||
const ulint* space_ids, /*!< in: array of space ids */
|
||||
const ib_uint64_t* space_versions,/*!< in: the spaces must have
|
||||
this version number
|
||||
(timestamp), otherwise we
|
||||
discard the read; we use this
|
||||
to cancel reads if DISCARD +
|
||||
IMPORT may have changed the
|
||||
tablespace size */
|
||||
const ulint* page_nos, /*!< in: array of page numbers
|
||||
to read, with the highest page
|
||||
number the last in the
|
||||
|
||||
@@ -70,8 +70,8 @@ void
|
||||
dfield_set_type(
|
||||
/*============*/
|
||||
dfield_t* field, /*!< in: SQL data field */
|
||||
const dtype_t* type) /*!< in: pointer to data type struct */
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
const dtype_t* type); /*!< in: pointer to data type struct */
|
||||
|
||||
/*********************************************************************//**
|
||||
Gets length of field data.
|
||||
@return length of data; UNIV_SQL_NULL if SQL null data */
|
||||
@@ -116,6 +116,23 @@ dfield_set_ext(
|
||||
/*===========*/
|
||||
dfield_t* field) /*!< in/out: field */
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
|
||||
/** Gets spatial status for "external storage"
|
||||
@param[in,out] field field */
|
||||
UNIV_INLINE
|
||||
spatial_status_t
|
||||
dfield_get_spatial_status(
|
||||
const dfield_t* field);
|
||||
|
||||
/** Sets spatial status for "external storage"
|
||||
@param[in,out] field field
|
||||
@param[in] spatial_status spatial status */
|
||||
UNIV_INLINE
|
||||
void
|
||||
dfield_set_spatial_status(
|
||||
dfield_t* field,
|
||||
spatial_status_t spatial_status);
|
||||
|
||||
/*********************************************************************//**
|
||||
Sets pointer to the data and length in a field. */
|
||||
UNIV_INLINE
|
||||
@@ -134,7 +151,7 @@ dfield_write_mbr(
|
||||
/*=============*/
|
||||
dfield_t* field, /*!< in: field */
|
||||
const double* mbr) /*!< in: data */
|
||||
__attribute__((nonnull(1)));
|
||||
MY_ATTRIBUTE((nonnull(1)));
|
||||
/*********************************************************************//**
|
||||
Sets a data field to SQL NULL. */
|
||||
UNIV_INLINE
|
||||
@@ -159,8 +176,8 @@ void
|
||||
dfield_copy_data(
|
||||
/*=============*/
|
||||
dfield_t* field1, /*!< out: field to copy to */
|
||||
const dfield_t* field2) /*!< in: field to copy from */
|
||||
MY_ATTRIBUTE((nonnull));
|
||||
const dfield_t* field2); /*!< in: field to copy from */
|
||||
|
||||
/*********************************************************************//**
|
||||
Copies a data field to another. */
|
||||
UNIV_INLINE
|
||||
@@ -408,7 +425,7 @@ int
|
||||
dtuple_coll_cmp(
|
||||
const dtuple_t* tuple1,
|
||||
const dtuple_t* tuple2)
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/** Fold a prefix given as the number of fields of a tuple.
|
||||
@param[in] tuple index record
|
||||
@param[in] n_fields number of complete fields to fold
|
||||
@@ -422,7 +439,7 @@ dtuple_fold(
|
||||
ulint n_fields,
|
||||
ulint n_bytes,
|
||||
index_id_t tree_id)
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
/*******************************************************************//**
|
||||
Sets types of fields binary in a tuple. */
|
||||
UNIV_INLINE
|
||||
@@ -544,7 +561,7 @@ dtuple_convert_big_rec(
|
||||
dtuple_t* entry, /*!< in/out: index entry */
|
||||
ulint* n_ext) /*!< in/out: number of
|
||||
externally stored columns */
|
||||
MY_ATTRIBUTE((nonnull(1,4), malloc, warn_unused_result));
|
||||
MY_ATTRIBUTE((malloc, warn_unused_result));
|
||||
/**************************************************************//**
|
||||
Puts back to entry the data stored in vector. Note that to ensure the
|
||||
fields in entry can accommodate the data, vector must have been created
|
||||
@@ -572,7 +589,10 @@ dtuple_big_rec_free(
|
||||
/** Structure for an SQL data field */
|
||||
struct dfield_t{
|
||||
void* data; /*!< pointer to data */
|
||||
unsigned ext; /*!< TRUE=externally stored, FALSE=local */
|
||||
unsigned ext:1; /*!< TRUE=externally stored, FALSE=local */
|
||||
unsigned spatial_status:2;
|
||||
/*!< spatial status of externally stored field
|
||||
in undo log for purge */
|
||||
unsigned len; /*!< data length; UNIV_SQL_NULL if SQL null */
|
||||
dtype_t type; /*!< type of data */
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1994, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
@@ -157,6 +157,34 @@ dfield_set_ext(
|
||||
field->ext = 1;
|
||||
}
|
||||
|
||||
/** Gets spatial status for "external storage"
|
||||
@param[in,out] field field */
|
||||
UNIV_INLINE
|
||||
spatial_status_t
|
||||
dfield_get_spatial_status(
|
||||
const dfield_t* field)
|
||||
{
|
||||
ut_ad(field);
|
||||
ut_ad(dfield_is_ext(field));
|
||||
|
||||
return(static_cast<spatial_status_t>(field->spatial_status));
|
||||
}
|
||||
|
||||
/** Sets spatial status for "external storage"
|
||||
@param[in,out] field field
|
||||
@param[in] spatial_status spatial status */
|
||||
UNIV_INLINE
|
||||
void
|
||||
dfield_set_spatial_status(
|
||||
dfield_t* field,
|
||||
spatial_status_t spatial_status)
|
||||
{
|
||||
ut_ad(field);
|
||||
ut_ad(dfield_is_ext(field));
|
||||
|
||||
field->spatial_status = spatial_status;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
Sets pointer to the data and length in a field. */
|
||||
UNIV_INLINE
|
||||
@@ -227,6 +255,7 @@ dfield_copy_data(
|
||||
field1->data = field2->data;
|
||||
field1->len = field2->len;
|
||||
field1->ext = field2->ext;
|
||||
field1->spatial_status = field2->spatial_status;
|
||||
}
|
||||
|
||||
/*********************************************************************//**
|
||||
|
||||
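Note: the dfield_t change above turns ext into a one-bit field and packs a two-bit spatial_status next to it, with the inline getters/setters added in data0data.ic asserting that the field is externally stored. The sketch below only illustrates the packed-flags idea with placeholder values; it is not the real dfield_t or spatial_status_t.

#include <cstdio>

struct packed_flags {
	unsigned	ext:1;			/* TRUE = externally stored */
	unsigned	spatial_status:2;	/* 0..3, meaning defined elsewhere */
};

int main()
{
	packed_flags	f = { 0, 0 };

	f.ext = 1;
	f.spatial_status = 2;	/* placeholder value */

	std::printf("ext=%u spatial_status=%u size=%u bytes\n",
		    f.ext, f.spatial_status,
		    static_cast<unsigned>(sizeof(packed_flags)));
	return(0);
}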
@@ -1,6 +1,6 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
|
||||
Copyright (c) 2015, MariaDB Corporation.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
@@ -137,6 +137,7 @@ enum dberr_t {
|
||||
/*< Too many words in a phrase */
|
||||
|
||||
DB_TABLESPACE_TRUNCATED, /*!< tablespace was truncated */
|
||||
|
||||
DB_DECRYPTION_FAILED, /* Tablespace encrypted and
|
||||
decrypt operation failed because
|
||||
of missing key management plugin,
|
||||
@@ -157,6 +158,12 @@ enum dberr_t {
|
||||
DB_IO_NO_PUNCH_HOLE_TABLESPACE, /*!< The tablespace doesn't support
|
||||
punch hole */
|
||||
|
||||
DB_IO_DECRYPT_FAIL, /*!< Failure to decrypt a page
|
||||
after reading it from disk */
|
||||
|
||||
DB_IO_NO_ENCRYPT_TABLESPACE, /*!< The tablespace doesn't support
|
||||
encrypt */
|
||||
|
||||
DB_IO_PARTIAL_FAILED, /*!< Partial IO request failed */
|
||||
|
||||
DB_FORCED_ABORT, /*!< Transaction was forced to rollback
|
||||
@@ -169,6 +176,10 @@ enum dberr_t {
|
||||
|
||||
DB_COMPUTE_VALUE_FAILED, /*!< Compute generated value failed */
|
||||
|
||||
DB_NO_FK_ON_S_BASE_COL, /*!< Cannot add foreign constrain
|
||||
placed on the base column of
|
||||
stored column */
|
||||
|
||||
/* The following are partial failure codes */
|
||||
DB_FAIL = 1000,
|
||||
DB_OVERFLOW,
|
||||
|
||||
@@ -213,6 +213,20 @@ dict_create_add_foreigns_to_dictionary(
|
||||
const dict_table_t* table,
|
||||
trx_t* trx)
|
||||
MY_ATTRIBUTE((nonnull, warn_unused_result));
|
||||
|
||||
/** Check if a foreign constraint is on columns server as base columns
|
||||
of any stored column. This is to prevent creating SET NULL or CASCADE
|
||||
constraint on such columns
|
||||
@param[in] local_fk_set set of foreign key objects, to be added to
|
||||
the dictionary tables
|
||||
@param[in] table table to which the foreign key objects in
|
||||
local_fk_set belong to
|
||||
@return true if yes, otherwise, false */
|
||||
bool
|
||||
dict_foreigns_has_s_base_col(
|
||||
const dict_foreign_set& local_fk_set,
|
||||
const dict_table_t* table);
|
||||
|
||||
/****************************************************************//**
|
||||
Creates the tablespaces and datafiles system tables inside InnoDB
|
||||
at server bootstrap or server start if they are not found or are
|
||||
|
||||
storage/innobase/include/dict0defrag_bg.h (new file, 93 lines)
@@ -0,0 +1,93 @@
|
||||
/*****************************************************************************
|
||||
|
||||
Copyright (c) 2016, MariaDB Corporation. All rights Reserved.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify it under
|
||||
the terms of the GNU General Public License as published by the Free Software
|
||||
Foundation; version 2 of the License.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
||||
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along with
|
||||
this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
|
||||
|
||||
*****************************************************************************/
|
||||
|
||||
/**************************************************//**
|
||||
@file include/dict0defrag_bg.h
|
||||
Code used for background table and index
|
||||
defragmentation
|
||||
|
||||
Created 25/08/2016 Jan Lindström
|
||||
*******************************************************/
|
||||
|
||||
#ifndef dict0defrag_bg_h
|
||||
#define dict0defrag_bg_h
|
||||
|
||||
#include "univ.i"
|
||||
|
||||
#include "dict0types.h"
|
||||
#include "os0event.h"
|
||||
#include "os0thread.h"
|
||||
|
||||
/*****************************************************************//**
|
||||
Initialize the defrag pool, called once during thread initialization. */
|
||||
void
|
||||
dict_defrag_pool_init(void);
|
||||
/*========================*/
|
||||
|
||||
/*****************************************************************//**
|
||||
Free the resources occupied by the defrag pool, called once during
|
||||
thread de-initialization. */
|
||||
void
|
||||
dict_defrag_pool_deinit(void);
|
||||
/*==========================*/
|
||||
|
||||
/*****************************************************************//**
|
||||
Add an index in a table to the defrag pool, which is processed by the
|
||||
background stats gathering thread. Only the table id and index id are
|
||||
added to the list, so the table can be closed after being enqueued and
|
||||
it will be opened when needed. If the table or index does not exist later
|
||||
(has been DROPped), then it will be removed from the pool and skipped. */
|
||||
void
|
||||
dict_stats_defrag_pool_add(
|
||||
/*=======================*/
|
||||
const dict_index_t* index); /*!< in: table to add */
|
||||
|
||||
/*****************************************************************//**
|
||||
Delete a given index from the auto defrag pool. */
|
||||
void
|
||||
dict_stats_defrag_pool_del(
|
||||
/*=======================*/
|
||||
const dict_table_t* table, /*!<in: if given, remove
|
||||
all entries for the table */
|
||||
const dict_index_t* index); /*!< in: index to remove */
|
||||
|
||||
/*****************************************************************//**
|
||||
Get the first index that has been added for updating persistent defrag
|
||||
stats and eventually save its stats. */
|
||||
void
|
||||
dict_defrag_process_entries_from_defrag_pool();
|
||||
/*===========================================*/
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation result.
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_summary(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/*********************************************************************//**
|
||||
Save defragmentation stats for a given index.
|
||||
@return DB_SUCCESS or error code */
|
||||
dberr_t
|
||||
dict_stats_save_defrag_stats(
|
||||
/*============================*/
|
||||
dict_index_t* index) /*!< in: index */
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
#endif /* dict0defrag_bg_h */
|
||||
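Note: the new dict0defrag_bg.h above declares a background defrag pool: indexes are enqueued by id (so the table can be closed after enqueueing), drained by the stats thread, and the pool is created and destroyed around that thread's lifetime. The standalone sketch below only mirrors that lifecycle with a plain queue; the types and function names are stand-ins, not the declarations above.

#include <cstdint>
#include <cstdio>
#include <deque>

struct defrag_item {
	std::uint64_t	table_id;
	std::uint64_t	index_id;
};

static std::deque<defrag_item>	defrag_pool;

static void pool_init()   { defrag_pool.clear(); }	/* cf. dict_defrag_pool_init() */
static void pool_deinit() { defrag_pool.clear(); }	/* cf. dict_defrag_pool_deinit() */

/* cf. dict_stats_defrag_pool_add(): only ids are stored, so the table can be
closed after enqueueing and reopened when the entry is processed. */
static void pool_add(std::uint64_t table_id, std::uint64_t index_id)
{
	defrag_pool.push_back({table_id, index_id});
}

/* cf. dict_defrag_process_entries_from_defrag_pool(): drain the queue and
save defrag stats; dropped tables/indexes would simply be skipped. */
static void pool_process()
{
	while (!defrag_pool.empty()) {
		defrag_item item = defrag_pool.front();
		defrag_pool.pop_front();

		std::printf("saving defrag stats for table %llu index %llu\n",
			    static_cast<unsigned long long>(item.table_id),
			    static_cast<unsigned long long>(item.index_id));
	}
}

int main()
{
	pool_init();
	pool_add(10, 100);
	pool_add(10, 101);
	pool_process();
	pool_deinit();
	return(0);
}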
@@ -125,7 +125,7 @@ dict_table_open_on_id(
|
||||
table_id_t table_id, /*!< in: table id */
|
||||
ibool dict_locked, /*!< in: TRUE=data dictionary locked */
|
||||
dict_table_op_t table_op) /*!< in: operation to perform */
|
||||
__attribute__((warn_unused_result));
|
||||
MY_ATTRIBUTE((warn_unused_result));
|
||||
|
||||
/**********************************************************************//**
|
||||
Returns a table object based on table id.
@@ -309,7 +309,7 @@ ulint
dict_col_get_index_pos(
const dict_col_t* col,
const dict_index_t* index)
__attribute__((nonnull, warn_unused_result));
MY_ATTRIBUTE((nonnull, warn_unused_result));

/****************************************************************//**
If the given column name is reserved for InnoDB system columns, return
@@ -389,7 +389,7 @@ dict_table_add_system_columns(
void
dict_table_set_big_rows(
dict_table_t* table)
__attribute__((nonnull));
MY_ATTRIBUTE((nonnull));
/**********************************************************************//**
Adds a table object to the dictionary cache. */
void
@@ -531,7 +531,7 @@ dict_create_foreign_constraints(
size_t sql_length,
const char* name,
ibool reject_fks)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/**********************************************************************//**
Parses the CONSTRAINT id's to be dropped in an ALTER TABLE statement.
@return DB_SUCCESS or DB_CANNOT_DROP_CONSTRAINT if syntax error or the
@@ -565,7 +565,7 @@ dict_table_open_on_name(
ibool dict_locked,
ibool try_drop,
dict_err_ignore_t ignore_err)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/*********************************************************************//**
Tries to find an index whose first fields are the columns in the array,
@@ -598,7 +598,7 @@ dict_foreign_find_index(
/*!< out: column number where
error happened */
dict_index_t** err_index)
/*!< out: index where error
/*!< out: index where error
happened */

MY_ATTRIBUTE((nonnull(1,3), warn_unused_result));
@@ -624,7 +624,7 @@ dict_table_get_col_name_for_mysql(
const dict_table_t* table, /*!< in: table */
const char* col_name)/*!< in: MySQL table column name */
MY_ATTRIBUTE((nonnull, warn_unused_result));


/** Returns a virtual column's name.
@param[in] table table object
@param[in] col_nr virtual column number(nth virtual column)
@@ -634,7 +634,8 @@ dict_table_get_v_col_name(
const dict_table_t* table,
ulint col_nr);

/**********************************************************************//**
/** Check if the table has a given column.
@param[in] table table object
@param[in] col_name column name
@param[in] col_nr column number guessed, 0 as default
@return column number if the table has the specified column,
@@ -656,6 +657,7 @@ dict_print_info_on_foreign_keys(
of SHOW TABLE STATUS */
trx_t* trx, /*!< in: transaction */
dict_table_t* table); /*!< in: table */

/**********************************************************************//**
Outputs info on a foreign key of a table in a format suitable for
CREATE TABLE. */
@@ -665,6 +667,7 @@ dict_print_info_on_foreign_key_in_create_format(
trx_t* trx, /*!< in: transaction */
dict_foreign_t* foreign, /*!< in: foreign key constraint */
ibool add_newline); /*!< in: whether to add a newline */

/*********************************************************************//**
Tries to find an index whose first fields are the columns in the array,
in the same order and is not marked for deletion and is not the same
@@ -756,7 +759,7 @@ ulint
dict_index_is_clust(
/*================*/
const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/** Check if index is auto-generated clustered index.
@param[in] index index
@@ -775,7 +778,7 @@ ulint
dict_index_is_unique(
/*=================*/
const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is a Spatial Index.
@return nonzero for Spatial Index, zero for other indexes */
@@ -784,7 +787,7 @@ ulint
dict_index_is_spatial(
/*==================*/
const dict_index_t* index) /*!< in: index */
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/** Check whether the index contains a virtual column.
@param[in] index index
@return nonzero for index on virtual column, zero for other indexes */
@@ -800,7 +803,7 @@ ulint
dict_index_is_ibuf(
/*===============*/
const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Check whether the index is a secondary index or the insert buffer tree.
@return nonzero for insert buffer, zero for other indexes */
@@ -809,7 +812,7 @@ ulint
dict_index_is_sec_or_ibuf(
/*======================*/
const dict_index_t* index) /*!< in: index */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/** Get all the FTS indexes on a table.
@param[in] table table
@@ -830,7 +833,7 @@ ulint
dict_table_get_n_user_cols(
/*=======================*/
const dict_table_t* table) /*!< in: table */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/** Gets the number of user-defined virtual and non-virtual columns in a table
in the dictionary cache.
@param[in] table table
@@ -849,7 +852,7 @@ ulint
dict_table_get_n_sys_cols(
/*======================*/
const dict_table_t* table) /*!< in: table */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Gets the number of all non-virtual columns (also system) in a table
in the dictionary cache.
@@ -859,7 +862,7 @@ ulint
dict_table_get_n_cols(
/*==================*/
const dict_table_t* table) /*!< in: table */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/** Gets the number of virtual columns in a table in the dictionary cache.
@param[in] table the table to check
@@ -885,7 +888,7 @@ ib_uint64_t
dict_table_get_n_rows(
/*==================*/
const dict_table_t* table) /*!< in: table */
MY_ATTRIBUTE((nonnull, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Increment the number of rows in the table by one.
Notice that this operation is not protected by any latch, the number is
@@ -1005,6 +1008,7 @@ dict_tf_get_format(
/*===============*/
ulint flags) /*!< in: dict_table_t::flags */
MY_ATTRIBUTE((warn_unused_result));

/** Set the various values in a dict_table_t::flags pointer.
@param[in,out] flags, Pointer to a 4 byte Table Flags
@param[in] format, File Format
@@ -1053,11 +1057,13 @@ fil_space_t::flags | 0 | 0 | 1 | 1
==================================================================
@param[in] table_flags dict_table_t::flags
@param[in] is_temp whether the tablespace is temporary
@param[in] is_encrypted whether the tablespace is encrypted
@return tablespace flags (fil_space_t::flags) */
ulint
dict_tf_to_fsp_flags(
ulint table_flags,
bool is_temp)
bool is_temp,
bool is_encrypted = false)
MY_ATTRIBUTE((const));
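
The hunk above adds a third, defaulted parameter to dict_tf_to_fsp_flags(), so existing two-argument callers keep compiling while new call sites can pass the encryption flag. A minimal sketch of the same pattern with stand-in names (the flag math is invented for the example, not the real conversion logic):

/* Defaulted trailing parameter: old callers stay valid, new callers
   can opt in. */
unsigned long demo_to_fsp_flags(unsigned long table_flags,
                                bool is_temp,
                                bool is_encrypted = false)
{
        unsigned long fsp_flags = table_flags;
        if (is_temp)      fsp_flags |= 1UL << 0;
        if (is_encrypted) fsp_flags |= 1UL << 1;
        return fsp_flags;
}

/* Both call shapes compile against the new signature: */
unsigned long legacy_call = demo_to_fsp_flags(0, false);
unsigned long new_call    = demo_to_fsp_flags(0, false, true);
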

/** Extract the page size from table flags.
@@ -1067,7 +1073,7 @@ UNIV_INLINE
const page_size_t
dict_tf_get_page_size(
ulint flags)
__attribute__((const));
MY_ATTRIBUTE((const));

/** Determine the extent size (in pages) for the given table
@param[in] table the table whose extent size is being
@@ -1084,7 +1090,7 @@ UNIV_INLINE
const page_size_t
dict_table_page_size(
const dict_table_t* table)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

#ifndef UNIV_HOTBACKUP
/*********************************************************************//**
@@ -1194,7 +1200,7 @@ dict_index_add_to_cache(
dict_index_t* index,
ulint page_no,
ibool strict)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/** Adds an index to the dictionary cache, with possible indexing newly
added column.
@@ -1215,7 +1221,7 @@ dict_index_add_to_cache_w_vcol(
const dict_add_v_col_t* add_v,
ulint page_no,
ibool strict)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
#endif /* !UNIV_HOTBACKUP */
/********************************************************************//**
Gets the number of fields in the internal representation of an index,
@@ -1269,7 +1275,7 @@ UNIV_INLINE
ulint
dict_index_get_n_unique_in_tree_nonleaf(
const dict_index_t* index)
__attribute__((nonnull, warn_unused_result));
MY_ATTRIBUTE((nonnull, warn_unused_result));
/********************************************************************//**
Gets the number of user-defined ordering fields in the index. In the internal
representation we add the row id to the ordering fields to make all indexes
@@ -1327,9 +1333,9 @@ dict_index_get_nth_col_pos(
/*=======================*/
const dict_index_t* index, /*!< in: index */
ulint n, /*!< in: column number */
ulint* prefix_col_pos) /*!< out: col num if prefix
*/
__attribute__((warn_unused_result));
ulint* prefix_col_pos) /*!< out: col num if prefix */
MY_ATTRIBUTE((nonnull(1), warn_unused_result));

/** Looks for column n in an index.
@param[in] index index
@param[in] n column number
@@ -1362,7 +1368,7 @@ dict_index_contains_col_or_prefix(
ulint n, /*!< in: column number */
bool is_virtual)
/*!< in: whether it is a virtual col */
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/********************************************************************//**
Looks for a matching field in an index. The column has to be the same. The
column in index must be complete, or must contain a prefix longer than the
@@ -1386,7 +1392,7 @@ dict_table_get_nth_col_pos(
const dict_table_t* table, /*!< in: table */
ulint n, /*!< in: column number */
ulint* prefix_col_pos) /*!< out: col num if prefix */
__attribute__((nonnull, warn_unused_result));
MY_ATTRIBUTE((nonnull(1), warn_unused_result));
/********************************************************************//**
Returns the position of a system column in an index.
@return position, ULINT_UNDEFINED if not contained */
@@ -1668,6 +1674,7 @@ dict_tables_have_same_db(
const char* name2) /*!< in: table name in the form
dbname '/' tablename */
MY_ATTRIBUTE((nonnull, warn_unused_result));

/** Get an index by name.
@param[in] table the table where to look for the index
@param[in] name the index name to look for
@@ -1679,19 +1686,13 @@ dict_table_get_index_on_name(
dict_table_t* table,
const char* name,
bool committed=true)
MY_ATTRIBUTE((nonnull, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/** Get an index by name.
@param[in] table the table where to look for the index
@param[in] name the index name to look for
@param[in] committed true=search for committed,
false=search for uncommitted */
dict_index_t*
dict_table_find_index_on_id(
/*========================*/
const dict_table_t* table, /*!< in: table instance */
index_id_t id) /*!< in: index id */
__attribute__((nonnull, warn_unused_result));
/**********************************************************************//**
false=search for uncommitted
@return index, NULL if does not exist */
inline
const dict_index_t*
@@ -1715,7 +1716,7 @@ dict_table_is_fts_column(
ib_vector_t* indexes,/* in: vector containing only FTS indexes */
ulint col_no, /* in: col number to search for */
bool is_virtual)/*!< in: whether it is a virtual column */
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/**********************************************************************//**
Prevent table eviction by moving a table to the non-LRU list from the
LRU list if it is not already there. */
@@ -1724,7 +1725,8 @@ void
dict_table_prevent_eviction(
/*========================*/
dict_table_t* table) /*!< in: table to prevent eviction */
__attribute__((nonnull));
MY_ATTRIBUTE((nonnull));

/**********************************************************************//**
Move a table to the non LRU end of the LRU list. */
void
@@ -1732,6 +1734,7 @@ dict_table_move_from_lru_to_non_lru(
/*================================*/
dict_table_t* table) /*!< in: table to move from LRU to non-LRU */
MY_ATTRIBUTE((nonnull));

/** Looks for an index with the given id given a table instance.
@param[in] table table instance
@param[in] id index id
@@ -1741,6 +1744,7 @@ dict_table_find_index_on_id(
const dict_table_t* table,
index_id_t id)
MY_ATTRIBUTE((nonnull(1)));

/**********************************************************************//**
Move to the most recently used segment of the LRU list. */
void
@@ -1990,7 +1994,7 @@ bool
dict_table_is_discarded(
/*====================*/
const dict_table_t* table) /*!< in: table to check */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/********************************************************************//**
Check if it is a temporary table.
@@ -2000,7 +2004,17 @@ bool
dict_table_is_temporary(
/*====================*/
const dict_table_t* table) /*!< in: table to check */
MY_ATTRIBUTE((nonnull, pure, warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/********************************************************************//**
Check if it is a encrypted table.
@return true if table encryption flag is set. */
UNIV_INLINE
bool
dict_table_is_encrypted(
/*====================*/
const dict_table_t* table) /*!< in: table to check */
MY_ATTRIBUTE((warn_unused_result));

/** Check whether the table is intrinsic.
An intrinsic table is a special kind of temporary table that
@@ -2018,7 +2032,16 @@ UNIV_INLINE
bool
dict_table_is_intrinsic(
const dict_table_t* table)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/** Check if the table is in a shared tablespace (System or General).
@param[in] id Space ID to check
@return true if id is a shared tablespace, false if not. */
UNIV_INLINE
bool
dict_table_in_shared_tablespace(
const dict_table_t* table)
MY_ATTRIBUTE((warn_unused_result));

/** Check whether locking is disabled for this table.
Currently this is done for intrinsic table as their visibility is limited
@@ -2030,7 +2053,7 @@ UNIV_INLINE
bool
dict_table_is_locking_disabled(
const dict_table_t* table)
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));

/********************************************************************//**
Turn-off redo-logging if temporary table. */
@@ -2105,7 +2128,7 @@ ulint
dict_index_node_ptr_max_size(
/*=========================*/
const dict_index_t* index) /*!< in: index */
__attribute__((warn_unused_result));
MY_ATTRIBUTE((warn_unused_result));
/*****************************************************************//**
Get index by first field of the index
@return index which is having first field matches
@@ -2149,12 +2172,34 @@ dict_table_decode_n_col(
ulint* n_v_col);

/** Look for any dictionary objects that are found in the given tablespace.
@param[in] space Tablespace ID to search for.
@param[in] space_id Tablespace ID to search for.
@return true if tablespace is empty. */
bool
dict_tablespace_is_empty(
dict_space_is_empty(
ulint space_id);

/** Find the space_id for the given name in sys_tablespaces.
@param[in] name Tablespace name to search for.
@return the tablespace ID. */
ulint
dict_space_get_id(
const char* name);

/** Free the virtual column template
@param[in,out] vc_templ virtual column template */
UNIV_INLINE
void
dict_free_vc_templ(
dict_vcol_templ_t* vc_templ);

/** Check whether the table have virtual index.
@param[in] table InnoDB table
@return true if the table have virtual index, false otherwise. */
UNIV_INLINE
bool
dict_table_have_virtual_index(
dict_table_t* table);

#endif /* !UNIV_HOTBACKUP */



@@ -1,6 +1,6 @@
/*****************************************************************************

Copyright (c) 1996, 2016, Oracle and/or its affiliates
Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2013, 2016, MariaDB Corporation

This program is free software; you can redistribute it and/or modify it under
@@ -337,6 +337,21 @@ dict_index_is_unique(
return(index->type & DICT_UNIQUE);
}

/********************************************************************//**
Check whether the index is an universal index tree.
@return nonzero for universal tree, zero for other indexes */
UNIV_INLINE
ulint
dict_index_is_univ(
/*===============*/
const dict_index_t* index) /*!< in: index */
{
ut_ad(index);
ut_ad(index->magic_n == DICT_INDEX_MAGIC_N);

return(index->type & DICT_UNIVERSAL);
}

/********************************************************************//**
Check whether the index is a Spatial Index.
@return nonzero for Spatial Index, zero for other indexes */
@@ -711,7 +726,7 @@ dict_tf_is_valid(
}
}

if (page_compression || page_compression_level) {
if (page_compression || page_compression_level) {
/* Page compression format must have compact and
atomic_blobs and page_compression_level requires
page_compression */
@@ -774,6 +789,7 @@ dict_tf2_is_valid(

bool file_per_table = ((flags2 & DICT_TF2_USE_FILE_PER_TABLE) != 0);
bool shared_space = DICT_TF_HAS_SHARED_SPACE(flags);

if (file_per_table && shared_space) {
return(false);
}
@@ -869,13 +885,13 @@ dict_sys_tables_type_validate(
format, so the DATA_DIR flag is compatible with any other
table flags. However, it is not used with TEMPORARY tables. */

if (page_compression || page_compression_level) {
if (page_compression || page_compression_level) {
/* page compressed row format must have low_order_bit and
atomic_blobs bits set and the DICT_N_COLS_COMPACT flag
should be in N_COLS, but we already know about the
low_order_bit and DICT_N_COLS_COMPACT flags. */

if (!atomic_blobs || !page_compression) {
if (!atomic_blobs || !page_compression) {
ib::error() << "SYS_TABLES::TYPE=" << type
<< " page_compression:" << page_compression
<< " page_compression_level:" << page_compression_level
@@ -1008,7 +1024,7 @@ dict_tf_set(

if (page_compressed) {
*flags |= (1 << DICT_TF_POS_ATOMIC_BLOBS)
| (1 << DICT_TF_POS_PAGE_COMPRESSION)
| (1 << DICT_TF_POS_PAGE_COMPRESSION)
| (page_compression_level << DICT_TF_POS_PAGE_COMPRESSION_LEVEL);

ut_ad(zip_ssize == 0);
@@ -1067,7 +1083,7 @@ dict_tf_init(

if (page_compressed) {
flags |= (1 << DICT_TF_POS_ATOMIC_BLOBS)
| (1 << DICT_TF_POS_PAGE_COMPRESSION)
| (1 << DICT_TF_POS_PAGE_COMPRESSION)
| (page_compression_level << DICT_TF_POS_PAGE_COMPRESSION_LEVEL);

ut_ad(zip_ssize == 0);
@@ -1706,9 +1722,11 @@ dict_max_v_field_len_store_undo(
for UNIV_FORMAT_B, upto col->max_prefix or
2) REC_VERSION_56_MAX_INDEX_COL_LEN, whichever is less */
if (dict_table_get_format(table) >= UNIV_FORMAT_B) {
max_log_len = (col->max_prefix > 0)
? col->max_prefix
: DICT_MAX_FIELD_LEN_BY_FORMAT(table);
if (DATA_BIG_COL(col) && col->max_prefix > 0) {
max_log_len = col->max_prefix;
} else {
max_log_len = DICT_MAX_FIELD_LEN_BY_FORMAT(table);
}
} else {
max_log_len = REC_ANTELOPE_MAX_INDEX_COL_LEN;
}
@@ -1786,6 +1804,18 @@ dict_table_is_temporary(
return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_TEMPORARY));
}

/********************************************************************//**
Check if it is a encrypted table.
@return true if table encrypted flag is set. */
UNIV_INLINE
bool
dict_table_is_encrypted(
/*====================*/
const dict_table_t* table) /*!< in: table to check */
{
return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_ENCRYPTION));
}

/** Check whether the table is intrinsic.
An intrinsic table is a special kind of temporary table that
is invisible to the end user. It can be created internally by InnoDB, the MySQL
@@ -1806,6 +1836,18 @@ dict_table_is_intrinsic(
return(DICT_TF2_FLAG_IS_SET(table, DICT_TF2_INTRINSIC));
}

/** Check if the table is in a shared tablespace (System or General).
@param[in] id Space ID to check
@return true if id is a shared tablespace, false if not. */
UNIV_INLINE
bool
dict_table_in_shared_tablespace(
const dict_table_t* table)
{
return(is_system_tablespace(table->space)
|| DICT_TF_HAS_SHARED_SPACE(table->flags));
}

/** Check whether locking is disabled for this table.
Currently this is done for intrinsic table as their visibility is limited
to the connection only.
@@ -2003,4 +2045,45 @@ dict_table_decode_n_col(
*n_col = num & 0xFFFF;
}

/** Free the virtual column template
@param[in,out] vc_templ virtual column template */
void
dict_free_vc_templ(
dict_vcol_templ_t* vc_templ)
{
if (vc_templ->vtempl != NULL) {
ut_ad(vc_templ->n_v_col > 0);
for (ulint i = 0; i < vc_templ->n_col
+ vc_templ->n_v_col; i++) {
if (vc_templ->vtempl[i] != NULL) {
ut_free(vc_templ->vtempl[i]);
}
}
ut_free(vc_templ->default_rec);
ut_free(vc_templ->vtempl);
vc_templ->vtempl = NULL;
}
}

/** Check whether the table have virtual index.
@param[in] table InnoDB table
@return true if the table have virtual index, false otherwise. */
UNIV_INLINE
bool
dict_table_have_virtual_index(
dict_table_t* table)
{
for (ulint col_no = 0; col_no < dict_table_get_n_v_cols(table);
col_no++) {
const dict_v_col_t* col
= dict_table_get_nth_v_col(table, col_no);

if (col->m_col.ord_part) {
return(true);
}
}

return(false);
}
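
dict_table_have_virtual_index() above reports a table as having a virtual index as soon as any virtual column participates in an index ordering (ord_part is set). A self-contained sketch of the same scan over simplified stand-in types:

#include <vector>

/* Stand-in for dict_v_col_t: only the ord_part bit matters here. */
struct demo_v_col { bool ord_part; };

static bool demo_have_virtual_index(const std::vector<demo_v_col>& v_cols)
{
        for (size_t i = 0; i < v_cols.size(); i++) {
                if (v_cols[i].ord_part) {
                        return true;    /* some virtual column is indexed */
                }
        }
        return false;
}
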

#endif /* !UNIV_HOTBACKUP */

@@ -184,6 +184,14 @@ dict_save_data_dir_path(
dict_table_t* table, /*!< in/out: table */
char* filepath); /*!< in: filepath of tablespace */

/** Get the first filepath from SYS_DATAFILES for a given space_id.
@param[in] space_id Tablespace ID
@return First filepath (caller must invoke ut_free() on it)
@retval NULL if no SYS_DATAFILES entry was found. */
char*
dict_get_first_path(
ulint space_id);

/** Make sure the data_file_name is saved in dict_table_t if needed.
Try to read it from the fil_system first, then from SYS_DATAFILES.
@param[in] table Table object
@@ -262,7 +270,7 @@ dict_load_foreigns(
which must be loaded
subsequently to load all the
foreign key constraints. */
__attribute__((nonnull(1), warn_unused_result));
MY_ATTRIBUTE((nonnull(1), warn_unused_result));

/********************************************************************//**
This function opens a system table, and return the first record.

@@ -49,6 +49,8 @@ Created 1/8/1996 Heikki Tuuri
#include "buf0buf.h"
#include "gis0type.h"
#include "os0once.h"
#include "ut0new.h"

#include "fil0fil.h"
#include <my_crypt.h>
#include "fil0crypt.h"
@@ -67,6 +69,8 @@ combination of types */
auto-generated clustered indexes,
also DICT_UNIQUE will be set */
#define DICT_UNIQUE 2 /*!< unique index */
#define DICT_UNIVERSAL 4 /*!< index which can contain records from any
other index */
#define DICT_IBUF 8 /*!< insert buffer tree */
#define DICT_CORRUPT 16 /*!< bit to store the corrupted flag
in SYS_INDEXES.TYPE */
@@ -170,9 +174,9 @@ DEFAULT=0, ON = 1, OFF = 2
+ DICT_TF_WIDTH_SHARED_SPACE \
+ DICT_TF_WIDTH_PAGE_COMPRESSION \
+ DICT_TF_WIDTH_PAGE_COMPRESSION_LEVEL \
+ DICT_TF_WIDTH_ATOMIC_WRITES \
+ DICT_TF_WIDTH_PAGE_ENCRYPTION \
+ DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY)
+ DICT_TF_WIDTH_ATOMIC_WRITES \
+ DICT_TF_WIDTH_PAGE_ENCRYPTION \
+ DICT_TF_WIDTH_PAGE_ENCRYPTION_KEY)

/** A mask of all the known/used bits in table flags */
#define DICT_TF_BIT_MASK (~(~0 << DICT_TF_BITS))
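
The DICT_TF_WIDTH_* constants above describe fixed-width fields packed into a single table-flags word, and DICT_TF_BIT_MASK masks off the bits that are actually defined. A hedged sketch of that packing scheme with invented positions and widths (not InnoDB's real layout):

#include <cassert>

/* Invented field: a 4-bit "compression level" stored at bit 7. */
static const unsigned DEMO_WIDTH_LEVEL = 4;
static const unsigned DEMO_POS_LEVEL   = 7;
static const unsigned DEMO_LEVEL_MASK  =
        ((1U << DEMO_WIDTH_LEVEL) - 1) << DEMO_POS_LEVEL;

static unsigned demo_flags_set_level(unsigned flags, unsigned level)
{
        assert(level < (1U << DEMO_WIDTH_LEVEL));
        return (flags & ~DEMO_LEVEL_MASK) | (level << DEMO_POS_LEVEL);
}

static unsigned demo_flags_get_level(unsigned flags)
{
        return (flags & DEMO_LEVEL_MASK) >> DEMO_POS_LEVEL;
}
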
@@ -305,7 +309,7 @@ ROW_FORMAT=REDUNDANT. InnoDB engines do not check these flags
for unknown bits in order to protect backward incompatibility. */
/* @{ */
/** Total number of bits in table->flags2. */
#define DICT_TF2_BITS 8
#define DICT_TF2_BITS 9
#define DICT_TF2_UNUSED_BIT_MASK (~0U << DICT_TF2_BITS)
#define DICT_TF2_BIT_MASK ~DICT_TF2_UNUSED_BIT_MASK

@@ -339,6 +343,9 @@ FTS, etc.... Intrinsic table has all the properties of the normal table except
it is not created by user and so not visible to end-user. */
#define DICT_TF2_INTRINSIC 128

/** Encryption table bit. */
#define DICT_TF2_ENCRYPTION 256

/* @} */
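
Raising DICT_TF2_BITS from 8 to 9 makes room for the new DICT_TF2_ENCRYPTION bit (256) alongside DICT_TF2_INTRINSIC (128); each property is a single bit in flags2 and is tested with a mask, in the style of the DICT_TF2_FLAG_IS_SET() calls seen in the .ic hunks above. A small stand-in sketch of that test:

/* Values mirror the diff; the struct and helper are illustrative only. */
#define DEMO_TF2_INTRINSIC  128U
#define DEMO_TF2_ENCRYPTION 256U

struct demo_table_flags { unsigned flags2; };

static bool demo_tf2_flag_is_set(const demo_table_flags* table, unsigned flag)
{
        return (table->flags2 & flag) != 0;
}
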

#define DICT_TF2_FLAG_SET(table, flag) \
@@ -431,13 +438,22 @@ dict_mem_table_add_v_col(
ulint len,
ulint pos,
ulint num_base);

/** Adds a stored column definition to a table.
@param[in] table table
@param[in] num_base number of base columns. */
void
dict_mem_table_add_s_col(
dict_table_t* table,
ulint num_base);

/**********************************************************************//**
Renames a column of a table in the data dictionary cache. */
void
dict_mem_table_col_rename(
/*======================*/
dict_table_t* table, /*!< in/out: table */
unsigned nth_col,/*!< in: column index */
ulint nth_col,/*!< in: column index */
const char* from, /*!< in: old column name */
const char* to, /*!< in: new column name */
bool is_virtual);
@@ -532,6 +548,27 @@ dict_mem_referenced_table_name_lookup_set(
dict_foreign_t* foreign, /*!< in/out: foreign struct */
ibool do_alloc); /*!< in: is an alloc needed */

/** Fills the dependent virtual columns in a set.
Reason for being dependent are
1) FK can be present on base column of virtual columns
2) FK can be present on column which is a part of virtual index
@param[in,out] foreign foreign key information. */
void
dict_mem_foreign_fill_vcol_set(
dict_foreign_t* foreign);

/** Fill virtual columns set in each fk constraint present in the table.
@param[in,out] table innodb table object. */
void
dict_mem_table_fill_foreign_vcol_set(
dict_table_t* table);

/** Free the vcol_set from all foreign key constraint on the table.
@param[in,out] table innodb table object. */
void
dict_mem_table_free_foreign_vcol_set(
dict_table_t* table);

/** Create a temporary tablename like "#sql-ibtid-inc where
tid = the Table ID
inc = a randomly initialized number that is incremented for each file
@@ -696,6 +733,21 @@ struct dict_add_v_col_t{
const char** v_col_name;
};

/** Data structure for a stored column in a table. */
struct dict_s_col_t {
/** Stored column ptr */
dict_col_t* m_col;
/** array of base col ptr */
dict_col_t** base_col;
/** number of base columns */
ulint num_base;
/** column pos in table */
ulint s_pos;
};

/** list to put stored column for create_table_info_t */
typedef std::list<dict_s_col_t, ut_allocator<dict_s_col_t> > dict_s_col_list;
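
dict_s_col_t and dict_s_col_list above carry stored-column descriptions through CREATE TABLE and copy ALTER. A hedged sketch of how such a list might be filled; the types are simplified stand-ins and the helper is hypothetical:

#include <list>

struct demo_s_col {
        const void*  m_col;      /* stored column (stand-in pointer) */
        const void** base_col;   /* its base columns */
        unsigned long num_base;  /* number of base columns */
        unsigned long s_pos;     /* position of the column in the table */
};

typedef std::list<demo_s_col> demo_s_col_list;

static void demo_remember_stored_col(demo_s_col_list& s_cols,
                                     const void* col, const void** base,
                                     unsigned long n_base, unsigned long pos)
{
        demo_s_col s;
        s.m_col    = col;
        s.base_col = base;
        s.num_base = n_base;
        s.s_pos    = pos;
        s_cols.push_back(s);
}
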

/** @brief DICT_ANTELOPE_MAX_INDEX_COL_LEN is measured in bytes and
is the maximum indexed column length (or indexed prefix length) in
ROW_FORMAT=REDUNDANT and ROW_FORMAT=COMPACT. Also, in any format,
@@ -726,6 +778,7 @@ be REC_VERSION_56_MAX_INDEX_COL_LEN (3072) bytes */

/** Defines the maximum fixed length column size */
#define DICT_MAX_FIXED_COL_LEN DICT_ANTELOPE_MAX_INDEX_COL_LEN

#ifdef WITH_WSREP
#define WSREP_MAX_SUPPORTED_KEY_LENGTH 3500
#endif /* WITH_WSREP */
@@ -982,6 +1035,9 @@ struct dict_index_t{
parser; /*!< fulltext parser plugin */
bool is_ngram;
/*!< true if it's ngram parser */
bool has_new_v_col;
/*!< whether it has a newly added virtual
column in ALTER */
#ifndef UNIV_HOTBACKUP
UT_LIST_NODE_T(dict_index_t)
indexes;/*!< list of indexes of the table */
@@ -1106,6 +1162,11 @@ enum online_index_status {
ONLINE_INDEX_ABORTED_DROPPED
};

/** Set to store the virtual columns which are affected by Foreign
key constraint. */
typedef std::set<dict_v_col_t*, std::less<dict_v_col_t*>,
ut_allocator<dict_v_col_t*> > dict_vcol_set;

/** Data structure for a foreign key constraint; an example:
FOREIGN KEY (A, B) REFERENCES TABLE2 (C, D). Most fields will be
initialized to 0, NULL or FALSE in dict_mem_foreign_create(). */
@@ -1141,6 +1202,9 @@ struct dict_foreign_t{
does not generate new indexes
implicitly */
dict_index_t* referenced_index;/*!< referenced index */

dict_vcol_set* v_cols; /*!< set of virtual columns affected
by foreign key constraint. */
};

std::ostream&
@@ -1189,6 +1253,24 @@ struct dict_foreign_with_index {
const dict_index_t* m_index;
};

#ifdef WITH_WSREP
/** A function object to find a foreign key with the given index as the
foreign index. Return the foreign key with matching criteria or NULL */
struct dict_foreign_with_foreign_index {

dict_foreign_with_foreign_index(const dict_index_t* index)
: m_index(index)
{}

bool operator()(const dict_foreign_t* foreign) const
{
return(foreign->foreign_index == m_index);
}

const dict_index_t* m_index;
};
#endif

/* A function object to check if the foreign constraint is between different
tables. Returns true if foreign key constraint is between different tables,
false otherwise. */
@@ -1273,6 +1355,10 @@ dict_foreign_free(
/*==============*/
dict_foreign_t* foreign) /*!< in, own: foreign key struct */
{
if (foreign->v_cols != NULL) {
UT_DELETE(foreign->v_cols);
}

mem_heap_free(foreign->heap);
}

@@ -1332,12 +1418,42 @@ generate a specific template for it. */
typedef ut_list_base<lock_t, ut_list_node<lock_t> lock_table_t::*>
table_lock_list_t;

/** mysql template structure defined in row0mysql.cc */
struct mysql_row_templ_t;

/** Structure defines template related to virtual columns and
their base columns */
struct dict_vcol_templ_t {
/** number of regular columns */
ulint n_col;

/** number of virtual columns */
ulint n_v_col;

/** array of templates for virtual col and their base columns */
mysql_row_templ_t** vtempl;

/** table's database name */
std::string db_name;

/** table name */
std::string tb_name;

/** share->table_name */
std::string share_name;

/** MySQL record length */
ulint rec_len;

/** default column value if any */
byte* default_rec;
};
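
dict_vcol_templ_t above keeps one vtempl slot per regular and per virtual column (n_col + n_v_col entries); judging from the NULL checks in dict_free_vc_templ() earlier in this diff, slots that no virtual column needs stay NULL. A simplified sketch of walking such an array, with stand-in types:

#include <cstddef>

struct demo_row_templ { int field_no; };

struct demo_vcol_templ {
        unsigned long    n_col;     /* regular columns */
        unsigned long    n_v_col;   /* virtual columns */
        demo_row_templ** vtempl;    /* n_col + n_v_col slots, NULL = unused */
};

static unsigned long demo_count_used_slots(const demo_vcol_templ* t)
{
        unsigned long used = 0;
        if (t->vtempl == NULL) {
                return 0;
        }
        for (unsigned long i = 0; i < t->n_col + t->n_v_col; i++) {
                if (t->vtempl[i] != NULL) {
                        used++;
                }
        }
        return used;
}
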

/* This flag is for sync SQL DDL and memcached DML.
if table->memcached_sync_count == DICT_TABLE_IN_DDL means there's DDL running on
the table, DML from memcached will be blocked. */
#define DICT_TABLE_IN_DDL -1

struct innodb_col_templ_t;
/** These are used when MySQL FRM and InnoDB data dictionary are
in inconsistent state. */
typedef enum {
@@ -1363,6 +1479,7 @@ struct dict_table_t {

void* thd; /*!< thd */
fil_space_crypt_t *crypt_data; /*!< crypt data if present */

/** Release the table handle. */
inline void release();

@@ -1417,6 +1534,8 @@ struct dict_table_t {
5 whether the table is being created its own tablespace,
6 whether the table has been DISCARDed,
7 whether the aux FTS tables names are in hex.
8 whether the table is instinc table.
9 whether the table has encryption setting.
Use DICT_TF2_FLAG_IS_SET() to parse this flag. */
unsigned flags2:DICT_TF2_BITS;

@@ -1470,6 +1589,13 @@ struct dict_table_t {
/** Array of virtual column descriptions. */
dict_v_col_t* v_cols;

/** List of stored column descriptions. It is used only for foreign key
check during create table and copy alter operations.
During copy alter, s_cols list is filled during create table operation
and need to preserve till rename table operation. That is the
reason s_cols is a part of dict_table_t */
dict_s_col_list* s_cols;

/** Column names packed in a character string
"name1\0name2\0...nameN\0". Until the string contains n_cols, it will
be allocated from a temporary heap. The final string will be allocated
@@ -1650,15 +1776,22 @@ struct dict_table_t {
/** The state of the background stats thread wrt this table.
See BG_STAT_NONE, BG_STAT_IN_PROGRESS and BG_STAT_SHOULD_QUIT.
Writes are covered by dict_sys->mutex. Dirty reads are possible. */
#define BG_SCRUB_IN_PROGRESS ((byte)(1 << 2))

#define BG_SCRUB_IN_PROGRESS ((byte)(1 << 2))
/*!< BG_SCRUB_IN_PROGRESS is set in
stats_bg_flag when the background
scrub code is working on this table. The DROP
TABLE code waits for this to be cleared
before proceeding. */

#define BG_IN_PROGRESS (BG_STAT_IN_PROGRESS | BG_SCRUB_IN_PROGRESS)
#define BG_STAT_SHOULD_QUIT (1 << 1)

#define BG_IN_PROGRESS (BG_STAT_IN_PROGRESS | BG_SCRUB_IN_PROGRESS)


/** The state of the background stats thread wrt this table.
See BG_STAT_NONE, BG_STAT_IN_PROGRESS and BG_STAT_SHOULD_QUIT.
Writes are covered by dict_sys->mutex. Dirty reads are possible. */
byte stats_bg_flag;

bool stats_error_printed;
@@ -1752,8 +1885,10 @@ public:
but just need a increased counter to track consistent view while
proceeding SELECT as part of UPDATE. */
ib_uint64_t sess_trx_id;

#endif /* !UNIV_HOTBACKUP */
ibool is_encrypted;

bool is_encrypted;

#ifdef UNIV_DEBUG
/** Value of 'magic_n'. */
@@ -1764,10 +1899,13 @@ public:
#endif /* UNIV_DEBUG */
/** mysql_row_templ_t for base columns used for compute the virtual
columns */
innodb_col_templ_t* vc_templ;
dict_vcol_templ_t* vc_templ;

/** whether above vc_templ comes from purge allocation */
bool vc_templ_purge;
/** encryption key, it's only for export/import */
byte* encryption_key;

/** encryption iv, it's only for export/import */
byte* encryption_iv;
};

/*******************************************************************//**
@@ -1871,29 +2009,20 @@ dict_table_autoinc_own(
}
#endif /* UNIV_DEBUG */

/** whether a col is used in spatial index or regular index */
enum col_spatial_status {
/** Not used in gis index. */
SPATIAL_NONE = 0,

/** Used in both spatial index and regular index. */
SPATIAL_MIXED = 1,

/** Only used in spatial index. */
SPATIAL_ONLY = 2
};
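
The enum above (renamed to spatial_status_t further down in this hunk) distinguishes columns that are not in any spatial index, in both spatial and regular indexes, or only in spatial indexes. A small illustrative consumer, with a hypothetical policy:

enum demo_spatial_status {
        DEMO_SPATIAL_NONE  = 0,  /* not used in any spatial index */
        DEMO_SPATIAL_MIXED = 1,  /* used in spatial and regular indexes */
        DEMO_SPATIAL_ONLY  = 2   /* used only in spatial indexes */
};

/* Hypothetical helper: anything touching a spatial index gets the
   GIS-specific code path. */
static bool demo_needs_gis_handling(demo_spatial_status s)
{
        return s == DEMO_SPATIAL_MIXED || s == DEMO_SPATIAL_ONLY;
}
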

/** Check whether the col is used in spatial index or regular index.
@param[in] col column to check
@return col_spatial_status */
@return spatial status */
inline
col_spatial_status
spatial_status_t
dict_col_get_spatial_status(
const dict_col_t* col)
{
col_spatial_status spatial_status = SPATIAL_NONE;
spatial_status_t spatial_status = SPATIAL_NONE;

ut_ad(col->ord_part);
/* Column is not a part of any index. */
if (!col->ord_part) {
return(spatial_status);
}

if (DATA_GEOMETRY_MTYPE(col->mtype)) {
if (col->max_prefix == 0) {