Merge branch '10.0' into 10.1
Commit aa59ecec89

.gitignore (vendored): 6 lines changed
@@ -477,3 +477,9 @@ UpgradeLog*.htm
 
 # Microsoft Fakes
 FakesAssemblies/
+
+# macOS garbage
+.DS_Store
+
+# QtCreator && CodeBlocks
+*.cbp
@@ -362,6 +362,10 @@ INCLUDE(maintainer)
 
 IF(WITH_UNIT_TESTS)
 ENABLE_TESTING()
+# This is the only instance where ADD_TEST should be used,
+# to make sure that make test will run MTR,
+# use MY_ADD_TEST macro to add other tests
+ADD_TEST(NAME MTR COMMAND ./mysql-test-run WORKING_DIRECTORY "mysql-test")
 ADD_SUBDIRECTORY(unittest/mytap)
 ADD_SUBDIRECTORY(unittest/strings)
 ADD_SUBDIRECTORY(unittest/examples)
@@ -4994,6 +4994,14 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
 if (opt_xml)
 print_xml_tag(md_result_file, "", "\n", "database", "name=", db, NullS);
 
+
+/* obtain dump of routines (procs/functions) */
+if (opt_routines && mysql_get_server_version(mysql) >= 50009)
+{
+DBUG_PRINT("info", ("Dumping routines for database %s", db));
+dump_routines_for_db(db);
+}
+
 if (opt_single_transaction && mysql_get_server_version(mysql) >= 50500)
 {
 verbose_msg("-- Setting savepoint...\n");
@@ -5003,7 +5011,6 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
 DBUG_RETURN(1);
 }
 }
-
 /* Dump each selected table */
 for (pos= dump_tables; pos < end; pos++)
 {
@@ -5065,12 +5072,6 @@ static int dump_selected_tables(char *db, char **table_names, int tables)
 DBUG_PRINT("info", ("Dumping events for database %s", db));
 dump_events_for_db(db);
 }
-/* obtain dump of routines (procs/functions) */
-if (opt_routines && mysql_get_server_version(mysql) >= 50009)
-{
-DBUG_PRINT("info", ("Dumping routines for database %s", db));
-dump_routines_for_db(db);
-}
 free_root(&glob_root, MYF(0));
 if (opt_xml)
 {
@@ -853,7 +853,7 @@ build_table_string(void)
 
 if (auto_generate_sql_guid_primary)
 {
-dynstr_append(&table_string, "id varchar(32) primary key");
+dynstr_append(&table_string, "id varchar(36) primary key");
 
 if (num_int_cols || num_char_cols || auto_generate_sql_guid_primary)
 dynstr_append(&table_string, ",");
@@ -868,7 +868,7 @@ build_table_string(void)
 if (count) /* Except for the first pass we add a comma */
 dynstr_append(&table_string, ",");
 
-if (snprintf(buf, HUGE_STRING_LENGTH, "id%d varchar(32) unique key", count)
+if (snprintf(buf, HUGE_STRING_LENGTH, "id%d varchar(36) unique key", count)
 > HUGE_STRING_LENGTH)
 {
 fprintf(stderr, "Memory Allocation error in create table\n");
@@ -8739,6 +8739,7 @@ void init_re(void)
 "[[:space:]]*SELECT[[:space:]]|"
 "[[:space:]]*CREATE[[:space:]]+TABLE[[:space:]]|"
 "[[:space:]]*DO[[:space:]]|"
+"[[:space:]]*HANDLER[[:space:]]+.*[[:space:]]+READ[[:space:]]|"
 "[[:space:]]*SET[[:space:]]+OPTION[[:space:]]|"
 "[[:space:]]*DELETE[[:space:]]+MULTI[[:space:]]|"
 "[[:space:]]*UPDATE[[:space:]]+MULTI[[:space:]]|"
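
The pattern list extended above is what mysqltest uses to decide which statements are re-run through the prepared-statement interface when --ps-protocol is enabled; the new alternative adds HANDLER ... READ to that set. The standalone sketch below is only an illustration: it uses POSIX <regex.h> rather than the regex wrapper bundled with mysqltest, and the sample statement is made up.

#include <regex.h>
#include <stdio.h>

int main(void)
{
  /* Just the alternative added above, compiled as an extended, case-insensitive regex. */
  const char *pattern= "[[:space:]]*HANDLER[[:space:]]+.*[[:space:]]+READ[[:space:]]";
  const char *stmt= "HANDLER t1 READ NEXT";
  regex_t re;

  if (regcomp(&re, pattern, REG_EXTENDED | REG_ICASE))
    return 1;
  /* A match means mysqltest would prepare and execute the statement as a PS. */
  printf("%s\n", regexec(&re, stmt, 0, NULL, 0) == 0 ?
         "would go through the PS protocol" : "sent as a plain query");
  regfree(&re);
  return 0;
}
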
@@ -2,7 +2,7 @@
 INCLUDE(${MYSQL_CMAKE_SCRIPT_DIR}/cmake_parse_arguments.cmake)
 
 MACRO(MY_ADD_TEST name)
-ADD_TEST(${name} ${name}-t)
+ADD_TEST(NAME ${name} COMMAND ${name}-t CONFIGURATIONS default_ignore)
 ENDMACRO()
 
 MACRO (MY_ADD_TESTS)
@@ -5722,7 +5722,7 @@ sub lldb_arguments {
 $input = $input ? "< $input" : "";
 
 # write init file for mysqld or client
-mtr_tofile($lldb_init_file, "set args $str $input\n");
+mtr_tofile($lldb_init_file, "process launch --stop-at-entry -- $str $input\n");
 
 print "\nTo start lldb for $type, type in another window:\n";
 print "cd $glob_mysql_test_dir && lldb -s $lldb_init_file $$exe\n";
@@ -2234,6 +2234,29 @@ t1 CREATE TABLE `t1` (
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1
 DROP TABLE t1;
 #
+# MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key
+#
+CREATE TABLE t1 (
+`ID` BIGINT(20) NOT NULL,
+`RANK` MEDIUMINT(4) NOT NULL,
+`CHECK_POINT` BIGINT(20) NOT NULL,
+UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`)
+) ENGINE=InnoDB;
+ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`);
+SHOW CREATE TABLE t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+`ID` bigint(20) NOT NULL,
+`RANK` mediumint(4) NOT NULL,
+`CHECK_POINT` bigint(20) NOT NULL,
+PRIMARY KEY (`ID`,`CHECK_POINT`),
+UNIQUE KEY `HORIZON_UIDX01` (`ID`,`RANK`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`);
+Warnings:
+Note 1061 Multiple primary key defined
+DROP TABLE t1;
+#
 # End of 10.0 tests
 #
 #
@@ -1413,7 +1413,7 @@ USE test;
 End of 5.0 tests.
 select * from information_schema.engines WHERE ENGINE="MyISAM";
 ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS
-MyISAM DEFAULT MyISAM storage engine NO NO NO
+MyISAM DEFAULT Non-transactional engine with good performance and small data footprint NO NO NO
 grant select on *.* to user3148@localhost;
 select user,db from information_schema.processlist;
 user db
@@ -146,3 +146,19 @@ a
 16
 DROP TABLE t1;
 End of 5.1 tests
+#
+# mdev-16235: SELECT over a table with LIMIT 0
+#
+EXPLAIN
+SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Zero limit
+SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0;
+start_time user_host query_time lock_time rows_sent rows_examined db last_insert_id insert_id server_id sql_text thread_id rows_affected
+EXPLAIN
+SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0;
+id select_type table type possible_keys key key_len ref rows Extra
+1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Zero limit
+SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0;
+help_topic_id name help_category_id description example url
+End of 5.5 tests
@@ -407,7 +407,7 @@ LOCK TABLE t1 WRITE;
 HANDLER t1 OPEN;
 ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
 HANDLER t1 READ FIRST;
-ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
+Got one of the listed errors
 HANDLER t1 CLOSE;
 ERROR HY000: Can't execute the given command because you have active locked tables or an active transaction
 UNLOCK TABLES;
@@ -5579,3 +5579,18 @@ USE `db1`;
 DROP DATABASE db1;
 DROP DATABASE db2;
 FOUND /Database: mysql/ in bug11505.sql
+#
+# MDEV-15021: Fix the order in which routines are called
+#
+use test;
+CREATE FUNCTION f() RETURNS INT RETURN 1;
+CREATE VIEW v1 AS SELECT f();
+# Running mysqldump -uroot test --routines --tables v1 > **vardir**/test.dmp
+DROP VIEW v1;
+DROP FUNCTION f;
+# Running mysql -uroot test < **vardir**/test.dmp
+#
+# Cleanup after succesful import.
+#
+DROP VIEW v1;
+DROP FUNCTION f;
@@ -255,3 +255,6 @@ Benchmark
 # MDEV-4684 - Enhancement request: --init-command support for mysqlslap
 #
 DROP TABLE t1;
+#
+# Bug MDEV-15789 (Upstream: #80329): MYSQLSLAP OPTIONS --AUTO-GENERATE-SQL-GUID-PRIMARY and --AUTO-GENERATE-SQL-SECONDARY-INDEXES DONT WORK
+#
@@ -356,13 +356,13 @@ and o_orderkey = l_orderkey
 group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice
 order by o_totalprice desc, o_orderdate;
 id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 6005 0.00 Using temporary; Using filesort
-1 PRIMARY orders eq_ref PRIMARY,i_o_custkey PRIMARY 4 <subquery2>.l_orderkey 1 100.00 Using where
+1 PRIMARY orders ALL PRIMARY,i_o_custkey NULL NULL NULL 1500 100.00 Using where; Using temporary; Using filesort
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 dbt3_s001.orders.o_orderkey 1 100.00
 1 PRIMARY customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 100.00
-1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 <subquery2>.l_orderkey 4 100.00
+1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey_quantity 4 dbt3_s001.orders.o_orderkey 4 100.00 Using index
 2 MATERIALIZED lineitem index NULL i_l_orderkey_quantity 13 NULL 6005 100.00 Using index
 Warnings:
-Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having (sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250)) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where ((`dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey`) and (`dbt3_s001`.`orders`.`o_orderkey` = `<subquery2>`.`l_orderkey`) and (`dbt3_s001`.`lineitem`.`l_orderkey` = `<subquery2>`.`l_orderkey`)) group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE`
+Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having (sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250)) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where ((`dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey`) and (`<subquery2>`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey`) and (`dbt3_s001`.`lineitem`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey`)) group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE`
 select
 c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity)
 from customer, orders, lineitem
@@ -1530,6 +1530,68 @@ t
 10:00:00
 11:00:00
 DROP TABLE t1;
+#
+# MDEV-16374: filtered shows 0 for materilization scan for a semi join, which makes optimizer
+# always pick materialization scan over materialization lookup
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int);
+insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),
+(11,11),(12,12),(13,13),(14,14),(15,15);
+set @@optimizer_use_condition_selectivity=2;
+explain extended select * from t1 where a in (select max(a) from t1 group by b);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where (`<subquery2>`.`max(a)` = `test`.`t1`.`a`)
+select * from t1 where a in (select max(a) from t1 group by b);
+a b
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+set @@optimizer_use_condition_selectivity=1;
+explain extended select * from t1 where a in (select max(a) from t1 group by b);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where (`<subquery2>`.`max(a)` = `test`.`t1`.`a`)
+select * from t1 where a in (select max(a) from t1 group by b);
+a b
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+drop table t1,t0;
 set histogram_size=@save_histogram_size;
 set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
 set use_stat_tables=@save_use_stat_tables;
@@ -359,13 +359,13 @@ and o_orderkey = l_orderkey
 group by c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice
 order by o_totalprice desc, o_orderdate;
 id select_type table type possible_keys key key_len ref rows filtered Extra
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 6005 0.00 Using temporary; Using filesort
-1 PRIMARY orders eq_ref PRIMARY,i_o_custkey PRIMARY 4 <subquery2>.l_orderkey 1 100.00 Using where
+1 PRIMARY orders ALL PRIMARY,i_o_custkey NULL NULL NULL 1500 100.00 Using where; Using temporary; Using filesort
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 dbt3_s001.orders.o_orderkey 1 100.00
 1 PRIMARY customer eq_ref PRIMARY PRIMARY 4 dbt3_s001.orders.o_custkey 1 100.00
-1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity PRIMARY 4 <subquery2>.l_orderkey 4 100.00
+1 PRIMARY lineitem ref PRIMARY,i_l_orderkey,i_l_orderkey_quantity i_l_orderkey_quantity 4 dbt3_s001.orders.o_orderkey 4 100.00 Using index
 2 MATERIALIZED lineitem index NULL PRIMARY 8 NULL 6005 100.00
 Warnings:
-Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having (sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250)) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where ((`dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey`) and (`dbt3_s001`.`orders`.`o_orderkey` = `<subquery2>`.`l_orderkey`) and (`dbt3_s001`.`lineitem`.`l_orderkey` = `<subquery2>`.`l_orderkey`)) group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE`
+Note 1003 select `dbt3_s001`.`customer`.`c_name` AS `c_name`,`dbt3_s001`.`customer`.`c_custkey` AS `c_custkey`,`dbt3_s001`.`orders`.`o_orderkey` AS `o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE` AS `o_orderdate`,`dbt3_s001`.`orders`.`o_totalprice` AS `o_totalprice`,sum(`dbt3_s001`.`lineitem`.`l_quantity`) AS `sum(l_quantity)` from <materialize> (select `dbt3_s001`.`lineitem`.`l_orderkey` from `dbt3_s001`.`lineitem` group by `dbt3_s001`.`lineitem`.`l_orderkey` having (sum(`dbt3_s001`.`lineitem`.`l_quantity`) > 250)) join `dbt3_s001`.`customer` join `dbt3_s001`.`orders` join `dbt3_s001`.`lineitem` where ((`dbt3_s001`.`customer`.`c_custkey` = `dbt3_s001`.`orders`.`o_custkey`) and (`<subquery2>`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey`) and (`dbt3_s001`.`lineitem`.`l_orderkey` = `dbt3_s001`.`orders`.`o_orderkey`)) group by `dbt3_s001`.`customer`.`c_name`,`dbt3_s001`.`customer`.`c_custkey`,`dbt3_s001`.`orders`.`o_orderkey`,`dbt3_s001`.`orders`.`o_orderDATE`,`dbt3_s001`.`orders`.`o_totalprice` order by `dbt3_s001`.`orders`.`o_totalprice` desc,`dbt3_s001`.`orders`.`o_orderDATE`
 select
 c_name, c_custkey, o_orderkey, o_orderdate, o_totalprice, sum(l_quantity)
 from customer, orders, lineitem
@@ -1540,6 +1540,68 @@ t
 10:00:00
 11:00:00
 DROP TABLE t1;
+#
+# MDEV-16374: filtered shows 0 for materilization scan for a semi join, which makes optimizer
+# always pick materialization scan over materialization lookup
+#
+create table t0(a int);
+insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
+create table t1 (a int, b int);
+insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),
+(11,11),(12,12),(13,13),(14,14),(15,15);
+set @@optimizer_use_condition_selectivity=2;
+explain extended select * from t1 where a in (select max(a) from t1 group by b);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where (`<subquery2>`.`max(a)` = `test`.`t1`.`a`)
+select * from t1 where a in (select max(a) from t1 group by b);
+a b
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+set @@optimizer_use_condition_selectivity=1;
+explain extended select * from t1 where a in (select max(a) from t1 group by b);
+id select_type table type possible_keys key key_len ref rows filtered Extra
+1 PRIMARY t1 ALL NULL NULL NULL NULL 16 100.00 Using where
+1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 4 test.t1.a 1 100.00
+2 MATERIALIZED t1 ALL NULL NULL NULL NULL 16 100.00 Using temporary
+Warnings:
+Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from <materialize> (select max(`test`.`t1`.`a`) from `test`.`t1` group by `test`.`t1`.`b`) join `test`.`t1` where (`<subquery2>`.`max(a)` = `test`.`t1`.`a`)
+select * from t1 where a in (select max(a) from t1 group by b);
+a b
+0 0
+1 1
+2 2
+3 3
+4 4
+5 5
+6 6
+7 7
+8 8
+9 9
+10 10
+11 11
+12 12
+13 13
+14 14
+15 15
+drop table t1,t0;
 set histogram_size=@save_histogram_size;
 set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
 set use_stat_tables=@save_use_stat_tables;
@@ -334,7 +334,7 @@ SELECT * FROM t1
 WHERE (f1) IN (SELECT f1 FROM t2)
 LIMIT 0;
 id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE
+1 PRIMARY NULL NULL NULL NULL NULL NULL NULL Zero limit
 2 DEPENDENT SUBQUERY t2 ALL NULL NULL NULL NULL 2 Using where
 SELECT * FROM t1
 WHERE (f1) IN (SELECT f1 FROM t2)
@@ -1655,3 +1655,60 @@ id select_type table type possible_keys key key_len ref rows Extra
 1 PRIMARY <subquery2> eq_ref distinct_key distinct_key 11 func 1
 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 3 Using where
 DROP TABLE t1,t2;
+#
+# MDEV-16225: wrong resultset from query with semijoin=on
+#
+CREATE TABLE t1 (
+`id` int(10) NOT NULL AUTO_INCREMENT,
+`local_name` varchar(64) NOT NULL,
+PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=latin1;
+insert into t1(`id`,`local_name`) values
+(1,'Cash Advance'),
+(2,'Cash Advance'),
+(3,'Rollover'),
+(4,'AL Installment'),
+(5,'AL Installment'),
+(6,'AL Installment'),
+(7,'AL Installment'),
+(8,'AL Installment'),
+(9,'AL Installment'),
+(10,'Internet Payday'),
+(11,'Rollover - Internet Payday'),
+(12,'AL Monthly Installment'),
+(13,'AL Semi-Monthly Installment');
+explain
+SELECT SQL_NO_CACHE t.id
+FROM t1 t
+WHERE (
+t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11)))
+OR
+(t.id IN (0,4,12,13,1,10,3,11))
+);
+id select_type table type possible_keys key key_len ref rows Extra
+1 PRIMARY t index PRIMARY PRIMARY 4 NULL 13 Using where; Using index
+2 MATERIALIZED <subquery3> ALL distinct_key NULL NULL NULL 8
+2 MATERIALIZED A ALL PRIMARY NULL NULL NULL 13 Using where; Using join buffer (flat, BNL join)
+3 MATERIALIZED B ALL PRIMARY NULL NULL NULL 13 Using where
+SELECT SQL_NO_CACHE t.id
+FROM t1 t
+WHERE (
+t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11)))
+OR
+(t.id IN (0,4,12,13,1,10,3,11))
+);
+id
+1
+2
+3
+4
+5
+6
+7
+8
+9
+10
+11
+12
+13
+drop table t1;
@@ -77,9 +77,9 @@ explain select * from t4 where
 t4.a in (select max(t2.a) from t1, t2 group by t2.b) and
 t4.b in (select max(t2.a) from t1, t2 group by t2.b);
 id select_type table type possible_keys key key_len ref rows Extra
-1 PRIMARY <subquery3> ALL distinct_key NULL NULL NULL 5
-1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5 Using join buffer (flat, BNL join)
-1 PRIMARY t4 ref a a 10 <subquery2>.max(t2.a),<subquery3>.max(t2.a) 12
+1 PRIMARY <subquery2> ALL distinct_key NULL NULL NULL 5
+1 PRIMARY t4 ref a a 5 <subquery2>.max(t2.a) 12 Using index condition
+1 PRIMARY <subquery3> eq_ref distinct_key distinct_key 4 test.t4.b 1
 3 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary
 3 MATERIALIZED t1 ALL NULL NULL NULL NULL 10 Using join buffer (flat, BNL join)
 2 MATERIALIZED t2 ALL NULL NULL NULL NULL 5 Using temporary
@@ -1,5 +1,5 @@
 if (!`SELECT count(*) FROM information_schema.plugins WHERE
 plugin_name = 'federated' AND plugin_status = 'active' AND
-plugin_description LIKE '%FederatedX%'`){
+plugin_description LIKE '%transactions%'`){
 skip Need FederatedX engine;
 }
@@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines
 WHERE ENGINE = 'ARCHIVE';
 ENGINE ARCHIVE
 SUPPORT YES
-COMMENT Archive storage engine
+COMMENT gzip-compresses tables for a low storage footprint
 TRANSACTIONS NO
 XA NO
 SAVEPOINTS NO
@@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines
 WHERE ENGINE = 'CSV';
 ENGINE CSV
 SUPPORT YES
-COMMENT CSV storage engine
+COMMENT Stores tables as CSV files
 TRANSACTIONS NO
 XA NO
 SAVEPOINTS NO
@@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines
 WHERE ENGINE = 'FEDERATED';
 ENGINE FEDERATED
 SUPPORT YES
-COMMENT FederatedX pluggable storage engine
+COMMENT Allows to access tables on other MariaDB servers, supports transactions and more
 TRANSACTIONS YES
 XA NO
 SAVEPOINTS YES
@@ -2,7 +2,7 @@ SELECT * FROM information_schema.engines
 WHERE ENGINE = 'MyISAM';
 ENGINE MyISAM
 SUPPORT DEFAULT
-COMMENT MyISAM storage engine
+COMMENT Non-transactional engine with good performance and small data footprint
 TRANSACTIONS NO
 XA NO
 SAVEPOINTS NO
@@ -377,7 +377,9 @@ send optimize table t1;
 # client 1
 --echo proceed with the normal connection
 connection default;
+--disable_ps_protocol
 handler t1 read next;
+--enable_ps_protocol
 handler t1 close;
 # client 2
 --echo read the result from the other connection
@@ -269,7 +269,7 @@ handler t1 open;
 lock table t1 write;
 alter table t1 engine=csv;
 handler t1 read a next;
-ERROR HY000: Storage engine CSV of the table `test`.`t1` doesn't have this option
+Got one of the listed errors
 handler t1 close;
 unlock tables;
 drop table t1;
@@ -326,7 +326,7 @@ let $wait_condition=
 info = "alter table t1 engine=csv";
 --source include/wait_condition.inc
 connection default;
---error ER_ILLEGAL_HA
+--error ER_ILLEGAL_HA,ER_KEY_DOES_NOT_EXITS
 handler t1 read a next;
 handler t1 close;
 connection con1;
mysql-test/suite/handler/ps.result (new file, 9 lines)
@@ -0,0 +1,9 @@
+create table t1 (i int);
+handler test.t1 open handler_a;
+flush status;
+handler handler_a read first;
+i
+show status like 'Com_stmt_prepare%';
+Variable_name Value
+Com_stmt_prepare OK
+drop table t1;
mysql-test/suite/handler/ps.test (new file, 11 lines)
@@ -0,0 +1,11 @@
+#
+# MDEV-15729 Server crashes in Field::make_field upon HANDLER READ executed with PS protocol
+#
+create table t1 (i int);
+handler test.t1 open handler_a;
+flush status;
+handler handler_a read first;
+# handler...read must be prepared in --ps-protocol mode
+--replace_result $PS_PROTOCOL OK
+show status like 'Com_stmt_prepare%';
+drop table t1;
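
The new ps.test case drives HANDLER ... READ through mysqltest's --ps-protocol mode. For readers who want to reproduce the MDEV-15729 scenario outside mysqltest, a rough sketch against the C API follows; the connection parameters are placeholders and error handling is minimal, so treat it as an illustration rather than part of the test suite.

#include <mysql.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  MYSQL *con= mysql_init(NULL);
  /* Placeholder connection parameters. */
  if (!mysql_real_connect(con, "localhost", "root", "", "test", 0, NULL, 0))
    return 1;

  mysql_query(con, "CREATE TABLE t1 (i INT)");
  mysql_query(con, "HANDLER t1 OPEN handler_a");

  /* Run the HANDLER READ through the prepared-statement interface,
     which is the code path MDEV-15729 crashed on. */
  MYSQL_STMT *stmt= mysql_stmt_init(con);
  const char *read_first= "HANDLER handler_a READ FIRST";
  if (mysql_stmt_prepare(stmt, read_first, (unsigned long) strlen(read_first)) ||
      mysql_stmt_execute(stmt))
    fprintf(stderr, "HANDLER READ via PS failed: %s\n", mysql_stmt_error(stmt));
  mysql_stmt_close(stmt);

  mysql_query(con, "DROP TABLE t1");
  mysql_close(con);
  return 0;
}
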
mysql-test/suite/innodb/r/rename_table.result (new file, 5 lines)
@@ -0,0 +1,5 @@
+call mtr.add_suppression("InnoDB: (Operating system error|The error means|Cannot rename file)");
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+RENAME TABLE t1 TO non_existing_db.t1;
+ERROR HY000: Error on rename of './test/t1' to './non_existing_db/t1' (errno: -1 "Internal error < 0 (Not system error)")
+DROP TABLE t1;
@@ -9,9 +9,10 @@
 
 
 # Ignore OS errors
-call mtr.add_suppression("InnoDB: File ./test/t1*");
-call mtr.add_suppression("InnoDB: Error number*");
-call mtr.add_suppression("InnoDB: File ./test/t1#p#p1#sp#p1sp0.ibd: 'rename' returned OS error*");
+call mtr.add_suppression("InnoDB: File ./test/t1");
+call mtr.add_suppression("InnoDB: Error number");
+call mtr.add_suppression("InnoDB: Cannot rename file '.*/test/t1#[Pp]#p1#[Ss][Pp]#p1sp0\\.ibd' to");
+call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation.");
 
 # MDEV-7046: MySQL#74480 - Failing assertion: os_file_status(newpath, &exists, &type)
 # after Operating system error number 36 in a file operation
mysql-test/suite/innodb/t/rename_table.test (new file, 11 lines)
@@ -0,0 +1,11 @@
+--source include/have_innodb.inc
+
+call mtr.add_suppression("InnoDB: (Operating system error|The error means|Cannot rename file)");
+
+CREATE TABLE t1 (a INT) ENGINE=InnoDB;
+--replace_result "\\" "/"
+--error ER_ERROR_ON_RENAME
+RENAME TABLE t1 TO non_existing_db.t1;
+
+# Cleanup
+DROP TABLE t1;
@@ -40,7 +40,7 @@ sub start_test {
 return "Not run for embedded server" if $::opt_embedded_server;
 return "Not configured to run ctest" unless -f "../CTestTestfile.cmake";
 my ($ctest_vs)= $opt_vs_config ? "--build-config $opt_vs_config" : "";
-my (@ctest_list)= `cd .. && ctest $opt_vs_config --show-only --verbose`;
+my (@ctest_list)= `cd .. && ctest $opt_vs_config -E MTR -C default_ignore --show-only --verbose`;
 return "No ctest" if $?;
 
 my ($command, %tests);
@@ -1840,6 +1840,21 @@ ALTER TABLE t1 DROP INDEX IF EXISTS fk, DROP COLUMN IF EXISTS c;
 SHOW CREATE TABLE t1;
 DROP TABLE t1;
 
+--echo #
+--echo # MDEV-14668 ADD PRIMARY KEY IF NOT EXISTS on composite key
+--echo #
+CREATE TABLE t1 (
+`ID` BIGINT(20) NOT NULL,
+`RANK` MEDIUMINT(4) NOT NULL,
+`CHECK_POINT` BIGINT(20) NOT NULL,
+UNIQUE INDEX `HORIZON_UIDX01` (`ID`, `RANK`)
+) ENGINE=InnoDB;
+
+ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`);
+SHOW CREATE TABLE t1;
+ALTER TABLE t1 ADD PRIMARY KEY IF NOT EXISTS (`ID`, `CHECK_POINT`);
+DROP TABLE t1;
+
 --echo #
 --echo # End of 10.0 tests
 --echo #
@@ -115,3 +115,17 @@ SELECT a FROM t1 ORDER BY a LIMIT 2 OFFSET 14;
 DROP TABLE t1;
 
 --echo End of 5.1 tests
+
+--echo #
+--echo # mdev-16235: SELECT over a table with LIMIT 0
+--echo #
+
+EXPLAIN
+SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0;
+SELECT * FROM mysql.slow_log WHERE sql_text != 'foo' LIMIT 0;
+
+EXPLAIN
+SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0;
+SELECT * FROM mysql.help_topic WHERE help_category_id != example LIMIT 0;
+
+--echo End of 5.5 tests
@@ -481,7 +481,7 @@ LOCK TABLE t1 WRITE;
 --echo # HANDLER commands are not allowed in LOCK TABLES mode
 --error ER_LOCK_OR_ACTIVE_TRANSACTION
 HANDLER t1 OPEN;
---error ER_LOCK_OR_ACTIVE_TRANSACTION
+--error ER_LOCK_OR_ACTIVE_TRANSACTION,ER_UNKNOWN_TABLE
 HANDLER t1 READ FIRST;
 --error ER_LOCK_OR_ACTIVE_TRANSACTION
 HANDLER t1 CLOSE;
@@ -2627,3 +2627,25 @@ let SEARCH_FILE=$MYSQLTEST_VARDIR/tmp/bug11505.sql;
 let SEARCH_PATTERN=Database: mysql;
 exec $MYSQL_DUMP mysql func > $SEARCH_FILE;
 source include/search_pattern_in_file.inc;
+
+--echo #
+--echo # MDEV-15021: Fix the order in which routines are called
+--echo #
+use test;
+CREATE FUNCTION f() RETURNS INT RETURN 1;
+CREATE VIEW v1 AS SELECT f();
+
+--echo # Running mysqldump -uroot test --routines --tables v1 > **vardir**/test.dmp
+--exec $MYSQL_DUMP -uroot test --routines --tables v1 > $MYSQLTEST_VARDIR/test.dmp
+
+DROP VIEW v1;
+DROP FUNCTION f;
+
+--echo # Running mysql -uroot test < **vardir**/test.dmp
+--exec $MYSQL -uroot test < $MYSQLTEST_VARDIR/test.dmp
+
+--echo #
+--echo # Cleanup after succesful import.
+--echo #
+DROP VIEW v1;
+DROP FUNCTION f;
@@ -80,3 +80,11 @@ DROP DATABASE bug58090;
 
 --exec $MYSQL_SLAP --create-schema=test --init-command="CREATE TABLE t1(a INT)" --silent --concurrency=1 --iterations=1
 DROP TABLE t1;
+
+--echo #
+--echo # Bug MDEV-15789 (Upstream: #80329): MYSQLSLAP OPTIONS --AUTO-GENERATE-SQL-GUID-PRIMARY and --AUTO-GENERATE-SQL-SECONDARY-INDEXES DONT WORK
+--echo #
+
+--exec $MYSQL_SLAP --concurrency=1 --silent --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-guid-primary --create-schema=slap
+
+--exec $MYSQL_SLAP --concurrency=1 --silent --iterations=1 --number-int-cols=2 --number-char-cols=3 --auto-generate-sql --auto-generate-sql-secondary-indexes=1 --create-schema=slap
|
|||||||
|
|
||||||
DROP TABLE t1;
|
DROP TABLE t1;
|
||||||
|
|
||||||
|
--echo #
|
||||||
|
--echo # MDEV-16374: filtered shows 0 for materilization scan for a semi join, which makes optimizer
|
||||||
|
--echo # always pick materialization scan over materialization lookup
|
||||||
|
--echo #
|
||||||
|
|
||||||
|
create table t0(a int);
|
||||||
|
insert into t0 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9);
|
||||||
|
create table t1 (a int, b int);
|
||||||
|
insert into t1 values (0,0),(1,1),(2,2),(3,3),(4,4),(5,5),(6,6),(7,7),(8,8),(9,9),(10,10),
|
||||||
|
(11,11),(12,12),(13,13),(14,14),(15,15);
|
||||||
|
set @@optimizer_use_condition_selectivity=2;
|
||||||
|
explain extended select * from t1 where a in (select max(a) from t1 group by b);
|
||||||
|
select * from t1 where a in (select max(a) from t1 group by b);
|
||||||
|
set @@optimizer_use_condition_selectivity=1;
|
||||||
|
explain extended select * from t1 where a in (select max(a) from t1 group by b);
|
||||||
|
select * from t1 where a in (select max(a) from t1 group by b);
|
||||||
|
drop table t1,t0;
|
||||||
|
|
||||||
set histogram_size=@save_histogram_size;
|
set histogram_size=@save_histogram_size;
|
||||||
set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
|
set optimizer_use_condition_selectivity=@save_optimizer_use_condition_selectivity;
|
||||||
set use_stat_tables=@save_use_stat_tables;
|
set use_stat_tables=@save_use_stat_tables;
|
||||||
|
@@ -303,3 +303,45 @@ eval $q;
 eval explain $q;
 
 DROP TABLE t1,t2;
+
+--echo #
+--echo # MDEV-16225: wrong resultset from query with semijoin=on
+--echo #
+
+CREATE TABLE t1 (
+`id` int(10) NOT NULL AUTO_INCREMENT,
+`local_name` varchar(64) NOT NULL,
+PRIMARY KEY (`id`)
+) ENGINE=InnoDB AUTO_INCREMENT=14 DEFAULT CHARSET=latin1;
+
+insert into t1(`id`,`local_name`) values
+(1,'Cash Advance'),
+(2,'Cash Advance'),
+(3,'Rollover'),
+(4,'AL Installment'),
+(5,'AL Installment'),
+(6,'AL Installment'),
+(7,'AL Installment'),
+(8,'AL Installment'),
+(9,'AL Installment'),
+(10,'Internet Payday'),
+(11,'Rollover - Internet Payday'),
+(12,'AL Monthly Installment'),
+(13,'AL Semi-Monthly Installment');
+
+explain
+SELECT SQL_NO_CACHE t.id
+FROM t1 t
+WHERE (
+t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11)))
+OR
+(t.id IN (0,4,12,13,1,10,3,11))
+);
+SELECT SQL_NO_CACHE t.id
+FROM t1 t
+WHERE (
+t.id IN (SELECT A.id FROM t1 AS A WHERE A.local_name IN (SELECT B.local_name FROM t1 AS B WHERE B.id IN (0,4,12,13,1,10,3,11)))
+OR
+(t.id IN (0,4,12,13,1,10,3,11))
+);
+drop table t1;
@@ -2521,6 +2521,10 @@ static int send_client_reply_packet(MCPVIO_EXT *mpvio,
 mysql->client_flag|= CLIENT_MULTI_RESULTS;
 
 #ifdef HAVE_OPENSSL
+if (mysql->options.ssl_key || mysql->options.ssl_cert ||
+mysql->options.ssl_ca || mysql->options.ssl_capath ||
+mysql->options.ssl_cipher)
+mysql->options.use_ssl = 1;
 if (mysql->options.use_ssl)
 mysql->client_flag|= CLIENT_SSL;
 #endif /* HAVE_OPENSSL */
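
The added check means that configuring any of the ssl_key/ssl_cert/ssl_ca/ssl_capath/ssl_cipher options is now enough to make the client request TLS during the handshake. The sketch below is a minimal illustration using the C API; the CA path, host, and credentials are placeholders, and only mysql_ssl_set() is used to trigger the behaviour added above.

#include <mysql.h>
#include <stdio.h>

int main(void)
{
  MYSQL *con= mysql_init(NULL);

  /* Only a CA file is configured here; with the hunk above, setting any of
     the ssl_* options implies use_ssl, so the connection will request TLS. */
  mysql_ssl_set(con, NULL, NULL, "/path/to/ca-cert.pem", NULL, NULL);

  if (!mysql_real_connect(con, "127.0.0.1", "app_user", "app_password",
                          "test", 3306, NULL, 0))
  {
    fprintf(stderr, "connect failed: %s\n", mysql_error(con));
    return 1;
  }
  const char *cipher= mysql_get_ssl_cipher(con);
  printf("cipher in use: %s\n", cipher ? cipher : "none");
  mysql_close(con);
  return 0;
}
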
@@ -195,7 +195,7 @@ int main(int argc, char **argv)
 die("database creation failed");
 }
 
-printf("Creation of the database was successfull");
+printf("Creation of the database was successful");
 return 0;
 }
 
@@ -146,6 +146,11 @@ static void die(const char *fmt, ...)
 exit(1);
 }
 
+#define WRITE_LOG(fmt,...) {\
+char log_buf[1024]; \
+snprintf(log_buf,sizeof(log_buf), fmt, __VA_ARGS__);\
+WriteFile(logfile_handle,log_buf, strlen(log_buf), 0 , 0);\
+}
 
 /*
 spawn-like function to run subprocesses.
@@ -187,17 +192,22 @@ static intptr_t run_tool(int wait_flag, const char *program,...)
 {
 char tmpdir[FN_REFLEN];
 GetTempPath(FN_REFLEN, tmpdir);
-sprintf_s(logfile_path, "%s\\mysql_upgrade_service.%s.log", tmpdir,
+sprintf_s(logfile_path, "%smysql_upgrade_service.%s.log", tmpdir,
 opt_service);
-logfile_handle= CreateFile(logfile_path, GENERIC_WRITE, FILE_SHARE_READ,
-NULL, TRUNCATE_EXISTING, 0, NULL);
-if (!logfile_handle)
+SECURITY_ATTRIBUTES attr= {0};
+attr.nLength= sizeof(SECURITY_ATTRIBUTES);
+attr.bInheritHandle= TRUE;
+logfile_handle= CreateFile(logfile_path, FILE_APPEND_DATA,
+FILE_SHARE_READ|FILE_SHARE_WRITE, &attr, CREATE_ALWAYS, 0, NULL);
+if (logfile_handle == INVALID_HANDLE_VALUE)
 {
 die("Cannot open log file %s, windows error %u",
 logfile_path, GetLastError());
 }
 }
 
+WRITE_LOG("Executing %s\r\n", cmdline);
+
 /* Start child process */
 STARTUPINFO si= {0};
 si.cb= sizeof(si);
@@ -458,7 +468,7 @@ int main(int argc, char **argv)
 log("Phase 3/8: Starting mysqld for upgrade");
 mysqld_process= (HANDLE)run_tool(P_NOWAIT, mysqld_path,
 defaults_file_param, "--skip-networking", "--skip-grant-tables",
-"--enable-named-pipe", socket_param, NULL);
+"--enable-named-pipe", socket_param,"--skip-slave-start", NULL);
 
 if (mysqld_process == INVALID_HANDLE_VALUE)
 {
@@ -6207,6 +6207,9 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
 column reference. See create_view_field() for details.
 */
 item= nj_col->create_item(thd);
+if (!item)
+DBUG_RETURN(NULL);
+
 /*
 *ref != NULL means that *ref contains the item that we need to
 replace. If the item was aliased by the user, set the alias to
@@ -987,6 +987,7 @@ SQL_HANDLER *mysql_ha_read_prepare(THD *thd, TABLE_LIST *tables,
 if (!(handler= mysql_ha_find_handler(thd, tables->alias)))
 DBUG_RETURN(0);
 tables->table= handler->table; // This is used by fix_fields
+handler->table->pos_in_table_list= tables;
 if (mysql_ha_fix_cond_and_key(handler, mode, keyname, key_expr, cond, 1))
 DBUG_RETURN(0);
 DBUG_RETURN(handler);
@@ -1250,10 +1250,19 @@ JOIN::optimize_inner()
 if (cond_value == Item::COND_FALSE || having_value == Item::COND_FALSE ||
 (!unit->select_limit_cnt && !(select_options & OPTION_FOUND_ROWS)))
 { /* Impossible cond */
-DBUG_PRINT("info", (having_value == Item::COND_FALSE ?
-"Impossible HAVING" : "Impossible WHERE"));
-zero_result_cause= having_value == Item::COND_FALSE ?
-"Impossible HAVING" : "Impossible WHERE";
+if (unit->select_limit_cnt)
+{
+DBUG_PRINT("info", (having_value == Item::COND_FALSE ?
+"Impossible HAVING" : "Impossible WHERE"));
+zero_result_cause= having_value == Item::COND_FALSE ?
+"Impossible HAVING" : "Impossible WHERE";
+}
+else
+{
+DBUG_PRINT("info", ("Zero limit"));
+zero_result_cause= "Zero limit";
+conds= 0;
+}
 table_count= top_join_tab_count= 0;
 error= 0;
 goto setup_subq_exit;
@@ -25465,21 +25474,18 @@ void JOIN::set_allowed_join_cache_types()
 
 void JOIN::save_query_plan(Join_plan_state *save_to)
 {
-if (keyuse.elements)
-{
-DYNAMIC_ARRAY tmp_keyuse;
-/* Swap the current and the backup keyuse internal arrays. */
-tmp_keyuse= keyuse;
-keyuse= save_to->keyuse; /* keyuse is reset to an empty array. */
-save_to->keyuse= tmp_keyuse;
+DYNAMIC_ARRAY tmp_keyuse;
+/* Swap the current and the backup keyuse internal arrays. */
+tmp_keyuse= keyuse;
+keyuse= save_to->keyuse; /* keyuse is reset to an empty array. */
+save_to->keyuse= tmp_keyuse;
 
 for (uint i= 0; i < table_count; i++)
 {
 save_to->join_tab_keyuse[i]= join_tab[i].keyuse;
 join_tab[i].keyuse= NULL;
 save_to->join_tab_checked_keys[i]= join_tab[i].checked_keys;
 join_tab[i].checked_keys.clear_all();
-}
 }
 memcpy((uchar*) save_to->best_positions, (uchar*) best_positions,
 sizeof(POSITION) * (table_count + 1));
@@ -25517,20 +25523,17 @@ void JOIN::reset_query_plan()
 
 void JOIN::restore_query_plan(Join_plan_state *restore_from)
 {
-if (restore_from->keyuse.elements)
-{
-DYNAMIC_ARRAY tmp_keyuse;
-tmp_keyuse= keyuse;
-keyuse= restore_from->keyuse;
-restore_from->keyuse= tmp_keyuse;
-
-for (uint i= 0; i < table_count; i++)
-{
-join_tab[i].keyuse= restore_from->join_tab_keyuse[i];
-join_tab[i].checked_keys= restore_from->join_tab_checked_keys[i];
-}
-
+DYNAMIC_ARRAY tmp_keyuse;
+tmp_keyuse= keyuse;
+keyuse= restore_from->keyuse;
+restore_from->keyuse= tmp_keyuse;
+
+for (uint i= 0; i < table_count; i++)
+{
+join_tab[i].keyuse= restore_from->join_tab_keyuse[i];
+join_tab[i].checked_keys= restore_from->join_tab_checked_keys[i];
 }
+
 memcpy((uchar*) best_positions, (uchar*) restore_from->best_positions,
 sizeof(POSITION) * (table_count + 1));
 /* Restore SJM nests */
@@ -5957,8 +5957,11 @@ drop_create_field:
 continue;
 
 /* Check if the table already has a PRIMARY KEY */
-bool dup_primary_key= key->type == Key::PRIMARY &&
-table->s->primary_key != MAX_KEY;
+bool dup_primary_key=
+key->type == Key::PRIMARY &&
+table->s->primary_key != MAX_KEY &&
+(keyname= table->s->key_info[table->s->primary_key].name) &&
+my_strcasecmp(system_charset_info, keyname, primary_key_name) == 0;
 if (dup_primary_key)
 goto remove_key;
 
sql/table.cc (10 changed lines)
@@ -5426,6 +5426,8 @@ Item *create_view_field(THD *thd, TABLE_LIST *view, Item **field_ref,
 Item_direct_view_ref(thd, &view->view->select_lex.context,
 field_ref, view->alias,
 name, view));
+if (!item)
+return NULL;
 /*
 Force creation of nullable item for the result tmp table for outer joined
 views/derived tables.
@@ -7356,7 +7358,15 @@ int TABLE_LIST::fetch_number_of_rows()
 {
 int error= 0;
 if (jtbm_subselect)
+{
+if (jtbm_subselect->is_jtbm_merged)
+{
+table->file->stats.records= jtbm_subselect->jtbm_record_count;
+set_if_bigger(table->file->stats.records, 2);
+table->used_stat_records= table->file->stats.records;
+}
 return 0;
+}
 if (is_materialized_derived() && !fill_me)
 
 {
@@ -1888,7 +1888,7 @@ maria_declare_plugin(archive)
 &archive_storage_engine,
 "ARCHIVE",
 "Brian Aker, MySQL AB",
-"Archive storage engine",
+"gzip-compresses tables for a low storage footprint",
 PLUGIN_LICENSE_GPL,
 archive_db_init, /* Plugin Init */
 NULL, /* Plugin Deinit */
@@ -1788,7 +1788,7 @@ maria_declare_plugin(csv)
 &csv_storage_engine,
 "CSV",
 "Brian Aker, MySQL AB",
-"CSV storage engine",
+"Stores tables as CSV files",
 PLUGIN_LICENSE_GPL,
 tina_init_func, /* Plugin Init */
 tina_done_func, /* Plugin Deinit */
@ -3472,7 +3472,7 @@ maria_declare_plugin(federated)
|
|||||||
&federated_storage_engine,
|
&federated_storage_engine,
|
||||||
"FEDERATED",
|
"FEDERATED",
|
||||||
"Patrick Galbraith and Brian Aker, MySQL AB",
|
"Patrick Galbraith and Brian Aker, MySQL AB",
|
||||||
"Federated MySQL storage engine",
|
"Allows to access tables on other MariaDB servers",
|
||||||
PLUGIN_LICENSE_GPL,
|
PLUGIN_LICENSE_GPL,
|
||||||
federated_db_init, /* Plugin Init */
|
federated_db_init, /* Plugin Init */
|
||||||
federated_done, /* Plugin Deinit */
|
federated_done, /* Plugin Deinit */
|
||||||
|
@ -3647,7 +3647,7 @@ maria_declare_plugin(federatedx)
|
|||||||
&federatedx_storage_engine,
|
&federatedx_storage_engine,
|
||||||
"FEDERATED",
|
"FEDERATED",
|
||||||
"Patrick Galbraith",
|
"Patrick Galbraith",
|
||||||
"FederatedX pluggable storage engine",
|
"Allows to access tables on other MariaDB servers, supports transactions and more",
|
||||||
PLUGIN_LICENSE_GPL,
|
PLUGIN_LICENSE_GPL,
|
||||||
federatedx_db_init, /* Plugin Init */
|
federatedx_db_init, /* Plugin Init */
|
||||||
federatedx_done, /* Plugin Deinit */
|
federatedx_done, /* Plugin Deinit */
|
||||||
|
@ -2185,6 +2185,24 @@ loop:
 #endif
 }
 
+/** Handle RENAME error.
+@param name	old name of the file
+@param new_name	new name of the file */
+static void os_file_handle_rename_error(const char* name, const char* new_name)
+{
+	if (os_file_get_last_error(true) != OS_FILE_DISK_FULL) {
+		ib_logf(IB_LOG_LEVEL_ERROR, "Cannot rename file '%s' to '%s'",
+			name, new_name);
+	} else if (!os_has_said_disk_full) {
+		os_has_said_disk_full = true;
+		/* Disk full error is reported irrespective of the
+		on_error_silent setting. */
+		ib_logf(IB_LOG_LEVEL_ERROR,
+			"Full disk prevents renaming file '%s' to '%s'",
+			name, new_name);
+	}
+}
+
 /***********************************************************************//**
 NOTE! Use the corresponding macro os_file_rename(), not directly this function!
 Renames a file (can also move it to another directory). It is safest that the
@ -2220,8 +2238,7 @@ os_file_rename_func(
 		return(TRUE);
 	}
 
-	os_file_handle_error_no_exit(oldpath, "rename", FALSE, __FILE__, __LINE__);
+	os_file_handle_rename_error(oldpath, newpath);
 
 	return(FALSE);
 #else
 	int ret;
@ -2230,8 +2247,7 @@ os_file_rename_func(
 	ret = rename(oldpath, newpath);
 
 	if (ret != 0) {
-		os_file_handle_error_no_exit(oldpath, "rename", FALSE, __FILE__, __LINE__);
+		os_file_handle_rename_error(oldpath, newpath);
 
 		return(FALSE);
 	}
 
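The new os_file_handle_rename_error() helper separates the disk-full case from every other rename failure and reports disk-full only once (guarded by os_has_said_disk_full). A rough standalone analogue of that decision using errno/ENOSPC instead of the InnoDB error facilities; the names below are stand-ins, not InnoDB APIs:

    #include <cerrno>
    #include <cstdio>
    #include <cstring>

    static bool said_disk_full = false;   // emit the disk-full message only once

    // Call after rename() fails; errno still holds the failure cause.
    static void report_rename_error(const char *old_name, const char *new_name)
    {
        if (errno != ENOSPC) {
            std::fprintf(stderr, "Cannot rename file '%s' to '%s': %s\n",
                         old_name, new_name, std::strerror(errno));
        } else if (!said_disk_full) {
            said_disk_full = true;
            std::fprintf(stderr, "Full disk prevents renaming file '%s' to '%s'\n",
                         old_name, new_name);
        }
    }

    int main()
    {
        if (std::rename("old.ibd", "new.ibd") != 0)   // hypothetical file names
            report_rename_error("old.ibd", "new.ibd");
        return 0;
    }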
@ -2470,7 +2470,7 @@ maria_declare_plugin(myisam)
   &myisam_storage_engine,
   "MyISAM",
   "MySQL AB",
-  "MyISAM storage engine",
+  "Non-transactional engine with good performance and small data footprint",
   PLUGIN_LICENSE_GPL,
   myisam_init, /* Plugin Init */
   NULL, /* Plugin Deinit */
@ -44,6 +44,7 @@ IF(NOT LIBJEMALLOC)
   MESSAGE(WARNING "TokuDB is enabled, but jemalloc is not. This configuration is not supported")
 ENDIF()
 
+MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-shadow")
 MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-vla" DEBUG)
 MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-implicit-fallthrough")
 
@ -149,7 +149,7 @@ set_cflags_if_supported(
   -Wmissing-prototypes
   -Wmissing-declarations
   -Wpointer-arith
-  -Wshadow
+  #-Wshadow will fail with GCC-8
   ${OPTIONAL_CFLAGS}
   ## other flags to try:
   #-Wunsafe-loop-optimizations
@ -821,22 +821,22 @@ int toku_ftnode_fetch_callback(CACHEFILE UU(cachefile),
             fprintf(
                 stderr,
                 "%s:%d:toku_ftnode_fetch_callback - "
-                "file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
+                "file[%s], blocknum[%lld], toku_deserialize_ftnode_from "
                 "failed with a checksum error.\n",
                 __FILE__,
                 __LINE__,
                 toku_cachefile_fname_in_env(cachefile),
-                blocknum.b);
+                (longlong)blocknum.b);
         } else {
             fprintf(
                 stderr,
                 "%s:%d:toku_ftnode_fetch_callback - "
-                "file[%s], blocknum[%ld], toku_deserialize_ftnode_from "
+                "file[%s], blocknum[%lld], toku_deserialize_ftnode_from "
                 "failed with %d.\n",
                 __FILE__,
                 __LINE__,
                 toku_cachefile_fname_in_env(cachefile),
-                blocknum.b,
+                (longlong)blocknum.b,
                 r);
         }
         // make absolutely sure we crash before doing anything else.
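Most of the PerconaFT changes that follow are the same fix repeated: blocknum.b is a 64-bit value, and printing it with %ld is only correct where long happens to be 64 bits. Casting the argument to (long long)/(unsigned long long) and switching the conversion to %lld/%llu keeps the format string and the argument in agreement on every platform. A small self-contained illustration of the pattern (the <cinttypes> macro is the other common way to do it):

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main()
    {
        int64_t blocknum = INT64_C(1) << 40;   // does not fit in 32 bits

        // The cast guarantees the argument really is a long long, matching
        // %lld on both LP64 (Linux/macOS) and LLP64 (Windows) systems.
        std::printf("blocknum[%lld]\n", (long long) blocknum);

        // Equivalent alternative using the PRId64 format macro.
        std::printf("blocknum[%" PRId64 "]\n", blocknum);
        return 0;
    }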
@ -656,20 +656,20 @@ exit:
         fprintf(stderr, \
                 "%s:%d toku_deserialize_ft_from: " \
                 "filename[%s] " \
-                "r[%d] max_acceptable_lsn[%lu]" \
-                "r0[%d] checkpoint_lsn_0[%lu] checkpoint_count_0[%lu] " \
-                "r1[%d] checkpoint_lsn_1[%lu] checkpoint_count_1[%lu]\n", \
+                "r[%d] max_acceptable_lsn[%llu]" \
+                "r0[%d] checkpoint_lsn_0[%llu] checkpoint_count_0[%llu] " \
+                "r1[%d] checkpoint_lsn_1[%llu] checkpoint_count_1[%llu]\n", \
                 __FILE__, \
                 __LINE__, \
                 fn, \
                 r, \
-                max_acceptable_lsn.lsn, \
+                (ulonglong)max_acceptable_lsn.lsn, \
                 r0, \
-                checkpoint_lsn_0.lsn, \
-                checkpoint_count_0, \
+                (ulonglong)checkpoint_lsn_0.lsn, \
+                (ulonglong)checkpoint_count_0, \
                 r1, \
-                checkpoint_lsn_1.lsn, \
-                checkpoint_count_1);
+                (ulonglong)checkpoint_lsn_1.lsn, \
+                (ulonglong)checkpoint_count_1);
 
 int toku_deserialize_ft_from(int fd,
                              const char *fn,
@ -1170,11 +1170,11 @@ int verify_ftnode_sub_block(struct sub_block *sb,
         fprintf(
             stderr,
             "%s:%d:verify_ftnode_sub_block - "
-            "file[%s], blocknum[%ld], stored_xsum[%u] != actual_xsum[%u]\n",
+            "file[%s], blocknum[%lld], stored_xsum[%u] != actual_xsum[%u]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             stored_xsum,
             actual_xsum);
         dump_bad_block((Bytef *) sb->uncompressed_ptr, sb->uncompressed_size);
@ -1197,11 +1197,11 @@ static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) {
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_info - "
-            "file[%s], blocknum[%ld], verify_ftnode_sub_block failed with %d\n",
+            "file[%s], blocknum[%lld], verify_ftnode_sub_block failed with %d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            node->blocknum.b,
+            (longlong)node->blocknum.b,
             r);
         dump_bad_block(static_cast<unsigned char *>(sb->uncompressed_ptr),
                        sb->uncompressed_size);
@ -1253,11 +1253,11 @@ static int deserialize_ftnode_info(struct sub_block *sb, FTNODE node) {
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_info - "
-            "file[%s], blocknum[%ld], data_size[%d] != rb.ndone[%d]\n",
+            "file[%s], blocknum[%lld], data_size[%d] != rb.ndone[%d]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            node->blocknum.b,
+            (longlong)node->blocknum.b,
             data_size,
             rb.ndone);
         dump_bad_block(rb.buf, rb.size);
@ -1388,12 +1388,12 @@ static int deserialize_ftnode_partition(
     if (r != 0) {
         fprintf(stderr,
                 "%s:%d:deserialize_ftnode_partition - "
-                "file[%s], blocknum[%ld], "
+                "file[%s], blocknum[%lld], "
                 "verify_ftnode_sub_block failed with %d\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 r);
         goto exit;
     }
@ -1410,12 +1410,12 @@ static int deserialize_ftnode_partition(
     if (ch != FTNODE_PARTITION_MSG_BUFFER) {
         fprintf(stderr,
                 "%s:%d:deserialize_ftnode_partition - "
-                "file[%s], blocknum[%ld], ch[%d] != "
+                "file[%s], blocknum[%lld], ch[%d] != "
                 "FTNODE_PARTITION_MSG_BUFFER[%d]\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 ch,
                 FTNODE_PARTITION_MSG_BUFFER);
         dump_bad_block(rb.buf, rb.size);
@ -1433,12 +1433,12 @@ static int deserialize_ftnode_partition(
     if (ch != FTNODE_PARTITION_DMT_LEAVES) {
         fprintf(stderr,
                 "%s:%d:deserialize_ftnode_partition - "
-                "file[%s], blocknum[%ld], ch[%d] != "
+                "file[%s], blocknum[%lld], ch[%d] != "
                 "FTNODE_PARTITION_DMT_LEAVES[%d]\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 ch,
                 FTNODE_PARTITION_DMT_LEAVES);
         dump_bad_block(rb.buf, rb.size);
@ -1457,11 +1457,11 @@ static int deserialize_ftnode_partition(
     if (rb.ndone != rb.size) {
         fprintf(stderr,
                 "%s:%d:deserialize_ftnode_partition - "
-                "file[%s], blocknum[%ld], rb.ndone[%d] != rb.size[%d]\n",
+                "file[%s], blocknum[%lld], rb.ndone[%d] != rb.size[%d]\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 rb.ndone,
                 rb.size);
         dump_bad_block(rb.buf, rb.size);
@ -1485,12 +1485,12 @@ static int decompress_and_deserialize_worker(struct rbuf curr_rbuf,
         const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
         fprintf(stderr,
                 "%s:%d:decompress_and_deserialize_worker - "
-                "file[%s], blocknum[%ld], read_and_decompress_sub_block failed "
+                "file[%s], blocknum[%lld], read_and_decompress_sub_block failed "
                 "with %d\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 r);
         dump_bad_block(curr_rbuf.buf, curr_rbuf.size);
         goto exit;
@ -1502,12 +1502,12 @@ static int decompress_and_deserialize_worker(struct rbuf curr_rbuf,
         const char *fname = toku_ftnode_get_cachefile_fname_in_env(node);
         fprintf(stderr,
                 "%s:%d:decompress_and_deserialize_worker - "
-                "file[%s], blocknum[%ld], deserialize_ftnode_partition failed "
+                "file[%s], blocknum[%lld], deserialize_ftnode_partition failed "
                 "with %d\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 r);
         dump_bad_block(curr_rbuf.buf, curr_rbuf.size);
         goto exit;
@ -1582,11 +1582,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], rb->size[%u] < 24\n",
+            "file[%s], blocknum[%lld], rb->size[%u] < 24\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             rb->size);
         dump_bad_block(rb->buf, rb->size);
         // TODO: What error do we return here?
@ -1602,12 +1602,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], unrecognized magic number "
+            "file[%s], blocknum[%lld], unrecognized magic number "
            "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             static_cast<const uint8_t*>(magic)[0],
             static_cast<const uint8_t*>(magic)[1],
             static_cast<const uint8_t*>(magic)[2],
@ -1627,12 +1627,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], node->layout_version_read_from_disk[%d] "
+            "file[%s], blocknum[%lld], node->layout_version_read_from_disk[%d] "
             "< FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES[%d]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             node->layout_version_read_from_disk,
             FT_FIRST_LAYOUT_VERSION_WITH_BASEMENT_NODES);
         dump_bad_block(rb->buf, rb->size);
@ -1667,11 +1667,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], needed_size[%d] > rb->size[%d]\n",
+            "file[%s], blocknum[%lld], needed_size[%d] > rb->size[%d]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             needed_size,
             rb->size);
         dump_bad_block(rb->buf, rb->size);
@ -1695,11 +1695,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], stored_checksum[%d] != checksum[%d]\n",
+            "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             stored_checksum,
             checksum);
         dump_bad_block(rb->buf, rb->size);
@ -1717,12 +1717,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], rb->size[%d] - rb->ndone[%d] < "
+            "file[%s], blocknum[%lld], rb->size[%d] - rb->ndone[%d] < "
             "sb_node_info.compressed_size[%d] + 8\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             rb->size,
             rb->ndone,
             sb_node_info.compressed_size);
@ -1744,11 +1744,11 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], sb_node_info.xsum[%d] != actual_xsum[%d]\n",
+            "file[%s], blocknum[%lld], sb_node_info.xsum[%d] != actual_xsum[%d]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             sb_node_info.xsum,
             actual_xsum);
         dump_bad_block(rb->buf, rb->size);
@ -1774,12 +1774,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], deserialize_ftnode_info failed with "
+            "file[%s], blocknum[%lld], deserialize_ftnode_info failed with "
             "%d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             r);
         dump_bad_block(
             static_cast<unsigned char *>(sb_node_info.uncompressed_ptr),
@ -1812,12 +1812,12 @@ static int deserialize_ftnode_header_from_rbuf_if_small_enough(
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_header_from_rbuf_if_small_enough - "
-            "file[%s], blocknum[%ld], toku_ftnode_pf_callback failed with "
+            "file[%s], blocknum[%lld], toku_ftnode_pf_callback failed with "
             "%d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             r);
         dump_bad_block(rb->buf, rb->size);
         goto cleanup;
@ -2164,12 +2164,12 @@ static int deserialize_and_upgrade_ftnode(FTNODE node,
         const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
         fprintf(stderr,
                 "%s:%d:deserialize_and_upgrade_ftnode - "
-                "file[%s], blocknum[%ld], "
+                "file[%s], blocknum[%lld], "
                 "read_and_decompress_block_from_fd_into_rbuf failed with %d\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                blocknum.b,
+                (longlong)blocknum.b,
                 r);
         goto exit;
     }
@ -2190,12 +2190,12 @@ static int deserialize_and_upgrade_ftnode(FTNODE node,
         const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
         fprintf(stderr,
                 "%s:%d:deserialize_and_upgrade_ftnode - "
-                "file[%s], blocknum[%ld], version[%d] > "
+                "file[%s], blocknum[%lld], version[%d] > "
                 "FT_LAYOUT_VERSION_14[%d]\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                blocknum.b,
+                (longlong)blocknum.b,
                 version,
                 FT_LAYOUT_VERSION_14);
         dump_bad_block(rb.buf, rb.size);
@ -2278,12 +2278,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
         memcmp(magic, "tokunode", 8) != 0) {
         fprintf(stderr,
                 "%s:%d:deserialize_ftnode_from_rbuf - "
-                "file[%s], blocknum[%ld], unrecognized magic number "
+                "file[%s], blocknum[%lld], unrecognized magic number "
                 "%2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x %2.2x\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                blocknum.b,
+                (longlong)blocknum.b,
                 static_cast<const uint8_t *>(magic)[0],
                 static_cast<const uint8_t *>(magic)[1],
                 static_cast<const uint8_t *>(magic)[2],
@ -2309,12 +2309,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
     if (r != 0) {
         fprintf(stderr,
                 "%s:%d:deserialize_ftnode_from_rbuf - "
-                "file[%s], blocknum[%ld], deserialize_and_upgrade_ftnode "
+                "file[%s], blocknum[%lld], deserialize_and_upgrade_ftnode "
                 "failed with %d\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                blocknum.b,
+                (longlong)blocknum.b,
                 r);
         dump_bad_block(rb->buf, rb->size);
         goto cleanup;
@ -2355,11 +2355,11 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_from_rbuf - "
-            "file[%s], blocknum[%ld], stored_checksum[%d] != checksum[%d]\n",
+            "file[%s], blocknum[%lld], stored_checksum[%d] != checksum[%d]\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             stored_checksum,
             checksum);
         dump_bad_block(rb->buf, rb->size);
@ -2377,12 +2377,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_from_rbuf - "
-            "file[%s], blocknum[%ld], read_and_decompress_sub_block failed "
+            "file[%s], blocknum[%lld], read_and_decompress_sub_block failed "
            "with %d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             r);
         dump_bad_block(
             static_cast<unsigned char *>(sb_node_info.uncompressed_ptr),
@ -2398,12 +2398,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_from_rbuf - "
-            "file[%s], blocknum[%ld], deserialize_ftnode_info failed with "
+            "file[%s], blocknum[%lld], deserialize_ftnode_info failed with "
             "%d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             r);
         dump_bad_block(rb->buf, rb->size);
         goto cleanup;
@ -2470,12 +2470,12 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_from_rbuf - "
-            "file[%s], blocknum[%ld], childnum[%d], "
+            "file[%s], blocknum[%lld], childnum[%d], "
             "decompress_and_deserialize_worker failed with %d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             i,
             r);
         dump_bad_block(rb->buf, rb->size);
@ -2490,13 +2490,13 @@ static int deserialize_ftnode_from_rbuf(FTNODE *ftnode,
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_from_rbuf - "
-            "file[%s], blocknum[%ld], childnum[%d], "
+            "file[%s], blocknum[%lld], childnum[%d], "
             "check_and_copy_compressed_sub_block_worker failed with "
             "%d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             i,
             r);
         dump_bad_block(rb->buf, rb->size);
@ -2641,12 +2641,12 @@ int toku_deserialize_bp_from_compressed(FTNODE node,
         const char* fname = toku_cachefile_fname_in_env(bfe->ft->cf);
         fprintf(stderr,
                 "%s:%d:toku_deserialize_bp_from_compressed - "
-                "file[%s], blocknum[%ld], "
+                "file[%s], blocknum[%lld], "
                 "deserialize_ftnode_partition failed with %d\n",
                 __FILE__,
                 __LINE__,
                 fname ? fname : "unknown",
-                node->blocknum.b,
+                (longlong)node->blocknum.b,
                 r);
         dump_bad_block(static_cast<unsigned char *>(curr_sb->compressed_ptr),
                        curr_sb->compressed_size);
@ -2689,12 +2689,12 @@ static int deserialize_ftnode_from_fd(int fd,
         fprintf(
             stderr,
             "%s:%d:deserialize_ftnode_from_fd - "
-            "file[%s], blocknum[%ld], deserialize_ftnode_from_rbuf failed with "
+            "file[%s], blocknum[%lld], deserialize_ftnode_from_rbuf failed with "
             "%d\n",
             __FILE__,
             __LINE__,
             fname ? fname : "unknown",
-            blocknum.b,
+            (longlong)blocknum.b,
             r);
         dump_bad_block(rb.buf, rb.size);
     }
@ -107,7 +107,7 @@ size_t toku_malloc_usable_size(void *p) __attribute__((__visibility__("default")
 #define XMALLOC(v) CAST_FROM_VOIDP(v, toku_xmalloc(sizeof(*v)))
 #define XMALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xmalloc((n)*sizeof(*v)))
 #define XCALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xcalloc((n), (sizeof(*v))))
-#define XCALLOC(v) XCALLOC_N(1,(v))
+#define XCALLOC(v) XCALLOC_N(1,v)
 #define XREALLOC(v,s) CAST_FROM_VOIDP(v, toku_xrealloc(v, s))
 #define XREALLOC_N(n,v) CAST_FROM_VOIDP(v, toku_xrealloc(v, (n)*sizeof(*v)))
 
@ -62,9 +62,6 @@ inline void toku_debug_sync(struct tokutxn *txn, const char *sync_point_name) {
     void *client_extra;
     THD *thd;
 
-    if (likely(!opt_debug_sync_timeout))
-        return;
-
     toku_txn_get_client_id(txn, &client_id, &client_extra);
     thd = reinterpret_cast<THD *>(client_extra);
     DEBUG_SYNC(thd, sync_point_name);
@ -162,10 +162,20 @@ typedef struct toku_mutex_aligned {
 #define ZERO_COND_INITIALIZER \
     { 0 }
 #elif defined(__APPLE__)
+#if TOKU_PTHREAD_DEBUG
 #define ZERO_COND_INITIALIZER \
     { \
-        { 0 } \
+        { 0 , { 0 } }, \
+        nullptr, \
+        0 \
     }
+#else
+#define ZERO_COND_INITIALIZER \
+    { \
+        { 0 , { 0 } }, \
+        nullptr \
+    }
+#endif
 #else // __linux__, at least
 #define ZERO_COND_INITIALIZER \
     {}
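The macOS branch of ZERO_COND_INITIALIZER now spells out one initializer per member of the condition-variable wrapper, with an extra field when TOKU_PTHREAD_DEBUG is set; the point is simply that an aggregate initializer has to mirror the wrapper struct's member list on that platform. A reduced illustration with a hypothetical wrapper type (this is not the real toku_cond_t layout, and PTHREAD_COND_INITIALIZER is used here to sidestep the platform-specific brace shape):

    #include <pthread.h>
    #include <cstdio>

    // Hypothetical wrapper resembling the shape implied by the initializers above:
    // a pthread condition variable plus bookkeeping, one member debug-only.
    struct cond_wrapper {
        pthread_cond_t cond;
        const void    *instr_key;    // stand-in for an instrumentation pointer
    #ifdef WRAPPER_DEBUG
        unsigned       valid;        // debug-only member needs its own initializer
    #endif
    };

    #ifdef WRAPPER_DEBUG
    #define ZERO_WRAPPER_INITIALIZER { PTHREAD_COND_INITIALIZER, nullptr, 0 }
    #else
    #define ZERO_WRAPPER_INITIALIZER { PTHREAD_COND_INITIALIZER, nullptr }
    #endif

    static cond_wrapper the_cond = ZERO_WRAPPER_INITIALIZER;

    int main()
    {
        std::printf("statically initialized wrapper at %p\n", (void *) &the_cond);
        return 0;
    }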
@ -18,7 +18,7 @@ set(tokudb_srcs
 ## make the shared library
 add_library(${LIBTOKUDB} SHARED ${tokudb_srcs})
 add_dependencies(${LIBTOKUDB} install_tdb_h generate_log_code)
-target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy ${LIBTOKUPORTABILITY})
+target_link_libraries(${LIBTOKUDB} LINK_PRIVATE locktree_static ft_static util_static lzma snappy dbug ${LIBTOKUPORTABILITY})
 target_link_libraries(${LIBTOKUDB} LINK_PUBLIC ${ZLIB_LIBRARY} )
 
 ## make the static library
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/autogen.sh (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/compile (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.guess (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.rpath (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/config.sub (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/depcomp (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/install-sh (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/ltmain.sh (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/build-aux/missing (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/extra/7z2lzma/7z2lzma.bash (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_compress.sh (vendored)	Normal file → Executable file
0	storage/tokudb/PerconaFT/third_party/xz-4.999.9beta/tests/test_files.sh (vendored)	Normal file → Executable file
@ -80,8 +80,8 @@ void dmt<dmtdata_t, dmtdataout_t, dmtwriter_t>::create_from_sorted_memory_of_fix
     paranoid_invariant(numvalues > 0);
     void *ptr = toku_mempool_malloc(&this->mp, aligned_memsize);
     paranoid_invariant_notnull(ptr);
-    uint8_t * const CAST_FROM_VOIDP(dest, ptr);
-    const uint8_t * const CAST_FROM_VOIDP(src, mem);
+    uint8_t * CAST_FROM_VOIDP(dest, ptr);
+    const uint8_t * CAST_FROM_VOIDP(src, mem);
     if (pad_bytes == 0) {
         paranoid_invariant(aligned_memsize == mem_length);
         memcpy(dest, src, aligned_memsize);
@ -127,7 +127,7 @@ public:
         paranoid_invariant(index != NODE_NULL);
         m_index = index;
     }
-} __attribute__((__packed__,aligned(4)));
+} ;
 
 template<>
 class subtree_templated<true> {
@ -184,7 +184,7 @@ public:
     inline void disable_bit(void) {
         m_bitfield &= MASK_INDEX;
     }
-} __attribute__((__packed__)) ;
+} ;
 
 template<typename omtdata_t, bool subtree_supports_marks>
 class omt_node_templated {
@ -197,7 +197,7 @@ public:
     // this needs to be in both implementations because we don't have
     // a "static if" the caller can use
     inline void clear_stolen_bits(void) {}
-} __attribute__((__packed__,aligned(4)));
+} ;
 
 template<typename omtdata_t>
 class omt_node_templated<omtdata_t, true> {
@ -234,7 +234,7 @@ public:
         this->unset_marked_bit();
         this->unset_marks_below_bit();
     }
-} __attribute__((__packed__,aligned(4)));
+} ;
 
 }
 
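The four hunks above only drop __attribute__((__packed__)) (and the aligned(4) pairing) from these node classes; the diff does not state the motivation, but packed layouts are a frequent source of misaligned-access trouble, and newer GCC/Clang versions warn when such members are reached through pointers or references. A minimal illustration of what the attribute changes, using a hypothetical struct (the attribute is a GCC/Clang extension):

    #include <cstdint>
    #include <cstdio>

    struct plain {                                  // natural alignment: padding after 'tag'
        uint8_t  tag;
        uint32_t index;
    };

    struct __attribute__((__packed__)) packed {     // no padding; 'index' may be misaligned
        uint8_t  tag;
        uint32_t index;
    };

    int main()
    {
        // Typically prints 8 and 5 with GCC/Clang; a pointer to packed::index
        // is not guaranteed to be 4-byte aligned.
        std::printf("sizeof(plain)=%zu sizeof(packed)=%zu\n",
                    sizeof(plain), sizeof(packed));
        return 0;
    }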
@ -111,7 +111,6 @@ public:
 
     // wait for the event to become signalled
     void wait(void);
-    int wait(ulonglong microseconds);
 
     // signal the event
     void signal(void);
@ -152,7 +151,6 @@ public:
 
     // wait for the semaphore to become signalled
     E_WAIT wait(void);
-    E_WAIT wait(ulonglong microseconds);
 
     // signal the semaphore to increase the count
     // return true if signalled, false if ignored due to count
@ -372,28 +370,6 @@ inline void event_t::wait(void) {
     assert_debug(r == 0);
     return;
 }
-inline int event_t::wait(ulonglong microseconds) {
-    timespec waittime = time::offset_timespec(microseconds);
-    int r = pthread_mutex_timedlock(&_mutex, &waittime);
-    if (r == ETIMEDOUT) return ETIMEDOUT;
-    assert_debug(r == 0);
-    while (_signalled == false && _pulsed == false) {
-        r = pthread_cond_timedwait(&_cond, &_mutex, &waittime);
-        if (r == ETIMEDOUT) {
-            r = pthread_mutex_unlock(&_mutex);
-            assert_debug(r == 0);
-            return ETIMEDOUT;
-        }
-        assert_debug(r == 0);
-    }
-    if (_manual_reset == false)
-        _signalled = false;
-    if (_pulsed)
-        _pulsed = false;
-    r = pthread_mutex_unlock(&_mutex);
-    assert_debug(r == 0);
-    return 0;
-}
 inline void event_t::signal(void) {
     int r MY_ATTRIBUTE((unused)) = pthread_mutex_lock(&_mutex);
     assert_debug(r == 0);
@ -479,31 +455,6 @@ inline semaphore_t::E_WAIT semaphore_t::wait(void) {
     assert_debug(r == 0);
     return ret;
 }
-inline semaphore_t::E_WAIT semaphore_t::wait(ulonglong microseconds) {
-    E_WAIT ret;
-    timespec waittime = time::offset_timespec(microseconds);
-    int r = pthread_mutex_timedlock(&_mutex, &waittime);
-    if (r == ETIMEDOUT) return E_TIMEDOUT;
-    assert_debug(r == 0);
-    while (_signalled == 0 && _interrupted == false) {
-        r = pthread_cond_timedwait(&_cond, &_mutex, &waittime);
-        if (r == ETIMEDOUT) {
-            r = pthread_mutex_unlock(&_mutex);
-            assert_debug(r == 0);
-            return E_TIMEDOUT;
-        }
-        assert_debug(r == 0);
-    }
-    if (_interrupted) {
-        ret = E_INTERRUPTED;
-    } else {
-        _signalled--;
-        ret = E_SIGNALLED;
-    }
-    r = pthread_mutex_unlock(&_mutex);
-    assert_debug(r == 0);
-    return ret;
-}
 inline bool semaphore_t::signal(void) {
     bool ret = false;
     int r MY_ATTRIBUTE((unused)) = pthread_mutex_lock(&_mutex);
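The two blocks removed above implemented relative-timeout waits directly over pthreads (pthread_mutex_timedlock plus pthread_cond_timedwait against a precomputed deadline). For reference only, the same idea in portable C++11, which is how a timed event wait would typically be written outside this codebase; this is a sketch, not the removed API:

    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>

    class timed_event {
    public:
        // Returns true if signalled before the timeout, false on timeout.
        bool wait_for_us(unsigned long long microseconds) {
            std::unique_lock<std::mutex> lock(mutex_);
            bool ok = cond_.wait_for(lock, std::chrono::microseconds(microseconds),
                                     [this] { return signalled_; });
            if (ok)
                signalled_ = false;            // auto-reset semantics
            return ok;
        }

        void signal() {
            { std::lock_guard<std::mutex> lock(mutex_); signalled_ = true; }
            cond_.notify_one();
        }

    private:
        std::mutex mutex_;
        std::condition_variable cond_;
        bool signalled_ = false;
    };

    int main()
    {
        timed_event ev;
        ev.signal();
        std::printf("%s\n", ev.wait_for_us(1000) ? "signalled" : "timed out");
        return 0;
    }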
@ -2359,6 +2359,24 @@ loop:
 #endif
 }
 
+/** Handle RENAME error.
+@param name	old name of the file
+@param new_name	new name of the file */
+static void os_file_handle_rename_error(const char* name, const char* new_name)
+{
+	if (os_file_get_last_error(true) != OS_FILE_DISK_FULL) {
+		ib_logf(IB_LOG_LEVEL_ERROR, "Cannot rename file '%s' to '%s'",
+			name, new_name);
+	} else if (!os_has_said_disk_full) {
+		os_has_said_disk_full = true;
+		/* Disk full error is reported irrespective of the
+		on_error_silent setting. */
+		ib_logf(IB_LOG_LEVEL_ERROR,
+			"Full disk prevents renaming file '%s' to '%s'",
+			name, new_name);
+	}
+}
+
 /***********************************************************************//**
 NOTE! Use the corresponding macro os_file_rename(), not directly this function!
 Renames a file (can also move it to another directory). It is safest that the
@ -2394,7 +2412,7 @@ os_file_rename_func(
 		return(TRUE);
 	}
 
-	os_file_handle_error_no_exit(oldpath, "rename", FALSE, __FILE__, __LINE__);
+	os_file_handle_rename_error(oldpath, newpath);
 
 	return(FALSE);
 #else
@ -2404,7 +2422,7 @@ os_file_rename_func(
 	ret = rename(oldpath, newpath);
 
 	if (ret != 0) {
-		os_file_handle_error_no_exit(oldpath, "rename", FALSE, __FILE__, __LINE__);
+		os_file_handle_rename_error(oldpath, newpath);
 
 		return(FALSE);
 	}
 