Merge remote-tracking branch 'origin/10.11' into 11.1
@@ -424,7 +424,8 @@ fedora install:
       - installed-database.sql
       - upgraded-database.sql
 
 cppcheck:
+  allow_failure: true
   stage: sast
   needs: []
   variables:
@@ -432,33 +433,57 @@ cppcheck:
     GIT_SUBMODULE_STRATEGY: normal
   script:
     - yum install -y cppcheck diffutils
-    # --template: use a single-line template
+    # --template: output format
     # --force: check large directories without warning
     # -i<directory>: ignore this directory when scanning
+    # -I<directory>: include path, reduces false positives
+    #                related to inability to resolve symbols
    # -j: run multiple cppcheck threads
     # Use newline to escape colon in yaml
     - >
-      cppcheck --template="{file}:{line}: {severity}: {message}" --force
+      cppcheck --template="{file}:{line}\n{code}\n{severity}: {message}" --force --check-level=exhaustive
       client dbug extra include libmariadb libmysqld libservices mysql-test mysys mysys_ssl pcre plugin
       strings tests unittest vio wsrep-lib sql sql-common storage
       -istorage/mroonga -istorage/tokudb -istorage/spider -istorage/rocksdb -iextra/ -ilibmariadb/ -istorage/columnstore
-      --output-file=cppcheck.txt -j $(nproc)
-    # Parallel jobs may output findings in an nondeterministic order. Sort to match ignorelist.
-    - cat cppcheck.txt | sort > cppcheck_sorted.txt
-    # Remove line numbers for diff
-    - sed 's/:[^:]*:/:/' cppcheck_sorted.txt > cppcheck_sorted_no_line_numbers.txt
+      -Iinclude -Istorage/innobase/include
+      --output-file=initial-cppcheck_output.txt -j $(nproc)
+    # when including {code} in the cppcheck template, some more pre-processing needs to be done
+    #
+    # sample cppcheck finding: <file>:<line>
+    #                              foo.bar()
+    #                                  ^
+    #                          <severity>: <message>
+    #
+    # 1. remove all lines with "^"
+    # 2. merge every 3 lines into 1 so it can be sorted (example: <file> foo.bar() <severity>: <message>)
+    # 3. sort to match ignorelist since parallel jobs may output findings in an nondeterministic order
+    # 4. remove findings likely to be false positives (i.e, "unknown macros")
+    # 5. remove line numbers for diffing against ignorelist
+    - |
+      cat initial-cppcheck_output.txt | grep -v '\^$' > preprocessed-cppcheck_circumflex_removed.txt
+      cat preprocessed-cppcheck_circumflex_removed.txt | awk 'NR%3==1 {printf "%s", (NR==1) ? "" : "\n"; printf "%s", $0} NR%3!=1 {printf " %s", $0}' > preprocessed-cppcheck_oneline.txt
+      cat preprocessed-cppcheck_oneline.txt | sort > preprocessed-cppcheck_sorted.txt
+      cat preprocessed-cppcheck_sorted.txt | grep -v "There is an unknown macro here somewhere" > results-cppcheck_all_findings.txt
+      sed 's/:[0-9]\+//' results-cppcheck_all_findings.txt > preprocessed_final-cppcheck_no_line_nums.txt
     # Only print new issues not found in ignore list
     - echo "Problems found in ignore list that were not discovered by cppcheck (may have been fixed)."
-    - diff --changed-group-format='%>' --unchanged-group-format='' cppcheck_sorted_no_line_numbers.txt tests/code_quality/cppcheck_ignorelist.txt || true
+    - diff --changed-group-format='%>' --unchanged-group-format='' preprocessed_final-cppcheck_no_line_nums.txt tests/code_quality/cppcheck_ignorelist.txt || true
     - echo "Problems found by cppcheck that were not in ignore list."
-    - diff --changed-group-format='%<' --unchanged-group-format='' cppcheck_sorted_no_line_numbers.txt tests/code_quality/cppcheck_ignorelist.txt > lines_not_ignored.txt || true
-    - cat lines_not_ignored.txt && test ! -s lines_not_ignored.txt
+    - diff --changed-group-format='%<' --unchanged-group-format='' preprocessed_final-cppcheck_no_line_nums.txt tests/code_quality/cppcheck_ignorelist.txt > results-cppcheck_new_findings.txt || true
+    - cat results-cppcheck_new_findings.txt && test ! -s results-cppcheck_new_findings.txt
   artifacts:
     when: always
     paths:
-      - cppcheck_sorted.txt
+      # save all steps of pre-processing in-case it ever breaks
+      - initial-cppcheck_output.txt
+      - preprocessed-cppcheck_circumflex_removed.txt
+      - preprocessed-cppcheck_sorted.txt
+      - preprocessed_final-cppcheck_no_line_nums.txt
+      - results-cppcheck_all_findings.txt
+      - results-cppcheck_new_findings.txt
 
 flawfinder:
+  allow_failure: true
   stage: sast
   needs: []
   variables:
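
With the new three-line --template above, every cppcheck finding spans three lines, and the awk step merges each triple back into one sortable record. For illustration only, a standalone sketch of just that step, run on a made-up finding (the path, code line, and message below are hypothetical; the "^" marker line is assumed to have been stripped already by the preceding grep -v '\^$'):

    printf 'sql/foo.cc:42\n  foo.bar();\nwarning: hypothetical message\n' |
      awk 'NR%3==1 {printf "%s", (NR==1) ? "" : "\n"; printf "%s", $0} NR%3!=1 {printf " %s", $0}'
    # prints the merged single-line record:
    # sql/foo.cc:42   foo.bar(); warning: hypothetical message
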
@@ -480,11 +505,12 @@ flawfinder:
     - echo "Problems found in ignore list that were not discovered by flawfinder (may have been fixed)."
     - diff --changed-group-format='%>' --unchanged-group-format='' flawfinder-min-level5.json tests/code_quality/flawfinder_ignorelist.json || true
     - echo "Problems found by flawfinder that were not in ignore list."
-    - diff --changed-group-format='%<' --unchanged-group-format='' flawfinder-min-level5.json tests/code_quality/flawfinder_ignorelist.json > lines_not_ignored.txt || true
-    - cat lines_not_ignored.txt && test ! -s lines_not_ignored.txt
+    - diff --changed-group-format='%<' --unchanged-group-format='' flawfinder-min-level5.json tests/code_quality/flawfinder_ignorelist.json > flawfinder_new_findings.txt || true
+    - cat flawfinder_new_findings.txt && test ! -s flawfinder_new_findings.txt
   artifacts:
     when: always
     paths:
+      - flawfinder_new_findings.txt
       - flawfinder-all-vulnerabilities.html
       - flawfinder-min-level5.json
 
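
Both jobs compare their sorted findings against a checked-in ignorelist with the same GNU diff idiom: --changed-group-format='%>' keeps only lines unique to the second file, '%<' only lines unique to the first. A toy sketch of that idiom (found.txt and ignore.txt are made-up stand-ins for the scanner output and the ignorelist):

    printf 'a\nb\nc\n' > found.txt     # sorted scanner findings
    printf 'b\nc\nd\n' > ignore.txt    # sorted ignorelist
    # only in the ignorelist, i.e. possibly fixed findings -- prints "d":
    diff --changed-group-format='%>' --unchanged-group-format='' found.txt ignore.txt || true
    # only in the scanner output, i.e. new findings -- prints "a":
    diff --changed-group-format='%<' --unchanged-group-format='' found.txt ignore.txt > new.txt || true
    cat new.txt && test ! -s new.txt   # a non-empty new.txt fails the job
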
@@ -22,7 +22,7 @@
 
 # InnoDB/Engines
 --let log_expected_matches = $log_slow_verbosity_innodb_expected_matches
---let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+\$
+--let grep_pattern = ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+\$
 --source include/log_grep.inc
 
 --let grep_pattern = ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+\$
@@ -1 +1 @@
---innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off
+--innodb-buffer-pool-size=32M --innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off
@@ -8,56 +8,20 @@ c varchar(255),
 d varchar(255),
 primary key(a,b,c,d)
 ) engine=innodb;
-SET STATEMENT unique_checks=0,foreign_key_checks=0 FOR
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
 insert into t1 select
 repeat(uuid(), 7),
 repeat(uuid(), 7),
 repeat(uuid(), 7),
 repeat(uuid(), 7)
 from seq_1_to_16384;
+commit;
 SET GLOBAL innodb_fast_shutdown=0;
 # restart
 set log_slow_verbosity='engine';
 set long_query_time=0.0;
 set @js='$analyze_output';
-select @js;
-@js
-{
-  "query_optimization": {
-    "r_total_time_ms": "REPLACED"
-  },
-  "query_block": {
-    "select_id": 1,
-    "cost": 0.011647987,
-    "r_loops": 1,
-    "r_total_time_ms": "REPLACED",
-    "nested_loop": [
-      {
-        "table": {
-          "table_name": "t1",
-          "access_type": "index",
-          "key": "PRIMARY",
-          "key_length": "1028",
-          "used_key_parts": ["a", "b", "c", "d"],
-          "loops": 1,
-          "r_loops": 1,
-          "rows": 1,
-          "r_rows": 16384,
-          "cost": 0.0110178,
-          "r_table_time_ms": "REPLACED",
-          "r_other_time_ms": "REPLACED",
-          "r_engine_stats": {
-            "pages_accessed": "REPLACED",
-            "pages_read_count": "REPLACED",
-            "pages_read_time_ms": "REPLACED"
-          },
-          "filtered": 100,
-          "r_filtered": 100
-        }
-      }
-    ]
-  }
-}
 set @pages_read_time_ms=
 (select json_value(@js,'$.query_block.nested_loop[0].table.r_engine_stats.pages_read_time_ms'));
 
@@ -18,15 +18,18 @@ create table t1 (
 primary key(a,b,c,d)
 ) engine=innodb;
 
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
 
 # The data size is 160K * 1K = 160M
 # 16M / (page_size=16K) = 1K pages.
-SET STATEMENT unique_checks=0,foreign_key_checks=0 FOR
 insert into t1 select
 repeat(uuid(), 7),
 repeat(uuid(), 7),
 repeat(uuid(), 7),
 repeat(uuid(), 7)
 from seq_1_to_16384;
+commit;
 
 SET GLOBAL innodb_fast_shutdown=0;
 source include/restart_mysqld.inc;
@@ -37,10 +40,6 @@ let $analyze_output= `analyze format=json
 select * from t1 force index (PRIMARY) order by a desc, b desc, c desc, d desc`;
 evalp set @js='$analyze_output';
 
-# Print it out for user-friendlines
---replace_regex /("(r_[a-z_]*_time_ms|pages[^"]*)": )[^, \n]*/\1"REPLACED"/
-select @js;
-
 set @pages_read_time_ms=
 (select json_value(@js,'$.query_block.nested_loop[0].table.r_engine_stats.pages_read_time_ms'));
 
mysql-test/main/analyze_stmt_prefetch_count.opt (new file)
@@ -0,0 +1 @@
+--innodb-buffer-pool-size=32M --innodb_buffer_pool_dump_at_shutdown=off --innodb_buffer_pool_load_at_startup=off --innodb-stats-persistent=1 --innodb-stats-auto-recalc=off
mysql-test/main/analyze_stmt_prefetch_count.result (new file)
@@ -0,0 +1,60 @@
+create table t1 (
+a varchar(255),
+b varchar(255),
+c varchar(255),
+d varchar(255),
+primary key(a,b,c,d)
+) engine=innodb;
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
+insert into t1 select
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7)
+from seq_1_to_16384;
+insert into t1 values ('z','z','z','z');
+commit;
+# Restart the server to make sure we have an empty InnoDB Buffer Pool
+# (in the test's .opt file we've disabled buffer pool saving/loading
+# and also tried to disable any background activity)
+SET GLOBAL innodb_fast_shutdown=0;
+# restart
+set @innodb_pages_read0=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.nested_loop[0].table.r_engine_stats');
+set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
+set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
+set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);
+select @pages_accessed > 1000 and @pages_accessed < 1500;
+@pages_accessed > 1000 and @pages_accessed < 1500
+1
+set @total_read = (@pages_read_count + @pages_prefetch_read_count);
+select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
+@pages_accessed*0.75 < @total_read @total_read < @pages_accessed*1.25
+1 1
+set @innodb_pages_read1=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);
+select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;
+@innodb_pages_read_incr > 1000 @innodb_pages_read_incr < 1500
+1 1
+set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.nested_loop[0].table.r_engine_stats');
+# This must just print pages_accessed. No page reads or prefetch reads,
+# because the previous query has read all the needed pages into the
+# buffer pool, which is set to be large enough to accomodate the whole
+# table.
+select @js;
+@js
+{"pages_accessed": NUMBER}
+set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);
+select @pages_accessed2 = @pages_accessed;
+@pages_accessed2 = @pages_accessed
+1
+drop table t1;
mysql-test/main/analyze_stmt_prefetch_count.test (new file)
@@ -0,0 +1,77 @@
+--source include/have_innodb.inc
+--source include/have_sequence.inc
+
+
+# Each row is 1K.
+create table t1 (
+a varchar(255),
+b varchar(255),
+c varchar(255),
+d varchar(255),
+primary key(a,b,c,d)
+) engine=innodb;
+
+# The data size is 16K * 1K = 16M
+# 16M / (page_size=16K) = 1K pages.
+SET unique_checks=0, foreign_key_checks= 0;
+begin;
+insert into t1 select
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7),
+repeat(uuid(), 7)
+from seq_1_to_16384;
+insert into t1 values ('z','z','z','z');
+commit;
+
+--echo # Restart the server to make sure we have an empty InnoDB Buffer Pool
+--echo # (in the test's .opt file we've disabled buffer pool saving/loading
+--echo # and also tried to disable any background activity)
+SET GLOBAL innodb_fast_shutdown=0;
+--source include/restart_mysqld.inc
+
+set @innodb_pages_read0=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+
+let $analyze_output= `analyze format=json
+select * from t1 force index (PRIMARY) order by a,b,c,d`;
+evalp set @js='$analyze_output';
+
+set @js=json_extract(@js, '$.query_block.nested_loop[0].table.r_engine_stats');
+#select @js;
+set @pages_accessed= cast(json_value(@js,'$.pages_accessed') as INT);
+set @pages_read_count= cast(json_value(@js,'$.pages_read_count') as INT);
+set @pages_prefetch_read_count= cast(json_value(@js,'$.pages_prefetch_read_count') as INT);
+
+select @pages_accessed > 1000 and @pages_accessed < 1500;
+
+set @total_read = (@pages_read_count + @pages_prefetch_read_count);
+
+select @pages_accessed*0.75 < @total_read, @total_read < @pages_accessed*1.25;
+
+set @innodb_pages_read1=
+(select variable_value
+from information_schema.session_status
+where variable_name like 'innodb_pages_read');
+
+set @innodb_pages_read_incr= (@innodb_pages_read1 - @innodb_pages_read0);
+
+select @innodb_pages_read_incr > 1000, @innodb_pages_read_incr < 1500;
+
+let $analyze_output= `analyze format=json
+select * from t1 force index (PRIMARY) order by a,b,c,d`;
+evalp set @js='$analyze_output';
+set @js=json_extract(@js, '$.query_block.nested_loop[0].table.r_engine_stats');
+
+--echo # This must just print pages_accessed. No page reads or prefetch reads,
+--echo # because the previous query has read all the needed pages into the
+--echo # buffer pool, which is set to be large enough to accomodate the whole
+--echo # table.
+--replace_regex /[0-9]+/NUMBER/
+select @js;
+set @pages_accessed2= cast(json_value(@js,'$.pages_accessed') as INT);
+
+select @pages_accessed2 = @pages_accessed;
+drop table t1;
@@ -6032,3 +6032,142 @@ a
 0
 DROP TABLE t1;
 End of 10.4 tests
+#
+# MDEV-29363: Constant subquery causing a crash in pushdown optimization
+#
+CREATE TABLE t1 (a INT, b INT, c INT);
+INSERT INTO t1 VALUES (3, 3, 4), (NULL, NULL, 2);
+EXPLAIN FORMAT=JSON SELECT a,b,c FROM t1 GROUP BY a,b,c
+HAVING a = (SELECT MIN(b) AS min_b FROM t1) and (a = b or a = c);
+EXPLAIN
+{
+  "query_block": {
+    "select_id": 1,
+    "cost": 0.011611947,
+    "filesort": {
+      "sort_key": "t1.b, t1.c",
+      "temporary_table": {
+        "nested_loop": [
+          {
+            "table": {
+              "table_name": "t1",
+              "access_type": "ALL",
+              "loops": 1,
+              "rows": 2,
+              "cost": 0.01034841,
+              "filtered": 100,
+              "attached_condition": "t1.a = (subquery#2) and (t1.b = (subquery#2) or t1.c = (subquery#2))"
+            }
+          }
+        ],
+        "subqueries": [
+          {
+            "query_block": {
+              "select_id": 2,
+              "cost": 0.01034841,
+              "nested_loop": [
+                {
+                  "table": {
+                    "table_name": "t1",
+                    "access_type": "ALL",
+                    "loops": 1,
+                    "rows": 2,
+                    "cost": 0.01034841,
+                    "filtered": 100
+                  }
+                }
+              ]
+            }
+          }
+        ]
+      }
+    }
+  }
+}
+SELECT a,b,c FROM t1 GROUP BY a,b,c
+HAVING a = (SELECT MIN(b) AS min_b FROM t1) and (a = b or a = c);
+a b c
+3 3 4
+EXPLAIN FORMAT=JSON SELECT a FROM t1 GROUP BY a,b
+HAVING a = (SELECT MIN(a) AS min_a FROM t1) AND (a = 3 or a > b);
+EXPLAIN
+{
+  "query_block": {
+    "select_id": 1,
+    "cost": 0.011611947,
+    "filesort": {
+      "sort_key": "t1.b",
+      "temporary_table": {
+        "nested_loop": [
+          {
+            "table": {
+              "table_name": "t1",
+              "access_type": "ALL",
+              "loops": 1,
+              "rows": 2,
+              "cost": 0.01034841,
+              "filtered": 100,
+              "attached_condition": "t1.a = (subquery#2) and (1 or (subquery#2) > t1.b)"
+            }
+          }
+        ],
+        "subqueries": [
+          {
+            "query_block": {
+              "select_id": 2,
+              "cost": 0.01034841,
+              "nested_loop": [
+                {
+                  "table": {
+                    "table_name": "t1",
+                    "access_type": "ALL",
+                    "loops": 1,
+                    "rows": 2,
+                    "cost": 0.01034841,
+                    "filtered": 100
+                  }
+                }
+              ]
+            }
+          }
+        ]
+      }
+    }
+  }
+}
+SELECT a FROM t1 GROUP BY a,b
+HAVING a = (SELECT MIN(a) AS min_a FROM t1) AND (a = 3 or a > b);
+a
+3
+DROP TABLE t1;
+#
+# MDEV-32424: Pushdown: server crashes at JOIN::save_explain_data()
+# (fixed by the patch for MDEV-29363)
+#
+CREATE TABLE t1 (a INT, b INT, c INT);
+INSERT INTO t1 VALUES (1, 1, 3), (3, 2, 3);
+SELECT a,b,c FROM t1 GROUP BY a,b,c
+HAVING a = (SELECT MIN(b) AS min_b FROM t1) and a IN (b, c);
+a b c
+1 1 3
+DROP TABLE t1;
+#
+# MDEV-32293: Pushdown: server crashes at check_simple_equality()
+# (fixed by the patch for MDEV-29363)
+#
+CREATE VIEW v1 AS SELECT 1 AS a;
+SELECT * FROM v1 GROUP BY a HAVING a = 'b' AND a = (a IS NULL);
+a
+Warnings:
+Warning 1292 Truncated incorrect DECIMAL value: 'b'
+DROP VIEW v1;
+#
+# MDEV-32304: Pushdown: server crashes at Item_field::used_tables()
+# (fixed by the patch for MDEV-29363)
+#
+CREATE VIEW v1 AS SELECT 1 AS a;
+SELECT * FROM v1
+GROUP BY a HAVING a = (a IS NULL OR a IS NULL);
+a
+DROP VIEW v1;
+End of 10.5 tests
@@ -1648,3 +1648,64 @@ SELECT a FROM t1 GROUP BY a HAVING NOT a;
 DROP TABLE t1;
 
 --echo End of 10.4 tests
+
+--echo #
+--echo # MDEV-29363: Constant subquery causing a crash in pushdown optimization
+--echo #
+
+CREATE TABLE t1 (a INT, b INT, c INT);
+INSERT INTO t1 VALUES (3, 3, 4), (NULL, NULL, 2);
+
+let $q=
+SELECT a,b,c FROM t1 GROUP BY a,b,c
+HAVING a = (SELECT MIN(b) AS min_b FROM t1) and (a = b or a = c);
+
+eval EXPLAIN FORMAT=JSON $q;
+eval $q;
+
+let $q=
+SELECT a FROM t1 GROUP BY a,b
+HAVING a = (SELECT MIN(a) AS min_a FROM t1) AND (a = 3 or a > b);
+
+eval EXPLAIN FORMAT=JSON $q;
+eval $q;
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-32424: Pushdown: server crashes at JOIN::save_explain_data()
+--echo # (fixed by the patch for MDEV-29363)
+--echo #
+
+CREATE TABLE t1 (a INT, b INT, c INT);
+INSERT INTO t1 VALUES (1, 1, 3), (3, 2, 3);
+
+SELECT a,b,c FROM t1 GROUP BY a,b,c
+HAVING a = (SELECT MIN(b) AS min_b FROM t1) and a IN (b, c);
+
+DROP TABLE t1;
+
+--echo #
+--echo # MDEV-32293: Pushdown: server crashes at check_simple_equality()
+--echo # (fixed by the patch for MDEV-29363)
+--echo #
+
+CREATE VIEW v1 AS SELECT 1 AS a;
+
+SELECT * FROM v1 GROUP BY a HAVING a = 'b' AND a = (a IS NULL);
+
+DROP VIEW v1;
+
+--echo #
+--echo # MDEV-32304: Pushdown: server crashes at Item_field::used_tables()
+--echo # (fixed by the patch for MDEV-29363)
+--echo #
+
+CREATE VIEW v1 AS SELECT 1 AS a;
+
+SELECT * FROM v1
+GROUP BY a HAVING a = (a IS NULL OR a IS NULL);
+
+DROP VIEW v1;
+
+--echo End of 10.5 tests
@@ -23,7 +23,7 @@ UPDATE t1 set b=b+1 where a=1 or a=999;
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
-[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$ expected_matches: 2
+[log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$ expected_matches: 2
 [log_grep.inc] found expected match count: 2
 [log_grep.inc] file: log_slow_innodb-verbosity_1 pattern: ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+$ expected_matches: 2
 [log_grep.inc] found expected match count: 2
@@ -47,7 +47,7 @@ SELECT 1;
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
-[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$
+[log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_2 pattern: ^# Pages_read_time: \d+\.\d+ Engine_time: \d+\.\d+$
 [log_grep.inc] lines: 0
@@ -85,7 +85,7 @@ INSERT INTO t1 VALUE(1000) pattern: ^# Tmp_tables: \d+ Tmp_disk_tables: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_3
 --source include/log_slow_start.inc
-INSERT INTO t1 VALUE(1000) pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_updated: \d+ Old_rows_read: \d+$
+INSERT INTO t1 VALUE(1000) pattern: ^# Pages_accessed: \d+ Pages_read: \d+ Pages_prefetched: \d+ Pages_updated: \d+ Old_rows_read: \d+$
 [log_grep.inc] lines: 0
 [log_grep.inc] file: log_slow_innodb-verbosity_3
 --source include/log_slow_start.inc
@@ -12548,6 +12548,34 @@ QUERY LENGTH(trace)>1
 insert into t2 select * from t1 where a<= b and a>4 1
 drop table t1, t2;
 #
+# MDEV-34305 Redundant truncation errors/warnings with optimizer_trace enabled
+#
+SET @@optimizer_trace='enabled=on';
+CREATE TABLE t1 (
+a CHAR(2) NOT NULL PRIMARY KEY,
+b VARCHAR(20) NOT NULL,
+KEY (b)
+) CHARSET=utf8mb4;
+CREATE TABLE t2 (
+a CHAR(2) NOT NULL PRIMARY KEY,
+b VARCHAR(20) NOT NULL,
+KEY (b)
+) CHARSET=utf8mb4;
+INSERT INTO t1 VALUES
+('AB','MySQLAB'),
+('JA','Sun Microsystems'),
+('MS','Microsoft'),
+('IB','IBM- Inc.'),
+('GO','Google Inc.');
+INSERT IGNORE INTO t2 VALUES
+('AB','Sweden'),
+('JA','USA'),
+('MS','United States'),
+('IB','North America'),
+('GO','South America');
+UPDATE t1,t2 SET t1.b=UPPER(t1.b) WHERE t1.b LIKE 'Unknown%';
+DROP TABLE t1, t2;
+#
 # End of 10.5 tests
 #
 #
@@ -949,6 +949,41 @@ select QUERY, LENGTH(trace)>1 from information_schema.optimizer_trace;
 
 drop table t1, t2;
 
+--echo #
+--echo # MDEV-34305 Redundant truncation errors/warnings with optimizer_trace enabled
+--echo #
+
+SET @@optimizer_trace='enabled=on';
+
+CREATE TABLE t1 (
+a CHAR(2) NOT NULL PRIMARY KEY,
+b VARCHAR(20) NOT NULL,
+KEY (b)
+) CHARSET=utf8mb4;
+
+CREATE TABLE t2 (
+a CHAR(2) NOT NULL PRIMARY KEY,
+b VARCHAR(20) NOT NULL,
+KEY (b)
+) CHARSET=utf8mb4;
+
+INSERT INTO t1 VALUES
+('AB','MySQLAB'),
+('JA','Sun Microsystems'),
+('MS','Microsoft'),
+('IB','IBM- Inc.'),
+('GO','Google Inc.');
+
+INSERT IGNORE INTO t2 VALUES
+('AB','Sweden'),
+('JA','USA'),
+('MS','United States'),
+('IB','North America'),
+('GO','South America');
+
+UPDATE t1,t2 SET t1.b=UPPER(t1.b) WHERE t1.b LIKE 'Unknown%';
+DROP TABLE t1, t2;
+
 --echo #
 --echo # End of 10.5 tests
 --echo #
@@ -40,7 +40,7 @@ disconnect master1;
 disconnect master2;
 disconnect master3;
 disconnect master4;
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+# restart: --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-7/ in mysqld.1.err
 Pre-crash binlog file content:
 include/show_binlog_events.inc
@@ -104,7 +104,7 @@ disconnect master1;
 disconnect master2;
 disconnect master3;
 disconnect master4;
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+# restart: --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-11/ in mysqld.1.err
 Pre-crash binlog file content:
 include/show_binlog_events.inc
@@ -173,7 +173,7 @@ disconnect master1;
 disconnect master2;
 disconnect master3;
 disconnect master4;
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+# restart: --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-15/ in mysqld.1.err
 Pre-crash binlog file content:
 include/show_binlog_events.inc
@@ -248,7 +248,7 @@ disconnect master1;
 disconnect master2;
 disconnect master3;
 disconnect master4;
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+# restart: --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 FOUND 1 /Successfully truncated.*to remove transactions starting from GTID 0-1-21/ in mysqld.1.err
 Pre-crash binlog file content:
 include/show_binlog_events.inc
@@ -31,9 +31,9 @@ Log_name File_size
 master-bin.000001 #
 master-bin.000002 #
 master-bin.000003 #
-# restart the server with --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+# restart the server with --init-rpl-role=SLAVE --sync-binlog=1
 # the server is restarted
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+# restart: --init-rpl-role=SLAVE --sync-binlog=1
 connection default;
 #
 # *** Summary: 1 row should be present in both tables; binlog is truncated; number of binlogs at reconnect - 3:
@@ -98,7 +98,7 @@ INSERT INTO t2 VALUES (2, REPEAT("x", 4100));
 INSERT INTO t1 VALUES (2, REPEAT("x", 4100));
 COMMIT;
 connection default;
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+# restart: --init-rpl-role=SLAVE --sync-binlog=1
 connection default;
 #
 # *** Summary: 2 rows should be present in both tables; no binlog truncation; one extra binlog file compare with A; number of binlogs at reconnect - 4:
@@ -155,9 +155,9 @@ Log_name File_size
 master-bin.000001 #
 master-bin.000002 #
 master-bin.000003 #
-# restart the server with --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+# restart the server with --init-rpl-role=SLAVE --sync-binlog=1
 # the server is restarted
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+# restart: --init-rpl-role=SLAVE --sync-binlog=1
 connection default;
 #
 # *** Summary: 2 rows should be present in both tables; no binlog truncation; the same # of binlog files as in B; number of binlogs at reconnect - 4:
@@ -42,7 +42,7 @@ connection default;
 disconnect master1;
 disconnect master2;
 disconnect master3;
-# restart: --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+# restart: --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 FOUND 1 /truncated binlog file:.*master.*000002/ in mysqld.1.err
 "One record should be present in table"
 SELECT * FROM ti;
@@ -42,7 +42,7 @@ SELECT @@global.gtid_binlog_pos as 'Before the crash and never logged trx';
 #
 # Server restart
 #
---let $restart_parameters= --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+--let $restart_parameters= --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 --source include/start_mysqld.inc
 
 # Check error log for a successful truncate message.
@@ -36,7 +36,7 @@ CREATE TABLE tm (f INT) ENGINE=Aria;
 # Using 'debug_sync' hold 'query1' execution after 'query1' is flushed and
 # synced to binary log but not yet committed. In an another connection hold
 # 'query2' execution after 'query2' is flushed and synced to binlog.
-# Crash and restart server with --rpl-semi-sync-slave-enabled=1
+# Crash and restart server with --init-rpl-role=SLAVE
 #
 # During recovery of binary log 'query1' status is checked with InnoDB engine,
 # it will be in prepared but not yet commited. All transactions starting from
@@ -29,7 +29,7 @@ CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
 # The transaction is killed along with the server after that.
 --let $shutdown_timeout=0
 --let $debug_sync_action = "commit_after_release_LOCK_log SIGNAL con1_ready WAIT_FOR signal_no_signal"
---let $restart_parameters = --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+--let $restart_parameters = --init-rpl-role=SLAVE --sync-binlog=1
 --let $test_outcome= 1 row should be present in both tables; binlog is truncated; number of binlogs at reconnect - 3
 --source binlog_truncate_multi_engine.inc
 --echo Proof of the truncated binlog file is readable (two transactions must be seen):
@@ -42,7 +42,7 @@ CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
 --let $debug_sync_action = ""
 # Both debug_sync and debug-dbug are required to make sure Engines remember the commit state
 # debug_sync alone will not help.
---let $restart_parameters = --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+--let $restart_parameters = --init-rpl-role=SLAVE --sync-binlog=1
 --let $test_outcome= 2 rows should be present in both tables; no binlog truncation; one extra binlog file compare with A; number of binlogs at reconnect - 4
 --source binlog_truncate_multi_engine.inc
 
@@ -51,7 +51,7 @@ CREATE TABLE t2 (a INT PRIMARY KEY, b MEDIUMTEXT) ENGINE=rocksdb;
 --let $debug_sync_action = "commit_after_run_commit_ordered SIGNAL con1_ready"
 # Hold off after both engines have committed. The server is shut down.
 --let $shutdown_timeout=
---let $restart_parameters = --rpl-semi-sync-slave-enabled=1 --sync-binlog=1
+--let $restart_parameters = --init-rpl-role=SLAVE --sync-binlog=1
 --let $test_outcome= 2 rows should be present in both tables; no binlog truncation; the same # of binlog files as in B; number of binlogs at reconnect - 4
 --source binlog_truncate_multi_engine.inc
 
@@ -63,7 +63,7 @@ SELECT @@global.gtid_binlog_state;
 #
 # Server restart
 #
---let $restart_parameters= --rpl-semi-sync-slave-enabled=1 --sync-binlog=1 --log-warnings=3
+--let $restart_parameters= --init-rpl-role=SLAVE --sync-binlog=1 --log-warnings=3
 --source include/start_mysqld.inc
 
 # Check error log for a successful truncate message.
@@ -92,7 +92,7 @@ SELECT @@global.gtid_binlog_state;
 #
 --echo # Failed restart as the semisync slave
 --error 1
---exec $MYSQLD_LAST_CMD --rpl-semi-sync-slave-enabled=1 >> $MYSQLTEST_VARDIR/log/mysqld.1.err 2>&1
+--exec $MYSQLD_LAST_CMD --init-rpl-role=SLAVE >> $MYSQLTEST_VARDIR/log/mysqld.1.err 2>&1
 
 --echo # Normal restart
 --source include/start_mysqld.inc
@@ -5,4 +5,17 @@ CREATE TABLE t1(f1 INT NOT NULL, f2 TEXT)ENGINE=InnoDB;
 INSERT INTO t1 SELECT seq, repeat('a', 4000) FROM seq_1_to_1800;
 # restart: --debug_dbug=+d,before_final_redo_apply --innodb_log_file_size=8M
 # restart: --innodb_log_file_size=10M
+#
+# MDEV-34519 innodb_log_checkpoint_now crashes when
+# innodb_read_only is enabled
+#
+# restart: --innodb-force-recovery=6
+SET GLOBAL innodb_log_checkpoint_now=1;
+Warnings:
+Warning 138 InnoDB doesn't force checkpoint when innodb-force-recovery=6.
+# restart: --innodb-read-only=1
+SET GLOBAL innodb_log_checkpoint_now=1;
+Warnings:
+Warning 138 InnoDB doesn't force checkpoint when innodb-read-only=1.
+# restart
 DROP TABLE t1;
@@ -14,4 +14,18 @@ let $shutdown_timeout=0;
 let $restart_parameters=--innodb_log_file_size=10M;
 let $shutdown_timeout=;
 --source include/restart_mysqld.inc
+
+--echo #
+--echo # MDEV-34519 innodb_log_checkpoint_now crashes when
+--echo # innodb_read_only is enabled
+--echo #
+--let $restart_parameters=--innodb-force-recovery=6
+--source include/restart_mysqld.inc
+SET GLOBAL innodb_log_checkpoint_now=1;
+--let $restart_parameters=--innodb-read-only=1
+--source include/restart_mysqld.inc
+SET GLOBAL innodb_log_checkpoint_now=1;
+let $restart_parameters=;
+--source include/restart_mysqld.inc
+
 DROP TABLE t1;
|
|||||||
show binlog events in 'master-bin.000002' from <binlog_start>;
|
show binlog events in 'master-bin.000002' from <binlog_start>;
|
||||||
Log_name Pos Event_type Server_id End_log_pos Info
|
Log_name Pos Event_type Server_id End_log_pos Info
|
||||||
master-bin.000002 # Gtid 1 # GTID #-#-#
|
master-bin.000002 # Gtid 1 # GTID #-#-#
|
||||||
master-bin.000002 # Query 1 # DELETE FROM `db1``; select 'oops!'`.`t``1`
|
master-bin.000002 # Query 1 # TRUNCATE TABLE `db1``; select 'oops!'`.`t``1`
|
||||||
connection slave;
|
connection slave;
|
||||||
include/start_slave.inc
|
include/start_slave.inc
|
||||||
connection master;
|
connection master;
|
||||||
|
@@ -0,0 +1,39 @@
+include/master-slave.inc
+[connection master]
+connection master;
+create table t (val int) engine=MEMORY;
+# DELETE trigger should never be activated
+create trigger tr after delete on t for each row update t2 set val = 1;
+insert into t values (1),(2);
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+# Check pre-restart values
+include/diff_tables.inc [master:test.t,slave:test.t]
+# Restarting master should empty master and slave `t`
+connection master;
+include/rpl_restart_server.inc [server_number=1]
+connection master;
+# Validating MEMORY table on master is empty after restart
+# MYSQL_BINLOG datadir/binlog_file --result-file=assert_file
+include/assert_grep.inc [Query to truncate the MEMORY table should be the contents of the new event]
+# Ensuring slave MEMORY table is empty
+connection master;
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+include/diff_tables.inc [master:test.t,slave:test.t]
+# Ensure new events replicate correctly
+connection master;
+insert into t values (3),(4);
+include/save_master_gtid.inc
+connection slave;
+include/sync_with_master_gtid.inc
+# Validate values on slave, after master restart, do not include those inserted previously
+include/diff_tables.inc [master:test.t,slave:test.t]
+#
+# Cleanup
+connection master;
+drop table t;
+include/rpl_end.inc
+# End of rpl_memory_engine_truncate_on_restart.test
@@ -50,7 +50,7 @@ on slave must be 2
 SELECT @@GLOBAL.gtid_current_pos;
 @@GLOBAL.gtid_current_pos
 0-1-4
-# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
+# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 --init-rpl-role=SLAVE
 connection server_1;
 # Ensuring variable rpl_semi_sync_slave_enabled is ON..
 # Ensuring status rpl_semi_sync_slave_status is OFF..
@@ -136,7 +136,7 @@ on slave must be 5
 SELECT @@GLOBAL.gtid_current_pos;
 @@GLOBAL.gtid_current_pos
 0-2-7
-# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
+# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 --init-rpl-role=SLAVE
 connection server_2;
 # Ensuring variable rpl_semi_sync_slave_enabled is ON..
 # Ensuring status rpl_semi_sync_slave_status is OFF..
@@ -221,7 +221,7 @@ on slave must be 7
 SELECT @@GLOBAL.gtid_current_pos;
 @@GLOBAL.gtid_current_pos
 0-1-9
-# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
+# restart: --skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 --init-rpl-role=SLAVE
 connection server_1;
 # Ensuring variable rpl_semi_sync_slave_enabled is ON..
 # Ensuring status rpl_semi_sync_slave_status is OFF..
@@ -0,0 +1,82 @@
+#
+# This test ensures that a table with engine=memory is kept consistent with
+# the slave when the master restarts. That is, when the master is restarted, it
+# should binlog a new TRUNCATE TABLE command for tables with MEMORY engines,
+# such that after the slave executes these events, its MEMORY-engine tables
+# should be empty.
+#
+# References:
+#   MDEV-25607: Auto-generated DELETE from HEAP table can break replication
+#
+--source include/master-slave.inc
+
+--connection master
+create table t (val int) engine=MEMORY;
+
+-- echo # DELETE trigger should never be activated
+create trigger tr after delete on t for each row update t2 set val = 1;
+
+insert into t values (1),(2);
+--source include/save_master_gtid.inc
+--connection slave
+--source include/sync_with_master_gtid.inc
+
+-- echo # Check pre-restart values
+--let $diff_tables= master:test.t,slave:test.t
+--source include/diff_tables.inc
+
+--echo # Restarting master should empty master and slave `t`
+--connection master
+--let $seq_no_before_restart= `SELECT REGEXP_REPLACE(@@global.gtid_binlog_pos, "0-1-", "")`
+--let $rpl_server_number= 1
+--source include/rpl_restart_server.inc
+
+--connection master
+--echo # Validating MEMORY table on master is empty after restart
+--let $table_size= `select count(*) from t`
+if ($table_size)
+{
+--echo # MEMORY table is not empty
+--die MEMORY table is not empty
+}
+--let $seq_no_after_restart= `SELECT REGEXP_REPLACE(@@global.gtid_binlog_pos, "0-1-", "")`
+if ($seq_no_before_restart == $seq_no_after_restart)
+{
+--echo # Event to empty MEMORY table was not binlogged
+--die Event to empty MEMORY table was not binlogged
+}
+
+--let $binlog_file= query_get_value(SHOW MASTER STATUS, File, 1)
+--let $datadir=`select @@datadir`
+--let assert_file= $MYSQLTEST_VARDIR/tmp/binlog_decoded.out
+--echo # MYSQL_BINLOG datadir/binlog_file --result-file=assert_file
+--exec $MYSQL_BINLOG $datadir/$binlog_file --result-file=$assert_file
+
+--let assert_text= Query to truncate the MEMORY table should be the contents of the new event
+--let assert_count= 1
+--let assert_select= TRUNCATE TABLE
+--source include/assert_grep.inc
+
+--echo # Ensuring slave MEMORY table is empty
+--connection master
+--source include/save_master_gtid.inc
+--connection slave
+--source include/sync_with_master_gtid.inc
+--source include/diff_tables.inc
+
+--echo # Ensure new events replicate correctly
+--connection master
+insert into t values (3),(4);
+--source include/save_master_gtid.inc
+--connection slave
+--source include/sync_with_master_gtid.inc
+
+--echo # Validate values on slave, after master restart, do not include those inserted previously
+--source include/diff_tables.inc
+
+--echo #
+--echo # Cleanup
+--connection master
+drop table t;
+
+--source include/rpl_end.inc
+--echo # End of rpl_memory_engine_truncate_on_restart.test
@@ -74,7 +74,7 @@ source include/wait_for_slave_param.inc;
 
 SELECT @@GLOBAL.gtid_current_pos;
 
---let $restart_parameters=--skip-slave-start=1 --rpl-semi-sync-slave-enabled=1
+--let $restart_parameters=--skip-slave-start=1 --rpl-semi-sync-slave-enabled=1 --init-rpl-role=SLAVE
 --let $allow_rpl_inited=1
 --source include/start_mysqld.inc
 --connection server_$server_to_crash
@@ -37,7 +37,7 @@ my_crc32_t crc32c_aarch64_available(void)
 static unsigned long getauxval(unsigned int key)
 {
   unsigned long val;
-  if (elf_aux_info(key, (void *)&val, (int)sizeof(val) != 0)
+  if (elf_aux_info(key, (void *)&val, (int)sizeof(val) != 0))
     return 0ul;
   return val;
 }
@@ -264,7 +264,8 @@ static unsigned crc32_avx512(unsigned crc, const char *buf, size_t size,
     c4 = xor3_512(c4, _mm512_clmulepi64_epi128(l1, b384, 0x10),
                   extract512_128<3>(l1));
 
-    __m256i c2 = _mm512_castsi512_si256(_mm512_shuffle_i64x2(c4, c4, 0b01001110));
+    __m256i c2 =
+        _mm512_castsi512_si256(_mm512_shuffle_i64x2(c4, c4, 0b01001110));
     c2 = xor256(c2, _mm512_castsi512_si256(c4));
     crc_out = xor128(_mm256_extracti64x2_epi64(c2, 1),
                      _mm256_castsi256_si128(c2));
@@ -289,7 +290,8 @@ static unsigned crc32_avx512(unsigned crc, const char *buf, size_t size,
       xor3_512(_mm512_clmulepi64_epi128(lo, b384, 1),
               _mm512_clmulepi64_epi128(lo, b384, 0x10),
               extract512_128<3>(lo));
-    crc512 = xor512(crc512, _mm512_shuffle_i64x2(crc512, crc512, 0b01001110));
+    crc512 =
+        xor512(crc512, _mm512_shuffle_i64x2(crc512, crc512, 0b01001110));
     const __m256i crc256 = _mm512_castsi512_si256(crc512);
     crc_out = xor128(_mm256_extracti64x2_epi64(crc256, 1),
                      _mm256_castsi256_si128(crc256));
@@ -318,7 +320,7 @@ static unsigned crc32_avx512(unsigned crc, const char *buf, size_t size,
     size += 16;
     if (size) {
 get_last_two_xmms:
-      const __m128i crc2 = crc_out, d = load128(buf + (size - 16));
+      const __m128i crc2 = crc_out, d = load128(buf + ssize_t(size) - 16);
       __m128i S = load128(reinterpret_cast<const char*>(shuffle128) + size);
       crc_out = _mm_shuffle_epi8(crc_out, S);
       S = xor128(S, _mm_set1_epi32(0x80808080));
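The ssize_t cast above guards the tail path where size may be smaller than 16: with size_t arithmetic, size - 16 wraps to a huge value before it ever reaches the pointer addition. A standalone sketch of the difference (ptrdiff_t used here for portability; the MariaDB line uses ssize_t):

    #include <cstddef>
    #include <cstdio>

    int main()
    {
      unsigned char buf[32];
      std::size_t size= 8;                        // tail shorter than 16 bytes
      std::printf("%zu\n", size - 16);            // wraps to 2^64 - 8 on 64-bit
      std::printf("%td\n", ptrdiff_t(size) - 16); // -8, as intended
      unsigned char *p= buf + 16 + (ptrdiff_t(size) - 16); // well-defined: buf + 8
      return p == buf + 8 ? 0 : 1;
    }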
@@ -103,12 +103,6 @@
 #include <lf.h>
 #include "my_cpu.h"
 
-/*
-  when using alloca() leave at least that many bytes of the stack -
-  for functions we might be calling from within this stack frame
-*/
-#define ALLOCA_SAFETY_MARGIN 8192
-
 #define LF_PINBOX_MAX_PINS 65536
 
 static void lf_pinbox_real_free(LF_PINS *pins);
@@ -239,26 +233,21 @@ void lf_pinbox_put_pins(LF_PINS *pins)
   } while (!my_atomic_cas32((int32 volatile*) &pinbox->pinstack_top_ver,
                             (int32*) &top_ver,
                             top_ver-pins->link+nr+LF_PINBOX_MAX_PINS));
-  return;
 }
 
-static int ptr_cmp(const void *pa, const void *pb)
+/*
+  Get the next pointer in the purgatory list.
+  Note that next_node is not used to avoid the extra volatile.
+*/
+#define pnext_node(P, X) (*((void **)(((char *)(X)) + (P)->free_ptr_offset)))
+
+static inline void add_to_purgatory(LF_PINS *pins, void *addr)
 {
-  const void *const*a= pa;
-  const void *const*b= pb;
-  return *a < *b ? -1 : *a == *b ? 0 : 1;
+  pnext_node(pins->pinbox, addr)= pins->purgatory;
+  pins->purgatory= addr;
+  pins->purgatory_count++;
 }
 
-#define add_to_purgatory(PINS, ADDR) \
-  do \
-  { \
-    my_atomic_storeptr_explicit( \
-      (void **)((char *)(ADDR)+(PINS)->pinbox->free_ptr_offset), \
-      (PINS)->purgatory, MY_MEMORY_ORDER_RELEASE); \
-    (PINS)->purgatory= (ADDR); \
-    (PINS)->purgatory_count++; \
-  } while (0)
-
 /*
   Free an object allocated via pinbox allocator
 
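pnext_node() above threads the purgatory list through memory that is logically dead: each entry's link pointer is stored inside the entry itself, free_ptr_offset bytes from its start, so no side allocation is needed. A minimal standalone sketch of the idea (offset 0 and the names are illustrative, not the MariaDB ones):

    #include <cstdio>

    struct Node
    {
      void *link;  // overlaps dead payload; plays the role of free_ptr_offset 0
      int id;
    };

    // Equivalent of pnext_node(P, X) with free_ptr_offset == 0:
    #define PNEXT(x) (*(void **)((char *)(x) + 0))

    int main()
    {
      Node a= {nullptr, 1}, b= {nullptr, 2};
      void *purgatory= nullptr;
      PNEXT(&a)= purgatory; purgatory= &a;  // add_to_purgatory(&a), unrolled
      PNEXT(&b)= purgatory; purgatory= &b;  // add_to_purgatory(&b), unrolled
      for (void *p= purgatory; p; p= PNEXT(p))
        std::printf("%d\n", static_cast<Node *>(p)->id); // prints 2, then 1
      return 0;
    }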
@@ -276,139 +265,87 @@ void lf_pinbox_free(LF_PINS *pins, void *addr)
                   lf_pinbox_real_free(pins););
 }
 
-struct st_harvester {
-  void **granary;
-  int npins;
+struct st_match_and_save_arg {
+  LF_PINS *pins;
+  LF_PINBOX *pinbox;
+  void *old_purgatory;
 };
 
 /*
-  callback forlf_dynarray_iterate:
-  scan all pins of all threads and accumulate all pins
+  Callback for lf_dynarray_iterate:
+  Scan all pins of all threads, for each active (non-null) pin,
+  scan the current thread's purgatory. If present there, move it
+  to a new purgatory. At the end, the old purgatory will contain
+  pointers not pinned by any thread.
 */
-static int harvest_pins(void *e, void *h)
+static int match_and_save(void *e, void *a)
 {
   LF_PINS *el= e;
-  struct st_harvester *hv= h;
+  struct st_match_and_save_arg *arg= a;
 
   int i;
-  LF_PINS *el_end= el+MY_MIN(hv->npins, LF_DYNARRAY_LEVEL_LENGTH);
+  LF_PINS *el_end= el + LF_DYNARRAY_LEVEL_LENGTH;
   for (; el < el_end; el++)
   {
     for (i= 0; i < LF_PINBOX_PINS; i++)
     {
       void *p= my_atomic_loadptr((void **)&el->pin[i]);
       if (p)
-        *hv->granary++= p;
+      {
+        void *cur= arg->old_purgatory;
+        void **list_prev= &arg->old_purgatory;
+        while (cur)
+        {
+          void *next= pnext_node(arg->pinbox, cur);
+
+          if (p == cur)
+          {
+            /* pinned - keeping */
+            add_to_purgatory(arg->pins, cur);
+            /* unlink from old purgatory */
+            *list_prev= next;
+          }
+          else
+            list_prev= (void **)((char *)cur+arg->pinbox->free_ptr_offset);
+          cur= next;
+        }
+        if (!arg->old_purgatory)
+          return 1;
+      }
     }
   }
-  /*
-    hv->npins may become negative below, but it means that
-    we're on the last dynarray page and harvest_pins() won't be
-    called again. We don't bother to make hv->npins() correct
-    (that is 0) in this case.
-  */
-  hv->npins-= LF_DYNARRAY_LEVEL_LENGTH;
   return 0;
 }
 
-/*
-  callback forlf_dynarray_iterate:
-  scan all pins of all threads and see if addr is present there
-*/
-static int match_pins(void *e, void *addr)
-{
-  LF_PINS *el= e;
-  int i;
-  LF_PINS *el_end= el+LF_DYNARRAY_LEVEL_LENGTH;
-  for (; el < el_end; el++)
-    for (i= 0; i < LF_PINBOX_PINS; i++)
-      if (my_atomic_loadptr((void **)&el->pin[i]) == addr)
-        return 1;
-  return 0;
-}
-
-#define next_node(P, X) (*((uchar * volatile *)(((uchar *)(X)) + (P)->free_ptr_offset)))
-#define anext_node(X) next_node(&allocator->pinbox, (X))
-
 /*
   Scan the purgatory and free everything that can be freed
 */
 static void lf_pinbox_real_free(LF_PINS *pins)
 {
-  int npins;
-  void *list;
-  void **addr= NULL;
-  void *first= NULL, *last= NULL;
-  struct st_my_thread_var *var= my_thread_var;
-  void *stack_ends_here= var ? var->stack_ends_here : NULL;
   LF_PINBOX *pinbox= pins->pinbox;
 
-  npins= pinbox->pins_in_array+1;
-#ifdef HAVE_ALLOCA
-  if (stack_ends_here != NULL)
-  {
-    int alloca_size= sizeof(void *)*LF_PINBOX_PINS*npins;
-    /* create a sorted list of pinned addresses, to speed up searches */
-    if (available_stack_size(&pinbox, stack_ends_here) >
-        alloca_size + ALLOCA_SAFETY_MARGIN)
-    {
-      struct st_harvester hv;
-      addr= (void **) alloca(alloca_size);
-      hv.granary= addr;
-      hv.npins= npins;
-      /* scan the dynarray and accumulate all pinned addresses */
-      lf_dynarray_iterate(&pinbox->pinarray, harvest_pins, &hv);
-
-      npins= (int)(hv.granary-addr);
-      /* and sort them */
-      if (npins)
-        qsort(addr, npins, sizeof(void *), ptr_cmp);
-    }
-  }
-#endif
-
-  list= pins->purgatory;
-  pins->purgatory= 0;
+  /* Store info about current purgatory. */
+  struct st_match_and_save_arg arg = {pins, pinbox, pins->purgatory};
+  /* Reset purgatory. */
+  pins->purgatory= NULL;
   pins->purgatory_count= 0;
-  while (list)
+
+  lf_dynarray_iterate(&pinbox->pinarray, match_and_save, &arg);
+
+  if (arg.old_purgatory)
   {
-    void *cur= list;
-    list= *(void **)((char *)cur+pinbox->free_ptr_offset);
-    if (npins)
-    {
-      if (addr) /* use binary search */
-      {
-        void **a, **b, **c;
-        for (a= addr, b= addr+npins-1, c= a+(b-a)/2; (b-a) > 1; c= a+(b-a)/2)
-          if (cur == *c)
-            a= b= c;
-          else if (cur > *c)
-            a= c;
-          else
-            b= c;
-        if (cur == *a || cur == *b)
-          goto found;
-      }
-      else /* no alloca - no cookie. linear search here */
-      {
-        if (lf_dynarray_iterate(&pinbox->pinarray, match_pins, cur))
-          goto found;
-      }
-    }
-    /* not pinned - freeing */
-    if (last)
-      last= next_node(pinbox, last)= (uchar *)cur;
-    else
-      first= last= (uchar *)cur;
-    continue;
-found:
-    /* pinned - keeping */
-    add_to_purgatory(pins, cur);
+    /* Some objects in the old purgatory were not pinned, free them. */
+    void *last= arg.old_purgatory;
+    while (pnext_node(pinbox, last))
+      last= pnext_node(pinbox, last);
+    pinbox->free_func(arg.old_purgatory, last, pinbox->free_func_arg);
   }
-  if (last)
-    pinbox->free_func(first, last, pinbox->free_func_arg);
 }
 
+#define next_node(P, X) (*((uchar * volatile *)(((uchar *)(X)) + (P)->free_ptr_offset)))
+#define anext_node(X) next_node(&allocator->pinbox, (X))
+
 /* lock-free memory allocator for fixed-size objects */
 
 /*
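The rewrite above also inverts the direction of the search: the old code copied every pin into an alloca'd array, sorted it, and ran a binary search (or a full rescan when alloca was unavailable) for each purgatory entry, while the new code makes, for every pin in the system, one pass over the calling thread's purgatory and unlinks the entries that are still protected, so the leftover chain can be handed to free_func() in a single call. The load-bearing trick is the list_prev pointer-to-pointer walk, which deletes from a singly linked list with no special case for the head. A standalone sketch:

    #include <cstdio>

    struct N { N *next; int v; };

    // Remove all nodes matching `key`: prev always points at the slot that
    // holds the current node, whether that slot is `head` or some node's next.
    static void unlink_matching(N **head, int key)
    {
      N **prev= head;
      for (N *cur= *head; cur; )
      {
        N *next= cur->next;
        if (cur->v == key)
          *prev= next;       // unlink; prev keeps pointing at the same slot
        else
          prev= &cur->next;  // advance only when cur stays in the list
        cur= next;
      }
    }

    int main()
    {
      N c= {nullptr, 3}, b= {&c, 2}, a= {&b, 2};
      N *head= &a;
      unlink_matching(&head, 2);
      for (N *p= head; p; p= p->next) std::printf("%d\n", p->v); // prints 3
      return 0;
    }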
@@ -338,7 +338,7 @@ static ulonglong my_timer_init_resolution(ulonglong (*this_timer)(void),
 static ulonglong my_timer_init_frequency(MY_TIMER_INFO *mti)
 {
   int i;
-  ulonglong time1, time2, time3, time4;
+  ulonglong time1, time2, time3, time4, denominator;
   time1= my_timer_cycles();
   time2= my_timer_microseconds();
   time3= time2; /* Avoids a Microsoft/IBM compiler warning */
@@ -349,7 +349,8 @@ static ulonglong my_timer_init_frequency(MY_TIMER_INFO *mti)
   }
   time4= my_timer_cycles() - mti->cycles.overhead;
   time4-= mti->microseconds.overhead;
-  return (mti->microseconds.frequency * (time4 - time1)) / (time3 - time2);
+  denominator = ((time3 - time2) == 0) ? 1 : time3 - time2;
+  return (mti->microseconds.frequency * (time4 - time1)) / denominator;
 }
 
 /*
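This guard, repeated for the milliseconds and ticks calibrations below, covers the case where the two clock readings land in the same tick: the elapsed count is then zero and the old expression divided by zero. Clamping the denominator to 1 returns a degenerate but defined frequency instead of a crash. The pattern in isolation:

    #include <cstdio>

    // Illustrative calibration step: if two readings of a low-resolution
    // clock are equal, elapsed is 0 and the ratio would divide by zero.
    static unsigned long long scale(unsigned long long units,
                                    unsigned long long t_start,
                                    unsigned long long t_end)
    {
      unsigned long long elapsed= t_end - t_start;
      unsigned long long denominator= elapsed ? elapsed : 1; // clamp, as above
      return units / denominator;
    }

    int main()
    {
      std::printf("%llu\n", scale(1000, 42, 42)); // 1000, not a crash
      return 0;
    }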
@@ -582,7 +583,7 @@ void my_timer_init(MY_TIMER_INFO *mti)
       && mti->microseconds.routine
       && mti->cycles.routine)
   {
-    ulonglong time3, time4;
+    ulonglong time3, time4, denominator;
     time1= my_timer_cycles();
     time2= my_timer_milliseconds();
     time3= time2; /* Avoids a Microsoft/IBM compiler warning */
@@ -592,8 +593,9 @@ void my_timer_init(MY_TIMER_INFO *mti)
       if (time3 - time2 > 10) break;
     }
     time4= my_timer_cycles();
+    denominator = ((time4 - time1) == 0) ? 1 : time4 - time1;
     mti->milliseconds.frequency=
-      (mti->cycles.frequency * (time3 - time2)) / (time4 - time1);
+      (mti->cycles.frequency * (time3 - time2)) / denominator;
   }
 
   /*
@@ -607,7 +609,7 @@ void my_timer_init(MY_TIMER_INFO *mti)
       && mti->microseconds.routine
       && mti->cycles.routine)
   {
-    ulonglong time3, time4;
+    ulonglong time3, time4, denominator;
     time1= my_timer_cycles();
     time2= my_timer_ticks();
     time3= time2; /* Avoids a Microsoft/IBM compiler warning */
@@ -621,8 +623,9 @@ void my_timer_init(MY_TIMER_INFO *mti)
       if (time3 - time2 > 10) break;
     }
     time4= my_timer_cycles();
+    denominator = ((time4 - time1) == 0) ? 1 : time4 - time1;
     mti->ticks.frequency=
-      (mti->cycles.frequency * (time3 - time2)) / (time4 - time1);
+      (mti->cycles.frequency * (time3 - time2)) / denominator;
   }
 }
 
@@ -30,6 +30,15 @@ public:
   /* Time spent reading pages, in timer_tracker_frequency() units */
   ulonglong pages_read_time;
 
+  /*
+    Number of pages that we've requested to prefetch while running the query.
+    Note that we don't know:
+    - how much time was spent reading these pages (and how to count the time
+      if reading was done in parallel)
+    - whether the pages were read by "us" or somebody else...
+  */
+  ulonglong pages_prefetched;
+
   ulonglong undo_records_read;
 
   /* Time spent in engine, in timer_tracker_frequency() units */
sql/item.cc
@@ -11222,10 +11222,15 @@ bool Item::cleanup_excluding_immutables_processor (void *arg)
   if (!(get_extraction_flag() == MARKER_IMMUTABLE))
     return cleanup_processor(arg);
   else
-  {
-    clear_extraction_flag();
     return false;
-  }
 }
+
+
+bool Item::remove_immutable_flag_processor (void *arg)
+{
+  if (get_extraction_flag() == MARKER_IMMUTABLE)
+    clear_extraction_flag();
+  return false;
+}
 
 
@@ -2207,6 +2207,7 @@ public:
   virtual bool change_context_processor(void *arg) { return 0; }
   virtual bool reset_query_id_processor(void *arg) { return 0; }
   virtual bool is_expensive_processor(void *arg) { return 0; }
+  bool remove_immutable_flag_processor (void *arg);
 
   // FIXME reduce the number of "add field to bitmap" processors
   virtual bool add_field_to_set_processor(void *arg) { return 0; }
@@ -40,6 +40,7 @@
 #include "sql_audit.h"
 #include "mysqld.h"
 #include "ddl_log.h"
+#include "repl_failsafe.h"
 
 #include <my_dir.h>
 #include <m_ctype.h> // For test_if_number
@@ -3402,10 +3403,12 @@ bool MYSQL_QUERY_LOG::write(THD *thd, time_t current_time,
 
     if (my_b_printf(&log_file,
                     "# Pages_accessed: %lu Pages_read: %lu "
+                    "Pages_prefetched: %lu "
                     "Pages_updated: %lu Old_rows_read: %lu\n"
                     "# Pages_read_time: %s Engine_time: %s\n",
                     (ulong) stats->pages_accessed,
                     (ulong) stats->pages_read_count,
+                    (ulong) stats->pages_prefetched,
                     (ulong) stats->pages_updated,
                     (ulong) stats->undo_records_read,
                     query_time_buff, lock_time_buff))
@@ -11188,7 +11191,7 @@ Recovery_context::Recovery_context() :
   prev_event_pos(0),
   last_gtid_standalone(false), last_gtid_valid(false), last_gtid_no2pc(false),
   last_gtid_engines(0),
-  do_truncate(global_rpl_semi_sync_slave_enabled),
+  do_truncate(rpl_status == RPL_IDLE_SLAVE),
   truncate_validated(false), truncate_reset_done(false),
   truncate_set_in_1st(false), id_binlog(MAX_binlog_id),
   checksum_alg(BINLOG_CHECKSUM_ALG_UNDEF), gtid_maybe_to_truncate(NULL)
@@ -6790,7 +6790,7 @@ struct my_option my_long_options[]=
 #ifdef HAVE_REPLICATION
   {"init-rpl-role", 0, "Set the replication role",
    &rpl_status, &rpl_status, &rpl_role_typelib,
-   GET_ENUM, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
+   GET_ENUM, REQUIRED_ARG, RPL_AUTH_MASTER, 0, 0, 0, 0, 0},
 #endif /* HAVE_REPLICATION */
   {"memlock", 0, "Lock mysqld in memory.", &locked_in_memory,
    &locked_in_memory, 0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
@@ -17089,6 +17089,7 @@ static
 void print_range(String *out, const KEY_PART_INFO *key_part,
                  KEY_MULTI_RANGE *range, uint n_key_parts)
 {
+  Check_level_instant_set check_field(current_thd, CHECK_FIELD_IGNORE);
   uint flag= range->range_flag;
   String key_name;
   key_name.set_charset(system_charset_info);
@@ -3240,7 +3240,7 @@ static bool open_table_entry_fini(THD *thd, TABLE_SHARE *share, TABLE *entry)
     String query(query_buf, sizeof(query_buf), system_charset_info);
 
     query.length(0);
-    query.append(STRING_WITH_LEN("DELETE FROM "));
+    query.append(STRING_WITH_LEN("TRUNCATE TABLE "));
     append_identifier(thd, &query, &share->db);
     query.append('.');
     append_identifier(thd, &query, &share->table_name);
@@ -1952,6 +1952,8 @@ static void trace_engine_stats(handler *file, Json_writer *writer)
     if (hs->pages_read_time)
       writer->add_member("pages_read_time_ms").
         add_double(hs->pages_read_time * 1000. / timer_tracker_frequency());
+    if (hs->pages_prefetched)
+      writer->add_member("pages_prefetch_read_count").add_ull(hs->pages_prefetched);
     if (hs->undo_records_read)
       writer->add_member("old_rows_read").add_ull(hs->undo_records_read);
     writer->end_object();
@@ -11430,6 +11430,19 @@ Item *st_select_lex::pushdown_from_having_into_where(THD *thd, Item *having)
       goto exit;
     }
   }
+
+  /*
+    Remove IMMUTABLE_FL only after all of the elements of the condition are processed.
+  */
+  it.rewind();
+  while ((item=it++))
+  {
+    if (item->walk(&Item::remove_immutable_flag_processor, 0, STOP_PTR))
+    {
+      attach_to_conds.empty();
+      goto exit;
+    }
+  }
 exit:
   thd->lex->current_select= save_curr_select;
   return having;
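The added loop drives Item::remove_immutable_flag_processor() through Item::walk(), the tree-visitor idiom used throughout the optimizer: one traversal routine, many small bool-returning member callbacks, where returning true aborts the walk. A much-simplified standalone model (illustrative names; the real walk() also takes a walk_subquery flag ahead of the argument):

    #include <cstdio>

    struct Node
    {
      Node *left= nullptr, *right= nullptr;
      int flag= 1;

      bool walk(bool (Node::*processor)(void *), void *arg)
      {
        if (left && left->walk(processor, arg))
          return true;
        if (right && right->walk(processor, arg))
          return true;
        return (this->*processor)(arg);   // visit self last
      }

      bool remove_flag_processor(void *)
      {
        flag= 0;
        return false;                     // false means keep walking
      }
    };

    int main()
    {
      Node leaf1, leaf2, root;
      root.left= &leaf1;
      root.right= &leaf2;
      root.walk(&Node::remove_flag_processor, nullptr);
      std::printf("%d %d %d\n", root.flag, leaf1.flag, leaf2.flag); // 0 0 0
      return 0;
    }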
@@ -2686,8 +2686,8 @@ int prepare_schema_table(THD *thd, LEX *lex, Table_ident *table_ident,
       DBUG_RETURN(1);
     lex->query_tables_last= query_tables_last;
     break;
-#endif
   }
+#endif
   case SCH_PROFILES:
     /*
       Mark this current profiling record to be discarded. We don't
@@ -382,6 +382,7 @@ read_ahead:
 
   if (count)
   {
+    mariadb_increment_pages_prefetched(count);
     DBUG_PRINT("ib_buf", ("random read-ahead %zu pages from %s: %u",
                           count, space->chain.start->name,
                           low.page_no()));
@@ -677,6 +678,7 @@ failed:
 
   if (count)
   {
+    mariadb_increment_pages_prefetched(count);
     DBUG_PRINT("ib_buf", ("random read-ahead %zu pages from %s: %u",
                           count, space->chain.start->name,
                           new_low.page_no()));
@@ -18220,13 +18220,28 @@ static my_bool innodb_buf_flush_list_now = TRUE;
 static uint innodb_merge_threshold_set_all_debug
   = DICT_INDEX_MERGE_THRESHOLD_DEFAULT;
 
+/** Force an InnoDB log checkpoint. */
 /** Force an InnoDB log checkpoint. */
 static
 void
-checkpoint_now_set(THD*, st_mysql_sys_var*, void*, const void *save)
+checkpoint_now_set(THD* thd, st_mysql_sys_var*, void*, const void *save)
 {
   if (!*static_cast<const my_bool*>(save))
     return;
+
+  if (srv_read_only_mode)
+  {
+    push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN,
+                        HA_ERR_UNSUPPORTED,
+                        "InnoDB doesn't force checkpoint "
+                        "when %s",
+                        (srv_force_recovery
+                         == SRV_FORCE_NO_LOG_REDO)
+                        ? "innodb-force-recovery=6."
+                        : "innodb-read-only=1.");
+    return;
+  }
+
   const auto size= log_sys.is_encrypted()
     ? SIZE_OF_FILE_CHECKPOINT + 8 : SIZE_OF_FILE_CHECKPOINT;
   mysql_mutex_unlock(&LOCK_global_system_variables);
@@ -79,6 +79,12 @@ inline void mariadb_increment_undo_records_read()
     stats->undo_records_read++;
 }
 
+inline void mariadb_increment_pages_prefetched(ulint n_pages)
+{
+  if (ha_handler_stats *stats= mariadb_stats)
+    stats->pages_prefetched += n_pages;
+}
+
 /*
   The following has to be identical code as measure() in sql_analyze_stmt.h
 
@@ -1821,6 +1821,11 @@ int ha_maria::repair(THD *thd, HA_CHECK *param, bool do_optimize)
         _ma_check_print_warning(param, "Number of rows changed from %s to %s",
                                 llstr(rows, llbuff),
                                 llstr(file->state->records, llbuff2));
+        /*
+          ma_check_print_warning() may generate an error in case of creating keys
+          for ALTER TABLE. In this case we should signal an error.
+        */
+        error= thd->is_error();
       }
     }
     else
@@ -575,6 +575,7 @@ int ha_s3::create(const char *name, TABLE *table_arg,
     s3_deinit(s3_client);
     if (error)
       maria_delete_table_files(name, 1, 0);
+  }
   else
 #endif /* MOVE_TABLE_TO_S3 */
   {
@@ -2510,7 +2510,7 @@ static int initialize_variables_for_repair(HA_CHECK *param,
   *info->state= info->s->state.state;
   if (share->data_file_type == BLOCK_RECORD)
     share->state.state.data_file_length= MY_ALIGN(sort_info->filelength,
-                                                  share->block_size);
+                                                  (my_off_t) share->block_size);
   else
     share->state.state.data_file_length= sort_info->filelength;
   return 0;
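The cast above matters because of how the usual round-up macro builds its mask: MY_ALIGN(A, L) is conventionally (((A) + (L) - 1) & ~((L) - 1)), and when L is a 32-bit type the complement is computed in 32 bits and then zero-extends, wiping the upper half of a 64-bit file length. Widening L first keeps the mask 64-bit. A standalone sketch (the macro definition here is the conventional one, assumed rather than quoted):

    #include <cstdint>
    #include <cstdio>

    #define MY_ALIGN(A, L) (((A) + (L) - 1) & ~((L) - 1))

    int main()
    {
      uint64_t length= (1ULL << 32) + 100; // a data file larger than 4GB
      uint32_t block= 4096;
      // 32-bit mask zero-extends and clears the high word: prints 4096
      std::printf("%llu\n", (unsigned long long) MY_ALIGN(length, block));
      // 64-bit mask keeps the high word: prints 4294971392
      std::printf("%llu\n", (unsigned long long) MY_ALIGN(length, (uint64_t) block));
      return 0;
    }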