
Merged recent changes from mysql-5.5-bugteam into mysql-5.5-runtime tree.
Dmitry Lenev
2010-10-08 20:33:46 +04:00
32 changed files with 489 additions and 211 deletions

View File

@ -47,7 +47,7 @@ static char *shared_memory_base_name=0;
#endif
static uint opt_protocol=0;
enum operations { DO_CHECK, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE, DO_UPGRADE };
enum operations { DO_CHECK=1, DO_REPAIR, DO_ANALYZE, DO_OPTIMIZE, DO_UPGRADE };
static struct my_option my_long_options[] =
{
@ -241,6 +241,8 @@ static my_bool
get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
char *argument)
{
int orig_what_to_do= what_to_do;
switch(optid) {
case 'a':
what_to_do = DO_ANALYZE;
@ -315,6 +317,13 @@ get_one_option(int optid, const struct my_option *opt __attribute__((unused)),
opt->name);
break;
}
if (orig_what_to_do && (what_to_do != orig_what_to_do))
{
fprintf(stderr, "Error: %s doesn't support multiple contradicting commands.\n",
my_progname);
return 1;
}
return 0;
}
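
A minimal, self-contained sketch of the guard added above (the set_command() helper and DO_NONE value are hypothetical, not the actual mysqlcheck sources): giving DO_CHECK the explicit value 1 lets 0 mean "no command chosen yet", which is what allows get_one_option() to spot a second, contradicting command.

#include <cstdio>

/* Hypothetical, simplified rendition of the mysqlcheck option guard. */
enum operations { DO_NONE = 0, DO_CHECK = 1, DO_REPAIR, DO_ANALYZE,
                  DO_OPTIMIZE, DO_UPGRADE };

static operations what_to_do = DO_NONE;

/* Returns 1 (error) when a later option contradicts an earlier one. */
static int set_command(operations requested, const char *progname)
{
  operations orig_what_to_do = what_to_do;
  what_to_do = requested;
  if (orig_what_to_do != DO_NONE && what_to_do != orig_what_to_do)
  {
    std::fprintf(stderr,
                 "Error: %s doesn't support multiple contradicting commands.\n",
                 progname);
    return 1;
  }
  return 0;
}

int main()
{
  /* Equivalent of "mysqlcheck -a -o ...": analyze followed by optimize. */
  if (set_command(DO_ANALYZE, "mysqlcheck"))
    return 1;
  if (set_command(DO_OPTIMIZE, "mysqlcheck"))   /* prints the error */
    return 1;
  return 0;
}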

View File

@ -131,16 +131,16 @@ IF(UNIX)
# Default GCC flags
IF(CMAKE_COMPILER_IS_GNUCC)
SET(COMMON_C_FLAGS "-g -static-libgcc -fno-omit-frame-pointer")
SET(COMMON_C_FLAGS "-g -static-libgcc -fno-omit-frame-pointer -fno-strict-aliasing")
SET(CMAKE_C_FLAGS_DEBUG "-O ${COMMON_C_FLAGS}")
SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-O3 ${COMMON_C_FLAGS}")
ENDIF()
IF(CMAKE_COMPILER_IS_GNUCXX)
SET(COMMON_CXX_FLAGS "-g -static-libgcc -fno-omit-frame-pointer")
SET(COMMON_CXX_FLAGS "-g -static-libgcc -fno-omit-frame-pointer -fno-strict-aliasing")
SET(CMAKE_CXX_FLAGS_DEBUG "-O ${COMMON_CXX_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-O3 ${COMMON_CXX_FLAGS}")
ENDIF()
# HPUX flags
IF(CMAKE_SYSTEM_NAME MATCHES "HP-UX")
IF(CMAKE_C_COMPILER_ID MATCHES "HP")
@ -156,7 +156,7 @@ IF(UNIX)
ENDIF()
SET(WITH_SSL no)
ENDIF()
# Linux flags
IF(CMAKE_SYSTEM_NAME MATCHES "Linux")
IF(CMAKE_C_COMPILER_ID MATCHES "Intel")
@ -173,18 +173,18 @@ IF(UNIX)
SET(WITH_SSL no)
ENDIF()
ENDIF()
# OSX flags
IF(APPLE)
SET(COMMON_C_FLAGS "-g -fno-common")
SET(COMMON_C_FLAGS "-g -fno-common -fno-strict-aliasing")
# XXX: why are we using -felide-constructors on OSX?
SET(COMMON_CXX_FLAGS "-g -fno-common -felide-constructors")
SET(COMMON_CXX_FLAGS "-g -fno-common -felide-constructors -fno-strict-aliasing")
SET(CMAKE_C_FLAGS_DEBUG "-O ${COMMON_C_FLAGS}")
SET(CMAKE_CXX_FLAGS_DEBUG "-O ${COMMON_CXX_FLAGS}")
SET(CMAKE_C_FLAGS_RELWITHDEBINFO "-Os ${COMMON_C_FLAGS}")
SET(CMAKE_CXX_FLAGS_RELWITHDEBINFO "-Os ${COMMON_CXX_FLAGS}")
ENDIF()
# Solaris flags
IF(CMAKE_SYSTEM_NAME MATCHES "SunOS")
IF(CMAKE_SYSTEM_VERSION VERSION_GREATER "5.9")
@ -219,7 +219,7 @@ IF(UNIX)
ENDIF()
ENDIF()
ENDIF()
IF(CMAKE_C_FLAGS_DEBUG)
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG}"
CACHE STRING "Debug C compile flags")
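
For context on the -fno-strict-aliasing additions above: the flag tells the compiler not to assume that pointers to different types never refer to the same memory. A hypothetical example of the kind of type punning the flag tolerates (illustrative only, not taken from the server sources):

#include <cstdio>
#include <cstring>

/* Undefined behaviour under strict aliasing: an int object is read through
   a float lvalue, so an optimizer may assume the two never overlap. */
static float bits_to_float_punned(int i)
{
  return *reinterpret_cast<float*>(&i);
}

/* Well-defined alternative: copy the representation instead of aliasing. */
static float bits_to_float_memcpy(int i)
{
  float f;
  std::memcpy(&f, &i, sizeof f);
  return f;
}

int main()
{
  int bits = 0x3f800000;   /* IEEE-754 bit pattern for 1.0f */
  std::printf("%g %g\n", bits_to_float_punned(bits), bits_to_float_memcpy(bits));
  return 0;
}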

View File

@ -600,3 +600,10 @@ NULL
SELECT -9223372036854775808 MOD -1;
-9223372036854775808 MOD -1
0
#
# Bug #57209 valgrind + Assertion failed: dst > buf
#
SELECT floor(log10(format(concat_ws(5445796E25, 5306463, 30837), -358821)))
as foo;
foo
2

View File

@ -8,7 +8,7 @@ mysql.db OK
mysql.event OK
mysql.func OK
mysql.general_log
note : The storage engine for the table doesn't support optimize
note : The storage engine for the table doesn't support analyze
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
@ -21,7 +21,7 @@ mysql.procs_priv OK
mysql.proxy_priv OK
mysql.servers OK
mysql.slow_log
note : The storage engine for the table doesn't support optimize
note : The storage engine for the table doesn't support analyze
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
@ -29,6 +29,8 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.user OK
mtr.global_suppressions Table is already up to date
mtr.test_suppressions Table is already up to date
mysql.columns_priv OK
mysql.db OK
mysql.event OK
@ -55,10 +57,64 @@ mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.user OK
mysql.columns_priv OK
mysql.db OK
mysql.event OK
mysql.func OK
mysql.general_log
note : The storage engine for the table doesn't support analyze
mysql.help_category OK
mysql.help_keyword OK
mysql.help_relation OK
mysql.help_topic OK
mysql.host OK
mysql.ndb_binlog_index OK
mysql.plugin OK
mysql.proc OK
mysql.procs_priv OK
mysql.proxy_priv OK
mysql.servers OK
mysql.slow_log
note : The storage engine for the table doesn't support analyze
mysql.tables_priv OK
mysql.time_zone OK
mysql.time_zone_leap_second OK
mysql.time_zone_name OK
mysql.time_zone_transition OK
mysql.time_zone_transition_type OK
mysql.user OK
mysql.columns_priv Table is already up to date
mysql.db Table is already up to date
mysql.event Table is already up to date
mysql.func Table is already up to date
mysql.general_log
note : The storage engine for the table doesn't support optimize
mysql.help_category Table is already up to date
mysql.help_keyword Table is already up to date
mysql.help_relation Table is already up to date
mysql.help_topic Table is already up to date
mysql.host Table is already up to date
mysql.ndb_binlog_index Table is already up to date
mysql.plugin Table is already up to date
mysql.proc Table is already up to date
mysql.procs_priv Table is already up to date
mysql.proxy_priv Table is already up to date
mysql.servers Table is already up to date
mysql.slow_log
note : The storage engine for the table doesn't support optimize
mysql.tables_priv Table is already up to date
mysql.time_zone Table is already up to date
mysql.time_zone_leap_second Table is already up to date
mysql.time_zone_name Table is already up to date
mysql.time_zone_transition Table is already up to date
mysql.time_zone_transition_type Table is already up to date
mysql.user Table is already up to date
create table t1 (a int);
create view v1 as select * from t1;
test.t1 OK
test.t1 Table is already up to date
test.t1 OK
test.t1 Table is already up to date
drop view v1;
drop table t1;
create table `t``1`(a int);
@ -127,6 +183,7 @@ Tables_in_test
t1
#mysql50#v-1
v1
test.t1 OK
show tables;
Tables_in_test
t1
@ -200,3 +257,6 @@ Tables_in_test (t1-1)
t1-1
drop table `t1-1`;
End of 5.1 tests
#
# Bug #35269: mysqlcheck behaves different depending on order of parameters
#

View File

@ -0,0 +1,7 @@
UNINSTALL PLUGIN example;
ERROR HY000: Plugin 'example' is force_plus_permanent and can not be unloaded
SELECT PLUGIN_NAME, PLUGIN_STATUS, LOAD_OPTION FROM INFORMATION_SCHEMA.PLUGINS
WHERE PLUGIN_NAME IN ('MyISAM', 'EXAMPLE');
PLUGIN_NAME PLUGIN_STATUS LOAD_OPTION
MyISAM ACTIVE FORCE
EXAMPLE ACTIVE FORCE_PLUS_PERMANENT

View File

@ -5,20 +5,24 @@ drop table if exists t1;
create table t1 (a varchar(100));
insert into t1 values ('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa'),('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb');
Activate debug hook and attempt to retrieve the statement from the cache.
set session debug='+d,wait_in_query_cache_insert';
set debug_sync="wait_in_query_cache_insert SIGNAL parked WAIT_FOR go";
select SQL_CACHE * from t1;;
set debug_sync="now WAIT_FOR parked";
On a second connection; clear the query cache.
show status like 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 1
set global query_cache_size= 0;
Signal the debug hook to release the lock.
select id from information_schema.processlist where state='wait_in_query_cache_insert' into @thread_id;
kill query @thread_id;
set debug_sync="now SIGNAL go";
Show query cache status.
show status like 'Qcache_queries_in_cache';
Variable_name Value
Qcache_queries_in_cache 0
a
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
set debug_sync= 'RESET';
set global query_cache_size= 0;
use test;
drop table t1;
@ -32,11 +36,12 @@ SET GLOBAL concurrent_insert= 1;
SET GLOBAL query_cache_size= 1024*512;
SET GLOBAL query_cache_type= ON;
# Switch to connection con1
SET SESSION debug='+d,wait_after_query_cache_invalidate';
SET DEBUG_SYNC = "wait_after_query_cache_invalidate SIGNAL parked WAIT_FOR go";
# Send concurrent insert, will wait in the query cache table invalidate
INSERT INTO t1 VALUES (4);
# Switch to connection default
# Wait for concurrent insert to reach the debug point
SET DEBUG_SYNC = "now WAIT_FOR parked";
# Switch to connection con2
# Send SELECT that shouldn't be cached
SELECT * FROM t1;
@ -46,9 +51,7 @@ a
3
# Switch to connection default
# Notify the concurrent insert to proceed
SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE STATE = 'wait_after_query_cache_invalidate' INTO @thread_id;
KILL QUERY @thread_id;
SET DEBUG_SYNC = "now SIGNAL go";
# Switch to connection con1
# Gather insert result
SHOW STATUS LIKE "Qcache_queries_in_cache";
@ -66,6 +69,7 @@ Variable_name Value
Qcache_queries_in_cache 1
# Disconnect
# Restore defaults
SET DEBUG_SYNC= 'RESET';
RESET QUERY CACHE;
DROP TABLE t1,t2;
SET GLOBAL concurrent_insert= DEFAULT;
@ -108,43 +112,48 @@ bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
** before the mutex lock in invalidate_table_internal.
** This will allow new result sets to be written into the QC.
**
SET SESSION debug='+d,wait_in_query_cache_invalidate1';
SET SESSION debug='+d,wait_in_query_cache_invalidate2';
SET DEBUG_SYNC="wait_in_query_cache_invalidate1 SIGNAL parked1_1 WAIT_FOR go1_1";
SET DEBUG_SYNC="wait_in_query_cache_invalidate2 SIGNAL parked1_2 WAIT_FOR go1_2";
DELETE FROM t1 WHERE a like '%a%';;
=================================== Connection default
** Assert that the expect process status is obtained.
SET DEBUG_SYNC="now WAIT_FOR parked1_1";
**
=================================== Connection thd2
** On THD2: Insert a result into the cache. This attempt will be blocked
** because of a debug hook placed just before the mutex lock after which
** the first part of the result set is written.
SET SESSION debug='+d,wait_in_query_cache_insert';
SET DEBUG_SYNC="wait_in_query_cache_insert SIGNAL parked2 WAIT_FOR go2 EXECUTE 1";
SELECT SQL_CACHE * FROM t2 UNION SELECT * FROM t3;
=================================== Connection default
** Assert that the SELECT-stmt thread reaches the sync point.
SET DEBUG_SYNC="now WAIT_FOR parked2";
**
**
=================================== Connection thd3
** On THD3: Insert another result into the cache and block on the same
** debug hook.
SET SESSION debug='+d,wait_in_query_cache_insert';
SELECT SQL_CACHE * FROM t4 UNION SELECT * FROM t5;;
SET DEBUG_SYNC="wait_in_query_cache_insert SIGNAL parked3 WAIT_FOR go3 EXECUTE 1";
SELECT SQL_CACHE * FROM t4 UNION SELECT * FROM t5;
=================================== Connection default
** Assert that the two SELECT-stmt threads to reach the hook.
** Assert that the SELECT-stmt thread reaches the sync point.
SET DEBUG_SYNC="now WAIT_FOR parked3";
**
**
** Signal the DELETE thread, THD1, to continue. It will enter the mutex
** lock and set query cache status to TABLE_FLUSH_IN_PROGRESS and then
** unlock the mutex before stopping on the next debug hook.
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_invalidate1' LIMIT 1 INTO @flush_thread_id;
KILL QUERY @flush_thread_id;
SET DEBUG_SYNC="now SIGNAL go1_1";
** Assert that we reach the next debug hook.
SET DEBUG_SYNC="now WAIT_FOR parked1_2";
**
** Signal the remaining debug hooks blocking THD2 and THD3.
** The threads will grab the guard mutex enter the wait condition and
** and finally release the mutex. The threads will continue to wait
** until a broadcast signal reaches them causing both threads to
** come alive and check the condition.
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_insert' ORDER BY id ASC LIMIT 1 INTO @thread_id;
KILL QUERY @thread_id;
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_insert' ORDER BY id DESC LIMIT 1 INTO @thread_id;
KILL QUERY @thread_id;
SET DEBUG_SYNC="now SIGNAL go2";
SET DEBUG_SYNC="now SIGNAL go3";
**
** Finally signal the DELETE statement on THD1 one last time.
** The stmt will complete the query cache invalidation and return
@ -152,8 +161,7 @@ KILL QUERY @thread_id;
** One signal will be sent to the thread group waiting for executing
** invalidations and a broadcast signal will be sent to the thread
** group holding result set writers.
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_invalidate2' LIMIT 1 INTO @flush_thread_id;
KILL QUERY @flush_thread_id;
SET DEBUG_SYNC="now SIGNAL go1_2";
**
*************************************************************************
** No tables should be locked
@ -172,6 +180,7 @@ DELETE FROM t4;
DELETE FROM t5;
=================================== Connection thd1
** Done.
SET DEBUG_SYNC= 'RESET';
SET GLOBAL query_cache_size= 0;
# Restore defaults
RESET QUERY CACHE;
@ -179,3 +188,35 @@ FLUSH STATUS;
DROP TABLE t1,t2,t3,t4,t5;
SET GLOBAL query_cache_size= DEFAULT;
SET GLOBAL query_cache_type= DEFAULT;
#
# Bug#56822: Add a thread state for sessions waiting on the query cache lock
#
SET @old_query_cache_size= @@GLOBAL.query_cache_size;
DROP TABLE IF EXISTS t1;
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
SET GLOBAL concurrent_insert= 1;
SET GLOBAL query_cache_size= 1024*512;
SET GLOBAL query_cache_type= ON;
# Switch to connection con1
SET DEBUG_SYNC = "wait_in_query_cache_invalidate2 SIGNAL parked WAIT_FOR go";
# Send INSERT, will wait in the query cache table invalidation
INSERT INTO t1 VALUES (4);;
# Switch to connection default
# Wait for insert to reach the debug point
SET DEBUG_SYNC = "now WAIT_FOR parked";
# Switch to connection con2
# Send a query that should wait on the query cache lock
RESET QUERY CACHE;
# Switch to connection default
# Wait for the state to be reflected in the processlist
# Signal that the query cache can be unlocked
SET DEBUG_SYNC="now SIGNAL go";
# Reap con1 and disconnect
# Reap con2 and disconnect
# Restore defaults
SET DEBUG_SYNC= 'RESET';
RESET QUERY CACHE;
DROP TABLE t1;
SET GLOBAL query_cache_size= DEFAULT;
SET GLOBAL query_cache_type= DEFAULT;

View File

@ -680,5 +680,17 @@ id select_type table type possible_keys key key_len ref rows Extra
1 SIMPLE t2 const PRIMARY PRIMARY 8 const 1 Using index
DROP TABLE t1,t2;
#
# Bug#57095: Wrongly chosen expression cache type led to a wrong
# result.
#
CREATE TABLE t1 (`b` datetime );
INSERT INTO t1 VALUES ('2010-01-01 00:00:00'), ('2010-01-01 00:00:00');
SELECT * FROM t1 WHERE b <= coalesce(NULL, now());
b
2010-01-01 00:00:00
2010-01-01 00:00:00
DROP TABLE t1;
#
#
# End of 5.5 tests
#

View File

@ -165,6 +165,7 @@ def information_schema PARTITIONS TABLE_NAME 3 NO varchar 64 192 NULL NULL utf8
def information_schema PARTITIONS TABLE_ROWS 13 0 NO bigint NULL NULL 20 0 NULL NULL bigint(21) unsigned select
def information_schema PARTITIONS TABLE_SCHEMA 2 NO varchar 64 192 NULL NULL utf8 utf8_general_ci varchar(64) select
def information_schema PARTITIONS UPDATE_TIME 20 NULL YES datetime NULL NULL NULL NULL NULL NULL datetime select
def information_schema PLUGINS LOAD_OPTION 11 NO varchar 64 192 NULL NULL utf8 utf8_general_ci varchar(64) select
def information_schema PLUGINS PLUGIN_AUTHOR 8 NULL YES varchar 64 192 NULL NULL utf8 utf8_general_ci varchar(64) select
def information_schema PLUGINS PLUGIN_DESCRIPTION 9 NULL YES longtext 4294967295 4294967295 NULL NULL utf8 utf8_general_ci longtext select
def information_schema PLUGINS PLUGIN_LIBRARY 6 NULL YES varchar 64 192 NULL NULL utf8 utf8_general_ci varchar(64) select
@ -562,6 +563,7 @@ NULL information_schema PARTITIONS CHECKSUM bigint NULL NULL NULL NULL bigint(21
3.0000 information_schema PLUGINS PLUGIN_AUTHOR varchar 64 192 utf8 utf8_general_ci varchar(64)
1.0000 information_schema PLUGINS PLUGIN_DESCRIPTION longtext 4294967295 4294967295 utf8 utf8_general_ci longtext
3.0000 information_schema PLUGINS PLUGIN_LICENSE varchar 80 240 utf8 utf8_general_ci varchar(80)
3.0000 information_schema PLUGINS LOAD_OPTION varchar 64 192 utf8 utf8_general_ci varchar(64)
NULL information_schema PROCESSLIST ID bigint NULL NULL NULL NULL bigint(4)
3.0000 information_schema PROCESSLIST USER varchar 16 48 utf8 utf8_general_ci varchar(16)
3.0000 information_schema PROCESSLIST HOST varchar 64 192 utf8 utf8_general_ci varchar(64)

View File

@ -6,19 +6,19 @@ show status like 'Slave_heartbeat_period';;
Variable_name Slave_heartbeat_period
Value 5.000
change master to master_host='127.0.0.1',master_port=MASTER_PORT, master_user='root', master_heartbeat_period= 4294968;
ERROR HY000: The requested value for the heartbeat period is negative or exceeds the maximum 4294967 seconds
ERROR HY000: The requested value for the heartbeat period is either negative or exceeds the maximum allowed (4294967 seconds).
show status like 'Slave_heartbeat_period';;
Variable_name Slave_heartbeat_period
Value 5.000
change master to master_host='127.0.0.1',master_port=MASTER_PORT, master_user='root', master_heartbeat_period= 0.0009999;
Warnings:
Warning 1624 The requested value for the heartbeat period is less than 1 msec. The period is reset to zero which means no heartbeats will be sending
Warning 1703 The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled.
show status like 'Slave_heartbeat_period';;
Variable_name Slave_heartbeat_period
Value 0.000
change master to master_host='127.0.0.1',master_port=MASTER_PORT, master_user='root', master_heartbeat_period= 4294967;
Warnings:
Warning 1624 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' sec. A sensible value for the period should be less than the timeout.
Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
show status like 'Slave_heartbeat_period';;
Variable_name Slave_heartbeat_period
Value 4294967.000
@ -30,7 +30,7 @@ reset slave;
set @@global.slave_net_timeout= 5;
change master to master_host='127.0.0.1',master_port=MASTER_PORT, master_user='root', master_heartbeat_period= 5.001;
Warnings:
Warning 1624 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' sec. A sensible value for the period should be less than the timeout.
Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
show status like 'Slave_heartbeat_period';;
Variable_name Slave_heartbeat_period
Value 5.001
@ -42,7 +42,7 @@ Variable_name Slave_heartbeat_period
Value 4.000
set @@global.slave_net_timeout= 3 /* must be a warning */;
Warnings:
Warning 1624 The current value for master_heartbeat_period exceeds the new value of `slave_net_timeout' sec. A sensible value for the period should be less than the timeout.
Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
reset slave;
drop table if exists t1;
set @@global.slave_net_timeout= 10;

View File

@ -38,14 +38,14 @@ RESET SLAVE;
*** Warning if updated slave_net_timeout < slave_heartbeat_timeout ***
SET @@global.slave_net_timeout=FLOOR(SLAVE_HEARTBEAT_TIMEOUT)-1;
Warnings:
Warning 1624 The current value for master_heartbeat_period exceeds the new value of `slave_net_timeout' sec. A sensible value for the period should be less than the timeout.
Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
SET @@global.slave_net_timeout=@restore_slave_net_timeout;
RESET SLAVE;
*** Warning if updated slave_heartbeat_timeout > slave_net_timeout ***
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_PORT, MASTER_USER='root', MASTER_HEARTBEAT_PERIOD=SLAVE_NET_TIMEOUT;
Warnings:
Warning 1624 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' sec. A sensible value for the period should be less than the timeout.
Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
RESET SLAVE;
*** CHANGE MASTER statement only updates slave_heartbeat_period ***
@ -140,7 +140,7 @@ Slave_heartbeat_period 0.001
RESET SLAVE;
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_PORT, MASTER_USER='root', MASTER_HEARTBEAT_PERIOD=0.0009;
Warnings:
Warning 1624 The requested value for the heartbeat period is less than 1 msec. The period is reset to zero which means no heartbeats will be sending
Warning 1703 The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled.
SHOW GLOBAL STATUS LIKE 'slave_heartbeat_period';
Variable_name Value
Slave_heartbeat_period 0.000
@ -149,19 +149,19 @@ RESET SLAVE;
*** Max slave_heartbeat_timeout ***
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_PORT, MASTER_USER='root', MASTER_HEARTBEAT_PERIOD=4294967;
Warnings:
Warning 1624 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' sec. A sensible value for the period should be less than the timeout.
Warning 1704 The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout.
SHOW GLOBAL STATUS LIKE 'slave_heartbeat_period';
Variable_name Value
Slave_heartbeat_period 4294967.000
RESET SLAVE;
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_PORT, MASTER_USER='root', MASTER_HEARTBEAT_PERIOD=4294968;
ERROR HY000: The requested value for the heartbeat period is negative or exceeds the maximum 4294967 seconds
ERROR HY000: The requested value for the heartbeat period is either negative or exceeds the maximum allowed (4294967 seconds).
RESET SLAVE;
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_PORT, MASTER_USER='root', MASTER_HEARTBEAT_PERIOD=8589935;
ERROR HY000: The requested value for the heartbeat period is negative or exceeds the maximum 4294967 seconds
ERROR HY000: The requested value for the heartbeat period is either negative or exceeds the maximum allowed (4294967 seconds).
RESET SLAVE;
CHANGE MASTER TO MASTER_HOST='127.0.0.1', MASTER_PORT=MASTER_PORT, MASTER_USER='root', MASTER_HEARTBEAT_PERIOD=4294967296;
ERROR HY000: The requested value for the heartbeat period is negative or exceeds the maximum 4294967 seconds
ERROR HY000: The requested value for the heartbeat period is either negative or exceeds the maximum allowed (4294967 seconds).
RESET SLAVE;
*** Misc incorrect values ***

View File

@ -458,3 +458,9 @@ SELECT 2 DIV -2;
SELECT -(1 DIV 0);
# Crashed the server with SIGFPE before the bugfix
SELECT -9223372036854775808 MOD -1;
--echo #
--echo # Bug #57209 valgrind + Assertion failed: dst > buf
--echo #
SELECT floor(log10(format(concat_ws(5445796E25, 5306463, 30837), -358821)))
as foo;

View File

@ -23,10 +23,13 @@ drop database if exists client_test_db;
# Bug #13783 mysqlcheck tries to optimize and analyze information_schema
#
--replace_result 'Table is already up to date' OK
--exec $MYSQL_CHECK --all-databases --analyze --optimize
--exec $MYSQL_CHECK --all-databases --analyze
--exec $MYSQL_CHECK --all-databases --optimize
--replace_result 'Table is already up to date' OK
--exec $MYSQL_CHECK --analyze --optimize --databases test information_schema mysql
--exec $MYSQL_CHECK --analyze --optimize information_schema schemata
--exec $MYSQL_CHECK --analyze --databases test information_schema mysql
--exec $MYSQL_CHECK --optimize --databases test information_schema mysql
--exec $MYSQL_CHECK --analyze information_schema schemata
--exec $MYSQL_CHECK --optimize information_schema schemata
#
# Bug #16502: mysqlcheck tries to check views
@ -34,9 +37,11 @@ drop database if exists client_test_db;
create table t1 (a int);
create view v1 as select * from t1;
--replace_result 'Table is already up to date' OK
--exec $MYSQL_CHECK --analyze --optimize --databases test
--exec $MYSQL_CHECK --analyze --databases test
--exec $MYSQL_CHECK --optimize --databases test
--replace_result 'Table is already up to date' OK
--exec $MYSQL_CHECK --all-in-1 --analyze --optimize --databases test
--exec $MYSQL_CHECK --all-in-1 --analyze --databases test
--exec $MYSQL_CHECK --all-in-1 --optimize --databases test
drop view v1;
drop table t1;
@ -113,7 +118,8 @@ show tables;
let $MYSQLD_DATADIR= `select @@datadir`;
--copy_file $MYSQLD_DATADIR/test/v1.frm $MYSQLD_DATADIR/test/v-1.frm
show tables;
--exec $MYSQL_CHECK --check-upgrade --fix-table-names --databases test
--exec $MYSQL_CHECK --check-upgrade --databases test
--exec $MYSQL_CHECK --fix-table-names --databases test
show tables;
drop view v1, `v-1`;
drop table t1;
@ -212,3 +218,14 @@ show tables like 't1-1';
drop table `t1-1`;
--echo End of 5.1 tests
--echo #
--echo # Bug #35269: mysqlcheck behaves different depending on order of parameters
--echo #
--error 13
--exec $MYSQL_CHECK -a --fix-table-names test "#mysql50#t1-1"
--error 1
--exec $MYSQL_CHECK -aoc test "#mysql50#t1-1"

View File

@ -0,0 +1,3 @@
$EXAMPLE_PLUGIN_OPT
$EXAMPLE_PLUGIN_LOAD
--loose-plugin-example=FORCE_PLUS_PERMANENT

View File

@ -0,0 +1,8 @@
--source include/not_windows_embedded.inc
--source include/have_example_plugin.inc
--error ER_PLUGIN_IS_PERMANENT
UNINSTALL PLUGIN example;
SELECT PLUGIN_NAME, PLUGIN_STATUS, LOAD_OPTION FROM INFORMATION_SCHEMA.PLUGINS
WHERE PLUGIN_NAME IN ('MyISAM', 'EXAMPLE');

View File

@ -1,6 +1,6 @@
--source include/not_embedded.inc
--source include/have_query_cache.inc
--source include/have_debug.inc
--source include/have_debug_sync.inc
#
# Bug #30887 Server crashes on SET GLOBAL query_cache_size=0
@ -18,12 +18,11 @@ connect (bug30887con2, localhost, root, ,test);
connection bug30887con1;
--echo Activate debug hook and attempt to retrieve the statement from the cache.
set session debug='+d,wait_in_query_cache_insert';
set debug_sync="wait_in_query_cache_insert SIGNAL parked WAIT_FOR go";
--send select SQL_CACHE * from t1;
connection default;
let $wait_condition= select count(*)= 1 from information_schema.processlist where state= 'wait_in_query_cache_insert';
--source include/wait_condition.inc
set debug_sync="now WAIT_FOR parked";
connection bug30887con2;
--echo On a second connection; clear the query cache.
@ -32,14 +31,18 @@ set global query_cache_size= 0;
connection default;
--echo Signal the debug hook to release the lock.
select id from information_schema.processlist where state='wait_in_query_cache_insert' into @thread_id;
kill query @thread_id;
set debug_sync="now SIGNAL go";
--echo Show query cache status.
show status like 'Qcache_queries_in_cache';
connection bug30887con1;
--reap
disconnect bug30887con1;
disconnect bug30887con2;
connection default;
set debug_sync= 'RESET';
set global query_cache_size= 0;
use test;
drop table t1;
@ -67,18 +70,14 @@ connect(con2,localhost,root,,test,,);
connection con1;
--echo # Switch to connection con1
SET SESSION debug='+d,wait_after_query_cache_invalidate';
SET DEBUG_SYNC = "wait_after_query_cache_invalidate SIGNAL parked WAIT_FOR go";
--echo # Send concurrent insert, will wait in the query cache table invalidate
--send INSERT INTO t1 VALUES (4)
connection default;
--echo # Switch to connection default
--echo # Wait for concurrent insert to reach the debug point
let $wait_condition=
SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE STATE = "wait_after_query_cache_invalidate" AND
INFO = "INSERT INTO t1 VALUES (4)";
--source include/wait_condition.inc
SET DEBUG_SYNC = "now WAIT_FOR parked";
connection con2;
--echo # Switch to connection con2
@ -88,9 +87,7 @@ SELECT * FROM t1;
connection default;
--echo # Switch to connection default
--echo # Notify the concurrent insert to proceed
SELECT ID FROM INFORMATION_SCHEMA.PROCESSLIST
WHERE STATE = 'wait_after_query_cache_invalidate' INTO @thread_id;
KILL QUERY @thread_id;
SET DEBUG_SYNC = "now SIGNAL go";
connection con1;
--echo # Switch to connection con1
@ -107,6 +104,7 @@ disconnect con2;
connection default;
--echo # Restore defaults
SET DEBUG_SYNC= 'RESET';
RESET QUERY CACHE;
DROP TABLE t1,t2;
SET GLOBAL concurrent_insert= DEFAULT;
@ -157,15 +155,14 @@ SELECT SQL_CACHE * FROM t1;
--echo ** before the mutex lock in invalidate_table_internal.
--echo ** This will allow new result sets to be written into the QC.
--echo **
SET SESSION debug='+d,wait_in_query_cache_invalidate1';
SET SESSION debug='+d,wait_in_query_cache_invalidate2';
SET DEBUG_SYNC="wait_in_query_cache_invalidate1 SIGNAL parked1_1 WAIT_FOR go1_1";
SET DEBUG_SYNC="wait_in_query_cache_invalidate2 SIGNAL parked1_2 WAIT_FOR go1_2";
--send DELETE FROM t1 WHERE a like '%a%';
connection default;
--echo =================================== Connection default
--echo ** Assert that the expect process status is obtained.
LET $wait_condition= SELECT SQL_NO_CACHE COUNT(*)= 1 FROM information_schema.processlist WHERE state= 'wait_in_query_cache_invalidate1';
--source include/wait_condition.inc
SET DEBUG_SYNC="now WAIT_FOR parked1_1";
-- echo **
connection thd2;
@ -173,32 +170,36 @@ connection thd2;
--echo ** On THD2: Insert a result into the cache. This attempt will be blocked
--echo ** because of a debug hook placed just before the mutex lock after which
--echo ** the first part of the result set is written.
SET SESSION debug='+d,wait_in_query_cache_insert';
SET DEBUG_SYNC="wait_in_query_cache_insert SIGNAL parked2 WAIT_FOR go2 EXECUTE 1";
--send SELECT SQL_CACHE * FROM t2 UNION SELECT * FROM t3
connection default;
--echo =================================== Connection default
--echo ** Assert that the SELECT-stmt thread reaches the sync point.
SET DEBUG_SYNC="now WAIT_FOR parked2";
--echo **
--echo **
connection thd3;
--echo =================================== Connection thd3
--echo ** On THD3: Insert another result into the cache and block on the same
--echo ** debug hook.
SET SESSION debug='+d,wait_in_query_cache_insert';
--send SELECT SQL_CACHE * FROM t4 UNION SELECT * FROM t5;
SET DEBUG_SYNC="wait_in_query_cache_insert SIGNAL parked3 WAIT_FOR go3 EXECUTE 1";
--send SELECT SQL_CACHE * FROM t4 UNION SELECT * FROM t5
connection default;
--echo =================================== Connection default
--echo ** Assert that the two SELECT-stmt threads to reach the hook.
LET $wait_condition= SELECT SQL_NO_CACHE COUNT(*)= 2 FROM information_schema.processlist WHERE state='wait_in_query_cache_insert';
--source include/wait_condition.inc
--echo ** Assert that the SELECT-stmt thread reaches the sync point.
SET DEBUG_SYNC="now WAIT_FOR parked3";
--echo **
--echo **
--echo ** Signal the DELETE thread, THD1, to continue. It will enter the mutex
--echo ** lock and set query cache status to TABLE_FLUSH_IN_PROGRESS and then
--echo ** unlock the mutex before stopping on the next debug hook.
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_invalidate1' LIMIT 1 INTO @flush_thread_id;
KILL QUERY @flush_thread_id;
SET DEBUG_SYNC="now SIGNAL go1_1";
--echo ** Assert that we reach the next debug hook.
LET $wait_condition= SELECT SQL_NO_CACHE COUNT(*)= 1 FROM information_schema.processlist WHERE state='wait_in_query_cache_invalidate2';
--source include/wait_condition.inc
SET DEBUG_SYNC="now WAIT_FOR parked1_2";
--echo **
--echo ** Signal the remaining debug hooks blocking THD2 and THD3.
@ -206,10 +207,8 @@ LET $wait_condition= SELECT SQL_NO_CACHE COUNT(*)= 1 FROM information_schema.pro
--echo ** and finally release the mutex. The threads will continue to wait
--echo ** until a broadcast signal reaches them causing both threads to
--echo ** come alive and check the condition.
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_insert' ORDER BY id ASC LIMIT 1 INTO @thread_id;
KILL QUERY @thread_id;
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_insert' ORDER BY id DESC LIMIT 1 INTO @thread_id;
KILL QUERY @thread_id;
SET DEBUG_SYNC="now SIGNAL go2";
SET DEBUG_SYNC="now SIGNAL go3";
--echo **
--echo ** Finally signal the DELETE statement on THD1 one last time.
@ -218,11 +217,7 @@ KILL QUERY @thread_id;
--echo ** One signal will be sent to the thread group waiting for executing
--echo ** invalidations and a broadcast signal will be sent to the thread
--echo ** group holding result set writers.
SELECT SQL_NO_CACHE id FROM information_schema.processlist WHERE state='wait_in_query_cache_invalidate2' LIMIT 1 INTO @flush_thread_id;
KILL QUERY @flush_thread_id;
LET $wait_condition= SELECT SQL_NO_CACHE COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE Id = @flush_thread_id AND Command = 'Sleep';
--source include/wait_condition.inc
SET DEBUG_SYNC="now SIGNAL go1_2";
--echo **
--echo *************************************************************************
@ -250,6 +245,7 @@ connection default;
disconnect thd1;
disconnect thd2;
disconnect thd3;
SET DEBUG_SYNC= 'RESET';
SET GLOBAL query_cache_size= 0;
connection default;
@ -259,4 +255,66 @@ FLUSH STATUS;
DROP TABLE t1,t2,t3,t4,t5;
SET GLOBAL query_cache_size= DEFAULT;
SET GLOBAL query_cache_type= DEFAULT;
exit;
--echo #
--echo # Bug#56822: Add a thread state for sessions waiting on the query cache lock
--echo #
SET @old_query_cache_size= @@GLOBAL.query_cache_size;
--disable_warnings
DROP TABLE IF EXISTS t1;
--enable_warnings
CREATE TABLE t1 (a INT);
INSERT INTO t1 VALUES (1),(2),(3);
SET GLOBAL concurrent_insert= 1;
SET GLOBAL query_cache_size= 1024*512;
SET GLOBAL query_cache_type= ON;
connect(con1,localhost,root,,test,,);
connect(con2,localhost,root,,test,,);
connection con1;
--echo # Switch to connection con1
SET DEBUG_SYNC = "wait_in_query_cache_invalidate2 SIGNAL parked WAIT_FOR go";
--echo # Send INSERT, will wait in the query cache table invalidation
--send INSERT INTO t1 VALUES (4);
connection default;
--echo # Switch to connection default
--echo # Wait for insert to reach the debug point
SET DEBUG_SYNC = "now WAIT_FOR parked";
connection con2;
--echo # Switch to connection con2
--echo # Send a query that should wait on the query cache lock
--send RESET QUERY CACHE
connection default;
--echo # Switch to connection default
--echo # Wait for the state to be reflected in the processlist
let $wait_condition=
SELECT COUNT(*) = 1 FROM information_schema.processlist
WHERE state = "Waiting for query cache lock" AND info = "RESET QUERY CACHE";
--source include/wait_condition.inc
--echo # Signal that the query cache can be unlocked
SET DEBUG_SYNC="now SIGNAL go";
connection con1;
--echo # Reap con1 and disconnect
--reap
disconnect con1;
connection con2;
--echo # Reap con2 and disconnect
--reap
disconnect con2;
connection default;
--echo # Restore defaults
SET DEBUG_SYNC= 'RESET';
RESET QUERY CACHE;
DROP TABLE t1;
SET GLOBAL query_cache_size= DEFAULT;
SET GLOBAL query_cache_type= DEFAULT;

View File

@ -484,6 +484,16 @@ explain select * from t2 where f1=STR_TO_DATE('4/1/2010', '%m/%d/%Y');
DROP TABLE t1,t2;
--echo #
--echo # Bug#57095: Wrongly chosen expression cache type led to a wrong
--echo # result.
--echo #
CREATE TABLE t1 (`b` datetime );
INSERT INTO t1 VALUES ('2010-01-01 00:00:00'), ('2010-01-01 00:00:00');
SELECT * FROM t1 WHERE b <= coalesce(NULL, now());
DROP TABLE t1;
--echo #
--echo #
--echo # End of 5.5 tests
--echo #

View File

@ -1,15 +1,15 @@
/* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License as
published by the Free Software Foundation; version 2 of the
License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
@ -181,7 +181,7 @@ mysql_declare_plugin_end;
To support all this variety, the dialog plugin has a callback function
"authentication_dialog_ask". If the client has a function of this name
dialog plugin will use it for communication with the user. Otherwise
a default gets() based implementation will be used.
a default fgets() based implementation will be used.
*/
/**
@ -208,12 +208,15 @@ static mysql_authentication_dialog_ask_t ask;
static char *builtin_ask(MYSQL *mysql __attribute__((unused)),
int type __attribute__((unused)),
const char *prompt,
char *buf, int buf_len __attribute__((unused)))
char *buf, int buf_len)
{
char *ptr;
fputs(prompt, stdout);
fputc(' ', stdout);
if (gets(buf) == 0)
return 0;
if (fgets(buf, buf_len, stdin) == NULL)
return NULL;
if ((ptr= strchr(buf, '\n')))
*ptr= 0;
return buf;
}

View File

@ -1783,8 +1783,7 @@ bool agg_item_set_converter(DTCollation &coll, const char *fname,
In case we're in statement prepare, create conversion item
in its memory: it will be reused on each execute.
*/
arena= thd->is_stmt_prepare() ? thd->activate_stmt_arena_if_needed(&backup)
: NULL;
arena= thd->activate_stmt_arena_if_needed(&backup);
for (i= 0, arg= args; i < nargs; i++, arg+= item_sep)
{
@ -7356,9 +7355,11 @@ Item_cache* Item_cache::get_cache(const Item *item, const Item_result type)
case DECIMAL_RESULT:
return new Item_cache_decimal();
case STRING_RESULT:
if (item->field_type() == MYSQL_TYPE_DATE ||
item->field_type() == MYSQL_TYPE_DATETIME ||
item->field_type() == MYSQL_TYPE_TIME)
/* Not all functions that return DATE/TIME are actually DATE/TIME funcs. */
if ((item->field_type() == MYSQL_TYPE_DATE ||
item->field_type() == MYSQL_TYPE_DATETIME ||
item->field_type() == MYSQL_TYPE_TIME) &&
(const_cast<Item*>(item))->result_as_longlong())
return new Item_cache_datetime(item->field_type());
return new Item_cache_str(item);
case ROW_RESULT:

View File

@ -2299,7 +2299,8 @@ String *Item_func_format::val_str_ascii(String *str)
if (lc->grouping[0] > 0 &&
str_length >= dec_length + 1 + lc->grouping[0])
{
char buf[DECIMAL_MAX_STR_LENGTH * 2]; /* 2 - in the worst case when grouping=1 */
/* We need space for ',' between each group of digits as well. */
char buf[2 * FLOATING_POINT_BUFFER];
int count;
const char *grouping= lc->grouping;
char sign_length= *str->ptr() == '-' ? 1 : 0;
@ -2323,7 +2324,7 @@ String *Item_func_format::val_str_ascii(String *str)
count will be initialized to -1 and
we'll never get into this "if" anymore.
*/
if (!count)
if (count == 0)
{
*--dst= lc->thousand_sep;
if (grouping[1])
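
The new buffer comment is the heart of the fix: when the locale's grouping size is 1, a separator lands after nearly every digit, so the grouped string can approach twice the length of the plain digits. A small sketch of that grouping logic (the group_digits() helper and its grouping-array convention are assumptions for illustration, not the server's code):

#include <iostream>
#include <string>

/* Hypothetical illustration of locale digit grouping: each byte in
   "grouping" is a group size and the last size repeats.  With a group
   size of 1 a separator follows nearly every digit, which is why the
   server sizes its buffer at twice the unformatted length. */
static std::string group_digits(const std::string &digits,
                                const char *grouping, char sep = ',')
{
  std::string out;
  int count = *grouping;                     /* digits left in current group */
  for (std::string::size_type i = digits.size(); i-- > 0; )
  {
    out.insert(out.begin(), digits[i]);
    if (--count == 0 && i > 0)
    {
      out.insert(out.begin(), sep);
      if (grouping[1])
        ++grouping;                          /* advance to the next group size */
      count = *grouping;
    }
  }
  return out;
}

int main()
{
  std::cout << group_digits("1234567", "\3") << '\n';   /* 1,234,567 */
  std::cout << group_digits("1234567", "\1") << '\n';   /* 1,2,3,4,5,6,7 */
  return 0;
}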

View File

@ -3202,6 +3202,11 @@ static int init_common_variables()
return 1;
set_server_version();
#ifndef EMBEDDED_LIBRARY
if (opt_help && !opt_verbose)
unireg_abort(0);
#endif /*!EMBEDDED_LIBRARY*/
DBUG_PRINT("info",("%s Ver %s for %s on %s\n",my_progname,
server_version, SYSTEM_TYPE,MACHINE_TYPE));
@ -3239,12 +3244,11 @@ static int init_common_variables()
desired page sizes.
*/
int nelem;
int max_desired_page_size;
int max_page_size;
size_t max_desired_page_size;
if (opt_super_large_pages)
max_page_size= SUPER_LARGE_PAGESIZE;
max_desired_page_size= SUPER_LARGE_PAGESIZE;
else
max_page_size= LARGE_PAGESIZE;
max_desired_page_size= LARGE_PAGESIZE;
nelem = getpagesizes(NULL, 0);
if (nelem > 0)
{

View File

@ -57,6 +57,13 @@ struct scheduler_functions
*/
enum scheduler_types
{
/*
The default of --thread-handling is the first entry in the
thread_handling_names array; the order of this enum has to stay
consistent with that array, so to change the default one has to
change both the first entry in this enum and the first entry in
the thread_handling_names array.
*/
SCHEDULER_ONE_THREAD_PER_CONNECTION=0,
SCHEDULER_NO_THREADS,
SCHEDULER_TYPES_COUNT

View File

@ -6152,7 +6152,7 @@ ER_WARN_ENGINE_TRANSACTION_ROLLBACK
ER_SLAVE_HEARTBEAT_FAILURE
eng "Unexpected master's heartbeat data: %s"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE
eng "The requested value for the heartbeat period %s %s"
eng "The requested value for the heartbeat period is either negative or exceeds the maximum allowed (%s seconds)."
ER_NDB_REPLICATION_SCHEMA_ERROR
eng "Bad schema for mysql.ndb_replication table. Message: %-.64s"
@ -6382,3 +6382,13 @@ ER_GRANT_PLUGIN_USER_EXISTS
ER_TRUNCATE_ILLEGAL_FK 42000
eng "Cannot truncate a table referenced in a foreign key constraint (%.192s)"
ER_PLUGIN_IS_PERMANENT
eng "Plugin '%s' is force_plus_permanent and can not be unloaded"
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN
eng "The requested value for the heartbeat period is less than 1 millisecond. The value is reset to 0, meaning that heartbeating will effectively be disabled."
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX
eng "The requested value for the heartbeat period exceeds the value of `slave_net_timeout' seconds. A sensible value for the period should be less than the timeout."

View File

@ -334,6 +334,7 @@ TODO list:
#include "tztime.h" // struct Time_zone
#include "sql_acl.h" // SELECT_ACL
#include "sql_base.h" // TMP_TABLE_KEY_EXTRA
#include "debug_sync.h" // DEBUG_SYNC
#ifdef HAVE_QUERY_CACHE
#include <m_ctype.h>
#include <my_dir.h>
@ -371,32 +372,6 @@ TODO list:
__LINE__,(ulong)(B)));B->query()->unlock_reading();}
#define DUMP(C) DBUG_EXECUTE("qcache", {\
(C)->cache_dump(); (C)->queries_dump();(C)->tables_dump();})
/**
Causes the thread to wait in a spin lock for a query kill signal.
This function is used by the test frame work to identify race conditions.
The signal is caught and ignored and the thread is not killed.
*/
static void debug_wait_for_kill(const char *info)
{
DBUG_ENTER("debug_wait_for_kill");
const char *prev_info;
THD *thd;
thd= current_thd;
prev_info= thd->proc_info;
thd->proc_info= info;
sql_print_information("%s", info);
while(!thd->killed)
my_sleep(1000);
thd->killed= THD::NOT_KILLED;
sql_print_information("Exit debug_wait_for_kill");
thd->proc_info= prev_info;
DBUG_VOID_RETURN;
}
#else
#define RW_WLOCK(M) mysql_rwlock_wrlock(M)
#define RW_RLOCK(M) mysql_rwlock_rdlock(M)
@ -408,6 +383,51 @@ static void debug_wait_for_kill(const char *info)
#define DUMP(C)
#endif
/**
Macro that executes the requested action at a synchronization point
only if the thread has a associated THD session.
*/
#if defined(ENABLED_DEBUG_SYNC)
#define QC_DEBUG_SYNC(name) \
do { \
THD *thd= current_thd; \
if (thd) \
DEBUG_SYNC(thd, name); \
} while (0)
#else
#define QC_DEBUG_SYNC(name)
#endif
/**
Thread state to be used when the query cache lock needs to be acquired.
Sets the thread state name in the constructor, resets on destructor.
*/
struct Query_cache_wait_state
{
THD *m_thd;
const char *m_proc_info;
Query_cache_wait_state(THD *thd, const char *func,
const char *file, unsigned int line)
: m_thd(thd)
{
if (m_thd)
m_proc_info= set_thd_proc_info(m_thd,
"Waiting for query cache lock",
func, file, line);
}
~Query_cache_wait_state()
{
if (m_thd)
set_thd_proc_info(m_thd, m_proc_info, NULL, NULL, 0);
}
};
/**
Serialize access to the query cache.
If the lock cannot be granted the thread hangs in a conditional wait which
@ -429,6 +449,8 @@ static void debug_wait_for_kill(const char *info)
bool Query_cache::try_lock(bool use_timeout)
{
bool interrupt= FALSE;
THD *thd= current_thd;
Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);
DBUG_ENTER("Query_cache::try_lock");
mysql_mutex_lock(&structure_guard_mutex);
@ -438,7 +460,6 @@ bool Query_cache::try_lock(bool use_timeout)
{
m_cache_lock_status= Query_cache::LOCKED;
#ifndef DBUG_OFF
THD *thd= current_thd;
if (thd)
m_cache_lock_thread_id= thd->thread_id;
#endif
@ -497,6 +518,8 @@ bool Query_cache::try_lock(bool use_timeout)
void Query_cache::lock_and_suspend(void)
{
THD *thd= current_thd;
Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);
DBUG_ENTER("Query_cache::lock_and_suspend");
mysql_mutex_lock(&structure_guard_mutex);
@ -504,7 +527,6 @@ void Query_cache::lock_and_suspend(void)
mysql_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
m_cache_lock_status= Query_cache::LOCKED_NO_WAIT;
#ifndef DBUG_OFF
THD *thd= current_thd;
if (thd)
m_cache_lock_thread_id= thd->thread_id;
#endif
@ -525,6 +547,8 @@ void Query_cache::lock_and_suspend(void)
void Query_cache::lock(void)
{
THD *thd= current_thd;
Query_cache_wait_state wait_state(thd, __func__, __FILE__, __LINE__);
DBUG_ENTER("Query_cache::lock");
mysql_mutex_lock(&structure_guard_mutex);
@ -532,7 +556,6 @@ void Query_cache::lock(void)
mysql_cond_wait(&COND_cache_status_changed, &structure_guard_mutex);
m_cache_lock_status= Query_cache::LOCKED;
#ifndef DBUG_OFF
THD *thd= current_thd;
if (thd)
m_cache_lock_thread_id= thd->thread_id;
#endif
@ -872,9 +895,7 @@ Query_cache::insert(Query_cache_tls *query_cache_tls,
if (is_disabled() || query_cache_tls->first_query_block == NULL)
DBUG_VOID_RETURN;
DBUG_EXECUTE_IF("wait_in_query_cache_insert",
debug_wait_for_kill("wait_in_query_cache_insert"); );
QC_DEBUG_SYNC("wait_in_query_cache_insert");
if (try_lock())
DBUG_VOID_RETURN;
@ -1779,8 +1800,7 @@ void Query_cache::invalidate(THD *thd, TABLE_LIST *tables_used,
invalidate_table(thd, tables_used);
}
DBUG_EXECUTE_IF("wait_after_query_cache_invalidate",
debug_wait_for_kill("wait_after_query_cache_invalidate"););
DEBUG_SYNC(thd, "wait_after_query_cache_invalidate");
DBUG_VOID_RETURN;
}
@ -1971,8 +1991,7 @@ void Query_cache::flush()
if (is_disabled())
DBUG_VOID_RETURN;
DBUG_EXECUTE_IF("wait_in_query_cache_flush1",
debug_wait_for_kill("wait_in_query_cache_flush1"););
QC_DEBUG_SYNC("wait_in_query_cache_flush1");
lock_and_suspend();
if (query_cache_size > 0)
@ -2312,9 +2331,7 @@ void Query_cache::free_cache()
void Query_cache::flush_cache()
{
DBUG_EXECUTE_IF("wait_in_query_cache_flush2",
debug_wait_for_kill("wait_in_query_cache_flush2"););
QC_DEBUG_SYNC("wait_in_query_cache_flush2");
my_hash_reset(&queries);
while (queries_blocks != 0)
@ -2760,8 +2777,7 @@ void Query_cache::invalidate_table(THD *thd, TABLE *table)
void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length)
{
DBUG_EXECUTE_IF("wait_in_query_cache_invalidate1",
debug_wait_for_kill("wait_in_query_cache_invalidate1"); );
DEBUG_SYNC(thd, "wait_in_query_cache_invalidate1");
/*
Lock the query cache and queue all invalidation attempts to avoid
@ -2769,9 +2785,7 @@ void Query_cache::invalidate_table(THD *thd, uchar * key, uint32 key_length)
*/
lock();
DBUG_EXECUTE_IF("wait_in_query_cache_invalidate2",
debug_wait_for_kill("wait_in_query_cache_invalidate2"); );
DEBUG_SYNC(thd, "wait_in_query_cache_invalidate2");
if (query_cache_size > 0)
invalidate_table_internal(thd, key, key_length);
@ -2821,7 +2835,6 @@ Query_cache::invalidate_query_block_list(THD *thd,
Query_cache_block *query_block= list_root->next->block();
BLOCK_LOCK_WR(query_block);
free_query(query_block);
DBUG_EXECUTE_IF("debug_cache_locks", sleep(10););
}
}
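
The Query_cache_wait_state helper introduced above is a plain RAII guard around the session's proc_info string: it publishes "Waiting for query cache lock" on construction and restores the previous state on destruction. A self-contained sketch of the same pattern, with a hypothetical set_state() standing in for set_thd_proc_info():

#include <cstdio>

/* Hypothetical stand-in for the server's thread-state bookkeeping. */
static const char *current_state = "idle";

static const char *set_state(const char *new_state)
{
  const char *old = current_state;
  current_state = new_state;
  return old;
}

/* RAII guard: publish a state on entry, restore the previous one on exit,
   mirroring the Query_cache_wait_state idea from the diff above. */
struct Scoped_state
{
  const char *saved;
  explicit Scoped_state(const char *state) : saved(set_state(state)) {}
  ~Scoped_state() { set_state(saved); }
};

int main()
{
  std::printf("before: %s\n", current_state);
  {
    Scoped_state guard("Waiting for query cache lock");
    std::printf("inside: %s\n", current_state);
  }
  std::printf("after:  %s\n", current_state);
  return 0;
}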

View File

@ -42,9 +42,8 @@ extern struct st_mysql_plugin *mysql_mandatory_plugins[];
@note The order of the enumeration is critical.
@see construct_options
*/
static const char *global_plugin_typelib_names[]=
{ "OFF", "ON", "FORCE", NULL };
enum enum_plugin_load_policy {PLUGIN_OFF, PLUGIN_ON, PLUGIN_FORCE};
const char *global_plugin_typelib_names[]=
{ "OFF", "ON", "FORCE", "FORCE_PLUS_PERMANENT", NULL };
static TYPELIB global_plugin_typelib=
{ array_elements(global_plugin_typelib_names)-1,
"", global_plugin_typelib_names, NULL };
@ -800,6 +799,7 @@ static bool plugin_add(MEM_ROOT *tmp_root,
tmp.name.length= name_len;
tmp.ref_count= 0;
tmp.state= PLUGIN_IS_UNINITIALIZED;
tmp.load_option= PLUGIN_ON;
if (test_plugin_options(tmp_root, &tmp, argc, argv))
tmp.state= PLUGIN_IS_DISABLED;
@ -1241,7 +1241,7 @@ int plugin_init(int *argc, char **argv, int flags)
tmp.name.str= (char *)plugin->name;
tmp.name.length= strlen(plugin->name);
tmp.state= 0;
tmp.is_mandatory= mandatory;
tmp.load_option= mandatory ? PLUGIN_FORCE : PLUGIN_ON;
/*
If the performance schema is compiled in,
@ -1260,7 +1260,7 @@ int plugin_init(int *argc, char **argv, int flags)
to work, by using '--skip-performance-schema' (the plugin)
*/
if (!my_strcasecmp(&my_charset_latin1, plugin->name, "PERFORMANCE_SCHEMA"))
tmp.is_mandatory= true;
tmp.load_option= PLUGIN_FORCE;
free_root(&tmp_root, MYF(MY_MARK_BLOCKS_FREE));
if (test_plugin_options(&tmp_root, &tmp, argc, argv))
@ -1338,7 +1338,8 @@ int plugin_init(int *argc, char **argv, int flags)
while ((plugin_ptr= *(--reap)))
{
mysql_mutex_unlock(&LOCK_plugin);
if (plugin_ptr->is_mandatory)
if (plugin_ptr->load_option == PLUGIN_FORCE ||
plugin_ptr->load_option == PLUGIN_FORCE_PLUS_PERMANENT)
reaped_mandatory_plugin= TRUE;
plugin_deinitialize(plugin_ptr, true);
mysql_mutex_lock(&LOCK_plugin);
@ -1848,6 +1849,11 @@ bool mysql_uninstall_plugin(THD *thd, const LEX_STRING *name)
my_error(ER_SP_DOES_NOT_EXIST, MYF(0), "PLUGIN", name->str);
goto err;
}
if (plugin->load_option == PLUGIN_FORCE_PLUS_PERMANENT)
{
my_error(ER_PLUGIN_IS_PERMANENT, MYF(0), name->str);
goto err;
}
plugin->state= PLUGIN_IS_DELETED;
if (plugin->ref_count)
@ -3058,7 +3064,8 @@ static int construct_options(MEM_ROOT *mem_root, struct st_plugin_int *tmp,
plugin_dash.length + 1);
strxmov(plugin_name_with_prefix_ptr, plugin_dash.str, plugin_name_ptr, NullS);
if (!tmp->is_mandatory)
if (tmp->load_option != PLUGIN_FORCE &&
tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT)
{
/* support --skip-plugin-foo syntax */
options[0].name= plugin_name_ptr;
@ -3318,7 +3325,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
{
struct sys_var_chain chain= { NULL, NULL };
bool disable_plugin;
enum_plugin_load_policy plugin_load_policy= tmp->is_mandatory ? PLUGIN_FORCE : PLUGIN_ON;
enum_plugin_load_option plugin_load_option= tmp->load_option;
MEM_ROOT *mem_root= alloc_root_inited(&tmp->mem_root) ?
&tmp->mem_root : &plugin_mem_root;
@ -3339,7 +3346,7 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
*/
if (!(my_strcasecmp(&my_charset_latin1, tmp->name.str, "federated") &&
my_strcasecmp(&my_charset_latin1, tmp->name.str, "ndbcluster")))
plugin_load_policy= PLUGIN_OFF;
plugin_load_option= PLUGIN_OFF;
for (opt= tmp->plugin->system_vars; opt && *opt; opt++)
count+= 2; /* --{plugin}-{optname} and --plugin-{plugin}-{optname} */
@ -3363,8 +3370,9 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
We adjust the default value to account for the hardcoded exceptions
we have set for the federated and ndbcluster storage engines.
*/
if (!tmp->is_mandatory)
opts[0].def_value= opts[1].def_value= plugin_load_policy;
if (tmp->load_option != PLUGIN_FORCE &&
tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT)
opts[0].def_value= opts[1].def_value= plugin_load_option;
error= handle_options(argc, &argv, opts, NULL);
(*argc)++; /* add back one for the program name */
@ -3379,12 +3387,13 @@ static int test_plugin_options(MEM_ROOT *tmp_root, struct st_plugin_int *tmp,
Set plugin loading policy from option value. First element in the option
list is always the <plugin name> option value.
*/
if (!tmp->is_mandatory)
plugin_load_policy= (enum_plugin_load_policy)*(ulong*)opts[0].value;
if (tmp->load_option != PLUGIN_FORCE &&
tmp->load_option != PLUGIN_FORCE_PLUS_PERMANENT)
plugin_load_option= (enum_plugin_load_option) *(ulong*) opts[0].value;
}
disable_plugin= (plugin_load_policy == PLUGIN_OFF);
tmp->is_mandatory= (plugin_load_policy == PLUGIN_FORCE);
disable_plugin= (plugin_load_option == PLUGIN_OFF);
tmp->load_option= plugin_load_option;
/*
If the plugin is disabled it should not be initialized.

View File

@ -32,6 +32,9 @@
class sys_var;
enum SHOW_COMP_OPTION { SHOW_OPTION_YES, SHOW_OPTION_NO, SHOW_OPTION_DISABLED};
enum enum_plugin_load_option { PLUGIN_OFF, PLUGIN_ON, PLUGIN_FORCE,
PLUGIN_FORCE_PLUS_PERMANENT };
extern const char *global_plugin_typelib_names[];
#include <my_sys.h>
@ -95,7 +98,7 @@ struct st_plugin_int
void *data; /* plugin type specific, e.g. handlerton */
MEM_ROOT mem_root; /* memory for dynamic plugin structures */
sys_var *system_vars; /* server variables for this plugin */
bool is_mandatory; /* If true then plugin must not fail to load */
enum enum_plugin_load_option load_option; /* OFF, ON, FORCE, F+PERMANENT */
};
@ -110,6 +113,7 @@ typedef struct st_plugin_int *plugin_ref;
#define plugin_data(pi,cast) ((cast)((pi)->data))
#define plugin_name(pi) (&((pi)->name))
#define plugin_state(pi) ((pi)->state)
#define plugin_load_option(pi) ((pi)->load_option)
#define plugin_equals(p1,p2) ((p1) == (p2))
#else
typedef struct st_plugin_int **plugin_ref;
@ -118,6 +122,7 @@ typedef struct st_plugin_int **plugin_ref;
#define plugin_data(pi,cast) ((cast)((pi)[0]->data))
#define plugin_name(pi) (&((pi)[0]->name))
#define plugin_state(pi) ((pi)[0]->state)
#define plugin_load_option(pi) ((pi)[0]->load_option)
#define plugin_equals(p1,p2) ((p1) && (p2) && (p1)[0] == (p2)[0])
#endif

View File

@ -211,6 +211,11 @@ static my_bool show_plugins(THD *thd, plugin_ref plugin,
}
table->field[9]->set_notnull();
table->field[10]->store(
global_plugin_typelib_names[plugin_load_option(plugin)],
strlen(global_plugin_typelib_names[plugin_load_option(plugin)]),
cs);
return schema_table_store_record(thd, table);
}
@ -7214,6 +7219,7 @@ ST_FIELD_INFO plugin_fields_info[]=
{"PLUGIN_AUTHOR", NAME_CHAR_LEN, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"PLUGIN_DESCRIPTION", 65535, MYSQL_TYPE_STRING, 0, 1, 0, SKIP_OPEN_TABLE},
{"PLUGIN_LICENSE", 80, MYSQL_TYPE_STRING, 0, 1, "License", SKIP_OPEN_TABLE},
{"LOAD_OPTION", 64, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE},
{0, 0, MYSQL_TYPE_STRING, 0, 0, 0, SKIP_OPEN_TABLE}
};
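
show_plugins() indexes global_plugin_typelib_names[] directly with the plugin's load_option, so the enum and the name array must stay in lock step. A tiny sketch of that mapping (the main() driver is only illustrative):

#include <cstdio>

enum enum_plugin_load_option { PLUGIN_OFF, PLUGIN_ON, PLUGIN_FORCE,
                               PLUGIN_FORCE_PLUS_PERMANENT };

/* Mirrors the enum order, as global_plugin_typelib_names does above. */
static const char *load_option_names[] =
{ "OFF", "ON", "FORCE", "FORCE_PLUS_PERMANENT", 0 };

int main()
{
  enum_plugin_load_option opt = PLUGIN_FORCE_PLUS_PERMANENT;
  std::printf("LOAD_OPTION = %s\n", load_option_names[opt]);
  return 0;
}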

View File

@ -1933,35 +1933,28 @@ master_def:
| MASTER_HEARTBEAT_PERIOD_SYM EQ NUM_literal
{
Lex->mi.heartbeat_period= (float) $3->val_real();
if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD ||
Lex->mi.heartbeat_period < 0.0)
{
const char format[]= "%d seconds";
char buf[4*sizeof(SLAVE_MAX_HEARTBEAT_PERIOD) + sizeof(format)];
sprintf(buf, format, SLAVE_MAX_HEARTBEAT_PERIOD);
my_error(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
MYF(0), " is negative or exceeds the maximum ", buf);
MYSQL_YYABORT;
if (Lex->mi.heartbeat_period > SLAVE_MAX_HEARTBEAT_PERIOD ||
Lex->mi.heartbeat_period < 0.0)
{
const char format[]= "%d";
char buf[4*sizeof(SLAVE_MAX_HEARTBEAT_PERIOD) + sizeof(format)];
sprintf(buf, format, SLAVE_MAX_HEARTBEAT_PERIOD);
my_error(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE, MYF(0), buf);
MYSQL_YYABORT;
}
if (Lex->mi.heartbeat_period > slave_net_timeout)
{
push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE),
" exceeds the value of `slave_net_timeout' sec.",
" A sensible value for the period should be"
" less than the timeout.");
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX));
}
if (Lex->mi.heartbeat_period < 0.001)
{
if (Lex->mi.heartbeat_period != 0.0)
{
push_warning_printf(YYTHD, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE),
" is less than 1 msec.",
" The period is reset to zero which means"
" no heartbeats will be sending");
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MIN));
Lex->mi.heartbeat_period= 0.0;
}
Lex->mi.heartbeat_opt= LEX_MASTER_INFO::LEX_MI_DISABLE;
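
The rewritten grammar action enforces three rules: a hard error when the period is negative or above the 4294967-second maximum, a warning when it exceeds slave_net_timeout, and a warning plus reset to zero when it is below one millisecond. A hypothetical standalone version of those checks (check_heartbeat_period() is an assumed name, not the server's API):

#include <cstdio>

static const double SLAVE_MAX_HEARTBEAT_PERIOD = 4294967.0;  /* per the error text above */

/* Returns false on a hard error; may clamp the value and emit warnings. */
static bool check_heartbeat_period(double &period, double slave_net_timeout)
{
  if (period < 0.0 || period > SLAVE_MAX_HEARTBEAT_PERIOD)
  {
    std::fprintf(stderr, "ERROR: the requested value for the heartbeat period "
                         "is either negative or exceeds the maximum allowed "
                         "(%.0f seconds).\n", SLAVE_MAX_HEARTBEAT_PERIOD);
    return false;
  }
  if (period > slave_net_timeout)
    std::fprintf(stderr, "Warning: the heartbeat period exceeds "
                         "slave_net_timeout; it should be less than the timeout.\n");
  if (period != 0.0 && period < 0.001)
  {
    std::fprintf(stderr, "Warning: the heartbeat period is less than 1 "
                         "millisecond; resetting it to 0 (heartbeating disabled).\n");
    period = 0.0;
  }
  return true;
}

int main()
{
  double p = 0.0009;
  if (check_heartbeat_period(p, 5.0))   /* clamps p to 0 and warns */
    std::printf("heartbeat period = %g\n", p);
  return 0;
}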

View File

@ -2014,15 +2014,6 @@ static Sys_var_ulong Sys_thread_cache_size(
GLOBAL_VAR(thread_cache_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(0, 16384), DEFAULT(0), BLOCK_SIZE(1));
#if HAVE_POOL_OF_THREADS == 1
static Sys_var_ulong Sys_thread_pool_size(
"thread_pool_size",
"How many threads we should create to handle query requests in "
"case of 'thread_handling=pool-of-threads'",
GLOBAL_VAR(thread_pool_size), CMD_LINE(REQUIRED_ARG),
VALID_RANGE(1, 16384), DEFAULT(20), BLOCK_SIZE(0));
#endif
/**
Can't change the 'next' tx_isolation if we are already in a
transaction.
@ -2943,11 +2934,8 @@ static bool fix_slave_net_timeout(sys_var *self, THD *thd, enum_var_type type)
(active_mi? active_mi->heartbeat_period : 0.0)));
if (active_mi && slave_net_timeout < active_mi->heartbeat_period)
push_warning_printf(thd, MYSQL_ERROR::WARN_LEVEL_WARN,
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE,
"The current value for master_heartbeat_period"
" exceeds the new value of `slave_net_timeout' sec."
" A sensible value for the period should be"
" less than the timeout.");
ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX,
ER(ER_SLAVE_HEARTBEAT_VALUE_OUT_OF_RANGE_MAX));
mysql_mutex_unlock(&LOCK_active_mi);
return false;
}

View File

@ -458,12 +458,10 @@ class Sys_var_proxy_user: public sys_var
public:
Sys_var_proxy_user(const char *name_arg,
const char *comment, enum charset_enum is_os_charset_arg)
: sys_var(&all_sys_vars, name_arg, comment,
: sys_var(&all_sys_vars, name_arg, comment,
sys_var::READONLY+sys_var::ONLY_SESSION, 0, -1,
NO_ARG, SHOW_CHAR, (intptr)NULL,
0, VARIABLE_NOT_IN_BINLOG,
0, 0,
0, 0, PARSE_NORMAL)
NO_ARG, SHOW_CHAR, 0, NULL, VARIABLE_NOT_IN_BINLOG,
NULL, NULL, 0, NULL, PARSE_NORMAL)
{
is_os_charset= is_os_charset_arg == IN_FS_CHARSET;
option.var_type= GET_STR;

View File

@ -43,7 +43,7 @@ typedef enum {
IBUF_OP_DELETE = 2,
/* Number of different operation types. */
IBUF_OP_COUNT = 3,
IBUF_OP_COUNT = 3
} ibuf_op_t;
/** Combinations of operations that can be buffered. Because the enum

View File

@ -267,7 +267,7 @@ enum row_search_result {
secondary index leaf page was not in
the buffer pool, and the operation was
enqueued in the insert/delete buffer */
ROW_NOT_DELETED_REF, /*!< BTR_DELETE was specified, and
ROW_NOT_DELETED_REF /*!< BTR_DELETE was specified, and
row_purge_poss_sec() failed */
};

View File

@ -513,7 +513,7 @@ start:
arg_count= max(arg_count, arg_index);
goto start;
}
DBUG_ASSERT(0);
return 0;
}