Remove one of the major sources of race conditions in mariadb-test.

Normally, mariadb_close() sends COM_QUIT to the server and immediately disconnects. In mariadb-test this means the test can switch to another connection and send queries to the server before the server has even started parsing the COM_QUIT packet, and these queries can see the connection as fully active, because it hasn't reached dispatch_command yet.

This is a major source of instability in tests, and many tests (but not all, still fewer than half) employ workarounds. The correct one is the pair count_sessions.inc/wait_until_count_sessions.inc. Also very popular was wait_until_disconnected.inc, which was completely useless: it verifies that the connection is closed, and after a disconnect it always is; it did not verify whether the server had processed COM_QUIT. Sadly, the placebo was as widely used as the real thing.

Let's fix this by making the mariadb-test `disconnect` command _wait_ for the server to confirm. This makes almost all workarounds redundant. In some cases count_sessions.inc/wait_until_count_sessions.inc is still needed, though, as only the `disconnect` command is changed:

* after external tools, like `exec $MYSQL`
* after a failed `connect` command
* in replication, after `STOP SLAVE`
* with Federated/CONNECT/SPIDER/etc, after `DROP TABLE`

and also in some XA tests, because an XA transaction is dissociated from the THD very late, after the server has closed the client connection.

Collateral cleanups: fix comments, remove some redundant statements:

* DROP IF EXISTS when nothing is known to exist
* DROP table/view before DROP DATABASE
* REVOKE privileges before DROP USER

etc
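For reference, the correct workaround pattern mentioned above looks roughly like this (a minimal sketch; count_sessions.inc and wait_until_count_sessions.inc are the real include files named in this message, the connection name is illustrative):

# Save the current number of server sessions.
--source include/count_sessions.inc

--connect(con1,localhost,root,,)
# ... run queries on con1 ...
--disconnect con1

--connection default
# Wait until the session count drops back to the saved value, i.e. until
# the server has fully processed the COM_QUIT from the disconnect.
--source include/wait_until_count_sessions.inc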
--source include/have_innodb.inc
CREATE TABLE `t`(`id` INT, PRIMARY KEY(`id`)) ENGINE=InnoDB STATS_PERSISTENT=0;
INSERT INTO t VALUES (1);
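# A counter must be disabled before innodb_monitor_reset_all can reset it
# (including its count_reset value); re-enabling it restarts counting from
# zero, so the metrics below reflect only this test.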
SET GLOBAL innodb_monitor_disable="lock_row_lock_time";
SET GLOBAL innodb_monitor_disable="lock_row_lock_time_max";
SET GLOBAL innodb_monitor_reset_all='lock_row_lock_time';
SET GLOBAL innodb_monitor_reset_all='lock_row_lock_time_max';
SET GLOBAL innodb_monitor_enable="lock_row_lock_time";
SET GLOBAL innodb_monitor_enable="lock_row_lock_time_max";
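# Hold a row lock on t in the default connection, so that the
# SELECT ... FOR UPDATE from the second connection below has to wait.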
BEGIN;
SELECT * FROM t FOR UPDATE;
# We can't predict the (innodb/lock)_row_lock_time_avg value, because it is
# computed as the total waiting time divided by the number of waits, and the
# corresponding counters in lock_sys can't be reset with any query.
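# Remember the current cumulative innodb_row_lock_time in a user variable;
# its absolute value is nondeterministic, hence the hidden result.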
--disable_result_log
SELECT @innodb_row_lock_time_before := variable_value
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time';
--enable_result_log
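# A second connection tries to lock the same row and times out after
# roughly one second, accumulating row lock wait time in the counters
# checked below.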
--connect(con1,localhost,root,,)
SET innodb_lock_wait_timeout = 1;
--error ER_LOCK_WAIT_TIMEOUT
SELECT * FROM t FOR UPDATE;
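# With `disconnect` now waiting for the server to confirm (see the commit
# message above), no wait_until_count_sessions.inc is needed here.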
--disconnect con1
--connection default
COMMIT;
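# innodb_row_lock_time is reported in milliseconds, so the ~1 second lock
# wait above must have increased it by well over 100.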
SELECT variable_value - @innodb_row_lock_time_before > 100
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time';
# We can't use the 'variable_value - @innodb_row_lock_time_max_before' trick
# for innodb_row_lock_time_max, because it can't be reset and its initial
# value at the time of test execution is unknown.
SELECT variable_value > 100
FROM information_schema.global_status
WHERE LOWER(variable_name) = 'innodb_row_lock_time_max';
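# The INNODB_METRICS counters were reset with innodb_monitor_reset_all
# above, so count_reset should reflect only the lock wait in this test.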
SELECT count_reset > 100
FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME='lock_row_lock_time';
SELECT count_reset > 100
FROM INFORMATION_SCHEMA.INNODB_METRICS
WHERE NAME='lock_row_lock_time_max';
DROP TABLE t;
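# Restore the default monitor configuration; the --disable_warnings wrapper
# hides any warnings these assignments may produce.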
--disable_warnings
SET GLOBAL innodb_monitor_enable=default;
SET GLOBAL innodb_monitor_disable=default;
SET GLOBAL innodb_monitor_reset_all=default;
--enable_warnings