mirror of
https://github.com/MariaDB/server.git
synced 2025-08-07 00:04:31 +03:00
Remove one of the major sources of race conditions in mariadb-test. Normally, mariadb_close() sends COM_QUIT to the server and immediately disconnects. In mariadb-test it means the test can switch to another connection and sends queries to the server before the server even started parsing the COM_QUIT packet and these queries can see the connection as fully active, as it didn't reach dispatch_command yet. This is a major source of instability in tests and many - but not all, still less than a half - tests employ workarounds. The correct one is a pair count_sessions.inc/wait_until_count_sessions.inc. Also very popular was wait_until_disconnected.inc, which was completely useless, because it verifies that the connection is closed, and after disconnect it always is, it didn't verify whether the server processed COM_QUIT. Sadly the placebo was as widely used as the real thing. Let's fix this by making mariadb-test `disconnect` command _to wait_ for the server to confirm. This makes almost all workarounds redundant. In some cases count_sessions.inc/wait_until_count_sessions.inc is still needed, though, as only `disconnect` command is changed: * after external tools, like `exec $MYSQL` * after failed `connect` command * replication, after `STOP SLAVE` * Federated/CONNECT/SPIDER/etc after `DROP TABLE` and also in some XA tests, because an XA transaction is dissociated from the THD very late, after the server has closed the client connection. Collateral cleanups: fix comments, remove some redundant statements: * DROP IF EXISTS if nothing is known to exist * DROP table/view before DROP DATABASE * REVOKE privileges before DROP USER etc
177 lines
5.6 KiB
PHP
177 lines
5.6 KiB
PHP
--echo #
|
|
--echo # Testing savepoints with handlers that supports it
|
|
--echo #
|
|
connect(con1, localhost, root,,);
|
|
connect(con2, localhost, root,,);
|
|
connect(con3, localhost, root,,);
|
|
connection default;
|
|
--echo #
|
|
--echo # ROLLBACK TO SAVEPOINT releases transactional locks,
|
|
--echo # but has no effect on open HANDLERs
|
|
--echo #
|
|
create table t1 (a int, key a (a));
|
|
insert into t1 (a) values (1), (2), (3), (4), (5);
|
|
create table t2 like t1;
|
|
create table t3 like t1;
|
|
begin;
|
|
--echo # Have something before the savepoint
|
|
select * from t3;
|
|
savepoint sv;
|
|
handler t1 open;
|
|
handler t1 read a first;
|
|
handler t1 read a next;
|
|
select * from t2;
|
|
connection con1;
|
|
--echo # Sending:
|
|
--send drop table t1
|
|
connection con2;
|
|
--echo # Sending:
|
|
--send drop table t2
|
|
connection default;
|
|
--echo # Let DROP TABLE statements sync in. We must use
|
|
--echo # a separate connection for that, because otherwise SELECT
|
|
--echo # will auto-close the HANDLERs, becaues there are pending
|
|
--echo # exclusive locks against them.
|
|
connection con3;
|
|
--echo # Waiting for 'drop table t1' to get blocked...
|
|
let $wait_condition=select count(*)=1 from information_schema.processlist
|
|
where state='Waiting for table metadata lock' and
|
|
info='drop table t1';
|
|
--source include/wait_condition.inc
|
|
--echo # Waiting for 'drop table t2' to get blocked...
|
|
let $wait_condition=select count(*)=1 from information_schema.processlist
|
|
where state='Waiting for table metadata lock' and
|
|
info='drop table t2';
|
|
--source include/wait_condition.inc
|
|
--echo # Demonstrate that t2 lock was released and t2 was dropped
|
|
--echo # after ROLLBACK TO SAVEPOINT
|
|
connection default;
|
|
rollback to savepoint sv;
|
|
connection con2;
|
|
--echo # Reaping 'drop table t2'...
|
|
--reap
|
|
--echo # Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler
|
|
--echo # lock.
|
|
connection default;
|
|
handler t1 read a next;
|
|
handler t1 read a next;
|
|
--echo # Demonstrate that the drop will go through as soon as we close the
|
|
--echo # HANDLER
|
|
handler t1 close;
|
|
connection con1;
|
|
--echo # Reaping 'drop table t1'...
|
|
--reap
|
|
connection default;
|
|
commit;
|
|
drop table t3;
|
|
--echo #
|
|
--echo # A few special cases when using SAVEPOINT/ROLLBACK TO
|
|
--echo # SAVEPOINT and HANDLER.
|
|
--echo #
|
|
--echo # Show that rollback to the savepoint taken in the beginning
|
|
--echo # of the transaction doesn't release mdl lock on
|
|
--echo # the HANDLER that was opened later.
|
|
--echo #
|
|
create table t1 (a int, key using btree (a));
|
|
insert into t1 (a) values (1), (2), (3), (4), (5);
|
|
create table t2 like t1;
|
|
begin;
|
|
savepoint sv;
|
|
handler t1 open;
|
|
handler t1 read a first;
|
|
handler t1 read a next;
|
|
select * from t2;
|
|
connection con1;
|
|
--echo # Sending:
|
|
--send drop table t1
|
|
connection con2;
|
|
--echo # Sending:
|
|
--send drop table t2
|
|
connection default;
|
|
--echo # Let DROP TABLE statements sync in. We must use
|
|
--echo # a separate connection for that, because otherwise SELECT
|
|
--echo # will auto-close the HANDLERs, becaues there are pending
|
|
--echo # exclusive locks against them.
|
|
connection con3;
|
|
--echo # Waiting for 'drop table t1' to get blocked...
|
|
let $wait_condition=select count(*)=1 from information_schema.processlist
|
|
where state='Waiting for table metadata lock' and
|
|
info='drop table t1';
|
|
--source include/wait_condition.inc
|
|
--echo # Waiting for 'drop table t2' to get blocked...
|
|
let $wait_condition=select count(*)=1 from information_schema.processlist
|
|
where state='Waiting for table metadata lock' and
|
|
info='drop table t2';
|
|
--source include/wait_condition.inc
|
|
--echo # Demonstrate that t2 lock was released and t2 was dropped
|
|
--echo # after ROLLBACK TO SAVEPOINT
|
|
connection default;
|
|
rollback to savepoint sv;
|
|
connection con2;
|
|
--echo # Reaping 'drop table t2'...
|
|
--reap
|
|
--echo # Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler
|
|
--echo # lock.
|
|
connection default;
|
|
handler t1 read a next;
|
|
handler t1 read a next;
|
|
--echo # Demonstrate that the drop will go through as soon as we close the
|
|
--echo # HANDLER
|
|
handler t1 close;
|
|
connection con1;
|
|
--echo # Reaping 'drop table t1'...
|
|
--reap
|
|
connection default;
|
|
commit;
|
|
--echo #
|
|
--echo # Show that rollback to the savepoint taken in the beginning
|
|
--echo # of the transaction works properly (no valgrind warnins, etc),
|
|
--echo # even though it's done after the HANDLER mdl lock that was there
|
|
--echo # at the beginning is released and added again.
|
|
--echo #
|
|
create table t1 (a int, key using btree (a));
|
|
insert into t1 (a) values (1), (2), (3), (4), (5);
|
|
create table t2 like t1;
|
|
create table t3 like t1;
|
|
insert into t3 (a) select a from t1;
|
|
begin;
|
|
handler t1 open;
|
|
savepoint sv;
|
|
handler t1 read a first;
|
|
select * from t2;
|
|
handler t1 close;
|
|
handler t3 open;
|
|
handler t3 read a first;
|
|
rollback to savepoint sv;
|
|
connection con1;
|
|
drop table t1, t2;
|
|
--echo # Sending:
|
|
--send drop table t3
|
|
--echo # Let DROP TABLE statement sync in.
|
|
connection con2;
|
|
--echo # Waiting for 'drop table t3' to get blocked...
|
|
let $wait_condition=select count(*)=1 from information_schema.processlist
|
|
where state='Waiting for table metadata lock' and
|
|
info='drop table t3';
|
|
--source include/wait_condition.inc
|
|
--echo # Demonstrate that ROLLBACK TO SAVEPOINT didn't release the handler
|
|
--echo # lock.
|
|
connection default;
|
|
handler t3 read a next;
|
|
--echo # Demonstrate that the drop will go through as soon as we close the
|
|
--echo # HANDLER
|
|
handler t3 close;
|
|
connection con1;
|
|
--echo # Reaping 'drop table t3'...
|
|
--reap
|
|
connection default;
|
|
commit;
|
|
|
|
--echo #
|
|
--echo # Cleanup for savepoint.inc
|
|
--echo #
|
|
disconnect con1;
|
|
disconnect con2;
|
|
disconnect con3;
|
|
connection default;
|