mariadb/mysql-test/suite/innodb/r/innodb_bug84958.result
Aleksey Midenkov 2347ffd843 MDEV-20301 InnoDB's MVCC has O(N^2) behaviors
If there are multiple row versions in InnoDB, reading one row via the
primary key may have O(N) complexity and reading it via a secondary
key may have O(N^2) complexity.

The problem occurs when there are many pending versions of the same
row, meaning that the primary key is the same but a secondary key
differs.  The slowdown occurs when the secondary index is
traversed. This patch creates a helper class for the function
row_sel_get_clust_rec_for_mysql() which remembers and re-uses
cached_clust_rec & cached_old_vers so that rec_get_offsets() does not
need to be called over and over for the clustered record.

Corrections by Kevin Lewis <kevin.lewis@oracle.com>

MDEV-20341 Unstable innodb.innodb_bug14704286

Removed a test that verified the ability to interrupt a long-running
query, because the query is no longer long.
2019-08-14 19:10:17 +03:00
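
The sketch below illustrates the caching idea from the commit message in
isolated C++. The names ClustRec, Offsets, compute_offsets and
ClustRecCache are placeholders invented for this illustration, not
InnoDB's real API; the actual patch wraps row_sel_get_clust_rec_for_mysql()
in a helper class whose cached_clust_rec / cached_old_vers members play the
role of the cache shown here.

#include <cstdint>
#include <vector>

struct ClustRec { std::uintptr_t id; };       // stand-in for InnoDB's rec_t
using Offsets = std::vector<std::uint16_t>;   // stand-in for a rec_get_offsets() result

// Stand-in for the expensive per-record work (rec_get_offsets() in InnoDB).
static Offsets compute_offsets(const ClustRec *rec)
{
  return Offsets{ static_cast<std::uint16_t>(rec->id & 0xffff) };
}

class ClustRecCache
{
  const ClustRec *cached_rec= nullptr;   // last clustered record looked up
  Offsets cached_offsets;                // offsets computed for that record

public:
  // Return offsets for rec, recomputing only when the record changes.
  const Offsets &offsets_for(const ClustRec *rec)
  {
    if (rec != cached_rec)
    {
      cached_offsets= compute_offsets(rec);  // expensive path, taken once per record
      cached_rec= rec;
    }
    return cached_offsets;                   // cheap path while walking old versions
  }
};

While a secondary-index scan walks the many old versions of one row, the
cheap path is taken repeatedly instead of redoing the expensive lookup,
which is what removes the quadratic behavior described above.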


#
# Bug #84958 InnoDB's MVCC has O(N^2) behaviors
# https://bugs.mysql.com/bug.php?id=84958
#
# Set up the test with a procedure and a function.
#
CREATE PROCEDURE insert_n(start int, end int)
BEGIN
DECLARE i INT DEFAULT start;
WHILE i <= end do
INSERT INTO t1 VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = i;
SET i = i + 1;
END WHILE;
END~~
CREATE FUNCTION num_pages_get()
RETURNS INT
BEGIN
DECLARE ret INT;
SELECT variable_value INTO ret
FROM information_schema.global_status
WHERE variable_name = 'innodb_buffer_pool_read_requests';
RETURN ret;
END~~
#
# Create a table with one record in it and start an RR transaction
#
CREATE TABLE t1 (a INT, b INT, c INT, PRIMARY KEY(a,b), KEY (b,c))
ENGINE=InnoDB;
BEGIN;
SELECT * FROM t1;
a b c
#
# Create 100 newer record versions in con2 and con3
#
connect con2, localhost, root,,;
connection con2;
INSERT INTO t1 VALUES (1, 2, 3) ON DUPLICATE KEY UPDATE c = NULL;
CALL insert_n(1, 50);;
connect con3, localhost, root,,;
connection con3;
CALL insert_n(51, 100);;
connection con2;
connection con3;
INSERT INTO t1 VALUES (1, 2, 1) ON DUPLICATE KEY UPDATE c = NULL;
connection default;
#
# Connect to default and record how many pages were accessed
# when selecting the record using the secondary key.
#
SET @num_pages_1 = num_pages_get();
SELECT * FROM t1 force index (b);
a b c
SET @num_pages_2= num_pages_get();
SELECT @num_pages_2 - @num_pages_1 < 500;
@num_pages_2 - @num_pages_1 < 500
1
#
# Commit and show the final record.
#
SELECT * FROM t1;
a b c
SELECT * FROM t1 force index (b);
a b c
COMMIT;
SELECT * FROM t1 force index (b);
a b c
1 2 NULL
SELECT * FROM t1;
a b c
1 2 NULL
CHECK TABLE t1;
Table Op Msg_type Msg_text
test.t1 check status OK
#
# Cleanup
#
disconnect con2;
disconnect con3;
DROP TABLE t1;
DROP PROCEDURE insert_n;
DROP FUNCTION num_pages_get;