Added argument to maria_end_bulk_insert() to know if the table will be deleted after the operation
Fixed wrong call to strmake
Don't call bulk insert in case of inserting only one row (speed optimization as starting/stopping bulk insert involves a lot of if's)
Allow storing year 2155 in year field
When running with purify/valgrind avoid copying structures over themselves
Added hook 'trnnam_end_trans_hook' that is called when transaction ends
Added trn->used_tables that is used to store an entry for all tables used by transaction
Fixed that ndb doesn't crash on duplicate key error when start_bulk_insert/end_bulk_insert are not called

include/maria.h:
  Added argument to maria_end_bulk_insert() to know if the table will be deleted after the operation
include/my_tree.h:
  Added macro 'reset_free_element()' to be able to ignore calls to the external free function.
  Is used to optimize end-bulk-insert in case of failures, in which case we don't want to write the remaining keys in the tree
mysql-test/install_test_db.sh:
  Upgrade to new mysql_install_db options
mysql-test/r/maria-mvcc.result:
  New tests
mysql-test/r/maria.result:
  New tests
mysql-test/suite/ndb/r/ndb_auto_increment.result:
  Fixed error message now that bulk insert is not always called
mysql-test/suite/ndb/t/ndb_auto_increment.test:
  Fixed error message now that bulk insert is not always called
mysql-test/t/maria-mvcc.test:
  Added testing of versioning of count(*)
mysql-test/t/maria-page-checksum.test:
  Added comment
mysql-test/t/maria.test:
  More tests
mysys/hash.c:
  Code style change
sql/field.cc:
  Allow storing year 2155 in year field
sql/ha_ndbcluster.cc:
  Added new argument to end_bulk_insert() to signal if the bulk insert should be ignored
sql/ha_ndbcluster.h:
  Added new argument to end_bulk_insert() to signal if the bulk insert should be ignored
sql/ha_partition.cc:
  Added new argument to end_bulk_insert() to signal if the bulk insert should be ignored
sql/ha_partition.h:
  Added new argument to end_bulk_insert() to signal if the bulk insert should be ignored
sql/handler.cc:
  Don't call get_dup_key() if there is no table object. This can happen if the handler generates a duplicate key error on commit
sql/handler.h:
  Added new argument to end_bulk_insert() to signal if the bulk insert should be ignored (ie, the table will be deleted)
sql/item.cc:
  Style fix
  Removed compiler warning
sql/log_event.cc:
  Added new argument to ha_end_bulk_insert()
sql/log_event_old.cc:
  Added new argument to ha_end_bulk_insert()
sql/mysqld.cc:
  Removed compiler warning
sql/protocol.cc:
  Added DBUG
sql/sql_class.cc:
  Added DBUG
  Fixed wrong call to strmake
sql/sql_insert.cc:
  Don't call bulk insert in case of inserting only one row (speed optimization as starting/stopping bulk insert involves a lot of if's)
  Added new argument to ha_end_bulk_insert()
sql/sql_load.cc:
  Added new argument to ha_end_bulk_insert()
sql/sql_parse.cc:
  Style fixes
  Avoid goto in common scenario
sql/sql_select.cc:
  When running with purify/valgrind avoid copying structures over themselves.
  This is not a real bug in itself, but it's a waste of cycles and causes valgrind warnings
sql/sql_select.h:
  Avoid copying structures over themselves.
  This is not a real bug in itself, but it's a waste of cycles and causes valgrind warnings
sql/sql_table.cc:
  Call HA_EXTRA_PREPARE_FOR_DROP if table created by ALTER TABLE is going to be dropped
  Added new argument to ha_end_bulk_insert()
storage/archive/ha_archive.cc:
  Added new argument to end_bulk_insert()
storage/archive/ha_archive.h:
  Added new argument to end_bulk_insert()
storage/federated/ha_federated.cc:
  Added new argument to end_bulk_insert()
storage/federated/ha_federated.h:
  Added new argument to end_bulk_insert()
storage/maria/Makefile.am:
  Added ma_state.c and ma_state.h
storage/maria/ha_maria.cc:
  Versioning of count(*) and checksum:
  - share->state.state is now assumed to be correct, not handler->state
  - Call _ma_setup_live_state() in external lock to get count(*)/checksum versioning.
    In case of a not versioned and not concurrent insertable table, file->s->state.state contains the correct state information
  Other things:
  - file->s -> share
  - Added DBUG_ASSERT() for unlikely case
  - Optimized end_bulk_insert() to not write anything if table is going to be deleted (as in failed alter table)
  - Indentation changes in external_lock because the removed 'goto' caused a big conflict even if very little was changed
storage/maria/ha_maria.h:
  New argument to end_bulk_insert()
storage/maria/ma_blockrec.c:
  Update for versioning of count(*) and checksum
  Keep share->state.state.data_file_length up to date (not info->state->data_file_length)
  Moved _ma_block_xxxx_status() and maria_versioning() functions to ma_state.c
storage/maria/ma_check.c:
  Update and use share->state.state instead of info->state
  info->s to share
  Update info->state at end of repair
  Call _ma_reset_state() to update share->state_history at end of repair
storage/maria/ma_checkpoint.c:
  Call _ma_remove_not_visible_states() on checkpoint to clean up not visible state history from tables
storage/maria/ma_close.c:
  Remember state history for running transaction even if table is closed
storage/maria/ma_commit.c:
  Ensure we always call trnman_commit_trn() even if other calls fail.
  If we don't do that, the translog and state structures will not be freed
storage/maria/ma_delete.c:
  Versioning of count(*) and checksum:
  - Always update info->state->checksum and info->state->records
storage/maria/ma_delete_all.c:
  Versioning of count(*) and checksum:
  - Ensure that share->state.state is updated, as this is where we store the primary information
storage/maria/ma_dynrec.c:
  Use lock_key_trees instead of concurrent_insert to check if trees should be locked.
  This allows us to lock trees both for concurrent_insert and for index versioning.
storage/maria/ma_extra.c:
  Versioning of count(*) and checksum:
  - Use share->state.state instead of info->state
  - share->concurrent_insert -> share->non_transactional_concurrent_insert
  - Don't update share->state.state from info->state if transactional table
  Optimization:
  - Don't flush io_cache or bitmap if we are using FLUSH_IGNORE_CHANGED
storage/maria/ma_info.c:
  Get most state information from current state
storage/maria/ma_init.c:
  Add hash table and free function to store states for closed tables
  Install hook for transaction commit/rollback to update history state
storage/maria/ma_key_recover.c:
  Versioning of count(*) and checksum:
  - Use share->state.state instead of info->state
storage/maria/ma_locking.c:
  Versioning of count(*) and checksum:
  - Call virtual functions (if they exist) to restore/update status
  - Move _ma_xxx_status() functions to ma_state.c
  info->s -> share
storage/maria/ma_open.c:
  Versioning of count(*) and checksum:
  - For not transactional tables, set info->state to point to a newly allocated state structure.
  - Initialize new info->state_start variable that points to state at start of transaction
  - Copy old history states from hash table (maria_stored_states) first time the table is opened
  - Split flag share->concurrent_insert into non_transactional_concurrent_insert & lock_key_tree
  - For now, only enable versioning of tables without keys (to be fixed soon!)
  - Added new virtual function to restore status in maria_lock_database()
  More DBUG
storage/maria/ma_page.c:
  Versioning of count(*) and checksum:
  - Use share->state.state instead of info->state
  - Modify share->state.state.key_file_length under share->intern_lock
storage/maria/ma_range.c:
  Versioning of count(*) and checksum:
  - Lock trees based on share->lock_key_trees
  info->s -> share
storage/maria/ma_recovery.c:
  Versioning of count(*) and checksum:
  - Use share->state.state instead of info->state
  - Update state information on close and when reenabling logging
storage/maria/ma_rkey.c:
  Versioning of count(*) and checksum:
  - Lock trees based on share->lock_key_trees
storage/maria/ma_rnext.c:
  Versioning of count(*) and checksum:
  - Lock trees based on share->lock_key_trees
storage/maria/ma_rnext_same.c:
  Versioning of count(*) and checksum:
  - Lock trees based on share->lock_key_trees
  - Only skip rows based on file length if non_transactional_concurrent_insert is set
storage/maria/ma_rprev.c:
  Versioning of count(*) and checksum:
  - Lock trees based on share->lock_key_trees
storage/maria/ma_rsame.c:
  Versioning of count(*) and checksum:
  - Lock trees based on share->lock_key_trees
storage/maria/ma_sort.c:
  Use share->state.state instead of info->state
  Fixed indentation
storage/maria/ma_static.c:
  Added maria_stored_state
storage/maria/ma_update.c:
  Versioning of count(*) and checksum:
  - Always update info->state->checksum and info->state->records
  - Remove optimization for index file update as it doesn't work for transactional tables
storage/maria/ma_write.c:
  Versioning of count(*) and checksum:
  - Always update info->state->checksum and info->state->records
storage/maria/maria_def.h:
  Move MARIA_STATUS_INFO to ma_state.h
  Changes to MARIA_SHARE:
  - Added state_history to store count(*)/checksum states
  - Added in_trans as a counter for how many running transactions are using the table
  - Split concurrent_insert into lock_key_trees and non_transactional_concurrent_insert.
  - Added virtual function lock_restore_status
  Changes to MARIA_HA:
  - save_state -> state_save
  - Added state_start to store state at start of transaction
storage/maria/maria_pack.c:
  Versioning of count(*) and checksum:
  - Use share->state.state instead of info->state
  Indentation fixes
storage/maria/trnman.c:
  Added hook 'trnnam_end_trans_hook' that is called when transaction ends
  Added trn->used_tables that is used to store an entry for all tables used by transaction
  More DBUG
  Changed return type of trnman_end_trn() to my_bool
  Added trnman_get_min_trid() to get minimum trid in use.
  Added trnman_exists_active_transactions() to check if there exists a running transaction started between two commit ids
storage/maria/trnman.h:
  Added 'used_tables'
  Moved all pointers into the same group to get better memory alignment
storage/maria/trnman_public.h:
  Added prototypes for new functions and variables
  Changed return type of trnman_end_trn() to my_bool
storage/myisam/ha_myisam.cc:
  Added argument to end_bulk_insert() to signal if the operation should be aborted
storage/myisam/ha_myisam.h:
  Added argument to end_bulk_insert() to signal if the operation should be aborted
storage/maria/ma_state.c:
  Functions to handle state of count(*) and checksum
storage/maria/ma_state.h:
  Structures and declarations to handle state of count(*) and checksum
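
The end_bulk_insert() change above amounts to passing an abort/drop flag down to the storage engine so it can skip flushing buffered rows and keys for a table that is about to be deleted (for example after a failed ALTER TABLE). Below is a minimal, self-contained C++ sketch of that pattern; the BulkWriter class, its members and the 'abort' parameter are illustrative assumptions, not the actual MariaDB handler API.

// Illustrative sketch only: models the idea of an 'abort' flag for
// end_bulk_insert(); this is not the real MariaDB handler interface.
#include <cstdio>
#include <vector>

class BulkWriter
{
  std::vector<int> buffered_keys;            // keys collected during a bulk insert
public:
  void start_bulk_insert() { buffered_keys.clear(); }
  void add_key(int key)    { buffered_keys.push_back(key); }

  // 'abort' (hypothetical) tells the writer that the table will be deleted
  // right after the operation, so flushing the buffered keys is wasted work.
  int end_bulk_insert(bool abort)
  {
    if (abort)
    {
      buffered_keys.clear();                 // drop pending work, write nothing
      return 0;
    }
    for (int key : buffered_keys)            // normal path: flush to the index
      std::printf("flushing key %d\n", key);
    buffered_keys.clear();
    return 0;
  }
};

int main()
{
  BulkWriter w;
  w.start_bulk_insert();
  w.add_key(1);
  w.add_key(2);
  w.end_bulk_insert(false);                  // flushes keys 1 and 2
  w.start_bulk_insert();
  w.add_key(3);
  w.end_bulk_insert(true);                   // table is going away: skip the flush
  return 0;
}
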
-- source include/have_multi_ndb.inc
-- source include/not_embedded.inc

--disable_warnings
connection server1;
DROP TABLE IF EXISTS t1,t2;
connection server2;
DROP TABLE IF EXISTS t1;
connection server1;
--enable_warnings

set @old_auto_increment_offset = @@session.auto_increment_offset;
set @old_auto_increment_increment = @@session.auto_increment_increment;
set @old_ndb_autoincrement_prefetch_sz = @@session.ndb_autoincrement_prefetch_sz;

flush status;

create table t1 (a int not null auto_increment primary key) engine ndb;

# Step 1: Verify simple insert
insert into t1 values (NULL);
select * from t1 order by a;

# Step 2: Verify simple update with higher than highest value causes
# next insert to use updated_value + 1
update t1 set a = 5 where a = 1;
insert into t1 values (NULL);
select * from t1 order by a;

# Step 3: Verify insert that inserts higher than highest value causes
# next insert to use inserted_value + 1
insert into t1 values (7);
insert into t1 values (NULL);
select * from t1 order by a;

# Step 4: Verify that insert into hole, lower than highest value doesn't
# affect next insert
insert into t1 values (2);
insert into t1 values (NULL);
select * from t1 order by a;

# Step 5: Verify that update into hole, lower than highest value doesn't
# affect next insert
update t1 set a = 4 where a = 2;
insert into t1 values (NULL);
select * from t1 order by a;

# Step 6: Verify that delete of highest value doesn't cause the next
# insert to reuse this value
delete from t1 where a = 10;
insert into t1 values (NULL);
select * from t1 order by a;

# Step 7: Verify that REPLACE has the same effect as INSERT
replace t1 values (NULL);
select * from t1 order by a;
replace t1 values (15);
select * from t1 order by a;
replace into t1 values (NULL);
select * from t1 order by a;

# Step 8: Verify that REPLACE has the same effect as UPDATE
replace t1 values (15);
select * from t1 order by a;

# Step 9: Verify that IGNORE doesn't affect auto_increment
insert ignore into t1 values (NULL);
select * from t1 order by a;
insert ignore into t1 values (15), (NULL);
select * from t1 order by a;

# Step 10: Verify that on duplicate key as UPDATE behaves as an
# UPDATE
insert into t1 values (15)
on duplicate key update a = 20;
insert into t1 values (NULL);
select * from t1 order by a;

# Step 11: Verify that on duplicate key as INSERT behaves as INSERT
insert into t1 values (NULL) on duplicate key update a = 30;
select * from t1 order by a;
insert into t1 values (30) on duplicate key update a = 40;
select * from t1 order by a;

#Step 12: Verify INSERT IGNORE (bug#32055)
insert ignore into t1 values(600),(NULL),(NULL),(610),(NULL);
select * from t1 order by a;
drop table t1;

#Step 13: Verify auto_increment of unique key
create table t1 (a int not null primary key,
b int not null unique auto_increment) engine ndb;
insert into t1 values (1, NULL);
insert into t1 values (3, NULL);
update t1 set b = 3 where a = 3;
insert into t1 values (4, NULL);
select * from t1 order by a;
drop table t1;

#Step 14: Verify that auto_increment_increment and auto_increment_offset
# work as expected
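# Generated values are expected to follow the usual MySQL rule
# auto_increment_offset + N * auto_increment_increment (e.g. increment=10
# with the default offset of 1 should give 1, 11, 21, ...).
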
CREATE TABLE t1 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER;

CREATE TABLE t2 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=MYISAM;

SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_offset=5;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t1 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (pk,b,c) VALUES (27,4,3),(NULL,5,4),(99,6,5),(NULL,7,6);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
TRUNCATE t1;
TRUNCATE t2;
SET @@session.auto_increment_increment=2;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;

CREATE TABLE t1 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;

CREATE TABLE t2 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;

SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;

CREATE TABLE t1 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 3;

CREATE TABLE t2 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 3;

SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;

CREATE TABLE t1 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 7;

CREATE TABLE t2 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 7;

SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;

CREATE TABLE t1 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 5;

CREATE TABLE t2 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 5;

SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;

CREATE TABLE t1 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=NDBCLUSTER AUTO_INCREMENT = 100;

CREATE TABLE t2 (
  pk INT NOT NULL PRIMARY KEY AUTO_INCREMENT,
  b INT NOT NULL,
  c INT NOT NULL UNIQUE
) ENGINE=MYISAM AUTO_INCREMENT = 100;

SET @@session.auto_increment_offset=5;
SET @@session.auto_increment_increment=10;
INSERT INTO t1 (b,c) VALUES (1,0),(2,1),(3,2);
INSERT INTO t2 (b,c) VALUES (1,0),(2,1),(3,2);
SELECT * FROM t1 ORDER BY pk;
SELECT COUNT(t1.pk) FROM t1, t2 WHERE t1.pk = t2.pk AND t1.b = t2.b AND t1.c = t1.c;
DROP TABLE t1, t2;

#Step 15: Now verify that the behaviour on multiple MySQL servers is
# correct. Start by dropping the table and recreating it to start
# counters and id caches from zero again.
--disable_warnings
connection server2;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
set ndb_autoincrement_prefetch_sz = 32;
drop table if exists t1;
connection server1;
SET @@session.auto_increment_offset=1;
SET @@session.auto_increment_increment=1;
set ndb_autoincrement_prefetch_sz = 32;
--enable_warnings

create table t1 (a int not null auto_increment primary key) engine ndb;
# Basic test, ensure that the second server gets a new range.
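# Each mysqld prefetches a range of ndb_autoincrement_prefetch_sz (32 here)
# auto_increment values from NDB, so server1 is expected to hand out 1-32 and
# the first NULL insert on server2 to get a value from the next range (33).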
#Generate record with key = 1
insert into t1 values (NULL);
connection server2;
#Generate record with key = 33
insert into t1 values (NULL);
connection server1;
select * from t1 order by a;

#This insert should not affect the range of the second server
insert into t1 values (20);
connection server2;
insert into t1 values (NULL);
select * from t1 order by a;

connection server1;
#This insert should remove cached values but also skip values already
#taken by server2; given that there is no method of communicating with
#the other server, it should also cause a conflict.
connection server1;

insert into t1 values (35);
insert into t1 values (NULL);
connection server2;
--error ER_DUP_ENTRY, ER_DUP_KEY
insert into t1 values (NULL);
select * from t1 order by a;

insert into t1 values (100);
insert into t1 values (NULL);
connection server1;
insert into t1 values (NULL);
select * from t1 order by a;

set auto_increment_offset = @old_auto_increment_offset;
set auto_increment_increment = @old_auto_increment_increment;
set ndb_autoincrement_prefetch_sz = @old_ndb_autoincrement_prefetch_sz;

drop table t1;