Applied innodb-5.1-ss269 snapshot.

Fixed bugs:

#3300: "UPDATE statement with no index column in where condition locks all rows"
  Implement semi-consistent read to reduce lock conflicts, at the cost of
  breaking serializability.
  ha_innobase::unlock_row(): reset the "did semi consistent read" flag.
  ha_innobase::was_semi_consistent_read(), ha_innobase::try_semi_consistent_read():
  new methods.
  row_prebuilt_t, row_create_prebuilt(): add the field row_read_type for
  keeping track of semi-consistent reads.
  row_vers_build_for_semi_consistent_read(),
  row_sel_build_committed_vers_for_mysql(): new functions.
  row_search_for_mysql(): implement semi-consistent reads.

#9802: "Foreign key checks disallow alter table"
  Added test cases.

#12456: "Cursor shows incorrect data - DML does not affect, probably caching"
  This patch implements a high-granularity read view to be used with cursors.
  In this high-granularity consistent read view, modifications done by the
  creating transaction after the cursor is created, or by future transactions,
  are not visible. Modifications that the transaction did before the cursor
  was created are visible.

#12701: "Support >4GB buffer pool and log files on 64-bit Windows"
  Do not call os_file_create_tmpfile() at runtime. Instead, create all
  tempfiles at startup and guard access to them with mutexes.

#13778: "If FOREIGN_KEY_CHECKS=0, one can create inconsistent FOREIGN KEYs"
  When FOREIGN_KEY_CHECKS=0 we still need to check that the data types of
  foreign key references are compatible.

#14189: "VARBINARY and BINARY variables: trailing space ignored with InnoDB"
  innobase_init(): Assert that DATA_MYSQL_BINARY_CHARSET_COLL ==
  my_charset_bin.number.
  dtype_get_pad_char(): Do not pad VARBINARY or BINARY columns.
  row_ins_cascade_calc_update_vec(): Refuse ON UPDATE CASCADE when trying to
  change the length of a VARBINARY column that refers to or is referenced by
  a BINARY column. BINARY columns are no longer padded on comparison, and
  thus they cannot be padded on storage either.

#14747: "Race condition can cause btr_search_drop_page_hash_index() to crash"
  Note that buf_block_t::index should be protected by btr_search_latch or an
  s-latch or x-latch on the index page.
  btr_search_drop_page_hash_index(): Read block->index while holding
  btr_search_latch and use the cached value in the loop. Remove some
  redundant assertions.

#15108: "mysqld crashes when innodb_log_file_size is set > 4G"

#15308: "Problem of Order with Enum Column in Primary Key"

#15550: "mysqld crashes in printing a FOREIGN KEY error in InnoDB"
  row_ins_foreign_report_add_err(): When printing the parent record, use the
  index in the parent table rather than the index in the child table.

#15653: "Slow inserts to InnoDB if many thousands of .ibd files"
  Keep track of unflushed modifications to file spaces. When there are tens
  of thousands of file spaces, flushing all files in fil_flush_file_spaces()
  would be very slow.
  fil_flush_file_spaces(): Only flush unflushed file spaces.
  fil_space_t, fil_system_t: Add a list of unflushed spaces.

#15991: "innodb-file-per-table + symlink database + rename = cr"
  os_file_handle_error(): Map the error codes EXDEV, ENOTDIR, and EISDIR to
  the new code OS_FILE_PATH_ERROR. Treat this code as OS_FILE_PATH_ERROR.
  This fixes the crash on RENAME TABLE when the .ibd file is a symbolic link
  to a different file system.

#16157: "InnoDB crashes when main location settings are empty"
  This patch is from Heikki.

#16298: "InnoDB segfaults in INSERTs in upgrade of 4.0 -> 5.0 tables with VARCHAR BINARY"
  dict_load_columns(): Set the charset-collation code
  DATA_MYSQL_BINARY_CHARSET_COLL for those binary string columns that lack a
  charset-collation code, i.e. tables created with a version of MySQL/InnoDB
  older than 4.1.2.

#16229: "MySQL/InnoDB uses full explicit table locks in trigger processing"
  Take an InnoDB table lock only if the user has explicitly requested a table
  lock. Added some additional comments to store_lock() and external_lock().

#16387: "InnoDB crash when dropping a foreign key <table>_ibfk_0"
  Do not mistake TABLENAME_ibfk_0 for an auto-generated id.
  dict_table_get_highest_foreign_id(): Ignore foreign constraint identifiers
  starting with the pattern TABLENAME_ibfk_0.

#16582: "InnoDB: Error in an adaptive hash index pointer to page"
  Account for a race condition when dropping the adaptive hash index for a
  B-tree page.
  btr_search_drop_page_hash_index(): Retry the operation if a hash index with
  different parameters was built meanwhile. Add diagnostics for the case that
  hash node pointers to the page remain.
  btr_search_info_update_hash(), btr_search_info_update_slow(): Document the
  parameter "info" as in/out.

#16814: "SHOW INNODB STATUS format error in LATEST FOREIGN KEY ERROR section"
  Add a missing newline to the LATEST FOREIGN KEY ERROR section in SHOW
  INNODB STATUS output.
  dict_foreign_error_report(): Always print a newline after invoking
  dict_print_info_on_foreign_key_in_create_format().

#16827: "Better InnoDB error message if ibdata files omitted from my.cnf"

#17126: "CHECK TABLE on InnoDB causes a short hang during check of adaptive hash"
  Fix CHECK TABLE blocking other queries by releasing the btr_search_latch
  periodically during the adaptive hash table validation.

#17405: "Valgrind: conditional jump or move depends on uninitialised values"
  buf_block_init(): Reset magic_n, buf_fix_count and io_fix to avoid testing
  uninitialized variables.
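
For readers unfamiliar with the semi-consistent read added for bug #3300, the
following two-connection sketch (not part of the patch; the table, the data and
the connection names are illustrative, and the server is assumed to be started
with --innodb_locks_unsafe_for_binlog=true as in the new innodb_unsafe_binlog
test) shows the intended effect: an UPDATE whose WHERE condition uses no index
no longer blocks on rows that do not match the condition, even when another
transaction holds locks on them.

connect (a,localhost,root,,);
connect (b,localhost,root,,);
connection a;
create table t1(a int not null, b int, primary key(a)) engine=innodb;
insert into t1 values (1,10),(2,20),(3,30);
commit;
set autocommit = 0;
# lock the row a=2, but do not commit yet
update t1 set b = 21 where a = 2;
connection b;
set autocommit = 0;
# no index on b, so this scan visits every row; with semi-consistent read the
# locked row a=2 is read from its last committed version (b=20), which does not
# match the condition, so the statement proceeds instead of waiting for a's lock
update t1 set b = b + 1 where b = 30;
connection a;
commit;
connection b;
commit;
drop table t1;
disconnect a;
disconnect b;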
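
The cursor read view described for bug #12456 can be pictured with a minimal
stored-procedure sketch (again not part of the patch; the procedure name is
illustrative, and it assumes the read view is fixed when the cursor is opened):
rows that the transaction inserts before opening the cursor are visible to it,
while rows inserted after opening it are not.

create table t1(a int) engine=innodb;
insert into t1 values (1);
delimiter |;
create procedure p12456()
begin
  declare done int default 0;
  declare v int;
  declare c cursor for select a from t1;
  declare continue handler for not found set done = 1;
  insert into t1 values (2);  # done before the cursor is opened: visible
  open c;
  insert into t1 values (3);  # done after the cursor is opened: not visible
  repeat
    fetch c into v;
    if not done then
      select v;               # returns 1 and 2, never 3
    end if;
  until done end repeat;
  close c;
end|
delimiter ;|
call p12456();
drop procedure p12456;
drop table t1;
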
@@ -1784,7 +1784,7 @@ innodb_sync_spin_loops 20
show variables like "innodb_thread_concurrency";
Variable_name Value
innodb_thread_concurrency 20
set global innodb_thread_concurrency=1000;
set global innodb_thread_concurrency=1001;
show variables like "innodb_thread_concurrency";
Variable_name Value
innodb_thread_concurrency 1000
@@ -2661,6 +2661,32 @@ insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven');
insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point');
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
update t1 set filler = 'boo' where a = 1;
update t2 set filler ='email' where a = 4;
select a,hex(b),hex(c),filler from t1 order by filler;
a hex(b) hex(c) filler
1 61626364656667 61626364656667 boo
4 D0B1 D0B1 eight
4 5B 5B five
4 E880BD E880BD four
4 E880BDD0B1E880BD E880BDD0B1E880BD seven
4 E880BDE880BD E880BDE880BD six
3 71727374757677 71727374757677 three
2 696A6B696C6D6E 696A6B696C6D6E two
select a,hex(b),hex(c),filler from t2 order by filler;
a hex(b) hex(c) filler
4 05630563 05630563 email
4 0563 0563 email
4 05612020 05612020 email
4 01FC 01FC email
4 0120 0120 email
4 00640065 00640065 email
4 00E400E50068 00E400E50068 email
4 0000E400 0000E400 email
4 0000563001FC0563 0000563001FC0563 email
1 0061006200630064006500660067 0061006200630064006500660067 one
3 0071007200730074007500760077 0071007200730074007500760077 three
2 0069006A006B0069006C006D006E 0069006A006B0069006C006D006E two
drop table t1;
drop table t2;
create table t1 (
@@ -2689,6 +2715,32 @@ insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven');
insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point');
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
update t1 set filler = 'boo' where a = 1;
update t2 set filler ='email' where a = 4;
select a,hex(b),hex(c),filler from t1 order by filler;
a hex(b) hex(c) filler
1 61626364656667 61626364656667 boo
4 D0B1 D0B1 eight
4 5B 5B five
4 E880BD E880BD four
4 E880BDD0B1E880BD E880BDD0B1E880BD seven
4 E880BDE880BD E880BDE880BD six
3 71727374757677 71727374757677 three
2 696A6B696C6D6E 696A6B696C6D6E two
select a,hex(b),hex(c),filler from t2 order by filler;
a hex(b) hex(c) filler
4 05630563 05630563 email
4 0563 0563 email
4 05612020 05612020 email
4 01FC 01FC email
4 0120 0120 email
4 00640065 00640065 email
4 00E400E50068 00E400E50068 email
4 0000E400 0000E400 email
4 0000563001FC0563 0000563001FC0563 email
1 0061006200630064006500660067 0061006200630064006500660067 one
3 0071007200730074007500760077 0071007200730074007500760077 three
2 0069006A006B0069006C006D006E 0069006A006B0069006C006D006E two
drop table t1;
drop table t2;
create table t1 (
@@ -2717,6 +2769,32 @@ insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven');
insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point');
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
update t1 set filler = 'boo' where a = 1;
update t2 set filler ='email' where a = 4;
select a,hex(b),hex(c),filler from t1 order by filler;
a hex(b) hex(c) filler
1 61626364656667 61626364656667 boo
4 D0B1 D0B1 eight
4 5B 5B five
4 E880BD E880BD four
4 E880BDD0B1E880BD E880BDD0B1E880BD seven
4 E880BDE880BD E880BDE880BD six
3 71727374757677 71727374757677 three
2 696A6B696C6D6E 696A6B696C6D6E two
select a,hex(b),hex(c),filler from t2 order by filler;
a hex(b) hex(c) filler
4 0120 0120 email
4 01FC 01FC email
4 0563 0563 email
4 0000563001FC0563 0000563001FC0563 email
4 0000E400 0000E400 email
4 00640065 00640065 email
4 00E400E50068 00E400E50068 email
4 05612020 05612020 email
4 05630563 05630563 email
1 0061006200630064006500660067 0061006200630064006500660067 one
3 0071007200730074007500760077 0071007200730074007500760077 three
2 0069006A006B0069006C006D006E 0069006A006B0069006C006D006E two
drop table t1;
drop table t2;
create table t1 (
@@ -2741,9 +2819,92 @@ insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven');
insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight');
insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
update t1 set filler = 'boo' where a = 1;
update t2 set filler ='email' where a = 4;
select a,hex(b),hex(c),filler from t1 order by filler;
a hex(b) hex(c) filler
1 61626364656667 61626364656667 boo
4 D0B1 D0B1 eight
4 5B 5B five
4 E880BD E880BD four
3 71727374757677 71727374757677 three
2 696A6B696C6D6E 696A6B696C6D6E two
select a,hex(b),hex(c),filler from t2 order by filler;
a hex(b) hex(c) filler
4 0000E400 0000E400 email
4 00640065 00640065 email
4 00E400E50068 00E400E50068 email
4 0120 0120 email
4 01FC 01FC email
4 05612020 05612020 email
4 0563 0563 email
1 61626364656667 61626364656667 one
3 71727374757677 71727374757677 three
2 696A6B696C6D6E 696A6B696C6D6E two
drop table t1;
drop table t2;
commit;
set foreign_key_checks=0;
create table t2 (a int primary key, b int, foreign key (b) references t1(a)) engine = innodb;
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
ERROR HY000: Can't create table 'test.t1' (errno: 150)
set foreign_key_checks=1;
drop table t2;
set foreign_key_checks=0;
create table t1(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=latin1;
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=utf8;
ERROR HY000: Can't create table 'test.t2' (errno: 150)
set foreign_key_checks=1;
drop table t1;
set foreign_key_checks=0;
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb;
create table t1(a varchar(10) primary key) engine = innodb;
alter table t1 modify column a int;
Got one of the listed errors
set foreign_key_checks=1;
drop table t2,t1;
set foreign_key_checks=0;
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=latin1;
create table t1(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=latin1;
alter table t1 convert to character set utf8;
set foreign_key_checks=1;
drop table t2,t1;
set foreign_key_checks=0;
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=latin1;
create table t3(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=utf8;
rename table t3 to t1;
ERROR HY000: Error on rename of './test/t3' to './test/t1' (errno: 150)
set foreign_key_checks=1;
drop table t2,t3;
create table t1(a int primary key) row_format=redundant engine=innodb;
create table t2(a int primary key,constraint foreign key(a)references t1(a)) row_format=compact engine=innodb;
create table t3(a int primary key) row_format=compact engine=innodb;
create table t4(a int primary key,constraint foreign key(a)references t3(a)) row_format=redundant engine=innodb;
insert into t1 values(1);
insert into t3 values(1);
insert into t2 values(2);
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test/t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
insert into t4 values(2);
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test/t4`, CONSTRAINT `t4_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t3` (`a`))
insert into t2 values(1);
insert into t4 values(1);
update t1 set a=2;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
update t2 set a=2;
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test/t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
update t3 set a=2;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t4`, CONSTRAINT `t4_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t3` (`a`))
update t4 set a=2;
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test/t4`, CONSTRAINT `t4_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t3` (`a`))
truncate t1;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`))
truncate t3;
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t4`, CONSTRAINT `t4_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t3` (`a`))
truncate t2;
truncate t4;
truncate t1;
truncate t3;
drop table t4,t3,t2,t1;
create table t1 (a varchar(255) character set utf8,
b varchar(255) character set utf8,
c varchar(255) character set utf8,
@@ -2757,7 +2918,284 @@ d varchar(255) character set utf8,
|
||||
e varchar(255) character set utf8,
|
||||
key (a,b,c,d,e)) engine=innodb;
|
||||
ERROR 42000: Specified key was too long; max key length is 3072 bytes
|
||||
End of 5.0 tests
|
||||
create table t1 (s1 varbinary(2),primary key (s1)) engine=innodb;
|
||||
create table t2 (s1 binary(2),primary key (s1)) engine=innodb;
|
||||
create table t3 (s1 varchar(2) binary,primary key (s1)) engine=innodb;
|
||||
create table t4 (s1 char(2) binary,primary key (s1)) engine=innodb;
|
||||
insert into t1 values (0x41),(0x4120),(0x4100);
|
||||
insert into t2 values (0x41),(0x4120),(0x4100);
|
||||
ERROR 23000: Duplicate entry 'A' for key 'PRIMARY'
|
||||
insert into t2 values (0x41),(0x4120);
|
||||
insert into t3 values (0x41),(0x4120),(0x4100);
|
||||
ERROR 23000: Duplicate entry 'A ' for key 'PRIMARY'
|
||||
insert into t3 values (0x41),(0x4100);
|
||||
insert into t4 values (0x41),(0x4120),(0x4100);
|
||||
ERROR 23000: Duplicate entry 'A' for key 'PRIMARY'
|
||||
insert into t4 values (0x41),(0x4100);
|
||||
select hex(s1) from t1;
|
||||
hex(s1)
|
||||
41
|
||||
4100
|
||||
4120
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
4100
|
||||
4120
|
||||
select hex(s1) from t3;
|
||||
hex(s1)
|
||||
4100
|
||||
41
|
||||
select hex(s1) from t4;
|
||||
hex(s1)
|
||||
4100
|
||||
41
|
||||
drop table t1,t2,t3,t4;
|
||||
create table t1 (a int primary key,s1 varbinary(3) not null unique) engine=innodb;
|
||||
create table t2 (s1 binary(2) not null, constraint c foreign key(s1) references t1(s1) on update cascade) engine=innodb;
|
||||
insert into t1 values(1,0x4100),(2,0x41),(3,0x4120),(4,0x42);
|
||||
insert into t2 values(0x42);
|
||||
ERROR 23000: Cannot add or update a child row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
insert into t2 values(0x41);
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
4100
|
||||
update t1 set s1=0x123456 where a=2;
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
4100
|
||||
update t1 set s1=0x12 where a=1;
|
||||
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
update t1 set s1=0x12345678 where a=1;
|
||||
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
update t1 set s1=0x123457 where a=1;
|
||||
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
update t1 set s1=0x1220 where a=1;
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
1220
|
||||
update t1 set s1=0x1200 where a=1;
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
1200
|
||||
update t1 set s1=0x4200 where a=1;
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
4200
|
||||
delete from t1 where a=1;
|
||||
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
delete from t1 where a=2;
|
||||
update t2 set s1=0x4120;
|
||||
delete from t1;
|
||||
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
delete from t1 where a!=3;
|
||||
select a,hex(s1) from t1;
|
||||
a hex(s1)
|
||||
3 4120
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
4120
|
||||
drop table t2,t1;
|
||||
create table t1 (a int primary key,s1 varchar(2) binary not null unique) engine=innodb;
|
||||
create table t2 (s1 char(2) binary not null, constraint c foreign key(s1) references t1(s1) on update cascade) engine=innodb;
|
||||
insert into t1 values(1,0x4100),(2,0x41);
|
||||
insert into t2 values(0x41);
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
41
|
||||
update t1 set s1=0x1234 where a=1;
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
41
|
||||
update t1 set s1=0x12 where a=2;
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
12
|
||||
delete from t1 where a=1;
|
||||
delete from t1 where a=2;
|
||||
ERROR 23000: Cannot delete or update a parent row: a foreign key constraint fails (`test/t2`, CONSTRAINT `c` FOREIGN KEY (`s1`) REFERENCES `t1` (`s1`) ON UPDATE CASCADE)
|
||||
select a,hex(s1) from t1;
|
||||
a hex(s1)
|
||||
2 12
|
||||
select hex(s1) from t2;
|
||||
hex(s1)
|
||||
12
|
||||
drop table t2,t1;
|
||||
CREATE TABLE t1 (
|
||||
ind enum('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
CREATE TABLE t2 (
|
||||
ind enum('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=ucs2;
|
||||
INSERT INTO t1 VALUES ('1', ''),('2', '');
|
||||
INSERT INTO t2 VALUES ('1', ''),('2', '');
|
||||
SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1;
|
||||
hex(ind) hex(string1)
|
||||
31
|
||||
32
|
||||
SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1;
|
||||
hex(ind) hex(string1)
|
||||
0031
|
||||
0032
|
||||
drop table t1,t2;
|
||||
CREATE TABLE t1 (
|
||||
ind set('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
CREATE TABLE t2 (
|
||||
ind set('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=ucs2;
|
||||
INSERT INTO t1 VALUES ('1', ''),('2', '');
|
||||
INSERT INTO t2 VALUES ('1', ''),('2', '');
|
||||
SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1;
|
||||
hex(ind) hex(string1)
|
||||
31
|
||||
32
|
||||
SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1;
|
||||
hex(ind) hex(string1)
|
||||
0031
|
||||
0032
|
||||
drop table t1,t2;
|
||||
CREATE TABLE t1 (
|
||||
ind bit not null,
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
CREATE TABLE t2 (
|
||||
ind bit not null,
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=ucs2;
|
||||
insert into t1 values(0,''),(1,'');
|
||||
insert into t2 values(0,''),(1,'');
|
||||
select hex(ind),hex(string1) from t1 order by string1;
|
||||
hex(ind) hex(string1)
|
||||
0
|
||||
1
|
||||
select hex(ind),hex(string1) from t2 order by string1;
|
||||
hex(ind) hex(string1)
|
||||
0
|
||||
1
|
||||
drop table t1,t2;
|
||||
create table t2 (
|
||||
a int, b char(10), filler char(10), primary key(a, b(2))
|
||||
) character set utf8 engine = innodb;
|
||||
insert into t2 values (1,'abcdefg','one');
|
||||
insert into t2 values (2,'ijkilmn','two');
|
||||
insert into t2 values (3, 'qrstuvw','three');
|
||||
update t2 set a=5, filler='booo' where a=1;
|
||||
drop table t2;
|
||||
create table t2 (
|
||||
a int, b char(10), filler char(10), primary key(a, b(2))
|
||||
) character set ucs2 engine = innodb;
|
||||
insert into t2 values (1,'abcdefg','one');
|
||||
insert into t2 values (2,'ijkilmn','two');
|
||||
insert into t2 values (3, 'qrstuvw','three');
|
||||
update t2 set a=5, filler='booo' where a=1;
|
||||
drop table t2;
|
||||
create table t1(a int not null, b char(110),primary key(a,b(100))) engine=innodb default charset=utf8;
|
||||
insert into t1 values(1,'abcdefg'),(2,'defghijk');
|
||||
insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1);
|
||||
insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2);
|
||||
select a,hex(b) from t1 order by b;
|
||||
a hex(b)
|
||||
1 61626364656667
|
||||
2 6465666768696A6B
|
||||
6 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1
|
||||
7 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2
|
||||
update t1 set b = 'three' where a = 6;
|
||||
drop table t1;
|
||||
create table t1(a int not null, b text(110),primary key(a,b(100))) engine=innodb default charset=utf8;
|
||||
insert into t1 values(1,'abcdefg'),(2,'defghijk');
|
||||
insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1);
|
||||
insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2);
|
||||
select a,hex(b) from t1 order by b;
|
||||
a hex(b)
|
||||
1 61626364656667
|
||||
2 6465666768696A6B
|
||||
6 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1
|
||||
7 D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2
|
||||
update t1 set b = 'three' where a = 6;
|
||||
drop table t1;
|
||||
CREATE TABLE t1(a INT, PRIMARY KEY(a)) ENGINE=InnoDB;
|
||||
CREATE TABLE t2(a INT) ENGINE=InnoDB;
|
||||
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_1;
|
||||
ALTER TABLE t2 ADD CONSTRAINT t2_ibfk_0 FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_0;
|
||||
SHOW CREATE TABLE t2;
|
||||
Table Create Table
|
||||
t2 CREATE TABLE `t2` (
|
||||
`a` int(11) DEFAULT NULL,
|
||||
KEY `t2_ibfk_0` (`a`),
|
||||
CONSTRAINT `t2_ibfk_0` FOREIGN KEY (`a`) REFERENCES `t1` (`a`),
|
||||
CONSTRAINT `t2_ibfk_1` FOREIGN KEY (`a`) REFERENCES `t1` (`a`)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=latin1
|
||||
DROP TABLE t2,t1;
|
||||
create table t1(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
insert into t1(a) values (1),(2),(3);
|
||||
commit;
|
||||
set autocommit = 0;
|
||||
update t1 set b = 5 where a = 2;
|
||||
create trigger t1t before insert on t1 for each row begin set NEW.b = NEW.a * 10 + 5, NEW.c = NEW.a / 10; end |
|
||||
set autocommit = 0;
|
||||
insert into t1(a) values (10),(20),(30),(40),(50),(60),(70),(80),(90),(100),
|
||||
(11),(21),(31),(41),(51),(61),(71),(81),(91),(101),
|
||||
(12),(22),(32),(42),(52),(62),(72),(82),(92),(102),
|
||||
(13),(23),(33),(43),(53),(63),(73),(83),(93),(103),
|
||||
(14),(24),(34),(44),(54),(64),(74),(84),(94),(104);
|
||||
commit;
|
||||
commit;
|
||||
drop trigger t1t;
|
||||
drop table t1;
|
||||
create table t1(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t2(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t3(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t4(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t5(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
insert into t1(a) values (1),(2),(3);
|
||||
insert into t2(a) values (1),(2),(3);
|
||||
insert into t3(a) values (1),(2),(3);
|
||||
insert into t4(a) values (1),(2),(3);
|
||||
insert into t3(a) values (5),(7),(8);
|
||||
insert into t4(a) values (5),(7),(8);
|
||||
insert into t5(a) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12);
|
||||
create trigger t1t before insert on t1 for each row begin
|
||||
INSERT INTO t2 SET a = NEW.a;
|
||||
end |
|
||||
create trigger t2t before insert on t2 for each row begin
|
||||
DELETE FROM t3 WHERE a = NEW.a;
|
||||
end |
|
||||
create trigger t3t before delete on t3 for each row begin
|
||||
UPDATE t4 SET b = b + 1 WHERE a = OLD.a;
|
||||
end |
|
||||
create trigger t4t before update on t4 for each row begin
|
||||
UPDATE t5 SET b = b + 1 where a = NEW.a;
|
||||
end |
|
||||
commit;
|
||||
set autocommit = 0;
|
||||
update t1 set b = b + 5 where a = 1;
|
||||
update t2 set b = b + 5 where a = 1;
|
||||
update t3 set b = b + 5 where a = 1;
|
||||
update t4 set b = b + 5 where a = 1;
|
||||
insert into t5(a) values(20);
|
||||
set autocommit = 0;
|
||||
insert into t1(a) values(7);
|
||||
insert into t2(a) values(8);
|
||||
delete from t2 where a = 3;
|
||||
update t4 set b = b + 1 where a = 3;
|
||||
commit;
|
||||
drop trigger t1t;
|
||||
drop trigger t2t;
|
||||
drop trigger t3t;
|
||||
drop trigger t4t;
|
||||
drop table t1, t2, t3, t4, t5;
|
||||
CREATE TABLE t1 (
|
||||
field1 varchar(8) NOT NULL DEFAULT '',
|
||||
field2 varchar(8) NOT NULL DEFAULT '',
|
||||

mysql-test/r/innodb_unsafe_binlog.result (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
drop table if exists t1,t2;
|
||||
create table t1 (id int not null, f_id int not null, f int not null,
|
||||
primary key(f_id, id)) engine=innodb;
|
||||
create table t2 (id int not null,s_id int not null,s varchar(200),
|
||||
primary key(id)) engine=innodb;
|
||||
INSERT INTO t1 VALUES (8, 1, 3);
|
||||
INSERT INTO t1 VALUES (1, 2, 1);
|
||||
INSERT INTO t2 VALUES (1, 0, '');
|
||||
INSERT INTO t2 VALUES (8, 1, '');
|
||||
commit;
|
||||
DELETE ml.* FROM t1 AS ml LEFT JOIN t2 AS mm ON (mm.id=ml.id)
|
||||
WHERE mm.id IS NULL;
|
||||
select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id)
|
||||
where mm.id is null lock in share mode;
|
||||
id f_id f
|
||||
drop table t1,t2;
|
||||
create table t1(a int not null, b int, primary key(a)) engine=innodb;
|
||||
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2);
|
||||
commit;
|
||||
set autocommit = 0;
|
||||
select * from t1 lock in share mode;
|
||||
a b
|
||||
1 1
|
||||
2 2
|
||||
3 1
|
||||
4 2
|
||||
5 1
|
||||
6 2
|
||||
update t1 set b = 5 where b = 1;
|
||||
set autocommit = 0;
|
||||
select * from t1 where a = 2 and b = 2 for update;
|
||||
ERROR HY000: Lock wait timeout exceeded; try restarting transaction
|
||||
commit;
|
||||
commit;
|
||||
drop table t1;
@@ -1273,7 +1273,7 @@ show variables like "innodb_sync_spin_loops";
|
||||
|
||||
# Test for innodb_thread_concurrency variable
|
||||
show variables like "innodb_thread_concurrency";
|
||||
set global innodb_thread_concurrency=1000;
|
||||
set global innodb_thread_concurrency=1001;
|
||||
show variables like "innodb_thread_concurrency";
|
||||
set global innodb_thread_concurrency=0;
|
||||
show variables like "innodb_thread_concurrency";
|
||||
@@ -1610,6 +1610,10 @@ insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
|
||||
insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven');
|
||||
insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point');
|
||||
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
|
||||
update t1 set filler = 'boo' where a = 1;
|
||||
update t2 set filler ='email' where a = 4;
|
||||
select a,hex(b),hex(c),filler from t1 order by filler;
|
||||
select a,hex(b),hex(c),filler from t2 order by filler;
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
|
||||
@@ -1639,6 +1643,10 @@ insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
|
||||
insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven');
|
||||
insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point');
|
||||
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
|
||||
update t1 set filler = 'boo' where a = 1;
|
||||
update t2 set filler ='email' where a = 4;
|
||||
select a,hex(b),hex(c),filler from t1 order by filler;
|
||||
select a,hex(b),hex(c),filler from t2 order by filler;
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
|
||||
@@ -1668,6 +1676,10 @@ insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
|
||||
insert into t2 values (4,_ucs2 0x05630563,_ucs2 0x05630563,'eleven');
|
||||
insert into t2 values (4,_ucs2 0x0563001fc0563,_ucs2 0x0563001fc0563,'point');
|
||||
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
|
||||
update t1 set filler = 'boo' where a = 1;
|
||||
update t2 set filler ='email' where a = 4;
|
||||
select a,hex(b),hex(c),filler from t1 order by filler;
|
||||
select a,hex(b),hex(c),filler from t2 order by filler;
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
|
||||
@@ -1693,10 +1705,102 @@ insert into t2 values (4,_ucs2 0x01fc,_ucs2 0x01fc,'seven');
|
||||
insert into t2 values (4,_ucs2 0x0120,_ucs2 0x0120,'eight');
|
||||
insert into t2 values (4,_ucs2 0x0563,_ucs2 0x0563,'ten');
|
||||
insert into t2 values (4,_ucs2 0x05612020,_ucs2 0x05612020,'taken');
|
||||
update t1 set filler = 'boo' where a = 1;
|
||||
update t2 set filler ='email' where a = 4;
|
||||
select a,hex(b),hex(c),filler from t1 order by filler;
|
||||
select a,hex(b),hex(c),filler from t2 order by filler;
|
||||
drop table t1;
|
||||
drop table t2;
|
||||
commit;
|
||||
|
||||
# tests for bugs #9802 and #13778
|
||||
|
||||
# test that FKs between invalid types are not accepted
|
||||
|
||||
set foreign_key_checks=0;
|
||||
create table t2 (a int primary key, b int, foreign key (b) references t1(a)) engine = innodb;
|
||||
--replace_result $MYSQLTEST_VARDIR . master-data/ ''
|
||||
-- error 1005
|
||||
create table t1(a char(10) primary key, b varchar(20)) engine = innodb;
|
||||
set foreign_key_checks=1;
|
||||
drop table t2;
|
||||
|
||||
# test that FKs between different charsets are not accepted in CREATE even
|
||||
# when f_k_c is 0
|
||||
|
||||
set foreign_key_checks=0;
|
||||
create table t1(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=latin1;
|
||||
--replace_result $MYSQLTEST_VARDIR . master-data/ ''
|
||||
-- error 1005
|
||||
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=utf8;
|
||||
set foreign_key_checks=1;
|
||||
drop table t1;
|
||||
|
||||
# test that invalid datatype conversions with ALTER are not allowed
|
||||
|
||||
set foreign_key_checks=0;
|
||||
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb;
|
||||
create table t1(a varchar(10) primary key) engine = innodb;
|
||||
-- error 1025,1025
|
||||
alter table t1 modify column a int;
|
||||
set foreign_key_checks=1;
|
||||
drop table t2,t1;
|
||||
|
||||
# test that charset conversions with ALTER are allowed when f_k_c is 0
|
||||
|
||||
set foreign_key_checks=0;
|
||||
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=latin1;
|
||||
create table t1(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=latin1;
|
||||
alter table t1 convert to character set utf8;
|
||||
set foreign_key_checks=1;
|
||||
drop table t2,t1;
|
||||
|
||||
# test that RENAME does not allow invalid charsets when f_k_c is 0
|
||||
|
||||
set foreign_key_checks=0;
|
||||
create table t2 (a varchar(10), foreign key (a) references t1(a)) engine = innodb DEFAULT CHARSET=latin1;
|
||||
create table t3(a varchar(10) primary key) engine = innodb DEFAULT CHARSET=utf8;
|
||||
--replace_result $MYSQLTEST_VARDIR . master-data/ ''
|
||||
-- error 1025
|
||||
rename table t3 to t1;
|
||||
set foreign_key_checks=1;
|
||||
drop table t2,t3;
|
||||
|
||||
# test that foreign key errors are reported correctly (Bug #15550)
|
||||
|
||||
create table t1(a int primary key) row_format=redundant engine=innodb;
|
||||
create table t2(a int primary key,constraint foreign key(a)references t1(a)) row_format=compact engine=innodb;
|
||||
create table t3(a int primary key) row_format=compact engine=innodb;
|
||||
create table t4(a int primary key,constraint foreign key(a)references t3(a)) row_format=redundant engine=innodb;
|
||||
|
||||
insert into t1 values(1);
|
||||
insert into t3 values(1);
|
||||
-- error 1452
|
||||
insert into t2 values(2);
|
||||
-- error 1452
|
||||
insert into t4 values(2);
|
||||
insert into t2 values(1);
|
||||
insert into t4 values(1);
|
||||
-- error 1451
|
||||
update t1 set a=2;
|
||||
-- error 1452
|
||||
update t2 set a=2;
|
||||
-- error 1451
|
||||
update t3 set a=2;
|
||||
-- error 1452
|
||||
update t4 set a=2;
|
||||
-- error 1451
|
||||
truncate t1;
|
||||
-- error 1451
|
||||
truncate t3;
|
||||
truncate t2;
|
||||
truncate t4;
|
||||
truncate t1;
|
||||
truncate t3;
|
||||
|
||||
drop table t4,t3,t2,t1;
|
||||
|
||||
|
||||
#
|
||||
# Test that we can create a large (>1K) key
|
||||
#
|
||||
@@ -1714,7 +1818,272 @@ create table t1 (a varchar(255) character set utf8,
|
||||
e varchar(255) character set utf8,
|
||||
key (a,b,c,d,e)) engine=innodb;
|
||||
|
||||
--echo End of 5.0 tests
|
||||
|
||||
# test the padding of BINARY types and collations (Bug #14189)
|
||||
|
||||
create table t1 (s1 varbinary(2),primary key (s1)) engine=innodb;
|
||||
create table t2 (s1 binary(2),primary key (s1)) engine=innodb;
|
||||
create table t3 (s1 varchar(2) binary,primary key (s1)) engine=innodb;
|
||||
create table t4 (s1 char(2) binary,primary key (s1)) engine=innodb;
|
||||
|
||||
insert into t1 values (0x41),(0x4120),(0x4100);
|
||||
-- error 1062
|
||||
insert into t2 values (0x41),(0x4120),(0x4100);
|
||||
insert into t2 values (0x41),(0x4120);
|
||||
-- error 1062
|
||||
insert into t3 values (0x41),(0x4120),(0x4100);
|
||||
insert into t3 values (0x41),(0x4100);
|
||||
-- error 1062
|
||||
insert into t4 values (0x41),(0x4120),(0x4100);
|
||||
insert into t4 values (0x41),(0x4100);
|
||||
select hex(s1) from t1;
|
||||
select hex(s1) from t2;
|
||||
select hex(s1) from t3;
|
||||
select hex(s1) from t4;
|
||||
drop table t1,t2,t3,t4;
|
||||
|
||||
create table t1 (a int primary key,s1 varbinary(3) not null unique) engine=innodb;
|
||||
create table t2 (s1 binary(2) not null, constraint c foreign key(s1) references t1(s1) on update cascade) engine=innodb;
|
||||
|
||||
insert into t1 values(1,0x4100),(2,0x41),(3,0x4120),(4,0x42);
|
||||
-- error 1452
|
||||
insert into t2 values(0x42);
|
||||
insert into t2 values(0x41);
|
||||
select hex(s1) from t2;
|
||||
update t1 set s1=0x123456 where a=2;
|
||||
select hex(s1) from t2;
|
||||
-- error 1451
|
||||
update t1 set s1=0x12 where a=1;
|
||||
-- error 1451
|
||||
update t1 set s1=0x12345678 where a=1;
|
||||
-- error 1451
|
||||
update t1 set s1=0x123457 where a=1;
|
||||
update t1 set s1=0x1220 where a=1;
|
||||
select hex(s1) from t2;
|
||||
update t1 set s1=0x1200 where a=1;
|
||||
select hex(s1) from t2;
|
||||
update t1 set s1=0x4200 where a=1;
|
||||
select hex(s1) from t2;
|
||||
-- error 1451
|
||||
delete from t1 where a=1;
|
||||
delete from t1 where a=2;
|
||||
update t2 set s1=0x4120;
|
||||
-- error 1451
|
||||
delete from t1;
|
||||
delete from t1 where a!=3;
|
||||
select a,hex(s1) from t1;
|
||||
select hex(s1) from t2;
|
||||
|
||||
drop table t2,t1;
|
||||
|
||||
create table t1 (a int primary key,s1 varchar(2) binary not null unique) engine=innodb;
|
||||
create table t2 (s1 char(2) binary not null, constraint c foreign key(s1) references t1(s1) on update cascade) engine=innodb;
|
||||
|
||||
insert into t1 values(1,0x4100),(2,0x41);
|
||||
insert into t2 values(0x41);
|
||||
select hex(s1) from t2;
|
||||
update t1 set s1=0x1234 where a=1;
|
||||
select hex(s1) from t2;
|
||||
update t1 set s1=0x12 where a=2;
|
||||
select hex(s1) from t2;
|
||||
delete from t1 where a=1;
|
||||
-- error 1451
|
||||
delete from t1 where a=2;
|
||||
select a,hex(s1) from t1;
|
||||
select hex(s1) from t2;
|
||||
|
||||
drop table t2,t1;
|
||||
#
|
||||
# Test cases for bug #15308 Problem of Order with Enum Column in Primary Key
|
||||
#
|
||||
CREATE TABLE t1 (
|
||||
ind enum('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
CREATE TABLE t2 (
|
||||
ind enum('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=ucs2;
|
||||
|
||||
INSERT INTO t1 VALUES ('1', ''),('2', '');
|
||||
INSERT INTO t2 VALUES ('1', ''),('2', '');
|
||||
SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1;
|
||||
SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1;
|
||||
drop table t1,t2;
|
||||
|
||||
CREATE TABLE t1 (
|
||||
ind set('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
CREATE TABLE t2 (
|
||||
ind set('0','1','2') NOT NULL default '0',
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=ucs2;
|
||||
|
||||
INSERT INTO t1 VALUES ('1', ''),('2', '');
|
||||
INSERT INTO t2 VALUES ('1', ''),('2', '');
|
||||
SELECT hex(ind),hex(string1) FROM t1 ORDER BY string1;
|
||||
SELECT hex(ind),hex(string1) FROM t2 ORDER BY string1;
|
||||
drop table t1,t2;
|
||||
|
||||
CREATE TABLE t1 (
|
||||
ind bit not null,
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
|
||||
CREATE TABLE t2 (
|
||||
ind bit not null,
|
||||
string1 varchar(250) NOT NULL,
|
||||
PRIMARY KEY (ind)
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=ucs2;
|
||||
insert into t1 values(0,''),(1,'');
|
||||
insert into t2 values(0,''),(1,'');
|
||||
select hex(ind),hex(string1) from t1 order by string1;
|
||||
select hex(ind),hex(string1) from t2 order by string1;
|
||||
drop table t1,t2;
|
||||
|
||||
# tests for bug #14056 Column prefix index on UTF-8 primary key column causes 'Can't find record..'
|
||||
|
||||
create table t2 (
|
||||
a int, b char(10), filler char(10), primary key(a, b(2))
|
||||
) character set utf8 engine = innodb;
|
||||
|
||||
insert into t2 values (1,'abcdefg','one');
|
||||
insert into t2 values (2,'ijkilmn','two');
|
||||
insert into t2 values (3, 'qrstuvw','three');
|
||||
update t2 set a=5, filler='booo' where a=1;
|
||||
drop table t2;
|
||||
create table t2 (
|
||||
a int, b char(10), filler char(10), primary key(a, b(2))
|
||||
) character set ucs2 engine = innodb;
|
||||
|
||||
insert into t2 values (1,'abcdefg','one');
|
||||
insert into t2 values (2,'ijkilmn','two');
|
||||
insert into t2 values (3, 'qrstuvw','three');
|
||||
update t2 set a=5, filler='booo' where a=1;
|
||||
drop table t2;
|
||||
|
||||
create table t1(a int not null, b char(110),primary key(a,b(100))) engine=innodb default charset=utf8;
|
||||
insert into t1 values(1,'abcdefg'),(2,'defghijk');
|
||||
insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1);
|
||||
insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2);
|
||||
select a,hex(b) from t1 order by b;
|
||||
update t1 set b = 'three' where a = 6;
|
||||
drop table t1;
|
||||
create table t1(a int not null, b text(110),primary key(a,b(100))) engine=innodb default charset=utf8;
|
||||
insert into t1 values(1,'abcdefg'),(2,'defghijk');
|
||||
insert into t1 values(6,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1);
|
||||
insert into t1 values(7,_utf8 0xD0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B1D0B2);
|
||||
select a,hex(b) from t1 order by b;
|
||||
update t1 set b = 'three' where a = 6;
|
||||
drop table t1;
|
||||
|
||||
# Ensure that <tablename>_ibfk_0 is not mistreated as a
|
||||
# generated foreign key identifier. (Bug #16387)
|
||||
|
||||
CREATE TABLE t1(a INT, PRIMARY KEY(a)) ENGINE=InnoDB;
|
||||
CREATE TABLE t2(a INT) ENGINE=InnoDB;
|
||||
ALTER TABLE t2 ADD FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_1;
|
||||
ALTER TABLE t2 ADD CONSTRAINT t2_ibfk_0 FOREIGN KEY (a) REFERENCES t1(a);
|
||||
ALTER TABLE t2 DROP FOREIGN KEY t2_ibfk_0;
|
||||
SHOW CREATE TABLE t2;
|
||||
DROP TABLE t2,t1;
|
||||
|
||||
#
|
||||
# Test case for bug #16229: MySQL/InnoDB uses full explicit table locks in trigger processing
|
||||
#
|
||||
|
||||
connect (a,localhost,root,,);
|
||||
connect (b,localhost,root,,);
|
||||
connection a;
|
||||
create table t1(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
insert into t1(a) values (1),(2),(3);
|
||||
commit;
|
||||
connection b;
|
||||
set autocommit = 0;
|
||||
update t1 set b = 5 where a = 2;
|
||||
connection a;
|
||||
delimiter |;
|
||||
create trigger t1t before insert on t1 for each row begin set NEW.b = NEW.a * 10 + 5, NEW.c = NEW.a / 10; end |
|
||||
delimiter ;|
|
||||
set autocommit = 0;
|
||||
connection a;
|
||||
insert into t1(a) values (10),(20),(30),(40),(50),(60),(70),(80),(90),(100),
|
||||
(11),(21),(31),(41),(51),(61),(71),(81),(91),(101),
|
||||
(12),(22),(32),(42),(52),(62),(72),(82),(92),(102),
|
||||
(13),(23),(33),(43),(53),(63),(73),(83),(93),(103),
|
||||
(14),(24),(34),(44),(54),(64),(74),(84),(94),(104);
|
||||
connection b;
|
||||
commit;
|
||||
connection a;
|
||||
commit;
|
||||
drop trigger t1t;
|
||||
drop table t1;
|
||||
disconnect a;
|
||||
disconnect b;
|
||||
#
|
||||
# Another trigger test
|
||||
#
|
||||
connect (a,localhost,root,,);
|
||||
connect (b,localhost,root,,);
|
||||
connection a;
|
||||
create table t1(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t2(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t3(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t4(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
create table t5(a int not null, b int, c int, d int, primary key(a)) engine=innodb;
|
||||
insert into t1(a) values (1),(2),(3);
|
||||
insert into t2(a) values (1),(2),(3);
|
||||
insert into t3(a) values (1),(2),(3);
|
||||
insert into t4(a) values (1),(2),(3);
|
||||
insert into t3(a) values (5),(7),(8);
|
||||
insert into t4(a) values (5),(7),(8);
|
||||
insert into t5(a) values (1),(2),(3),(4),(5),(6),(7),(8),(9),(10),(11),(12);
|
||||
|
||||
delimiter |;
|
||||
create trigger t1t before insert on t1 for each row begin
|
||||
INSERT INTO t2 SET a = NEW.a;
|
||||
end |
|
||||
|
||||
create trigger t2t before insert on t2 for each row begin
|
||||
DELETE FROM t3 WHERE a = NEW.a;
|
||||
end |
|
||||
|
||||
create trigger t3t before delete on t3 for each row begin
|
||||
UPDATE t4 SET b = b + 1 WHERE a = OLD.a;
|
||||
end |
|
||||
|
||||
create trigger t4t before update on t4 for each row begin
|
||||
UPDATE t5 SET b = b + 1 where a = NEW.a;
|
||||
end |
|
||||
delimiter ;|
|
||||
commit;
|
||||
set autocommit = 0;
|
||||
update t1 set b = b + 5 where a = 1;
|
||||
update t2 set b = b + 5 where a = 1;
|
||||
update t3 set b = b + 5 where a = 1;
|
||||
update t4 set b = b + 5 where a = 1;
|
||||
insert into t5(a) values(20);
|
||||
connection b;
|
||||
set autocommit = 0;
|
||||
insert into t1(a) values(7);
|
||||
insert into t2(a) values(8);
|
||||
delete from t2 where a = 3;
|
||||
update t4 set b = b + 1 where a = 3;
|
||||
commit;
|
||||
drop trigger t1t;
|
||||
drop trigger t2t;
|
||||
drop trigger t3t;
|
||||
drop trigger t4t;
|
||||
drop table t1, t2, t3, t4, t5;
|
||||
connection default;
|
||||
disconnect a;
|
||||
disconnect b;
|
||||
|
||||
#
|
||||
# Test that cascading updates leading to duplicate keys give the correct

mysql-test/t/innodb_unsafe_binlog-master.opt (new file, 1 line)
@@ -0,0 +1 @@
|
||||
--innodb_locks_unsafe_for_binlog=true

mysql-test/t/innodb_unsafe_binlog.test (new file, 55 lines)
@@ -0,0 +1,55 @@
|
||||
-- source include/have_innodb.inc
|
||||
#
|
||||
# Note that these tests use the innodb_locks_unsafe_for_binlog option.
|
||||
#
|
||||
#
|
||||
# Test cases for bug #15650
|
||||
#
|
||||
|
||||
--disable_warnings
|
||||
drop table if exists t1,t2;
|
||||
--enable_warnings
|
||||
create table t1 (id int not null, f_id int not null, f int not null,
|
||||
primary key(f_id, id)) engine=innodb;
|
||||
create table t2 (id int not null,s_id int not null,s varchar(200),
|
||||
primary key(id)) engine=innodb;
|
||||
INSERT INTO t1 VALUES (8, 1, 3);
|
||||
INSERT INTO t1 VALUES (1, 2, 1);
|
||||
INSERT INTO t2 VALUES (1, 0, '');
|
||||
INSERT INTO t2 VALUES (8, 1, '');
|
||||
commit;
|
||||
DELETE ml.* FROM t1 AS ml LEFT JOIN t2 AS mm ON (mm.id=ml.id)
|
||||
WHERE mm.id IS NULL;
|
||||
select ml.* from t1 as ml left join t2 as mm on (mm.id=ml.id)
|
||||
where mm.id is null lock in share mode;
|
||||
drop table t1,t2;
|
||||
|
||||
#
|
||||
# Test case for the unlock row bug where unlock_row() releases all locks
# granted for a row. Only the latest lock should be released.
|
||||
#
|
||||
|
||||
connect (a,localhost,root,,);
|
||||
connect (b,localhost,root,,);
|
||||
connection a;
|
||||
create table t1(a int not null, b int, primary key(a)) engine=innodb;
|
||||
insert into t1 values(1,1),(2,2),(3,1),(4,2),(5,1),(6,2);
|
||||
commit;
|
||||
set autocommit = 0;
|
||||
select * from t1 lock in share mode;
|
||||
update t1 set b = 5 where b = 1;
|
||||
connection b;
|
||||
set autocommit = 0;
|
||||
#
|
||||
# S-locks on records (2,2), (4,2), and (6,2) should not be released by the update
|
||||
#
|
||||
--error 1205
|
||||
select * from t1 where a = 2 and b = 2 for update;
|
||||
connection a;
|
||||
commit;
|
||||
connection b;
|
||||
commit;
|
||||
drop table t1;
|
||||
disconnect a;
|
||||
disconnect b;

sql/ha_innodb.cc (3156 lines changed; diff not shown here because it is too large)

sql/ha_innodb.h (218 lines changed)
@@ -7,12 +7,12 @@
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */
|
||||
|
||||
/*
|
||||
This file is based on ha_berkeley.h of MySQL distribution
|
||||
@@ -34,8 +34,8 @@ typedef struct st_innobase_share {
|
||||
|
||||
|
||||
my_bool innobase_query_caching_of_table_permitted(THD* thd, char* full_name,
|
||||
uint full_name_len,
|
||||
ulonglong *unused);
|
||||
uint full_name_len,
|
||||
ulonglong *unused);
|
||||
|
||||
/* The class defining a handle to an Innodb table */
|
||||
class ha_innobase: public handler
|
||||
@@ -47,22 +47,20 @@ class ha_innobase: public handler
|
||||
THD* user_thd; /* the thread handle of the user
|
||||
currently using the handle; this is
|
||||
set in external_lock function */
|
||||
query_id_t last_query_id; /* the latest query id where the
|
||||
query_id_t last_query_id; /* the latest query id where the
|
||||
handle was used */
|
||||
THR_LOCK_DATA lock;
|
||||
INNOBASE_SHARE *share;
|
||||
THR_LOCK_DATA lock;
|
||||
INNOBASE_SHARE *share;
|
||||
|
||||
gptr alloc_ptr;
|
||||
byte* upd_buff; /* buffer used in updates */
|
||||
byte* key_val_buff; /* buffer used in converting
|
||||
search key values from MySQL format
|
||||
to Innodb format */
|
||||
byte* upd_buff; /* buffer used in updates */
|
||||
byte* key_val_buff; /* buffer used in converting
|
||||
search key values from MySQL format
|
||||
to Innodb format */
|
||||
ulong upd_and_key_val_buff_len;
|
||||
/* the length of each of the previous
|
||||
two buffers */
|
||||
ulong int_table_flags;
|
||||
uint primary_key;
|
||||
uint last_dup_key;
|
||||
ulong int_table_flags;
|
||||
uint primary_key;
|
||||
ulong start_of_scan; /* this is set to 1 when we are
|
||||
starting a table scan but have not
|
||||
yet fetched any row, else 0 */
|
||||
@@ -70,10 +68,9 @@ class ha_innobase: public handler
|
||||
ROW_SEL_EXACT, ROW_SEL_EXACT_PREFIX,
|
||||
or undefined */
|
||||
uint num_write_row; /* number of write_row() calls */
|
||||
ulong max_supported_row_length(const byte *buf);
|
||||
|
||||
uint store_key_val_for_row(uint keynr, char* buff, uint buff_len,
|
||||
const byte* record);
|
||||
const byte* record);
|
||||
int update_thd(THD* thd);
|
||||
int change_active_index(uint keynr);
|
||||
int general_fetch(byte* buf, uint direction, uint match_mode);
|
||||
@@ -81,27 +78,27 @@ class ha_innobase: public handler
|
||||
|
||||
/* Init values for the class: */
|
||||
public:
|
||||
ha_innobase(TABLE_SHARE *table_arg);
|
||||
~ha_innobase() {}
|
||||
ha_innobase(TABLE_SHARE *table_arg);
|
||||
~ha_innobase() {}
|
||||
/*
|
||||
Get the row type from the storage engine. If this method returns
|
||||
ROW_TYPE_NOT_USED, the information in HA_CREATE_INFO should be used.
|
||||
*/
|
||||
enum row_type get_row_type() const;
|
||||
|
||||
const char* table_type() const { return("InnoDB");}
|
||||
const char* table_type() const { return("InnoDB");}
|
||||
const char *index_type(uint key_number) { return "BTREE"; }
|
||||
const char** bas_ext() const;
|
||||
ulong table_flags() const { return int_table_flags; }
|
||||
const char** bas_ext() const;
|
||||
ulong table_flags() const { return int_table_flags; }
|
||||
ulong index_flags(uint idx, uint part, bool all_parts) const
|
||||
{
|
||||
return (HA_READ_NEXT |
|
||||
HA_READ_PREV |
|
||||
HA_READ_ORDER |
|
||||
HA_READ_RANGE |
|
||||
HA_KEYREAD_ONLY);
|
||||
HA_KEYREAD_ONLY);
|
||||
}
|
||||
uint max_supported_keys() const { return MAX_KEY; }
|
||||
uint max_supported_keys() const { return MAX_KEY; }
|
||||
/* An InnoDB page must store >= 2 keys;
|
||||
a secondary key record must also contain the
|
||||
primary key value:
|
||||
@ -109,116 +106,113 @@ class ha_innobase: public handler
|
||||
less than 1 / 4 of page size which is 16 kB;
|
||||
but currently MySQL does not work with keys
|
||||
whose size is > MAX_KEY_LENGTH */
|
||||
uint max_supported_key_length() const { return 3500; }
|
||||
uint max_supported_key_part_length() const;
|
||||
uint max_supported_key_length() const { return 3500; }
|
||||
uint max_supported_key_part_length() const;
|
||||
const key_map *keys_to_use_for_scanning() { return &key_map_full; }
|
||||
bool has_transactions() { return 1;}
|
||||
bool has_transactions() { return 1;}
|
||||
|
||||
int open(const char *name, int mode, uint test_if_locked);
|
||||
int close(void);
|
||||
double scan_time();
|
||||
int open(const char *name, int mode, uint test_if_locked);
|
||||
int close(void);
|
||||
double scan_time();
|
||||
double read_time(uint index, uint ranges, ha_rows rows);
|
||||
|
||||
int write_row(byte * buf);
|
||||
int update_row(const byte * old_data, byte * new_data);
|
||||
int delete_row(const byte * buf);
|
||||
int write_row(byte * buf);
|
||||
int update_row(const byte * old_data, byte * new_data);
|
||||
int delete_row(const byte * buf);
|
||||
bool was_semi_consistent_read();
|
||||
void try_semi_consistent_read(bool yes);
|
||||
void unlock_row();
|
||||
|
||||
int index_init(uint index, bool sorted);
|
||||
int index_end();
|
||||
int index_read(byte * buf, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_idx(byte * buf, uint index, const byte * key,
|
||||
int index_init(uint index, bool sorted);
|
||||
int index_end();
|
||||
int index_read(byte * buf, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_idx(byte * buf, uint index, const byte * key,
|
||||
uint key_len, enum ha_rkey_function find_flag);
|
||||
int index_read_last(byte * buf, const byte * key, uint key_len);
|
||||
int index_next(byte * buf);
|
||||
int index_next_same(byte * buf, const byte *key, uint keylen);
|
||||
int index_prev(byte * buf);
|
||||
int index_first(byte * buf);
|
||||
int index_last(byte * buf);
|
||||
int index_next(byte * buf);
|
||||
int index_next_same(byte * buf, const byte *key, uint keylen);
|
||||
int index_prev(byte * buf);
|
||||
int index_first(byte * buf);
|
||||
int index_last(byte * buf);
|
||||
|
||||
int rnd_init(bool scan);
|
||||
int rnd_end();
|
||||
int rnd_next(byte *buf);
|
||||
int rnd_pos(byte * buf, byte *pos);
|
||||
int rnd_init(bool scan);
|
||||
int rnd_end();
|
||||
int rnd_next(byte *buf);
|
||||
int rnd_pos(byte * buf, byte *pos);
|
||||
|
||||
void position(const byte *record);
|
||||
void info(uint);
|
||||
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
|
||||
int optimize(THD* thd,HA_CHECK_OPT* check_opt);
|
||||
void position(const byte *record);
|
||||
void info(uint);
|
||||
int analyze(THD* thd,HA_CHECK_OPT* check_opt);
|
||||
int optimize(THD* thd,HA_CHECK_OPT* check_opt);
|
||||
int discard_or_import_tablespace(my_bool discard);
|
||||
int extra(enum ha_extra_function operation);
|
||||
int external_lock(THD *thd, int lock_type);
|
||||
int extra(enum ha_extra_function operation);
|
||||
int external_lock(THD *thd, int lock_type);
|
||||
int transactional_table_lock(THD *thd, int lock_type);
|
||||
int start_stmt(THD *thd, thr_lock_type lock_type);
|
||||
int start_stmt(THD *thd, thr_lock_type lock_type);
|
||||
|
||||
int ha_retrieve_all_cols()
|
||||
{
|
||||
ha_set_all_bits_in_read_set();
|
||||
return extra(HA_EXTRA_RETRIEVE_ALL_COLS);
|
||||
}
|
||||
int ha_retrieve_all_pk()
|
||||
{
|
||||
ha_set_primary_key_in_read_set();
|
||||
return extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
|
||||
}
|
||||
void position(byte *record);
|
||||
ha_rows records_in_range(uint inx, key_range *min_key, key_range
|
||||
int ha_retrieve_all_cols()
|
||||
{
|
||||
ha_set_all_bits_in_read_set();
|
||||
return extra(HA_EXTRA_RETRIEVE_ALL_COLS);
|
||||
}
|
||||
int ha_retrieve_all_pk()
|
||||
{
|
||||
ha_set_primary_key_in_read_set();
|
||||
return extra(HA_EXTRA_RETRIEVE_PRIMARY_KEY);
|
||||
}
|
||||
void position(byte *record);
|
||||
ha_rows records_in_range(uint inx, key_range *min_key, key_range
|
||||
*max_key);
|
||||
ha_rows estimate_rows_upper_bound();
|
||||
|
||||
int create(const char *name, register TABLE *form,
|
||||
int create(const char *name, register TABLE *form,
|
||||
HA_CREATE_INFO *create_info);
|
||||
int delete_all_rows();
|
||||
int delete_table(const char *name);
|
||||
int delete_table(const char *name);
|
||||
int rename_table(const char* from, const char* to);
|
||||
int check(THD* thd, HA_CHECK_OPT* check_opt);
|
||||
char* update_table_comment(const char* comment);
|
||||
char* update_table_comment(const char* comment);
|
||||
char* get_foreign_key_create_info();
|
||||
int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list);
|
||||
int get_foreign_key_list(THD *thd, List<FOREIGN_KEY_INFO> *f_key_list);
|
||||
bool can_switch_engines();
|
||||
uint referenced_by_foreign_key();
|
||||
uint referenced_by_foreign_key();
|
||||
void free_foreign_key_create_info(char* str);
|
||||
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
|
||||
enum thr_lock_type lock_type);
|
||||
THR_LOCK_DATA **store_lock(THD *thd, THR_LOCK_DATA **to,
|
||||
enum thr_lock_type lock_type);
|
||||
void init_table_handle_for_HANDLER();
|
||||
ulonglong get_auto_increment();
|
||||
int reset_auto_increment(ulonglong value);
|
||||
|
||||
virtual bool get_error_message(int error, String *buf);
|
||||
|
||||
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
|
||||
/*
|
||||
ask handler about permission to cache table during query registration
|
||||
*/
|
||||
my_bool register_query_cache_table(THD *thd, char *table_key,
|
||||
uint8 table_cache_type() { return HA_CACHE_TBL_ASKTRANSACT; }
|
||||
/*
|
||||
ask handler about permission to cache table during query registration
|
||||
*/
|
||||
my_bool register_query_cache_table(THD *thd, char *table_key,
|
||||
uint key_length,
|
||||
qc_engine_callback *call_back,
|
||||
ulonglong *engine_data)
|
||||
{
|
||||
*call_back= innobase_query_caching_of_table_permitted;
|
||||
*engine_data= 0;
|
||||
return innobase_query_caching_of_table_permitted(thd, table_key,
|
||||
key_length,
|
||||
engine_data);
|
||||
}
|
||||
static char *get_mysql_bin_log_name();
|
||||
static ulonglong get_mysql_bin_log_pos();
|
||||
bool primary_key_is_clustered() { return true; }
|
||||
int cmp_ref(const byte *ref1, const byte *ref2);
|
||||
{
|
||||
*call_back= innobase_query_caching_of_table_permitted;
|
||||
*engine_data= 0;
|
||||
return innobase_query_caching_of_table_permitted(thd, table_key,
|
||||
key_length,
|
||||
engine_data);
|
||||
}
|
||||
static char *get_mysql_bin_log_name();
|
||||
static ulonglong get_mysql_bin_log_pos();
|
||||
bool primary_key_is_clustered() { return true; }
|
||||
int cmp_ref(const byte *ref1, const byte *ref2);
|
||||
bool check_if_incompatible_data(HA_CREATE_INFO *info,
|
||||
uint table_changes);
|
||||
};
|
||||
|
||||
extern SHOW_VAR innodb_status_variables[];
|
||||
extern uint innobase_init_flags, innobase_lock_type;
|
||||
extern uint innobase_flush_log_at_trx_commit;
|
||||
extern ulong innobase_cache_size, innobase_fast_shutdown;
|
||||
extern ulong innobase_fast_shutdown;
|
||||
extern ulong innobase_large_page_size;
|
||||
extern char *innobase_home, *innobase_tmpdir, *innobase_logdir;
|
||||
extern long innobase_lock_scan_time;
|
||||
extern long innobase_mirrored_log_groups, innobase_log_files_in_group;
|
||||
extern longlong innobase_buffer_pool_size, innobase_log_file_size;
|
||||
extern long innobase_log_buffer_size;
|
||||
@ -232,17 +226,18 @@ extern char *innobase_log_group_home_dir, *innobase_log_arch_dir;
|
||||
extern char *innobase_unix_file_flush_method;
|
||||
/* The following variables have to be my_bool for SHOW VARIABLES to work */
|
||||
extern my_bool innobase_log_archive,
|
||||
innobase_use_doublewrite,
|
||||
innobase_use_checksums,
|
||||
innobase_use_large_pages,
|
||||
innobase_use_native_aio,
|
||||
innobase_file_per_table, innobase_locks_unsafe_for_binlog,
|
||||
innobase_create_status_file;
|
||||
extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before
|
||||
calling innobase_end() if you want
|
||||
InnoDB to shut down without
|
||||
flushing the buffer pool: this
|
||||
is equivalent to a 'crash' */
|
||||
innobase_use_doublewrite,
|
||||
innobase_use_checksums,
|
||||
innobase_use_large_pages,
|
||||
innobase_use_native_aio,
|
||||
innobase_file_per_table, innobase_locks_unsafe_for_binlog,
|
||||
innobase_create_status_file;
|
||||
extern my_bool innobase_very_fast_shutdown; /* set this to 1 just before
|
||||
calling innobase_end() if
|
||||
you want InnoDB to shut down
|
||||
without flushing the buffer
|
||||
pool: this is equivalent to
|
||||
a 'crash' */
|
||||
extern "C" {
|
||||
extern ulong srv_max_buf_pool_modified_pct;
|
||||
extern ulong srv_max_purge_lag;
|
||||
@ -254,8 +249,6 @@ extern ulong srv_thread_concurrency;
|
||||
extern ulong srv_commit_concurrency;
|
||||
}
|
||||
|
||||
extern TYPELIB innobase_lock_typelib;
|
||||
|
||||
bool innobase_init(void);
|
||||
int innobase_end(ha_panic_function type);
|
||||
bool innobase_flush_logs(void);
|
||||
@ -267,10 +260,10 @@ uint innobase_get_free_space(void);
|
||||
*/
|
||||
#if 0
|
||||
int innobase_report_binlog_offset_and_commit(
|
||||
THD* thd,
|
||||
THD* thd,
|
||||
void* trx_handle,
|
||||
char* log_file_name,
|
||||
my_off_t end_offset);
|
||||
char* log_file_name,
|
||||
my_off_t end_offset);
|
||||
int innobase_commit_complete(void* trx_handle);
|
||||
void innobase_store_binlog_offset_and_flush_log(char *binlog_name,longlong offset);
|
||||
#endif
|
||||
@ -302,7 +295,7 @@ int innobase_xa_recover(
|
||||
/*====================*/
|
||||
/* out: number of prepared transactions
|
||||
stored in xid_list */
|
||||
XID* xid_list, /* in/out: prepared transactions */
|
||||
XID* xid_list, /* in/out: prepared transactions */
|
||||
uint len); /* in: number of slots in xid_list */
|
||||
|
||||
/***********************************************************************
|
||||
@ -323,11 +316,8 @@ int innobase_rollback_by_xid(
|
||||
XID *xid); /* in : X/Open XA Transaction Identification */
|
||||
|
||||
|
||||
int innobase_xa_end(THD *thd);
|
||||
|
||||
|
||||
int innobase_repl_report_sent_binlog(THD *thd, char *log_file_name,
|
||||
my_off_t end_offset);
|
||||
my_off_t end_offset);
|
||||
|
||||
/***********************************************************************
|
||||
Create a consistent view for a cursor based on current transaction
|
||||
|
@ -144,7 +144,7 @@ btr_root_get(
|
||||
|
||||
root = btr_page_get(space, root_page_no, RW_X_LATCH, mtr);
|
||||
ut_a((ibool)!!page_is_comp(root) ==
|
||||
UT_LIST_GET_FIRST(tree->tree_indexes)->table->comp);
|
||||
dict_table_is_comp(UT_LIST_GET_FIRST(tree->tree_indexes)->table));
|
||||
|
||||
return(root);
|
||||
}
|
||||
@ -186,9 +186,9 @@ btr_get_prev_user_rec(
|
||||
mtr);
|
||||
/* The caller must already have a latch to the brother */
|
||||
ut_ad((mtr_memo_contains(mtr, buf_block_align(prev_page),
|
||||
MTR_MEMO_PAGE_S_FIX))
|
||||
|| (mtr_memo_contains(mtr, buf_block_align(prev_page),
|
||||
MTR_MEMO_PAGE_X_FIX)));
|
||||
MTR_MEMO_PAGE_S_FIX))
|
||||
|| (mtr_memo_contains(mtr, buf_block_align(prev_page),
|
||||
MTR_MEMO_PAGE_X_FIX)));
|
||||
ut_a(page_is_comp(prev_page) == page_is_comp(page));
|
||||
|
||||
return(page_rec_get_prev(page_get_supremum_rec(prev_page)));
|
||||
@ -234,9 +234,9 @@ btr_get_next_user_rec(
|
||||
mtr);
|
||||
/* The caller must already have a latch to the brother */
|
||||
ut_ad((mtr_memo_contains(mtr, buf_block_align(next_page),
|
||||
MTR_MEMO_PAGE_S_FIX))
|
||||
|| (mtr_memo_contains(mtr, buf_block_align(next_page),
|
||||
MTR_MEMO_PAGE_X_FIX)));
|
||||
MTR_MEMO_PAGE_S_FIX))
|
||||
|| (mtr_memo_contains(mtr, buf_block_align(next_page),
|
||||
MTR_MEMO_PAGE_X_FIX)));
|
||||
|
||||
ut_a(page_is_comp(next_page) == page_is_comp(page));
|
||||
return(page_rec_get_next(page_get_infimum_rec(next_page)));
|
||||
@ -257,9 +257,9 @@ btr_page_create(
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
page_create(page, mtr,
|
||||
UT_LIST_GET_FIRST(tree->tree_indexes)->table->comp);
|
||||
dict_table_is_comp(UT_LIST_GET_FIRST(tree->tree_indexes)->table));
|
||||
buf_block_align(page)->check_index_page_at_flush = TRUE;
|
||||
|
||||
btr_page_set_index_id(page, tree->id, mtr);
|
||||
@ -293,7 +293,7 @@ btr_page_alloc_for_ibuf(
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
|
||||
new_page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
|
||||
new_page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE,
|
||||
mtr);
|
||||
ut_ad(flst_validate(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST, mtr));
|
||||
|
||||
@ -413,11 +413,11 @@ btr_page_free_for_ibuf(
|
||||
page_t* root;
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
root = btr_root_get(tree, mtr);
|
||||
|
||||
flst_add_first(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
|
||||
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
|
||||
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, mtr);
|
||||
|
||||
ut_ad(flst_validate(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
|
||||
mtr));
|
||||
@ -442,7 +442,7 @@ btr_page_free_low(
|
||||
ulint page_no;
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
/* The page gets invalid for optimistic searches: increment the frame
|
||||
modify clock */
|
||||
|
||||
@ -483,7 +483,7 @@ btr_page_free(
|
||||
ulint level;
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
level = btr_page_get_level(page, mtr);
|
||||
|
||||
btr_page_free_low(tree, page, level, mtr);
|
||||
@ -589,7 +589,7 @@ btr_page_get_father_for_rec(
|
||||
ULINT_UNDEFINED, &heap);
|
||||
|
||||
if (btr_node_ptr_get_child_page_no(node_ptr, offsets) !=
|
||||
buf_frame_get_page_no(page)) {
|
||||
buf_frame_get_page_no(page)) {
|
||||
rec_t* print_rec;
|
||||
fputs("InnoDB: Dump of the child page:\n", stderr);
|
||||
buf_page_print(buf_frame_align(page));
|
||||
@ -677,13 +677,13 @@ btr_create(
|
||||
buf_page_dbg_add_level(ibuf_hdr_frame, SYNC_TREE_NODE_NEW);
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
ut_ad(buf_frame_get_page_no(ibuf_hdr_frame)
|
||||
== IBUF_HEADER_PAGE_NO);
|
||||
== IBUF_HEADER_PAGE_NO);
|
||||
/* Allocate then the next page to the segment: it will be the
|
||||
tree root page */
|
||||
tree root page */
|
||||
|
||||
page_no = fseg_alloc_free_page(
|
||||
page_no = fseg_alloc_free_page(
|
||||
ibuf_hdr_frame + IBUF_HEADER
|
||||
+ IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO,
|
||||
+ IBUF_TREE_SEG_HEADER, IBUF_TREE_ROOT_PAGE_NO,
|
||||
FSP_UP, mtr);
|
||||
ut_ad(page_no == IBUF_TREE_ROOT_PAGE_NO);
|
||||
|
||||
@ -845,8 +845,8 @@ btr_page_reorganize_low(
|
||||
ulint max_ins_size2;
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
ut_ad(!!page_is_comp(page) == index->table->comp);
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
|
||||
data_size1 = page_get_data_size(page);
|
||||
max_ins_size1 = page_get_max_insert_size_after_reorganize(page, 1);
|
||||
|
||||
@ -892,7 +892,7 @@ btr_page_reorganize_low(
|
||||
if (data_size1 != data_size2 || max_ins_size1 != max_ins_size2) {
|
||||
buf_page_print(page);
|
||||
buf_page_print(new_page);
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: page old data size %lu new data size %lu\n"
|
||||
"InnoDB: Error: page old max ins size %lu new max ins size %lu\n"
|
||||
"InnoDB: Submit a detailed bug report to http://bugs.mysql.com\n",
|
||||
@ -955,7 +955,7 @@ btr_page_empty(
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
btr_search_drop_page_hash_index(page);
|
||||
|
||||
/* Recreate the page: note that global data on page (possible
|
||||
@ -1001,7 +1001,7 @@ btr_root_raise_and_insert(
|
||||
ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
|
||||
MTR_MEMO_X_LOCK));
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(root),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
btr_search_drop_page_hash_index(root);
|
||||
|
||||
/* Allocate a new page to the tree. Root splitting is done by first
|
||||
@ -1044,7 +1044,7 @@ btr_root_raise_and_insert(
|
||||
child */
|
||||
|
||||
node_ptr = dict_tree_build_node_ptr(tree, rec, new_page_no, heap,
|
||||
level);
|
||||
level);
|
||||
/* Reorganize the root to get free space */
|
||||
btr_page_reorganize(root, cursor->index, mtr);
|
||||
|
||||
@ -1105,9 +1105,9 @@ btr_page_get_split_rec_to_left(
|
||||
insert_point = btr_cur_get_rec(cursor);
|
||||
|
||||
if (page_header_get_ptr(page, PAGE_LAST_INSERT)
|
||||
== page_rec_get_next(insert_point)) {
|
||||
== page_rec_get_next(insert_point)) {
|
||||
|
||||
infimum = page_get_infimum_rec(page);
|
||||
infimum = page_get_infimum_rec(page);
|
||||
|
||||
/* If the convergence is in the middle of a page, include also
|
||||
the record immediately before the new insert to the upper
|
||||
@ -1115,12 +1115,12 @@ btr_page_get_split_rec_to_left(
|
||||
lots of records smaller than the convergence point. */
|
||||
|
||||
if (infimum != insert_point
|
||||
&& page_rec_get_next(infimum) != insert_point) {
|
||||
&& page_rec_get_next(infimum) != insert_point) {
|
||||
|
||||
*split_rec = insert_point;
|
||||
} else {
|
||||
*split_rec = page_rec_get_next(insert_point);
|
||||
}
|
||||
*split_rec = page_rec_get_next(insert_point);
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
@ -1162,7 +1162,7 @@ btr_page_get_split_rec_to_right(
|
||||
if (page_rec_is_supremum(next_rec)) {
|
||||
split_at_new:
|
||||
/* Split at the new record to insert */
|
||||
*split_rec = NULL;
|
||||
*split_rec = NULL;
|
||||
} else {
|
||||
rec_t* next_next_rec = page_rec_get_next(next_rec);
|
||||
if (page_rec_is_supremum(next_next_rec)) {
|
||||
@ -1265,13 +1265,13 @@ btr_page_get_sure_split_rec(
|
||||
n++;
|
||||
|
||||
if (incl_data + page_dir_calc_reserved_space(n)
|
||||
>= total_space / 2) {
|
||||
>= total_space / 2) {
|
||||
|
||||
if (incl_data + page_dir_calc_reserved_space(n)
|
||||
<= free_space) {
|
||||
/* The next record will be the first on
|
||||
the right half page if it is not the
|
||||
supremum record of page */
|
||||
if (incl_data + page_dir_calc_reserved_space(n)
|
||||
<= free_space) {
|
||||
/* The next record will be the first on
|
||||
the right half page if it is not the
|
||||
supremum record of page */
|
||||
|
||||
if (rec == ins_rec) {
|
||||
rec = NULL;
|
||||
@ -1286,7 +1286,7 @@ btr_page_get_sure_split_rec(
|
||||
if (!page_rec_is_supremum(next_rec)) {
|
||||
rec = next_rec;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func_exit:
|
||||
if (UNIV_LIKELY_NULL(heap)) {
|
||||
@ -1351,14 +1351,14 @@ btr_page_insert_fits(
|
||||
} else if (cmp_dtuple_rec(tuple, split_rec, offsets) >= 0) {
|
||||
|
||||
rec = page_rec_get_next(page_get_infimum_rec(page));
|
||||
end_rec = split_rec;
|
||||
end_rec = split_rec;
|
||||
} else {
|
||||
rec = split_rec;
|
||||
end_rec = page_get_supremum_rec(page);
|
||||
}
|
||||
|
||||
if (total_data + page_dir_calc_reserved_space(total_n_recs)
|
||||
<= free_space) {
|
||||
<= free_space) {
|
||||
|
||||
/* Ok, there will be enough available space on the
|
||||
half page where the tuple is inserted */
|
||||
@ -1379,7 +1379,7 @@ btr_page_insert_fits(
|
||||
total_n_recs--;
|
||||
|
||||
if (total_data + page_dir_calc_reserved_space(total_n_recs)
|
||||
<= free_space) {
|
||||
<= free_space) {
|
||||
|
||||
/* Ok, there will be enough available space on the
|
||||
half page where the tuple is inserted */
|
||||
@ -1416,15 +1416,12 @@ btr_insert_on_non_leaf_level(
|
||||
first parameter for btr_cur_search_to_nth_level. */
|
||||
|
||||
btr_cur_search_to_nth_level(UT_LIST_GET_FIRST(tree->tree_indexes),
|
||||
level, tuple, PAGE_CUR_LE,
|
||||
BTR_CONT_MODIFY_TREE,
|
||||
&cursor, 0, mtr);
|
||||
level, tuple, PAGE_CUR_LE, BTR_CONT_MODIFY_TREE,
|
||||
&cursor, 0, mtr);
|
||||
|
||||
err = btr_cur_pessimistic_insert(BTR_NO_LOCKING_FLAG
|
||||
| BTR_KEEP_SYS_FLAG
|
||||
| BTR_NO_UNDO_LOG_FLAG,
|
||||
&cursor, tuple,
|
||||
&rec, &dummy_big_rec, NULL, mtr);
|
||||
| BTR_KEEP_SYS_FLAG | BTR_NO_UNDO_LOG_FLAG,
|
||||
&cursor, tuple, &rec, &dummy_big_rec, NULL, mtr);
|
||||
ut_a(err == DB_SUCCESS);
|
||||
}
|
||||
|
||||
@ -1455,12 +1452,12 @@ btr_attach_half_pages(
|
||||
ulint lower_page_no;
|
||||
ulint upper_page_no;
|
||||
dtuple_t* node_ptr_upper;
|
||||
mem_heap_t* heap;
|
||||
mem_heap_t* heap;
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(new_page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
ut_a(page_is_comp(page) == page_is_comp(new_page));
|
||||
|
||||
/* Create a memory heap where the data tuple is stored */
|
||||
@ -1500,7 +1497,7 @@ btr_attach_half_pages(
|
||||
half */
|
||||
|
||||
node_ptr_upper = dict_tree_build_node_ptr(tree, split_rec,
|
||||
upper_page_no, heap, level);
|
||||
upper_page_no, heap, level);
|
||||
|
||||
/* Insert it next to the pointer to the lower half. Note that this
|
||||
may generate recursion leading to a split on the higher level. */
|
||||
@ -1600,7 +1597,7 @@ func_start:
|
||||
page = btr_cur_get_page(cursor);
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
ut_ad(page_get_n_recs(page) >= 2);
|
||||
|
||||
page_no = buf_frame_get_page_no(page);
|
||||
@ -1796,7 +1793,7 @@ btr_level_list_remove(
|
||||
|
||||
ut_ad(tree && page && mtr);
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
/* Get the previous and next page numbers of page */
|
||||
|
||||
prev_page_no = btr_page_get_prev(page, mtr);
|
||||
@ -1945,7 +1942,7 @@ btr_lift_page_up(
|
||||
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
|
||||
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
father_page = buf_frame_align(
|
||||
btr_page_get_father_node_ptr(tree, page, mtr));
|
||||
|
||||
@ -1958,7 +1955,7 @@ btr_lift_page_up(
|
||||
btr_page_empty(father_page, mtr);
|
||||
|
||||
/* Move records to the father */
|
||||
page_copy_rec_list_end(father_page, page, page_get_infimum_rec(page),
|
||||
page_copy_rec_list_end(father_page, page, page_get_infimum_rec(page),
|
||||
index, mtr);
|
||||
lock_update_copy_and_discard(father_page, page);
|
||||
|
||||
@ -2014,7 +2011,7 @@ btr_compress(
|
||||
page = btr_cur_get_page(cursor);
|
||||
tree = btr_cur_get_tree(cursor);
|
||||
comp = page_is_comp(page);
|
||||
ut_a((ibool)!!comp == cursor->index->table->comp);
|
||||
ut_a((ibool)!!comp == dict_table_is_comp(cursor->index->table));
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, dict_tree_get_lock(tree),
|
||||
MTR_MEMO_X_LOCK));
|
||||
@ -2266,7 +2263,7 @@ btr_discard_page(
|
||||
lock_update_discard(page_get_supremum_rec(merge_page), page);
|
||||
} else {
|
||||
lock_update_discard(page_rec_get_next(
|
||||
page_get_infimum_rec(merge_page)), page);
|
||||
page_get_infimum_rec(merge_page)), page);
|
||||
}
|
||||
|
||||
/* Free the file page */
|
||||
@ -2341,8 +2338,8 @@ btr_print_recursive(
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
fprintf(stderr, "NODE ON LEVEL %lu page number %lu\n",
|
||||
(ulong) btr_page_get_level(page, mtr),
|
||||
(ulong) buf_frame_get_page_no(page));
|
||||
(ulong) btr_page_get_level(page, mtr),
|
||||
(ulong) buf_frame_get_page_no(page));
|
||||
|
||||
index = UT_LIST_GET_FIRST(tree->tree_indexes);
|
||||
page_print(page, index, width, width);
|
||||
@ -2413,6 +2410,7 @@ btr_print_tree(
|
||||
}
|
||||
#endif /* UNIV_BTR_PRINT */
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
/****************************************************************
|
||||
Checks that the node pointer to a page is appropriate. */
|
||||
|
||||
@ -2458,6 +2456,7 @@ btr_check_node_ptr(
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
/****************************************************************
|
||||
Display identification information for a record. */
|
||||
@ -2481,7 +2480,7 @@ the index. */
|
||||
|
||||
ibool
|
||||
btr_index_rec_validate(
|
||||
/*====================*/
|
||||
/*===================*/
|
||||
/* out: TRUE if ok */
|
||||
rec_t* rec, /* in: index record */
|
||||
dict_index_t* index, /* in: index */
|
||||
@ -2501,18 +2500,20 @@ btr_index_rec_validate(
|
||||
page = buf_frame_align(rec);
|
||||
|
||||
if (UNIV_UNLIKELY(index->type & DICT_UNIVERSAL)) {
|
||||
/* The insert buffer index tree can contain records from any
|
||||
other index: we cannot check the number of fields or
|
||||
their length */
|
||||
/* The insert buffer index tree can contain records from any
|
||||
other index: we cannot check the number of fields or
|
||||
their length */
|
||||
|
||||
return(TRUE);
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
if (UNIV_UNLIKELY((ibool)!!page_is_comp(page) != index->table->comp)) {
|
||||
if (UNIV_UNLIKELY((ibool)!!page_is_comp(page)
|
||||
!= dict_table_is_comp(index->table))) {
|
||||
btr_index_rec_validate_report(page, rec, index);
|
||||
fprintf(stderr, "InnoDB: compact flag=%lu, should be %lu\n",
|
||||
(ulong) !!page_is_comp(page),
|
||||
(ulong) index->table->comp);
|
||||
(ulong) dict_table_is_comp(index->table));
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
@ -2546,12 +2547,12 @@ btr_index_rec_validate(
|
||||
their type is CHAR. */
|
||||
|
||||
if ((dict_index_get_nth_field(index, i)->prefix_len == 0
|
||||
&& len != UNIV_SQL_NULL && fixed_size
|
||||
&& len != fixed_size)
|
||||
&& len != UNIV_SQL_NULL && fixed_size
|
||||
&& len != fixed_size)
|
||||
||
|
||||
(dict_index_get_nth_field(index, i)->prefix_len > 0
|
||||
&& len != UNIV_SQL_NULL
|
||||
&& len >
|
||||
&& len != UNIV_SQL_NULL
|
||||
&& len >
|
||||
dict_index_get_nth_field(index, i)->prefix_len)) {
|
||||
|
||||
btr_index_rec_validate_report(page, rec, index);
|
||||
@ -2590,7 +2591,7 @@ btr_index_page_validate(
|
||||
page_t* page, /* in: index page */
|
||||
dict_index_t* index) /* in: index */
|
||||
{
|
||||
page_cur_t cur;
|
||||
page_cur_t cur;
|
||||
ibool ret = TRUE;
|
||||
|
||||
page_cur_set_before_first(page, &cur);
|
||||
@ -2740,7 +2741,7 @@ loop:
|
||||
left_page_no = btr_page_get_prev(page, &mtr);
|
||||
|
||||
ut_a((page_get_n_recs(page) > 0)
|
||||
|| ((level == 0) &&
|
||||
|| ((level == 0) &&
|
||||
(buf_frame_get_page_no(page) == dict_tree_get_page(tree))));
|
||||
|
||||
if (right_page_no != FIL_NULL) {
|
||||
@ -2776,8 +2777,8 @@ loop:
|
||||
rec_print(stderr, rec, index);
|
||||
putc('\n', stderr);
|
||||
|
||||
ret = FALSE;
|
||||
}
|
||||
ret = FALSE;
|
||||
}
|
||||
}
|
||||
|
||||
if (level > 0 && left_page_no == FIL_NULL) {
|
||||
@ -2822,9 +2823,9 @@ loop:
|
||||
&mtr);
|
||||
rec_print(stderr, rec, index);
|
||||
putc('\n', stderr);
|
||||
ret = FALSE;
|
||||
ret = FALSE;
|
||||
|
||||
goto node_ptr_fails;
|
||||
goto node_ptr_fails;
|
||||
}
|
||||
|
||||
if (btr_page_get_level(page, &mtr) > 0) {
|
||||
@ -2836,11 +2837,11 @@ loop:
|
||||
page_rec_get_next(
|
||||
page_get_infimum_rec(page)),
|
||||
0, heap,
|
||||
btr_page_get_level(page, &mtr));
|
||||
btr_page_get_level(page, &mtr));
|
||||
|
||||
if (cmp_dtuple_rec(node_ptr_tuple, node_ptr,
|
||||
offsets)) {
|
||||
rec_t* first_rec = page_rec_get_next(
|
||||
rec_t* first_rec = page_rec_get_next(
|
||||
page_get_infimum_rec(page));
|
||||
|
||||
btr_validate_report1(index, level, page);
|
||||
@ -2855,9 +2856,9 @@ loop:
|
||||
fputs("InnoDB: first rec ", stderr);
|
||||
rec_print(stderr, first_rec, index);
|
||||
putc('\n', stderr);
|
||||
ret = FALSE;
|
||||
ret = FALSE;
|
||||
|
||||
goto node_ptr_fails;
|
||||
goto node_ptr_fails;
|
||||
}
|
||||
}
|
||||
|
||||
@ -2899,7 +2900,7 @@ loop:
|
||||
right_node_ptr);
|
||||
|
||||
if (right_node_ptr != page_rec_get_next(
|
||||
page_get_infimum_rec(
|
||||
page_get_infimum_rec(
|
||||
right_father_page))) {
|
||||
ret = FALSE;
|
||||
fputs(
|
||||
|
@ -128,7 +128,7 @@ btr_cur_latch_leaves(
|
||||
ulint space, /* in: space id */
|
||||
ulint page_no, /* in: page number of the leaf */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
|
||||
btr_cur_t* cursor, /* in: cursor */
|
||||
btr_cur_t* cursor, /* in: cursor */
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
ulint left_page_no;
|
||||
@ -261,7 +261,7 @@ btr_cur_search_to_nth_level(
|
||||
ulint up_match;
|
||||
ulint up_bytes;
|
||||
ulint low_match;
|
||||
ulint low_bytes;
|
||||
ulint low_bytes;
|
||||
ulint height;
|
||||
ulint savepoint;
|
||||
ulint rw_latch;
|
||||
@ -320,7 +320,7 @@ btr_cur_search_to_nth_level(
|
||||
&& mode != PAGE_CUR_LE_OR_EXTENDS
|
||||
#endif /* PAGE_CUR_LE_OR_EXTENDS */
|
||||
&& srv_use_adaptive_hash_indexes
|
||||
&& btr_search_guess_on_hash(index, info, tuple, mode,
|
||||
&& btr_search_guess_on_hash(index, info, tuple, mode,
|
||||
latch_mode, cursor,
|
||||
has_search_latch, mtr)) {
|
||||
|
||||
@ -334,7 +334,7 @@ btr_cur_search_to_nth_level(
|
||||
|| mode != PAGE_CUR_LE);
|
||||
btr_cur_n_sea++;
|
||||
|
||||
return;
|
||||
return;
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
@ -482,7 +482,7 @@ retry_page_get:
|
||||
}
|
||||
|
||||
if ((latch_mode != BTR_MODIFY_TREE)
|
||||
&& (latch_mode != BTR_CONT_MODIFY_TREE)) {
|
||||
&& (latch_mode != BTR_CONT_MODIFY_TREE)) {
|
||||
|
||||
/* Release the tree s-latch */
|
||||
|
||||
@ -513,7 +513,7 @@ retry_page_get:
|
||||
page = btr_page_get(space,
|
||||
page_no, RW_X_LATCH, mtr);
|
||||
ut_a((ibool)!!page_is_comp(page)
|
||||
== index->table->comp);
|
||||
== dict_table_is_comp(index->table));
|
||||
}
|
||||
|
||||
break;
|
||||
@ -583,7 +583,7 @@ btr_cur_open_at_index_side(
|
||||
ulint root_height = 0; /* remove warning */
|
||||
rec_t* node_ptr;
|
||||
ulint estimate;
|
||||
ulint savepoint;
|
||||
ulint savepoint;
|
||||
mem_heap_t* heap = NULL;
|
||||
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
ulint* offsets = offsets_;
|
||||
@ -641,7 +641,7 @@ btr_cur_open_at_index_side(
|
||||
waiting for the tree latch. */
|
||||
|
||||
if ((latch_mode != BTR_MODIFY_TREE)
|
||||
&& (latch_mode != BTR_CONT_MODIFY_TREE)) {
|
||||
&& (latch_mode != BTR_CONT_MODIFY_TREE)) {
|
||||
|
||||
/* Release the tree s-latch */
|
||||
|
||||
@ -658,10 +658,10 @@ btr_cur_open_at_index_side(
|
||||
}
|
||||
|
||||
if (height == 0) {
|
||||
if (estimate) {
|
||||
btr_cur_add_path_info(cursor, height,
|
||||
root_height);
|
||||
}
|
||||
if (estimate) {
|
||||
btr_cur_add_path_info(cursor, height,
|
||||
root_height);
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
@ -976,7 +976,7 @@ calculate_sizes_again:
|
||||
/* The record is so big that we have to store some fields
|
||||
externally on separate database pages */
|
||||
|
||||
big_rec_vec = dtuple_convert_big_rec(index, entry, NULL, 0);
|
||||
big_rec_vec = dtuple_convert_big_rec(index, entry, NULL, 0);
|
||||
|
||||
if (big_rec_vec == NULL) {
|
||||
|
||||
@ -993,13 +993,13 @@ calculate_sizes_again:
|
||||
type = index->type;
|
||||
|
||||
if ((type & DICT_CLUSTERED)
|
||||
&& (dict_tree_get_space_reserve(index->tree) + rec_size > max_size)
|
||||
&& (page_get_n_recs(page) >= 2)
|
||||
&& (0 == level)
|
||||
&& (btr_page_get_split_rec_to_right(cursor, &dummy_rec)
|
||||
|| btr_page_get_split_rec_to_left(cursor, &dummy_rec))) {
|
||||
&& (dict_tree_get_space_reserve(index->tree) + rec_size > max_size)
|
||||
&& (page_get_n_recs(page) >= 2)
|
||||
&& (0 == level)
|
||||
&& (btr_page_get_split_rec_to_right(cursor, &dummy_rec)
|
||||
|| btr_page_get_split_rec_to_left(cursor, &dummy_rec))) {
|
||||
|
||||
if (big_rec_vec) {
|
||||
if (big_rec_vec) {
|
||||
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
|
||||
}
|
||||
|
||||
@ -1007,22 +1007,22 @@ calculate_sizes_again:
|
||||
}
|
||||
|
||||
if (!(((max_size >= rec_size)
|
||||
&& (max_size >= BTR_CUR_PAGE_REORGANIZE_LIMIT))
|
||||
|| (page_get_max_insert_size(page, 1) >= rec_size)
|
||||
|| (page_get_n_recs(page) <= 1))) {
|
||||
&& (max_size >= BTR_CUR_PAGE_REORGANIZE_LIMIT))
|
||||
|| (page_get_max_insert_size(page, 1) >= rec_size)
|
||||
|| (page_get_n_recs(page) <= 1))) {
|
||||
|
||||
if (big_rec_vec) {
|
||||
if (big_rec_vec) {
|
||||
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
|
||||
}
|
||||
return(DB_FAIL);
|
||||
}
|
||||
|
||||
/* Check locks and write to the undo log, if specified */
|
||||
err = btr_cur_ins_lock_and_undo(flags, cursor, entry, thr, &inherit);
|
||||
/* Check locks and write to the undo log, if specified */
|
||||
err = btr_cur_ins_lock_and_undo(flags, cursor, entry, thr, &inherit);
|
||||
|
||||
if (err != DB_SUCCESS) {
|
||||
|
||||
if (big_rec_vec) {
|
||||
if (big_rec_vec) {
|
||||
dtuple_convert_back_big_rec(index, entry, big_rec_vec);
|
||||
}
|
||||
return(err);
|
||||
@ -1181,12 +1181,12 @@ btr_cur_pessimistic_insert(
|
||||
/* The record is so big that we have to store some fields
|
||||
externally on separate database pages */
|
||||
|
||||
big_rec_vec = dtuple_convert_big_rec(index, entry, NULL, 0);
|
||||
big_rec_vec = dtuple_convert_big_rec(index, entry, NULL, 0);
|
||||
|
||||
if (big_rec_vec == NULL) {
|
||||
|
||||
if (n_extents > 0) {
|
||||
fil_space_release_free_extents(index->space,
|
||||
fil_space_release_free_extents(index->space,
|
||||
n_reserved);
|
||||
}
|
||||
return(DB_TOO_BIG_RECORD);
|
||||
@ -1304,7 +1304,7 @@ btr_cur_update_in_place_log(
|
||||
byte* log_ptr;
|
||||
page_t* page = ut_align_down(rec, UNIV_PAGE_SIZE);
|
||||
ut_ad(flags < 256);
|
||||
ut_ad(!!page_is_comp(page) == index->table->comp);
|
||||
ut_ad(!!page_is_comp(page) == dict_table_is_comp(index->table));
|
||||
|
||||
log_ptr = mlog_open_and_write_index(mtr, rec, index, page_is_comp(page)
|
||||
? MLOG_COMP_REC_UPDATE_IN_PLACE
|
||||
@ -1390,7 +1390,7 @@ btr_cur_parse_update_in_place(
|
||||
goto func_exit;
|
||||
}
|
||||
|
||||
ut_a((ibool)!!page_is_comp(page) == index->table->comp);
|
||||
ut_a((ibool)!!page_is_comp(page) == dict_table_is_comp(index->table));
|
||||
rec = page + rec_offset;
|
||||
|
||||
/* We do not need to reserve btr_search_latch, as the page is only
|
||||
@ -1443,7 +1443,7 @@ btr_cur_update_in_place(
|
||||
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
index = cursor->index;
|
||||
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
|
||||
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
|
||||
trx = thr_get_trx(thr);
|
||||
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
|
||||
#ifdef UNIV_DEBUG
|
||||
@ -1466,19 +1466,19 @@ btr_cur_update_in_place(
|
||||
|
||||
block = buf_block_align(rec);
|
||||
ut_ad(!!page_is_comp(buf_block_get_frame(block))
|
||||
== index->table->comp);
|
||||
== dict_table_is_comp(index->table));
|
||||
|
||||
if (block->is_hashed) {
|
||||
/* The function row_upd_changes_ord_field_binary works only
|
||||
if the update vector was built for a clustered index, we must
|
||||
NOT call it if index is secondary */
|
||||
|
||||
if (!(index->type & DICT_CLUSTERED)
|
||||
|| row_upd_changes_ord_field_binary(NULL, index, update)) {
|
||||
if (!(index->type & DICT_CLUSTERED)
|
||||
|| row_upd_changes_ord_field_binary(NULL, index, update)) {
|
||||
|
||||
/* Remove possible hash index pointer to this record */
|
||||
btr_search_update_hash_on_delete(cursor);
|
||||
}
|
||||
/* Remove possible hash index pointer to this record */
|
||||
btr_search_update_hash_on_delete(cursor);
|
||||
}
|
||||
|
||||
rw_lock_x_lock(&btr_search_latch);
|
||||
}
|
||||
@ -1558,7 +1558,7 @@ btr_cur_optimistic_update(
|
||||
page = btr_cur_get_page(cursor);
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
index = cursor->index;
|
||||
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
|
||||
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
|
||||
|
||||
heap = mem_heap_create(1024);
|
||||
offsets = rec_get_offsets(rec, index, NULL, ULINT_UNDEFINED, &heap);
|
||||
@ -1633,8 +1633,8 @@ btr_cur_optimistic_update(
|
||||
}
|
||||
|
||||
if (!(((max_size >= BTR_CUR_PAGE_REORGANIZE_LIMIT)
|
||||
&& (max_size >= new_rec_size))
|
||||
|| (page_get_n_recs(page) <= 1))) {
|
||||
&& (max_size >= new_rec_size))
|
||||
|| (page_get_n_recs(page) <= 1))) {
|
||||
|
||||
/* There was not enough space, or it did not pay to
|
||||
reorganize: for simplicity, we decide what to do assuming a
|
||||
@ -1655,7 +1655,7 @@ btr_cur_optimistic_update(
|
||||
return(err);
|
||||
}
|
||||
|
||||
/* Ok, we may do the replacement. Store on the page infimum the
|
||||
/* Ok, we may do the replacement. Store on the page infimum the
|
||||
explicit locks on rec, before deleting rec (see the comment in
|
||||
.._pessimistic_update). */
|
||||
|
||||
@ -1693,7 +1693,7 @@ btr_cur_optimistic_update(
|
||||
|
||||
lock_rec_restore_from_page_infimum(rec, page);
|
||||
|
||||
page_cur_move_to_next(page_cursor);
|
||||
page_cur_move_to_next(page_cursor);
|
||||
|
||||
mem_heap_free(heap);
|
||||
|
||||
@ -1734,7 +1734,7 @@ btr_cur_pess_upd_restore_supremum(
|
||||
|
||||
/* We must already have an x-latch to prev_page! */
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(prev_page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
|
||||
lock_rec_reset_and_inherit_gap_locks(page_get_supremum_rec(prev_page),
|
||||
rec);
|
||||
@ -1882,8 +1882,8 @@ btr_cur_pessimistic_update(
|
||||
ut_min(page_get_free_space_of_empty(page_is_comp(page)) / 2,
|
||||
REC_MAX_DATA_SIZE))) {
|
||||
|
||||
big_rec_vec = dtuple_convert_big_rec(index, new_entry,
|
||||
ext_vect, n_ext_vect);
|
||||
big_rec_vec = dtuple_convert_big_rec(index, new_entry,
|
||||
ext_vect, n_ext_vect);
|
||||
if (big_rec_vec == NULL) {
|
||||
|
||||
err = DB_TOO_BIG_RECORD;
|
||||
@ -2009,7 +2009,7 @@ btr_cur_del_mark_set_clust_rec_log(
|
||||
ut_ad(flags < 256);
|
||||
ut_ad(val <= 1);
|
||||
|
||||
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
|
||||
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
|
||||
|
||||
log_ptr = mlog_open_and_write_index(mtr, rec, index,
|
||||
page_rec_is_comp(rec)
|
||||
@ -2056,7 +2056,8 @@ btr_cur_parse_del_mark_set_clust_rec(
|
||||
ulint offset;
|
||||
rec_t* rec;
|
||||
|
||||
ut_ad(!page || !!page_is_comp(page) == index->table->comp);
|
||||
ut_ad(!page
|
||||
|| !!page_is_comp(page) == dict_table_is_comp(index->table));
|
||||
|
||||
if (end_ptr < ptr + 2) {
|
||||
|
||||
@ -2142,7 +2143,7 @@ btr_cur_del_mark_set_clust_rec(
|
||||
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
index = cursor->index;
|
||||
ut_ad(!!page_rec_is_comp(rec) == index->table->comp);
|
||||
ut_ad(!!page_rec_is_comp(rec) == dict_table_is_comp(index->table));
|
||||
offsets = rec_get_offsets(rec, index, offsets, ULINT_UNDEFINED, &heap);
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
@ -2315,7 +2316,7 @@ btr_cur_del_mark_set_sec_rec(
|
||||
|
||||
block = buf_block_align(rec);
|
||||
ut_ad(!!page_is_comp(buf_block_get_frame(block))
|
||||
== cursor->index->table->comp);
|
||||
== dict_table_is_comp(cursor->index->table));
|
||||
|
||||
if (block->is_hashed) {
|
||||
rw_lock_x_lock(&btr_search_latch);
|
||||
@ -2555,8 +2556,8 @@ btr_cur_pessimistic_delete(
|
||||
}
|
||||
|
||||
if (UNIV_UNLIKELY(page_get_n_recs(page) < 2)
|
||||
&& UNIV_UNLIKELY(dict_tree_get_page(btr_cur_get_tree(cursor))
|
||||
!= buf_frame_get_page_no(page))) {
|
||||
&& UNIV_UNLIKELY(dict_tree_get_page(btr_cur_get_tree(cursor))
|
||||
!= buf_frame_get_page_no(page))) {
|
||||
|
||||
/* If there is only one record, drop the whole page in
|
||||
btr_discard_page, if this is not the root page */
|
||||
@ -2573,8 +2574,8 @@ btr_cur_pessimistic_delete(
|
||||
level = btr_page_get_level(page, mtr);
|
||||
|
||||
if (level > 0
|
||||
&& UNIV_UNLIKELY(rec == page_rec_get_next(
|
||||
page_get_infimum_rec(page)))) {
|
||||
&& UNIV_UNLIKELY(rec == page_rec_get_next(
|
||||
page_get_infimum_rec(page)))) {
|
||||
|
||||
rec_t* next_rec = page_rec_get_next(rec);
|
||||
|
||||
@ -2687,8 +2688,8 @@ btr_estimate_n_rows_in_range(
|
||||
btr_path_t* slot1;
|
||||
btr_path_t* slot2;
|
||||
ibool diverged;
|
||||
ibool diverged_lot;
|
||||
ulint divergence_level;
|
||||
ibool diverged_lot;
|
||||
ulint divergence_level;
|
||||
ib_longlong n_rows;
|
||||
ulint i;
|
||||
mtr_t mtr;
|
||||
@ -2730,13 +2731,13 @@ btr_estimate_n_rows_in_range(
|
||||
/* We have the path information for the range in path1 and path2 */
|
||||
|
||||
n_rows = 1;
|
||||
diverged = FALSE; /* This becomes true when the path is not
|
||||
the same any more */
|
||||
diverged_lot = FALSE; /* This becomes true when the paths are
|
||||
not the same or adjacent any more */
|
||||
diverged = FALSE; /* This becomes true when the path is not
|
||||
the same any more */
|
||||
diverged_lot = FALSE; /* This becomes true when the paths are
|
||||
not the same or adjacent any more */
|
||||
divergence_level = 1000000; /* This is the level where paths diverged
|
||||
a lot */
|
||||
for (i = 0; ; i++) {
|
||||
a lot */
|
||||
for (i = 0; ; i++) {
|
||||
ut_ad(i < BTR_PATH_ARRAY_N_SLOTS);
|
||||
|
||||
slot1 = path1 + i;
|
||||
@ -2745,27 +2746,27 @@ btr_estimate_n_rows_in_range(
|
||||
if (slot1->nth_rec == ULINT_UNDEFINED
|
||||
|| slot2->nth_rec == ULINT_UNDEFINED) {
|
||||
|
||||
if (i > divergence_level + 1) {
|
||||
/* In trees whose height is > 1 our algorithm
|
||||
tends to underestimate: multiply the estimate
|
||||
by 2: */
|
||||
if (i > divergence_level + 1) {
|
||||
/* In trees whose height is > 1 our algorithm
|
||||
tends to underestimate: multiply the estimate
|
||||
by 2: */
|
||||
|
||||
n_rows = n_rows * 2;
|
||||
}
|
||||
n_rows = n_rows * 2;
|
||||
}
|
||||
|
||||
/* Do not estimate the number of rows in the range
|
||||
to over 1 / 2 of the estimated rows in the whole
|
||||
to over 1 / 2 of the estimated rows in the whole
|
||||
table */
|
||||
|
||||
if (n_rows > index->table->stat_n_rows / 2) {
|
||||
n_rows = index->table->stat_n_rows / 2;
|
||||
n_rows = index->table->stat_n_rows / 2;
|
||||
|
||||
/* If there are just 0 or 1 rows in the table,
|
||||
then we estimate all rows are in the range */
|
||||
|
||||
if (n_rows == 0) {
|
||||
n_rows = index->table->stat_n_rows;
|
||||
}
|
||||
if (n_rows == 0) {
|
||||
n_rows = index->table->stat_n_rows;
|
||||
}
|
||||
}
|
||||
|
||||
return(n_rows);
|
||||
@ -2779,7 +2780,7 @@ btr_estimate_n_rows_in_range(
|
||||
n_rows = slot2->nth_rec - slot1->nth_rec;
|
||||
|
||||
if (n_rows > 1) {
|
||||
diverged_lot = TRUE;
|
||||
diverged_lot = TRUE;
|
||||
divergence_level = i;
|
||||
}
|
||||
} else {
|
||||
@ -2791,23 +2792,23 @@ btr_estimate_n_rows_in_range(
|
||||
|
||||
} else if (diverged && !diverged_lot) {
|
||||
|
||||
if (slot1->nth_rec < slot1->n_recs
|
||||
|| slot2->nth_rec > 1) {
|
||||
if (slot1->nth_rec < slot1->n_recs
|
||||
|| slot2->nth_rec > 1) {
|
||||
|
||||
diverged_lot = TRUE;
|
||||
diverged_lot = TRUE;
|
||||
divergence_level = i;
|
||||
|
||||
n_rows = 0;
|
||||
|
||||
if (slot1->nth_rec < slot1->n_recs) {
|
||||
n_rows += slot1->n_recs
|
||||
- slot1->nth_rec;
|
||||
if (slot1->nth_rec < slot1->n_recs) {
|
||||
n_rows += slot1->n_recs
|
||||
- slot1->nth_rec;
|
||||
}
|
||||
|
||||
if (slot2->nth_rec > 1) {
|
||||
n_rows += slot2->nth_rec - 1;
|
||||
n_rows += slot2->nth_rec - 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if (diverged_lot) {
|
||||
|
||||
n_rows = (n_rows * (slot1->n_recs + slot2->n_recs))
|
||||
@ -2931,7 +2932,7 @@ btr_estimate_number_of_different_key_vals(
|
||||
in the table. */
|
||||
|
||||
if (btr_page_get_prev(page, &mtr) != FIL_NULL
|
||||
|| btr_page_get_next(page, &mtr) != FIL_NULL) {
|
||||
|| btr_page_get_next(page, &mtr) != FIL_NULL) {
|
||||
|
||||
n_diff[n_cols]++;
|
||||
}
|
||||
@ -2960,8 +2961,8 @@ btr_estimate_number_of_different_key_vals(
|
||||
+ BTR_KEY_VAL_ESTIMATE_N_PAGES - 1
|
||||
+ total_external_size
|
||||
+ not_empty_flag)
|
||||
/ (BTR_KEY_VAL_ESTIMATE_N_PAGES
|
||||
+ total_external_size);
|
||||
/ (BTR_KEY_VAL_ESTIMATE_N_PAGES
|
||||
+ total_external_size);
|
||||
|
||||
/* If the tree is small, smaller than <
|
||||
10 * BTR_KEY_VAL_ESTIMATE_N_PAGES + total_external_size, then
|
||||
@ -3333,8 +3334,8 @@ btr_store_big_rec_extern_fields(
|
||||
big_rec_t* big_rec_vec, /* in: vector containing fields
|
||||
to be stored externally */
|
||||
mtr_t* local_mtr __attribute__((unused))) /* in: mtr
|
||||
containing the latch to rec and to the
|
||||
tree */
|
||||
containing the latch to rec and to the
|
||||
tree */
|
||||
{
|
||||
byte* data;
|
||||
ulint local_len;
|
||||
@ -3392,6 +3393,9 @@ btr_store_big_rec_extern_fields(
|
||||
return(DB_OUT_OF_FILE_SPACE);
|
||||
}
|
||||
|
||||
mlog_write_ulint(page + FIL_PAGE_TYPE,
|
||||
FIL_PAGE_TYPE_BLOB, MLOG_2BYTES, &mtr);
|
||||
|
||||
page_no = buf_frame_get_page_no(page);
|
||||
|
||||
if (prev_page_no != FIL_NULL) {
|
||||
@ -3505,8 +3509,8 @@ btr_free_externally_stored_field(
|
||||
rollback and we do not want to free
|
||||
inherited fields */
|
||||
mtr_t* local_mtr __attribute__((unused))) /* in: mtr
|
||||
containing the latch to data an an
|
||||
X-latch to the index tree */
|
||||
containing the latch to data an an
|
||||
X-latch to the index tree */
|
||||
{
|
||||
page_t* page;
|
||||
page_t* rec_page;
|
||||
@ -3637,7 +3641,7 @@ btr_rec_free_externally_stored_fields(
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
/* Free possible externally stored fields in the record */
|
||||
|
||||
ut_ad(index->table->comp == !!rec_offs_comp(offsets));
|
||||
ut_ad(dict_table_is_comp(index->table) == !!rec_offs_comp(offsets));
|
||||
n_fields = rec_offs_n_fields(offsets);
|
||||
|
||||
for (i = 0; i < n_fields; i++) {
|
||||
|
@ -92,9 +92,9 @@ btr_pcur_store_position(
|
||||
offs = ut_align_offset(rec, UNIV_PAGE_SIZE);
|
||||
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_S_FIX)
|
||||
|| mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
MTR_MEMO_PAGE_S_FIX)
|
||||
|| mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
ut_a(cursor->latch_mode != BTR_NO_LATCHES);
|
||||
|
||||
if (UNIV_UNLIKELY(page_get_n_recs(page) == 0)) {
|
||||
@ -158,7 +158,7 @@ btr_pcur_copy_stored_position(
|
||||
mem_free(pcur_receive->old_rec_buf);
|
||||
}
|
||||
|
||||
ut_memcpy((byte*)pcur_receive, (byte*)pcur_donate, sizeof(btr_pcur_t));
|
||||
ut_memcpy(pcur_receive, pcur_donate, sizeof(btr_pcur_t));
|
||||
|
||||
if (pcur_donate->old_rec_buf) {
|
||||
|
||||
@ -195,7 +195,7 @@ btr_pcur_restore_position(
|
||||
whose ordering fields are identical to
|
||||
the ones of the original user record */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
|
||||
btr_pcur_t* cursor, /* in: detached persistent cursor */
|
||||
btr_pcur_t* cursor, /* in: detached persistent cursor */
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
dict_tree_t* tree;
|
||||
@ -206,9 +206,9 @@ btr_pcur_restore_position(
|
||||
mem_heap_t* heap;
|
||||
|
||||
if (UNIV_UNLIKELY(cursor->old_stored != BTR_PCUR_OLD_STORED)
|
||||
|| UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
|
||||
&& cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
|
||||
ut_print_buf(stderr, (const byte*)cursor, sizeof(btr_pcur_t));
|
||||
|| UNIV_UNLIKELY(cursor->pos_state != BTR_PCUR_WAS_POSITIONED
|
||||
&& cursor->pos_state != BTR_PCUR_IS_POSITIONED)) {
|
||||
ut_print_buf(stderr, cursor, sizeof(btr_pcur_t));
|
||||
if (cursor->trx_if_known) {
|
||||
trx_print(stderr, cursor->trx_if_known, 0);
|
||||
}
|
||||
@ -219,8 +219,8 @@ btr_pcur_restore_position(
|
||||
if (UNIV_UNLIKELY(cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
|
||||
|| cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {
|
||||
|
||||
/* In these cases we do not try an optimistic restoration,
|
||||
but always do a search */
|
||||
/* In these cases we do not try an optimistic restoration,
|
||||
but always do a search */
|
||||
|
||||
btr_cur_open_at_index_side(
|
||||
cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
|
||||
@ -243,8 +243,8 @@ btr_pcur_restore_position(
|
||||
/* Try optimistic restoration */
|
||||
|
||||
if (UNIV_LIKELY(buf_page_optimistic_get(latch_mode,
|
||||
cursor->block_when_stored, page,
|
||||
cursor->modify_clock, mtr))) {
|
||||
cursor->block_when_stored, page,
|
||||
cursor->modify_clock, mtr))) {
|
||||
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
buf_page_dbg_add_level(page, SYNC_TREE_NODE);
|
||||
@ -309,8 +309,8 @@ btr_pcur_restore_position(
|
||||
cursor->search_mode = old_mode;
|
||||
|
||||
if (cursor->rel_pos == BTR_PCUR_ON
|
||||
&& btr_pcur_is_on_user_rec(cursor, mtr)
|
||||
&& 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
|
||||
&& btr_pcur_is_on_user_rec(cursor, mtr)
|
||||
&& 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
|
||||
rec_get_offsets(btr_pcur_get_rec(cursor),
|
||||
btr_pcur_get_btr_cur(cursor)->index,
|
||||
NULL, ULINT_UNDEFINED, &heap))) {
|
||||
@ -542,7 +542,7 @@ btr_pcur_open_on_user_rec(
|
||||
ulint mode, /* in: PAGE_CUR_L, ... */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF or
|
||||
BTR_MODIFY_LEAF */
|
||||
btr_pcur_t* cursor, /* in: memory buffer for persistent
|
||||
btr_pcur_t* cursor, /* in: memory buffer for persistent
|
||||
cursor */
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
{
|
||||
|
@ -191,7 +191,7 @@ static
|
||||
void
|
||||
btr_search_info_update_hash(
|
||||
/*========================*/
|
||||
btr_search_t* info, /* in: search info */
|
||||
btr_search_t* info, /* in/out: search info */
|
||||
btr_cur_t* cursor) /* in: cursor which was just positioned */
|
||||
{
|
||||
dict_index_t* index;
|
||||
@ -244,7 +244,7 @@ btr_search_info_update_hash(
|
||||
if ((info->side == BTR_SEARCH_LEFT_SIDE && cmp > 0)
|
||||
|| (info->side == BTR_SEARCH_RIGHT_SIDE && cmp <= 0)) {
|
||||
|
||||
goto set_new_recomm;
|
||||
goto set_new_recomm;
|
||||
}
|
||||
|
||||
info->n_hash_potential++;
|
||||
@ -337,15 +337,15 @@ btr_search_update_block_hash_info(
|
||||
ut_a(info->magic_n == BTR_SEARCH_MAGIC_N);
|
||||
|
||||
if ((block->n_hash_helps > 0)
|
||||
&& (info->n_hash_potential > 0)
|
||||
&& (block->n_fields == info->n_fields)
|
||||
&& (block->n_bytes == info->n_bytes)
|
||||
&& (block->side == info->side)) {
|
||||
&& (info->n_hash_potential > 0)
|
||||
&& (block->n_fields == info->n_fields)
|
||||
&& (block->n_bytes == info->n_bytes)
|
||||
&& (block->side == info->side)) {
|
||||
|
||||
if ((block->is_hashed)
|
||||
&& (block->curr_n_fields == info->n_fields)
|
||||
&& (block->curr_n_bytes == info->n_bytes)
|
||||
&& (block->curr_side == info->side)) {
|
||||
&& (block->curr_n_fields == info->n_fields)
|
||||
&& (block->curr_n_bytes == info->n_bytes)
|
||||
&& (block->curr_side == info->side)) {
|
||||
|
||||
/* The search would presumably have succeeded using
|
||||
the hash index */
|
||||
@ -366,19 +366,19 @@ btr_search_update_block_hash_info(
|
||||
}
|
||||
|
||||
if ((block->n_hash_helps > page_get_n_recs(block->frame)
|
||||
/ BTR_SEARCH_PAGE_BUILD_LIMIT)
|
||||
&& (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
|
||||
/ BTR_SEARCH_PAGE_BUILD_LIMIT)
|
||||
&& (info->n_hash_potential >= BTR_SEARCH_BUILD_LIMIT)) {
|
||||
|
||||
if ((!block->is_hashed)
|
||||
|| (block->n_hash_helps
|
||||
if ((!block->is_hashed)
|
||||
|| (block->n_hash_helps
|
||||
> 2 * page_get_n_recs(block->frame))
|
||||
|| (block->n_fields != block->curr_n_fields)
|
||||
|| (block->n_bytes != block->curr_n_bytes)
|
||||
|| (block->side != block->curr_side)) {
|
||||
|| (block->n_fields != block->curr_n_fields)
|
||||
|| (block->n_bytes != block->curr_n_bytes)
|
||||
|| (block->side != block->curr_side)) {
|
||||
|
||||
/* Build a new hash index on the page */
|
||||
/* Build a new hash index on the page */
|
||||
|
||||
return(TRUE);
|
||||
return(TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
@ -415,20 +415,20 @@ btr_search_update_hash_ref(
|
||||
ut_a(!block->is_hashed || block->index == cursor->index);
|
||||
|
||||
if (block->is_hashed
|
||||
&& (info->n_hash_potential > 0)
|
||||
&& (block->curr_n_fields == info->n_fields)
|
||||
&& (block->curr_n_bytes == info->n_bytes)
|
||||
&& (block->curr_side == info->side)) {
|
||||
&& (info->n_hash_potential > 0)
|
||||
&& (block->curr_n_fields == info->n_fields)
|
||||
&& (block->curr_n_bytes == info->n_bytes)
|
||||
&& (block->curr_side == info->side)) {
|
||||
mem_heap_t* heap = NULL;
|
||||
ulint offsets_[REC_OFFS_NORMAL_SIZE];
|
||||
*offsets_ = (sizeof offsets_) / sizeof *offsets_;
|
||||
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
rec = btr_cur_get_rec(cursor);
|
||||
|
||||
if (!page_rec_is_user_rec(rec)) {
|
||||
if (!page_rec_is_user_rec(rec)) {
|
||||
|
||||
return;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
tree_id = ((cursor->index)->tree)->id;
|
||||
fold = rec_fold(rec, rec_get_offsets(rec, cursor->index,
|
||||
@ -452,7 +452,7 @@ Updates the search info. */
|
||||
void
|
||||
btr_search_info_update_slow(
|
||||
/*========================*/
|
||||
btr_search_t* info, /* in: search info */
|
||||
btr_search_t* info, /* in/out: search info */
|
||||
btr_cur_t* cursor) /* in: cursor which was just positioned */
|
||||
{
|
||||
buf_block_t* block;
|
||||
@ -531,15 +531,15 @@ btr_search_check_guess(
|
||||
/*===================*/
|
||||
/* out: TRUE if success */
|
||||
btr_cur_t* cursor, /* in: guessed cursor position */
|
||||
ibool can_only_compare_to_cursor_rec,
|
||||
/* in: if we do not have a latch on the page
|
||||
ibool can_only_compare_to_cursor_rec,
|
||||
/* in: if we do not have a latch on the page
|
||||
of cursor, but only a latch on
|
||||
btr_search_latch, then ONLY the columns
|
||||
btr_search_latch, then ONLY the columns
|
||||
of the record UNDER the cursor are
|
||||
protected, not the next or previous record
|
||||
in the chain: we cannot look at the next or
|
||||
previous record to check our guess! */
|
||||
dtuple_t* tuple, /* in: data tuple */
|
||||
dtuple_t* tuple, /* in: data tuple */
|
||||
ulint mode, /* in: PAGE_CUR_L, PAGE_CUR_LE, PAGE_CUR_G,
|
||||
or PAGE_CUR_GE */
|
||||
mtr_t* mtr) /* in: mtr */
|
||||
@ -598,8 +598,8 @@ btr_search_check_guess(
|
||||
}
|
||||
|
||||
if (can_only_compare_to_cursor_rec) {
|
||||
/* Since we could not determine if our guess is right just by
|
||||
looking at the record under the cursor, return FALSE */
|
||||
/* Since we could not determine if our guess is right just by
|
||||
looking at the record under the cursor, return FALSE */
|
||||
goto exit_func;
|
||||
}
|
||||
|
||||
@ -681,13 +681,13 @@ btr_search_guess_on_hash(
|
||||
btr_search_t* info, /* in: index search info */
|
||||
dtuple_t* tuple, /* in: logical record */
|
||||
ulint mode, /* in: PAGE_CUR_L, ... */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ...;
|
||||
NOTE that only if has_search_latch
|
||||
is 0, we will have a latch set on
|
||||
the cursor page, otherwise we assume
|
||||
the caller uses his search latch
|
||||
to protect the record! */
|
||||
btr_cur_t* cursor, /* out: tree cursor */
|
||||
btr_cur_t* cursor, /* out: tree cursor */
|
||||
ulint has_search_latch,/* in: latch mode the caller
|
||||
currently has on btr_search_latch:
|
||||
RW_S_LATCH, RW_X_LATCH, or 0 */
|
||||
@ -699,7 +699,7 @@ btr_search_guess_on_hash(
|
||||
ulint fold;
|
||||
ulint tuple_n_fields;
|
||||
dulint tree_id;
|
||||
ibool can_only_compare_to_cursor_rec = TRUE;
|
||||
ibool can_only_compare_to_cursor_rec = TRUE;
|
||||
#ifdef notdefined
|
||||
btr_cur_t cursor2;
|
||||
btr_pcur_t pcur;
|
||||
@ -729,7 +729,7 @@ btr_search_guess_on_hash(
|
||||
if (UNIV_UNLIKELY(tuple_n_fields == cursor->n_fields)
|
||||
&& (cursor->n_bytes > 0)) {
|
||||
|
||||
return(FALSE);
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
tree_id = (index->tree)->id;
|
||||
@@ -798,10 +798,10 @@ btr_search_guess_on_hash(
record to determine if our guess for the cursor position is
right. */
if (UNIV_EXPECT(ut_dulint_cmp(tree_id, btr_page_get_index_id(page)), 0)
|| !btr_search_check_guess(cursor, can_only_compare_to_cursor_rec,
tuple, mode, mtr)) {
|| !btr_search_check_guess(cursor,
can_only_compare_to_cursor_rec, tuple, mode, mtr)) {
if (UNIV_LIKELY(!has_search_latch)) {
btr_leaf_page_release(page, latch_mode, mtr);
}

goto failure;
@@ -889,7 +889,8 @@ Drops a page hash index. */
void
btr_search_drop_page_hash_index(
/*============================*/
page_t* page) /* in: index page, s- or x-latched */
page_t* page) /* in: index page, s- or x-latched, or an index page
for which we know that block->buf_fix_count == 0 */
{
hash_table_t* table;
buf_block_t* block;
@@ -904,18 +905,19 @@ btr_search_drop_page_hash_index(
ulint* folds;
ulint i;
mem_heap_t* heap;
dict_index_t* index;
ulint* offsets;

#ifdef UNIV_SYNC_DEBUG
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_SHARED));
ut_ad(!rw_lock_own(&btr_search_latch, RW_LOCK_EX));
#endif /* UNIV_SYNC_DEBUG */

retry:
rw_lock_s_lock(&btr_search_latch);

block = buf_block_align(page);

if (!block->is_hashed) {
if (UNIV_LIKELY(!block->is_hashed)) {

rw_lock_s_unlock(&btr_search_latch);

@@ -926,17 +928,22 @@ btr_search_drop_page_hash_index(

#ifdef UNIV_SYNC_DEBUG
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|| rw_lock_own(&(block->lock), RW_LOCK_EX)
|| (block->buf_fix_count == 0));
#endif /* UNIV_SYNC_DEBUG */

n_fields = block->curr_n_fields;
n_bytes = block->curr_n_bytes;
index = block->index;

ut_a(n_fields + n_bytes > 0);
/* NOTE: The fields of block must not be accessed after
releasing btr_search_latch, as the index page might only
be s-latched! */

rw_lock_s_unlock(&btr_search_latch);

ut_a(n_fields + n_bytes > 0);

n_recs = page_get_n_recs(page);

/* Calculate and cache fold values into an array for fast deletion
@@ -949,33 +956,21 @@ btr_search_drop_page_hash_index(
rec = page_get_infimum_rec(page);
rec = page_rec_get_next(rec);

if (!page_rec_is_supremum(rec)) {
ut_a(n_fields <= rec_get_n_fields(rec, block->index));

if (n_bytes > 0) {
ut_a(n_fields < rec_get_n_fields(rec, block->index));
}
}

tree_id = btr_page_get_index_id(page);

ut_a(0 == ut_dulint_cmp(tree_id, index->id));

prev_fold = 0;

heap = NULL;
offsets = NULL;

if (block->index == NULL) {

mem_analyze_corruption((byte*)block);

ut_a(block->index != NULL);
}

while (!page_rec_is_supremum(rec)) {
/* FIXME: in a mixed tree, not all records may have enough
ordering fields: */
offsets = rec_get_offsets(rec, block->index,
offsets, n_fields + (n_bytes > 0), &heap);
offsets = rec_get_offsets(rec, index, offsets,
n_fields + (n_bytes > 0), &heap);
ut_a(rec_offs_n_fields(offsets) == n_fields + (n_bytes > 0));
fold = rec_fold(rec, offsets, n_fields, n_bytes, tree_id);

if (fold == prev_fold && prev_fold != 0) {
@@ -999,6 +994,26 @@ next_rec:

rw_lock_x_lock(&btr_search_latch);

if (UNIV_UNLIKELY(!block->is_hashed)) {
/* Someone else has meanwhile dropped the hash index */

goto cleanup;
}

ut_a(block->index == index);

if (UNIV_UNLIKELY(block->curr_n_fields != n_fields)
|| UNIV_UNLIKELY(block->curr_n_bytes != n_bytes)) {

/* Someone else has meanwhile built a new hash index on the
page, with different parameters */

rw_lock_x_unlock(&btr_search_latch);

mem_free(folds);
goto retry;
}

for (i = 0; i < n_cached; i++) {

ha_remove_all_nodes_to_page(table, folds[i], page);
@@ -1006,8 +1021,20 @@ next_rec:

block->is_hashed = FALSE;
block->index = NULL;
cleanup:
if (UNIV_UNLIKELY(block->n_pointers)) {
/* Corruption */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Corruption of adaptive hash index. After dropping\n"
"InnoDB: the hash index to a page of %s, still %lu hash nodes remain.\n",
index->name, (ulong) block->n_pointers);
rw_lock_x_unlock(&btr_search_latch);

rw_lock_x_unlock(&btr_search_latch);
btr_search_validate();
} else {
rw_lock_x_unlock(&btr_search_latch);
}

mem_free(folds);
}
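The hunks above follow a copy, recompute, revalidate, retry protocol: the hash parameters (n_fields, n_bytes, index) are copied while btr_search_latch is held in shared mode, the fold values are computed with no latch held, and the parameters are checked again under the exclusive latch before any node is removed; if another thread rebuilt the hash index with different parameters in between, the stale work is discarded and the whole operation is retried. The sketch below illustrates that protocol in isolation using POSIX rwlocks; shared_params, compute_folds and remove_entries are hypothetical names for this illustration, not InnoDB functions.

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical shared state guarded by a read-write latch. */
typedef struct {
	pthread_rwlock_t latch;
	int n_fields;	/* parameters that other threads may change */
	int n_bytes;
} shared_params;

extern unsigned long* compute_folds(int n_fields, int n_bytes, int* n_out);
extern void remove_entries(const unsigned long* folds, int n);

void drop_with_retry(shared_params* p)
{
	int n_fields;
	int n_bytes;
	int n_cached;
	unsigned long* folds;
retry:
	/* 1. Copy the parameters under the shared latch. */
	pthread_rwlock_rdlock(&p->latch);
	n_fields = p->n_fields;
	n_bytes = p->n_bytes;
	pthread_rwlock_unlock(&p->latch);

	/* 2. Do the expensive work without holding the latch. */
	folds = compute_folds(n_fields, n_bytes, &n_cached);

	/* 3. Revalidate under the exclusive latch before applying. */
	pthread_rwlock_wrlock(&p->latch);
	if (p->n_fields != n_fields || p->n_bytes != n_bytes) {
		/* Parameters changed meanwhile: discard and retry. */
		pthread_rwlock_unlock(&p->latch);
		free(folds);
		goto retry;
	}
	remove_entries(folds, n_cached);
	pthread_rwlock_unlock(&p->latch);
	free(folds);
}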
@@ -1100,8 +1127,8 @@ btr_search_build_page_hash_index(
rw_lock_s_lock(&btr_search_latch);

if (block->is_hashed && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_side != side))) {

rw_lock_s_unlock(&btr_search_latch);

@@ -1125,8 +1152,8 @@ btr_search_build_page_hash_index(
}

if (dict_index_get_n_unique_in_tree(index) < n_fields
|| (dict_index_get_n_unique_in_tree(index) == n_fields
&& n_bytes > 0)) {
return;
}

@@ -1177,7 +1204,7 @@ btr_search_build_page_hash_index(
n_cached++;
}

break;
}

offsets = rec_get_offsets(next_rec, index, offsets,
@@ -1209,8 +1236,8 @@ btr_search_build_page_hash_index(
rw_lock_x_lock(&btr_search_latch);

if (block->is_hashed && ((block->curr_n_fields != n_fields)
|| (block->curr_n_bytes != n_bytes)
|| (block->curr_side != side))) {
goto exit_func;
}

@@ -1393,13 +1420,13 @@ btr_search_update_hash_node_on_insert(
rw_lock_x_lock(&btr_search_latch);

if ((cursor->flag == BTR_CUR_HASH)
&& (cursor->n_fields == block->curr_n_fields)
&& (cursor->n_bytes == block->curr_n_bytes)
&& (block->curr_side == BTR_SEARCH_RIGHT_SIDE)) {

table = btr_search_sys->hash_index;

ha_search_and_update_if_found(table, cursor->fold, rec,
page_rec_get_next(rec));

rw_lock_x_unlock(&btr_search_latch);
@@ -1495,9 +1522,9 @@ btr_search_update_hash_on_insert(
goto check_next_rec;
}

if (fold != ins_fold) {

if (!locked) {

rw_lock_x_lock(&btr_search_latch);

@@ -1516,7 +1543,7 @@ check_next_rec:

if (side == BTR_SEARCH_RIGHT_SIDE) {

if (!locked) {
rw_lock_x_lock(&btr_search_latch);

locked = TRUE;
@@ -1530,7 +1557,7 @@ check_next_rec:

if (ins_fold != next_fold) {

if (!locked) {

rw_lock_x_lock(&btr_search_latch);

@@ -1573,14 +1600,29 @@ btr_search_validate(void)
ulint n_page_dumps = 0;
ibool ok = TRUE;
ulint i;
ulint cell_count;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
*offsets_ = (sizeof offsets_) / sizeof *offsets_;

/* How many cells to check before temporarily releasing
btr_search_latch. */
ulint chunk_size = 10000;

rw_lock_x_lock(&btr_search_latch);

for (i = 0; i < hash_get_n_cells(btr_search_sys->hash_index); i++) {
cell_count = hash_get_n_cells(btr_search_sys->hash_index);

for (i = 0; i < cell_count; i++) {
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if ((i != 0) && ((i % chunk_size) == 0)) {
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
}

node = hash_get_nth_cell(btr_search_sys->hash_index, i)->node;

while (node != NULL) {
@ -1592,11 +1634,11 @@ btr_search_validate(void)
|
||||
+ (block->curr_n_bytes > 0), &heap);
|
||||
|
||||
if (!block->is_hashed
|
||||
|| node->fold != rec_fold((rec_t*)(node->data),
|
||||
offsets,
|
||||
block->curr_n_fields,
|
||||
block->curr_n_bytes,
|
||||
btr_page_get_index_id(page))) {
|
||||
|| node->fold != rec_fold((rec_t*)(node->data),
|
||||
offsets,
|
||||
block->curr_n_fields,
|
||||
block->curr_n_bytes,
|
||||
btr_page_get_index_id(page))) {
|
||||
ok = FALSE;
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
@ -1610,9 +1652,9 @@ btr_search_validate(void)
|
||||
(ulong) node->fold,
|
||||
(ulong) rec_fold((rec_t*)(node->data),
|
||||
offsets,
|
||||
block->curr_n_fields,
|
||||
block->curr_n_bytes,
|
||||
btr_page_get_index_id(page)));
|
||||
block->curr_n_fields,
|
||||
block->curr_n_bytes,
|
||||
btr_page_get_index_id(page)));
|
||||
|
||||
fputs("InnoDB: Record ", stderr);
|
||||
rec_print_new(stderr, (rec_t*)node->data,
|
||||
@ -1620,9 +1662,9 @@ btr_search_validate(void)
|
||||
fprintf(stderr, "\nInnoDB: on that page."
|
||||
"Page mem address %p, is hashed %lu, n fields %lu, n bytes %lu\n"
|
||||
"side %lu\n",
|
||||
page, (ulong) block->is_hashed,
|
||||
(ulong) block->curr_n_fields,
|
||||
(ulong) block->curr_n_bytes, (ulong) block->curr_side);
|
||||
page, (ulong) block->is_hashed,
|
||||
(ulong) block->curr_n_fields,
|
||||
(ulong) block->curr_n_bytes, (ulong) block->curr_side);
|
||||
|
||||
if (n_page_dumps < 20) {
|
||||
buf_page_print(page);
|
||||
@@ -1634,9 +1676,20 @@ btr_search_validate(void)
}
}

if (!ha_validate(btr_search_sys->hash_index)) {
for (i = 0; i < cell_count; i += chunk_size) {
ulint end_index = ut_min(i + chunk_size - 1, cell_count - 1);

ok = FALSE;
/* We release btr_search_latch every once in a while to
give other queries a chance to run. */
if (i != 0) {
rw_lock_x_unlock(&btr_search_latch);
os_thread_yield();
rw_lock_x_lock(&btr_search_latch);
}

if (!ha_validate(btr_search_sys->hash_index, i, end_index)) {
ok = FALSE;
}
}

rw_lock_x_unlock(&btr_search_latch);

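Both loops in the rewritten validation above work through the hash table in chunks of chunk_size cells and give up btr_search_latch between chunks, so that other queries are not blocked for the whole scan. A minimal stand-alone sketch of that chunking pattern, assuming a POSIX rwlock and a hypothetical validate_cell() helper:

#include <pthread.h>
#include <sched.h>

#define CHUNK_SIZE 10000	/* cells validated per latch acquisition */

extern pthread_rwlock_t table_latch;			/* hypothetical */
extern int validate_cell(unsigned long cell_no);	/* hypothetical */

/* Validate n_cells hash cells without holding the latch continuously. */
int validate_in_chunks(unsigned long n_cells)
{
	unsigned long i;
	int ok = 1;

	pthread_rwlock_wrlock(&table_latch);

	for (i = 0; i < n_cells; i++) {
		/* Between chunks, drop the latch and yield so that
		other threads blocked on it get a chance to run. */
		if (i != 0 && i % CHUNK_SIZE == 0) {
			pthread_rwlock_unlock(&table_latch);
			sched_yield();
			pthread_rwlock_wrlock(&table_latch);
		}

		if (!validate_cell(i)) {
			ok = 0;
		}
	}

	pthread_rwlock_unlock(&table_latch);
	return(ok);
}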
@@ -239,27 +239,27 @@ to a file. Note that we must be careful to calculate the same value on
ulint
buf_calc_page_new_checksum(
/*=======================*/
/* out: checksum */
byte* page) /* in: buffer page */
{
ulint checksum;

/* Since the field FIL_PAGE_FILE_FLUSH_LSN, and in versions <= 4.1.x
..._ARCH_LOG_NO, are written outside the buffer pool to the first
pages of data files, we have to skip them in the page checksum
calculation.
We must also skip the field FIL_PAGE_SPACE_OR_CHKSUM where the
checksum is stored, and also the last 8 bytes of page because
there we store the old formula checksum. */

checksum = ut_fold_binary(page + FIL_PAGE_OFFSET,
FIL_PAGE_FILE_FLUSH_LSN - FIL_PAGE_OFFSET)
+ ut_fold_binary(page + FIL_PAGE_DATA,
UNIV_PAGE_SIZE - FIL_PAGE_DATA
- FIL_PAGE_END_LSN_OLD_CHKSUM);
checksum = checksum & 0xFFFFFFFFUL;

return(checksum);
}

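The comment above pins down which byte ranges take part in the new-formula checksum: the header bytes between the checksum field and FIL_PAGE_FILE_FLUSH_LSN, and the page body from FIL_PAGE_DATA up to the 8-byte trailer that holds the old-formula checksum. The self-contained sketch below only illustrates that layout; the numeric offsets are the usual InnoDB defaults and are stated here as assumptions, and fold_bytes() is merely a stand-in for ut_fold_binary(), not the real fold function.

#include <stddef.h>

/* Assumed default field offsets within a page. */
#define PAGE_SIZE		16384	/* UNIV_PAGE_SIZE */
#define PAGE_OFFSET		4	/* FIL_PAGE_OFFSET */
#define PAGE_FILE_FLUSH_LSN	26	/* FIL_PAGE_FILE_FLUSH_LSN */
#define PAGE_DATA		38	/* FIL_PAGE_DATA */
#define PAGE_END_OLD_CHKSUM	8	/* FIL_PAGE_END_LSN_OLD_CHKSUM */

/* Stand-in byte fold; any stable fold works for the illustration. */
static unsigned long fold_bytes(const unsigned char* buf, size_t len)
{
	unsigned long fold = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		fold = fold * 131 + buf[i];
	}

	return(fold);
}

/* New-formula checksum: fold the header after the checksum field up to
the flush-lsn field, plus the body from FIL_PAGE_DATA up to the 8-byte
trailer, then truncate to 32 bits. */
unsigned long page_new_checksum(const unsigned char* page)
{
	unsigned long checksum;

	checksum = fold_bytes(page + PAGE_OFFSET,
			PAGE_FILE_FLUSH_LSN - PAGE_OFFSET)
		+ fold_bytes(page + PAGE_DATA,
			PAGE_SIZE - PAGE_DATA - PAGE_END_OLD_CHKSUM);

	return(checksum & 0xFFFFFFFFUL);
}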
/************************************************************************
@@ -273,16 +273,16 @@ because this takes that field as an input! */
ulint
buf_calc_page_old_checksum(
/*=======================*/
/* out: checksum */
byte* page) /* in: buffer page */
{
ulint checksum;

checksum = ut_fold_binary(page, FIL_PAGE_FILE_FLUSH_LSN);

checksum = checksum & 0xFFFFFFFFUL;

return(checksum);
}

/************************************************************************
@@ -302,11 +302,11 @@ buf_page_is_corrupted(
dulint current_lsn;
#endif
if (mach_read_from_4(read_buf + FIL_PAGE_LSN + 4)
!= mach_read_from_4(read_buf + UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {

/* Stored log sequence numbers at the start and the end
of page do not match */

return(TRUE);
}
@@ -314,8 +314,8 @@ buf_page_is_corrupted(
#ifndef UNIV_HOTBACKUP
if (recv_lsn_checks_on && log_peek_lsn(&current_lsn)) {
if (ut_dulint_cmp(current_lsn,
mach_read_from_8(read_buf + FIL_PAGE_LSN))
< 0) {
ut_print_timestamp(stderr);

fprintf(stderr,
@ -324,52 +324,57 @@ buf_page_is_corrupted(
|
||||
"InnoDB: Your database may be corrupt or you may have copied the InnoDB\n"
|
||||
"InnoDB: tablespace but not the InnoDB log files. See\n"
|
||||
"http://dev.mysql.com/doc/mysql/en/backing-up.html for more information.\n",
|
||||
(ulong) mach_read_from_4(read_buf + FIL_PAGE_OFFSET),
|
||||
(ulong) ut_dulint_get_high(
|
||||
mach_read_from_8(read_buf + FIL_PAGE_LSN)),
|
||||
(ulong) ut_dulint_get_low(
|
||||
mach_read_from_8(read_buf + FIL_PAGE_LSN)),
|
||||
(ulong) ut_dulint_get_high(current_lsn),
|
||||
(ulong) ut_dulint_get_low(current_lsn));
|
||||
(ulong) mach_read_from_4(read_buf + FIL_PAGE_OFFSET),
|
||||
(ulong) ut_dulint_get_high(
|
||||
mach_read_from_8(read_buf + FIL_PAGE_LSN)),
|
||||
(ulong) ut_dulint_get_low(
|
||||
mach_read_from_8(read_buf + FIL_PAGE_LSN)),
|
||||
(ulong) ut_dulint_get_high(current_lsn),
|
||||
(ulong) ut_dulint_get_low(current_lsn));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
/* If we use checksums validation, make additional check before returning
|
||||
TRUE to ensure that the checksum is not equal to BUF_NO_CHECKSUM_MAGIC which
|
||||
might be stored by InnoDB with checksums disabled.
|
||||
Otherwise, skip checksum calculation and return FALSE */
|
||||
/* If we use checksums validation, make additional check before
|
||||
returning TRUE to ensure that the checksum is not equal to
|
||||
BUF_NO_CHECKSUM_MAGIC which might be stored by InnoDB with checksums
|
||||
disabled. Otherwise, skip checksum calculation and return FALSE */
|
||||
|
||||
if (srv_use_checksums) {
|
||||
old_checksum = buf_calc_page_old_checksum(read_buf);
|
||||
if (srv_use_checksums) {
|
||||
old_checksum = buf_calc_page_old_checksum(read_buf);
|
||||
|
||||
old_checksum_field = mach_read_from_4(read_buf + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM);
|
||||
old_checksum_field = mach_read_from_4(read_buf + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM);
|
||||
|
||||
/* There are 2 valid formulas for old_checksum_field:
|
||||
1. Very old versions of InnoDB only stored 8 byte lsn to the start
|
||||
and the end of the page.
|
||||
2. Newer InnoDB versions store the old formula checksum there. */
|
||||
/* There are 2 valid formulas for old_checksum_field:
|
||||
|
||||
if (old_checksum_field != mach_read_from_4(read_buf + FIL_PAGE_LSN)
|
||||
&& old_checksum_field != old_checksum
|
||||
&& old_checksum_field != BUF_NO_CHECKSUM_MAGIC) {
|
||||
1. Very old versions of InnoDB only stored 8 byte lsn to the
|
||||
start and the end of the page.
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
2. Newer InnoDB versions store the old formula checksum
|
||||
there. */
|
||||
|
||||
checksum = buf_calc_page_new_checksum(read_buf);
|
||||
checksum_field = mach_read_from_4(read_buf + FIL_PAGE_SPACE_OR_CHKSUM);
|
||||
if (old_checksum_field != mach_read_from_4(read_buf
|
||||
+ FIL_PAGE_LSN)
|
||||
&& old_checksum_field != old_checksum
|
||||
&& old_checksum_field != BUF_NO_CHECKSUM_MAGIC) {
|
||||
|
||||
/* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id
|
||||
(always equal to 0), to FIL_PAGE_SPACE_SPACE_OR_CHKSUM */
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
if (checksum_field != 0 && checksum_field != checksum
|
||||
&& checksum_field != BUF_NO_CHECKSUM_MAGIC) {
|
||||
checksum = buf_calc_page_new_checksum(read_buf);
|
||||
checksum_field = mach_read_from_4(read_buf +
|
||||
FIL_PAGE_SPACE_OR_CHKSUM);
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
}
|
||||
/* InnoDB versions < 4.0.14 and < 4.1.1 stored the space id
|
||||
(always equal to 0), to FIL_PAGE_SPACE_SPACE_OR_CHKSUM */
|
||||
|
||||
if (checksum_field != 0 && checksum_field != checksum
|
||||
&& checksum_field != BUF_NO_CHECKSUM_MAGIC) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
@ -393,9 +398,9 @@ buf_page_print(
|
||||
fputs("InnoDB: End of page dump\n", stderr);
|
||||
|
||||
checksum = srv_use_checksums ?
|
||||
buf_calc_page_new_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;
|
||||
buf_calc_page_new_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;
|
||||
old_checksum = srv_use_checksums ?
|
||||
buf_calc_page_old_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;
|
||||
buf_calc_page_old_checksum(read_buf) : BUF_NO_CHECKSUM_MAGIC;
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
@ -417,18 +422,19 @@ buf_page_print(
|
||||
(ulong) mach_read_from_4(read_buf + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID));
|
||||
|
||||
if (mach_read_from_2(read_buf + TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_TYPE)
|
||||
== TRX_UNDO_INSERT) {
|
||||
fprintf(stderr,
|
||||
== TRX_UNDO_INSERT) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Page may be an insert undo log page\n");
|
||||
} else if (mach_read_from_2(read_buf + TRX_UNDO_PAGE_HDR
|
||||
+ TRX_UNDO_PAGE_TYPE)
|
||||
== TRX_UNDO_UPDATE) {
|
||||
fprintf(stderr,
|
||||
== TRX_UNDO_UPDATE) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Page may be an update undo log page\n");
|
||||
}
|
||||
|
||||
if (fil_page_get_type(read_buf) == FIL_PAGE_INDEX) {
|
||||
fprintf(stderr,
|
||||
switch (fil_page_get_type(read_buf)) {
|
||||
case FIL_PAGE_INDEX:
|
||||
fprintf(stderr,
|
||||
"InnoDB: Page may be an index page where index id is %lu %lu\n",
|
||||
(ulong) ut_dulint_get_high(btr_page_get_index_id(read_buf)),
|
||||
(ulong) ut_dulint_get_low(btr_page_get_index_id(read_buf)));
|
||||
@ -438,19 +444,50 @@ buf_page_print(
|
||||
|
||||
if (dict_sys != NULL) {
|
||||
|
||||
index = dict_index_find_on_id_low(
|
||||
index = dict_index_find_on_id_low(
|
||||
btr_page_get_index_id(read_buf));
|
||||
if (index) {
|
||||
if (index) {
|
||||
fputs("InnoDB: (", stderr);
|
||||
dict_index_name_print(stderr, NULL, index);
|
||||
fputs(")\n", stderr);
|
||||
}
|
||||
}
|
||||
} else if (fil_page_get_type(read_buf) == FIL_PAGE_INODE) {
|
||||
break;
|
||||
case FIL_PAGE_INODE:
|
||||
fputs("InnoDB: Page may be an 'inode' page\n", stderr);
|
||||
} else if (fil_page_get_type(read_buf) == FIL_PAGE_IBUF_FREE_LIST) {
|
||||
break;
|
||||
case FIL_PAGE_IBUF_FREE_LIST:
|
||||
fputs("InnoDB: Page may be an insert buffer free list page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_TYPE_ALLOCATED:
|
||||
fputs("InnoDB: Page may be a freshly allocated page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_IBUF_BITMAP:
|
||||
fputs("InnoDB: Page may be an insert buffer bitmap page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_TYPE_SYS:
|
||||
fputs("InnoDB: Page may be a system page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_TYPE_TRX_SYS:
|
||||
fputs("InnoDB: Page may be a transaction system page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_TYPE_FSP_HDR:
|
||||
fputs("InnoDB: Page may be a file space header page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_TYPE_XDES:
|
||||
fputs("InnoDB: Page may be an extent descriptor page\n",
|
||||
stderr);
|
||||
break;
|
||||
case FIL_PAGE_TYPE_BLOB:
|
||||
fputs("InnoDB: Page may be a BLOB page\n",
|
||||
stderr);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -464,12 +501,17 @@ buf_block_init(
byte* frame) /* in: pointer to buffer frame, or NULL if in
the case of AWE there is no frame */
{
block->magic_n = 0;

block->state = BUF_BLOCK_NOT_USED;

block->frame = frame;

block->awe_info = NULL;

block->buf_fix_count = 0;
block->io_fix = 0;

block->modify_clock = ut_dulint_zero;

block->file_page_was_freed = FALSE;
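The added initializations of magic_n, buf_fix_count and io_fix make sure later checks never read a value the allocator happened to leave in the control block. A hedged sketch of the general idea, using a hypothetical control block rather than the real buf_block_t:

#include <string.h>

/* Hypothetical control block used only for illustration. */
typedef struct {
	unsigned long	magic_n;
	unsigned long	buf_fix_count;
	unsigned long	io_fix;
	/* further bookkeeping fields would follow here */
} block_ctrl_t;

/* Initialize the whole block explicitly so every field the assertions
and monitors later inspect has a defined value from the start. */
void block_ctrl_init(block_ctrl_t* block)
{
	memset(block, 0, sizeof *block);
	/* magic_n == 0, buf_fix_count == 0 and io_fix == 0 now hold. */
}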
@ -518,7 +560,7 @@ buf_pool_init(
|
||||
ut_a(srv_use_awe || n_frames == max_size);
|
||||
|
||||
if (n_frames > curr_size) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: AWE: Error: you must specify in my.cnf .._awe_mem_mb larger\n"
|
||||
"InnoDB: than .._buffer_pool_size. Now the former is %lu pages,\n"
|
||||
"InnoDB: the latter %lu pages.\n", (ulong) curr_size, (ulong) n_frames);
|
||||
@ -548,12 +590,12 @@ buf_pool_init(
|
||||
|
||||
if ((curr_size % ((1024 * 1024) / UNIV_PAGE_SIZE)) != 0) {
|
||||
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: AWE: Error: physical memory must be allocated in full megabytes.\n"
|
||||
"InnoDB: Trying to allocate %lu database pages.\n",
|
||||
(ulong) curr_size);
|
||||
|
||||
return(NULL);
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
if (!os_awe_allocate_physical_mem(&(buf_pool->awe_info),
|
||||
@ -708,11 +750,11 @@ buf_pool_init(
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
if (srv_use_adaptive_hash_indexes) {
|
||||
btr_search_sys_create(
|
||||
btr_search_sys_create(
|
||||
curr_size * UNIV_PAGE_SIZE / sizeof(void*) / 64);
|
||||
} else {
|
||||
/* Create only a small dummy system */
|
||||
btr_search_sys_create(1000);
|
||||
/* Create only a small dummy system */
|
||||
btr_search_sys_create(1000);
|
||||
}
|
||||
|
||||
return(buf_pool);
|
||||
@ -751,7 +793,7 @@ buf_awe_map_page_to_frame(
|
||||
|
||||
while (bck) {
|
||||
if (bck->state == BUF_BLOCK_FILE_PAGE
|
||||
&& (bck->buf_fix_count != 0 || bck->io_fix != 0)) {
|
||||
&& (bck->buf_fix_count != 0 || bck->io_fix != 0)) {
|
||||
|
||||
/* We have to skip this */
|
||||
bck = UT_LIST_GET_PREV(awe_LRU_free_mapped, bck);
|
||||
@ -839,7 +881,7 @@ the buffer pool. */
|
||||
|
||||
void
|
||||
buf_page_make_young(
|
||||
/*=================*/
|
||||
/*================*/
|
||||
buf_frame_t* frame) /* in: buffer frame of a file page */
|
||||
{
|
||||
buf_block_t* block;
|
||||
@ -1077,11 +1119,11 @@ buf_page_get_gen(
|
||||
|
||||
ut_ad(mtr);
|
||||
ut_ad((rw_latch == RW_S_LATCH)
|
||||
|| (rw_latch == RW_X_LATCH)
|
||||
|| (rw_latch == RW_NO_LATCH));
|
||||
|| (rw_latch == RW_X_LATCH)
|
||||
|| (rw_latch == RW_NO_LATCH));
|
||||
ut_ad((mode != BUF_GET_NO_LATCH) || (rw_latch == RW_NO_LATCH));
|
||||
ut_ad((mode == BUF_GET) || (mode == BUF_GET_IF_IN_POOL)
|
||||
|| (mode == BUF_GET_NO_LATCH) || (mode == BUF_GET_NOWAIT));
|
||||
|| (mode == BUF_GET_NO_LATCH) || (mode == BUF_GET_NOWAIT));
|
||||
#ifndef UNIV_LOG_DEBUG
|
||||
ut_ad(!ibuf_inside() || ibuf_page(space, offset));
|
||||
#endif
|
||||
@ -1211,24 +1253,24 @@ loop:
|
||||
} else if (rw_latch == RW_NO_LATCH) {
|
||||
|
||||
if (must_read) {
|
||||
/* Let us wait until the read operation
|
||||
/* Let us wait until the read operation
|
||||
completes */
|
||||
|
||||
for (;;) {
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
for (;;) {
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
|
||||
if (block->io_fix == BUF_IO_READ) {
|
||||
if (block->io_fix == BUF_IO_READ) {
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
/* Sleep 20 milliseconds */
|
||||
/* Sleep 20 milliseconds */
|
||||
|
||||
os_thread_sleep(20000);
|
||||
os_thread_sleep(20000);
|
||||
} else {
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
break;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1412,14 +1454,14 @@ buf_page_get_known_nowait(
|
||||
block = buf_block_align(guess);
|
||||
|
||||
if (block->state == BUF_BLOCK_REMOVE_HASH) {
|
||||
/* Another thread is just freeing the block from the LRU list
|
||||
of the buffer pool: do not try to access this page; this
|
||||
/* Another thread is just freeing the block from the LRU list
|
||||
of the buffer pool: do not try to access this page; this
|
||||
attempt to access the page can only come through the hash
|
||||
index because when the buffer block state is ..._REMOVE_HASH,
|
||||
we have already removed it from the page address hash table
|
||||
of the buffer pool. */
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
@ -1499,9 +1541,9 @@ buf_page_init_for_backup_restore(
|
||||
/* Set the state of the block */
|
||||
block->magic_n = BUF_BLOCK_MAGIC_N;
|
||||
|
||||
block->state = BUF_BLOCK_FILE_PAGE;
|
||||
block->space = space;
|
||||
block->offset = offset;
|
||||
block->state = BUF_BLOCK_FILE_PAGE;
|
||||
block->space = space;
|
||||
block->offset = offset;
|
||||
|
||||
block->lock_hash_val = 0;
|
||||
block->lock_mutex = NULL;
|
||||
@ -1512,14 +1554,14 @@ buf_page_init_for_backup_restore(
|
||||
block->oldest_modification = ut_dulint_zero;
|
||||
|
||||
block->accessed = FALSE;
|
||||
block->buf_fix_count = 0;
|
||||
block->buf_fix_count = 0;
|
||||
block->io_fix = 0;
|
||||
|
||||
block->n_hash_helps = 0;
|
||||
block->is_hashed = FALSE;
|
||||
block->n_fields = 1;
|
||||
block->n_bytes = 0;
|
||||
block->side = BTR_SEARCH_LEFT_SIDE;
|
||||
block->n_fields = 1;
|
||||
block->n_bytes = 0;
|
||||
block->side = BTR_SEARCH_LEFT_SIDE;
|
||||
|
||||
block->file_page_was_freed = FALSE;
|
||||
}
|
||||
@ -1543,9 +1585,9 @@ buf_page_init(
|
||||
/* Set the state of the block */
|
||||
block->magic_n = BUF_BLOCK_MAGIC_N;
|
||||
|
||||
block->state = BUF_BLOCK_FILE_PAGE;
|
||||
block->space = space;
|
||||
block->offset = offset;
|
||||
block->state = BUF_BLOCK_FILE_PAGE;
|
||||
block->space = space;
|
||||
block->offset = offset;
|
||||
|
||||
block->check_index_page_at_flush = FALSE;
|
||||
block->index = NULL;
|
||||
@ -1555,19 +1597,19 @@ buf_page_init(
|
||||
|
||||
/* Insert into the hash table of file pages */
|
||||
|
||||
if (buf_page_hash_get(space, offset)) {
|
||||
fprintf(stderr,
|
||||
if (buf_page_hash_get(space, offset)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: page %lu %lu already found from the hash table\n",
|
||||
(ulong) space,
|
||||
(ulong) offset);
|
||||
#ifdef UNIV_DEBUG
|
||||
buf_print();
|
||||
buf_LRU_print();
|
||||
buf_validate();
|
||||
buf_LRU_validate();
|
||||
buf_print();
|
||||
buf_LRU_print();
|
||||
buf_validate();
|
||||
buf_LRU_validate();
|
||||
#endif /* UNIV_DEBUG */
|
||||
ut_a(0);
|
||||
}
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
HASH_INSERT(buf_block_t, hash, buf_pool->page_hash,
|
||||
buf_page_address_fold(space, offset), block);
|
||||
@ -1578,14 +1620,14 @@ buf_page_init(
|
||||
block->oldest_modification = ut_dulint_zero;
|
||||
|
||||
block->accessed = FALSE;
|
||||
block->buf_fix_count = 0;
|
||||
block->buf_fix_count = 0;
|
||||
block->io_fix = 0;
|
||||
|
||||
block->n_hash_helps = 0;
|
||||
block->is_hashed = FALSE;
|
||||
block->n_fields = 1;
|
||||
block->n_bytes = 0;
|
||||
block->side = BTR_SEARCH_LEFT_SIDE;
|
||||
block->n_fields = 1;
|
||||
block->n_bytes = 0;
|
||||
block->side = BTR_SEARCH_LEFT_SIDE;
|
||||
|
||||
block->file_page_was_freed = FALSE;
|
||||
}
|
||||
@ -1651,7 +1693,7 @@ buf_page_init_for_read(
|
||||
}
|
||||
|
||||
if (*err == DB_TABLESPACE_DELETED
|
||||
|| NULL != buf_page_hash_get(space, offset)) {
|
||||
|| NULL != buf_page_hash_get(space, offset)) {
|
||||
|
||||
/* The page belongs to a space which has been deleted or is
|
||||
being deleted, or the page is already in buf_pool, return */
|
||||
@ -1673,7 +1715,7 @@ buf_page_init_for_read(
|
||||
|
||||
/* The block must be put to the LRU list, to the old blocks */
|
||||
|
||||
buf_LRU_add_block(block, TRUE); /* TRUE == to old blocks */
|
||||
buf_LRU_add_block(block, TRUE); /* TRUE == to old blocks */
|
||||
|
||||
block->io_fix = BUF_IO_READ;
|
||||
buf_pool->n_pend_reads++;
|
||||
@ -1687,7 +1729,7 @@ buf_page_init_for_read(
|
||||
|
||||
rw_lock_x_lock_gen(&(block->lock), BUF_IO_READ);
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
if (mode == BUF_READ_IBUF_PAGES_ONLY) {
|
||||
|
||||
@@ -1779,6 +1821,10 @@ buf_page_create(

frame = block->frame;

memset(frame + FIL_PAGE_PREV, 0xff, 4);
memset(frame + FIL_PAGE_NEXT, 0xff, 4);
mach_write_to_2(frame + FIL_PAGE_TYPE, FIL_PAGE_TYPE_ALLOCATED);

/* Reset to zero the file flush lsn field in the page; if the first
page of an ibdata file is 'created' in this function into the buffer
pool then we lose the original contents of the file flush lsn stamp.
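The three added lines above stamp a freshly created page: the previous and next page numbers become all ones (meaning "no page") and the page type becomes FIL_PAGE_TYPE_ALLOCATED, so readers never see leftover bytes from the recycled frame. A small sketch of the same stamping; the field offsets below follow the standard FIL page header layout and are assumptions for this illustration:

#include <string.h>

#define FIL_PAGE_PREV		8	/* assumed header offsets */
#define FIL_PAGE_NEXT		12
#define FIL_PAGE_TYPE		24
#define FIL_PAGE_TYPE_ALLOCATED	0	/* freshly allocated page */

/* Stamp a just-created page with "no neighbours" pointers and a
defined page type. */
void stamp_new_page(unsigned char* frame)
{
	memset(frame + FIL_PAGE_PREV, 0xff, 4);
	memset(frame + FIL_PAGE_NEXT, 0xff, 4);

	/* big-endian 2-byte store, as mach_write_to_2() does */
	frame[FIL_PAGE_TYPE]     = (FIL_PAGE_TYPE_ALLOCATED >> 8) & 0xff;
	frame[FIL_PAGE_TYPE + 1] = FIL_PAGE_TYPE_ALLOCATED & 0xff;
}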
@ -1827,7 +1873,7 @@ buf_page_io_complete(
|
||||
+ FIL_PAGE_OFFSET);
|
||||
if (read_page_no != 0
|
||||
&& !trx_doublewrite_page_inside(read_page_no)
|
||||
&& read_page_no != block->offset) {
|
||||
&& read_page_no != block->offset) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: page n:o stored in the page read in is %lu, should be %lu!\n",
|
||||
@ -1837,7 +1883,7 @@ buf_page_io_complete(
|
||||
to the 4 first bytes of the page end lsn field */
|
||||
|
||||
if (buf_page_is_corrupted(block->frame)) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Database page corruption on disk or a failed\n"
|
||||
"InnoDB: file read of page %lu.\n", (ulong) block->offset);
|
||||
|
||||
@ -1846,7 +1892,7 @@ buf_page_io_complete(
|
||||
|
||||
buf_page_print(block->frame);
|
||||
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Database page corruption on disk or a failed\n"
|
||||
"InnoDB: file read of page %lu.\n", (ulong) block->offset);
|
||||
fputs(
|
||||
@ -1869,8 +1915,8 @@ buf_page_io_complete(
|
||||
fputs(
|
||||
"InnoDB: Ending processing because of a corrupt database page.\n",
|
||||
stderr);
|
||||
exit(1);
|
||||
}
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if (recv_recovery_is_on()) {
|
||||
@ -2001,8 +2047,8 @@ buf_validate(void)
|
||||
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
ut_a((block->io_fix == BUF_IO_READ)
|
||||
|| ibuf_count_get(block->space, block->offset)
|
||||
== 0);
|
||||
|| ibuf_count_get(block->space, block->offset)
|
||||
== 0);
|
||||
#endif
|
||||
if (block->io_fix == BUF_IO_WRITE) {
|
||||
|
||||
@ -2036,7 +2082,7 @@ buf_validate(void)
|
||||
} else if (block->state == BUF_BLOCK_NOT_USED) {
|
||||
n_free++;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (n_lru + n_free > buf_pool->curr_size) {
|
||||
fprintf(stderr, "n LRU %lu, n free %lu\n", (ulong) n_lru, (ulong) n_free);
|
||||
@ -2077,7 +2123,7 @@ buf_print(void)
|
||||
ulint j;
|
||||
dulint id;
|
||||
ulint n_found;
|
||||
buf_frame_t* frame;
|
||||
buf_frame_t* frame;
|
||||
dict_index_t* index;
|
||||
|
||||
ut_ad(buf_pool);
|
||||
@ -2147,8 +2193,8 @@ buf_print(void)
|
||||
|
||||
fprintf(stderr,
|
||||
"Block count for index %lu in buffer is about %lu",
|
||||
(ulong) ut_dulint_get_low(index_ids[i]),
|
||||
(ulong) counts[i]);
|
||||
(ulong) ut_dulint_get_low(index_ids[i]),
|
||||
(ulong) counts[i]);
|
||||
|
||||
if (index) {
|
||||
putc(' ', stderr);
|
||||
@ -2171,23 +2217,23 @@ Returns the number of latched pages in the buffer pool. */
|
||||
ulint
|
||||
buf_get_latched_pages_number(void)
|
||||
{
|
||||
buf_block_t* block;
|
||||
ulint i;
|
||||
ulint fixed_pages_number = 0;
|
||||
buf_block_t* block;
|
||||
ulint i;
|
||||
ulint fixed_pages_number = 0;
|
||||
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
|
||||
for (i = 0; i < buf_pool->curr_size; i++) {
|
||||
for (i = 0; i < buf_pool->curr_size; i++) {
|
||||
|
||||
block = buf_pool_get_nth_block(buf_pool, i);
|
||||
block = buf_pool_get_nth_block(buf_pool, i);
|
||||
|
||||
if (((block->buf_fix_count != 0) || (block->io_fix != 0)) &&
|
||||
block->magic_n == BUF_BLOCK_MAGIC_N )
|
||||
fixed_pages_number++;
|
||||
}
|
||||
if (((block->buf_fix_count != 0) || (block->io_fix != 0)) &&
|
||||
block->magic_n == BUF_BLOCK_MAGIC_N )
|
||||
fixed_pages_number++;
|
||||
}
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
return fixed_pages_number;
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
return fixed_pages_number;
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
@ -2216,10 +2262,10 @@ buf_get_modified_ratio_pct(void)
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
|
||||
ratio = (100 * UT_LIST_GET_LEN(buf_pool->flush_list))
|
||||
/ (1 + UT_LIST_GET_LEN(buf_pool->LRU)
|
||||
+ UT_LIST_GET_LEN(buf_pool->free));
|
||||
/ (1 + UT_LIST_GET_LEN(buf_pool->LRU)
|
||||
+ UT_LIST_GET_LEN(buf_pool->free));
|
||||
|
||||
/* 1 + is there to avoid division by zero */
|
||||
/* 1 + is there to avoid division by zero */
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
@ -2297,10 +2343,9 @@ buf_print_io(
|
||||
|
||||
if (buf_pool->n_page_gets > buf_pool->n_page_gets_old) {
|
||||
fprintf(file, "Buffer pool hit rate %lu / 1000\n",
|
||||
(ulong) (1000
|
||||
- ((1000 *
|
||||
(buf_pool->n_pages_read - buf_pool->n_pages_read_old))
|
||||
/ (buf_pool->n_page_gets - buf_pool->n_page_gets_old))));
|
||||
(ulong) (1000 -
|
||||
((1000 * (buf_pool->n_pages_read - buf_pool->n_pages_read_old))
|
||||
/ (buf_pool->n_page_gets - buf_pool->n_page_gets_old))));
|
||||
} else {
|
||||
fputs("No buffer pool page gets since the last printout\n",
|
||||
file);
|
||||
@ -2322,7 +2367,7 @@ void
|
||||
buf_refresh_io_stats(void)
|
||||
/*======================*/
|
||||
{
|
||||
buf_pool->last_printout_time = time(NULL);
|
||||
buf_pool->last_printout_time = time(NULL);
|
||||
buf_pool->n_page_gets_old = buf_pool->n_page_gets;
|
||||
buf_pool->n_pages_read_old = buf_pool->n_pages_read;
|
||||
buf_pool->n_pages_created_old = buf_pool->n_pages_created;
|
||||
@ -2355,10 +2400,10 @@ buf_all_freed(void)
|
||||
fprintf(stderr,
|
||||
"Page %lu %lu still fixed or dirty\n",
|
||||
(ulong) block->space, (ulong) block->offset);
|
||||
ut_error;
|
||||
ut_error;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
|
||||
|
@ -30,7 +30,7 @@ Created 11/11/1995 Heikki Tuuri
|
||||
flushed along with the original page. */
|
||||
|
||||
#define BUF_FLUSH_AREA ut_min(BUF_READ_AHEAD_AREA,\
|
||||
buf_pool->curr_size / 16)
|
||||
buf_pool->curr_size / 16)
|
||||
|
||||
/**********************************************************************
|
||||
Validates the flush list. */
|
||||
@ -55,10 +55,10 @@ buf_flush_insert_into_flush_list(
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|
||||
|| (ut_dulint_cmp(
|
||||
(UT_LIST_GET_FIRST(buf_pool->flush_list))
|
||||
->oldest_modification,
|
||||
block->oldest_modification) <= 0));
|
||||
|| (ut_dulint_cmp(
|
||||
(UT_LIST_GET_FIRST(buf_pool->flush_list))
|
||||
->oldest_modification,
|
||||
block->oldest_modification) <= 0));
|
||||
|
||||
UT_LIST_ADD_FIRST(flush_list, buf_pool->flush_list, block);
|
||||
|
||||
@ -120,14 +120,14 @@ buf_flush_ready_for_replace(
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: buffer block state %lu in the LRU list!\n",
|
||||
(ulong)block->state);
|
||||
ut_print_buf(stderr, (byte*)block, sizeof(buf_block_t));
|
||||
ut_print_buf(stderr, block, sizeof(buf_block_t));
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
|
||||
|| (block->buf_fix_count != 0)
|
||||
|| (block->io_fix != 0)) {
|
||||
|| (block->buf_fix_count != 0)
|
||||
|| (block->io_fix != 0)) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
@ -152,8 +152,8 @@ buf_flush_ready_for_flush(
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
if ((ut_dulint_cmp(block->oldest_modification, ut_dulint_zero) > 0)
|
||||
&& (block->io_fix == 0)) {
|
||||
if (flush_type != BUF_FLUSH_LRU) {
|
||||
&& (block->io_fix == 0)) {
|
||||
if (flush_type != BUF_FLUSH_LRU) {
|
||||
|
||||
return(TRUE);
|
||||
|
||||
@ -205,7 +205,7 @@ buf_flush_write_complete(
|
||||
buf_pool->n_flush[block->flush_type]); */
|
||||
|
||||
if ((buf_pool->n_flush[block->flush_type] == 0)
|
||||
&& (buf_pool->init_flush[block->flush_type] == FALSE)) {
|
||||
&& (buf_pool->init_flush[block->flush_type] == FALSE)) {
|
||||
|
||||
/* The running flush batch has ended */
|
||||
|
||||
@ -252,17 +252,17 @@ buf_flush_buffered_writes(void)
|
||||
for (i = 0; i < trx_doublewrite->first_free; i++) {
|
||||
|
||||
block = trx_doublewrite->buf_block_arr[i];
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
if (mach_read_from_4(block->frame + FIL_PAGE_LSN + 4)
|
||||
!= mach_read_from_4(block->frame + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
!= mach_read_from_4(block->frame + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: ERROR: The page to be written seems corrupt!\n"
|
||||
"InnoDB: The lsn fields do not match! Noticed in the buffer pool\n"
|
||||
"InnoDB: before posting to the doublewrite buffer.\n");
|
||||
}
|
||||
}
|
||||
|
||||
if (block->check_index_page_at_flush
|
||||
&& !page_simple_validate(block->frame)) {
|
||||
@ -281,9 +281,9 @@ buf_flush_buffered_writes(void)
|
||||
}
|
||||
}
|
||||
|
||||
/* increment the doublewrite flushed pages counter */
|
||||
srv_dblwr_pages_written+= trx_doublewrite->first_free;
|
||||
srv_dblwr_writes++;
|
||||
/* increment the doublewrite flushed pages counter */
|
||||
srv_dblwr_pages_written+= trx_doublewrite->first_free;
|
||||
srv_dblwr_writes++;
|
||||
|
||||
if (trx_doublewrite->first_free > TRX_SYS_DOUBLEWRITE_BLOCK_SIZE) {
|
||||
len = TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE;
|
||||
@ -294,14 +294,14 @@ buf_flush_buffered_writes(void)
|
||||
fil_io(OS_FILE_WRITE,
|
||||
TRUE, TRX_SYS_SPACE,
|
||||
trx_doublewrite->block1, 0, len,
|
||||
(void*)trx_doublewrite->write_buf, NULL);
|
||||
(void*)trx_doublewrite->write_buf, NULL);
|
||||
|
||||
write_buf = trx_doublewrite->write_buf;
|
||||
|
||||
for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len; len2 += UNIV_PAGE_SIZE) {
|
||||
if (mach_read_from_4(write_buf + len2 + FIL_PAGE_LSN + 4)
|
||||
!= mach_read_from_4(write_buf + len2 + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len; len2 += UNIV_PAGE_SIZE) {
|
||||
if (mach_read_from_4(write_buf + len2 + FIL_PAGE_LSN + 4)
|
||||
!= mach_read_from_4(write_buf + len2 + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: ERROR: The page to be written seems corrupt!\n"
|
||||
@ -316,19 +316,19 @@ buf_flush_buffered_writes(void)
|
||||
fil_io(OS_FILE_WRITE,
|
||||
TRUE, TRX_SYS_SPACE,
|
||||
trx_doublewrite->block2, 0, len,
|
||||
(void*)(trx_doublewrite->write_buf
|
||||
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE),
|
||||
(void*)(trx_doublewrite->write_buf
|
||||
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE),
|
||||
NULL);
|
||||
|
||||
write_buf = trx_doublewrite->write_buf
|
||||
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE * UNIV_PAGE_SIZE;
|
||||
for (len2 = 0; len2 + UNIV_PAGE_SIZE <= len;
|
||||
len2 += UNIV_PAGE_SIZE) {
|
||||
if (mach_read_from_4(write_buf + len2
|
||||
+ FIL_PAGE_LSN + 4)
|
||||
!= mach_read_from_4(write_buf + len2
|
||||
if (mach_read_from_4(write_buf + len2
|
||||
+ FIL_PAGE_LSN + 4)
|
||||
!= mach_read_from_4(write_buf + len2
|
||||
+ UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: ERROR: The page to be written seems corrupt!\n"
|
||||
@ -349,22 +349,23 @@ buf_flush_buffered_writes(void)
|
||||
block = trx_doublewrite->buf_block_arr[i];
|
||||
|
||||
if (mach_read_from_4(block->frame + FIL_PAGE_LSN + 4)
|
||||
!= mach_read_from_4(block->frame + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
!= mach_read_from_4(block->frame + UNIV_PAGE_SIZE
|
||||
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: ERROR: The page to be written seems corrupt!\n"
|
||||
"InnoDB: The lsn fields do not match! Noticed in the buffer pool\n"
|
||||
"InnoDB: after posting and flushing the doublewrite buffer.\n"
|
||||
"InnoDB: Page buf fix count %lu, io fix %lu, state %lu\n",
|
||||
(ulong)block->buf_fix_count, (ulong)block->io_fix,
|
||||
(ulong)block->state);
|
||||
}
|
||||
(ulong)block->buf_fix_count,
|
||||
(ulong)block->io_fix,
|
||||
(ulong)block->state);
|
||||
}
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
|
||||
FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
|
||||
(void*)block->frame, (void*)block);
|
||||
(void*)block->frame, (void*)block);
|
||||
}
|
||||
|
||||
/* Wake possible simulated aio thread to actually post the
|
||||
@ -451,13 +452,13 @@ buf_flush_init_for_writing(
|
||||
/* Write the page number and the space id */
|
||||
|
||||
mach_write_to_4(page + FIL_PAGE_OFFSET, page_no);
|
||||
mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space);
|
||||
mach_write_to_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID, space);
|
||||
|
||||
/* Store the new formula checksum */
|
||||
|
||||
mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM,
|
||||
srv_use_checksums ?
|
||||
buf_calc_page_new_checksum(page) : BUF_NO_CHECKSUM_MAGIC);
|
||||
buf_calc_page_new_checksum(page) : BUF_NO_CHECKSUM_MAGIC);
|
||||
|
||||
/* We overwrite the first 4 bytes of the end lsn field to store
|
||||
the old formula checksum. Since it depends also on the field
|
||||
@ -466,7 +467,7 @@ buf_flush_init_for_writing(
|
||||
|
||||
mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
|
||||
srv_use_checksums ?
|
||||
buf_calc_page_old_checksum(page) : BUF_NO_CHECKSUM_MAGIC);
|
||||
buf_calc_page_old_checksum(page) : BUF_NO_CHECKSUM_MAGIC);
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
@ -506,7 +507,7 @@ buf_flush_write_block_low(
|
||||
if (!srv_use_doublewrite_buf || !trx_doublewrite) {
|
||||
fil_io(OS_FILE_WRITE | OS_AIO_SIMULATED_WAKE_LATER,
|
||||
FALSE, block->space, block->offset, 0, UNIV_PAGE_SIZE,
|
||||
(void*)block->frame, (void*)block);
|
||||
(void*)block->frame, (void*)block);
|
||||
} else {
|
||||
buf_flush_post_to_doublewrite_buf(block);
|
||||
}
|
||||
@ -540,7 +541,7 @@ buf_flush_try_page(
|
||||
ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
if (flush_type == BUF_FLUSH_LIST
|
||||
&& block && buf_flush_ready_for_flush(block, flush_type)) {
|
||||
&& block && buf_flush_ready_for_flush(block, flush_type)) {
|
||||
|
||||
block->io_fix = BUF_IO_WRITE;
|
||||
|
||||
@ -681,7 +682,7 @@ buf_flush_try_page(
|
||||
fprintf(stderr,
|
||||
"Flushing single page space %lu, page no %lu \n",
|
||||
(ulong) block->space,
|
||||
(ulong) block->offset);
|
||||
(ulong) block->offset);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
@ -738,12 +739,12 @@ buf_flush_try_neighbors(
|
||||
ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
if (block && flush_type == BUF_FLUSH_LRU && i != offset
|
||||
&& !block->old) {
|
||||
&& !block->old) {
|
||||
|
||||
/* We avoid flushing 'non-old' blocks in an LRU flush,
|
||||
because the flushed blocks are soon freed */
|
||||
/* We avoid flushing 'non-old' blocks in an LRU flush,
|
||||
because the flushed blocks are soon freed */
|
||||
|
||||
continue;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (block && buf_flush_ready_for_flush(block, flush_type)
|
||||
@ -798,7 +799,7 @@ buf_flush_batch(
|
||||
exceed min_n), otherwise ignored */
|
||||
{
|
||||
buf_block_t* block;
|
||||
ulint page_count = 0;
|
||||
ulint page_count = 0;
|
||||
ulint old_page_count;
|
||||
ulint space;
|
||||
ulint offset;
|
||||
@ -811,7 +812,7 @@ buf_flush_batch(
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
|
||||
if ((buf_pool->n_flush[flush_type] > 0)
|
||||
|| (buf_pool->init_flush[flush_type] == TRUE)) {
|
||||
|| (buf_pool->init_flush[flush_type] == TRUE)) {
|
||||
|
||||
/* There is already a flush batch of the same type running */
|
||||
|
||||
@ -832,22 +833,22 @@ buf_flush_batch(
|
||||
/* Start from the end of the list looking for a suitable
|
||||
block to be flushed. */
|
||||
|
||||
if (flush_type == BUF_FLUSH_LRU) {
|
||||
if (flush_type == BUF_FLUSH_LRU) {
|
||||
block = UT_LIST_GET_LAST(buf_pool->LRU);
|
||||
} else {
|
||||
} else {
|
||||
ut_ad(flush_type == BUF_FLUSH_LIST);
|
||||
|
||||
block = UT_LIST_GET_LAST(buf_pool->flush_list);
|
||||
if (!block
|
||||
|| (ut_dulint_cmp(block->oldest_modification,
|
||||
lsn_limit) >= 0)) {
|
||||
|| (ut_dulint_cmp(block->oldest_modification,
|
||||
lsn_limit) >= 0)) {
|
||||
/* We have flushed enough */
|
||||
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
found = FALSE;
|
||||
found = FALSE;
|
||||
|
||||
/* Note that after finding a single flushable page, we try to
|
||||
flush also all its neighbors, and after that start from the
|
||||
@ -855,7 +856,7 @@ buf_flush_batch(
|
||||
during the flushing and we cannot safely preserve within this
|
||||
function a pointer to a block in the list! */
|
||||
|
||||
while ((block != NULL) && !found) {
|
||||
while ((block != NULL) && !found) {
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
if (buf_flush_ready_for_flush(block, flush_type)) {
|
||||
@ -887,19 +888,19 @@ buf_flush_batch(
|
||||
|
||||
block = UT_LIST_GET_PREV(flush_list, block);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* If we could not find anything to flush, leave the loop */
|
||||
/* If we could not find anything to flush, leave the loop */
|
||||
|
||||
if (!found) {
|
||||
break;
|
||||
}
|
||||
if (!found) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
(buf_pool->init_flush)[flush_type] = FALSE;
|
||||
|
||||
if ((buf_pool->n_flush[flush_type] == 0)
|
||||
&& (buf_pool->init_flush[flush_type] == FALSE)) {
|
||||
&& (buf_pool->init_flush[flush_type] == FALSE)) {
|
||||
|
||||
/* The running flush batch has ended */
|
||||
|
||||
@ -921,8 +922,8 @@ buf_flush_batch(
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
if (page_count != ULINT_UNDEFINED)
|
||||
srv_buf_pool_flushed+= page_count;
|
||||
if (page_count != ULINT_UNDEFINED)
|
||||
srv_buf_pool_flushed+= page_count;
|
||||
|
||||
return(page_count);
|
||||
}
|
||||
@ -962,9 +963,9 @@ buf_flush_LRU_recommendation(void)
|
||||
block = UT_LIST_GET_LAST(buf_pool->LRU);
|
||||
|
||||
while ((block != NULL)
|
||||
&& (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN
|
||||
+ BUF_FLUSH_EXTRA_MARGIN)
|
||||
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
|
||||
&& (n_replaceable < BUF_FLUSH_FREE_BLOCK_MARGIN
|
||||
+ BUF_FLUSH_EXTRA_MARGIN)
|
||||
&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {
|
||||
|
||||
if (buf_flush_ready_for_replace(block)) {
|
||||
n_replaceable++;
|
||||
@ -1009,7 +1010,7 @@ buf_flush_free_margin(void)
|
||||
/* There was an LRU type flush batch already running;
|
||||
let us wait for it to end */
|
||||
|
||||
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
|
||||
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -86,10 +86,10 @@ scan_again:
|
||||
block = UT_LIST_GET_LAST(buf_pool->LRU);
|
||||
|
||||
while (block != NULL) {
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
|
||||
if (block->space == id
|
||||
&& (block->buf_fix_count > 0 || block->io_fix != 0)) {
|
||||
&& (block->buf_fix_count > 0 || block->io_fix != 0)) {
|
||||
|
||||
/* We cannot remove this page during this scan yet;
|
||||
maybe the system is currently reading it in, or
|
||||
@ -106,7 +106,7 @@ scan_again:
|
||||
printf(
|
||||
"Dropping space %lu page %lu\n",
|
||||
(ulong) block->space,
|
||||
(ulong) block->offset);
|
||||
(ulong) block->offset);
|
||||
}
|
||||
#endif
|
||||
if (block->is_hashed) {
|
||||
@ -146,7 +146,7 @@ next_page:
|
||||
if (!all_freed) {
|
||||
os_thread_sleep(20000);
|
||||
|
||||
goto scan_again;
|
||||
goto scan_again;
|
||||
}
|
||||
}
|
||||
|
||||
@ -193,7 +193,7 @@ ibool
|
||||
buf_LRU_search_and_free_block(
|
||||
/*==========================*/
|
||||
/* out: TRUE if freed */
|
||||
ulint n_iterations) /* in: how many times this has been called
|
||||
ulint n_iterations) /* in: how many times this has been called
|
||||
repeatedly without result: a high value means
|
||||
that we should search farther; if value is
|
||||
k < 10, then we only search k/10 * [number
|
||||
@ -210,7 +210,7 @@ buf_LRU_search_and_free_block(
|
||||
block = UT_LIST_GET_LAST(buf_pool->LRU);
|
||||
|
||||
while (block != NULL) {
|
||||
ut_a(block->in_LRU_list);
|
||||
ut_a(block->in_LRU_list);
|
||||
if (buf_flush_ready_for_replace(block)) {
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
@ -218,7 +218,7 @@ buf_LRU_search_and_free_block(
|
||||
fprintf(stderr,
|
||||
"Putting space %lu page %lu to free list\n",
|
||||
(ulong) block->space,
|
||||
(ulong) block->offset);
|
||||
(ulong) block->offset);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
@ -246,8 +246,8 @@ buf_LRU_search_and_free_block(
|
||||
distance++;
|
||||
|
||||
if (!freed && n_iterations <= 10
|
||||
&& distance > 100 + (n_iterations * buf_pool->curr_size)
|
||||
/ 10) {
|
||||
&& distance > 100 + (n_iterations * buf_pool->curr_size)
|
||||
/ 10) {
|
||||
buf_pool->LRU_flush_ended = 0;
|
||||
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
@ -258,7 +258,7 @@ buf_LRU_search_and_free_block(
|
||||
if (buf_pool->LRU_flush_ended > 0) {
|
||||
buf_pool->LRU_flush_ended--;
|
||||
}
|
||||
if (!freed) {
|
||||
if (!freed) {
|
||||
buf_pool->LRU_flush_ended = 0;
|
||||
}
|
||||
mutex_exit(&(buf_pool->mutex));
|
||||
@ -334,16 +334,16 @@ buf_LRU_get_free_block(void)
|
||||
buf_block_t* block = NULL;
|
||||
ibool freed;
|
||||
ulint n_iterations = 1;
|
||||
ibool mon_value_was = FALSE;
|
||||
ibool mon_value_was = FALSE;
|
||||
ibool started_monitor = FALSE;
|
||||
loop:
|
||||
mutex_enter(&(buf_pool->mutex));
|
||||
|
||||
if (!recv_recovery_on && UT_LIST_GET_LEN(buf_pool->free)
|
||||
+ UT_LIST_GET_LEN(buf_pool->LRU) < buf_pool->max_size / 10) {
|
||||
ut_print_timestamp(stderr);
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
" InnoDB: ERROR: over 9 / 10 of the buffer pool is occupied by\n"
|
||||
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
|
||||
"InnoDB: transactions do not set too many row locks.\n"
|
||||
@ -360,12 +360,12 @@ loop:
|
||||
|
||||
if (!buf_lru_switched_on_innodb_mon) {
|
||||
|
||||
/* Over 80 % of the buffer pool is occupied by lock
|
||||
/* Over 80 % of the buffer pool is occupied by lock
|
||||
heaps or the adaptive hash index. This may be a memory
|
||||
leak! */
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: WARNING: over 4 / 5 of the buffer pool is occupied by\n"
|
||||
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
|
||||
"InnoDB: transactions do not set too many row locks.\n"
|
||||
@ -398,7 +398,7 @@ loop:
|
||||
UT_LIST_REMOVE(free, buf_pool->free, block);
|
||||
block->in_free_list = FALSE;
|
||||
ut_a(block->state != BUF_BLOCK_FILE_PAGE);
|
||||
ut_a(!block->in_LRU_list);
|
||||
ut_a(!block->in_LRU_list);
|
||||
|
||||
if (srv_use_awe) {
|
||||
if (block->frame) {
|
||||
@ -456,7 +456,7 @@ loop:
|
||||
(ulong) fil_n_pending_log_flushes,
|
||||
(ulong) fil_n_pending_tablespace_flushes,
|
||||
(ulong) os_n_file_reads, (ulong) os_n_file_writes,
|
||||
(ulong) os_n_fsyncs);
|
||||
(ulong) os_n_fsyncs);
|
||||
|
||||
mon_value_was = srv_print_innodb_monitor;
|
||||
started_monitor = TRUE;
|
||||
@ -467,7 +467,7 @@ loop:
|
||||
/* No free block was found: try to flush the LRU list */
|
||||
|
||||
buf_flush_free_margin();
|
||||
++srv_buf_pool_wait_free;
|
||||
++srv_buf_pool_wait_free;
|
||||
|
||||
os_aio_simulated_wake_handler_threads();
|
||||
|
||||
@ -561,7 +561,7 @@ buf_LRU_old_init(void)
|
||||
|
||||
while (block != NULL) {
|
||||
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
|
||||
ut_a(block->in_LRU_list);
|
||||
ut_a(block->in_LRU_list);
|
||||
block->old = TRUE;
|
||||
block = UT_LIST_GET_NEXT(LRU, block);
|
||||
}
|
||||
@ -822,7 +822,7 @@ buf_LRU_block_free_non_file_page(
|
||||
ut_ad(block);
|
||||
|
||||
ut_a((block->state == BUF_BLOCK_MEMORY)
|
||||
|| (block->state == BUF_BLOCK_READY_FOR_USE));
|
||||
|| (block->state == BUF_BLOCK_READY_FOR_USE));
|
||||
|
||||
ut_a(block->n_pointers == 0);
|
||||
ut_a(!block->in_free_list);
|
||||
@ -871,30 +871,30 @@ buf_LRU_block_remove_hashed_page(
|
||||
|
||||
/* Note that if AWE is enabled the block may not have a frame at all */
|
||||
|
||||
buf_block_modify_clock_inc(block);
|
||||
buf_block_modify_clock_inc(block);
|
||||
|
||||
if (block != buf_page_hash_get(block->space, block->offset)) {
|
||||
fprintf(stderr,
|
||||
if (block != buf_page_hash_get(block->space, block->offset)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: page %lu %lu not found from the hash table\n",
|
||||
(ulong) block->space,
|
||||
(ulong) block->offset);
|
||||
if (buf_page_hash_get(block->space, block->offset)) {
|
||||
fprintf(stderr,
|
||||
if (buf_page_hash_get(block->space, block->offset)) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: From hash table we find block %p of %lu %lu which is not %p\n",
|
||||
buf_page_hash_get(block->space, block->offset),
|
||||
(ulong) buf_page_hash_get(block->space, block->offset)->space,
|
||||
(ulong) buf_page_hash_get(block->space, block->offset)->offset,
|
||||
(ulong) buf_page_hash_get(block->space, block->offset)->space,
|
||||
(ulong) buf_page_hash_get(block->space, block->offset)->offset,
|
||||
block);
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
buf_print();
|
||||
buf_LRU_print();
|
||||
buf_validate();
|
||||
buf_LRU_validate();
|
||||
buf_print();
|
||||
buf_LRU_print();
|
||||
buf_validate();
|
||||
buf_LRU_validate();
|
||||
#endif
|
||||
ut_a(0);
|
||||
}
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
HASH_DELETE(buf_block_t, hash, buf_pool->page_hash,
|
||||
buf_page_address_fold(block->space, block->offset),
|
||||
|
@@ -82,11 +82,11 @@ buf_read_page_low(

	if (trx_doublewrite && space == TRX_SYS_SPACE
		&& ( (offset >= trx_doublewrite->block1
			&& offset < trx_doublewrite->block1
			+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE)
		  || (offset >= trx_doublewrite->block2
			&& offset < trx_doublewrite->block2
			+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE))) {
		ut_print_timestamp(stderr);
		fprintf(stderr,
"  InnoDB: Warning: trying to read doublewrite buffer page %lu\n",
@@ -112,8 +112,8 @@ buf_read_page_low(
		/* Trx sys header is so low in the latching order that we play
		safe and do not leave the i/o-completion to an asynchronous
		i/o-thread. Ibuf bitmap pages must always be read with
		syncronous i/o, to make sure they do not get involved in
		thread deadlocks. */

		sync = TRUE;
	}
@ -132,9 +132,9 @@ buf_read_page_low(
|
||||
#ifdef UNIV_DEBUG
|
||||
if (buf_debug_prints) {
|
||||
fprintf(stderr,
|
||||
"Posting read request for page %lu, sync %lu\n",
|
||||
"Posting read request for page %lu, sync %lu\n",
|
||||
(ulong) offset,
|
||||
(ulong) sync);
|
||||
(ulong) sync);
|
||||
}
|
||||
#endif
|
||||
|
||||
@ -188,15 +188,15 @@ buf_read_ahead_random(
|
||||
ulint i;
|
||||
|
||||
if (srv_startup_is_before_trx_rollback_phase) {
|
||||
/* No read-ahead to avoid thread deadlocks */
|
||||
return(0);
|
||||
/* No read-ahead to avoid thread deadlocks */
|
||||
return(0);
|
||||
}
|
||||
|
||||
if (ibuf_bitmap_page(offset) || trx_sys_hdr_page(space, offset)) {
|
||||
|
||||
/* If it is an ibuf bitmap page or trx sys hdr, we do
|
||||
no read-ahead, as that could break the ibuf page access
|
||||
order */
|
||||
no read-ahead, as that could break the ibuf page access
|
||||
order */
|
||||
|
||||
return(0);
|
||||
}
|
||||
@ -238,8 +238,8 @@ buf_read_ahead_random(
|
||||
block = buf_page_hash_get(space, i);
|
||||
|
||||
if ((block)
|
||||
&& (block->LRU_position > LRU_recent_limit)
|
||||
&& block->accessed) {
|
||||
&& (block->LRU_position > LRU_recent_limit)
|
||||
&& block->accessed) {
|
||||
|
||||
recent_blocks++;
|
||||
}
|
||||
@ -270,7 +270,7 @@ buf_read_ahead_random(
|
||||
if (!ibuf_bitmap_page(i)) {
|
||||
count += buf_read_page_low(&err, FALSE, ibuf_mode
|
||||
| OS_AIO_SIMULATED_WAKE_LATER,
|
||||
space, tablespace_version, i);
|
||||
space, tablespace_version, i);
|
||||
if (err == DB_TABLESPACE_DELETED) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
@ -293,11 +293,11 @@ buf_read_ahead_random(
|
||||
fprintf(stderr,
|
||||
"Random read-ahead space %lu offset %lu pages %lu\n",
|
||||
(ulong) space, (ulong) offset,
|
||||
(ulong) count);
|
||||
(ulong) count);
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
++srv_read_ahead_rnd;
|
||||
++srv_read_ahead_rnd;
|
||||
return(count);
|
||||
}
|
||||
|
||||
@ -330,9 +330,9 @@ buf_read_page(
|
||||
|
||||
count2 = buf_read_page_low(&err, TRUE, BUF_READ_ANY_PAGE, space,
|
||||
tablespace_version, offset);
|
||||
srv_buf_pool_reads+= count2;
|
||||
srv_buf_pool_reads+= count2;
|
||||
if (err == DB_TABLESPACE_DELETED) {
|
||||
ut_print_timestamp(stderr);
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
" InnoDB: Error: trying to access tablespace %lu page no. %lu,\n"
|
||||
"InnoDB: but the tablespace does not exist or is just being dropped.\n",
|
||||
@ -393,15 +393,15 @@ buf_read_ahead_linear(
|
||||
ulint i;
|
||||
|
||||
if (srv_startup_is_before_trx_rollback_phase) {
|
||||
/* No read-ahead to avoid thread deadlocks */
|
||||
return(0);
|
||||
/* No read-ahead to avoid thread deadlocks */
|
||||
return(0);
|
||||
}
|
||||
|
||||
if (ibuf_bitmap_page(offset) || trx_sys_hdr_page(space, offset)) {
|
||||
|
||||
/* If it is an ibuf bitmap page or trx sys hdr, we do
|
||||
no read-ahead, as that could break the ibuf page access
|
||||
order */
|
||||
no read-ahead, as that could break the ibuf page access
|
||||
order */
|
||||
|
||||
return(0);
|
||||
}
|
||||
@@ -458,9 +458,10 @@ buf_read_ahead_linear(
			/* Not accessed */
			fail_count++;

-		} else if (pred_block && (ut_ulint_cmp(block->LRU_position,
-				pred_block->LRU_position)
-				!= asc_or_desc)) {
+		} else if (pred_block
+			&& (ut_ulint_cmp(block->LRU_position,
+				pred_block->LRU_position)
+				!= asc_or_desc)) {
			/* Accesses not in the right order */

			fail_count++;
@@ -503,13 +504,13 @@ buf_read_ahead_linear(

	if ((offset == low) && (succ_offset == offset + 1)) {

		/* This is ok, we can continue */
		new_offset = pred_offset;

	} else if ((offset == high - 1) && (pred_offset == offset - 1)) {

		/* This is ok, we can continue */
		new_offset = succ_offset;
	} else {
		/* Successor or predecessor not in the right order */

@ -556,7 +557,7 @@ buf_read_ahead_linear(
|
||||
if (!ibuf_bitmap_page(i)) {
|
||||
count += buf_read_page_low(&err, FALSE, ibuf_mode
|
||||
| OS_AIO_SIMULATED_WAKE_LATER,
|
||||
space, tablespace_version, i);
|
||||
space, tablespace_version, i);
|
||||
if (err == DB_TABLESPACE_DELETED) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
@ -585,7 +586,7 @@ buf_read_ahead_linear(
|
||||
}
|
||||
#endif /* UNIV_DEBUG */
|
||||
|
||||
++srv_read_ahead_seq;
|
||||
++srv_read_ahead_seq;
|
||||
return(count);
|
||||
}
|
||||
|
||||
@ -704,11 +705,11 @@ buf_read_recv_pages(
|
||||
|
||||
if ((i + 1 == n_stored) && sync) {
|
||||
buf_read_page_low(&err, TRUE, BUF_READ_ANY_PAGE, space,
|
||||
tablespace_version, page_nos[i]);
|
||||
tablespace_version, page_nos[i]);
|
||||
} else {
|
||||
buf_read_page_low(&err, FALSE, BUF_READ_ANY_PAGE
|
||||
| OS_AIO_SIMULATED_WAKE_LATER,
|
||||
space, tablespace_version, page_nos[i]);
|
||||
| OS_AIO_SIMULATED_WAKE_LATER,
|
||||
space, tablespace_version, page_nos[i]);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -18,10 +18,10 @@ Created 5/30/1994 Heikki Tuuri
|
||||
#include "dict0dict.h"
|
||||
#include "btr0cur.h"
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
byte data_error; /* data pointers of tuple fields are initialized
|
||||
to point here for error checking */
|
||||
|
||||
#ifdef UNIV_DEBUG
|
||||
ulint data_dummy; /* this is used to fool the compiler in
|
||||
dtuple_validate */
|
||||
#endif /* UNIV_DEBUG */
|
||||
@ -29,7 +29,7 @@ ulint data_dummy; /* this is used to fool the compiler in
|
||||
/* Some non-inlined functions used in the MySQL interface: */
|
||||
void
|
||||
dfield_set_data_noninline(
|
||||
dfield_t* field, /* in: field */
|
||||
dfield_t* field, /* in: field */
|
||||
void* data, /* in: data */
|
||||
ulint len) /* in: length or UNIV_SQL_NULL */
|
||||
{
|
||||
@ -49,13 +49,13 @@ dfield_get_len_noninline(
|
||||
}
|
||||
ulint
|
||||
dtuple_get_n_fields_noninline(
|
||||
dtuple_t* tuple) /* in: tuple */
|
||||
dtuple_t* tuple) /* in: tuple */
|
||||
{
|
||||
return(dtuple_get_n_fields(tuple));
|
||||
}
|
||||
dfield_t*
|
||||
dtuple_get_nth_field_noninline(
|
||||
dtuple_t* tuple, /* in: tuple */
|
||||
dtuple_t* tuple, /* in: tuple */
|
||||
ulint n) /* in: index of field */
|
||||
{
|
||||
return(dtuple_get_nth_field(tuple, n));
|
||||
@ -144,12 +144,12 @@ dtuple_t*
|
||||
dtuple_create_for_mysql(
|
||||
/*====================*/
|
||||
/* out, own created dtuple */
|
||||
void** heap, /* out: created memory heap */
|
||||
ulint n_fields) /* in: number of fields */
|
||||
void** heap, /* out: created memory heap */
|
||||
ulint n_fields) /* in: number of fields */
|
||||
{
|
||||
*heap = (void*)mem_heap_create(500);
|
||||
*heap = (void*)mem_heap_create(500);
|
||||
|
||||
return(dtuple_create(*((mem_heap_t**)heap), n_fields));
|
||||
return(dtuple_create(*((mem_heap_t**)heap), n_fields));
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
@ -160,7 +160,7 @@ dtuple_free_for_mysql(
|
||||
/*==================*/
|
||||
void* heap) /* in: memory heap where tuple was created */
|
||||
{
|
||||
mem_heap_free((mem_heap_t*)heap);
|
||||
mem_heap_free((mem_heap_t*)heap);
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
@ -189,7 +189,7 @@ dfield_check_typed_no_assert(
|
||||
dfield_t* field) /* in: data field */
|
||||
{
|
||||
if (dfield_get_type(field)->mtype > DATA_MYSQL
|
||||
|| dfield_get_type(field)->mtype < DATA_VARCHAR) {
|
||||
|| dfield_get_type(field)->mtype < DATA_VARCHAR) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: data field type %lu, len %lu\n",
|
||||
@ -211,7 +211,7 @@ dtuple_check_typed_no_assert(
|
||||
dtuple_t* tuple) /* in: tuple */
|
||||
{
|
||||
dfield_t* field;
|
||||
ulint i;
|
||||
ulint i;
|
||||
|
||||
if (dtuple_get_n_fields(tuple) > REC_MAX_N_FIELDS) {
|
||||
fprintf(stderr,
|
||||
@ -247,7 +247,7 @@ dfield_check_typed(
|
||||
dfield_t* field) /* in: data field */
|
||||
{
|
||||
if (dfield_get_type(field)->mtype > DATA_MYSQL
|
||||
|| dfield_get_type(field)->mtype < DATA_VARCHAR) {
|
||||
|| dfield_get_type(field)->mtype < DATA_VARCHAR) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: data field type %lu, len %lu\n",
|
||||
@ -270,7 +270,7 @@ dtuple_check_typed(
|
||||
dtuple_t* tuple) /* in: tuple */
|
||||
{
|
||||
dfield_t* field;
|
||||
ulint i;
|
||||
ulint i;
|
||||
|
||||
for (i = 0; i < dtuple_get_n_fields(tuple); i++) {
|
||||
|
||||
@ -294,11 +294,11 @@ dtuple_validate(
|
||||
dtuple_t* tuple) /* in: tuple */
|
||||
{
|
||||
dfield_t* field;
|
||||
byte* data;
|
||||
ulint n_fields;
|
||||
ulint len;
|
||||
ulint i;
|
||||
ulint j;
|
||||
byte* data;
|
||||
ulint n_fields;
|
||||
ulint len;
|
||||
ulint i;
|
||||
ulint j;
|
||||
|
||||
ut_ad(tuple->magic_n == DATA_TUPLE_MAGIC_N);
|
||||
|
||||
@@ -431,6 +431,22 @@ dfield_print_also_hex(
	}
}

+/*****************************************************************
+Print a dfield value using ut_print_buf. */
+
+void
+dfield_print_raw(
+/*=============*/
+	FILE*		f,	/* in: output stream */
+	dfield_t*	dfield)	/* in: dfield */
+{
+	if (dfield->len != UNIV_SQL_NULL) {
+		ut_print_buf(f, dfield->data, dfield->len);
+	} else {
+		fputs(" SQL NULL", f);
+	}
+}
+
/**************************************************************
The following function prints the contents of a tuple. */

@@ -440,7 +456,6 @@ dtuple_print(
	FILE*		f,	/* in: output stream */
	dtuple_t*	tuple)	/* in: tuple */
{
-	dfield_t*	field;
	ulint		n_fields;
	ulint		i;

@@ -451,13 +466,7 @@ dtuple_print(
	for (i = 0; i < n_fields; i++) {
		fprintf(f, " %lu:", (ulong) i);

-		field = dtuple_get_nth_field(tuple, i);
-
-		if (field->len != UNIV_SQL_NULL) {
-			ut_print_buf(f, field->data, field->len);
-		} else {
-			fputs(" SQL NULL", f);
-		}
+		dfield_print_raw(f, dtuple_get_nth_field(tuple, i));

		putc(';', f);
	}
@@ -525,9 +534,9 @@ dtuple_convert_big_rec(
	n_fields = 0;

	while (rec_get_converted_size(index, entry)
-		>= ut_min(page_get_free_space_of_empty(
-				index->table->comp) / 2,
-			REC_MAX_DATA_SIZE)) {
+		>= ut_min(page_get_free_space_of_empty(
+				dict_table_is_comp(index->table)) / 2,
+			REC_MAX_DATA_SIZE)) {

		longest = 0;
		for (i = dict_index_get_n_unique_in_tree(index);
@@ -551,11 +560,11 @@ dtuple_convert_big_rec(
			dfield = dtuple_get_nth_field(entry, i);

			if (dfield->len != UNIV_SQL_NULL &&
			    dfield->len > longest) {

				longest = dfield->len;

				longest_i = i;
			}
		}
	}
@@ -563,7 +572,9 @@ dtuple_convert_big_rec(
	/* We do not store externally fields which are smaller than
	DICT_MAX_INDEX_COL_LEN */

-	ut_a(DICT_MAX_INDEX_COL_LEN > REC_1BYTE_OFFS_LIMIT);
+#if DICT_MAX_INDEX_COL_LEN <= REC_1BYTE_OFFS_LIMIT
+# error "DICT_MAX_INDEX_COL_LEN <= REC_1BYTE_OFFS_LIMIT"
+#endif

	if (longest < BTR_EXTERN_FIELD_REF_SIZE + 10
					+ DICT_MAX_INDEX_COL_LEN) {
@@ -641,7 +652,7 @@ dtuple_convert_back_big_rec(
		ut_memcpy(((byte*)dfield->data)
				+ dfield->len - BTR_EXTERN_FIELD_REF_SIZE,
			vector->fields[i].data,
			vector->fields[i].len);
		dfield->len = dfield->len + vector->fields[i].len
				- BTR_EXTERN_FIELD_REF_SIZE;
	}

@ -30,7 +30,7 @@ innobase_get_at_most_n_mbchars(
|
||||
ulint prefix_len, /* in: prefix length in bytes of the index
|
||||
(this has to be divided by mbmaxlen to get the
|
||||
number of CHARACTERS n in the prefix) */
|
||||
ulint data_len, /* in: length of the string in bytes */
|
||||
ulint data_len, /* in: length of the string in bytes */
|
||||
const char* str); /* in: character string */
|
||||
|
||||
/* At the database startup we store the default-charset collation number of
|
||||
@ -41,7 +41,7 @@ charset-collation code for them. */
|
||||
ulint data_mysql_default_charset_coll = 99999999;
|
||||
|
||||
dtype_t dtype_binary_val = {DATA_BINARY, 0, 0, 0, 0, 0};
|
||||
dtype_t* dtype_binary = &dtype_binary_val;
|
||||
dtype_t* dtype_binary = &dtype_binary_val;
|
||||
|
||||
/*************************************************************************
|
||||
Determine how many bytes the first n characters of the given string occupy.
|
||||
@ -97,9 +97,9 @@ dtype_is_string_type(
|
||||
/* out: TRUE if string type */
|
||||
ulint mtype) /* in: InnoDB main data type code: DATA_CHAR, ... */
|
||||
{
|
||||
if (mtype <= DATA_BLOB
|
||||
|| mtype == DATA_MYSQL
|
||||
|| mtype == DATA_VARMYSQL) {
|
||||
if (mtype <= DATA_BLOB
|
||||
|| mtype == DATA_MYSQL
|
||||
|| mtype == DATA_VARMYSQL) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
@ -119,9 +119,9 @@ dtype_is_binary_string_type(
|
||||
ulint mtype, /* in: main data type */
|
||||
ulint prtype) /* in: precise type */
|
||||
{
|
||||
if ((mtype == DATA_FIXBINARY)
|
||||
|| (mtype == DATA_BINARY)
|
||||
|| (mtype == DATA_BLOB && (prtype & DATA_BINARY_TYPE))) {
|
||||
if ((mtype == DATA_FIXBINARY)
|
||||
|| (mtype == DATA_BINARY)
|
||||
|| (mtype == DATA_BLOB && (prtype & DATA_BINARY_TYPE))) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
@ -143,7 +143,7 @@ dtype_is_non_binary_string_type(
|
||||
ulint prtype) /* in: precise type */
|
||||
{
|
||||
if (dtype_is_string_type(mtype) == TRUE
|
||||
&& dtype_is_binary_string_type(mtype, prtype) == FALSE) {
|
||||
&& dtype_is_binary_string_type(mtype, prtype) == FALSE) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
@@ -252,7 +252,7 @@ dtype_print(
	} else if (prtype == DATA_ENGLISH) {
		fputs("DATA_ENGLISH", stderr);
	} else {
-		fprintf(stderr, "prtype %lu", (ulong) mtype);
+		fprintf(stderr, "prtype %lu", (ulong) prtype);
	}
}

@ -59,7 +59,7 @@ dict_hdr_get_new_id(
|
||||
mtr_t mtr;
|
||||
|
||||
ut_ad((type == DICT_HDR_TABLE_ID) || (type == DICT_HDR_INDEX_ID)
|
||||
|| (type == DICT_HDR_MIX_ID));
|
||||
|| (type == DICT_HDR_MIX_ID));
|
||||
|
||||
mtr_start(&mtr);
|
||||
|
||||
@ -237,16 +237,15 @@ dict_boot(void)
|
||||
header. */
|
||||
|
||||
dict_sys->row_id = ut_dulint_add(
|
||||
ut_dulint_align_up(
|
||||
mtr_read_dulint(dict_hdr + DICT_HDR_ROW_ID,
|
||||
&mtr),
|
||||
DICT_HDR_ROW_ID_WRITE_MARGIN),
|
||||
DICT_HDR_ROW_ID_WRITE_MARGIN);
|
||||
ut_dulint_align_up(
|
||||
mtr_read_dulint(dict_hdr + DICT_HDR_ROW_ID, &mtr),
|
||||
DICT_HDR_ROW_ID_WRITE_MARGIN),
|
||||
DICT_HDR_ROW_ID_WRITE_MARGIN);
|
||||
|
||||
/* Insert into the dictionary cache the descriptions of the basic
|
||||
system tables */
|
||||
/*-------------------------*/
|
||||
table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, FALSE);
|
||||
table = dict_mem_table_create("SYS_TABLES", DICT_HDR_SPACE, 8, 0);
|
||||
|
||||
dict_mem_table_add_col(table, "NAME", DATA_BINARY, 0, 0, 0);
|
||||
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0);
|
||||
@ -265,7 +264,7 @@ dict_boot(void)
|
||||
index = dict_mem_index_create("SYS_TABLES", "CLUST_IND",
|
||||
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 1);
|
||||
|
||||
dict_mem_index_add_field(index, "NAME", 0, 0);
|
||||
dict_mem_index_add_field(index, "NAME", 0);
|
||||
|
||||
index->id = DICT_TABLES_ID;
|
||||
|
||||
@ -275,14 +274,14 @@ dict_boot(void)
|
||||
/*-------------------------*/
|
||||
index = dict_mem_index_create("SYS_TABLES", "ID_IND",
|
||||
DICT_HDR_SPACE, DICT_UNIQUE, 1);
|
||||
dict_mem_index_add_field(index, "ID", 0, 0);
|
||||
dict_mem_index_add_field(index, "ID", 0);
|
||||
|
||||
index->id = DICT_TABLE_IDS_ID;
|
||||
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
|
||||
dict_hdr + DICT_HDR_TABLE_IDS, MLOG_4BYTES, &mtr));
|
||||
ut_a(success);
|
||||
/*-------------------------*/
|
||||
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, FALSE);
|
||||
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0);
|
||||
|
||||
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY,0,0,0);
|
||||
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0);
|
||||
@ -300,15 +299,15 @@ dict_boot(void)
|
||||
index = dict_mem_index_create("SYS_COLUMNS", "CLUST_IND",
|
||||
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 2);
|
||||
|
||||
dict_mem_index_add_field(index, "TABLE_ID", 0, 0);
|
||||
dict_mem_index_add_field(index, "POS", 0, 0);
|
||||
dict_mem_index_add_field(index, "TABLE_ID", 0);
|
||||
dict_mem_index_add_field(index, "POS", 0);
|
||||
|
||||
index->id = DICT_COLUMNS_ID;
|
||||
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
|
||||
dict_hdr + DICT_HDR_COLUMNS, MLOG_4BYTES, &mtr));
|
||||
ut_a(success);
|
||||
/*-------------------------*/
|
||||
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, FALSE);
|
||||
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0);
|
||||
|
||||
dict_mem_table_add_col(table, "TABLE_ID", DATA_BINARY, 0,0,0);
|
||||
dict_mem_table_add_col(table, "ID", DATA_BINARY, 0, 0, 0);
|
||||
@ -336,15 +335,15 @@ dict_boot(void)
|
||||
index = dict_mem_index_create("SYS_INDEXES", "CLUST_IND",
|
||||
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 2);
|
||||
|
||||
dict_mem_index_add_field(index, "TABLE_ID", 0, 0);
|
||||
dict_mem_index_add_field(index, "ID", 0, 0);
|
||||
dict_mem_index_add_field(index, "TABLE_ID", 0);
|
||||
dict_mem_index_add_field(index, "ID", 0);
|
||||
|
||||
index->id = DICT_INDEXES_ID;
|
||||
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
|
||||
dict_hdr + DICT_HDR_INDEXES, MLOG_4BYTES, &mtr));
|
||||
ut_a(success);
|
||||
/*-------------------------*/
|
||||
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, FALSE);
|
||||
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0);
|
||||
|
||||
dict_mem_table_add_col(table, "INDEX_ID", DATA_BINARY, 0,0,0);
|
||||
dict_mem_table_add_col(table, "POS", DATA_INT, 0, 4, 0);
|
||||
@ -357,8 +356,8 @@ dict_boot(void)
|
||||
index = dict_mem_index_create("SYS_FIELDS", "CLUST_IND",
|
||||
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 2);
|
||||
|
||||
dict_mem_index_add_field(index, "INDEX_ID", 0, 0);
|
||||
dict_mem_index_add_field(index, "POS", 0, 0);
|
||||
dict_mem_index_add_field(index, "INDEX_ID", 0);
|
||||
dict_mem_index_add_field(index, "POS", 0);
|
||||
|
||||
index->id = DICT_FIELDS_ID;
|
||||
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
|
||||
|
@ -33,7 +33,7 @@ dtuple_t*
|
||||
dict_create_sys_tables_tuple(
|
||||
/*=========================*/
|
||||
/* out: the tuple which should be inserted */
|
||||
dict_table_t* table, /* in: table */
|
||||
dict_table_t* table, /* in: table */
|
||||
mem_heap_t* heap) /* in: memory heap from which the memory for
|
||||
the built tuple is allocated */
|
||||
{
|
||||
@@ -62,9 +62,13 @@ dict_create_sys_tables_tuple(
	/* 4: N_COLS ---------------------------*/
	dfield = dtuple_get_nth_field(entry, 2);

+#if DICT_TF_COMPACT != 1
+#error
+#endif
+
	ptr = mem_heap_alloc(heap, 4);
	mach_write_to_4(ptr, table->n_def
-			| ((ulint) table->comp << 31));
+			| ((table->flags & DICT_TF_COMPACT) << 31));
	dfield_set_data(dfield, ptr, 4);
	/* 5: TYPE -----------------------------*/
	dfield = dtuple_get_nth_field(entry, 3);
@ -91,7 +95,7 @@ dict_create_sys_tables_tuple(
|
||||
/* 8: CLUSTER_NAME ---------------------*/
|
||||
dfield = dtuple_get_nth_field(entry, 6);
|
||||
|
||||
if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
|
||||
if (table->type == DICT_TABLE_CLUSTER_MEMBER) {
|
||||
dfield_set_data(dfield, table->cluster_name,
|
||||
ut_strlen(table->cluster_name));
|
||||
ut_error; /* Oracle-style clusters are not supported yet */
|
||||
@ -120,7 +124,7 @@ dtuple_t*
|
||||
dict_create_sys_columns_tuple(
|
||||
/*==========================*/
|
||||
/* out: the tuple which should be inserted */
|
||||
dict_table_t* table, /* in: table */
|
||||
dict_table_t* table, /* in: table */
|
||||
ulint i, /* in: column number */
|
||||
mem_heap_t* heap) /* in: memory heap from which the memory for
|
||||
the built tuple is allocated */
|
||||
@ -318,7 +322,7 @@ dtuple_t*
|
||||
dict_create_sys_indexes_tuple(
|
||||
/*==========================*/
|
||||
/* out: the tuple which should be inserted */
|
||||
dict_index_t* index, /* in: index */
|
||||
dict_index_t* index, /* in: index */
|
||||
mem_heap_t* heap) /* in: memory heap from which the memory for
|
||||
the built tuple is allocated */
|
||||
{
|
||||
@ -410,7 +414,7 @@ dtuple_t*
|
||||
dict_create_sys_fields_tuple(
|
||||
/*=========================*/
|
||||
/* out: the tuple which should be inserted */
|
||||
dict_index_t* index, /* in: index */
|
||||
dict_index_t* index, /* in: index */
|
||||
ulint i, /* in: field number */
|
||||
mem_heap_t* heap) /* in: memory heap from which the memory for
|
||||
the built tuple is allocated */
|
||||
@ -426,8 +430,8 @@ dict_create_sys_fields_tuple(
|
||||
ut_ad(index && heap);
|
||||
|
||||
for (j = 0; j < index->n_fields; j++) {
|
||||
if (dict_index_get_nth_field(index, j)->prefix_len > 0) {
|
||||
index_contains_column_prefix_field = TRUE;
|
||||
if (dict_index_get_nth_field(index, j)->prefix_len > 0) {
|
||||
index_contains_column_prefix_field = TRUE;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -455,13 +459,13 @@ dict_create_sys_fields_tuple(
		we store the number of the field to the 2 HIGH bytes
		and the prefix length to the 2 low bytes, */

		mach_write_to_4(ptr, (i << 16) + field->prefix_len);
	} else {
		/* Else we store the number of the field to the 2 LOW bytes.
		This is to keep the storage format compatible with
		InnoDB versions < 4.0.14. */

		mach_write_to_4(ptr, i);
	}

	dfield_set_data(dfield, ptr, 4);
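
The hunk above packs two values into the 4-byte SYS_FIELDS.POS column when an index contains column prefixes: the field number goes into the two high bytes and the prefix length into the two low bytes; otherwise only the field number is stored, for compatibility with InnoDB versions before 4.0.14. Below is a minimal standalone sketch of that packing; the helper names pos_pack/pos_unpack are illustrative only and not part of the patch.

#include <stdio.h>

typedef unsigned long ulint;

/* Sketch of the SYS_FIELDS POS encoding used above: field number in the
two high bytes, column prefix length in the two low bytes. */
static ulint
pos_pack(ulint field_no, ulint prefix_len)
{
	return((field_no << 16) + prefix_len);
}

static void
pos_unpack(ulint pos, ulint* field_no, ulint* prefix_len)
{
	*field_no = pos >> 16;
	*prefix_len = pos & 0xFFFFUL;
}

int
main(void)
{
	ulint	f;
	ulint	p;

	pos_unpack(pos_pack(3, 255), &f, &p);
	printf("field %lu, prefix %lu\n", f, p);	/* prints: field 3, prefix 255 */

	return(0);
}
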
@ -547,7 +551,7 @@ dict_build_index_def_step(
|
||||
node->table = table;
|
||||
|
||||
ut_ad((UT_LIST_GET_LEN(table->indexes) > 0)
|
||||
|| (index->type & DICT_CLUSTERED));
|
||||
|| (index->type & DICT_CLUSTERED));
|
||||
|
||||
index->id = dict_hdr_get_new_id(DICT_HDR_INDEX_ID);
|
||||
|
||||
@ -634,7 +638,7 @@ dict_create_index_tree_step(
|
||||
btr_pcur_move_to_next_user_rec(&pcur, &mtr);
|
||||
|
||||
node->page_no = btr_create(index->type, index->space, index->id,
|
||||
table->comp, &mtr);
|
||||
dict_table_is_comp(table), &mtr);
|
||||
/* printf("Created a new index tree in space %lu root page %lu\n",
|
||||
index->space, index->page_no); */
|
||||
|
||||
@ -671,7 +675,7 @@ dict_drop_index_tree(
|
||||
ut_ad(mutex_own(&(dict_sys->mutex)));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
ut_a(!dict_sys->sys_indexes->comp);
|
||||
ut_a(!dict_table_is_comp(dict_sys->sys_indexes));
|
||||
ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len);
|
||||
|
||||
ut_ad(len == 4);
|
||||
@ -743,7 +747,7 @@ dict_truncate_index_tree(
|
||||
ut_ad(mutex_own(&(dict_sys->mutex)));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
ut_a(!dict_sys->sys_indexes->comp);
|
||||
ut_a(!dict_table_is_comp(dict_sys->sys_indexes));
|
||||
ptr = rec_get_nth_field_old(rec, DICT_SYS_INDEXES_PAGE_NO_FIELD, &len);
|
||||
|
||||
ut_ad(len == 4);
|
||||
@ -1167,16 +1171,16 @@ dict_create_or_check_foreign_constraint_tables(void)
|
||||
table2 = dict_table_get_low("SYS_FOREIGN_COLS");
|
||||
|
||||
if (table1 && table2
|
||||
&& UT_LIST_GET_LEN(table1->indexes) == 3
|
||||
&& UT_LIST_GET_LEN(table2->indexes) == 1) {
|
||||
&& UT_LIST_GET_LEN(table1->indexes) == 3
|
||||
&& UT_LIST_GET_LEN(table2->indexes) == 1) {
|
||||
|
||||
/* Foreign constraint system tables have already been
|
||||
created, and they are ok */
|
||||
/* Foreign constraint system tables have already been
|
||||
created, and they are ok */
|
||||
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
return(DB_SUCCESS);
|
||||
}
|
||||
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
||||
@ -1263,9 +1267,9 @@ dict_create_or_check_foreign_constraint_tables(void)
|
||||
|
||||
row_mysql_unlock_data_dictionary(trx);
|
||||
|
||||
trx_free_for_mysql(trx);
|
||||
trx_free_for_mysql(trx);
|
||||
|
||||
if (error == DB_SUCCESS) {
|
||||
if (error == DB_SUCCESS) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Foreign key constraint system tables created\n");
|
||||
}
|
||||
@ -1324,7 +1328,7 @@ dict_create_add_foreigns_to_dictionary(
|
||||
|
||||
if (NULL == dict_table_get_low("SYS_FOREIGN")) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: table SYS_FOREIGN not found from internal data dictionary\n");
|
||||
"InnoDB: table SYS_FOREIGN not found from internal data dictionary\n");
|
||||
|
||||
return(DB_ERROR);
|
||||
}
|
||||
@ -1432,12 +1436,12 @@ loop:
|
||||
"in front of the user-defined constraint name).\n",
|
||||
ef);
|
||||
fputs("Note that InnoDB's FOREIGN KEY system tables store\n"
|
||||
"constraint names as case-insensitive, with the\n"
|
||||
"MySQL standard latin1_swedish_ci collation. If you\n"
|
||||
"create tables or databases whose names differ only in\n"
|
||||
"the character case, then collisions in constraint\n"
|
||||
"names can occur. Workaround: name your constraints\n"
|
||||
"explicitly with unique names.\n",
|
||||
"constraint names as case-insensitive, with the\n"
|
||||
"MySQL standard latin1_swedish_ci collation. If you\n"
|
||||
"create tables or databases whose names differ only in\n"
|
||||
"the character case, then collisions in constraint\n"
|
||||
"names can occur. Workaround: name your constraints\n"
|
||||
"explicitly with unique names.\n",
|
||||
ef);
|
||||
|
||||
mutex_exit(&dict_foreign_err_mutex);
|
||||
@ -1446,7 +1450,7 @@ loop:
|
||||
}
|
||||
|
||||
if (error != DB_SUCCESS) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Foreign key constraint creation failed:\n"
|
||||
"InnoDB: internal error number %lu\n", (ulong) error);
|
||||
|
||||
|
@ -196,9 +196,10 @@ dict_foreign_free(
|
||||
/* Stream for storing detailed information about the latest foreign key
|
||||
and unique key errors */
|
||||
FILE* dict_foreign_err_file = NULL;
|
||||
mutex_t dict_foreign_err_mutex; /* mutex protecting the foreign
|
||||
mutex_t dict_foreign_err_mutex; /* mutex protecting the foreign
|
||||
and unique error buffers */
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
/**********************************************************************
|
||||
Makes all characters in a NUL-terminated UTF-8 string lower case. */
|
||||
|
||||
@ -209,6 +210,7 @@ dict_casedn_str(
|
||||
{
|
||||
innobase_casedn_str(a);
|
||||
}
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
/************************************************************************
|
||||
Checks if the database name in two table names is the same. */
|
||||
@ -594,9 +596,9 @@ dict_index_get_nth_field_pos(
|
||||
field = dict_index_get_nth_field(index, pos);
|
||||
|
||||
if (field->col == field2->col
|
||||
&& (field->prefix_len == 0
|
||||
&& (field->prefix_len == 0
|
||||
|| (field->prefix_len >= field2->prefix_len
|
||||
&& field2->prefix_len != 0))) {
|
||||
&& field2->prefix_len != 0))) {
|
||||
|
||||
return(pos);
|
||||
}
|
||||
@ -751,7 +753,7 @@ dict_table_get(
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
||||
if (table != NULL) {
|
||||
if (!table->stat_initialized) {
|
||||
if (!table->stat_initialized) {
|
||||
dict_update_statistics(table);
|
||||
}
|
||||
}
|
||||
@ -780,13 +782,13 @@ dict_table_get_and_increment_handle_count(
|
||||
|
||||
if (table != NULL) {
|
||||
|
||||
table->n_mysql_handles_opened++;
|
||||
table->n_mysql_handles_opened++;
|
||||
}
|
||||
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
|
||||
if (table != NULL) {
|
||||
if (!table->stat_initialized && !table->ibd_file_missing) {
|
||||
if (!table->stat_initialized && !table->ibd_file_missing) {
|
||||
dict_update_statistics(table);
|
||||
}
|
||||
}
|
||||
@ -969,7 +971,7 @@ dict_table_rename_in_cache(
|
||||
if (table2) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: dictionary cache already contains a table of name %s\n",
|
||||
new_name);
|
||||
new_name);
|
||||
return(FALSE);
|
||||
}
|
||||
}
|
||||
@ -1085,10 +1087,10 @@ dict_table_rename_in_cache(
|
||||
old_id = mem_strdup(foreign->id);
|
||||
|
||||
if (ut_strlen(foreign->id) > ut_strlen(old_name)
|
||||
+ ((sizeof dict_ibfk) - 1)
|
||||
&& 0 == ut_memcmp(foreign->id, old_name,
|
||||
ut_strlen(old_name))
|
||||
&& 0 == ut_memcmp(
|
||||
+ ((sizeof dict_ibfk) - 1)
|
||||
&& 0 == ut_memcmp(foreign->id, old_name,
|
||||
ut_strlen(old_name))
|
||||
&& 0 == ut_memcmp(
|
||||
foreign->id + ut_strlen(old_name),
|
||||
dict_ibfk, (sizeof dict_ibfk) - 1)) {
|
||||
|
||||
@ -1096,7 +1098,7 @@ dict_table_rename_in_cache(
|
||||
|
||||
if (ut_strlen(table->name) > ut_strlen(old_name)) {
|
||||
foreign->id = mem_heap_alloc(
|
||||
foreign->heap,
|
||||
foreign->heap,
|
||||
ut_strlen(table->name)
|
||||
+ ut_strlen(old_id) + 1);
|
||||
}
|
||||
@ -1112,11 +1114,11 @@ dict_table_rename_in_cache(
|
||||
db_len = dict_get_db_name_len(table->name) + 1;
|
||||
|
||||
if (dict_get_db_name_len(table->name)
|
||||
> dict_get_db_name_len(foreign->id)) {
|
||||
> dict_get_db_name_len(foreign->id)) {
|
||||
|
||||
foreign->id = mem_heap_alloc(
|
||||
foreign->heap,
|
||||
db_len + ut_strlen(old_id) + 1);
|
||||
foreign->heap,
|
||||
db_len + ut_strlen(old_id) + 1);
|
||||
}
|
||||
|
||||
/* Replace the database prefix in id with the
|
||||
@ -1418,7 +1420,7 @@ dict_index_add_to_cache(
|
||||
}
|
||||
|
||||
ut_a(UT_LIST_GET_LEN(table->indexes) == 0
|
||||
|| (index->type & DICT_CLUSTERED) == 0);
|
||||
|| (index->type & DICT_CLUSTERED) == 0);
|
||||
}
|
||||
|
||||
success = dict_index_find_cols(table, index);
|
||||
@ -1580,7 +1582,7 @@ dict_index_find_cols(
|
||||
field = dict_index_get_nth_field(index, i);
|
||||
|
||||
fold = ut_fold_ulint_pair(ut_fold_string(table->name),
|
||||
ut_fold_string(field->name));
|
||||
ut_fold_string(field->name));
|
||||
|
||||
HASH_SEARCH(hash, dict_sys->col_hash, fold, col,
|
||||
(ut_strcmp(col->name, field->name) == 0)
|
||||
@ -1588,7 +1590,7 @@ dict_index_find_cols(
|
||||
== 0));
|
||||
if (col == NULL) {
|
||||
|
||||
return(FALSE);
|
||||
return(FALSE);
|
||||
} else {
|
||||
field->col = col;
|
||||
}
|
||||
@@ -1605,12 +1607,11 @@ dict_index_add_col(
/*===============*/
	dict_index_t*	index,		/* in: index */
	dict_col_t*	col,		/* in: column */
-	ulint		order,		/* in: order criterion */
	ulint		prefix_len)	/* in: column prefix length */
{
	dict_field_t*	field;

-	dict_mem_index_add_field(index, col->name, order, prefix_len);
+	dict_mem_index_add_field(index, col->name, prefix_len);

	field = dict_index_get_nth_field(index, index->n_def - 1);

@@ -1632,17 +1633,6 @@ dict_index_add_col(
	if (!(dtype_get_prtype(&col->type) & DATA_NOT_NULL)) {
		index->n_nullable++;
	}
-
-	if (index->n_def > 1) {
-		const dict_field_t*	field2 =
-			dict_index_get_nth_field(index, index->n_def - 2);
-		field->fixed_offs = (!field2->fixed_len ||
-			field2->fixed_offs == ULINT_UNDEFINED)
-			? ULINT_UNDEFINED
-			: field2->fixed_len + field2->fixed_offs;
-	} else {
-		field->fixed_offs = 0;
-	}
}

/***********************************************************************
|
||||
@ -1664,8 +1654,7 @@ dict_index_copy(
|
||||
for (i = start; i < end; i++) {
|
||||
|
||||
field = dict_index_get_nth_field(index2, i);
|
||||
dict_index_add_col(index1, field->col, field->order,
|
||||
field->prefix_len);
|
||||
dict_index_add_col(index1, field->col, field->prefix_len);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1750,10 +1739,8 @@ dict_index_build_internal_clust(
|
||||
|
||||
/* Create a new index object with certainly enough fields */
|
||||
new_index = dict_mem_index_create(table->name,
|
||||
index->name,
|
||||
table->space,
|
||||
index->type,
|
||||
index->n_fields + table->n_cols);
|
||||
index->name, table->space, index->type,
|
||||
index->n_fields + table->n_cols);
|
||||
|
||||
/* Copy other relevant data from the old index struct to the new
|
||||
struct: it inherits the values */
|
||||
@ -1769,7 +1756,7 @@ dict_index_build_internal_clust(
|
||||
|
||||
/* Add the mix id column */
|
||||
dict_index_add_col(new_index,
|
||||
dict_table_get_sys_col(table, DATA_MIX_ID), 0, 0);
|
||||
dict_table_get_sys_col(table, DATA_MIX_ID), 0);
|
||||
|
||||
/* Copy the rest of fields */
|
||||
dict_index_copy(new_index, index, table->mix_len,
|
||||
@@ -1801,21 +1788,27 @@ dict_index_build_internal_clust(

	trx_id_pos = new_index->n_def;

-	ut_ad(DATA_ROW_ID == 0);
-	ut_ad(DATA_TRX_ID == 1);
-	ut_ad(DATA_ROLL_PTR == 2);
+#if DATA_ROW_ID != 0
+# error "DATA_ROW_ID != 0"
+#endif
+#if DATA_TRX_ID != 1
+# error "DATA_TRX_ID != 1"
+#endif
+#if DATA_ROLL_PTR != 2
+# error "DATA_ROLL_PTR != 2"
+#endif

	if (!(index->type & DICT_UNIQUE)) {
		dict_index_add_col(new_index,
-			dict_table_get_sys_col(table, DATA_ROW_ID), 0, 0);
+			dict_table_get_sys_col(table, DATA_ROW_ID), 0);
		trx_id_pos++;
	}

	dict_index_add_col(new_index,
-		dict_table_get_sys_col(table, DATA_TRX_ID), 0, 0);
+		dict_table_get_sys_col(table, DATA_TRX_ID), 0);

	dict_index_add_col(new_index,
-		dict_table_get_sys_col(table, DATA_ROLL_PTR), 0, 0);
+		dict_table_get_sys_col(table, DATA_ROLL_PTR), 0);

	for (i = 0; i < trx_id_pos; i++) {

@ -1829,7 +1822,7 @@ dict_index_build_internal_clust(
|
||||
}
|
||||
|
||||
if (dict_index_get_nth_field(new_index, i)->prefix_len
|
||||
> 0) {
|
||||
> 0) {
|
||||
new_index->trx_id_offset = 0;
|
||||
|
||||
break;
|
||||
@ -1857,7 +1850,7 @@ dict_index_build_internal_clust(
|
||||
|
||||
if (field->prefix_len == 0) {
|
||||
|
||||
field->col->aux = 0;
|
||||
field->col->aux = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1869,7 +1862,7 @@ dict_index_build_internal_clust(
|
||||
ut_ad(col->type.mtype != DATA_SYS);
|
||||
|
||||
if (col->aux == ULINT_UNDEFINED) {
|
||||
dict_index_add_col(new_index, col, 0, 0);
|
||||
dict_index_add_col(new_index, col, 0);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1884,7 +1877,7 @@ dict_index_build_internal_clust(
|
||||
|
||||
if (field->prefix_len == 0) {
|
||||
|
||||
field->col->clust_pos = i;
|
||||
field->col->clust_pos = i;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1927,11 +1920,8 @@ dict_index_build_internal_non_clust(
|
||||
|
||||
/* Create a new index */
|
||||
new_index = dict_mem_index_create(table->name,
|
||||
index->name,
|
||||
index->space,
|
||||
index->type,
|
||||
index->n_fields
|
||||
+ 1 + clust_index->n_uniq);
|
||||
index->name, index->space, index->type,
|
||||
index->n_fields + 1 + clust_index->n_uniq);
|
||||
|
||||
/* Copy other relevant data from the old index
|
||||
struct to the new struct: it inherits the values */
|
||||
@ -1961,7 +1951,7 @@ dict_index_build_internal_non_clust(
|
||||
|
||||
if (field->prefix_len == 0) {
|
||||
|
||||
field->col->aux = 0;
|
||||
field->col->aux = 0;
|
||||
}
|
||||
}
|
||||
|
||||
@ -1973,8 +1963,8 @@ dict_index_build_internal_non_clust(
|
||||
field = dict_index_get_nth_field(clust_index, i);
|
||||
|
||||
if (field->col->aux == ULINT_UNDEFINED) {
|
||||
dict_index_add_col(new_index, field->col, 0,
|
||||
field->prefix_len);
|
||||
dict_index_add_col(new_index, field->col,
|
||||
field->prefix_len);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2104,8 +2094,11 @@ dict_foreign_find_index(
	dict_table_t*	table,	/* in: table */
	const char**	columns,/* in: array of column names */
	ulint		n_cols,	/* in: number of columns */
-	dict_index_t*	types_idx)/* in: NULL or an index to whose types the
-				column types must match */
+	dict_index_t*	types_idx, /* in: NULL or an index to whose types the
+				column types must match */
+	ibool		check_charsets) /* in: whether to check charsets.
+				only has an effect if types_idx !=
+				NULL. */
{
#ifndef UNIV_HOTBACKUP
	dict_index_t*	index;
@@ -2130,14 +2123,15 @@ dict_foreign_find_index(

			if (0 != innobase_strcasecmp(columns[i],
							col_name)) {
				break;
			}

			if (types_idx && !cmp_types_are_equal(
-					dict_index_get_nth_type(index, i),
-					dict_index_get_nth_type(types_idx, i))) {
+					dict_index_get_nth_type(index, i),
+					dict_index_get_nth_type(types_idx, i),
+					check_charsets)) {

				break;
			}
		}

@ -2157,6 +2151,7 @@ dict_foreign_find_index(
|
||||
InnoDB Hot Backup builds. Besides, this function should never
|
||||
be called in InnoDB Hot Backup. */
|
||||
ut_error;
|
||||
return(NULL);
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
}
|
||||
|
||||
@@ -2190,8 +2185,9 @@ dict_foreign_error_report(
	fputs(msg, file);
	fputs(" Constraint:\n", file);
	dict_print_info_on_foreign_key_in_create_format(file, NULL, fk, TRUE);
+	putc('\n', file);
	if (fk->foreign_index) {
-		fputs("\nThe index in the foreign key in table is ", file);
+		fputs("The index in the foreign key in table is ", file);
		ut_print_name(file, NULL, fk->foreign_index->name);
		fputs(
"\nSee http://dev.mysql.com/doc/mysql/en/InnoDB_foreign_key_constraints.html\n"
@@ -2212,14 +2208,15 @@ dict_foreign_add_to_cache(
/*======================*/
				/* out: DB_SUCCESS or error code */
	dict_foreign_t*	foreign,	/* in, own: foreign key constraint */
-	ibool		check_types)	/* in: TRUE=check type compatibility */
+	ibool		check_charsets)	/* in: TRUE=check charset
+					compatibility */
{
	dict_table_t*	for_table;
	dict_table_t*	ref_table;
	dict_foreign_t*	for_in_cache = NULL;
	dict_index_t*	index;
	ibool		added_to_referenced_list= FALSE;
	FILE*		ef = dict_foreign_err_file;

#ifdef UNIV_SYNC_DEBUG
	ut_ad(mutex_own(&(dict_sys->mutex)));
@@ -2248,16 +2245,10 @@ dict_foreign_add_to_cache(
	}

	if (for_in_cache->referenced_table == NULL && ref_table) {
-		dict_index_t*	types_idx;
-		if (check_types) {
-			types_idx = for_in_cache->foreign_index;
-		} else {
-			types_idx = NULL;
-		}
		index = dict_foreign_find_index(ref_table,
			(const char**) for_in_cache->referenced_col_names,
			for_in_cache->n_fields,
-			types_idx);
+			for_in_cache->foreign_index, check_charsets);

		if (index == NULL) {
			dict_foreign_error_report(ef, for_in_cache,
@ -2269,7 +2260,7 @@ dict_foreign_add_to_cache(
|
||||
mem_heap_free(foreign->heap);
|
||||
}
|
||||
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
for_in_cache->referenced_table = ref_table;
|
||||
@ -2281,16 +2272,10 @@ dict_foreign_add_to_cache(
|
||||
}
|
||||
|
||||
if (for_in_cache->foreign_table == NULL && for_table) {
|
||||
dict_index_t* types_idx;
|
||||
if (check_types) {
|
||||
types_idx = for_in_cache->referenced_index;
|
||||
} else {
|
||||
types_idx = NULL;
|
||||
}
|
||||
index = dict_foreign_find_index(for_table,
|
||||
(const char**) for_in_cache->foreign_col_names,
|
||||
for_in_cache->n_fields,
|
||||
types_idx);
|
||||
for_in_cache->referenced_index, check_charsets);
|
||||
|
||||
if (index == NULL) {
|
||||
dict_foreign_error_report(ef, for_in_cache,
|
||||
@ -2308,7 +2293,7 @@ dict_foreign_add_to_cache(
|
||||
mem_heap_free(foreign->heap);
|
||||
}
|
||||
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
return(DB_CANNOT_ADD_CONSTRAINT);
|
||||
}
|
||||
|
||||
for_in_cache->foreign_table = for_table;
|
||||
@ -2459,8 +2444,8 @@ dict_scan_id(
|
||||
}
|
||||
} else {
|
||||
while (!isspace(*ptr) && *ptr != '(' && *ptr != ')'
|
||||
&& (accept_also_dot || *ptr != '.')
|
||||
&& *ptr != ',' && *ptr != '\0') {
|
||||
&& (accept_also_dot || *ptr != '.')
|
||||
&& *ptr != ',' && *ptr != '\0') {
|
||||
|
||||
ptr++;
|
||||
}
|
||||
@ -2499,7 +2484,7 @@ dict_scan_id(
|
||||
id_len = strlen((char*) b);
|
||||
|
||||
if (id_len >= 3 && b[id_len - 1] == 0xA0
|
||||
&& b[id_len - 2] == 0xC2) {
|
||||
&& b[id_len - 2] == 0xC2) {
|
||||
|
||||
/* Strip the 2 last bytes */
|
||||
|
||||
@ -2542,18 +2527,18 @@ dict_scan_col(
|
||||
*success = TRUE;
|
||||
*column = NULL;
|
||||
} else {
|
||||
for (i = 0; i < dict_table_get_n_cols(table); i++) {
|
||||
for (i = 0; i < dict_table_get_n_cols(table); i++) {
|
||||
|
||||
col = dict_table_get_nth_col(table, i);
|
||||
|
||||
if (0 == innobase_strcasecmp(col->name, *name)) {
|
||||
/* Found */
|
||||
/* Found */
|
||||
|
||||
*success = TRUE;
|
||||
*column = col;
|
||||
strcpy((char*) *name, col->name);
|
||||
*success = TRUE;
|
||||
*column = col;
|
||||
strcpy((char*) *name, col->name);
|
||||
|
||||
break;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2564,6 +2549,7 @@ dict_scan_col(
|
||||
InnoDB Hot Backup builds. Besides, this function should never
|
||||
be called in InnoDB Hot Backup. */
|
||||
ut_error;
|
||||
return(NULL);
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
}
|
||||
|
||||
@ -2669,6 +2655,7 @@ dict_scan_table_name(
|
||||
InnoDB Hot Backup builds. Besides, this function should never
|
||||
be called in InnoDB Hot Backup. */
|
||||
ut_error;
|
||||
return(NULL);
|
||||
#endif /* UNIV_HOTBACKUP */
|
||||
}
|
||||
|
||||
@ -2714,8 +2701,8 @@ dict_strip_comments(
|
||||
char* str;
|
||||
const char* sptr;
|
||||
char* ptr;
|
||||
/* unclosed quote character (0 if none) */
|
||||
char quote = 0;
|
||||
/* unclosed quote character (0 if none) */
|
||||
char quote = 0;
|
||||
|
||||
str = mem_alloc(strlen(sql_string) + 1);
|
||||
|
||||
@ -2743,15 +2730,15 @@ scan_more:
|
||||
/* Starting quote: remember the quote character. */
|
||||
quote = *sptr;
|
||||
} else if (*sptr == '#'
|
||||
|| (sptr[0] == '-' && sptr[1] == '-' &&
|
||||
sptr[2] == ' ')) {
|
||||
|| (sptr[0] == '-' && sptr[1] == '-' &&
|
||||
sptr[2] == ' ')) {
|
||||
for (;;) {
|
||||
/* In Unix a newline is 0x0A while in Windows
|
||||
it is 0x0D followed by 0x0A */
|
||||
|
||||
if (*sptr == (char)0x0A
|
||||
|| *sptr == (char)0x0D
|
||||
|| *sptr == '\0') {
|
||||
|| *sptr == (char)0x0D
|
||||
|| *sptr == '\0') {
|
||||
|
||||
goto scan_more;
|
||||
}
|
||||
@ -2762,7 +2749,7 @@ scan_more:
|
||||
for (;;) {
|
||||
if (*sptr == '*' && *(sptr + 1) == '/') {
|
||||
|
||||
sptr += 2;
|
||||
sptr += 2;
|
||||
|
||||
goto scan_more;
|
||||
}
|
||||
@@ -2808,9 +2795,10 @@ dict_table_get_highest_foreign_id(

	while (foreign) {
		if (ut_strlen(foreign->id) > ((sizeof dict_ibfk) - 1) + len
-		    && 0 == ut_memcmp(foreign->id, table->name, len)
-		    && 0 == ut_memcmp(foreign->id + len,
-				dict_ibfk, (sizeof dict_ibfk) - 1)) {
+		    && 0 == ut_memcmp(foreign->id, table->name, len)
+		    && 0 == ut_memcmp(foreign->id + len,
+				dict_ibfk, (sizeof dict_ibfk) - 1)
+		    && foreign->id[len + ((sizeof dict_ibfk) - 1)] != '0') {
			/* It is of the >= 4.0.18 format */

			id = strtoul(foreign->id + len + ((sizeof dict_ibfk) - 1),
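
The added condition above makes the highest-foreign-id scan skip constraint identifiers whose numeric suffix begins with '0' (the TABLENAME_ibfk_0... pattern), so such names are not mistaken for auto-generated ids. A rough standalone sketch of that check follows, under the assumption that dict_ibfk is the literal "_ibfk_"; the helper name is illustrative and not from the patch.

#include <string.h>

/* Illustrative check only: an id counts as auto-generated when it is of the
form "<table>_ibfk_<number>" and the number does not start with '0'. */
static int
is_auto_generated_fk_id(const char* id, const char* table_name)
{
	static const char dict_ibfk[] = "_ibfk_";
	size_t	len = strlen(table_name);

	if (strlen(id) <= len + (sizeof dict_ibfk) - 1
	    || 0 != memcmp(id, table_name, len)
	    || 0 != memcmp(id + len, dict_ibfk, (sizeof dict_ibfk) - 1)) {

		return(0);	/* not of the <table>_ibfk_N form */
	}

	/* Reject "<table>_ibfk_0...": treated as a user-defined name. */
	return(id[len + (sizeof dict_ibfk) - 1] != '0');
}
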
@ -2884,7 +2872,7 @@ dict_create_foreign_constraints_low(
|
||||
ulint highest_id_so_far = 0;
|
||||
dict_index_t* index;
|
||||
dict_foreign_t* foreign;
|
||||
const char* ptr = sql_string;
|
||||
const char* ptr = sql_string;
|
||||
const char* start_of_latest_foreign = sql_string;
|
||||
FILE* ef = dict_foreign_err_file;
|
||||
const char* constraint_name;
|
||||
@ -2982,7 +2970,7 @@ loop:
|
||||
ut_a(success);
|
||||
|
||||
if (!isspace(*ptr) && *ptr != '"' && *ptr != '`') {
|
||||
goto loop;
|
||||
goto loop;
|
||||
}
|
||||
|
||||
while (isspace(*ptr)) {
|
||||
@ -3005,8 +2993,7 @@ loop:
|
||||
command, determine if there are any foreign keys, and
|
||||
if so, immediately reject the command if the table is a
|
||||
temporary one. For now, this kludge will work. */
|
||||
if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0))
|
||||
{
|
||||
if (reject_fks && (UT_LIST_GET_LEN(table->foreign_list) > 0)) {
|
||||
return DB_CANNOT_ADD_CONSTRAINT;
|
||||
}
|
||||
|
||||
@ -3028,7 +3015,7 @@ loop:
|
||||
}
|
||||
|
||||
if (!isspace(*ptr)) {
|
||||
goto loop;
|
||||
goto loop;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "KEY", &success);
|
||||
@ -3057,7 +3044,7 @@ loop:
|
||||
/* We do not flag a syntax error here because in an
|
||||
ALTER TABLE we may also have DROP FOREIGN KEY abc */
|
||||
|
||||
goto loop;
|
||||
goto loop;
|
||||
}
|
||||
}
|
||||
|
||||
@ -3097,7 +3084,7 @@ col_loop1:
|
||||
/* Try to find an index which contains the columns
|
||||
as the first fields and in the right order */
|
||||
|
||||
index = dict_foreign_find_index(table, column_names, i, NULL);
|
||||
index = dict_foreign_find_index(table, column_names, i, NULL, TRUE);
|
||||
|
||||
if (!index) {
|
||||
mutex_enter(&dict_foreign_err_mutex);
|
||||
@ -3362,8 +3349,7 @@ try_find_index:
|
||||
|
||||
if (referenced_table) {
|
||||
index = dict_foreign_find_index(referenced_table,
|
||||
column_names, i,
|
||||
foreign->foreign_index);
|
||||
column_names, i, foreign->foreign_index, TRUE);
|
||||
if (!index) {
|
||||
dict_foreign_free(foreign);
|
||||
mutex_enter(&dict_foreign_err_mutex);
|
||||
@ -3505,14 +3491,14 @@ loop:
|
||||
|
||||
if (!isspace(*ptr)) {
|
||||
|
||||
goto loop;
|
||||
goto loop;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "FOREIGN", &success);
|
||||
|
||||
if (!success) {
|
||||
|
||||
goto loop;
|
||||
goto loop;
|
||||
}
|
||||
|
||||
ptr = dict_accept(ptr, "KEY", &success);
|
||||
@ -3539,8 +3525,8 @@ loop:
|
||||
|
||||
while (foreign != NULL) {
|
||||
if (0 == strcmp(foreign->id, id)
|
||||
|| (strchr(foreign->id, '/')
|
||||
&& 0 == strcmp(id,
|
||||
|| (strchr(foreign->id, '/')
|
||||
&& 0 == strcmp(id,
|
||||
dict_remove_db_name(foreign->id)))) {
|
||||
/* Found */
|
||||
break;
|
||||
@ -3705,7 +3691,7 @@ dict_tree_find_index_low(
|
||||
&& UNIV_UNLIKELY(table->type != DICT_TABLE_ORDINARY)) {
|
||||
|
||||
/* Get the mix id of the record */
|
||||
ut_a(!table->comp);
|
||||
ut_a(!dict_table_is_comp(table));
|
||||
|
||||
mix_id = mach_dulint_read_compressed(
|
||||
rec_get_nth_field_old(rec, table->mix_len, &len));
|
||||
@ -3801,7 +3787,7 @@ dict_is_mixed_table_rec(
|
||||
byte* mix_id_field;
|
||||
ulint len;
|
||||
|
||||
ut_ad(!table->comp);
|
||||
ut_ad(!dict_table_is_comp(table));
|
||||
|
||||
mix_id_field = rec_get_nth_field_old(rec,
|
||||
table->mix_len, &len);
|
||||
@ -3847,7 +3833,7 @@ dict_tree_build_node_ptr(
|
||||
pointer */
|
||||
ulint page_no,/* in: page number to put in node pointer */
|
||||
mem_heap_t* heap, /* in: memory heap where pointer created */
|
||||
ulint level) /* in: level of rec in tree: 0 means leaf
|
||||
ulint level) /* in: level of rec in tree: 0 means leaf
|
||||
level */
|
||||
{
|
||||
dtuple_t* tuple;
|
||||
@ -3864,12 +3850,12 @@ dict_tree_build_node_ptr(
|
||||
on non-leaf levels we remove the last field, which
|
||||
contains the page number of the child page */
|
||||
|
||||
ut_a(!ind->table->comp);
|
||||
ut_a(!dict_table_is_comp(ind->table));
|
||||
n_unique = rec_get_n_fields_old(rec);
|
||||
|
||||
if (level > 0) {
|
||||
ut_a(n_unique > 1);
|
||||
n_unique--;
|
||||
ut_a(n_unique > 1);
|
||||
n_unique--;
|
||||
}
|
||||
} else {
|
||||
n_unique = dict_index_get_n_unique_in_tree(ind);
|
||||
@ -3927,7 +3913,7 @@ dict_tree_copy_rec_order_prefix(
|
||||
index = dict_tree_find_index_low(tree, rec);
|
||||
|
||||
if (UNIV_UNLIKELY(tree->type & DICT_UNIVERSAL)) {
|
||||
ut_a(!index->table->comp);
|
||||
ut_a(!dict_table_is_comp(index->table));
|
||||
n = rec_get_n_fields_old(rec);
|
||||
} else {
|
||||
n = dict_index_get_n_unique_in_tree(index);
|
||||
@ -3954,7 +3940,8 @@ dict_tree_build_data_tuple(
|
||||
|
||||
ind = dict_tree_find_index_low(tree, rec);
|
||||
|
||||
ut_ad(ind->table->comp || n_fields <= rec_get_n_fields_old(rec));
|
||||
ut_ad(dict_table_is_comp(ind->table)
|
||||
|| n_fields <= rec_get_n_fields_old(rec));
|
||||
|
||||
tuple = dtuple_create(heap, n_fields);
|
||||
|
||||
@ -3978,7 +3965,7 @@ dict_index_calc_min_rec_len(
|
||||
ulint sum = 0;
|
||||
ulint i;
|
||||
|
||||
if (UNIV_LIKELY(index->table->comp)) {
|
||||
if (dict_table_is_comp(index->table)) {
|
||||
ulint nullable = 0;
|
||||
sum = REC_N_NEW_EXTRA_BYTES;
|
||||
for (i = 0; i < dict_index_get_n_fields(index); i++) {
|
||||
@ -4023,7 +4010,7 @@ dict_update_statistics_low(
|
||||
/*=======================*/
|
||||
dict_table_t* table, /* in: table */
|
||||
ibool has_dict_mutex __attribute__((unused)))
|
||||
/* in: TRUE if the caller has the
|
||||
/* in: TRUE if the caller has the
|
||||
dictionary mutex */
|
||||
{
|
||||
dict_index_t* index;
|
||||
@ -4094,7 +4081,7 @@ dict_update_statistics_low(
|
||||
|
||||
table->stat_initialized = TRUE;
|
||||
|
||||
table->stat_modified_counter = 0;
|
||||
table->stat_modified_counter = 0;
|
||||
}
|
||||
|
||||
/*************************************************************************
|
||||
@ -4200,7 +4187,7 @@ dict_table_print_low(
|
||||
(ulong) ut_dulint_get_high(table->id),
|
||||
(ulong) ut_dulint_get_low(table->id),
|
||||
(ulong) table->n_cols,
|
||||
(ulong) UT_LIST_GET_LEN(table->indexes),
|
||||
(ulong) UT_LIST_GET_LEN(table->indexes),
|
||||
(ulong) table->stat_n_rows);
|
||||
|
||||
for (i = 0; i < table->n_cols - 1; i++) {
|
||||
@@ -4278,7 +4265,7 @@ dict_index_print_low(
	}

	fprintf(stderr,
-	" INDEX: name %s, id %lu %lu, fields %lu/%lu, type %lu\n"
+	" INDEX: name %s, id %lu %lu, fields %lu/%lu, uniq %lu, type %lu\n"
	" root page %lu, appr.key vals %lu,"
	" leaf pages %lu, size pages %lu\n"
	" FIELDS: ",
@@ -4286,7 +4273,9 @@ dict_index_print_low(
	(ulong) ut_dulint_get_high(tree->id),
	(ulong) ut_dulint_get_low(tree->id),
	(ulong) index->n_user_defined_cols,
-	(ulong) index->n_fields, (ulong) index->type,
+	(ulong) index->n_fields,
+	(ulong) index->n_uniq,
+	(ulong) index->type,
	(ulong) tree->page,
	(ulong) n_vals,
	(ulong) index->stat_n_leaf_pages,
@ -4363,7 +4352,7 @@ dict_print_info_on_foreign_key_in_create_format(
|
||||
ut_print_name(file, trx, foreign->foreign_col_names[i]);
|
||||
if (++i < foreign->n_fields) {
|
||||
fputs(", ", file);
|
||||
} else {
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -58,7 +58,7 @@ dict_get_first_table_name_in_db(
|
||||
|
||||
sys_tables = dict_table_get_low("SYS_TABLES");
|
||||
sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
|
||||
ut_a(!sys_tables->comp);
|
||||
ut_a(!dict_table_is_comp(sys_tables));
|
||||
|
||||
tuple = dtuple_create(heap, 1);
|
||||
dfield = dtuple_get_nth_field(tuple, 0);
|
||||
@ -84,7 +84,7 @@ loop:
|
||||
field = rec_get_nth_field_old(rec, 0, &len);
|
||||
|
||||
if (len < strlen(name)
|
||||
|| ut_memcmp(name, field, strlen(name)) != 0) {
|
||||
|| ut_memcmp(name, field, strlen(name)) != 0) {
|
||||
/* Not found */
|
||||
|
||||
btr_pcur_close(&pcur);
|
||||
@ -94,11 +94,11 @@ loop:
|
||||
return(NULL);
|
||||
}
|
||||
|
||||
if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
|
||||
if (!rec_get_deleted_flag(rec, 0)) {
|
||||
|
||||
/* We found one */
|
||||
|
||||
char* table_name = mem_strdupl((char*) field, len);
|
||||
char* table_name = mem_strdupl((char*) field, len);
|
||||
|
||||
btr_pcur_close(&pcur);
|
||||
mtr_commit(&mtr);
|
||||
@ -169,11 +169,11 @@ loop:
|
||||
|
||||
field = rec_get_nth_field_old(rec, 0, &len);
|
||||
|
||||
if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
|
||||
if (!rec_get_deleted_flag(rec, 0)) {
|
||||
|
||||
/* We found one */
|
||||
|
||||
char* table_name = mem_strdupl((char*) field, len);
|
||||
char* table_name = mem_strdupl((char*) field, len);
|
||||
|
||||
btr_pcur_store_position(&pcur, &mtr);
|
||||
|
||||
@ -235,7 +235,7 @@ dict_check_tablespaces_and_store_max_id(
|
||||
|
||||
sys_tables = dict_table_get_low("SYS_TABLES");
|
||||
sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
|
||||
ut_a(!sys_tables->comp);
|
||||
ut_a(!dict_table_is_comp(sys_tables));
|
||||
|
||||
btr_pcur_open_at_index_side(TRUE, sys_index, BTR_SEARCH_LEAF, &pcur,
|
||||
TRUE, &mtr);
|
||||
@ -254,7 +254,7 @@ loop:
|
||||
known space id */
|
||||
|
||||
/* printf("Biggest space id in data dictionary %lu\n",
|
||||
max_space_id); */
|
||||
max_space_id); */
|
||||
fil_set_max_space_id_if_bigger(max_space_id);
|
||||
|
||||
mutex_exit(&(dict_sys->mutex));
|
||||
@ -264,11 +264,11 @@ loop:
|
||||
|
||||
field = rec_get_nth_field_old(rec, 0, &len);
|
||||
|
||||
if (!rec_get_deleted_flag(rec, sys_tables->comp)) {
|
||||
if (!rec_get_deleted_flag(rec, 0)) {
|
||||
|
||||
/* We found one */
|
||||
|
||||
char* name = mem_strdupl((char*) field, len);
|
||||
char* name = mem_strdupl((char*) field, len);
|
||||
|
||||
field = rec_get_nth_field_old(rec, 9, &len);
|
||||
ut_a(len == 4);
|
||||
@ -343,7 +343,7 @@ dict_load_columns(
|
||||
|
||||
sys_columns = dict_table_get_low("SYS_COLUMNS");
|
||||
sys_index = UT_LIST_GET_FIRST(sys_columns->indexes);
|
||||
ut_a(!sys_columns->comp);
|
||||
ut_a(!dict_table_is_comp(sys_columns));
|
||||
|
||||
tuple = dtuple_create(heap, 1);
|
||||
dfield = dtuple_get_nth_field(tuple, 0);
|
||||
@ -356,13 +356,13 @@ dict_load_columns(
|
||||
|
||||
btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
|
||||
BTR_SEARCH_LEAF, &pcur, &mtr);
|
||||
for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) {
|
||||
for (i = 0; i < table->n_cols - DATA_N_SYS_COLS; i++) {
|
||||
|
||||
rec = btr_pcur_get_rec(&pcur);
|
||||
|
||||
ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
|
||||
|
||||
ut_a(!rec_get_deleted_flag(rec, sys_columns->comp));
|
||||
ut_a(!rec_get_deleted_flag(rec, 0));
|
||||
|
||||
field = rec_get_nth_field_old(rec, 0, &len);
|
||||
ut_ad(len == 8);
|
||||
@@ -385,13 +385,23 @@ dict_load_columns(
		field = rec_get_nth_field_old(rec, 6, &len);
		prtype = mach_read_from_4(field);

-		if (dtype_is_non_binary_string_type(mtype, prtype)
-		    && dtype_get_charset_coll(prtype) == 0) {
-			/* This is a non-binary string type, and the table
-			was created with < 4.1.2. Use the default charset. */
+		if (dtype_get_charset_coll(prtype) == 0
+		    && dtype_is_string_type(mtype)) {
+			/* The table was created with < 4.1.2. */

-			prtype = dtype_form_prtype(prtype,
-				data_mysql_default_charset_coll);
+			if (dtype_is_binary_string_type(mtype, prtype)) {
+				/* Use the binary collation for
+				string columns of binary type. */
+
+				prtype = dtype_form_prtype(prtype,
+					DATA_MYSQL_BINARY_CHARSET_COLL);
+			} else {
+				/* Use the default charset for
+				other than binary columns. */
+
+				prtype = dtype_form_prtype(prtype,
+					data_mysql_default_charset_coll);
+			}
		}

		field = rec_get_nth_field_old(rec, 7, &len);
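
The rewritten branch above assigns a collation to string columns that were stored without one (tables created before 4.1.2): binary string columns get DATA_MYSQL_BINARY_CHARSET_COLL, other string columns fall back to the server's default collation. A compressed sketch of that decision follows; the numeric constant is an assumed stand-in (my_charset_bin is commonly 63) and the helper is illustrative, not part of the patch.

typedef unsigned long ulint;

/* Sketch only: pick a collation for a pre-4.1.2 column whose precise type
carries no charset-collation code. The constant below is an assumed
stand-in for DATA_MYSQL_BINARY_CHARSET_COLL. */
#define BINARY_CHARSET_COLL	63

static ulint
charset_coll_for_old_column(
	int	is_string,		/* nonzero if a string column */
	int	is_binary_string,	/* nonzero if a binary string column */
	ulint	stored_coll,		/* collation code read from SYS_COLUMNS */
	ulint	server_default_coll)	/* server default collation */
{
	if (stored_coll != 0 || !is_string) {

		return(stored_coll);	/* nothing to fix up */
	}

	return(is_binary_string
		? (ulint) BINARY_CHARSET_COLL
		: server_default_coll);
}
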
@@ -466,7 +476,7 @@ dict_load_fields(

sys_fields = dict_table_get_low("SYS_FIELDS");
sys_index = UT_LIST_GET_FIRST(sys_fields->indexes);
ut_a(!sys_fields->comp);
ut_a(!dict_table_is_comp(sys_fields));

tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -479,12 +489,12 @@ dict_load_fields(

btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
for (i = 0; i < index->n_fields; i++) {
for (i = 0; i < index->n_fields; i++) {

rec = btr_pcur_get_rec(&pcur);

ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
if (rec_get_deleted_flag(rec, sys_fields->comp)) {
if (rec_get_deleted_flag(rec, 0)) {
dict_load_report_deleted_index(table->name, i);
}

@@ -506,14 +516,14 @@ dict_load_fields(
pos_and_prefix_len = mach_read_from_4(field);

ut_a((pos_and_prefix_len & 0xFFFFUL) == i
|| (pos_and_prefix_len & 0xFFFF0000UL) == (i << 16));
|| (pos_and_prefix_len & 0xFFFF0000UL) == (i << 16));

if ((i == 0 && pos_and_prefix_len > 0)
|| (pos_and_prefix_len & 0xFFFF0000UL) > 0) {
|| (pos_and_prefix_len & 0xFFFF0000UL) > 0) {

prefix_len = pos_and_prefix_len & 0xFFFFUL;
prefix_len = pos_and_prefix_len & 0xFFFFUL;
} else {
prefix_len = 0;
prefix_len = 0;
}

ut_a(0 == ut_strcmp("COL_NAME",
@@ -523,7 +533,7 @@ dict_load_fields(
field = rec_get_nth_field_old(rec, 4, &len);

dict_mem_index_add_field(index,
mem_heap_strdupl(heap, (char*) field, len), 0, prefix_len);
mem_heap_strdupl(heap, (char*) field, len), prefix_len);

btr_pcur_move_to_next_user_rec(&pcur, &mtr);
}
@@ -569,7 +579,7 @@ dict_load_indexes(
#endif /* UNIV_SYNC_DEBUG */

if ((ut_dulint_get_high(table->id) == 0)
&& (ut_dulint_get_low(table->id) < DICT_HDR_FIRST_ID)) {
&& (ut_dulint_get_low(table->id) < DICT_HDR_FIRST_ID)) {
is_sys_table = TRUE;
} else {
is_sys_table = FALSE;
@@ -579,7 +589,7 @@ dict_load_indexes(

sys_indexes = dict_table_get_low("SYS_INDEXES");
sys_index = UT_LIST_GET_FIRST(sys_indexes->indexes);
ut_a(!sys_indexes->comp);
ut_a(!dict_table_is_comp(sys_indexes));

tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -592,7 +602,7 @@ dict_load_indexes(

btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
for (;;) {
for (;;) {
if (!btr_pcur_is_on_user_rec(&pcur, &mtr)) {

break;
@@ -607,7 +617,7 @@ dict_load_indexes(
break;
}

if (rec_get_deleted_flag(rec, table->comp)) {
if (rec_get_deleted_flag(rec, dict_table_is_comp(table))) {
dict_load_report_deleted_index(table->name,
ULINT_UNDEFINED);

@@ -658,7 +668,7 @@ dict_load_indexes(
}

if ((type & DICT_CLUSTERED) == 0
&& NULL == dict_table_get_first_index(table)) {
&& NULL == dict_table_get_first_index(table)) {

fprintf(stderr,
"InnoDB: Error: trying to load index %s for table %s\n"
@@ -672,16 +682,16 @@ dict_load_indexes(
}

if (is_sys_table
&& ((type & DICT_CLUSTERED)
|| ((table == dict_sys->sys_tables)
&& (name_len == (sizeof "ID_IND") - 1)
&& (0 == ut_memcmp(name_buf, "ID_IND",
&& ((type & DICT_CLUSTERED)
|| ((table == dict_sys->sys_tables)
&& (name_len == (sizeof "ID_IND") - 1)
&& (0 == ut_memcmp(name_buf, "ID_IND",
name_len))))) {

/* The index was created in memory already at booting
of the database server */
} else {
index = dict_mem_index_create(table->name, name_buf,
index = dict_mem_index_create(table->name, name_buf,
space, type, n_fields);
index->id = id;

@@ -729,6 +739,7 @@ dict_load_table(
ulint len;
ulint space;
ulint n_cols;
ulint flags;
ulint err;
mtr_t mtr;

@@ -742,7 +753,7 @@ dict_load_table(

sys_tables = dict_table_get_low("SYS_TABLES");
sys_index = UT_LIST_GET_FIRST(sys_tables->indexes);
ut_a(!sys_tables->comp);
ut_a(!dict_table_is_comp(sys_tables));

tuple = dtuple_create(heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -755,7 +766,7 @@ dict_load_table(
rec = btr_pcur_get_rec(&pcur);

if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, sys_tables->comp)) {
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */

btr_pcur_close(&pcur);
@@ -817,10 +828,15 @@ dict_load_table(
field = rec_get_nth_field_old(rec, 4, &len);
n_cols = mach_read_from_4(field);

flags = 0;

/* The high-order bit of N_COLS is the "compact format" flag. */
table = dict_mem_table_create(name, space,
n_cols & ~0x80000000UL,
!!(n_cols & 0x80000000UL));
if (n_cols & 0x80000000UL) {
flags |= DICT_TF_COMPACT;
}

table = dict_mem_table_create(name, space, n_cols & ~0x80000000UL,
flags);

table->ibd_file_missing = ibd_file_missing;

@@ -846,7 +862,7 @@ dict_load_table(
}

if ((table->type == DICT_TABLE_CLUSTER)
|| (table->type == DICT_TABLE_CLUSTER_MEMBER)) {
|| (table->type == DICT_TABLE_CLUSTER_MEMBER)) {

field = rec_get_nth_field_old(rec, 7, &len);
ut_a(len == 4);
@@ -872,9 +888,9 @@ dict_load_table(
/*
if (err != DB_SUCCESS) {

mutex_enter(&dict_foreign_err_mutex);
mutex_enter(&dict_foreign_err_mutex);

ut_print_timestamp(stderr);
ut_print_timestamp(stderr);

fprintf(stderr,
" InnoDB: Error: could not make a foreign key definition to match\n"
@@ -903,7 +919,7 @@ dict_load_table_on_id(
{
byte id_buf[8];
btr_pcur_t pcur;
mem_heap_t* heap;
mem_heap_t* heap;
dtuple_t* tuple;
dfield_t* dfield;
dict_index_t* sys_table_ids;
@@ -928,7 +944,7 @@ dict_load_table_on_id(
sys_tables = dict_sys->sys_tables;
sys_table_ids = dict_table_get_next_index(
dict_table_get_first_index(sys_tables));
ut_a(!sys_tables->comp);
ut_a(!dict_table_is_comp(sys_tables));
heap = mem_heap_create(256);

tuple = dtuple_create(heap, 1);
@@ -945,7 +961,7 @@ dict_load_table_on_id(
rec = btr_pcur_get_rec(&pcur);

if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, sys_tables->comp)) {
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */

btr_pcur_close(&pcur);
@@ -1042,7 +1058,7 @@ dict_load_foreign_cols(

sys_foreign_cols = dict_table_get_low("SYS_FOREIGN_COLS");
sys_index = UT_LIST_GET_FIRST(sys_foreign_cols->indexes);
ut_a(!sys_foreign_cols->comp);
ut_a(!dict_table_is_comp(sys_foreign_cols));

tuple = dtuple_create(foreign->heap, 1);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -1052,12 +1068,12 @@ dict_load_foreign_cols(

btr_pcur_open_on_user_rec(sys_index, tuple, PAGE_CUR_GE,
BTR_SEARCH_LEAF, &pcur, &mtr);
for (i = 0; i < foreign->n_fields; i++) {
for (i = 0; i < foreign->n_fields; i++) {

rec = btr_pcur_get_rec(&pcur);

ut_a(btr_pcur_is_on_user_rec(&pcur, &mtr));
ut_a(!rec_get_deleted_flag(rec, sys_foreign_cols->comp));
ut_a(!rec_get_deleted_flag(rec, 0));

field = rec_get_nth_field_old(rec, 0, &len);
ut_a(len == ut_strlen(id));
@@ -1069,11 +1085,11 @@ dict_load_foreign_cols(

field = rec_get_nth_field_old(rec, 4, &len);
foreign->foreign_col_names[i] =
mem_heap_strdupl(foreign->heap, (char*) field, len);
mem_heap_strdupl(foreign->heap, (char*) field, len);

field = rec_get_nth_field_old(rec, 5, &len);
foreign->referenced_col_names[i] =
mem_heap_strdupl(foreign->heap, (char*) field, len);
mem_heap_strdupl(foreign->heap, (char*) field, len);

btr_pcur_move_to_next_user_rec(&pcur, &mtr);
}
@@ -1091,7 +1107,7 @@ dict_load_foreign(
/* out: DB_SUCCESS or error code */
const char* id, /* in: foreign constraint id as a
null-terminated string */
ibool check_types)/* in: TRUE=check type compatibility */
ibool check_charsets)/* in: TRUE=check charset compatibility */
{
dict_foreign_t* foreign;
dict_table_t* sys_foreign;
@@ -1115,7 +1131,7 @@ dict_load_foreign(

sys_foreign = dict_table_get_low("SYS_FOREIGN");
sys_index = UT_LIST_GET_FIRST(sys_foreign->indexes);
ut_a(!sys_foreign->comp);
ut_a(!dict_table_is_comp(sys_foreign));

tuple = dtuple_create(heap2, 1);
dfield = dtuple_get_nth_field(tuple, 0);
@@ -1128,7 +1144,7 @@ dict_load_foreign(
rec = btr_pcur_get_rec(&pcur);

if (!btr_pcur_is_on_user_rec(&pcur, &mtr)
|| rec_get_deleted_flag(rec, sys_foreign->comp)) {
|| rec_get_deleted_flag(rec, 0)) {
/* Not found */

fprintf(stderr,
@@ -1179,11 +1195,11 @@ dict_load_foreign(

field = rec_get_nth_field_old(rec, 3, &len);
foreign->foreign_table_name =
mem_heap_strdupl(foreign->heap, (char*) field, len);
mem_heap_strdupl(foreign->heap, (char*) field, len);

field = rec_get_nth_field_old(rec, 4, &len);
foreign->referenced_table_name =
mem_heap_strdupl(foreign->heap, (char*) field, len);
mem_heap_strdupl(foreign->heap, (char*) field, len);

btr_pcur_close(&pcur);
mtr_commit(&mtr);
@@ -1204,7 +1220,7 @@ dict_load_foreign(
a new foreign key constraint but loading one from the data
dictionary. */

return(dict_foreign_add_to_cache(foreign, check_types));
return(dict_foreign_add_to_cache(foreign, check_charsets));
}

/***************************************************************************
@@ -1219,10 +1235,11 @@ dict_load_foreigns(
/*===============*/
/* out: DB_SUCCESS or error code */
const char* table_name, /* in: table name */
ibool check_types) /* in: TRUE=check type compatibility */
ibool check_charsets) /* in: TRUE=check charset
compatibility */
{
btr_pcur_t pcur;
mem_heap_t* heap;
mem_heap_t* heap;
dtuple_t* tuple;
dfield_t* dfield;
dict_index_t* sec_index;
@@ -1249,7 +1266,7 @@ dict_load_foreigns(
return(DB_ERROR);
}

ut_a(!sys_foreign->comp);
ut_a(!dict_table_is_comp(sys_foreign));
mtr_start(&mtr);

/* Get the secondary index based on FOR_NAME from table
@@ -1304,7 +1321,7 @@ loop:
goto next_rec;
}

if (rec_get_deleted_flag(rec, sys_foreign->comp)) {
if (rec_get_deleted_flag(rec, 0)) {

goto next_rec;
}
@@ -1319,7 +1336,7 @@ loop:

/* Load the foreign constraint definition to the dictionary cache */

err = dict_load_foreign(id, check_types);
err = dict_load_foreign(id, check_charsets);

if (err != DB_SUCCESS) {
btr_pcur_close(&pcur);

@@ -36,13 +36,13 @@ dict_mem_table_create(
ignored if the table is made a member of
a cluster */
ulint n_cols, /* in: number of columns */
ibool comp) /* in: TRUE=compact page format */
ulint flags) /* in: table flags */
{
dict_table_t* table;
mem_heap_t* heap;

ut_ad(name);
ut_ad(comp == FALSE || comp == TRUE);
ut_ad(!(flags & ~DICT_TF_COMPACT));

heap = mem_heap_create(DICT_HEAP_SIZE);

@@ -51,12 +51,12 @@ dict_mem_table_create(
table->heap = heap;

table->type = DICT_TABLE_ORDINARY;
table->flags = flags;
table->name = mem_heap_strdup(heap, name);
table->dir_path_of_temp_table = NULL;
table->space = space;
table->ibd_file_missing = FALSE;
table->tablespace_discarded = FALSE;
table->comp = comp;
table->n_def = 0;
table->n_cols = n_cols + DATA_N_SYS_COLS;
table->mem_fix = 0;
@@ -114,7 +114,7 @@ dict_mem_cluster_create(
dict_table_t* cluster;

/* Clustered tables cannot work with the compact record format. */
cluster = dict_mem_table_create(name, space, n_cols, FALSE);
cluster = dict_mem_table_create(name, space, n_cols, 0);

cluster->type = DICT_TABLE_CLUSTER;
cluster->mix_len = mix_len;
@@ -261,8 +261,6 @@ dict_mem_index_add_field(
/*=====================*/
dict_index_t* index, /* in: index */
const char* name, /* in: column name */
ulint order, /* in: order criterion; 0 means an
ascending order */
ulint prefix_len) /* in: 0 or the column prefix length
in a MySQL index like
INDEX (textcol(25)) */
@@ -277,8 +275,6 @@ dict_mem_index_add_field(
field = dict_index_get_nth_field(index, index->n_def - 1);

field->name = name;
field->order = order;

field->prefix_len = prefix_len;
}

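Both the dict_load_table() hunk earlier and dict_mem_table_create() above replace the single ibool comp argument with a ulint flags word; the compact bit itself is still carried in the high-order bit of SYS_TABLES.N_COLS. Below is a minimal sketch of that decoding step; the numeric value of DICT_TF_COMPACT is an assumption for illustration, since only the name appears in the diff.

#include <stdio.h>

typedef unsigned long ulint;

#define DICT_TF_COMPACT	1UL	/* assumed value for illustration */

/* SYS_TABLES.N_COLS keeps the column count in the low bits and the
"compact format" flag in the high-order bit; split it into the
(n_cols, flags) pair that dict_mem_table_create() now takes. */
static void
decode_n_cols(ulint n_cols_field, ulint* n_cols, ulint* flags)
{
	*flags = 0;

	if (n_cols_field & 0x80000000UL) {
		*flags |= DICT_TF_COMPACT;
	}

	*n_cols = n_cols_field & ~0x80000000UL;
}

int
main(void)
{
	ulint	n_cols;
	ulint	flags;

	decode_n_cols(0x80000004UL, &n_cols, &flags);	/* 4 columns, compact */
	printf("n_cols=%lu compact=%lu\n", n_cols, flags & DICT_TF_COMPACT);

	return(0);
}
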
@@ -17,7 +17,7 @@ Created 12/29/1997 Heikki Tuuri
#include "row0sel.h"

/* The RND function seed */
ulint eval_rnd = 128367121;
ulint eval_rnd = 128367121;

/* Dummy adress used when we should allocate a buffer of size 0 in
the function below */
@@ -44,7 +44,7 @@ eval_node_alloc_val_buf(
byte* data;

ut_ad(que_node_get_type(node) == QUE_NODE_SYMBOL
|| que_node_get_type(node) == QUE_NODE_FUNC);
|| que_node_get_type(node) == QUE_NODE_FUNC);

dfield = que_node_get_val(node);

@@ -81,7 +81,7 @@ eval_node_free_val_buf(
byte* data;

ut_ad(que_node_get_type(node) == QUE_NODE_SYMBOL
|| que_node_get_type(node) == QUE_NODE_FUNC);
|| que_node_get_type(node) == QUE_NODE_FUNC);

dfield = que_node_get_val(node);

@@ -568,7 +568,7 @@ eval_binary_to_number(
str1 = dfield_get_data(dfield);
len1 = dfield_get_len(dfield);

if (len1 > 4) {
if (len1 > 4) {
ut_error;
}

@@ -90,7 +90,7 @@ directory, and we must set the base file path explicitly */
const char* fil_path_to_mysql_datadir = ".";

/* The number of fsyncs done to the log */
ulint fil_n_log_flushes = 0;
ulint fil_n_log_flushes = 0;

ulint fil_n_pending_log_flushes = 0;
ulint fil_n_pending_tablespace_flushes = 0;
@@ -177,10 +177,15 @@ struct fil_space_struct {
may need to access the ibuf bitmap page in the
tablespade: dropping of the tablespace is
forbidden if this is > 0 */
hash_node_t hash; /* hash chain node */
hash_node_t hash; /* hash chain node */
hash_node_t name_hash;/* hash chain the name_hash table */
rw_lock_t latch; /* latch protecting the file space storage
allocation */
UT_LIST_NODE_T(fil_space_t) unflushed_spaces;
/* list of spaces with at least one unflushed
file we have written to */
ibool is_in_unflushed_spaces; /* TRUE if this space is
currently in the list above */
UT_LIST_NODE_T(fil_space_t) space_list;
/* list of all spaces */
ibuf_data_t* ibuf_data;
@@ -213,6 +218,12 @@ struct fil_system_struct {
not put to this list: they are opened
after the startup, and kept open until
shutdown */
UT_LIST_BASE_NODE_T(fil_space_t) unflushed_spaces;
/* base node for the list of those
tablespaces whose files contain
unflushed writes; those spaces have
at least one file node where
modification_counter > flush_counter */
ulint n_open; /* number of files currently open */
ulint max_n_open; /* n_open is not allowed to exceed
this */
@@ -389,6 +400,36 @@ fil_space_get_ibuf_data(
return(space->ibuf_data);
}

/**************************************************************************
Checks if all the file nodes in a space are flushed. The caller must hold
the fil_system mutex. */
static
ibool
fil_space_is_flushed(
/*=================*/
/* out: TRUE if all are flushed */
fil_space_t* space) /* in: space */
{
fil_node_t* node;

#ifdef UNIV_SYNC_DEBUG
ut_ad(mutex_own(&(fil_system->mutex)));
#endif /* UNIV_SYNC_DEBUG */

node = UT_LIST_GET_FIRST(space->chain);

while (node) {
if (node->modification_counter > node->flush_counter) {

return(FALSE);
}

node = UT_LIST_GET_NEXT(chain, node);
}

return(TRUE);
}

/***********************************************************************
Appends a new file to the chain of files of a space. File must be closed. */

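The unflushed_spaces list and fil_space_is_flushed() above are the core of the #15653 fix: a space is linked into the list by the first write that leaves it unflushed and unlinked once every one of its file nodes has been flushed, so the flush pass no longer walks tens of thousands of tablespaces. Below is a minimal single-threaded sketch of that bookkeeping with simplified stand-in types (no mutexes, one file node per space); it illustrates the pattern and is not the InnoDB code.

#include <stddef.h>

/* Simplified stand-ins, not the InnoDB structs. */
struct space {
	long		modification_counter;	/* bumped on every write */
	long		flush_counter;		/* raised to match on fsync */
	int		is_in_unflushed_list;
	struct space*	next_unflushed;
};

static struct space*	unflushed_list;		/* only these need flushing */

/* Called after a write completes (cf. fil_node_complete_io() below). */
static void
note_write(struct space* s)
{
	s->modification_counter++;

	if (!s->is_in_unflushed_list) {
		s->is_in_unflushed_list = 1;
		s->next_unflushed = unflushed_list;
		unflushed_list = s;
	}
}

/* Flush only the spaces with pending writes (cf. fil_flush_file_spaces()). */
static void
flush_unflushed(void (*fsync_space)(struct space*))
{
	struct space*	s = unflushed_list;

	while (s != NULL) {
		struct space*	next = s->next_unflushed;

		fsync_space(s);
		s->flush_counter = s->modification_counter;
		s->is_in_unflushed_list = 0;
		s->next_unflushed = NULL;

		s = next;
	}

	unflushed_list = NULL;
}
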
@ -469,10 +510,12 @@ fil_node_open_file(
|
||||
ulint size_low;
|
||||
ulint size_high;
|
||||
ibool ret;
|
||||
ibool success;
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
byte* buf2;
|
||||
byte* page;
|
||||
ibool success;
|
||||
ulint space_id;
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad(mutex_own(&(system->mutex)));
|
||||
@ -505,19 +548,19 @@ fil_node_open_file(
|
||||
ut_a(0);
|
||||
}
|
||||
|
||||
ut_a(space->purpose != FIL_LOG);
|
||||
ut_a(space->id != 0);
|
||||
|
||||
os_file_get_size(node->handle, &size_low, &size_high);
|
||||
|
||||
size_bytes = (((ib_longlong)size_high) << 32)
|
||||
+ (ib_longlong)size_low;
|
||||
+ (ib_longlong)size_low;
|
||||
#ifdef UNIV_HOTBACKUP
|
||||
node->size = (ulint) (size_bytes / UNIV_PAGE_SIZE);
|
||||
|
||||
#else
|
||||
ut_a(space->purpose != FIL_LOG);
|
||||
ut_a(space->id != 0);
|
||||
|
||||
if (size_bytes < FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: the size of single-table tablespace file %s\n"
|
||||
"InnoDB: is only %lu %lu, should be at least %lu!\n", node->name,
|
||||
(ulong) size_high,
|
||||
@ -544,7 +587,7 @@ fil_node_open_file(
|
||||
os_file_close(node->handle);
|
||||
|
||||
if (space_id == ULINT_UNDEFINED || space_id == 0) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: tablespace id %lu in file %s is not sensible\n",
|
||||
(ulong) space_id,
|
||||
node->name);
|
||||
@ -553,7 +596,7 @@ fil_node_open_file(
|
||||
}
|
||||
|
||||
if (space_id != space->id) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: tablespace id is %lu in the data dictionary\n"
|
||||
"InnoDB: but in file %s it is %lu!\n", space->id, node->name, space_id);
|
||||
|
||||
@ -581,7 +624,7 @@ fil_node_open_file(
|
||||
OS_FILE_AIO, OS_LOG_FILE, &ret);
|
||||
} else if (node->is_raw_disk) {
|
||||
node->handle = os_file_create(node->name,
|
||||
OS_FILE_OPEN_RAW,
|
||||
OS_FILE_OPEN_RAW,
|
||||
OS_FILE_AIO, OS_DATA_FILE, &ret);
|
||||
} else {
|
||||
node->handle = os_file_create(node->name, OS_FILE_OPEN,
|
||||
@ -669,7 +712,7 @@ fil_try_to_close_file_in_LRU(
|
||||
|
||||
while (node != NULL) {
|
||||
if (node->modification_counter == node->flush_counter
|
||||
&& node->n_pending_flushes == 0) {
|
||||
&& node->n_pending_flushes == 0) {
|
||||
|
||||
fil_node_close_file(node, system);
|
||||
|
||||
@ -680,11 +723,11 @@ fil_try_to_close_file_in_LRU(
|
||||
fputs("InnoDB: cannot close file ", stderr);
|
||||
ut_print_filename(stderr, node->name);
|
||||
fprintf(stderr, ", because n_pending_flushes %lu\n",
|
||||
(ulong) node->n_pending_flushes);
|
||||
(ulong) node->n_pending_flushes);
|
||||
}
|
||||
|
||||
if (print_info
|
||||
&& node->modification_counter != node->flush_counter) {
|
||||
&& node->modification_counter != node->flush_counter) {
|
||||
fputs("InnoDB: cannot close file ", stderr);
|
||||
ut_print_filename(stderr, node->name);
|
||||
fprintf(stderr,
|
||||
@ -841,6 +884,16 @@ fil_node_free(
|
||||
|
||||
node->modification_counter = node->flush_counter;
|
||||
|
||||
if (space->is_in_unflushed_spaces
|
||||
&& fil_space_is_flushed(space)) {
|
||||
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_REMOVE(unflushed_spaces,
|
||||
system->unflushed_spaces,
|
||||
space);
|
||||
}
|
||||
|
||||
fil_node_close_file(node, system);
|
||||
}
|
||||
|
||||
@ -1004,6 +1057,8 @@ try_again:
|
||||
|
||||
HASH_INSERT(fil_space_t, name_hash, system->name_hash,
|
||||
ut_fold_string(name), space);
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_ADD_LAST(space_list, system->space_list, space);
|
||||
|
||||
mutex_exit(&(system->mutex));
|
||||
@ -1032,8 +1087,8 @@ fil_assign_new_space_id(void)
|
||||
id = system->max_assigned_id;
|
||||
|
||||
if (id > (SRV_LOG_SPACE_FIRST_ID / 2) && (id % 1000000UL == 0)) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
"InnoDB: Warning: you are running out of new single-table tablespace id's.\n"
|
||||
"InnoDB: Current counter is %lu and it must not exceed %lu!\n"
|
||||
"InnoDB: To reset the counter to zero you have to dump all your tables and\n"
|
||||
@ -1042,8 +1097,8 @@ fil_assign_new_space_id(void)
|
||||
}
|
||||
|
||||
if (id >= SRV_LOG_SPACE_FIRST_ID) {
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
"InnoDB: You have run out of single-table tablespace id's!\n"
|
||||
"InnoDB: Current counter is %lu.\n"
|
||||
"InnoDB: To reset the counter to zero you have to dump all your tables and\n"
|
||||
@ -1092,13 +1147,20 @@ fil_space_free(
|
||||
HASH_DELETE(fil_space_t, hash, system->spaces, id, space);
|
||||
|
||||
HASH_SEARCH(name_hash, system->name_hash, ut_fold_string(space->name),
|
||||
namespace, 0 == strcmp(space->name, namespace->name));
|
||||
namespace, 0 == strcmp(space->name, namespace->name));
|
||||
ut_a(namespace);
|
||||
ut_a(space == namespace);
|
||||
|
||||
HASH_DELETE(fil_space_t, name_hash, system->name_hash,
|
||||
ut_fold_string(space->name), space);
|
||||
|
||||
if (space->is_in_unflushed_spaces) {
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_REMOVE(unflushed_spaces, system->unflushed_spaces,
|
||||
space);
|
||||
}
|
||||
|
||||
UT_LIST_REMOVE(space_list, system->space_list, space);
|
||||
|
||||
ut_a(space->magic_n == FIL_SPACE_MAGIC_N);
|
||||
@ -1157,7 +1219,7 @@ fil_space_get_size(
|
||||
/* out: space size, 0 if space not found */
|
||||
ulint id) /* in: space id */
|
||||
{
|
||||
fil_system_t* system = fil_system;
|
||||
fil_system_t* system = fil_system;
|
||||
fil_node_t* node;
|
||||
fil_space_t* space;
|
||||
ulint size;
|
||||
@ -1250,6 +1312,7 @@ fil_system_create(
|
||||
|
||||
system->tablespace_version = 0;
|
||||
|
||||
UT_LIST_INIT(system->unflushed_spaces);
|
||||
UT_LIST_INIT(system->space_list);
|
||||
|
||||
return(system);
|
||||
@ -1266,7 +1329,7 @@ fil_init(
|
||||
ut_a(fil_system == NULL);
|
||||
|
||||
/*printf("Initializing the tablespace cache with max %lu open files\n",
|
||||
max_n_open); */
|
||||
max_n_open); */
|
||||
fil_system = fil_system_create(FIL_SYSTEM_HASH_SIZE, max_n_open);
|
||||
}
|
||||
|
||||
@ -1305,8 +1368,8 @@ fil_open_log_and_system_tablespace_files(void)
|
||||
"InnoDB: tablespace files open for the whole time mysqld is running, and\n"
|
||||
"InnoDB: needs to open also some .ibd files if the file-per-table storage\n"
|
||||
"InnoDB: model is used. Current open files %lu, max allowed open files %lu.\n",
|
||||
(ulong) system->n_open,
|
||||
(ulong) system->max_n_open);
|
||||
(ulong) system->n_open,
|
||||
(ulong) system->max_n_open);
|
||||
}
|
||||
node = UT_LIST_GET_NEXT(chain, node);
|
||||
}
|
||||
@ -1389,7 +1452,7 @@ fil_ibuf_init_at_db_start(void)
|
||||
space = UT_LIST_GET_FIRST(fil_system->space_list);
|
||||
|
||||
ut_a(space);
|
||||
ut_a(space->purpose == FIL_TABLESPACE);
|
||||
ut_a(space->purpose == FIL_TABLESPACE);
|
||||
|
||||
space->ibuf_data = ibuf_data_init_for_space(space->id);
|
||||
}
|
||||
@ -1451,7 +1514,7 @@ fil_write_flushed_lsn_to_data_files(
|
||||
always open. */
|
||||
|
||||
if (space->purpose == FIL_TABLESPACE
|
||||
&& space->id == 0) {
|
||||
&& space->id == 0) {
|
||||
sum_of_sizes = 0;
|
||||
|
||||
node = UT_LIST_GET_FIRST(space->chain);
|
||||
@ -1581,7 +1644,7 @@ Decrements the count of pending insert buffer page merges. */
|
||||
|
||||
void
|
||||
fil_decr_pending_ibuf_merges(
|
||||
/*========================*/
|
||||
/*=========================*/
|
||||
ulint id) /* in: space id */
|
||||
{
|
||||
fil_system_t* system = fil_system;
|
||||
@ -1706,13 +1769,13 @@ datadir that we should use in replaying the file operations. */
|
||||
byte*
|
||||
fil_op_log_parse_or_replay(
|
||||
/*=======================*/
|
||||
/* out: end of log record, or NULL if the
|
||||
/* out: end of log record, or NULL if the
|
||||
record was not completely contained between
|
||||
ptr and end_ptr */
|
||||
byte* ptr, /* in: buffer containing the log record body,
|
||||
byte* ptr, /* in: buffer containing the log record body,
|
||||
or an initial segment of it, if the record does
|
||||
not fir completely between ptr and end_ptr */
|
||||
byte* end_ptr, /* in: buffer end */
|
||||
byte* end_ptr, /* in: buffer end */
|
||||
ulint type, /* in: the type of this log record */
|
||||
ibool do_replay, /* in: TRUE if we want to replay the
|
||||
operation, and not just parse the log record */
|
||||
@ -1804,7 +1867,7 @@ fil_op_log_parse_or_replay(
|
||||
with the same name */
|
||||
|
||||
if (fil_get_space_id_for_table(new_name)
|
||||
== ULINT_UNDEFINED) {
|
||||
== ULINT_UNDEFINED) {
|
||||
/* We do not care of the old name, that is
|
||||
why we pass NULL as the first argument */
|
||||
ut_a(fil_rename_tablespace(NULL, space_id,
|
||||
@ -2036,7 +2099,7 @@ fil_rename_tablespace_in_mem(
|
||||
const char* old_name = space->name;
|
||||
|
||||
HASH_SEARCH(name_hash, system->name_hash, ut_fold_string(old_name),
|
||||
space2, 0 == strcmp(old_name, space2->name));
|
||||
space2, 0 == strcmp(old_name, space2->name));
|
||||
if (space != space2) {
|
||||
fputs("InnoDB: Error: cannot find ", stderr);
|
||||
ut_print_filename(stderr, old_name);
|
||||
@ -2046,7 +2109,7 @@ fil_rename_tablespace_in_mem(
|
||||
}
|
||||
|
||||
HASH_SEARCH(name_hash, system->name_hash, ut_fold_string(path),
|
||||
space2, 0 == strcmp(path, space2->name));
|
||||
space2, 0 == strcmp(path, space2->name));
|
||||
if (space2 != NULL) {
|
||||
fputs("InnoDB: Error: ", stderr);
|
||||
ut_print_filename(stderr, path);
|
||||
@ -2123,7 +2186,7 @@ fil_rename_tablespace(
|
||||
fil_node_t* node;
|
||||
ulint count = 0;
|
||||
char* path;
|
||||
ibool old_name_was_specified = TRUE;
|
||||
ibool old_name_was_specified = TRUE;
|
||||
char* old_path;
|
||||
|
||||
ut_a(id != 0);
|
||||
@ -2274,7 +2337,7 @@ fil_create_new_single_table_tablespace(
|
||||
tablespace file in pages,
|
||||
must be >= FIL_IBD_FILE_INITIAL_SIZE */
|
||||
{
|
||||
os_file_t file;
|
||||
os_file_t file;
|
||||
ibool ret;
|
||||
ulint err;
|
||||
byte* buf2;
|
||||
@ -2287,7 +2350,7 @@ fil_create_new_single_table_tablespace(
|
||||
path = fil_make_ibd_name(tablename, is_temp);
|
||||
|
||||
file = os_file_create(path, OS_FILE_CREATE, OS_FILE_NORMAL,
|
||||
OS_DATA_FILE, &ret);
|
||||
OS_DATA_FILE, &ret);
|
||||
if (ret == FALSE) {
|
||||
ut_print_timestamp(stderr);
|
||||
fputs(" InnoDB: Error creating file ", stderr);
|
||||
@ -2299,7 +2362,7 @@ fil_create_new_single_table_tablespace(
|
||||
err = os_file_get_last_error(TRUE);
|
||||
|
||||
if (err == OS_FILE_ALREADY_EXISTS) {
|
||||
fputs(
|
||||
fputs(
|
||||
"InnoDB: The file already exists though the corresponding table did not\n"
|
||||
"InnoDB: exist in the InnoDB data dictionary. Have you moved InnoDB\n"
|
||||
"InnoDB: .ibd files around without using the SQL commands\n"
|
||||
@ -2463,7 +2526,7 @@ fil_reset_too_high_lsns(
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fputs(
|
||||
fputs(
|
||||
" InnoDB: Error: trying to open a table, but could not\n"
|
||||
"InnoDB: open the tablespace file ", stderr);
|
||||
ut_print_filename(stderr, filepath);
|
||||
@ -2503,11 +2566,11 @@ fil_reset_too_high_lsns(
|
||||
" InnoDB: Flush lsn in the tablespace file %lu to be imported\n"
|
||||
"InnoDB: is %lu %lu, which exceeds current system lsn %lu %lu.\n"
|
||||
"InnoDB: We reset the lsn's in the file ",
|
||||
(ulong) space_id,
|
||||
(ulong) ut_dulint_get_high(flush_lsn),
|
||||
(ulong) ut_dulint_get_low(flush_lsn),
|
||||
(ulong) ut_dulint_get_high(current_lsn),
|
||||
(ulong) ut_dulint_get_low(current_lsn));
|
||||
(ulong) space_id,
|
||||
(ulong) ut_dulint_get_high(flush_lsn),
|
||||
(ulong) ut_dulint_get_low(flush_lsn),
|
||||
(ulong) ut_dulint_get_high(current_lsn),
|
||||
(ulong) ut_dulint_get_low(current_lsn));
|
||||
ut_print_filename(stderr, filepath);
|
||||
fputs(".\n", stderr);
|
||||
|
||||
@ -2532,7 +2595,7 @@ fil_reset_too_high_lsns(
|
||||
page_no = mach_read_from_4(page + FIL_PAGE_OFFSET);
|
||||
|
||||
buf_flush_init_for_writing(page, current_lsn, space_id,
|
||||
page_no);
|
||||
page_no);
|
||||
success = os_file_write(filepath, file, page,
|
||||
(ulint)(offset & 0xFFFFFFFFUL),
|
||||
(ulint)(offset >> 32), UNIV_PAGE_SIZE);
|
||||
@ -2615,7 +2678,7 @@ fil_open_single_table_tablespace(
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fputs(
|
||||
fputs(
|
||||
" InnoDB: Error: trying to open a table, but could not\n"
|
||||
"InnoDB: open the tablespace file ", stderr);
|
||||
ut_print_filename(stderr, filepath);
|
||||
@ -2657,7 +2720,7 @@ fil_open_single_table_tablespace(
|
||||
if (space_id != id) {
|
||||
ut_print_timestamp(stderr);
|
||||
|
||||
fputs(
|
||||
fputs(
|
||||
" InnoDB: Error: tablespace id in file ", stderr);
|
||||
ut_print_filename(stderr, filepath);
|
||||
fprintf(stderr, " is %lu, but in the InnoDB\n"
|
||||
@ -2744,6 +2807,7 @@ fil_load_single_table_tablespace(
|
||||
filename);
|
||||
srv_normalize_path_for_win(filepath);
|
||||
#ifdef __WIN__
|
||||
# ifndef UNIV_HOTBACKUP
|
||||
/* If lower_case_table_names is 0 or 2, then MySQL allows database
|
||||
directory names with upper case letters. On Windows, all table and
|
||||
database names in InnoDB are internally always in lower case. Put the
|
||||
@ -2751,6 +2815,7 @@ fil_load_single_table_tablespace(
|
||||
internal data dictionary. */
|
||||
|
||||
dict_casedn_str(filepath);
|
||||
# endif /* !UNIV_HOTBACKUP */
|
||||
#endif
|
||||
file = os_file_create_simple_no_error_handling(filepath, OS_FILE_OPEN,
|
||||
OS_FILE_READ_ONLY, &success);
|
||||
@ -2758,7 +2823,7 @@ fil_load_single_table_tablespace(
|
||||
/* The following call prints an error message */
|
||||
os_file_get_last_error(TRUE);
|
||||
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: could not open single-table tablespace file\n"
|
||||
"InnoDB: %s!\n"
|
||||
"InnoDB: We do not continue the crash recovery, because the table may become\n"
|
||||
@ -2792,7 +2857,7 @@ fil_load_single_table_tablespace(
|
||||
/* The following call prints an error message */
|
||||
os_file_get_last_error(TRUE);
|
||||
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: could not measure the size of single-table tablespace file\n"
|
||||
"InnoDB: %s!\n"
|
||||
"InnoDB: We do not continue crash recovery, because the table will become\n"
|
||||
@ -2830,7 +2895,7 @@ fil_load_single_table_tablespace(
|
||||
size = (((ib_longlong)size_high) << 32) + (ib_longlong)size_low;
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
if (size < FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: the size of single-table tablespace file %s\n"
|
||||
"InnoDB: is only %lu %lu, should be at least %lu!", filepath,
|
||||
(ulong) size_high,
|
||||
@ -2859,7 +2924,7 @@ fil_load_single_table_tablespace(
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
if (space_id == ULINT_UNDEFINED || space_id == 0) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: tablespace id %lu in file %s is not sensible\n",
|
||||
(ulong) space_id,
|
||||
filepath);
|
||||
@ -3001,7 +3066,7 @@ fil_load_single_table_tablespaces(void)
|
||||
os_file_dir_t dbdir;
|
||||
os_file_stat_t dbinfo;
|
||||
os_file_stat_t fileinfo;
|
||||
ulint err = DB_SUCCESS;
|
||||
ulint err = DB_SUCCESS;
|
||||
|
||||
/* The datadir of MySQL is always the default directory of mysqld */
|
||||
|
||||
@ -3024,9 +3089,9 @@ fil_load_single_table_tablespaces(void)
|
||||
/* printf("Looking at %s in datadir\n", dbinfo.name); */
|
||||
|
||||
if (dbinfo.type == OS_FILE_TYPE_FILE
|
||||
|| dbinfo.type == OS_FILE_TYPE_UNKNOWN) {
|
||||
|| dbinfo.type == OS_FILE_TYPE_UNKNOWN) {
|
||||
|
||||
goto next_datadir_item;
|
||||
goto next_datadir_item;
|
||||
}
|
||||
|
||||
/* We found a symlink or a directory; try opening it to see
|
||||
@ -3059,19 +3124,19 @@ fil_load_single_table_tablespaces(void)
|
||||
&fileinfo);
|
||||
while (ret == 0) {
|
||||
/* printf(
|
||||
" Looking at file %s\n", fileinfo.name); */
|
||||
" Looking at file %s\n", fileinfo.name); */
|
||||
|
||||
if (fileinfo.type == OS_FILE_TYPE_DIR) {
|
||||
if (fileinfo.type == OS_FILE_TYPE_DIR) {
|
||||
|
||||
goto next_file_item;
|
||||
goto next_file_item;
|
||||
}
|
||||
|
||||
/* We found a symlink or a file */
|
||||
if (strlen(fileinfo.name) > 4
|
||||
&& 0 == strcmp(fileinfo.name +
|
||||
&& 0 == strcmp(fileinfo.name +
|
||||
strlen(fileinfo.name) - 4,
|
||||
".ibd")) {
|
||||
/* The name ends in .ibd; try opening
|
||||
/* The name ends in .ibd; try opening
|
||||
the file */
|
||||
fil_load_single_table_tablespace(
|
||||
dbinfo.name, fileinfo.name);
|
||||
@ -3121,7 +3186,7 @@ void
|
||||
fil_print_orphaned_tablespaces(void)
|
||||
/*================================*/
|
||||
{
|
||||
fil_system_t* system = fil_system;
|
||||
fil_system_t* system = fil_system;
|
||||
fil_space_t* space;
|
||||
|
||||
mutex_enter(&(system->mutex));
|
||||
@ -3129,7 +3194,7 @@ fil_print_orphaned_tablespaces(void)
|
||||
space = UT_LIST_GET_FIRST(system->space_list);
|
||||
|
||||
while (space) {
|
||||
if (space->purpose == FIL_TABLESPACE && space->id != 0
|
||||
if (space->purpose == FIL_TABLESPACE && space->id != 0
|
||||
&& !space->mark) {
|
||||
fputs("InnoDB: Warning: tablespace ", stderr);
|
||||
ut_print_filename(stderr, space->name);
|
||||
@ -3285,7 +3350,7 @@ fil_space_for_table_exists_in_mem(
|
||||
|
||||
if (space == NULL) {
|
||||
if (namespace == NULL) {
|
||||
ut_print_timestamp(stderr);
|
||||
ut_print_timestamp(stderr);
|
||||
fputs(" InnoDB: Error: table ", stderr);
|
||||
ut_print_filename(stderr, name);
|
||||
fprintf(stderr, "\n"
|
||||
@ -3297,7 +3362,7 @@ fil_space_for_table_exists_in_mem(
|
||||
"InnoDB: table still exists in the InnoDB internal data dictionary.\n",
|
||||
(ulong) id);
|
||||
} else {
|
||||
ut_print_timestamp(stderr);
|
||||
ut_print_timestamp(stderr);
|
||||
fputs(" InnoDB: Error: table ", stderr);
|
||||
ut_print_filename(stderr, name);
|
||||
fprintf(stderr, "\n"
|
||||
@ -3456,7 +3521,7 @@ fil_extend_space_to_desired_size(
|
||||
/ (4096 * ((1024 * 1024) / UNIV_PAGE_SIZE));
|
||||
offset_low = ((start_page_no - file_start_page_no)
|
||||
% (4096 * ((1024 * 1024) / UNIV_PAGE_SIZE)))
|
||||
* UNIV_PAGE_SIZE;
|
||||
* UNIV_PAGE_SIZE;
|
||||
#ifdef UNIV_HOTBACKUP
|
||||
success = os_file_write(node->name, node->handle, buf,
|
||||
offset_low, offset_high,
|
||||
@ -3496,6 +3561,7 @@ fil_extend_space_to_desired_size(
|
||||
|
||||
*actual_size = space->size;
|
||||
|
||||
#ifndef UNIV_HOTBACKUP
|
||||
if (space_id == 0) {
|
||||
ulint pages_per_mb = (1024 * 1024) / UNIV_PAGE_SIZE;
|
||||
|
||||
@ -3505,10 +3571,11 @@ fil_extend_space_to_desired_size(
|
||||
srv_data_file_sizes[srv_n_data_files - 1] =
|
||||
(node->size / pages_per_mb) * pages_per_mb;
|
||||
}
|
||||
#endif /* !UNIV_HOTBACKUP */
|
||||
|
||||
/*
|
||||
printf("Extended %s to %lu, actual size %lu pages\n", space->name,
|
||||
size_after_extend, *actual_size); */
|
||||
printf("Extended %s to %lu, actual size %lu pages\n", space->name,
|
||||
size_after_extend, *actual_size); */
|
||||
mutex_exit(&(system->mutex));
|
||||
|
||||
fil_flush(space_id);
|
||||
@ -3527,7 +3594,7 @@ void
|
||||
fil_extend_tablespaces_to_stored_len(void)
|
||||
/*======================================*/
|
||||
{
|
||||
fil_system_t* system = fil_system;
|
||||
fil_system_t* system = fil_system;
|
||||
fil_space_t* space;
|
||||
byte* buf;
|
||||
ulint actual_size;
|
||||
@ -3542,11 +3609,11 @@ fil_extend_tablespaces_to_stored_len(void)
|
||||
space = UT_LIST_GET_FIRST(system->space_list);
|
||||
|
||||
while (space) {
|
||||
ut_a(space->purpose == FIL_TABLESPACE);
|
||||
ut_a(space->purpose == FIL_TABLESPACE);
|
||||
|
||||
mutex_exit(&(system->mutex)); /* no need to protect with a
|
||||
mutex, because this is a single-
|
||||
threaded operation */
|
||||
mutex, because this is a
|
||||
single-threaded operation */
|
||||
error = fil_read(TRUE, space->id, 0, 0, UNIV_PAGE_SIZE, buf,
|
||||
NULL);
|
||||
ut_a(error == DB_SUCCESS);
|
||||
@ -3705,7 +3772,7 @@ fil_node_prepare_for_io(
|
||||
}
|
||||
|
||||
if (node->n_pending == 0 && space->purpose == FIL_TABLESPACE
|
||||
&& space->id != 0) {
|
||||
&& space->id != 0) {
|
||||
/* The node is in the LRU list, remove it */
|
||||
|
||||
ut_a(UT_LIST_GET_LEN(system->LRU) > 0);
|
||||
@ -3742,6 +3809,14 @@ fil_node_complete_io(
|
||||
if (type == OS_FILE_WRITE) {
|
||||
system->modification_counter++;
|
||||
node->modification_counter = system->modification_counter;
|
||||
|
||||
if (!node->space->is_in_unflushed_spaces) {
|
||||
|
||||
node->space->is_in_unflushed_spaces = TRUE;
|
||||
UT_LIST_ADD_FIRST(unflushed_spaces,
|
||||
system->unflushed_spaces,
|
||||
node->space);
|
||||
}
|
||||
}
|
||||
|
||||
if (node->n_pending == 0 && node->space->purpose == FIL_TABLESPACE
|
||||
@ -3751,6 +3826,31 @@ fil_node_complete_io(
|
||||
}
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
Report information about an invalid page access. */
|
||||
static
|
||||
void
|
||||
fil_report_invalid_page_access(
|
||||
/*===========================*/
|
||||
ulint block_offset, /* in: block offset */
|
||||
ulint space_id, /* in: space id */
|
||||
const char* space_name, /* in: space name */
|
||||
ulint byte_offset, /* in: byte offset */
|
||||
ulint len, /* in: I/O length */
|
||||
ulint type) /* in: I/O type */
|
||||
{
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to access page number %lu in space %lu,\n"
|
||||
"InnoDB: space name %s,\n"
|
||||
"InnoDB: which is outside the tablespace bounds.\n"
|
||||
"InnoDB: Byte offset %lu, len %lu, i/o type %lu.\n"
|
||||
"InnoDB: If you get this error at mysqld startup, please check that\n"
|
||||
"InnoDB: your my.cnf matches the ibdata files that you have in the\n"
|
||||
"InnoDB: MySQL server.\n",
|
||||
(ulong) block_offset, (ulong) space_id, space_name,
|
||||
(ulong) byte_offset, (ulong) len, (ulong) type);
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
Reads or writes data. This operation is asynchronous (aio). */
|
||||
|
||||
@ -3825,11 +3925,11 @@ fil_io(
|
||||
mode = OS_AIO_NORMAL;
|
||||
}
|
||||
|
||||
if (type == OS_FILE_READ) {
|
||||
srv_data_read+= len;
|
||||
} else if (type == OS_FILE_WRITE) {
|
||||
srv_data_written+= len;
|
||||
}
|
||||
if (type == OS_FILE_READ) {
|
||||
srv_data_read+= len;
|
||||
} else if (type == OS_FILE_WRITE) {
|
||||
srv_data_written+= len;
|
||||
}
|
||||
|
||||
/* Reserve the fil_system mutex and make sure that we can open at
|
||||
least one file while holding it, if the file is not already open */
|
||||
@ -3857,14 +3957,8 @@ fil_io(
|
||||
|
||||
for (;;) {
|
||||
if (node == NULL) {
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to access page number %lu in space %lu,\n"
|
||||
"InnoDB: space name %s,\n"
|
||||
"InnoDB: which is outside the tablespace bounds.\n"
|
||||
"InnoDB: Byte offset %lu, len %lu, i/o type %lu\n",
|
||||
(ulong) block_offset, (ulong) space_id,
|
||||
space->name, (ulong) byte_offset, (ulong) len,
|
||||
(ulong) type);
|
||||
fil_report_invalid_page_access(block_offset, space_id,
|
||||
space->name, byte_offset, len, type);
|
||||
|
||||
ut_error;
|
||||
}
|
||||
@ -3891,17 +3985,12 @@ fil_io(
|
||||
/* Check that at least the start offset is within the bounds of a
|
||||
single-table tablespace */
|
||||
if (space->purpose == FIL_TABLESPACE && space->id != 0
|
||||
&& node->size <= block_offset) {
|
||||
&& node->size <= block_offset) {
|
||||
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to access page number %lu in space %lu,\n"
|
||||
"InnoDB: space name %s,\n"
|
||||
"InnoDB: which is outside the tablespace bounds.\n"
|
||||
"InnoDB: Byte offset %lu, len %lu, i/o type %lu\n",
|
||||
(ulong) block_offset, (ulong) space_id,
|
||||
space->name, (ulong) byte_offset, (ulong) len,
|
||||
(ulong) type);
|
||||
ut_a(0);
|
||||
fil_report_invalid_page_access(block_offset, space_id,
|
||||
space->name, byte_offset, len, type);
|
||||
|
||||
ut_error;
|
||||
}
|
||||
|
||||
/* Now we have made the changes in the data structures of system */
|
||||
@ -3914,7 +4003,7 @@ fil_io(
|
||||
+ byte_offset;
|
||||
|
||||
ut_a(node->size - block_offset >=
|
||||
(byte_offset + len + (UNIV_PAGE_SIZE - 1)) / UNIV_PAGE_SIZE);
|
||||
(byte_offset + len + (UNIV_PAGE_SIZE - 1)) / UNIV_PAGE_SIZE);
|
||||
|
||||
/* Do aio */
|
||||
|
||||
@ -4033,7 +4122,7 @@ fil_aio_wait(
|
||||
srv_set_io_thread_op_info(segment, "native aio handle");
|
||||
#ifdef WIN_ASYNC_IO
|
||||
ret = os_aio_windows_handle(segment, 0, &fil_node,
|
||||
&message, &type);
|
||||
&message, &type);
|
||||
#elif defined(POSIX_ASYNC_IO)
|
||||
ret = os_aio_posix_handle(segment, &fil_node, &message);
|
||||
#else
|
||||
@ -4044,7 +4133,7 @@ fil_aio_wait(
|
||||
srv_set_io_thread_op_info(segment, "simulated aio handle");
|
||||
|
||||
ret = os_aio_simulated_handle(segment, &fil_node,
|
||||
&message, &type);
|
||||
&message, &type);
|
||||
}
|
||||
|
||||
ut_a(ret);
|
||||
@ -4162,6 +4251,16 @@ retry:
|
||||
skip_flush:
|
||||
if (node->flush_counter < old_mod_counter) {
|
||||
node->flush_counter = old_mod_counter;
|
||||
|
||||
if (space->is_in_unflushed_spaces
|
||||
&& fil_space_is_flushed(space)) {
|
||||
|
||||
space->is_in_unflushed_spaces = FALSE;
|
||||
|
||||
UT_LIST_REMOVE(unflushed_spaces,
|
||||
system->unflushed_spaces,
|
||||
space);
|
||||
}
|
||||
}
|
||||
|
||||
if (space->purpose == FIL_TABLESPACE) {
|
||||
@@ -4193,13 +4292,13 @@ fil_flush_file_spaces(

mutex_enter(&(system->mutex));

space = UT_LIST_GET_FIRST(system->space_list);
space = UT_LIST_GET_FIRST(system->unflushed_spaces);

while (space) {
if (space->purpose == purpose && !space->is_being_deleted) {

space->n_pending_flushes++; /* prevent dropping of the
space while we are
space->n_pending_flushes++; /* prevent dropping of
the space while we are
flushing */
mutex_exit(&(system->mutex));

@@ -4209,7 +4308,7 @@ fil_flush_file_spaces(

space->n_pending_flushes--;
}
space = UT_LIST_GET_NEXT(space_list, space);
space = UT_LIST_GET_NEXT(unflushed_spaces, space);
}

mutex_exit(&(system->mutex));
@ -4313,7 +4412,7 @@ Sets the file page type. */
|
||||
void
|
||||
fil_page_set_type(
|
||||
/*==============*/
|
||||
byte* page, /* in: file page */
|
||||
byte* page, /* in: file page */
|
||||
ulint type) /* in: type */
|
||||
{
|
||||
ut_ad(page);
|
||||
@ -4329,7 +4428,7 @@ fil_page_get_type(
|
||||
/*==============*/
|
||||
/* out: type; NOTE that if the type has not been
|
||||
written to page, the return value not defined */
|
||||
byte* page) /* in: file page */
|
||||
byte* page) /* in: file page */
|
||||
{
|
||||
ut_ad(page);
|
||||
|
||||
|
@ -236,7 +236,7 @@ ulint
|
||||
fseg_n_reserved_pages_low(
|
||||
/*======================*/
|
||||
/* out: number of reserved pages */
|
||||
fseg_inode_t* header, /* in: segment inode */
|
||||
fseg_inode_t* header, /* in: segment inode */
|
||||
ulint* used, /* out: number of pages used (<= reserved) */
|
||||
mtr_t* mtr); /* in: mtr handle */
|
||||
/************************************************************************
|
||||
@ -290,7 +290,7 @@ fseg_alloc_free_page_low(
|
||||
/* out: the allocated page number, FIL_NULL
|
||||
if no page could be allocated */
|
||||
ulint space, /* in: space */
|
||||
fseg_inode_t* seg_inode, /* in: segment inode */
|
||||
fseg_inode_t* seg_inode, /* in: segment inode */
|
||||
ulint hint, /* in: hint of which page would be desirable */
|
||||
byte direction, /* in: if the new page is needed because
|
||||
of an index page split, and records are
|
||||
@ -610,8 +610,10 @@ xdes_calc_descriptor_page(
|
||||
/* out: descriptor page offset */
|
||||
ulint offset) /* in: page offset */
|
||||
{
|
||||
ut_ad(UNIV_PAGE_SIZE > XDES_ARR_OFFSET
|
||||
+ (XDES_DESCRIBED_PER_PAGE / FSP_EXTENT_SIZE) * XDES_SIZE);
|
||||
#if UNIV_PAGE_SIZE <= XDES_ARR_OFFSET \
|
||||
+ (XDES_DESCRIBED_PER_PAGE / FSP_EXTENT_SIZE) * XDES_SIZE
|
||||
# error
|
||||
#endif
|
||||
|
||||
return(ut_2pow_round(offset, XDES_DESCRIBED_PER_PAGE));
|
||||
}
|
||||
@ -690,7 +692,7 @@ xdes_get_descriptor_with_space_hdr(
|
||||
}
|
||||
|
||||
return(descr_page + XDES_ARR_OFFSET
|
||||
+ XDES_SIZE * xdes_calc_descriptor_index(offset));
|
||||
+ XDES_SIZE * xdes_calc_descriptor_index(offset));
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
@ -789,7 +791,7 @@ Inits a file page whose prior contents should be ignored. */
|
||||
static
|
||||
void
|
||||
fsp_init_file_page_low(
|
||||
/*=====================*/
|
||||
/*===================*/
|
||||
byte* ptr) /* in: pointer to a page */
|
||||
{
|
||||
page_t* page;
|
||||
@ -807,7 +809,6 @@ fsp_init_file_page_low(
|
||||
|
||||
/***************************************************************
|
||||
Inits a file page whose prior contents should be ignored. */
|
||||
|
||||
static
|
||||
void
|
||||
fsp_init_file_page(
|
||||
@ -891,9 +892,13 @@ fsp_header_init(
|
||||
|
||||
fsp_init_file_page(page, mtr);
|
||||
|
||||
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_TYPE_FSP_HDR,
|
||||
MLOG_2BYTES, mtr);
|
||||
|
||||
header = FSP_HEADER_OFFSET + page;
|
||||
|
||||
mlog_write_ulint(header + FSP_SPACE_ID, space, MLOG_4BYTES, mtr);
|
||||
mlog_write_ulint(header + FSP_NOT_USED, 0, MLOG_4BYTES, mtr);
|
||||
|
||||
mlog_write_ulint(header + FSP_SIZE, size, MLOG_4BYTES, mtr);
|
||||
mlog_write_ulint(header + FSP_FREE_LIMIT, 0, MLOG_4BYTES, mtr);
|
||||
@ -933,10 +938,10 @@ fsp_header_get_space_id(
|
||||
id = mach_read_from_4(page + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID);
|
||||
|
||||
if (id != fsp_id) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: space id in fsp header %lu, but in the page header %lu\n",
|
||||
(ulong) fsp_id,
|
||||
(ulong) id);
|
||||
(ulong) fsp_id, (ulong) id);
|
||||
|
||||
return(ULINT_UNDEFINED);
|
||||
}
|
||||
|
||||
@ -1116,10 +1121,10 @@ fsp_try_extend_data_file(
|
||||
size_increase = SRV_AUTO_EXTEND_INCREMENT;
|
||||
}
|
||||
} else {
|
||||
if (space == 0) {
|
||||
if (space == 0) {
|
||||
size_increase = SRV_AUTO_EXTEND_INCREMENT;
|
||||
} else {
|
||||
/* We extend single-table tablespaces first one extent
|
||||
/* We extend single-table tablespaces first one extent
|
||||
at a time, but for bigger tablespaces more. It is not
|
||||
enough to extend always by one extent, because some
|
||||
extents are frag page extents. */
|
||||
@ -1135,14 +1140,14 @@ fsp_try_extend_data_file(
|
||||
|
||||
*actual_increase = new_size - old_size;
|
||||
|
||||
return(FALSE);
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
size = FSP_EXTENT_SIZE;
|
||||
}
|
||||
|
||||
if (size < 32 * FSP_EXTENT_SIZE) {
|
||||
size_increase = FSP_EXTENT_SIZE;
|
||||
size_increase = FSP_EXTENT_SIZE;
|
||||
} else {
|
||||
/* Below in fsp_fill_free_list() we assume
|
||||
that we add at most FSP_FREE_ADD extents at
|
||||
@ -1192,7 +1197,7 @@ fsp_fill_free_list(
|
||||
ulint limit;
|
||||
ulint size;
|
||||
xdes_t* descr;
|
||||
ulint count = 0;
|
||||
ulint count = 0;
|
||||
ulint frag_n_used;
|
||||
page_t* descr_page;
|
||||
page_t* ibuf_page;
|
||||
@ -1225,7 +1230,7 @@ fsp_fill_free_list(
|
||||
i = limit;
|
||||
|
||||
while ((init_space && i < 1)
|
||||
|| ((i + FSP_EXTENT_SIZE <= size) && (count < FSP_FREE_ADD))) {
|
||||
|| ((i + FSP_EXTENT_SIZE <= size) && (count < FSP_FREE_ADD))) {
|
||||
|
||||
mlog_write_ulint(header + FSP_FREE_LIMIT, i + FSP_EXTENT_SIZE,
|
||||
MLOG_4BYTES, mtr);
|
||||
@ -1233,7 +1238,7 @@ fsp_fill_free_list(
|
||||
/* Update the free limit info in the log system and make
|
||||
a checkpoint */
|
||||
if (space == 0) {
|
||||
log_fsp_current_free_limit_set_and_checkpoint(
|
||||
log_fsp_current_free_limit_set_and_checkpoint(
|
||||
(i + FSP_EXTENT_SIZE)
|
||||
/ ((1024 * 1024) / UNIV_PAGE_SIZE));
|
||||
}
|
||||
@ -1252,6 +1257,8 @@ fsp_fill_free_list(
|
||||
SYNC_FSP_PAGE);
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
fsp_init_file_page(descr_page, mtr);
|
||||
mlog_write_ulint(descr_page + FIL_PAGE_TYPE,
|
||||
FIL_PAGE_TYPE_XDES, MLOG_2BYTES, mtr);
|
||||
}
|
||||
|
||||
/* Initialize the ibuf bitmap page in a separate
|
||||
@ -1279,7 +1286,9 @@ fsp_fill_free_list(
|
||||
mtr);
|
||||
xdes_init(descr, mtr);
|
||||
|
||||
ut_ad(XDES_DESCRIBED_PER_PAGE % FSP_EXTENT_SIZE == 0);
|
||||
#if XDES_DESCRIBED_PER_PAGE % FSP_EXTENT_SIZE
|
||||
# error "XDES_DESCRIBED_PER_PAGE % FSP_EXTENT_SIZE != 0"
|
||||
#endif
|
||||
|
||||
if (0 == i % XDES_DESCRIBED_PER_PAGE) {
|
||||
|
||||
@ -1289,18 +1298,18 @@ fsp_fill_free_list(
|
||||
|
||||
xdes_set_bit(descr, XDES_FREE_BIT, 0, FALSE, mtr);
|
||||
xdes_set_bit(descr, XDES_FREE_BIT,
|
||||
FSP_IBUF_BITMAP_OFFSET, FALSE, mtr);
|
||||
FSP_IBUF_BITMAP_OFFSET, FALSE, mtr);
|
||||
xdes_set_state(descr, XDES_FREE_FRAG, mtr);
|
||||
|
||||
flst_add_last(header + FSP_FREE_FRAG,
|
||||
descr + XDES_FLST_NODE, mtr);
|
||||
descr + XDES_FLST_NODE, mtr);
|
||||
frag_n_used = mtr_read_ulint(header + FSP_FRAG_N_USED,
|
||||
MLOG_4BYTES, mtr);
|
||||
MLOG_4BYTES, mtr);
|
||||
mlog_write_ulint(header + FSP_FRAG_N_USED,
|
||||
frag_n_used + 2, MLOG_4BYTES, mtr);
|
||||
frag_n_used + 2, MLOG_4BYTES, mtr);
|
||||
} else {
|
||||
flst_add_last(header + FSP_FREE,
|
||||
descr + XDES_FLST_NODE, mtr);
|
||||
descr + XDES_FLST_NODE, mtr);
|
||||
count++;
|
||||
}
|
||||
|
||||
@ -1436,22 +1445,22 @@ fsp_alloc_free_page(
|
||||
space_size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES, mtr);
|
||||
|
||||
if (space_size <= page_no) {
|
||||
/* It must be that we are extending a single-table tablespace
|
||||
/* It must be that we are extending a single-table tablespace
|
||||
whose size is still < 64 pages */
|
||||
|
||||
ut_a(space != 0);
|
||||
if (page_no >= FSP_EXTENT_SIZE) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: trying to extend a single-table tablespace %lu\n"
|
||||
"InnoDB: by single page(s) though the space size %lu. Page no %lu.\n",
|
||||
(ulong) space, (ulong) space_size, (ulong) page_no);
|
||||
return(FIL_NULL);
|
||||
}
|
||||
success = fsp_try_extend_data_file_with_pages(space, page_no,
|
||||
header, mtr);
|
||||
header, mtr);
|
||||
if (!success) {
|
||||
/* No disk space left */
|
||||
return(FIL_NULL);
|
||||
return(FIL_NULL);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1572,7 +1581,7 @@ fsp_free_page(
|
||||
}
|
||||
|
||||
if (xdes_is_free(descr, mtr)) {
|
||||
/* The extent has become free: move it to another list */
|
||||
/* The extent has become free: move it to another list */
|
||||
flst_remove(header + FSP_FREE_FRAG, descr + XDES_FLST_NODE,
|
||||
mtr);
|
||||
fsp_free_extent(space, page, mtr);
|
||||
@ -1716,7 +1725,8 @@ fsp_alloc_seg_inode_page(
|
||||
|
||||
buf_block_align(page)->check_index_page_at_flush = FALSE;
|
||||
|
||||
fil_page_set_type(page, FIL_PAGE_INODE);
|
||||
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_INODE,
|
||||
MLOG_2BYTES, mtr);
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
buf_page_dbg_add_level(page, SYNC_FSP_PAGE);
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
@ -1776,7 +1786,7 @@ fsp_alloc_seg_inode(
|
||||
inode = fsp_seg_inode_page_get_nth_inode(page, n, mtr);
|
||||
|
||||
if (ULINT_UNDEFINED == fsp_seg_inode_page_find_free(page, n + 1,
|
||||
mtr)) {
|
||||
mtr)) {
|
||||
/* There are no other unused headers left on the page: move it
|
||||
to another list */
|
||||
|
||||
@ -1865,7 +1875,7 @@ ulint
|
||||
fseg_get_nth_frag_page_no(
|
||||
/*======================*/
|
||||
/* out: page number, FIL_NULL if not in use */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
ulint n, /* in: slot index */
|
||||
mtr_t* mtr __attribute__((unused))) /* in: mtr handle */
|
||||
{
|
||||
@ -1883,7 +1893,7 @@ UNIV_INLINE
|
||||
void
|
||||
fseg_set_nth_frag_page_no(
|
||||
/*======================*/
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
ulint n, /* in: slot index */
|
||||
ulint page_no,/* in: page number to set */
|
||||
mtr_t* mtr) /* in: mtr handle */
|
||||
@ -1905,7 +1915,7 @@ fseg_find_free_frag_page_slot(
|
||||
/*==========================*/
|
||||
/* out: slot index; ULINT_UNDEFINED if none
|
||||
found */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
mtr_t* mtr) /* in: mtr handle */
|
||||
{
|
||||
ulint i;
|
||||
@ -1933,7 +1943,7 @@ fseg_find_last_used_frag_page_slot(
|
||||
/*===============================*/
|
||||
/* out: slot index; ULINT_UNDEFINED if none
|
||||
found */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
mtr_t* mtr) /* in: mtr handle */
|
||||
{
|
||||
ulint i;
|
||||
@ -1961,7 +1971,7 @@ ulint
|
||||
fseg_get_n_frag_pages(
|
||||
/*==================*/
|
||||
/* out: number of fragment pages */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
mtr_t* mtr) /* in: mtr handle */
|
||||
{
|
||||
ulint i;
|
||||
@ -2021,8 +2031,8 @@ fseg_create_general(
|
||||
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad(!mutex_own(&kernel_mutex)
|
||||
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
|
||||
MTR_MEMO_X_LOCK));
|
||||
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
|
||||
MTR_MEMO_X_LOCK));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
latch = fil_space_get_latch(space);
|
||||
|
||||
@ -2087,6 +2097,8 @@ fseg_create_general(
|
||||
|
||||
header = byte_offset
|
||||
+ buf_page_get(space, page, RW_X_LATCH, mtr);
|
||||
mlog_write_ulint(header - byte_offset + FIL_PAGE_TYPE,
|
||||
FIL_PAGE_TYPE_SYS, MLOG_2BYTES, mtr);
|
||||
}
|
||||
|
||||
mlog_write_ulint(header + FSEG_HDR_OFFSET,
|
||||
@ -2137,7 +2149,7 @@ ulint
|
||||
fseg_n_reserved_pages_low(
|
||||
/*======================*/
|
||||
/* out: number of reserved pages */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
fseg_inode_t* inode, /* in: segment inode */
|
||||
ulint* used, /* out: number of pages used (<= reserved) */
|
||||
mtr_t* mtr) /* in: mtr handle */
|
||||
{
|
||||
@ -2167,7 +2179,7 @@ ulint
|
||||
fseg_n_reserved_pages(
|
||||
/*==================*/
|
||||
/* out: number of reserved pages */
|
||||
fseg_header_t* header, /* in: segment header */
|
||||
fseg_header_t* header, /* in: segment header */
|
||||
ulint* used, /* out: number of pages used (<= reserved) */
|
||||
mtr_t* mtr) /* in: mtr handle */
|
||||
{
|
||||
@ -2179,8 +2191,8 @@ fseg_n_reserved_pages(
|
||||
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad(!mutex_own(&kernel_mutex)
|
||||
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
|
||||
MTR_MEMO_X_LOCK));
|
||||
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
|
||||
MTR_MEMO_X_LOCK));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
mtr_x_lock(fil_space_get_latch(space), mtr);
|
||||
|
||||
@@ -2233,11 +2245,11 @@ fseg_fill_free_list(
descr = xdes_get_descriptor(space, hint, mtr);

if ((descr == NULL) ||
(XDES_FREE != xdes_get_state(descr, mtr))) {
(XDES_FREE != xdes_get_state(descr, mtr))) {

/* We cannot allocate the desired extent: stop */

return;
return;
}

descr = fsp_alloc_free_extent(space, hint, mtr);
@@ -2269,7 +2281,7 @@ fseg_alloc_free_extent(
{
xdes_t* descr;
dulint seg_id;
fil_addr_t first;
fil_addr_t first;

if (flst_get_len(inode + FSEG_FREE, mtr) > 0) {
/* Segment free list is not empty, allocate from it */
@@ -2311,7 +2323,7 @@ fseg_alloc_free_page_low(
/* out: the allocated page number, FIL_NULL
if no page could be allocated */
ulint space, /* in: space */
fseg_inode_t* seg_inode, /* in: segment inode */
fseg_inode_t* seg_inode, /* in: segment inode */
ulint hint, /* in: hint of which page would be desirable */
byte direction, /* in: if the new page is needed because
of an index page split, and records are
@@ -2347,7 +2359,7 @@ fseg_alloc_free_page_low(
space_header = fsp_get_space_header(space, mtr);

descr = xdes_get_descriptor_with_space_hdr(space_header, space,
hint, mtr);
hint, mtr);
if (descr == NULL) {
/* Hint outside space or too high above free limit: reset
hint */
@@ -2358,9 +2370,9 @@ fseg_alloc_free_page_low(
/* In the big if-else below we look for ret_page and ret_descr */
/*-------------------------------------------------------------*/
if ((xdes_get_state(descr, mtr) == XDES_FSEG)
&& (0 == ut_dulint_cmp(mtr_read_dulint(descr + XDES_ID,
&& (0 == ut_dulint_cmp(mtr_read_dulint(descr + XDES_ID,
mtr), seg_id))
&& (xdes_get_bit(descr, XDES_FREE_BIT,
&& (xdes_get_bit(descr, XDES_FREE_BIT,
hint % FSP_EXTENT_SIZE, mtr) == TRUE)) {

/* 1. We can take the hinted page
@@ -2409,9 +2421,9 @@ fseg_alloc_free_page_low(
}
/*-------------------------------------------------------------*/
} else if ((xdes_get_state(descr, mtr) == XDES_FSEG)
&& (0 == ut_dulint_cmp(mtr_read_dulint(descr + XDES_ID,
&& (0 == ut_dulint_cmp(mtr_read_dulint(descr + XDES_ID,
mtr), seg_id))
&& (!xdes_is_full(descr, mtr))) {
&& (!xdes_is_full(descr, mtr))) {

/* 4. We can take the page from the same extent as the
======================================================
@@ -2484,11 +2496,11 @@ fseg_alloc_free_page_low(
space_size = fil_space_get_size(space);

if (space_size <= ret_page) {
/* It must be that we are extending a single-table
/* It must be that we are extending a single-table
tablespace whose size is still < 64 pages */

if (ret_page >= FSP_EXTENT_SIZE) {
fprintf(stderr,
fprintf(stderr,
"InnoDB: Error (2): trying to extend a single-table tablespace %lu\n"
"InnoDB: by single page(s) though the space size %lu. Page no %lu.\n",
(ulong) space, (ulong) space_size,
@@ -2572,8 +2584,8 @@ fseg_alloc_free_page_general(

#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
latch = fil_space_get_latch(space);

@@ -2584,7 +2596,7 @@ fseg_alloc_free_page_general(
excess pages from the insert buffer free list */

if (space == 0) {
ibuf_free_excess_pages(space);
ibuf_free_excess_pages(space);
}
}

@@ -2642,7 +2654,7 @@ fsp_reserve_free_pages(
/*===================*/
/* out: TRUE if there were >= 3 free
pages, or we were able to extend */
ulint space, /* in: space id, must be != 0 */
ulint space, /* in: space id, must be != 0 */
fsp_header_t* space_header, /* in: header of that space,
x-latched */
ulint size, /* in: size of the tablespace in pages,
@@ -2722,8 +2734,8 @@ fsp_reserve_free_extents(
ut_ad(mtr);
#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
*n_reserved = n_ext;

@@ -2842,10 +2854,11 @@ fsp_get_available_space_in_free_extents(
mtr_commit(&mtr);

if (size < FSP_EXTENT_SIZE) {
ut_a(space != 0); /* This must be a single-table
tablespace */
return(0); /* TODO: count free frag pages and return
a value based on that */
ut_a(space != 0); /* This must be a single-table
tablespace */

return(0); /* TODO: count free frag pages and
return a value based on that */
}

/* Below we play safe when counting free extents above the free limit:
@@ -2987,7 +3000,7 @@ fseg_free_page_low(

for (i = 0;; i++) {
if (fseg_get_nth_frag_page_no(seg_inode, i, mtr)
== page) {
== page) {

fseg_set_nth_frag_page_no(seg_inode, i,
FIL_NULL, mtr);
@@ -3023,7 +3036,7 @@ fseg_free_page_low(
ut_print_buf(stderr, seg_inode, 40);
putc('\n', stderr);

fprintf(stderr,
fprintf(stderr,
"InnoDB: Serious error: InnoDB is trying to free space %lu page %lu,\n"
"InnoDB: which does not belong to segment %lu %lu but belongs\n"
"InnoDB: to segment %lu %lu.\n",
@@ -3056,7 +3069,7 @@ fseg_free_page_low(
xdes_set_bit(descr, XDES_CLEAN_BIT, page % FSP_EXTENT_SIZE, TRUE, mtr);

if (xdes_is_free(descr, mtr)) {
/* The extent has become free: free it to space */
/* The extent has become free: free it to space */
flst_remove(seg_inode + FSEG_NOT_FULL,
descr + XDES_FLST_NODE, mtr);
fsp_free_extent(space, page, mtr);
@@ -3078,8 +3091,8 @@ fseg_free_page(

#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);

@@ -3116,7 +3129,7 @@ fseg_free_extent(
ut_a(xdes_get_state(descr, mtr) == XDES_FSEG);
ut_a(0 == ut_dulint_cmp(
mtr_read_dulint(descr + XDES_ID, mtr),
mtr_read_dulint(seg_inode + FSEG_ID, mtr)));
mtr_read_dulint(seg_inode + FSEG_ID, mtr)));

first_page_in_extent = page - (page % FSP_EXTENT_SIZE);

@@ -3157,7 +3170,7 @@ fseg_free_extent(
#ifdef UNIV_DEBUG_FILE_ACCESSES
for (i = 0; i < FSP_EXTENT_SIZE; i++) {

buf_page_set_file_page_was_freed(space,
buf_page_set_file_page_was_freed(space,
first_page_in_extent + i);
}
#endif
@@ -3189,8 +3202,8 @@ fseg_free_step(

#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);

@@ -3264,8 +3277,8 @@ fseg_free_step_not_header(

#ifdef UNIV_SYNC_DEBUG
ut_ad(!mutex_own(&kernel_mutex)
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
|| mtr_memo_contains(mtr, fil_space_get_latch(space),
MTR_MEMO_X_LOCK));
#endif /* UNIV_SYNC_DEBUG */
mtr_x_lock(fil_space_get_latch(space), mtr);

@@ -3524,7 +3537,7 @@ fseg_print_low(
ulint reserved;
ulint used;
ulint page_no;
dulint d_var;
dulint d_var;

ut_ad(mtr_memo_contains(mtr, buf_block_align(inode),
MTR_MEMO_PAGE_X_FIX));
@@ -3551,7 +3564,7 @@ fseg_print_low(
(ulong) seg_id_high, (ulong) seg_id_low, (ulong) space, (ulong) page_no,
(ulong) reserved, (ulong) used, (ulong) n_full,
(ulong) n_frag, (ulong) n_free, (ulong) n_not_full,
(ulong) n_used);
(ulong) n_used);
}

/***********************************************************************
@@ -3716,32 +3729,36 @@ fsp_validate(

while (!fil_addr_is_null(node_addr)) {

for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {
for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {

mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);
mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);

seg_inode_page = fut_get_ptr(space, node_addr, RW_X_LATCH,
&mtr) - FSEG_INODE_PAGE_NODE;
seg_inode_page = fut_get_ptr(space, node_addr,
RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE;

seg_inode = fsp_seg_inode_page_get_nth_inode(seg_inode_page,
n, &mtr);
ut_a(ut_dulint_cmp(mach_read_from_8(seg_inode + FSEG_ID),
ut_dulint_zero) != 0);
fseg_validate_low(seg_inode, &mtr);
seg_inode = fsp_seg_inode_page_get_nth_inode(
seg_inode_page, n, &mtr);
ut_a(ut_dulint_cmp(
mach_read_from_8(seg_inode + FSEG_ID),
ut_dulint_zero) != 0);
fseg_validate_low(seg_inode, &mtr);

descr_count += flst_get_len(seg_inode + FSEG_FREE, &mtr);
descr_count += flst_get_len(seg_inode + FSEG_FULL, &mtr);
descr_count += flst_get_len(seg_inode + FSEG_NOT_FULL, &mtr);
descr_count += flst_get_len(seg_inode + FSEG_FREE,
&mtr);
descr_count += flst_get_len(seg_inode + FSEG_FULL,
&mtr);
descr_count += flst_get_len(seg_inode + FSEG_NOT_FULL,
&mtr);

n_used2 += fseg_get_n_frag_pages(seg_inode, &mtr);
n_used2 += fseg_get_n_frag_pages(seg_inode, &mtr);

next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}
next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}

node_addr = next_node_addr;
node_addr = next_node_addr;
}

mtr_start(&mtr);
@@ -3757,45 +3774,48 @@ fsp_validate(

while (!fil_addr_is_null(node_addr)) {

for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {
for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {

mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);
mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);

seg_inode_page = fut_get_ptr(space, node_addr, RW_X_LATCH,
&mtr) - FSEG_INODE_PAGE_NODE;
seg_inode_page = fut_get_ptr(space, node_addr,
RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE;

seg_inode = fsp_seg_inode_page_get_nth_inode(seg_inode_page,
n, &mtr);
if (ut_dulint_cmp(mach_read_from_8(seg_inode + FSEG_ID),
ut_dulint_zero) != 0) {
fseg_validate_low(seg_inode, &mtr);
seg_inode = fsp_seg_inode_page_get_nth_inode(
seg_inode_page, n, &mtr);
if (ut_dulint_cmp(mach_read_from_8(
seg_inode + FSEG_ID),
ut_dulint_zero) != 0) {
fseg_validate_low(seg_inode, &mtr);

descr_count += flst_get_len(seg_inode + FSEG_FREE,
&mtr);
descr_count += flst_get_len(seg_inode + FSEG_FULL,
&mtr);
descr_count += flst_get_len(seg_inode + FSEG_NOT_FULL,
&mtr);
n_used2 += fseg_get_n_frag_pages(seg_inode, &mtr);
descr_count += flst_get_len(
seg_inode + FSEG_FREE, &mtr);
descr_count += flst_get_len(
seg_inode + FSEG_FULL, &mtr);
descr_count += flst_get_len(
seg_inode + FSEG_NOT_FULL, &mtr);
n_used2 += fseg_get_n_frag_pages(
seg_inode, &mtr);
}

next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}

next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}

node_addr = next_node_addr;
node_addr = next_node_addr;
}

ut_a(descr_count * FSP_EXTENT_SIZE == free_limit);
ut_a(n_used + n_full_frag_pages
== n_used2 + 2* ((free_limit + XDES_DESCRIBED_PER_PAGE - 1)
/ XDES_DESCRIBED_PER_PAGE)
+ seg_inode_len_full + seg_inode_len_free);
/ XDES_DESCRIBED_PER_PAGE)
+ seg_inode_len_full + seg_inode_len_free);
ut_a(frag_n_used == n_used);

mtr_commit(&mtr2);

return(TRUE);
}

@@ -3822,7 +3842,7 @@ fsp_print(
ulint seg_id_high;
ulint n;
ulint n_segs = 0;
dulint d_var;
dulint d_var;
mtr_t mtr;
mtr_t mtr2;

@@ -3879,28 +3899,29 @@ fsp_print(

while (!fil_addr_is_null(node_addr)) {

for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {
for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {

mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);
mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);

seg_inode_page = fut_get_ptr(space, node_addr, RW_X_LATCH,
&mtr) - FSEG_INODE_PAGE_NODE;
seg_inode_page = fut_get_ptr(space, node_addr,
RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE;

seg_inode = fsp_seg_inode_page_get_nth_inode(seg_inode_page,
n, &mtr);
ut_a(ut_dulint_cmp(mach_read_from_8(seg_inode + FSEG_ID),
ut_dulint_zero) != 0);
fseg_print_low(seg_inode, &mtr);
seg_inode = fsp_seg_inode_page_get_nth_inode(
seg_inode_page, n, &mtr);
ut_a(ut_dulint_cmp(mach_read_from_8(
seg_inode + FSEG_ID),
ut_dulint_zero) != 0);
fseg_print_low(seg_inode, &mtr);

n_segs++;
n_segs++;

next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}
next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}

node_addr = next_node_addr;
node_addr = next_node_addr;
}

mtr_start(&mtr);
@@ -3914,29 +3935,30 @@ fsp_print(

while (!fil_addr_is_null(node_addr)) {

for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {
for (n = 0; n < FSP_SEG_INODES_PER_PAGE; n++) {

mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);
mtr_start(&mtr);
mtr_x_lock(fil_space_get_latch(space), &mtr);

seg_inode_page = fut_get_ptr(space, node_addr, RW_X_LATCH,
&mtr) - FSEG_INODE_PAGE_NODE;
seg_inode_page = fut_get_ptr(space, node_addr,
RW_X_LATCH, &mtr) - FSEG_INODE_PAGE_NODE;

seg_inode = fsp_seg_inode_page_get_nth_inode(seg_inode_page,
n, &mtr);
if (ut_dulint_cmp(mach_read_from_8(seg_inode + FSEG_ID),
ut_dulint_zero) != 0) {
seg_inode = fsp_seg_inode_page_get_nth_inode(
seg_inode_page, n, &mtr);
if (ut_dulint_cmp(mach_read_from_8(
seg_inode + FSEG_ID),
ut_dulint_zero) != 0) {

fseg_print_low(seg_inode, &mtr);
n_segs++;
fseg_print_low(seg_inode, &mtr);
n_segs++;
}

next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}

next_node_addr = flst_get_next_addr(seg_inode_page
+ FSEG_INODE_PAGE_NODE, &mtr);
mtr_commit(&mtr);
}

node_addr = next_node_addr;
node_addr = next_node_addr;
}

mtr_commit(&mtr2);

@@ -504,7 +504,7 @@ flst_print(

ut_ad(base && mtr);
ut_ad(mtr_memo_contains(mtr, buf_block_align(base),
MTR_MEMO_PAGE_X_FIX));
MTR_MEMO_PAGE_X_FIX));
frame = buf_frame_align(base);

len = flst_get_len(base, mtr);
@@ -512,7 +512,7 @@ flst_print(
fprintf(stderr,
"FILE-BASED LIST:\n"
"Base node in space %lu page %lu byte offset %lu; len %lu\n",
(ulong) buf_frame_get_space_id(frame),
(ulong) buf_frame_get_page_no(frame),
(ulong) (base - frame), (ulong) len);
(ulong) buf_frame_get_space_id(frame),
(ulong) buf_frame_get_page_no(frame),
(ulong) (base - frame), (ulong) len);
}

@@ -281,20 +281,26 @@ ha_remove_all_nodes_to_page(
}

/*****************************************************************
Validates a hash table. */
Validates a given range of the cells in hash table. */

ibool
ha_validate(
/*========*/
/* out: TRUE if ok */
hash_table_t* table) /* in: hash table */
/* out: TRUE if ok */
hash_table_t* table, /* in: hash table */
ulint start_index, /* in: start index */
ulint end_index) /* in: end index */
{
hash_cell_t* cell;
ha_node_t* node;
ibool ok = TRUE;
ulint i;

for (i = 0; i < hash_get_n_cells(table); i++) {
ut_a(start_index <= end_index);
ut_a(start_index < hash_get_n_cells(table));
ut_a(end_index < hash_get_n_cells(table));

for (i = start_index; i <= end_index; i++) {

cell = hash_get_nth_cell(table, i);

@ -19,8 +19,8 @@ Reserves the mutex for a fold value in a hash table. */
|
||||
void
|
||||
hash_mutex_enter(
|
||||
/*=============*/
|
||||
hash_table_t* table, /* in: hash table */
|
||||
ulint fold) /* in: fold */
|
||||
hash_table_t* table, /* in: hash table */
|
||||
ulint fold) /* in: fold */
|
||||
{
|
||||
mutex_enter(hash_get_mutex(table, fold));
|
||||
}
|
||||
@ -31,8 +31,8 @@ Releases the mutex for a fold value in a hash table. */
|
||||
void
|
||||
hash_mutex_exit(
|
||||
/*============*/
|
||||
hash_table_t* table, /* in: hash table */
|
||||
ulint fold) /* in: fold */
|
||||
hash_table_t* table, /* in: hash table */
|
||||
ulint fold) /* in: fold */
|
||||
{
|
||||
mutex_exit(hash_get_mutex(table, fold));
|
||||
}
|
||||
@ -43,7 +43,7 @@ Reserves all the mutexes of a hash table, in an ascending order. */
|
||||
void
|
||||
hash_mutex_enter_all(
|
||||
/*=================*/
|
||||
hash_table_t* table) /* in: hash table */
|
||||
hash_table_t* table) /* in: hash table */
|
||||
{
|
||||
ulint i;
|
||||
|
||||
@ -59,7 +59,7 @@ Releases all the mutexes of a hash table. */
|
||||
void
|
||||
hash_mutex_exit_all(
|
||||
/*================*/
|
||||
hash_table_t* table) /* in: hash table */
|
||||
hash_table_t* table) /* in: hash table */
|
||||
{
|
||||
ulint i;
|
||||
|
||||
|
@ -29,7 +29,7 @@ Created 7/19/1997 Heikki Tuuri
|
||||
#include "log0recv.h"
|
||||
#include "que0que.h"
|
||||
|
||||
/* STRUCTURE OF AN INSERT BUFFER RECORD
|
||||
/* STRUCTURE OF AN INSERT BUFFER RECORD
|
||||
|
||||
In versions < 4.1.x:
|
||||
|
||||
@ -140,8 +140,7 @@ access order rules. */
|
||||
/* The insert buffer control structure */
|
||||
ibuf_t* ibuf = NULL;
|
||||
|
||||
static
|
||||
ulint ibuf_rnd = 986058871;
|
||||
static ulint ibuf_rnd = 986058871;
|
||||
|
||||
ulint ibuf_flush_count = 0;
|
||||
|
||||
@ -533,7 +532,7 @@ ibuf_data_init_for_space(
|
||||
|
||||
sprintf(buf, "SYS_IBUF_TABLE_%lu", (ulong) space);
|
||||
/* use old-style record format for the insert buffer */
|
||||
table = dict_mem_table_create(buf, space, 2, FALSE);
|
||||
table = dict_mem_table_create(buf, space, 2, 0);
|
||||
|
||||
dict_mem_table_add_col(table, "PAGE_NO", DATA_BINARY, 0, 0, 0);
|
||||
dict_mem_table_add_col(table, "TYPES", DATA_BINARY, 0, 0, 0);
|
||||
@ -545,8 +544,8 @@ ibuf_data_init_for_space(
|
||||
index = dict_mem_index_create(buf, "CLUST_IND", space,
|
||||
DICT_CLUSTERED | DICT_UNIVERSAL | DICT_IBUF,2);
|
||||
|
||||
dict_mem_index_add_field(index, "PAGE_NO", 0, 0);
|
||||
dict_mem_index_add_field(index, "TYPES", 0, 0);
|
||||
dict_mem_index_add_field(index, "PAGE_NO", 0);
|
||||
dict_mem_index_add_field(index, "TYPES", 0);
|
||||
|
||||
index->id = ut_dulint_add(DICT_IBUF_ID_MIN, space);
|
||||
|
||||
@ -574,18 +573,18 @@ ibuf_bitmap_page_init(
|
||||
{
|
||||
ulint bit_offset;
|
||||
ulint byte_offset;
|
||||
ulint i;
|
||||
|
||||
/* Write all zeros to the bitmap */
|
||||
|
||||
bit_offset = XDES_DESCRIBED_PER_PAGE * IBUF_BITS_PER_PAGE;
|
||||
|
||||
byte_offset = bit_offset / 8 + 1;
|
||||
byte_offset = bit_offset / 8 + 1; /* better: (bit_offset + 7) / 8 */
|
||||
|
||||
for (i = IBUF_BITMAP; i < IBUF_BITMAP + byte_offset; i++) {
|
||||
fil_page_set_type(page, FIL_PAGE_IBUF_BITMAP);
|
||||
|
||||
*(page + i) = (byte)0;
|
||||
}
|
||||
memset(page + IBUF_BITMAP, 0, byte_offset);
|
||||
|
||||
/* The remaining area (up to the page trailer) is uninitialized. */
|
||||
|
||||
mlog_write_initial_log_record(page, MLOG_IBUF_BITMAP_INIT, mtr);
|
||||
}
|
||||
@ -621,8 +620,9 @@ ibuf_bitmap_page_get_bits(
|
||||
page_t* page, /* in: bitmap page */
|
||||
ulint page_no,/* in: page whose bits to get */
|
||||
ulint bit, /* in: IBUF_BITMAP_FREE, IBUF_BITMAP_BUFFERED, ... */
|
||||
mtr_t* mtr __attribute__((unused))) /* in: mtr containing an x-latch
|
||||
to the bitmap page */
|
||||
mtr_t* mtr __attribute__((unused))) /* in: mtr containing an
|
||||
x-latch to the bitmap
|
||||
page */
|
||||
{
|
||||
ulint byte_offset;
|
||||
ulint bit_offset;
|
||||
@ -630,12 +630,14 @@ ibuf_bitmap_page_get_bits(
|
||||
ulint value;
|
||||
|
||||
ut_ad(bit < IBUF_BITS_PER_PAGE);
|
||||
ut_ad(IBUF_BITS_PER_PAGE % 2 == 0);
|
||||
#if IBUF_BITS_PER_PAGE % 2
|
||||
# error "IBUF_BITS_PER_PAGE % 2 != 0"
|
||||
#endif
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
|
||||
bit_offset = (page_no % XDES_DESCRIBED_PER_PAGE) * IBUF_BITS_PER_PAGE
|
||||
+ bit;
|
||||
+ bit;
|
||||
|
||||
byte_offset = bit_offset / 8;
|
||||
bit_offset = bit_offset % 8;
|
||||
@ -672,15 +674,18 @@ ibuf_bitmap_page_set_bits(
|
||||
ulint map_byte;
|
||||
|
||||
ut_ad(bit < IBUF_BITS_PER_PAGE);
|
||||
ut_ad(IBUF_BITS_PER_PAGE % 2 == 0);
|
||||
#if IBUF_BITS_PER_PAGE % 2
|
||||
# error "IBUF_BITS_PER_PAGE % 2 != 0"
|
||||
#endif
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX));
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
ut_a((bit != IBUF_BITMAP_BUFFERED) || (val != FALSE)
|
||||
|| (0 == ibuf_count_get(buf_frame_get_space_id(page), page_no)));
|
||||
|| (0 == ibuf_count_get(buf_frame_get_space_id(page),
|
||||
page_no)));
|
||||
#endif
|
||||
bit_offset = (page_no % XDES_DESCRIBED_PER_PAGE) * IBUF_BITS_PER_PAGE
|
||||
+ bit;
|
||||
+ bit;
|
||||
|
||||
byte_offset = bit_offset / 8;
|
||||
bit_offset = bit_offset % 8;
|
||||
@ -715,8 +720,8 @@ ibuf_bitmap_page_no_calc(
|
||||
ulint page_no) /* in: tablespace page number */
|
||||
{
|
||||
return(FSP_IBUF_BITMAP_OFFSET
|
||||
+ XDES_DESCRIBED_PER_PAGE
|
||||
* (page_no / XDES_DESCRIBED_PER_PAGE));
|
||||
+ XDES_DESCRIBED_PER_PAGE
|
||||
* (page_no / XDES_DESCRIBED_PER_PAGE));
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
@ -1122,13 +1127,18 @@ ibuf_dummy_index_create(
|
||||
{
|
||||
dict_table_t* table;
|
||||
dict_index_t* index;
|
||||
|
||||
table = dict_mem_table_create("IBUF_DUMMY",
|
||||
DICT_HDR_SPACE, n, comp);
|
||||
DICT_HDR_SPACE, n, comp ? DICT_TF_COMPACT : 0);
|
||||
|
||||
index = dict_mem_index_create("IBUF_DUMMY", "IBUF_DUMMY",
|
||||
DICT_HDR_SPACE, 0, n);
|
||||
DICT_HDR_SPACE, 0, n);
|
||||
|
||||
index->table = table;
|
||||
|
||||
/* avoid ut_ad(index->cached) in dict_index_get_n_unique_in_tree */
|
||||
index->cached = TRUE;
|
||||
|
||||
return(index);
|
||||
}
|
||||
/************************************************************************
|
||||
@ -1136,7 +1146,7 @@ Add a column to the dummy index */
|
||||
static
|
||||
void
|
||||
ibuf_dummy_index_add_col(
|
||||
/*====================*/
|
||||
/*=====================*/
|
||||
dict_index_t* index, /* in: dummy index */
|
||||
dtype_t* type, /* in: the data type of the column */
|
||||
ulint len) /* in: length of the column */
|
||||
@ -1148,7 +1158,7 @@ ibuf_dummy_index_add_col(
|
||||
dtype_get_len(type),
|
||||
dtype_get_prec(type));
|
||||
dict_index_add_col(index,
|
||||
dict_table_get_nth_col(index->table, i), 0, len);
|
||||
dict_table_get_nth_col(index->table, i), len);
|
||||
}
|
||||
/************************************************************************
|
||||
Deallocates a dummy index for inserting a record to a non-clustered index.
|
||||
@ -1156,7 +1166,7 @@ Deallocates a dummy index for inserting a record to a non-clustered index.
|
||||
static
|
||||
void
|
||||
ibuf_dummy_index_free(
|
||||
/*====================*/
|
||||
/*==================*/
|
||||
dict_index_t* index) /* in: dummy index */
|
||||
{
|
||||
dict_table_t* table = index->table;
|
||||
@ -1674,9 +1684,10 @@ ibuf_add_free_page(
|
||||
/* Add the page to the free list and update the ibuf size data */
|
||||
|
||||
flst_add_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
|
||||
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
|
||||
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
|
||||
|
||||
fil_page_set_type(page, FIL_PAGE_IBUF_FREE_LIST);
|
||||
mlog_write_ulint(page + FIL_PAGE_TYPE, FIL_PAGE_IBUF_FREE_LIST,
|
||||
MLOG_2BYTES, &mtr);
|
||||
|
||||
ibuf_data->seg_size++;
|
||||
ibuf_data->free_list_len++;
|
||||
@ -1780,7 +1791,7 @@ ibuf_remove_free_page(
|
||||
|
||||
ut_ad(page_no == flst_get_last(root + PAGE_HEADER
|
||||
+ PAGE_BTR_IBUF_FREE_LIST, &mtr)
|
||||
.page);
|
||||
.page);
|
||||
|
||||
page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
|
||||
|
||||
@ -1791,7 +1802,7 @@ ibuf_remove_free_page(
|
||||
/* Remove the page from the free list and update the ibuf size data */
|
||||
|
||||
flst_remove(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
|
||||
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
|
||||
page + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST_NODE, &mtr);
|
||||
|
||||
ibuf_data->seg_size--;
|
||||
ibuf_data->free_list_len--;
|
||||
@ -1829,7 +1840,7 @@ ibuf_free_excess_pages(
|
||||
ulint i;
|
||||
|
||||
if (space != 0) {
|
||||
fprintf(stderr,
|
||||
fprintf(stderr,
|
||||
"InnoDB: Error: calling ibuf_free_excess_pages for space %lu\n", (ulong) space);
|
||||
return;
|
||||
}
|
||||
@ -1948,14 +1959,14 @@ ibuf_get_merge_page_nos(
|
||||
rec_space_id = ibuf_rec_get_space(rec);
|
||||
|
||||
if (rec_space_id != first_space_id
|
||||
|| rec_page_no / IBUF_MERGE_AREA
|
||||
!= first_page_no / IBUF_MERGE_AREA) {
|
||||
|| rec_page_no / IBUF_MERGE_AREA
|
||||
!= first_page_no / IBUF_MERGE_AREA) {
|
||||
|
||||
break;
|
||||
break;
|
||||
}
|
||||
|
||||
if (rec_page_no != prev_page_no
|
||||
|| rec_space_id != prev_space_id) {
|
||||
|| rec_space_id != prev_space_id) {
|
||||
n_pages++;
|
||||
}
|
||||
|
||||
@ -1992,19 +2003,19 @@ ibuf_get_merge_page_nos(
|
||||
ut_a(*n_stored < IBUF_MAX_N_PAGES_MERGED);
|
||||
#endif
|
||||
if ((rec_space_id != prev_space_id
|
||||
|| rec_page_no != prev_page_no)
|
||||
&& (prev_space_id != 0 || prev_page_no != 0)) {
|
||||
|| rec_page_no != prev_page_no)
|
||||
&& (prev_space_id != 0 || prev_page_no != 0)) {
|
||||
|
||||
if ((prev_page_no == first_page_no
|
||||
&& prev_space_id == first_space_id)
|
||||
|| contract
|
||||
|| (volume_for_page >
|
||||
((IBUF_MERGE_THRESHOLD - 1)
|
||||
* 4 * UNIV_PAGE_SIZE
|
||||
/ IBUF_PAGE_SIZE_PER_FREE_SPACE)
|
||||
/ IBUF_MERGE_THRESHOLD)) {
|
||||
&& prev_space_id == first_space_id)
|
||||
|| contract
|
||||
|| (volume_for_page >
|
||||
((IBUF_MERGE_THRESHOLD - 1)
|
||||
* 4 * UNIV_PAGE_SIZE
|
||||
/ IBUF_PAGE_SIZE_PER_FREE_SPACE)
|
||||
/ IBUF_MERGE_THRESHOLD)) {
|
||||
|
||||
space_ids[*n_stored] = prev_space_id;
|
||||
space_ids[*n_stored] = prev_space_id;
|
||||
space_versions[*n_stored]
|
||||
= fil_space_get_version(
|
||||
prev_space_id);
|
||||
@ -2016,10 +2027,10 @@ ibuf_get_merge_page_nos(
|
||||
}
|
||||
|
||||
if (rec_space_id != first_space_id
|
||||
|| rec_page_no / IBUF_MERGE_AREA
|
||||
!= first_page_no / IBUF_MERGE_AREA) {
|
||||
|| rec_page_no / IBUF_MERGE_AREA
|
||||
!= first_page_no / IBUF_MERGE_AREA) {
|
||||
|
||||
break;
|
||||
break;
|
||||
}
|
||||
|
||||
volume_for_page = 0;
|
||||
@ -2137,16 +2148,16 @@ loop:
|
||||
|
||||
/* This tree is empty */
|
||||
|
||||
data->empty = TRUE;
|
||||
data->empty = TRUE;
|
||||
|
||||
ibuf_exit();
|
||||
ibuf_exit();
|
||||
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
|
||||
mutex_exit(&ibuf_mutex);
|
||||
mutex_exit(&ibuf_mutex);
|
||||
|
||||
goto loop;
|
||||
goto loop;
|
||||
}
|
||||
|
||||
mutex_exit(&ibuf_mutex);
|
||||
@ -2204,7 +2215,7 @@ ibuf_contract_for_n_pages(
|
||||
them */
|
||||
{
|
||||
ulint sum_bytes = 0;
|
||||
ulint sum_pages = 0;
|
||||
ulint sum_pages = 0;
|
||||
ulint n_bytes;
|
||||
ulint n_pag2;
|
||||
|
||||
@ -2317,7 +2328,7 @@ ibuf_get_volume_buffered(
|
||||
}
|
||||
|
||||
if (page_no != ibuf_rec_get_page_no(rec)
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|
||||
goto count_later;
|
||||
}
|
||||
@ -2356,7 +2367,7 @@ ibuf_get_volume_buffered(
|
||||
}
|
||||
|
||||
if (page_no != ibuf_rec_get_page_no(rec)
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|
||||
goto count_later;
|
||||
}
|
||||
@ -2380,7 +2391,7 @@ count_later:
|
||||
}
|
||||
|
||||
if (page_no != ibuf_rec_get_page_no(rec)
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|
||||
return(volume);
|
||||
}
|
||||
@ -2417,7 +2428,7 @@ count_later:
|
||||
}
|
||||
|
||||
if (page_no != ibuf_rec_get_page_no(rec)
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|| space != ibuf_rec_get_space(rec)) {
|
||||
|
||||
return(volume);
|
||||
}
|
||||
@ -2448,7 +2459,7 @@ ibuf_update_max_tablespace_id(void)
|
||||
ibuf_data = fil_space_get_ibuf_data(0);
|
||||
|
||||
ibuf_index = ibuf_data->index;
|
||||
ut_a(!ibuf_index->table->comp);
|
||||
ut_a(!dict_table_is_comp(ibuf_index->table));
|
||||
|
||||
ibuf_enter();
|
||||
|
||||
@ -2588,12 +2599,12 @@ ibuf_insert_low(
|
||||
|
||||
heap = mem_heap_create(512);
|
||||
|
||||
/* Build the entry which contains the space id and the page number as
|
||||
/* Build the entry which contains the space id and the page number as
|
||||
the first fields and the type information for other fields, and which
|
||||
will be inserted to the insert buffer. */
|
||||
|
||||
ibuf_entry = ibuf_entry_build(entry, index->table->comp,
|
||||
space, page_no, heap);
|
||||
ibuf_entry = ibuf_entry_build(entry, dict_table_is_comp(index->table),
|
||||
space, page_no, heap);
|
||||
|
||||
/* Open a cursor to the insert buffer tree to calculate if we can add
|
||||
the new entry to it without exceeding the free space limit for the
|
||||
@ -2610,7 +2621,7 @@ ibuf_insert_low(
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
ut_a((buffered == 0) || ibuf_count_get(space, page_no));
|
||||
#endif
|
||||
mtr_start(&bitmap_mtr);
|
||||
mtr_start(&bitmap_mtr);
|
||||
|
||||
bitmap_page = ibuf_bitmap_get_map_page(space, page_no, &bitmap_mtr);
|
||||
|
||||
@ -2632,7 +2643,7 @@ ibuf_insert_low(
|
||||
> ibuf_index_page_calc_free_from_bits(bits)) {
|
||||
mtr_commit(&bitmap_mtr);
|
||||
|
||||
/* It may not fit */
|
||||
/* It may not fit */
|
||||
err = DB_STRONG_FAIL;
|
||||
|
||||
do_merge = TRUE;
|
||||
@ -2641,7 +2652,7 @@ ibuf_insert_low(
|
||||
space_ids, space_versions,
|
||||
page_nos, &n_stored);
|
||||
goto function_exit;
|
||||
}
|
||||
}
|
||||
|
||||
/* Set the bitmap bit denoting that the insert buffer contains
|
||||
buffered entries for this index page, if the bit is not set yet */
|
||||
@ -2703,7 +2714,7 @@ function_exit:
|
||||
ibuf_count_get(space, page_no) + 1);
|
||||
}
|
||||
#endif
|
||||
if (mode == BTR_MODIFY_TREE) {
|
||||
if (mode == BTR_MODIFY_TREE) {
|
||||
ut_ad(ibuf_validate_low());
|
||||
|
||||
mutex_exit(&ibuf_mutex);
|
||||
@ -2711,10 +2722,10 @@ function_exit:
|
||||
}
|
||||
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
btr_pcur_close(&pcur);
|
||||
ibuf_exit();
|
||||
|
||||
mem_heap_free(heap);
|
||||
mem_heap_free(heap);
|
||||
|
||||
mutex_enter(&ibuf_mutex);
|
||||
|
||||
@ -2725,7 +2736,7 @@ function_exit:
|
||||
|
||||
mutex_exit(&ibuf_mutex);
|
||||
|
||||
if ((mode == BTR_MODIFY_TREE) && (err == DB_SUCCESS)) {
|
||||
if ((mode == BTR_MODIFY_TREE) && (err == DB_SUCCESS)) {
|
||||
ibuf_contract_after_insert(entry_size);
|
||||
}
|
||||
|
||||
@ -2763,7 +2774,8 @@ ibuf_insert(
|
||||
ut_a(!(index->type & DICT_CLUSTERED));
|
||||
|
||||
if (rec_get_converted_size(index, entry)
|
||||
>= page_get_free_space_of_empty(index->table->comp) / 2) {
|
||||
>= page_get_free_space_of_empty(
|
||||
dict_table_is_comp(index->table)) / 2) {
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
@ -2810,7 +2822,8 @@ ibuf_insert_to_index_page(
|
||||
ut_ad(ibuf_inside());
|
||||
ut_ad(dtuple_check_typed(entry));
|
||||
|
||||
if (UNIV_UNLIKELY(index->table->comp != (ibool)!!page_is_comp(page))) {
|
||||
if (UNIV_UNLIKELY(dict_table_is_comp(index->table)
|
||||
!= (ibool)!!page_is_comp(page))) {
|
||||
fputs(
|
||||
"InnoDB: Trying to insert a record from the insert buffer to an index page\n"
|
||||
"InnoDB: but the 'compact' flag does not match!\n", stderr);
|
||||
@ -2827,7 +2840,7 @@ ibuf_insert_to_index_page(
|
||||
dump:
|
||||
buf_page_print(page);
|
||||
|
||||
dtuple_print(stderr, entry);
|
||||
dtuple_print(stderr, entry);
|
||||
|
||||
fputs(
|
||||
"InnoDB: The table where where this index record belongs\n"
|
||||
@ -3103,9 +3116,9 @@ ibuf_merge_or_delete_for_page(
|
||||
|
||||
if (!trx_sys_multiple_tablespace_format) {
|
||||
ut_a(trx_doublewrite_must_reset_space_ids);
|
||||
search_tuple = ibuf_search_tuple_build(space, page_no, heap);
|
||||
search_tuple = ibuf_search_tuple_build(space, page_no, heap);
|
||||
} else {
|
||||
search_tuple = ibuf_new_search_tuple_build(space, page_no,
|
||||
search_tuple = ibuf_new_search_tuple_build(space, page_no,
|
||||
heap);
|
||||
}
|
||||
|
||||
@ -3187,7 +3200,7 @@ loop:
|
||||
|
||||
/* Check if the entry is for this index page */
|
||||
if (ibuf_rec_get_page_no(ibuf_rec) != page_no
|
||||
|| ibuf_rec_get_space(ibuf_rec) != space) {
|
||||
|| ibuf_rec_get_space(ibuf_rec) != space) {
|
||||
if (page) {
|
||||
page_header_reset_last_insert(page, &mtr);
|
||||
}
|
||||
@ -3198,7 +3211,7 @@ loop:
|
||||
fputs("InnoDB: Discarding record\n ", stderr);
|
||||
rec_print_old(stderr, ibuf_rec);
|
||||
fputs("\n from the insert buffer!\n\n", stderr);
|
||||
} else if (page) {
|
||||
} else if (page) {
|
||||
/* Now we have at pcur a record which should be
|
||||
inserted to the index page; NOTE that the call below
|
||||
copies pointers to fields in ibuf_rec, and we must
|
||||
@ -3213,7 +3226,7 @@ loop:
|
||||
heap, &dummy_index);
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
volume += rec_get_converted_size(dummy_index, entry)
|
||||
+ page_dir_calc_reserved_space(1);
|
||||
+ page_dir_calc_reserved_space(1);
|
||||
ut_a(volume <= 4 * UNIV_PAGE_SIZE
|
||||
/ IBUF_PAGE_SIZE_PER_FREE_SPACE);
|
||||
#endif
|
||||
@ -3235,7 +3248,7 @@ loop:
|
||||
|
||||
if (btr_pcur_is_after_last_on_page(&pcur, &mtr)) {
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
btr_pcur_close(&pcur);
|
||||
|
||||
goto loop;
|
||||
}
|
||||
@ -3274,7 +3287,7 @@ reset_bit:
|
||||
n_inserts, volume, page_no); */
|
||||
#endif
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
btr_pcur_close(&pcur);
|
||||
mem_heap_free(heap);
|
||||
|
||||
/* Protect our statistics keeping from race conditions */
|
||||
@ -3374,7 +3387,7 @@ loop:
|
||||
|
||||
if (btr_pcur_is_after_last_on_page(&pcur, &mtr)) {
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
btr_pcur_close(&pcur);
|
||||
|
||||
ibuf_exit();
|
||||
|
||||
@ -3384,7 +3397,7 @@ loop:
|
||||
|
||||
leave_loop:
|
||||
mtr_commit(&mtr);
|
||||
btr_pcur_close(&pcur);
|
||||
btr_pcur_close(&pcur);
|
||||
|
||||
/* Protect our statistics keeping from race conditions */
|
||||
mutex_enter(&ibuf_mutex);
|
||||
@ -3468,7 +3481,7 @@ ibuf_is_empty(void)
|
||||
"InnoDB: run to completion.\n");
|
||||
}
|
||||
} else {
|
||||
ut_a(data->empty == FALSE);
|
||||
ut_a(data->empty == FALSE);
|
||||
|
||||
is_empty = FALSE;
|
||||
}
|
||||
@ -3503,10 +3516,10 @@ ibuf_print(
|
||||
|
||||
while (data) {
|
||||
fprintf(file,
|
||||
"Ibuf for space %lu: size %lu, free list len %lu, seg size %lu,",
|
||||
(ulong) data->space, (ulong) data->size,
|
||||
(ulong) data->free_list_len,
|
||||
(ulong) data->seg_size);
|
||||
"Ibuf for space %lu: size %lu, free list len %lu, seg size %lu,",
|
||||
(ulong) data->space, (ulong) data->size,
|
||||
(ulong) data->free_list_len,
|
||||
(ulong) data->seg_size);
|
||||
|
||||
if (data->empty) {
|
||||
fputs(" is empty\n", file);
|
||||
@ -3516,21 +3529,21 @@ ibuf_print(
|
||||
fprintf(file,
|
||||
"Ibuf for space %lu: size %lu, free list len %lu, seg size %lu,\n"
|
||||
"%lu inserts, %lu merged recs, %lu merges\n",
|
||||
(ulong) data->space,
|
||||
(ulong) data->size,
|
||||
(ulong) data->free_list_len,
|
||||
(ulong) data->seg_size,
|
||||
(ulong) data->n_inserts,
|
||||
(ulong) data->n_merged_recs,
|
||||
(ulong) data->n_merges);
|
||||
(ulong) data->space,
|
||||
(ulong) data->size,
|
||||
(ulong) data->free_list_len,
|
||||
(ulong) data->seg_size,
|
||||
(ulong) data->n_inserts,
|
||||
(ulong) data->n_merged_recs,
|
||||
(ulong) data->n_merges);
|
||||
#ifdef UNIV_IBUF_DEBUG
|
||||
for (i = 0; i < IBUF_COUNT_N_PAGES; i++) {
|
||||
if (ibuf_count_get(data->space, i) > 0) {
|
||||
|
||||
fprintf(stderr,
|
||||
"Ibuf count for page %lu is %lu\n",
|
||||
(ulong) i,
|
||||
(ulong) ibuf_count_get(data->space, i));
|
||||
(ulong) i,
|
||||
(ulong) ibuf_count_get(data->space, i));
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -144,7 +144,7 @@ UNIV_INLINE
|
||||
ulint
|
||||
btr_node_ptr_get_child_page_no(
|
||||
/*===========================*/
|
||||
/* out: child node address */
|
||||
/* out: child node address */
|
||||
rec_t* rec, /* in: node pointer record */
|
||||
const ulint* offsets);/* in: array returned by rec_get_offsets() */
|
||||
/****************************************************************
|
||||
@ -277,6 +277,7 @@ btr_node_ptr_delete(
|
||||
dict_tree_t* tree, /* in: index tree */
|
||||
page_t* page, /* in: page whose node pointer is deleted */
|
||||
mtr_t* mtr); /* in: mtr */
|
||||
#ifdef UNIV_DEBUG
|
||||
/****************************************************************
|
||||
Checks that the node pointer to a page is appropriate. */
|
||||
|
||||
@ -287,6 +288,7 @@ btr_check_node_ptr(
|
||||
dict_tree_t* tree, /* in: index tree */
|
||||
page_t* page, /* in: index page */
|
||||
mtr_t* mtr); /* in: mtr */
|
||||
#endif /* UNIV_DEBUG */
|
||||
/*****************************************************************
|
||||
Tries to merge the page first to the left immediate brother if such a
|
||||
brother exists, and the node pointers to the current page and to the
|
||||
@ -412,7 +414,7 @@ the index. */
|
||||
|
||||
ibool
|
||||
btr_index_rec_validate(
|
||||
/*====================*/
|
||||
/*===================*/
|
||||
/* out: TRUE if ok */
|
||||
rec_t* rec, /* in: index record */
|
||||
dict_index_t* index, /* in: index */
|
||||
@ -429,7 +431,7 @@ btr_validate_tree(
|
||||
dict_tree_t* tree, /* in: tree */
|
||||
trx_t* trx); /* in: transaction or NULL */
|
||||
|
||||
#define BTR_N_LEAF_PAGES 1
|
||||
#define BTR_N_LEAF_PAGES 1
|
||||
#define BTR_TOTAL_SIZE 2
|
||||
|
||||
#ifndef UNIV_NONINL
|
||||
|
@ -124,9 +124,9 @@ btr_page_get_next(
|
||||
{
|
||||
ut_ad(page && mtr);
|
||||
ut_ad(mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_X_FIX)
|
||||
|| mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_S_FIX));
|
||||
MTR_MEMO_PAGE_X_FIX)
|
||||
|| mtr_memo_contains(mtr, buf_block_align(page),
|
||||
MTR_MEMO_PAGE_S_FIX));
|
||||
|
||||
return(mach_read_from_4(page + FIL_PAGE_NEXT));
|
||||
}
|
||||
@ -182,7 +182,7 @@ UNIV_INLINE
|
||||
ulint
|
||||
btr_node_ptr_get_child_page_no(
|
||||
/*===========================*/
|
||||
/* out: child node address */
|
||||
/* out: child node address */
|
||||
rec_t* rec, /* in: node pointer record */
|
||||
const ulint* offsets)/* in: array returned by rec_get_offsets() */
|
||||
{
|
||||
|
@ -72,7 +72,7 @@ UNIV_INLINE
|
||||
void
|
||||
btr_cur_position(
|
||||
/*=============*/
|
||||
dict_index_t* index, /* in: index */
|
||||
dict_index_t* index, /* in: index */
|
||||
rec_t* rec, /* in: record in tree */
|
||||
btr_cur_t* cursor);/* in: cursor */
|
||||
/************************************************************************
|
||||
|
@ -53,7 +53,8 @@ btr_cur_get_page(
|
||||
btr_cur_t* cursor) /* in: tree cursor */
|
||||
{
|
||||
page_t* page = buf_frame_align(page_cur_get_rec(&(cursor->page_cur)));
|
||||
ut_ad(!!page_is_comp(page) == cursor->index->table->comp);
|
||||
ut_ad(!!page_is_comp(page)
|
||||
== dict_table_is_comp(cursor->index->table));
|
||||
return(page);
|
||||
}
|
||||
|
||||
@ -75,7 +76,7 @@ UNIV_INLINE
|
||||
void
|
||||
btr_cur_position(
|
||||
/*=============*/
|
||||
dict_index_t* index, /* in: index */
|
||||
dict_index_t* index, /* in: index */
|
||||
rec_t* rec, /* in: record in tree */
|
||||
btr_cur_t* cursor) /* in: cursor */
|
||||
{
|
||||
@ -104,8 +105,8 @@ btr_cur_compress_recommendation(
|
||||
page = btr_cur_get_page(cursor);
|
||||
|
||||
if ((page_get_data_size(page) < BTR_CUR_PAGE_COMPRESS_LIMIT)
|
||||
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
|
||||
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
|
||||
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
|
||||
&& (btr_page_get_prev(page, mtr) == FIL_NULL))) {
|
||||
|
||||
/* The page fillfactor has dropped below a predefined
|
||||
minimum value OR the level in the B-tree contains just
|
||||
@ -113,11 +114,11 @@ btr_cur_compress_recommendation(
|
||||
root page. */
|
||||
|
||||
if (dict_tree_get_page((cursor->index)->tree)
|
||||
== buf_frame_get_page_no(page)) {
|
||||
== buf_frame_get_page_no(page)) {
|
||||
|
||||
/* It is the root page */
|
||||
/* It is the root page */
|
||||
|
||||
return(FALSE);
|
||||
return(FALSE);
|
||||
}
|
||||
|
||||
return(TRUE);
|
||||
@ -148,9 +149,9 @@ btr_cur_can_delete_without_compress(
|
||||
page = btr_cur_get_page(cursor);
|
||||
|
||||
if ((page_get_data_size(page) - rec_size < BTR_CUR_PAGE_COMPRESS_LIMIT)
|
||||
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
|
||||
&& (btr_page_get_prev(page, mtr) == FIL_NULL))
|
||||
|| (page_get_n_recs(page) < 2)) {
|
||||
|| ((btr_page_get_next(page, mtr) == FIL_NULL)
|
||||
&& (btr_page_get_prev(page, mtr) == FIL_NULL))
|
||||
|| (page_get_n_recs(page) < 2)) {
|
||||
|
||||
/* The page fillfactor will drop below a predefined
|
||||
minimum value, OR the level in the B-tree contains just
|
||||
@ -158,11 +159,11 @@ btr_cur_can_delete_without_compress(
|
||||
compression if this is not the root page. */
|
||||
|
||||
if (dict_tree_get_page((cursor->index)->tree)
|
||||
== buf_frame_get_page_no(page)) {
|
||||
== buf_frame_get_page_no(page)) {
|
||||
|
||||
/* It is the root page */
|
||||
/* It is the root page */
|
||||
|
||||
return(TRUE);
|
||||
return(TRUE);
|
||||
}
|
||||
|
||||
return(FALSE);
|
||||
|
@ -152,7 +152,7 @@ btr_pcur_open_on_user_rec(
|
||||
ulint mode, /* in: PAGE_CUR_L, ... */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF or
|
||||
BTR_MODIFY_LEAF */
|
||||
btr_pcur_t* cursor, /* in: memory buffer for persistent
|
||||
btr_pcur_t* cursor, /* in: memory buffer for persistent
|
||||
cursor */
|
||||
mtr_t* mtr); /* in: mtr */
|
||||
/**************************************************************************
|
||||
@ -208,7 +208,7 @@ btr_pcur_restore_position(
|
||||
whose ordering fields are identical to
|
||||
the ones of the original user record */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
|
||||
btr_pcur_t* cursor, /* in: detached persistent cursor */
|
||||
btr_pcur_t* cursor, /* in: detached persistent cursor */
|
||||
mtr_t* mtr); /* in: mtr */
|
||||
/******************************************************************
|
||||
If the latch mode of the cursor is BTR_LEAF_SEARCH or BTR_LEAF_MODIFY,
|
||||
|
@ -198,7 +198,7 @@ btr_pcur_is_on_user_rec(
|
||||
ut_ad(cursor->latch_mode != BTR_NO_LATCHES);
|
||||
|
||||
if ((btr_pcur_is_before_first_on_page(cursor, mtr))
|
||||
|| (btr_pcur_is_after_last_on_page(cursor, mtr))) {
|
||||
|| (btr_pcur_is_after_last_on_page(cursor, mtr))) {
|
||||
|
||||
return(FALSE);
|
||||
}
|
||||
|
@ -62,8 +62,8 @@ btr_search_guess_on_hash(
|
||||
btr_search_t* info, /* in: index search info */
|
||||
dtuple_t* tuple, /* in: logical record */
|
||||
ulint mode, /* in: PAGE_CUR_L, ... */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
|
||||
btr_cur_t* cursor, /* out: tree cursor */
|
||||
ulint latch_mode, /* in: BTR_SEARCH_LEAF, ... */
|
||||
btr_cur_t* cursor, /* out: tree cursor */
|
||||
ulint has_search_latch,/* in: latch mode the caller
|
||||
currently has on btr_search_latch:
|
||||
RW_S_LATCH, RW_X_LATCH, or 0 */
|
||||
|
@ -16,7 +16,7 @@ Updates the search info. */
|
||||
void
|
||||
btr_search_info_update_slow(
|
||||
/*========================*/
|
||||
btr_search_t* info, /* in: search info */
|
||||
btr_search_t* info, /* in/out: search info */
|
||||
btr_cur_t* cursor);/* in: cursor which was just positioned */
|
||||
|
||||
/************************************************************************
|
||||
|
@ -15,7 +15,7 @@ Created 2/17/1996 Heikki Tuuri
|
||||
#include "page0types.h"
|
||||
|
||||
typedef struct btr_pcur_struct btr_pcur_t;
|
||||
typedef struct btr_cur_struct btr_cur_t;
|
||||
typedef struct btr_cur_struct btr_cur_t;
|
||||
typedef struct btr_search_struct btr_search_t;
|
||||
|
||||
#endif
|
||||
|
@ -55,14 +55,14 @@ Created 11/5/1995 Heikki Tuuri
|
||||
/* Magic value to use instead of checksums when they are disabled */
|
||||
#define BUF_NO_CHECKSUM_MAGIC 0xDEADBEEFUL
|
||||
|
||||
extern buf_pool_t* buf_pool; /* The buffer pool of the database */
|
||||
extern buf_pool_t* buf_pool; /* The buffer pool of the database */
|
||||
#ifdef UNIV_DEBUG
|
||||
extern ibool buf_debug_prints;/* If this is set TRUE, the program
|
||||
prints info whenever read or flush
|
||||
occurs */
|
||||
#endif /* UNIV_DEBUG */
|
||||
extern ulint srv_buf_pool_write_requests; /* variable to count write request
|
||||
issued */
|
||||
issued */
|
||||
|
||||
/************************************************************************
|
||||
Creates the buffer pool. */
|
||||
@ -134,7 +134,7 @@ buf_frame_copy(
|
||||
NOTE! The following macros should be used instead of buf_page_get_gen,
|
||||
to improve debugging. Only values RW_S_LATCH and RW_X_LATCH are allowed
|
||||
in LA! */
|
||||
#define buf_page_get(SP, OF, LA, MTR) buf_page_get_gen(\
|
||||
#define buf_page_get(SP, OF, LA, MTR) buf_page_get_gen(\
|
||||
SP, OF, LA, NULL,\
|
||||
BUF_GET, __FILE__, __LINE__, MTR)
|
||||
/******************************************************************
|
||||
@ -143,13 +143,13 @@ read the contents of the page unless you know it is safe. Do not modify
|
||||
the contents of the page! We have separated this case, because it is
|
||||
error-prone programming not to set a latch, and it should be used
|
||||
with care. */
|
||||
#define buf_page_get_with_no_latch(SP, OF, MTR) buf_page_get_gen(\
|
||||
#define buf_page_get_with_no_latch(SP, OF, MTR) buf_page_get_gen(\
|
||||
SP, OF, RW_NO_LATCH, NULL,\
|
||||
BUF_GET_NO_LATCH, __FILE__, __LINE__, MTR)
|
||||
/******************************************************************
|
||||
NOTE! The following macros should be used instead of buf_page_get_gen, to
|
||||
improve debugging. Only values RW_S_LATCH and RW_X_LATCH are allowed as LA! */
|
||||
#define buf_page_get_nowait(SP, OF, LA, MTR) buf_page_get_gen(\
|
||||
#define buf_page_get_nowait(SP, OF, LA, MTR) buf_page_get_gen(\
|
||||
SP, OF, LA, NULL,\
|
||||
BUF_GET_NOWAIT, __FILE__, __LINE__, MTR)
|
||||
/******************************************************************
|
||||
@ -266,7 +266,7 @@ the buffer pool. */
|
||||
|
||||
void
|
||||
buf_page_make_young(
|
||||
/*=================*/
|
||||
/*================*/
|
||||
buf_frame_t* frame); /* in: buffer frame of a file page */
|
||||
/************************************************************************
|
||||
Returns TRUE if the page can be found in the buffer pool hash table. NOTE
|
||||
@ -396,8 +396,8 @@ on 32-bit and 64-bit architectures. */
|
||||
ulint
|
||||
buf_calc_page_new_checksum(
|
||||
/*=======================*/
|
||||
/* out: checksum */
|
||||
byte* page); /* in: buffer page */
|
||||
/* out: checksum */
|
||||
byte* page); /* in: buffer page */
|
||||
/************************************************************************
|
||||
In versions < 4.0.14 and < 4.1.1 there was a bug that the checksum only
|
||||
looked at the first few bytes of the page. This calculates that old
|
||||
@ -409,8 +409,8 @@ because this takes that field as an input! */
|
||||
ulint
|
||||
buf_calc_page_old_checksum(
|
||||
/*=======================*/
|
||||
/* out: checksum */
|
||||
byte* page); /* in: buffer page */
|
||||
/* out: checksum */
|
||||
byte* page); /* in: buffer page */
|
||||
/************************************************************************
|
||||
Checks if a page is corrupt. */
|
||||
|
||||
@ -745,8 +745,6 @@ struct buf_block_struct{
|
||||
buffer pool which are index pages,
|
||||
but this flag is not set because
|
||||
we do not keep track of all pages */
|
||||
dict_index_t* index; /* index for which the adaptive
|
||||
hash index has been created */
|
||||
/* 2. Page flushing fields */
|
||||
|
||||
UT_LIST_NODE_T(buf_block_t) flush_list;
|
||||
@ -833,7 +831,13 @@ struct buf_block_struct{
|
||||
records with the same prefix should be
|
||||
indexed in the hash index */
|
||||
|
||||
/* The following 4 fields are protected by btr_search_latch: */
|
||||
/* These 6 fields may only be modified when we have
|
||||
an x-latch on btr_search_latch AND
|
||||
a) we are holding an s-latch or x-latch on block->lock or
|
||||
b) we know that block->buf_fix_count == 0.
|
||||
|
||||
An exception to this is when we init or create a page
|
||||
in the buffer pool in buf0buf.c. */
|
||||
|
||||
ibool is_hashed; /* TRUE if hash index has already been
|
||||
built on this page; note that it does
|
||||
@ -850,6 +854,8 @@ struct buf_block_struct{
|
||||
ulint curr_side; /* BTR_SEARCH_LEFT_SIDE or
|
||||
BTR_SEARCH_RIGHT_SIDE in hash
|
||||
indexing */
|
||||
dict_index_t* index; /* Index for which the adaptive
|
||||
hash index has been created. */
|
||||
/* 6. Debug fields */
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
rw_lock_t debug_latch; /* in the debug version, each thread
|
||||
@ -857,9 +863,9 @@ struct buf_block_struct{
|
||||
an s-latch here; so we can use the
|
||||
debug utilities in sync0rw */
|
||||
#endif
|
||||
ibool file_page_was_freed;
|
||||
/* this is set to TRUE when fsp
|
||||
frees a page in buffer pool */
|
||||
ibool file_page_was_freed;
|
||||
/* this is set to TRUE when fsp
|
||||
frees a page in buffer pool */
|
||||
};
|
||||
|
||||
#define BUF_BLOCK_MAGIC_N 41526563
|
||||
@ -971,7 +977,7 @@ struct buf_pool_struct{
|
||||
physical memory is mapped to a frame */
|
||||
UT_LIST_BASE_NODE_T(buf_block_t) LRU;
|
||||
/* base node of the LRU list */
|
||||
buf_block_t* LRU_old; /* pointer to the about 3/8 oldest
|
||||
buf_block_t* LRU_old; /* pointer to the about 3/8 oldest
|
||||
blocks in the LRU list; NULL if LRU
|
||||
length less than BUF_LRU_OLD_MIN_LEN */
|
||||
ulint LRU_old_len; /* length of the LRU list from
|
||||
|
@ -82,7 +82,8 @@ buf_pool_is_block(
|
||||
void* ptr) /* in: pointer to memory */
|
||||
{
|
||||
if ((buf_pool->blocks <= (buf_block_t*)ptr)
|
||||
&& ((buf_block_t*)ptr < buf_pool->blocks + buf_pool->max_size)) {
|
||||
&& ((buf_block_t*)ptr < buf_pool->blocks
|
||||
+ buf_pool->max_size)) {
|
||||
|
||||
return(TRUE);
|
||||
}
|
||||
@ -150,7 +151,7 @@ buf_block_get_frame(
|
||||
ut_ad(block < buf_pool->blocks + buf_pool->max_size);
|
||||
ut_ad(block->state != BUF_BLOCK_NOT_USED);
|
||||
ut_ad((block->state != BUF_BLOCK_FILE_PAGE)
|
||||
|| (block->buf_fix_count > 0));
|
||||
|| (block->buf_fix_count > 0));
|
||||
|
||||
return(block->frame);
|
||||
}
|
||||
@ -208,7 +209,7 @@ buf_block_align(
|
||||
frame_zero = buf_pool->frame_zero;
|
||||
|
||||
if (UNIV_UNLIKELY((ulint)ptr < (ulint)frame_zero)
|
||||
|| UNIV_UNLIKELY((ulint)ptr > (ulint)(buf_pool->high_end))) {
|
||||
|| UNIV_UNLIKELY((ulint)ptr > (ulint)(buf_pool->high_end))) {
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
@ -218,7 +219,7 @@ buf_block_align(
|
||||
"InnoDB: corruption. If this happens in an InnoDB database recovery,\n"
|
||||
"InnoDB: you can look from section 6.1 at http://www.innodb.com/ibman.html\n"
|
||||
"InnoDB: how to force recovery.\n",
|
||||
ptr, frame_zero,
|
||||
ptr, frame_zero,
|
||||
buf_pool->high_end);
|
||||
ut_error;
|
||||
}
|
||||
@ -244,7 +245,7 @@ buf_frame_align(
|
||||
frame = ut_align_down(ptr, UNIV_PAGE_SIZE);
|
||||
|
||||
if (UNIV_UNLIKELY((ulint)frame < (ulint)(buf_pool->frame_zero))
|
||||
|| UNIV_UNLIKELY((ulint)frame >= (ulint)(buf_pool->high_end))) {
|
||||
|| UNIV_UNLIKELY((ulint)frame >= (ulint)(buf_pool->high_end))) {
|
||||
|
||||
ut_print_timestamp(stderr);
|
||||
fprintf(stderr,
|
||||
@ -254,7 +255,7 @@ buf_frame_align(
|
||||
"InnoDB: corruption. If this happens in an InnoDB database recovery,\n"
|
||||
"InnoDB: you can look from section 6.1 at http://www.innodb.com/ibman.html\n"
|
||||
"InnoDB: how to force recovery.\n",
|
||||
ptr, buf_pool->frame_zero,
|
||||
ptr, buf_pool->frame_zero,
|
||||
buf_pool->high_end);
|
||||
ut_error;
|
||||
}
|
||||
@ -448,7 +449,7 @@ buf_frame_modify_clock_inc(
|
||||
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad((mutex_own(&(buf_pool->mutex)) && (block->buf_fix_count == 0))
|
||||
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
|
||||
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
|
||||
#endif /*UNIV_SYNC_DEBUG */
|
||||
|
||||
UT_DULINT_INC(block->modify_clock);
|
||||
@ -469,7 +470,7 @@ buf_block_modify_clock_inc(
|
||||
{
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad((mutex_own(&(buf_pool->mutex)) && (block->buf_fix_count == 0))
|
||||
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
|
||||
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
UT_DULINT_INC(block->modify_clock);
|
||||
@ -489,7 +490,7 @@ buf_block_get_modify_clock(
|
||||
{
|
||||
#ifdef UNIV_SYNC_DEBUG
|
||||
ut_ad(rw_lock_own(&(block->lock), RW_LOCK_SHARED)
|
||||
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
|
||||
|| rw_lock_own(&(block->lock), RW_LOCK_EXCLUSIVE));
|
||||
#endif /* UNIV_SYNC_DEBUG */
|
||||
|
||||
return(block->modify_clock);
|
||||
@ -654,9 +655,9 @@ void
|
||||
buf_page_dbg_add_level(
|
||||
/*===================*/
|
||||
buf_frame_t* frame __attribute__((unused)), /* in: buffer page
|
||||
where we have acquired latch */
|
||||
where we have acquired latch */
|
||||
ulint level __attribute__((unused))) /* in: latching order
|
||||
level */
|
||||
level */
|
||||
{
|
||||
sync_thread_add_level(&(buf_block_align(frame)->lock), level);
|
||||
}
|
||||
|
@ -110,8 +110,8 @@ available to replacement in the free list and at the end of the LRU list (to
|
||||
make sure that a read-ahead batch can be read efficiently in a single
|
||||
sweep). */
|
||||
|
||||
#define BUF_FLUSH_FREE_BLOCK_MARGIN (5 + BUF_READ_AHEAD_AREA)
|
||||
#define BUF_FLUSH_EXTRA_MARGIN (BUF_FLUSH_FREE_BLOCK_MARGIN / 4 + 100)
|
||||
#define BUF_FLUSH_FREE_BLOCK_MARGIN (5 + BUF_READ_AHEAD_AREA)
|
||||
#define BUF_FLUSH_EXTRA_MARGIN (BUF_FLUSH_FREE_BLOCK_MARGIN / 4 + 100)
|
||||
|
||||
#ifndef UNIV_NONINL
|
||||
#include "buf0flu.ic"
|
||||
|
@ -62,7 +62,7 @@ buf_flush_note_modification(
|
||||
mtr->start_lsn) <= 0);
|
||||
}
|
||||
|
||||
++srv_buf_pool_write_requests;
|
||||
++srv_buf_pool_write_requests;
|
||||
}
|
||||
|
||||
/************************************************************************
|
||||
|
@ -73,7 +73,7 @@ ibool
|
||||
buf_LRU_search_and_free_block(
|
||||
/*==========================*/
|
||||
/* out: TRUE if freed */
|
||||
ulint n_iterations); /* in: how many times this has been called
|
||||
ulint n_iterations); /* in: how many times this has been called
|
||||
repeatedly without result: a high value means
|
||||
that we should search farther; if value is
|
||||
k < 10, then we only search k/10 * number
|
||||
|
@@ -21,7 +21,7 @@ typedef struct big_rec_struct big_rec_t;
/* Some non-inlined functions used in the MySQL interface: */
void
dfield_set_data_noninline(
dfield_t* field, /* in: field */
void* data, /* in: data */
ulint len); /* in: length or UNIV_SQL_NULL */
void*
@@ -32,10 +32,10 @@ dfield_get_len_noninline(
dfield_t* field); /* in: field */
ulint
dtuple_get_n_fields_noninline(
dtuple_t* tuple); /* in: tuple */
dfield_t*
dtuple_get_nth_field_noninline(
dtuple_t* tuple, /* in: tuple */
ulint n); /* in: index of field */

/*************************************************************************
@@ -77,7 +77,7 @@ UNIV_INLINE
void
dfield_set_len(
/*===========*/
dfield_t* field, /* in: field */
ulint len); /* in: length or UNIV_SQL_NULL */
/*************************************************************************
Sets pointer to the data and length in a field. */
@@ -85,7 +85,7 @@ UNIV_INLINE
void
dfield_set_data(
/*============*/
dfield_t* field, /* in: field */
const void* data, /* in: data */
ulint len); /* in: length or UNIV_SQL_NULL */
/**************************************************************************
@@ -102,7 +102,7 @@ UNIV_INLINE
void
dfield_copy_data(
/*=============*/
dfield_t* field1, /* in: field to copy to */
dfield_t* field2);/* in: field to copy from */
/*************************************************************************
Copies a data field to another. */
@@ -138,7 +138,7 @@ ulint
dtuple_get_n_fields(
/*================*/
/* out: number of fields */
dtuple_t* tuple); /* in: tuple */
/*************************************************************************
Gets nth field of a tuple. */
UNIV_INLINE
@@ -146,7 +146,7 @@ dfield_t*
dtuple_get_nth_field(
/*=================*/
/* out: nth field */
dtuple_t* tuple, /* in: tuple */
ulint n); /* in: index of field */
/*************************************************************************
Gets info bits in a data tuple. */
@@ -155,14 +155,14 @@ ulint
dtuple_get_info_bits(
/*=================*/
/* out: info bits */
dtuple_t* tuple); /* in: tuple */
/*************************************************************************
Sets info bits in a data tuple. */
UNIV_INLINE
void
dtuple_set_info_bits(
/*=================*/
dtuple_t* tuple, /* in: tuple */
ulint info_bits); /* in: info bits */
/*************************************************************************
Gets number of fields used in record comparisons. */
@@ -189,7 +189,7 @@ UNIV_INLINE
dtuple_t*
dtuple_create(
/*==========*/
/* out, own: created tuple */
mem_heap_t* heap, /* in: memory heap where the tuple
is created */
ulint n_fields); /* in: number of fields */
@@ -201,7 +201,7 @@ dtuple_t*
dtuple_create_for_mysql(
/*====================*/
/* out, own created dtuple */
void** heap, /* out: created memory heap */
ulint n_fields); /* in: number of fields */
/*************************************************************************
Frees a dtuple used in MySQL. */
@@ -320,6 +320,14 @@ void
dfield_print_also_hex(
/*==================*/
dfield_t* dfield); /* in: dfield */
/*****************************************************************
Print a dfield value using ut_print_buf. */

void
dfield_print_raw(
/*=============*/
FILE* f, /* in: output stream */
dfield_t* dfield); /* in: dfield */
/**************************************************************
The following function prints the contents of a tuple. */

@@ -401,7 +409,7 @@ struct dtuple_struct {

/* A slot for a field in a big rec vector */

typedef struct big_rec_field_struct big_rec_field_t;
struct big_rec_field_struct {
ulint field_no; /* field number in record */
ulint len; /* stored data len */

@@ -9,7 +9,9 @@ Created 5/30/1994 Heikki Tuuri
#include "mem0mem.h"
#include "ut0rnd.h"

#ifdef UNIV_DEBUG
extern byte data_error;
#endif /* UNIV_DEBUG */

/*************************************************************************
Gets pointer to the type struct of SQL data field. */
@@ -50,7 +52,7 @@ dfield_get_data(
{
ut_ad(field);
ut_ad((field->len == UNIV_SQL_NULL)
|| (field->data != &data_error));

return(field->data);
}
@@ -67,7 +69,7 @@ dfield_get_len(
{
ut_ad(field);
ut_ad((field->len == UNIV_SQL_NULL)
|| (field->data != &data_error));

return(field->len);
}
@@ -78,7 +80,7 @@ UNIV_INLINE
void
dfield_set_len(
/*===========*/
dfield_t* field, /* in: field */
ulint len) /* in: length or UNIV_SQL_NULL */
{
ut_ad(field);
@@ -92,7 +94,7 @@ UNIV_INLINE
void
dfield_set_data(
/*============*/
dfield_t* field, /* in: field */
const void* data, /* in: data */
ulint len) /* in: length or UNIV_SQL_NULL */
{
@@ -108,7 +110,7 @@ UNIV_INLINE
void
dfield_copy_data(
/*=============*/
dfield_t* field1, /* in: field to copy to */
dfield_t* field2) /* in: field to copy from */
{
ut_ad(field1 && field2);
@@ -144,8 +146,9 @@ dfield_datas_are_binary_equal(
len = field1->len;

if ((len != field2->len)
|| ((len != UNIV_SQL_NULL)
&& (0 != ut_memcmp(field1->data, field2->data, len)))) {
|| ((len != UNIV_SQL_NULL)
&& (0 != ut_memcmp(field1->data, field2->data,
len)))) {

return(FALSE);
}
@@ -160,7 +163,7 @@ ulint
dtuple_get_info_bits(
/*=================*/
/* out: info bits */
dtuple_t* tuple) /* in: tuple */
{
ut_ad(tuple);

@@ -173,7 +176,7 @@ UNIV_INLINE
void
dtuple_set_info_bits(
/*=================*/
dtuple_t* tuple, /* in: tuple */
ulint info_bits) /* in: info bits */
{
ut_ad(tuple);
@@ -219,7 +222,7 @@ ulint
dtuple_get_n_fields(
/*================*/
/* out: number of fields */
dtuple_t* tuple) /* in: tuple */
{
ut_ad(tuple);

@@ -233,7 +236,7 @@ dfield_t*
dtuple_get_nth_field(
/*=================*/
/* out: nth field */
dtuple_t* tuple, /* in: tuple */
ulint n) /* in: index of field */
{
ut_ad(tuple);
@@ -249,7 +252,7 @@ UNIV_INLINE
dtuple_t*
dtuple_create(
/*==========*/
/* out, own: created tuple */
mem_heap_t* heap, /* in: memory heap where the tuple
is created */
ulint n_fields) /* in: number of fields */
@@ -259,7 +262,7 @@ dtuple_create(
ut_ad(heap);

tuple = (dtuple_t*) mem_heap_alloc(heap, sizeof(dtuple_t)
+ n_fields * sizeof(dfield_t));
tuple->info_bits = 0;
tuple->n_fields = n_fields;
tuple->n_fields_cmp = n_fields;
@@ -292,10 +295,10 @@ dtuple_get_data_size(
dtuple_t* tuple) /* in: typed data tuple */
{
dfield_t* field;
ulint n_fields;
ulint len;
ulint i;
ulint sum = 0;

ut_ad(tuple);
ut_ad(dtuple_check_typed(tuple));
@@ -423,7 +426,7 @@ dtuple_contains_null(

for (i = 0; i < n; i++) {
if (dfield_get_len(dtuple_get_nth_field(tuple, i))
== UNIV_SQL_NULL) {

return(TRUE);
}

@@ -13,13 +13,14 @@ Created 1/16/1996 Heikki Tuuri

extern ulint data_mysql_default_charset_coll;
#define DATA_MYSQL_LATIN1_SWEDISH_CHARSET_COLL 8
#define DATA_MYSQL_BINARY_CHARSET_COLL 63

/* SQL data type struct */
typedef struct dtype_struct dtype_t;

/* This variable is initialized as the standard binary variable length
data type */
extern dtype_t* dtype_binary;

/*-------------------------------------------*/
/* The 'MAIN TYPE' of a column */
@@ -101,7 +102,7 @@ InnoDB's own internal system tables have different precise types for their
columns, and for them the precise type is usually not used at all.
*/

#define DATA_ENGLISH 4 /* English language character string: this
is a relic from pre-MySQL time and only used
for InnoDB's own system tables */
#define DATA_ERROR 111 /* another relic from pre-MySQL time */
@@ -127,7 +128,7 @@ be less than 256 */
a row in a compressed form */
#define DATA_MIX_ID_LEN 9 /* maximum stored length for mix id (in a
compressed dulint form) */
#define DATA_N_SYS_COLS 4 /* number of system columns defined above */

/* Flags ORed to the precise data type */
#define DATA_NOT_NULL 256 /* this is ORed to the precise type when
@@ -311,7 +312,7 @@ dtype_get_pad_char(
/*===============*/
/* out: padding character code, or
ULINT_UNDEFINED if no padding specified */
dtype_t* type); /* in: type */
const dtype_t* type); /* in: type */
/***************************************************************************
Returns the size of a fixed size data type, 0 if not a fixed size type. */
UNIV_INLINE

@@ -8,6 +8,7 @@ Created 1/16/1996 Heikki Tuuri

#include "mach0data.h"

#ifndef UNIV_HOTBACKUP
/**********************************************************************
Get the variable length bounds of the given character set.

@@ -20,6 +21,7 @@ innobase_get_cset_width(
ulint cset, /* in: MySQL charset-collation code */
ulint* mbminlen, /* out: minimum length of a char (in bytes) */
ulint* mbmaxlen); /* out: maximum length of a char (in bytes) */
#endif /* !UNIV_HOTBACKUP */

/*************************************************************************
Gets the MySQL charset-collation code for MySQL string types. */
@@ -55,9 +57,21 @@ dtype_set_mblen(
{
ut_ad(type);
if (dtype_is_string_type(type->mtype)) {
#ifndef UNIV_HOTBACKUP
innobase_get_cset_width(dtype_get_charset_coll(type->prtype),
&type->mbminlen, &type->mbmaxlen);
ut_ad(type->mbminlen <= type->mbmaxlen);
#else /* !UNIV_HOTBACKUP */
#ifdef notdefined
printf("ibbackup: DEBUG: type->mtype=%lu, type->prtype=%lu\n",
type->mtype, type->prtype);
#endif
ut_a(type->mtype <= DATA_BINARY);
#ifdef notdefined
ut_a(type->prtype == (DATA_BINARY | DATA_NOT_NULL));
#endif
type->mbminlen = type->mbmaxlen = 1;
#endif /* !UNIV_HOTBACKUP */
} else {
type->mbminlen = type->mbmaxlen = 0;
}
@@ -188,26 +202,35 @@ dtype_get_pad_char(
/*===============*/
/* out: padding character code, or
ULINT_UNDEFINED if no padding specified */
dtype_t* type) /* in: type */
const dtype_t* type) /* in: type */
{
if (type->mtype == DATA_CHAR
|| type->mtype == DATA_VARCHAR
|| type->mtype == DATA_BINARY
|| type->mtype == DATA_FIXBINARY
|| type->mtype == DATA_MYSQL
|| type->mtype == DATA_VARMYSQL
|| (type->mtype == DATA_BLOB
&& (type->prtype & DATA_BINARY_TYPE) == 0)) {

switch (type->mtype) {
case DATA_FIXBINARY:
case DATA_BINARY:
if (UNIV_UNLIKELY(dtype_get_charset_coll(type->prtype)
== DATA_MYSQL_BINARY_CHARSET_COLL)) {
/* Starting from 5.0.18, do not pad
VARBINARY or BINARY columns. */
return(ULINT_UNDEFINED);
}
/* Fall through */
case DATA_CHAR:
case DATA_VARCHAR:
case DATA_MYSQL:
case DATA_VARMYSQL:
/* Space is the padding character for all char and binary
strings, and starting from 5.0.3, also for TEXT strings. */

return((ulint)' ');
return(0x20);
case DATA_BLOB:
if ((type->prtype & DATA_BINARY_TYPE) == 0) {
return(0x20);
}
/* Fall through */
default:
/* No padding specified */
return(ULINT_UNDEFINED);
}

/* No padding specified */

return(ULINT_UNDEFINED);
}

/**************************************************************************
@@ -233,7 +256,7 @@ dtype_new_store_for_order_and_null_size(
buf[0] = buf[0] | 128;
}

/* In versions < 4.1.2 we had: if (type->prtype & DATA_NONLATIN1) {
buf[0] = buf[0] | 64;
}
*/
@@ -267,7 +290,7 @@ dtype_read_for_order_and_null_size(
type->prtype = buf[1];

if (buf[0] & 128) {
type->prtype = type->prtype | DATA_BINARY_TYPE;
}

type->len = mach_read_from_2(buf + 2);
@@ -329,7 +352,6 @@ dtype_new_read_for_order_and_null_size(
dtype_set_mblen(type);
}

#ifndef UNIV_HOTBACKUP
/***************************************************************************
Returns the size of a fixed size data type, 0 if not a fixed size type. */
UNIV_INLINE
@@ -374,8 +396,13 @@ dtype_get_fixed_size(
if (type->prtype & DATA_BINARY_TYPE) {
return(dtype_get_len(type));
} else {
#ifdef UNIV_HOTBACKUP
if (type->mbminlen == type->mbmaxlen) {
return(dtype_get_len(type));
}
#else /* UNIV_HOTBACKUP */
/* We play it safe here and ask MySQL for
mbminlen and mbmaxlen. Although
type->mbminlen and type->mbmaxlen are
initialized if and only if type->prtype
is (in one of the 3 functions in this file),
@@ -405,6 +432,7 @@ dtype_get_fixed_size(
if (mbminlen == mbmaxlen) {
return(dtype_get_len(type));
}
#endif /* !UNIV_HOTBACKUP */
}
/* fall through for variable-length charsets */
case DATA_VARCHAR:
@@ -476,7 +504,6 @@ dtype_get_min_size(

return(0);
}
#endif /* !UNIV_HOTBACKUP */

/***************************************************************************
Returns a stored SQL NULL size for a type. For fixed length types it is

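The dtype_get_pad_char() hunk above is the code side of bug #14189: [VAR]BINARY columns stop being space-padded, while the other character types keep the 0x20 pad. A rough standalone sketch of that decision, with stand-in constants and type names (the real definitions live in data0type.h), might look like:

    #include <limits.h>

    #define MY_ULINT_UNDEFINED  ULONG_MAX  /* stand-in for ULINT_UNDEFINED */
    #define MY_BINARY_COLL      63         /* stand-in for DATA_MYSQL_BINARY_CHARSET_COLL */

    enum my_mtype { MY_CHAR, MY_VARCHAR, MY_MYSQL, MY_VARMYSQL,
                    MY_FIXBINARY, MY_BINARY, MY_BLOB, MY_OTHER };

    /* Returns the padding character code, or MY_ULINT_UNDEFINED if the
    column must not be padded; binary_type stands in for
    (prtype & DATA_BINARY_TYPE). */
    static unsigned long
    my_pad_char(enum my_mtype mtype, unsigned long charset_coll, int binary_type)
    {
        switch (mtype) {
        case MY_FIXBINARY:
        case MY_BINARY:
            if (charset_coll == MY_BINARY_COLL) {
                return(MY_ULINT_UNDEFINED); /* 5.0.18+: no padding for [VAR]BINARY */
            }
            /* fall through */
        case MY_CHAR:
        case MY_VARCHAR:
        case MY_MYSQL:
        case MY_VARMYSQL:
            return(0x20);                   /* pad with space */
        case MY_BLOB:
            return(binary_type ? MY_ULINT_UNDEFINED : 0x20);
        default:
            return(MY_ULINT_UNDEFINED);     /* no padding specified */
        }
    }
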
@@ -42,14 +42,14 @@ Created 5/24/1996 Heikki Tuuri
#define DB_CANNOT_ADD_CONSTRAINT 38 /* adding a foreign key constraint
to a table failed */
#define DB_CORRUPTION 39 /* data structure corruption noticed */
#define DB_COL_APPEARS_TWICE_IN_INDEX 40 /* InnoDB cannot handle an index
where same column appears twice */
#define DB_COL_APPEARS_TWICE_IN_INDEX 40/* InnoDB cannot handle an index
where same column appears twice */
#define DB_CANNOT_DROP_CONSTRAINT 41 /* dropping a foreign key constraint
from a table failed */
#define DB_NO_SAVEPOINT 42 /* no savepoint exists with the given
name */
#define DB_TABLESPACE_ALREADY_EXISTS 43 /* we cannot create a new single-table
tablespace because a file of the same
name already exists */
#define DB_TABLESPACE_DELETED 44 /* tablespace does not exist or is
being dropped right now */
@@ -63,9 +63,9 @@ Created 5/24/1996 Heikki Tuuri
table */

/* The following are partial failure codes */
#define DB_FAIL 1000
#define DB_OVERFLOW 1001
#define DB_UNDERFLOW 1002
#define DB_STRONG_FAIL 1003
#define DB_RECORD_NOT_FOUND 1500
#define DB_END_OF_INDEX 1501

@@ -50,7 +50,9 @@ dict_sys_read_row_id(
/* out: row id */
byte* field) /* in: record field */
{
ut_ad(DATA_ROW_ID_LEN == 6);
#if DATA_ROW_ID_LEN != 6
# error "DATA_ROW_ID_LEN != 6"
#endif

return(mach_read_from_6(field));
}
@@ -64,7 +66,9 @@ dict_sys_write_row_id(
byte* field, /* in: record field */
dulint row_id) /* in: row id */
{
ut_ad(DATA_ROW_ID_LEN == 6);
#if DATA_ROW_ID_LEN != 6
# error "DATA_ROW_ID_LEN != 6"
#endif

mach_write_to_6(field, row_id);
}

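A recurring pattern in this snapshot (here in dict0boot.ic, and again in the dyn0dyn.ic and ibuf0ibuf.ic hunks below) is turning a debug-only runtime assertion on a compile-time constant into a preprocessor check that breaks the build instead. A minimal sketch of the idiom, assuming the usual DATA_ROW_ID_LEN value for illustration:

    #define DATA_ROW_ID_LEN 6       /* assumed value; the real define is in data0type.h */

    /* before: checked only at runtime, and only with UNIV_DEBUG:
       ut_ad(DATA_ROW_ID_LEN == 6); */

    /* after: any change to the constant fails the compile immediately */
    #if DATA_ROW_ID_LEN != 6
    # error "DATA_ROW_ID_LEN != 6"
    #endif
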
@@ -26,6 +26,7 @@ Created 1/8/1996 Heikki Tuuri
#include "ut0byte.h"
#include "trx0types.h"

#ifndef UNIV_HOTBACKUP
/**********************************************************************
Makes all characters in a NUL-terminated UTF-8 string lower case. */

@@ -33,6 +34,7 @@ void
dict_casedn_str(
/*============*/
char* a); /* in/out: string to put in lower case */
#endif /* !UNIV_HOTBACKUP */
/************************************************************************
Get the database name length in a table name. */

@@ -197,7 +199,8 @@ dict_foreign_add_to_cache(
/*======================*/
/* out: DB_SUCCESS or error code */
dict_foreign_t* foreign, /* in, own: foreign key constraint */
ibool check_types); /* in: TRUE=check type compatibility */
ibool check_charsets);/* in: TRUE=check charset
compatibility */
/*************************************************************************
Checks if a table is referenced by foreign keys. */

@@ -305,7 +308,7 @@ Checks if a table is in the dictionary cache. */
UNIV_INLINE
dict_table_t*
dict_table_check_if_in_cache_low(
/*==============================*/
/*=============================*/
/* out: table, NULL if not found */
const char* table_name); /* in: table name */
/**************************************************************************
@@ -484,6 +487,15 @@ dict_table_get_sys_col_no(
dict_table_t* table, /* in: table */
ulint sys); /* in: DATA_ROW_ID, ... */
/************************************************************************
Check whether the table uses the compact page format. */
UNIV_INLINE
ibool
dict_table_is_comp(
/*===============*/
/* out: TRUE if table uses the
compact page format */
const dict_table_t* table); /* in: table */
/************************************************************************
Checks if a column is in the ordering columns of the clustered index of a
table. Column prefixes are treated like whole columns. */

@@ -659,7 +671,6 @@ dict_index_add_col(
/*===============*/
dict_index_t* index, /* in: index */
dict_col_t* col, /* in: column */
ulint order, /* in: order criterion */
ulint prefix_len); /* in: column prefix length */
/***********************************************************************
Copies types of fields contained in index to tuple. */
@@ -679,13 +690,6 @@ dict_index_get_tree(
/* out: index tree */
dict_index_t* index); /* in: index */
/*************************************************************************
Gets the field order criterion. */
UNIV_INLINE
ulint
dict_field_get_order(
/*=================*/
dict_field_t* field);
/*************************************************************************
Gets the field column. */
UNIV_INLINE
dict_col_t*
@@ -770,7 +774,7 @@ dict_tree_build_node_ptr(
pointer */
ulint page_no,/* in: page number to put in node pointer */
mem_heap_t* heap, /* in: memory heap where pointer created */
ulint level); /* in: level of rec in tree: 0 means leaf
level */
/**************************************************************************
Copies an initial segment of a physical record, long enough to specify an
@@ -940,13 +944,13 @@ struct dict_sys_struct{
header and flushed to a file; in
recovery this must be derived from
the log records */
hash_table_t* table_hash; /* hash table of the tables, based
on name */
hash_table_t* table_id_hash; /* hash table of the tables, based
on id */
hash_table_t* col_hash; /* hash table of the columns */
UT_LIST_BASE_NODE_T(dict_table_t)
table_LRU; /* LRU list of tables */
ulint size; /* varying space in bytes occupied
by the data dictionary table and
index objects */

@@ -189,6 +189,25 @@ dict_table_get_sys_col_no(
return(table->n_cols - DATA_N_SYS_COLS + sys);
}

/************************************************************************
Check whether the table uses the compact page format. */
UNIV_INLINE
ibool
dict_table_is_comp(
/*===============*/
/* out: TRUE if table uses the
compact page format */
const dict_table_t* table) /* in: table */
{
ut_ad(table);

#if DICT_TF_COMPACT != TRUE
#error
#endif

return(UNIV_LIKELY(table->flags & DICT_TF_COMPACT));
}

/************************************************************************
Gets the number of fields in the internal representation of an index,
including fields added by the dictionary system. */
@@ -326,19 +345,6 @@ dict_index_get_tree(
return(index->tree);
}

/*************************************************************************
Gets the field order criterion. */
UNIV_INLINE
ulint
dict_field_get_order(
/*=================*/
dict_field_t* field)
{
ut_ad(field);

return(field->order);
}

/*************************************************************************
Gets the field column. */
UNIV_INLINE
@@ -494,7 +500,7 @@ Checks if a table is in the dictionary cache. */
UNIV_INLINE
dict_table_t*
dict_table_check_if_in_cache_low(
/*==============================*/
/*=============================*/
/* out: table, NULL if not found */
const char* table_name) /* in: table name */
{

@@ -82,7 +82,8 @@ dict_load_foreigns(
/*===============*/
/* out: DB_SUCCESS or error code */
const char* table_name, /* in: table name */
ibool check_types); /* in: TRUE=check type compatibility */
ibool check_charsets);/* in: TRUE=check charsets
compatibility */
/************************************************************************
Prints to the standard output information on all tables found in the data
dictionary system table. */

@@ -29,12 +29,9 @@ Created 1/8/1996 Heikki Tuuri
combination of types */
#define DICT_CLUSTERED 1 /* clustered index */
#define DICT_UNIQUE 2 /* unique index */
#define DICT_UNIVERSAL 4 /* index which can contain records from any
other index */
#define DICT_IBUF 8 /* insert buffer tree */

/* Flags for ordering an index field: OR'ing of the flags allowed */
#define DICT_DESCEND 1 /* in descending order (default ascending) */
#define DICT_IBUF 8 /* insert buffer tree */

/* Types for a table object */
#define DICT_TABLE_ORDINARY 1
@@ -42,6 +39,9 @@ combination of types */
#define DICT_TABLE_CLUSTER 3 /* this means that the table is
really a cluster definition */

/* Table flags */
#define DICT_TF_COMPACT 1 /* compact page format */

/**************************************************************************
Creates a table memory object. */

@@ -55,7 +55,7 @@ dict_mem_table_create(
is ignored if the table is made
a member of a cluster */
ulint n_cols, /* in: number of columns */
ibool comp); /* in: TRUE=compact page format */
ulint flags); /* in: table flags */
/**************************************************************************
Creates a cluster memory object. */

@@ -116,8 +116,6 @@ dict_mem_index_add_field(
/*=====================*/
dict_index_t* index, /* in: index */
const char* name, /* in: column name */
ulint order, /* in: order criterion; 0 means an
ascending order */
ulint prefix_len); /* in: 0 or the column prefix length
in a MySQL index like
INDEX (textcol(25)) */
@@ -163,8 +161,6 @@ UTF-8 charset. In that charset, a character may take at most 3 bytes. */
struct dict_field_struct{
dict_col_t* col; /* pointer to the table column */
const char* name; /* name of the column */
ulint order; /* flags for ordering this field:
DICT_DESCEND, ... */
ulint prefix_len; /* 0 or the length of the column
prefix in bytes in a MySQL index of
type, e.g., INDEX (textcol(25));
@@ -175,10 +171,6 @@ struct dict_field_struct{
ulint fixed_len; /* 0 or the fixed length of the
column if smaller than
DICT_MAX_INDEX_COL_LEN */
ulint fixed_offs; /* offset to the field, or
ULINT_UNDEFINED if it is not fixed
within the record (due to preceding
variable-length fields) */
};

/* Data structure for an index tree */
@@ -311,6 +303,7 @@ a foreign key constraint is enforced, therefore RESTRICT just means no flag */
struct dict_table_struct{
dulint id; /* id of the table or cluster */
ulint type; /* DICT_TABLE_ORDINARY, ... */
ulint flags; /* DICT_TF_COMPACT, ... */
mem_heap_t* heap; /* memory heap */
const char* name; /* table name */
const char* dir_path_of_temp_table;/* NULL or the directory path
@@ -328,7 +321,6 @@ struct dict_table_struct{
ibool tablespace_discarded;/* this flag is set TRUE when the
user calls DISCARD TABLESPACE on this table,
and reset to FALSE in IMPORT TABLESPACE */
ibool comp; /* flag: TRUE=compact page format */
hash_node_t name_hash; /* hash chain node */
hash_node_t id_hash; /* hash chain node */
ulint n_def; /* number of columns defined so far */
@@ -408,9 +400,9 @@ struct dict_table_struct{
database pages */
ulint stat_sum_of_other_index_sizes;
/* other indexes in database pages */
ibool stat_initialized; /* TRUE if statistics have
been calculated the first time
after database startup or table creation */
ulint stat_modified_counter;
/* when a row is inserted, updated, or deleted,
we add 1 to this number; we calculate new

@@ -106,7 +106,9 @@ dyn_array_create(
size sizeof(dyn_array_t) */
{
ut_ad(arr);
ut_ad(DYN_ARRAY_DATA_SIZE < DYN_BLOCK_FULL_FLAG);
#if DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG
# error "DYN_ARRAY_DATA_SIZE >= DYN_BLOCK_FULL_FLAG"
#endif

arr->heap = NULL;
arr->used = 0;
@@ -296,7 +298,7 @@ dyn_array_get_data_size(
dyn_array_t* arr) /* in: dyn array */
{
dyn_block_t* block;
ulint sum = 0;

ut_ad(arr);
ut_ad(arr->magic_n == DYN_BLOCK_MAGIC_N);

@@ -77,7 +77,7 @@ Gets a iboolean value from a query node. */
UNIV_INLINE
ibool
eval_node_get_ibool_val(
/*===================*/
/*====================*/
/* out: iboolean value */
que_node_t* node); /* in: query graph node */
/*********************************************************************

@@ -149,7 +149,7 @@ Gets a iboolean value from a query node. */
UNIV_INLINE
ibool
eval_node_get_ibool_val(
/*===================*/
/*====================*/
/* out: iboolean value */
que_node_t* node) /* in: query graph node */
{
@@ -170,7 +170,7 @@ Sets a iboolean value as the value of a function node. */
UNIV_INLINE
void
eval_node_set_ibool_val(
/*===================*/
/*====================*/
func_node_t* func_node, /* in: function node */
ibool val) /* in: value to set */
{

@@ -63,7 +63,20 @@ extern fil_addr_t fil_addr_null;
#define FIL_PAGE_LSN 16 /* lsn of the end of the newest
modification log record to the page */
#define FIL_PAGE_TYPE 24 /* file page type: FIL_PAGE_INDEX,...,
2 bytes */
2 bytes.

The contents of this field can only
be trusted in the following case:
if the page is an uncompressed
B-tree index page, then it is
guaranteed that the value is
FIL_PAGE_INDEX.
The opposite does not hold.

In tablespaces created by
MySQL/InnoDB 5.1.7 or later, the
contents of this field is valid
for all uncompressed pages. */
#define FIL_PAGE_FILE_FLUSH_LSN 26 /* this is only defined for the
first page in a data file: the file
has been flushed to disk at least up
@@ -79,14 +92,22 @@ extern fil_addr_t fil_addr_null;
to the last 4 bytes of FIL_PAGE_LSN */
#define FIL_PAGE_DATA_END 8

/* File page types */
#define FIL_PAGE_INDEX 17855
#define FIL_PAGE_UNDO_LOG 2
#define FIL_PAGE_INODE 3
#define FIL_PAGE_IBUF_FREE_LIST 4
/* File page types (values of FIL_PAGE_TYPE) */
#define FIL_PAGE_INDEX 17855 /* B-tree node */
#define FIL_PAGE_UNDO_LOG 2 /* Undo log page */
#define FIL_PAGE_INODE 3 /* Index node */
#define FIL_PAGE_IBUF_FREE_LIST 4 /* Insert buffer free list */
/* File page types introduced in MySQL/InnoDB 5.1.7 */
#define FIL_PAGE_TYPE_ALLOCATED 0 /* Freshly allocated page */
#define FIL_PAGE_IBUF_BITMAP 5 /* Insert buffer bitmap */
#define FIL_PAGE_TYPE_SYS 6 /* System page */
#define FIL_PAGE_TYPE_TRX_SYS 7 /* Transaction system data */
#define FIL_PAGE_TYPE_FSP_HDR 8 /* File space header */
#define FIL_PAGE_TYPE_XDES 9 /* Extent descriptor page */
#define FIL_PAGE_TYPE_BLOB 10 /* Uncompressed BLOB page */

/* Space types */
#define FIL_TABLESPACE 501
#define FIL_LOG 502

extern ulint fil_n_log_flushes;
@@ -271,7 +292,7 @@ Decrements the count of pending insert buffer page merges. */

void
fil_decr_pending_ibuf_merges(
/*========================*/
/*=========================*/
ulint id); /* in: space id */
/***********************************************************************
Parses the body of a log record written about an .ibd file operation. That is,
@@ -289,13 +310,13 @@ datadir that we should use in replaying the file operations. */
byte*
fil_op_log_parse_or_replay(
/*=======================*/
/* out: end of log record, or NULL if the
record was not completely contained between
ptr and end_ptr */
byte* ptr, /* in: buffer containing the log record body,
or an initial segment of it, if the record does
not fir completely between ptr and end_ptr */
byte* end_ptr, /* in: buffer end */
ulint type, /* in: the type of this log record */
ibool do_replay, /* in: TRUE if we want to replay the
operation, and not just parse the log record */
@@ -665,7 +686,7 @@ Sets the file page type. */
void
fil_page_set_type(
/*==============*/
byte* page, /* in: file page */
ulint type); /* in: type */
/*************************************************************************
Gets the file page type. */
@@ -675,7 +696,7 @@ fil_page_get_type(
/*==============*/
/* out: type; NOTE that if the type has not been
written to page, the return value not defined */
byte* page); /* in: file page */


typedef struct fil_space_struct fil_space_t;

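The expanded FIL_PAGE_TYPE comment and the new FIL_PAGE_TYPE_* values above describe a 2-byte field at byte offset 24 of every page, stored most significant byte first. As an illustrative sketch only (the helper name is made up; InnoDB itself reads the field through fil_page_get_type()):

    #define MY_FIL_PAGE_TYPE   24      /* offset taken from the hunk above */
    #define MY_FIL_PAGE_INDEX  17855   /* B-tree node */

    /* Reads the 2-byte page type, most significant byte first. */
    static unsigned long
    my_page_get_type(const unsigned char* page)
    {
        return(((unsigned long) page[MY_FIL_PAGE_TYPE] << 8)
               | (unsigned long) page[MY_FIL_PAGE_TYPE + 1]);
    }

Per the comment above, in tablespaces older than 5.1.7 only the value FIL_PAGE_INDEX can be trusted; any other value may simply mean the field was never written.
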
@@ -80,8 +80,8 @@ Reads the space id from the first page of a tablespace. */
ulint
fsp_header_get_space_id(
/*====================*/
/* out: space id, ULINT UNDEFINED if error */
page_t* page); /* in: first page of a tablespace */
/**************************************************************************
Writes the space id to a tablespace header. This function is used past the
buffer pool when we in fil0fil.c create a new single-table tablespace. */
@@ -158,7 +158,7 @@ ulint
fseg_n_reserved_pages(
/*==================*/
/* out: number of reserved pages */
fseg_header_t* header, /* in: segment header */
ulint* used, /* out: number of pages used (<= reserved) */
mtr_t* mtr); /* in: mtr handle */
/**************************************************************************
@@ -232,9 +232,9 @@ ibool
fsp_reserve_free_extents(
/*=====================*/
/* out: TRUE if we were able to make the reservation */
ulint* n_reserved,/* out: number of extents actually reserved; if we
return TRUE and the tablespace size is < 64 pages,
then this can be 0, otherwise it is n_ext */
ulint space, /* in: space id */
ulint n_ext, /* in: number of extents to reserve */
ulint alloc_type,/* in: FSP_NORMAL, FSP_UNDO, or FSP_CLEANING */

@@ -99,13 +99,15 @@ ha_remove_all_nodes_to_page(
ulint fold, /* in: fold value */
page_t* page); /* in: buffer page */
/*****************************************************************
Validates a hash table. */
Validates a given range of the cells in hash table. */

ibool
ha_validate(
/*========*/
/* out: TRUE if ok */
hash_table_t* table); /* in: hash table */
/* out: TRUE if ok */
hash_table_t* table, /* in: hash table */
ulint start_index, /* in: start index */
ulint end_index); /* in: end index */
/*****************************************************************
Prints info of a hash table. */

@@ -109,8 +109,8 @@ do {\
\
while (struct3333->NAME != DATA) {\
\
ut_a(struct3333);\
struct3333 = struct3333->NAME;\
ut_a(struct3333);\
}\
\
struct3333->NAME = DATA->NAME;\
@@ -153,8 +153,8 @@ hash_cell_t*
hash_get_nth_cell(
/*==============*/
/* out: pointer to cell */
hash_table_t* table, /* in: hash table */
ulint n); /* in: cell index */
/*****************************************************************
Returns the number of cells in a hash table. */
UNIV_INLINE
@@ -229,8 +229,8 @@ ulint
hash_get_mutex_no(
/*==============*/
/* out: mutex number */
hash_table_t* table, /* in: hash table */
ulint fold); /* in: fold */
/****************************************************************
Gets the nth heap in a hash table. */
UNIV_INLINE
@@ -238,8 +238,8 @@ mem_heap_t*
hash_get_nth_heap(
/*==============*/
/* out: mem heap */
hash_table_t* table, /* in: hash table */
ulint i); /* in: index of the heap */
/****************************************************************
Gets the heap for a fold value in a hash table. */
UNIV_INLINE
@@ -247,8 +247,8 @@ mem_heap_t*
hash_get_heap(
/*==========*/
/* out: mem heap */
hash_table_t* table, /* in: hash table */
ulint fold); /* in: fold */
/****************************************************************
Gets the nth mutex in a hash table. */
UNIV_INLINE
@@ -256,8 +256,8 @@ mutex_t*
hash_get_nth_mutex(
/*===============*/
/* out: mutex */
hash_table_t* table, /* in: hash table */
ulint i); /* in: index of the mutex */
/****************************************************************
Gets the mutex for a fold value in a hash table. */
UNIV_INLINE
@@ -265,38 +265,38 @@ mutex_t*
hash_get_mutex(
/*===========*/
/* out: mutex */
hash_table_t* table, /* in: hash table */
ulint fold); /* in: fold */
/****************************************************************
Reserves the mutex for a fold value in a hash table. */

void
hash_mutex_enter(
/*=============*/
hash_table_t* table, /* in: hash table */
ulint fold); /* in: fold */
/****************************************************************
Releases the mutex for a fold value in a hash table. */

void
hash_mutex_exit(
/*============*/
hash_table_t* table, /* in: hash table */
ulint fold); /* in: fold */
/****************************************************************
Reserves all the mutexes of a hash table, in an ascending order. */

void
hash_mutex_enter_all(
/*=================*/
hash_table_t* table); /* in: hash table */
/****************************************************************
Releases all the mutexes of a hash table. */

void
hash_mutex_exit_all(
/*================*/
hash_table_t* table); /* in: hash table */


struct hash_cell_struct{

@@ -15,8 +15,8 @@ hash_cell_t*
hash_get_nth_cell(
/*==============*/
/* out: pointer to cell */
hash_table_t* table, /* in: hash table */
ulint n) /* in: cell index */
{
ut_ad(n < table->n_cells);

@@ -55,8 +55,8 @@ ulint
hash_get_mutex_no(
/*==============*/
/* out: mutex number */
hash_table_t* table, /* in: hash table */
ulint fold) /* in: fold */
{
return(ut_2pow_remainder(fold, table->n_mutexes));
}
@@ -68,8 +68,8 @@ mem_heap_t*
hash_get_nth_heap(
/*==============*/
/* out: mem heap */
hash_table_t* table, /* in: hash table */
ulint i) /* in: index of the heap */
{
ut_ad(i < table->n_mutexes);

@@ -83,8 +83,8 @@ mem_heap_t*
hash_get_heap(
/*==========*/
/* out: mem heap */
hash_table_t* table, /* in: hash table */
ulint fold) /* in: fold */
{
ulint i;

@@ -104,8 +104,8 @@ mutex_t*
hash_get_nth_mutex(
/*===============*/
/* out: mutex */
hash_table_t* table, /* in: hash table */
ulint i) /* in: index of the mutex */
{
ut_ad(i < table->n_mutexes);

@@ -119,8 +119,8 @@ mutex_t*
hash_get_mutex(
/*===========*/
/* out: mutex */
hash_table_t* table, /* in: hash table */
ulint fold) /* in: fold */
{
ulint i;

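hash_get_mutex_no() above maps a fold value onto one of the table's protecting mutexes via ut_2pow_remainder(). Assuming that helper is the usual power-of-two modulus (n & (m - 1)), the mapping reduces to the sketch below; the function name is hypothetical:

    #include <assert.h>

    static unsigned long
    my_pick_mutex_no(unsigned long fold, unsigned long n_mutexes)
    {
        /* the bitmask equals fold % n_mutexes only when n_mutexes is a power of two */
        assert(n_mutexes != 0 && (n_mutexes & (n_mutexes - 1)) == 0);
        return(fold & (n_mutexes - 1));
    }
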
@@ -88,8 +88,8 @@ ibuf_should_try(
decide */
{
if (!(index->type & DICT_CLUSTERED)
&& (ignore_sec_unique || !(index->type & DICT_UNIQUE))
&& ibuf->meter > IBUF_THRESHOLD) {

ibuf_flush_count++;

@@ -206,8 +206,9 @@ ibuf_update_free_bits_if_full(
before = ibuf_index_page_calc_free_bits(max_ins_size);

if (max_ins_size >= increase) {
ut_ad(ULINT_UNDEFINED > UNIV_PAGE_SIZE);

#if ULINT32_UNDEFINED <= UNIV_PAGE_SIZE
# error "ULINT32_UNDEFINED <= UNIV_PAGE_SIZE"
#endif
after = ibuf_index_page_calc_free_bits(max_ins_size
- increase);
#ifdef UNIV_IBUF_DEBUG

@@ -427,7 +427,7 @@ to a lock. */
void
lock_rec_unlock(
/*============*/
trx_t* trx, /* in: transaction that has set a record
lock */
rec_t* rec, /* in: record */
ulint lock_mode); /* in: LOCK_S or LOCK_X */
@@ -477,7 +477,7 @@ searching for a lock in the hash table. */
UNIV_INLINE
ulint
lock_rec_fold(
/*===========*/
/*==========*/
/* out: folded value */
ulint space, /* in: space */
ulint page_no);/* in: page number */
@@ -634,7 +634,7 @@ extern lock_sys_t* lock_sys;
the bit is set; locks of this type are created
when records are removed from the index chain
of records */
#define LOCK_REC_NOT_GAP 1024 /* this bit means that the lock is only on
the index record and does NOT block inserts
to the gap before the index record; this is
used in the case when we retrieve a record

@@ -19,7 +19,7 @@ typedef struct log_group_struct log_group_t;

#ifdef UNIV_DEBUG
extern ibool log_do_write;
extern ibool log_debug_writes;
#else /* UNIV_DEBUG */
# define log_do_write TRUE
#endif /* UNIV_DEBUG */
@@ -493,9 +493,9 @@ Peeks the current lsn. */
ibool
log_peek_lsn(
/*=========*/
/* out: TRUE if success, FALSE if could not get the
log system mutex */
dulint* lsn); /* out: if returns TRUE, current lsn is here */
/**************************************************************************
Refreshes the statistics used to print per-second averages. */

@@ -514,7 +514,7 @@ extern log_t* log_sys;
/* The counting of lsn's starts from this value: this must be non-zero */
#define LOG_START_LSN ut_dulint_create(0, 16 * OS_FILE_LOG_BLOCK_SIZE)

#define LOG_BUFFER_SIZE (srv_log_buffer_size * UNIV_PAGE_SIZE)
#define LOG_ARCHIVE_BUF_SIZE (srv_log_buffer_size * UNIV_PAGE_SIZE / 4)

/* Offsets of a log block header */
@@ -571,8 +571,8 @@ extern log_t* log_sys;

#define LOG_CHECKPOINT_ARRAY_END (LOG_CHECKPOINT_GROUP_ARRAY\
+ LOG_MAX_N_GROUPS * 8)
#define LOG_CHECKPOINT_CHECKSUM_1 LOG_CHECKPOINT_ARRAY_END
#define LOG_CHECKPOINT_CHECKSUM_2 (4 + LOG_CHECKPOINT_ARRAY_END)
#define LOG_CHECKPOINT_FSP_FREE_LIMIT (8 + LOG_CHECKPOINT_ARRAY_END)
/* current fsp free limit in
tablespace 0, in units of one
@@ -774,11 +774,11 @@ struct log_struct{
called */

/* Fields involved in checkpoints */
ulint log_group_capacity; /* capacity of the log group; if
the checkpoint age exceeds this, it is
a serious error because it is possible
we will then overwrite log and spoil
crash recovery */
ulint max_modified_age_async;
/* when this recommended value for lsn
- buf_pool_get_oldest_modification()

@@ -318,14 +318,14 @@ log_reserve_and_write_fast(

if (data_len >= OS_FILE_LOG_BLOCK_SIZE - LOG_BLOCK_TRL_SIZE) {

/* The string does not fit within the current log block
or the log block would become full */

*success = FALSE;

mutex_exit(&(log->mutex));

return(ut_dulint_zero);
}

*start_lsn = log->lsn;
@@ -333,7 +333,7 @@ log_reserve_and_write_fast(
ut_memcpy(log->buf + log->buf_free, str, len);

log_block_set_data_len(ut_align_down(log->buf + log->buf_free,
OS_FILE_LOG_BLOCK_SIZE),
data_len);
#ifdef UNIV_LOG_DEBUG
log->old_buf_free = log->buf_free;

@@ -341,7 +341,7 @@ extern ibool recv_recovery_on;
extern ibool recv_no_ibuf_operations;
extern ibool recv_needed_recovery;

extern ibool recv_lsn_checks_on;
#ifdef UNIV_HOTBACKUP
extern ibool recv_is_making_a_backup;
#endif /* UNIV_HOTBACKUP */

@@ -24,8 +24,8 @@ UNIV_INLINE
void
mach_write_to_1(
/*============*/
byte* b, /* in: pointer to byte where to store */
ulint n); /* in: ulint integer to be stored, >= 0, < 256 */
/************************************************************
The following function is used to fetch data from one byte. */
UNIV_INLINE
@@ -33,7 +33,7 @@ ulint
mach_read_from_1(
/*=============*/
/* out: ulint integer, >= 0, < 256 */
byte* b); /* in: pointer to byte */
/***********************************************************
The following function is used to store data in two consecutive
bytes. We store the most significant byte to the lower address. */
@@ -41,8 +41,8 @@ UNIV_INLINE
void
mach_write_to_2(
/*============*/
byte* b, /* in: pointer to two bytes where to store */
ulint n); /* in: ulint integer to be stored, >= 0, < 64k */
/************************************************************
The following function is used to fetch data from two consecutive
bytes. The most significant byte is at the lowest address. */
@@ -51,7 +51,7 @@ ulint
mach_read_from_2(
/*=============*/
/* out: ulint integer, >= 0, < 64k */
byte* b); /* in: pointer to two bytes */

/************************************************************
The following function is used to convert a 16-bit data item
@@ -80,8 +80,8 @@ UNIV_INLINE
void
mach_write_to_3(
/*============*/
byte* b, /* in: pointer to 3 bytes where to store */
ulint n); /* in: ulint integer to be stored */
/************************************************************
The following function is used to fetch data from 3 consecutive
bytes. The most significant byte is at the lowest address. */
@@ -90,7 +90,7 @@ ulint
mach_read_from_3(
/*=============*/
/* out: ulint integer */
byte* b); /* in: pointer to 3 bytes */
/***********************************************************
The following function is used to store data in four consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -98,8 +98,8 @@ UNIV_INLINE
void
mach_write_to_4(
/*============*/
byte* b, /* in: pointer to four bytes where to store */
ulint n); /* in: ulint integer to be stored */
/************************************************************
The following function is used to fetch data from 4 consecutive
bytes. The most significant byte is at the lowest address. */
@@ -108,7 +108,7 @@ ulint
mach_read_from_4(
/*=============*/
/* out: ulint integer */
byte* b); /* in: pointer to four bytes */
/*************************************************************
Writes a ulint in a compressed form (1..5 bytes). */
UNIV_INLINE
@@ -116,8 +116,8 @@ ulint
mach_write_compressed(
/*==================*/
/* out: stored size in bytes */
byte* b, /* in: pointer to memory where to store */
ulint n); /* in: ulint integer to be stored */
/*************************************************************
Returns the size of an ulint when written in the compressed form. */
UNIV_INLINE
@@ -125,7 +125,7 @@ ulint
mach_get_compressed_size(
/*=====================*/
/* out: compressed size in bytes */
ulint n); /* in: ulint integer to be stored */
/*************************************************************
Reads a ulint in a compressed form. */
UNIV_INLINE
@@ -133,7 +133,7 @@ ulint
mach_read_compressed(
/*=================*/
/* out: read integer */
byte* b); /* in: pointer to memory from where to read */
/***********************************************************
The following function is used to store data in 6 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -141,8 +141,8 @@ UNIV_INLINE
void
mach_write_to_6(
/*============*/
byte* b, /* in: pointer to 6 bytes where to store */
dulint n); /* in: dulint integer to be stored */
/************************************************************
The following function is used to fetch data from 6 consecutive
bytes. The most significant byte is at the lowest address. */
@@ -151,7 +151,7 @@ dulint
mach_read_from_6(
/*=============*/
/* out: dulint integer */
byte* b); /* in: pointer to 6 bytes */
/***********************************************************
The following function is used to store data in 7 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -159,8 +159,8 @@ UNIV_INLINE
void
mach_write_to_7(
/*============*/
byte* b, /* in: pointer to 7 bytes where to store */
dulint n); /* in: dulint integer to be stored */
/************************************************************
The following function is used to fetch data from 7 consecutive
bytes. The most significant byte is at the lowest address. */
@@ -169,7 +169,7 @@ dulint
mach_read_from_7(
/*=============*/
/* out: dulint integer */
byte* b); /* in: pointer to 7 bytes */
/***********************************************************
The following function is used to store data in 8 consecutive
bytes. We store the most significant byte to the lowest address. */
@@ -177,8 +177,8 @@ UNIV_INLINE
void
mach_write_to_8(
/*============*/
byte* b, /* in: pointer to 8 bytes where to store */
dulint n); /* in: dulint integer to be stored */
/************************************************************
The following function is used to fetch data from 8 consecutive
bytes. The most significant byte is at the lowest address. */
@@ -187,7 +187,7 @@ dulint
mach_read_from_8(
/*=============*/
/* out: dulint integer */
byte* b); /* in: pointer to 8 bytes */
/*************************************************************
Writes a dulint in a compressed form (5..9 bytes). */
UNIV_INLINE
@@ -195,8 +195,8 @@ ulint
mach_dulint_write_compressed(
/*=========================*/
/* out: size in bytes */
byte* b, /* in: pointer to memory where to store */
dulint n); /* in: dulint integer to be stored */
/*************************************************************
Returns the size of a dulint when written in the compressed form. */
UNIV_INLINE
@@ -204,7 +204,7 @@ ulint
mach_dulint_get_compressed_size(
/*============================*/
/* out: compressed size in bytes */
dulint n); /* in: dulint integer to be stored */
/*************************************************************
Reads a dulint in a compressed form. */
UNIV_INLINE
@@ -212,7 +212,7 @@ dulint
mach_dulint_read_compressed(
/*========================*/
/* out: read dulint */
byte* b); /* in: pointer to memory from where to read */
/*************************************************************
Writes a dulint in a compressed form (1..11 bytes). */
UNIV_INLINE
@@ -220,8 +220,8 @@ ulint
mach_dulint_write_much_compressed(
/*==============================*/
/* out: size in bytes */
byte* b, /* in: pointer to memory where to store */
dulint n); /* in: dulint integer to be stored */
/*************************************************************
Returns the size of a dulint when written in the compressed form. */
UNIV_INLINE
@@ -229,7 +229,7 @@ ulint
mach_dulint_get_much_compressed_size(
/*=================================*/
/* out: compressed size in bytes */
dulint n); /* in: dulint integer to be stored */
/*************************************************************
Reads a dulint in a compressed form. */
UNIV_INLINE
@@ -237,7 +237,7 @@ dulint
mach_dulint_read_much_compressed(
/*=============================*/
/* out: read dulint */
byte* b); /* in: pointer to memory from where to read */
/*************************************************************
Reads a ulint in a compressed form if the log record fully contains it. */

@@ -246,7 +246,7 @@ mach_parse_compressed(
/*==================*/
/* out: pointer to end of the stored field, NULL if
not complete */
byte* ptr, /* in: pointer to buffer from where to read */
byte* end_ptr,/* in: pointer to end of the buffer */
ulint* val); /* out: read value */
|
||||
/*************************************************************
|
||||
@ -257,7 +257,7 @@ mach_dulint_parse_compressed(
|
||||
/*=========================*/
|
||||
/* out: pointer to end of the stored field, NULL if
|
||||
not complete */
|
||||
byte* ptr, /* in: pointer to buffer from where to read */
|
||||
byte* ptr, /* in: pointer to buffer from where to read */
|
||||
byte* end_ptr,/* in: pointer to end of the buffer */
|
||||
dulint* val); /* out: read value */
|
||||
/*************************************************************
|
||||
@ -267,31 +267,31 @@ double
|
||||
mach_double_read(
|
||||
/*=============*/
|
||||
/* out: double read */
|
||||
byte* b); /* in: pointer to memory from where to read */
|
||||
byte* b); /* in: pointer to memory from where to read */
|
||||
/*************************************************************
|
||||
Writes a double. It is stored in a little-endian format. */
|
||||
UNIV_INLINE
|
||||
void
|
||||
mach_double_write(
|
||||
/*==============*/
|
||||
byte* b, /* in: pointer to memory where to write */
|
||||
double d); /* in: double */
|
||||
byte* b, /* in: pointer to memory where to write */
|
||||
double d); /* in: double */
|
||||
/*************************************************************
|
||||
Reads a float. It is stored in a little-endian format. */
|
||||
UNIV_INLINE
|
||||
float
|
||||
mach_float_read(
|
||||
/*=============*/
|
||||
/*============*/
|
||||
/* out: float read */
|
||||
byte* b); /* in: pointer to memory from where to read */
|
||||
byte* b); /* in: pointer to memory from where to read */
|
||||
/*************************************************************
|
||||
Writes a float. It is stored in a little-endian format. */
|
||||
UNIV_INLINE
|
||||
void
|
||||
mach_float_write(
|
||||
/*==============*/
|
||||
byte* b, /* in: pointer to memory where to write */
|
||||
float d); /* in: float */
|
||||
/*=============*/
|
||||
byte* b, /* in: pointer to memory where to write */
|
||||
float d); /* in: float */
|
||||
/*************************************************************
|
||||
Reads a ulint stored in the little-endian format. */
|
||||
UNIV_INLINE
|
||||
|
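As a reference for the storage convention these declarations describe ("we store
the most significant byte to the lowest address"), here is a minimal stand-alone
sketch of the 4-byte case. It mirrors the shifts visible in mach0data.ic below,
but it is an illustrative example, not the InnoDB code itself:

	#include <stdio.h>

	typedef unsigned long int ulint;	/* roughly as in univ.i */
	typedef unsigned char     byte;

	/* Store a 32-bit value with the most significant byte first. */
	static void my_write_to_4(byte* b, ulint n)
	{
		b[0] = (byte)(n >> 24);
		b[1] = (byte)(n >> 16);
		b[2] = (byte)(n >> 8);
		b[3] = (byte)(n);
	}

	/* Fetch it back; the result is the same on any CPU byte order. */
	static ulint my_read_from_4(byte* b)
	{
		return(((ulint)(b[0]) << 24)
		       + ((ulint)(b[1]) << 16)
		       + ((ulint)(b[2]) << 8)
		       + (ulint)(b[3]));
	}

	int main(void)
	{
		byte	buf[4];

		my_write_to_4(buf, 0x12345678UL);
		printf("%02x %02x %02x %02x -> %lx\n",
		       buf[0], buf[1], buf[2], buf[3], my_read_from_4(buf));
		return(0);
	}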
mach0data.ic: the same whitespace cleanup in the inline definitions:
mach_write_to_1(), mach_read_from_1(), mach_write_to_2(), mach_read_from_2(),
mach_write_to_3(), mach_read_from_3(), mach_write_to_4(), mach_read_from_4()
(including the disabled UNIV_INTEL/UNIV_VISUALC branch), mach_write_compressed(),
mach_get_compressed_size(), mach_read_compressed(), mach_write_to_8(),
mach_read_from_8(), mach_write_to_7(), mach_read_from_7(), mach_write_to_6(),
mach_read_from_6(), the mach_dulint_*_compressed() and
mach_dulint_*_much_compressed() variants, mach_double_read(),
mach_double_write(), mach_float_read() and mach_float_write(). Parameter and
local variable declarations are re-indented, the multi-line return expressions
(for example the byte shifts in mach_read_from_2() and mach_read_from_3()) are
re-aligned, and the comment banners under mach_float_read() and
mach_float_write() are corrected. No functional change.
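The dulint routines above handle a 64-bit value as a pair of 32-bit words, high
word first, using the ut_dulint_get_high()/ut_dulint_get_low() accessors seen in
the definitions. A minimal sketch of that layout, with invented names and
assuming 32-bit words; it is not the InnoDB implementation:

	typedef unsigned long int ulint;
	typedef unsigned char     byte;

	/* A 64-bit value kept as two 32-bit words, like InnoDB's dulint. */
	typedef struct { ulint high; ulint low; } my_dulint;

	/* Store the high word at the lower address, then the low word, each
	   big-endian, so the most significant byte of the whole number comes
	   first, matching mach_write_to_8(). */
	static void my_write_to_8(byte* b, my_dulint n)
	{
		ulint	i;

		for (i = 0; i < 4; i++) {
			b[i]     = (byte)(n.high >> (24 - 8 * i));
			b[i + 4] = (byte)(n.low  >> (24 - 8 * i));
		}
	}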
mem0dbg.h: the MEM_FIELD_HEADER_SIZE, MEM_FIELD_TRAILER_SIZE and
MEM_SPACE_NEEDED() macros are re-indented and their backslash continuations
consolidated; the parameter comments of mem_heap_validate_or_print() (heap, top,
print, error, us_size, ph_size, n_blocks) are re-aligned.
mem_analyze_corruption(): the parameter is changed from byte* ptr to void* ptr.
The declarations of mem_init_buf() and mem_erase_buf() get the same whitespace
cleanup.
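MEM_SPACE_NEEDED() rounds an allocation request up to UNIV_MEM_ALIGNMENT with
ut_calc_align(). A stand-alone sketch of that round-up arithmetic; the real
ut_calc_align() lives elsewhere in the tree, and the constant below is a
placeholder chosen for the example:

	typedef unsigned long int ulint;

	#define MY_MEM_ALIGNMENT	8	/* stand-in for UNIV_MEM_ALIGNMENT */

	/* Round n up to the next multiple of align; align is assumed to be a
	   power of two, which is what makes the bit trick valid. */
	static ulint my_calc_align(ulint n, ulint align)
	{
		return((n + align - 1) & ~(align - 1));
	}

	/* Space needed for a user request of N bytes when a debug header and
	   trailer are added around it, in the spirit of MEM_SPACE_NEEDED(). */
	#define MY_SPACE_NEEDED(N, HDR, TRL) \
		my_calc_align((N) + (HDR) + (TRL), MY_MEM_ALIGNMENT)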
mem0mem.h: whitespace cleanup and macro reflow. The MEM_BLOCK_START_SIZE and
MEM_BLOCK_STANDARD_SIZE defines are re-aligned; the mem_heap_create(),
mem_heap_create_in_buffer(), mem_heap_create_in_btr_search() and
mem_heap_fast_create() macros have their backslash continuations consolidated
onto fewer lines (they still expand to mem_heap_create_func() with the same
heap type flags plus __FILE__ and __LINE__); the parameter comments of
mem_heap_free_func(), mem_heap_alloc(), mem_heap_get_heap_top(),
mem_heap_free_heap_top(), mem_heap_empty(), mem_heap_get_top(),
mem_heap_free_top() and mem_heap_get_size() are re-indented; the mem_alloc(),
mem_alloc_noninline() and mem_free() macros, the mem_block_info_struct fields
(magic_n, len, type, free, start, free_block) and the MEM_BLOCK_HEADER_SIZE
define get the same treatment. No functional change.
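The mem_heap_create() family uses the usual pattern for capturing the caller's
source location: a macro forwards to a *_func() and appends __FILE__ and
__LINE__. A minimal sketch of the same pattern, with invented names:

	#include <stdio.h>
	#include <stdlib.h>

	/* Does the real work and records where it was called from. */
	static void* my_alloc_func(unsigned long n, const char* file_name, int line)
	{
		printf("alloc %lu bytes at %s:%d\n", n, file_name, line);
		return(malloc(n));
	}

	/* Use this macro instead of the function, as mem_heap_create() does,
	   so that every call site is tagged automatically:
	   my_alloc(100) expands to my_alloc_func(100, "this_file.c", <line>). */
	#define my_alloc(N)	my_alloc_func((N), __FILE__, __LINE__)

This is why the header repeats "Use this macro instead of the corresponding
function!" above each of these macros.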
mem0mem.ic: matching whitespace cleanup in the inline definitions of
mem_heap_add_block(), mem_heap_alloc(), mem_heap_get_heap_top(),
mem_heap_free_heap_top(), mem_heap_empty(), mem_heap_get_top(),
mem_heap_free_top(), mem_heap_free_func(), mem_alloc_func(), mem_free_func()
and mem_heap_get_size(). Parameter lists, local declarations and wrapped
conditions such as
	if ((heap != block) && (mem_block_get_free(block)
				== mem_block_get_start(block))) { ... }
are re-indented. No functional change.
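Conceptually, mem_heap_alloc() hands out space by bumping a free offset inside
the current block and falls back to mem_heap_add_block() when the block is
full. The following is a deliberately simplified illustration of that idea (one
fixed buffer, no block list, no debug fields), not the InnoDB implementation:

	typedef unsigned long int ulint;
	typedef unsigned char     byte;

	typedef struct {
		byte	buf[8000];	/* cf. MEM_BLOCK_STANDARD_SIZE */
		ulint	free;		/* offset of the first free byte */
	} my_heap_block;

	/* Bump allocation: return the current top and advance the offset. */
	static void* my_block_alloc(my_heap_block* block, ulint n)
	{
		void*	p;

		if (block->free + n > sizeof(block->buf)) {
			return(NULL);	/* a real heap would add a block here */
		}

		p = block->buf + block->free;
		block->free += n;	/* InnoDB also aligns the size */

		return(p);
	}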
mem0pool.h: whitespace cleanup only: the continuation line of the
MEM_AREA_EXTRA_SIZE define and the parameter comments of mem_pool_print_info()
(outfile, pool) are re-indented.
mtr0log.ic, mlog_catenate_ulint(): the debug-only runtime assertions
ut_ad(MLOG_1BYTE == 1), ut_ad(MLOG_2BYTES == 2) and ut_ad(MLOG_4BYTES == 4) are
replaced by compile-time checks of the form
	#if MLOG_1BYTE != 1
	# error "MLOG_1BYTE != 1"
	#endif
for MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES and, newly, MLOG_8BYTES. The body of
the function is otherwise unchanged.
mtr0mtr.h: the log record type codes MLOG_1BYTE, MLOG_2BYTES, MLOG_4BYTES and
MLOG_8BYTES are changed from ((byte)1), ((byte)2), ((byte)4), ((byte)8) to
plain (1), (2), (4), (8), evidently so that the values can be tested in the
#if checks above; the surrounding MLOG_REC_*, MLOG_LIST_*, MLOG_PAGE_* and
MLOG_UNDO_* defines and their comments are re-aligned; the parameter comments
of mtr_release_s_latch_at_savepoint() are re-indented and the comment banner
under mtr_read_dulint() is corrected to match the function name.
mtr0mtr.ic: the same re-indentation in the inline definition of
mtr_release_s_latch_at_savepoint().
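The mtr0log.ic and mtr0mtr.h hunks above belong together: #if can only evaluate
plain integer constant expressions, and a cast such as (byte)1 would not even
parse in a preprocessor condition (the identifier byte is replaced by 0 there).
So the casts are dropped from the type codes and the sanity checks move from
runtime ut_ad() calls to compile time. A small stand-alone illustration of the
pattern:

	/* Constants that other code silently relies on. */
	#define MY_1BYTE	(1)
	#define MY_2BYTES	(2)

	/* Compile-time check: breaks the build immediately if someone changes
	   the values above, instead of tripping an assertion only in a debug
	   binary at runtime. */
	#if MY_1BYTE != 1
	# error "MY_1BYTE != 1"
	#endif
	#if MY_2BYTES != 2
	# error "MY_2BYTES != 2"
	#endif

	/* Had the constants been defined as ((unsigned char) 1), the #if
	   directives above would be rejected by the preprocessor. */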
os0file.h: a new error code OS_FILE_PATH_ERROR (74) is added;
OS_FILE_AIO_RESOURCES_RESERVED is renumbered from 74 to 75 and
OS_FILE_ERROR_NOT_SPECIFIED from 75 to 76.
os_file_create_tmpfile(): the comment now explains that the function is like
tmpfile(3) except that the temporary file is created in the MySQL temporary
directory (on Netware it behaves like plain tmpfile(3), because the C run-time
library of Netware does not expose the delete-on-close flag), and the return
value is documented as "temporary file handle, or NULL on error" instead of
"temporary file handle (never NULL)".
The remaining hunks are whitespace cleanup: the os_n_pending_reads and
os_n_pending_writes externs, the OS_FILE_READ_ONLY/OS_FILE_READ_WRITE/
OS_FILE_READ_ALLOW_DELETE, OS_AIO_* and OS_WIN* defines, the os_file_type_enum
and os_file_stat_struct declarations, the os_file_dir_t typedefs, the comments
of os_get_os_version(), os_file_create_simple(), os_file_status() and
os_file_get_status(), and the dirname/basename example table taken from SUSv2
are re-indented.
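Since os_file_create_tmpfile() is now documented as possibly returning NULL,
callers have to check the handle before using it. The sketch below mirrors that
check using plain tmpfile(3) so it stands alone; the error handling is
illustrative and not taken from the server code:

	#include <stdio.h>

	/* Mirrors the NULL check that callers of os_file_create_tmpfile()
	   need under the new contract. */
	static FILE* my_create_tmpfile(void)
	{
		FILE*	f = tmpfile();

		if (f == NULL) {
			/* Illustrative handling only. */
			fputs("unable to create a temporary file\n", stderr);
		}

		return(f);
	}

	int main(void)
	{
		FILE*	f = my_create_tmpfile();

		if (f != NULL) {
			fclose(f);
		}
		return(0);
	}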
os0proc.h: the comment banners of os_mem_alloc_large() and os_mem_free_large()
are corrected to match the function names, and the parameter comments
(n, set_to_zero, assert_on_error; ptr) are re-wrapped. No functional change.
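A minimal usage sketch of the pair as declared above. The declarations are
repeated so the fragment is self-contained; it would only link against the
InnoDB objects that define these functions, and the ibool typedef is assumed to
be ulint as in univ.i:

	typedef unsigned long int ulint;
	typedef ulint             ibool;

	/* As declared in os0proc.h. */
	void*	os_mem_alloc_large(ulint n, ibool set_to_zero,
				   ibool assert_on_error);
	void	os_mem_free_large(void* ptr);

	static void my_use_large_pages(void)
	{
		/* Ask for 16 MB, zero-filled if UNIV_SET_MEM_TO_ZERO is
		defined, and do not crash mysqld on failure (last arg FALSE). */
		void*	ptr = os_mem_alloc_large(16 * 1024 * 1024, 1, 0);

		if (ptr != NULL) {
			/* ... use the memory ... */
			os_mem_free_large(ptr);
		}
	}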
os0sync.h: whitespace cleanup only: the os_native_event_t, os_event_struct_t
and os_event_t typedefs in both the __WIN__ and POSIX branches, and the
parameter comments of os_event_wait_multiple() (n, native_event_array), are
re-indented.
os0thread.h: whitespace cleanup in the os_thread_t and os_thread_id_t typedefs
and in the parameter list of os_thread_create(); the comment banner under
os_thread_join() is corrected to match the function name.
|
||||
void
|
||||
page_cur_delete_rec(
|
||||
/*================*/
|
||||
page_cur_t* cursor, /* in: a page cursor */
|
||||
page_cur_t* cursor, /* in: a page cursor */
|
||||
dict_index_t* index, /* in: record descriptor */
|
||||
const ulint* offsets,/* in: rec_get_offsets(cursor->rec, index) */
|
||||
mtr_t* mtr); /* in: mini-transaction handle */
|
||||
|
@ -181,7 +181,7 @@ page_cur_tuple_insert(
|
||||
/* out: pointer to record if succeed, NULL
|
||||
otherwise */
|
||||
page_cur_t* cursor, /* in: a page cursor */
|
||||
dtuple_t* tuple, /* in: pointer to a data tuple */
|
||||
dtuple_t* tuple, /* in: pointer to a data tuple */
|
||||
dict_index_t* index, /* in: record descriptor */
|
||||
mtr_t* mtr) /* in: mini-transaction handle */
|
||||
{
|
||||
|
page0page.h: whitespace cleanup in the declarations of
page_cmp_dtuple_rec_with_match() (matched_fields, matched_bytes comments),
page_dir_slot_get_n_owned(), page_dir_split_slot() and page_dir_balance_slot();
the comment banners under page_rec_is_infimum_low() and page_print() are
corrected to match the function names.
page0page.ic: whitespace cleanup in page_header_get_ptr(),
page_header_set_ptr(), page_rec_is_comp() (including the
UNIV_RELEASE_NOT_YET_STABLE stray-pointer check and its error message),
page_rec_is_infimum_low() (banner corrected), page_cmp_dtuple_rec_with_match(),
page_dir_slot_get_n_owned() and the multi-line expression in
page_get_data_size(). No functional change.
|
||||
|
||||
/* Type of the index page */
|
||||
/* The following define eliminates a name collision on HP-UX */
|
||||
#define page_t ib_page_t
|
||||
#define page_t ib_page_t
|
||||
typedef byte page_t;
|
||||
typedef struct page_search_struct page_search_t;
|
||||
typedef struct page_cur_struct page_cur_t;
|
||||
|
@@ -1,90 +1,220 @@
#ifndef YYSTYPE
#define YYSTYPE int
#endif
#define PARS_INT_LIT 257
#define PARS_FLOAT_LIT 258
#define PARS_STR_LIT 259
#define PARS_NULL_LIT 260
#define PARS_ID_TOKEN 261
#define PARS_AND_TOKEN 262
#define PARS_OR_TOKEN 263
#define PARS_NOT_TOKEN 264
#define PARS_GE_TOKEN 265
#define PARS_LE_TOKEN 266
#define PARS_NE_TOKEN 267
#define PARS_PROCEDURE_TOKEN 268
#define PARS_IN_TOKEN 269
#define PARS_OUT_TOKEN 270
#define PARS_INT_TOKEN 271
#define PARS_INTEGER_TOKEN 272
#define PARS_FLOAT_TOKEN 273
#define PARS_CHAR_TOKEN 274
#define PARS_IS_TOKEN 275
#define PARS_BEGIN_TOKEN 276
#define PARS_END_TOKEN 277
#define PARS_IF_TOKEN 278
#define PARS_THEN_TOKEN 279
#define PARS_ELSE_TOKEN 280
#define PARS_ELSIF_TOKEN 281
#define PARS_LOOP_TOKEN 282
#define PARS_WHILE_TOKEN 283
#define PARS_RETURN_TOKEN 284
#define PARS_SELECT_TOKEN 285
#define PARS_SUM_TOKEN 286
#define PARS_COUNT_TOKEN 287
#define PARS_DISTINCT_TOKEN 288
#define PARS_FROM_TOKEN 289
#define PARS_WHERE_TOKEN 290
#define PARS_FOR_TOKEN 291
#define PARS_DDOT_TOKEN 292
#define PARS_CONSISTENT_TOKEN 293
#define PARS_READ_TOKEN 294
#define PARS_ORDER_TOKEN 295
#define PARS_BY_TOKEN 296
#define PARS_ASC_TOKEN 297
#define PARS_DESC_TOKEN 298
#define PARS_INSERT_TOKEN 299
#define PARS_INTO_TOKEN 300
#define PARS_VALUES_TOKEN 301
#define PARS_UPDATE_TOKEN 302
#define PARS_SET_TOKEN 303
#define PARS_DELETE_TOKEN 304
#define PARS_CURRENT_TOKEN 305
#define PARS_OF_TOKEN 306
#define PARS_CREATE_TOKEN 307
#define PARS_TABLE_TOKEN 308
#define PARS_INDEX_TOKEN 309
#define PARS_UNIQUE_TOKEN 310
#define PARS_CLUSTERED_TOKEN 311
#define PARS_DOES_NOT_FIT_IN_MEM_TOKEN 312
#define PARS_ON_TOKEN 313
#define PARS_ASSIGN_TOKEN 314
#define PARS_DECLARE_TOKEN 315
#define PARS_CURSOR_TOKEN 316
#define PARS_SQL_TOKEN 317
#define PARS_OPEN_TOKEN 318
#define PARS_FETCH_TOKEN 319
#define PARS_CLOSE_TOKEN 320
#define PARS_NOTFOUND_TOKEN 321
#define PARS_TO_CHAR_TOKEN 322
#define PARS_TO_NUMBER_TOKEN 323
#define PARS_TO_BINARY_TOKEN 324
#define PARS_BINARY_TO_NUMBER_TOKEN 325
#define PARS_SUBSTR_TOKEN 326
#define PARS_REPLSTR_TOKEN 327
#define PARS_CONCAT_TOKEN 328
#define PARS_INSTR_TOKEN 329
#define PARS_LENGTH_TOKEN 330
#define PARS_SYSDATE_TOKEN 331
#define PARS_PRINTF_TOKEN 332
#define PARS_ASSERT_TOKEN 333
#define PARS_RND_TOKEN 334
#define PARS_RND_STR_TOKEN 335
#define PARS_ROW_PRINTF_TOKEN 336
#define PARS_COMMIT_TOKEN 337
#define PARS_ROLLBACK_TOKEN 338
#define PARS_WORK_TOKEN 339
#define NEG 340
/* A Bison parser, made by GNU Bison 1.875d. */

/* Skeleton parser for Yacc-like parsing with Bison,
Copyright (C) 1984, 1989, 1990, 2000, 2001, 2002, 2003, 2004 Free Software Foundation, Inc.

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */

/* As a special exception, when this file is copied by Bison into a
Bison output file, you may use that output file without restriction.
This special exception was added by the Free Software Foundation
in version 1.24 of Bison. */

/* Tokens. */
#ifndef YYTOKENTYPE
# define YYTOKENTYPE
/* Put the tokens into the symbol table, so that GDB and other debuggers
know about them. */
enum yytokentype {
PARS_INT_LIT = 258,
PARS_FLOAT_LIT = 259,
PARS_STR_LIT = 260,
PARS_NULL_LIT = 261,
PARS_ID_TOKEN = 262,
PARS_AND_TOKEN = 263,
PARS_OR_TOKEN = 264,
PARS_NOT_TOKEN = 265,
PARS_GE_TOKEN = 266,
PARS_LE_TOKEN = 267,
PARS_NE_TOKEN = 268,
PARS_PROCEDURE_TOKEN = 269,
PARS_IN_TOKEN = 270,
PARS_OUT_TOKEN = 271,
PARS_BINARY_TOKEN = 272,
PARS_BLOB_TOKEN = 273,
PARS_INT_TOKEN = 274,
PARS_INTEGER_TOKEN = 275,
PARS_FLOAT_TOKEN = 276,
PARS_CHAR_TOKEN = 277,
PARS_IS_TOKEN = 278,
PARS_BEGIN_TOKEN = 279,
PARS_END_TOKEN = 280,
PARS_IF_TOKEN = 281,
PARS_THEN_TOKEN = 282,
PARS_ELSE_TOKEN = 283,
PARS_ELSIF_TOKEN = 284,
PARS_LOOP_TOKEN = 285,
PARS_WHILE_TOKEN = 286,
PARS_RETURN_TOKEN = 287,
PARS_SELECT_TOKEN = 288,
PARS_SUM_TOKEN = 289,
PARS_COUNT_TOKEN = 290,
PARS_DISTINCT_TOKEN = 291,
PARS_FROM_TOKEN = 292,
PARS_WHERE_TOKEN = 293,
PARS_FOR_TOKEN = 294,
PARS_DDOT_TOKEN = 295,
PARS_CONSISTENT_TOKEN = 296,
PARS_READ_TOKEN = 297,
PARS_ORDER_TOKEN = 298,
PARS_BY_TOKEN = 299,
PARS_ASC_TOKEN = 300,
PARS_DESC_TOKEN = 301,
PARS_INSERT_TOKEN = 302,
PARS_INTO_TOKEN = 303,
PARS_VALUES_TOKEN = 304,
PARS_UPDATE_TOKEN = 305,
PARS_SET_TOKEN = 306,
PARS_DELETE_TOKEN = 307,
PARS_CURRENT_TOKEN = 308,
PARS_OF_TOKEN = 309,
PARS_CREATE_TOKEN = 310,
PARS_TABLE_TOKEN = 311,
PARS_INDEX_TOKEN = 312,
PARS_UNIQUE_TOKEN = 313,
PARS_CLUSTERED_TOKEN = 314,
PARS_DOES_NOT_FIT_IN_MEM_TOKEN = 315,
PARS_ON_TOKEN = 316,
PARS_ASSIGN_TOKEN = 317,
PARS_DECLARE_TOKEN = 318,
PARS_CURSOR_TOKEN = 319,
PARS_SQL_TOKEN = 320,
PARS_OPEN_TOKEN = 321,
PARS_FETCH_TOKEN = 322,
PARS_CLOSE_TOKEN = 323,
PARS_NOTFOUND_TOKEN = 324,
PARS_TO_CHAR_TOKEN = 325,
PARS_TO_NUMBER_TOKEN = 326,
PARS_TO_BINARY_TOKEN = 327,
PARS_BINARY_TO_NUMBER_TOKEN = 328,
PARS_SUBSTR_TOKEN = 329,
PARS_REPLSTR_TOKEN = 330,
PARS_CONCAT_TOKEN = 331,
PARS_INSTR_TOKEN = 332,
PARS_LENGTH_TOKEN = 333,
PARS_SYSDATE_TOKEN = 334,
PARS_PRINTF_TOKEN = 335,
PARS_ASSERT_TOKEN = 336,
PARS_RND_TOKEN = 337,
PARS_RND_STR_TOKEN = 338,
PARS_ROW_PRINTF_TOKEN = 339,
PARS_COMMIT_TOKEN = 340,
PARS_ROLLBACK_TOKEN = 341,
PARS_WORK_TOKEN = 342,
NEG = 343
};
#endif
#define PARS_INT_LIT 258
#define PARS_FLOAT_LIT 259
#define PARS_STR_LIT 260
#define PARS_NULL_LIT 261
#define PARS_ID_TOKEN 262
#define PARS_AND_TOKEN 263
#define PARS_OR_TOKEN 264
#define PARS_NOT_TOKEN 265
#define PARS_GE_TOKEN 266
#define PARS_LE_TOKEN 267
#define PARS_NE_TOKEN 268
#define PARS_PROCEDURE_TOKEN 269
#define PARS_IN_TOKEN 270
#define PARS_OUT_TOKEN 271
#define PARS_BINARY_TOKEN 272
#define PARS_BLOB_TOKEN 273
#define PARS_INT_TOKEN 274
#define PARS_INTEGER_TOKEN 275
#define PARS_FLOAT_TOKEN 276
#define PARS_CHAR_TOKEN 277
#define PARS_IS_TOKEN 278
#define PARS_BEGIN_TOKEN 279
#define PARS_END_TOKEN 280
#define PARS_IF_TOKEN 281
#define PARS_THEN_TOKEN 282
#define PARS_ELSE_TOKEN 283
#define PARS_ELSIF_TOKEN 284
#define PARS_LOOP_TOKEN 285
#define PARS_WHILE_TOKEN 286
#define PARS_RETURN_TOKEN 287
#define PARS_SELECT_TOKEN 288
#define PARS_SUM_TOKEN 289
#define PARS_COUNT_TOKEN 290
#define PARS_DISTINCT_TOKEN 291
#define PARS_FROM_TOKEN 292
#define PARS_WHERE_TOKEN 293
#define PARS_FOR_TOKEN 294
#define PARS_DDOT_TOKEN 295
#define PARS_CONSISTENT_TOKEN 296
#define PARS_READ_TOKEN 297
#define PARS_ORDER_TOKEN 298
#define PARS_BY_TOKEN 299
#define PARS_ASC_TOKEN 300
#define PARS_DESC_TOKEN 301
#define PARS_INSERT_TOKEN 302
#define PARS_INTO_TOKEN 303
#define PARS_VALUES_TOKEN 304
#define PARS_UPDATE_TOKEN 305
#define PARS_SET_TOKEN 306
#define PARS_DELETE_TOKEN 307
#define PARS_CURRENT_TOKEN 308
#define PARS_OF_TOKEN 309
#define PARS_CREATE_TOKEN 310
#define PARS_TABLE_TOKEN 311
#define PARS_INDEX_TOKEN 312
#define PARS_UNIQUE_TOKEN 313
#define PARS_CLUSTERED_TOKEN 314
#define PARS_DOES_NOT_FIT_IN_MEM_TOKEN 315
#define PARS_ON_TOKEN 316
#define PARS_ASSIGN_TOKEN 317
#define PARS_DECLARE_TOKEN 318
#define PARS_CURSOR_TOKEN 319
#define PARS_SQL_TOKEN 320
#define PARS_OPEN_TOKEN 321
#define PARS_FETCH_TOKEN 322
#define PARS_CLOSE_TOKEN 323
#define PARS_NOTFOUND_TOKEN 324
#define PARS_TO_CHAR_TOKEN 325
#define PARS_TO_NUMBER_TOKEN 326
#define PARS_TO_BINARY_TOKEN 327
#define PARS_BINARY_TO_NUMBER_TOKEN 328
#define PARS_SUBSTR_TOKEN 329
#define PARS_REPLSTR_TOKEN 330
#define PARS_CONCAT_TOKEN 331
#define PARS_INSTR_TOKEN 332
#define PARS_LENGTH_TOKEN 333
#define PARS_SYSDATE_TOKEN 334
#define PARS_PRINTF_TOKEN 335
#define PARS_ASSERT_TOKEN 336
#define PARS_RND_TOKEN 337
#define PARS_RND_STR_TOKEN 338
#define PARS_ROW_PRINTF_TOKEN 339
#define PARS_COMMIT_TOKEN 340
#define PARS_ROLLBACK_TOKEN 341
#define PARS_WORK_TOKEN 342
#define NEG 343

#if ! defined (YYSTYPE) && ! defined (YYSTYPE_IS_DECLARED)
typedef int YYSTYPE;
# define yystype YYSTYPE /* obsolescent; will be withdrawn */
# define YYSTYPE_IS_DECLARED 1
# define YYSTYPE_IS_TRIVIAL 1
#endif

extern YYSTYPE yylval;
@@ -46,6 +46,8 @@ extern pars_res_word_t pars_rnd_str_token;
extern pars_res_word_t pars_count_token;
extern pars_res_word_t pars_sum_token;
extern pars_res_word_t pars_distinct_token;
extern pars_res_word_t pars_binary_token;
extern pars_res_word_t pars_blob_token;
extern pars_res_word_t pars_int_token;
extern pars_res_word_t pars_char_token;
extern pars_res_word_t pars_float_token;
@@ -111,7 +113,7 @@ func_node_t*
pars_func(
/*======*/
/* out, own: function node in a query tree */
que_node_t* res_word,/* in: function name reserved word */
que_node_t* arg); /* in: first argument in the argument list */
/*************************************************************************
Parses an operator expression. */
@@ -212,7 +214,7 @@ pars_insert_statement(
/* out, own: update node in a query
tree */
sym_node_t* table_sym, /* in: table name node */
que_node_t* values_list, /* in: value expression list or NULL */
sel_node_t* select); /* in: select condition or NULL */
/*************************************************************************
Parses a procedure parameter declaration. */
@@ -336,10 +338,15 @@ Parses a column definition at a table creation. */
sym_node_t*
pars_column_def(
/*============*/
/* out: column sym table node */
sym_node_t* sym_node, /* in: column node in the symbol
table */
pars_res_word_t* type); /* in: data type */
/* out: column sym table
node */
sym_node_t* sym_node, /* in: column node in the
symbol table */
pars_res_word_t* type, /* in: data type */
sym_node_t* len, /* in: length of column, or
NULL */
void* is_not_null); /* in: if not NULL, column
is of type NOT NULL. */
/*************************************************************************
Parses a table creation operation. */

@@ -464,7 +471,7 @@ struct if_node_struct{
que_node_t* cond; /* if condition */
que_node_t* stat_list; /* statement list */
que_node_t* else_part; /* else-part statement list */
elsif_node_t* elsif_list; /* elsif element list */
};

/* while-statement node */
@@ -50,7 +50,7 @@ UNIV_INLINE
que_thr_t*
que_fork_get_first_thr(
/*===================*/
que_fork_t* fork); /* in: query fork */
/***************************************************************************
Gets the child node of the first thr in a fork. */
UNIV_INLINE
@@ -216,7 +216,7 @@ que_fork_start_command(
QUE_THR_RUNNING state, or NULL; the query
thread should be executed by que_run_threads
by the caller */
que_fork_t* fork); /* in: a query fork */
/***************************************************************************
Gets the trx of a query thread. */
UNIV_INLINE
@@ -359,7 +359,7 @@ struct que_thr_struct{
the control came */
ulint resource; /* resource usage of the query thread
thus far */
ulint lock_state; /* lock state of thread (table or
row) */
};

@@ -485,9 +485,9 @@ struct que_fork_struct{
#define QUE_THR_ERROR 8

/* Query thread lock states */
#define QUE_THR_LOCK_NOLOCK 0
#define QUE_THR_LOCK_ROW 1
#define QUE_THR_LOCK_TABLE 2

/* From where the cursor position is counted */
#define QUE_CUR_NOT_DEFINED 1

@@ -27,7 +27,7 @@ UNIV_INLINE
que_thr_t*
que_fork_get_first_thr(
/*===================*/
que_fork_t* fork) /* in: query fork */
{
return(UT_LIST_GET_FIRST(fork->thrs));
}
@@ -38,7 +38,7 @@ UNIV_INLINE
que_node_t*
que_fork_get_child(
/*===============*/
que_fork_t* fork) /* in: query fork */
{
que_thr_t* thr;

@@ -230,9 +230,10 @@ que_thr_peek_stop(
trx = graph->trx;

if (graph->state != QUE_FORK_ACTIVE
|| trx->que_state == TRX_QUE_LOCK_WAIT
|| (UT_LIST_GET_LEN(trx->signals) > 0
&& trx->que_state == TRX_QUE_RUNNING)) {

return(TRUE);
}

@@ -251,7 +252,7 @@ que_graph_is_select(
if (graph->fork_type == QUE_FORK_SELECT_SCROLL
|| graph->fork_type == QUE_FORK_SELECT_NON_SCROLL) {

return(TRUE);
}

return(FALSE);
@@ -24,9 +24,12 @@ point in time are seen in the view. */
read_view_t*
read_view_open_now(
/*===============*/
/* out, own: read view struct */
trx_t* cr_trx, /* in: creating transaction, or NULL */
mem_heap_t* heap); /* in: memory heap from which allocated */
/* out, own: read view struct */
dulint cr_trx_id, /* in: trx_id of creating
transaction, or (0, 0) used in
purge */
mem_heap_t* heap); /* in: memory heap from which
allocated */
/*************************************************************************
Makes a copy of the oldest existing read view, or opens a new. The view
must be closed with ..._close. */
@@ -34,9 +37,12 @@ must be closed with ..._close. */
read_view_t*
read_view_oldest_copy_or_open_new(
/*==============================*/
/* out, own: read view struct */
trx_t* cr_trx, /* in: creating transaction, or NULL */
mem_heap_t* heap); /* in: memory heap from which allocated */
/* out, own: read view struct */
dulint cr_trx_id, /* in: trx_id of creating
transaction, or (0, 0) used in
purge */
mem_heap_t* heap); /* in: memory heap from which
allocated */
/*************************************************************************
Closes a read view. */

@@ -60,7 +66,7 @@ read_view_sees_trx_id(
/*==================*/
/* out: TRUE if sees */
read_view_t* view, /* in: read view */
dulint trx_id); /* in: trx id */
dulint trx_id);/* in: trx id */
/*************************************************************************
Prints a read view to stderr. */

@@ -101,6 +107,10 @@ read_cursor_set_for_mysql(
read should not see the modifications to the database. */

struct read_view_struct{
ulint type; /* VIEW_NORMAL, VIEW_HIGH_GRANULARITY */
dulint undo_no; /* (0, 0) or if type is VIEW_HIGH_GRANULARITY
transaction undo_no when this high-granularity
consistent read view was created */
ibool can_be_too_old; /* TRUE if the system has had to purge old
versions which this read view should be able
to access: the read view can bump into the
@@ -121,12 +131,23 @@ struct read_view_struct{
serialized, except the reading transaction
itself; the trx ids in this array are in a
descending order */
trx_t* creator; /* Pointer to the creating transaction, or
NULL if used in purge */
dulint creator_trx_id; /* trx id of creating transaction, or
(0, 0) used in purge */
UT_LIST_NODE_T(read_view_t) view_list;
/* List of read views in trx_sys */
};

/* Read view types */
#define VIEW_NORMAL 1 /* Normal consistent read view
where transaction does not see changes
made by active transactions except
creating transaction. */
#define VIEW_HIGH_GRANULARITY 2 /* High-granularity read view where
transaction does not see changes
made by active transactions and own
changes after a point in time when this
read view was created. */

/* Implement InnoDB framework to support consistent read views in
cursors. This struct holds both heap where consistent read view
is allocated and pointer to a read view. */
@@ -137,8 +158,8 @@ struct cursor_view_struct{
read_view_t* read_view;
/* Consistent read view of the cursor*/
ulint n_mysql_tables_in_use;
/* number of Innobase tables used in the
processing of this cursor */
};

#ifndef UNIV_NONINL
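The VIEW_NORMAL / VIEW_HIGH_GRANULARITY distinction above is what implements the cursor behaviour described for bug #12456. The following is a minimal sketch of the visibility rule only, not the InnoDB code: the demo_ names are invented, plain long integers stand in for dulint, and the real implementation also consults the view's trx_ids array for ids between the two limits.

#include <stdbool.h>

struct demo_read_view {
        int  type;           /* 1 = VIEW_NORMAL, 2 = VIEW_HIGH_GRANULARITY */
        long undo_no;        /* creator's undo_no when the view was created */
        long creator_trx_id; /* trx id of the creating transaction */
        long up_limit_id;    /* all trx ids below this are visible */
        long low_limit_id;   /* all trx ids at or above this are invisible */
};

/* Should a row version written by trx_id as its undo_no-th modification
   be visible through this view? */
bool
demo_view_sees(const struct demo_read_view* view, long trx_id, long undo_no)
{
        if (trx_id == view->creator_trx_id) {
                /* A normal view always sees the creator's own changes; a
                   high-granularity (cursor) view sees only those made before
                   the view was created, i.e. with a smaller undo_no. */
                return(view->type == 1 || undo_no < view->undo_no);
        }
        if (trx_id < view->up_limit_id) {
                return(true);   /* committed before the view was opened */
        }
        if (trx_id >= view->low_limit_id) {
                return(false);  /* started after the view was opened */
        }
        /* Ids in between would require checking the trx_ids array;
           treat them as invisible in this sketch. */
        return(false);
}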
@@ -24,7 +24,8 @@ cmp_types_are_equal(
/* out: TRUE if the types are considered
equal in comparisons */
dtype_t* type1, /* in: type 1 */
dtype_t* type2); /* in: type 2 */
dtype_t* type2, /* in: type 2 */
ibool check_charsets); /* in: whether to check charsets */
/*****************************************************************
This function is used to compare two data fields for which we know the
data type. */
@@ -91,10 +92,10 @@ cmp_dtuple_rec_with_match(
has an equal number or more fields than
dtuple */
const ulint* offsets,/* in: array returned by rec_get_offsets() */
ulint* matched_fields, /* in/out: number of already completely
matched fields; when function returns,
contains the value for current comparison */
ulint* matched_bytes); /* in/out: number of already matched
bytes within the first field not completely
matched; when function returns, contains the
value for current comparison */
@@ -107,7 +108,7 @@ cmp_dtuple_rec(
/* out: 1, 0, -1, if dtuple is greater, equal,
less than rec, respectively; see the comments
for cmp_dtuple_rec_with_match */
dtuple_t* dtuple, /* in: data tuple */
rec_t* rec, /* in: physical record */
const ulint* offsets);/* in: array returned by rec_get_offsets() */
/******************************************************************
@@ -137,11 +138,11 @@ cmp_rec_rec_with_match(
const ulint* offsets1,/* in: rec_get_offsets(rec1, index) */
const ulint* offsets2,/* in: rec_get_offsets(rec2, index) */
dict_index_t* index, /* in: data dictionary index */
ulint* matched_fields, /* in/out: number of already completely
matched fields; when the function returns,
contains the value the for current
comparison */
ulint* matched_bytes);/* in/out: number of already matched
bytes within the first field not completely
matched; when the function returns, contains
the value for the current comparison */
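The new check_charsets argument to cmp_types_are_equal() above supports the foreign key checks behind bug #13778: even with FOREIGN_KEY_CHECKS=0 the referencing and referenced column types must stay compatible, while the charset comparison can be switched on or off by the caller. A minimal sketch with simplified stand-in types (the demo_ names are assumptions, not the dict/dtype API):

#include <stdbool.h>

struct demo_dtype {
        int mtype;        /* main type */
        int prtype;       /* precise type flags */
        int charset_coll; /* charset-collation code */
};

bool
demo_types_are_equal(const struct demo_dtype* a, const struct demo_dtype* b,
                     bool check_charsets)
{
        if (a->mtype != b->mtype || a->prtype != b->prtype) {
                return(false);  /* the basic datatypes must always match */
        }
        return(!check_charsets || a->charset_coll == b->charset_coll);
}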
@@ -182,7 +182,7 @@ The following function tells if a new-style record is a node pointer. */
UNIV_INLINE
ibool
rec_get_node_ptr_flag(
/*=================*/
/*==================*/
/* out: TRUE if node pointer */
rec_t* rec); /* in: physical record */
/**********************************************************
@@ -191,7 +191,7 @@ of the record in the heap of the index page. */
UNIV_INLINE
ulint
rec_get_heap_no(
/*=============*/
/*============*/
/* out: heap order number */
rec_t* rec, /* in: physical record */
ulint comp); /* in: nonzero=compact page format */
@@ -201,7 +201,7 @@ field in the record. */
UNIV_INLINE
void
rec_set_heap_no(
/*=============*/
/*============*/
rec_t* rec, /* in: physical record */
ulint comp, /* in: nonzero=compact page format */
ulint heap_no);/* in: the heap number */
@@ -216,7 +216,7 @@ rec_get_1byte_offs_flag(
rec_t* rec); /* in: physical record */
/**********************************************************
The following function determines the offsets to each field
in the record. It can reuse a previously allocated array. */

ulint*
rec_get_offsets_func(
@@ -264,9 +264,9 @@ data field in an old-style record. */
byte*
rec_get_nth_field_old(
/*==================*/
/* out: pointer to the field */
rec_t* rec, /* in: record */
ulint n, /* in: index of the field */
ulint* len); /* out: length of the field; UNIV_SQL_NULL
if SQL null */
/****************************************************************
@@ -278,8 +278,8 @@ ulint
rec_get_nth_field_size(
/*===================*/
/* out: field size in bytes */
rec_t* rec, /* in: record */
ulint n); /* in: index of the field */
/****************************************************************
The following function is used to get a pointer to the nth
data field in a record. */
@@ -287,8 +287,8 @@ UNIV_INLINE
byte*
rec_get_nth_field(
/*==============*/
/* out: pointer to the field */
rec_t* rec, /* in: record */
const ulint* offsets,/* in: array returned by rec_get_offsets() */
ulint n, /* in: index of the field */
ulint* len); /* out: length of the field; UNIV_SQL_NULL
@@ -374,7 +374,7 @@ UNIV_INLINE
void
rec_set_nth_field(
/*==============*/
rec_t* rec, /* in: record */
const ulint* offsets,/* in: array returned by rec_get_offsets() */
ulint n, /* in: index number of the field */
const void* data, /* in: pointer to the data if not SQL null */
@@ -399,7 +399,7 @@ The following function returns the number of fields in a record. */
UNIV_INLINE
ulint
rec_offs_n_fields(
/*===============*/
/*==============*/
/* out: number of fields */
const ulint* offsets);/* in: array returned by rec_get_offsets() */
/**************************************************************
@@ -26,7 +26,7 @@ most significant bytes and bits are written below less significant.
downward from
origin -> 1 8 bits pointer to next record
2 8 bits pointer to next record
3 1 bit short flag
7 bits number of fields
4 3 bits number of fields
5 bits heap number
@@ -99,7 +99,7 @@ and the shift needed to obtain each bit-field of the record. */
#define REC_INFO_BITS_SHIFT 0

/* The deleted flag in info bits */
#define REC_INFO_DELETED_FLAG 0x20UL /* when bit is set to 1, it means the
record has been delete marked */
/* The following masks are used to filter the SQL null bit from
one-byte and two-byte offsets */
@@ -144,7 +144,7 @@ The physical size of the field is not changed. */
void
rec_set_nth_field_sql_null(
/*=======================*/
rec_t* rec, /* in: record */
ulint n); /* in: index of the field */

/***************************************************************
@@ -268,9 +268,12 @@ rec_get_next_offs(
ulint comp) /* in: nonzero=compact page format */
{
ulint field_value;

ut_ad(REC_NEXT_MASK == 0xFFFFUL);
ut_ad(REC_NEXT_SHIFT == 0);
#if REC_NEXT_MASK != 0xFFFFUL
# error "REC_NEXT_MASK != 0xFFFFUL"
#endif
#if REC_NEXT_SHIFT
# error "REC_NEXT_SHIFT != 0"
#endif

field_value = mach_read_from_2(rec - REC_NEXT);

@@ -286,10 +289,10 @@ rec_get_next_offs(
(int16_t) field_value + ut_align_offset(...) < UNIV_PAGE_SIZE
*/
ut_ad((field_value >= 32768
? field_value - 65536
: field_value)
+ ut_align_offset(rec, UNIV_PAGE_SIZE)
< UNIV_PAGE_SIZE);
#endif
if (field_value == 0) {

@@ -317,8 +320,12 @@ rec_set_next_offs(
{
ut_ad(rec);
ut_ad(UNIV_PAGE_SIZE > next);
ut_ad(REC_NEXT_MASK == 0xFFFFUL);
ut_ad(REC_NEXT_SHIFT == 0);
#if REC_NEXT_MASK != 0xFFFFUL
# error "REC_NEXT_MASK != 0xFFFFUL"
#endif
#if REC_NEXT_SHIFT
# error "REC_NEXT_SHIFT != 0"
#endif

if (comp) {
ulint field_value;
@@ -414,9 +421,11 @@ rec_get_n_fields(
{
ut_ad(rec);
ut_ad(index);
if (UNIV_UNLIKELY(!index->table->comp)) {

if (!dict_table_is_comp(index->table)) {
return(rec_get_n_fields_old(rec));
}

switch (rec_get_status(rec)) {
case REC_STATUS_ORDINARY:
return(dict_index_get_n_fields(index));
@@ -624,7 +633,7 @@ The following function tells if a new-style record is a node pointer. */
UNIV_INLINE
ibool
rec_get_node_ptr_flag(
/*=================*/
/*==================*/
/* out: TRUE if node pointer */
rec_t* rec) /* in: physical record */
{
@@ -637,7 +646,7 @@ heap of the index page. */
UNIV_INLINE
ulint
rec_get_heap_no(
/*=============*/
/*============*/
/* out: heap order number */
rec_t* rec, /* in: physical record */
ulint comp) /* in: nonzero=compact page format */
@@ -659,7 +668,7 @@ The following function is used to set the heap number field in the record. */
UNIV_INLINE
void
rec_set_heap_no(
/*=============*/
/*============*/
rec_t* rec, /* in: physical record */
ulint comp, /* in: nonzero=compact page format */
ulint heap_no)/* in: the heap number */
@@ -715,10 +724,10 @@ UNIV_INLINE
ulint
rec_1_get_field_end_info(
/*=====================*/
/* out: offset of the start of the field, SQL null
flag ORed */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(rec_get_1byte_offs_flag(rec));
ut_ad(n < rec_get_n_fields_old(rec));
@@ -734,10 +743,10 @@ UNIV_INLINE
ulint
rec_2_get_field_end_info(
/*=====================*/
/* out: offset of the start of the field, SQL null
flag and extern storage flag ORed */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(!rec_get_1byte_offs_flag(rec));
ut_ad(n < rec_get_n_fields_old(rec));
@@ -795,7 +804,7 @@ The following function returns the number of fields in a record. */
UNIV_INLINE
ulint
rec_offs_n_fields(
/*===============*/
/*==============*/
/* out: number of fields */
const ulint* offsets)/* in: array returned by rec_get_offsets() */
{
@@ -891,8 +900,8 @@ UNIV_INLINE
byte*
rec_get_nth_field(
/*==============*/
/* out: pointer to the field */
rec_t* rec, /* in: record */
const ulint* offsets,/* in: array returned by rec_get_offsets() */
ulint n, /* in: index of the field */
ulint* len) /* out: length of the field; UNIV_SQL_NULL
@@ -1019,7 +1028,7 @@ rec_set_nth_field_extern_bit(
where rec is, or NULL; in the NULL case
we do not write to log about the change */
{
if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
rec_set_nth_field_extern_bit_new(rec, index, i, val, mtr);
} else {
rec_set_nth_field_extern_bit_old(rec, i, val, mtr);
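The rec_get_next_offs()/rec_set_next_offs() hunks above replace runtime ut_ad() assertions on constant values with preprocessor checks, so a mismatch breaks the build rather than only debug runs. A minimal sketch of the same pattern, using invented DEMO_ constants rather than the real REC_NEXT_* macros:

#define DEMO_REC_NEXT_MASK  0xFFFFUL
#define DEMO_REC_NEXT_SHIFT 0

/* Compile-time equivalents of ut_ad(DEMO_REC_NEXT_MASK == 0xFFFFUL)
   and ut_ad(DEMO_REC_NEXT_SHIFT == 0). */
#if DEMO_REC_NEXT_MASK != 0xFFFFUL
# error "DEMO_REC_NEXT_MASK != 0xFFFFUL"
#endif
#if DEMO_REC_NEXT_SHIFT
# error "DEMO_REC_NEXT_SHIFT != 0"
#endif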
@@ -1036,10 +1045,10 @@ UNIV_INLINE
ulint
rec_1_get_prev_field_end_info(
/*==========================*/
/* out: offset of the start of the PREVIOUS field, SQL
null flag ORed */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(rec_get_1byte_offs_flag(rec));
ut_ad(n <= rec_get_n_fields_old(rec));
@@ -1055,10 +1064,10 @@ UNIV_INLINE
ulint
rec_2_get_prev_field_end_info(
/*==========================*/
/* out: offset of the start of the PREVIOUS field, SQL
null flag ORed */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(!rec_get_1byte_offs_flag(rec));
ut_ad(n <= rec_get_n_fields_old(rec));
@@ -1073,9 +1082,9 @@ UNIV_INLINE
void
rec_1_set_field_end_info(
/*=====================*/
rec_t* rec, /* in: record */
ulint n, /* in: field index */
ulint info) /* in: value to set */
{
ut_ad(rec_get_1byte_offs_flag(rec));
ut_ad(n < rec_get_n_fields_old(rec));
@@ -1090,9 +1099,9 @@ UNIV_INLINE
void
rec_2_set_field_end_info(
/*=====================*/
rec_t* rec, /* in: record */
ulint n, /* in: field index */
ulint info) /* in: value to set */
{
ut_ad(!rec_get_1byte_offs_flag(rec));
ut_ad(n < rec_get_n_fields_old(rec));
@@ -1107,9 +1116,9 @@ UNIV_INLINE
ulint
rec_1_get_field_start_offs(
/*=======================*/
/* out: offset of the start of the field */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(rec_get_1byte_offs_flag(rec));
ut_ad(n <= rec_get_n_fields_old(rec));
@@ -1130,9 +1139,9 @@ UNIV_INLINE
ulint
rec_2_get_field_start_offs(
/*=======================*/
/* out: offset of the start of the field */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(!rec_get_1byte_offs_flag(rec));
ut_ad(n <= rec_get_n_fields_old(rec));
@@ -1155,9 +1164,9 @@ UNIV_INLINE
ulint
rec_get_field_start_offs(
/*=====================*/
/* out: offset of the start of the field */
rec_t* rec, /* in: record */
ulint n) /* in: field index */
{
ut_ad(rec);
ut_ad(n <= rec_get_n_fields_old(rec));
@@ -1184,8 +1193,8 @@ ulint
rec_get_nth_field_size(
/*===================*/
/* out: field size in bytes */
rec_t* rec, /* in: record */
ulint n) /* in: index of the field */
{
ulint os;
ulint next_os;
@@ -1434,7 +1443,7 @@ rec_get_converted_size(
? dict_index_get_n_unique_in_tree(index) + 1
: dict_index_get_n_fields(index)));

if (UNIV_LIKELY(index->table->comp)) {
if (dict_table_is_comp(index->table)) {
return(rec_get_converted_size_new(index, dtuple));
}

@@ -1478,11 +1487,11 @@ rec_fold(
ut_ad(n_fields < n_fields_rec || n_bytes == 0);

if (n_fields > n_fields_rec) {
n_fields = n_fields_rec;
}

if (n_fields == n_fields_rec) {
n_bytes = 0;
}

fold = ut_fold_dulint(tree_id);
@@ -45,7 +45,7 @@ ins_node_create(
/*============*/
/* out, own: insert node struct */
ulint ins_type, /* in: INS_VALUES, ... */
dict_table_t* table, /* in: table where to insert */
mem_heap_t* heap); /* in: mem heap where created */
/*************************************************************************
Sets a new row to insert for an INS_DIRECT node. This function is only used

@@ -523,10 +523,10 @@ struct row_prebuilt_struct {
an SQL statement: we may have to set
an intention lock on the table,
create a consistent read view etc. */
ibool mysql_has_locked; /* this is set TRUE when MySQL
calls external_lock on this handle
with a lock flag, and set FALSE when
with the F_UNLOCK flag */
ibool clust_index_was_generated;
/* if the user did not define a
primary key in MySQL, then Innobase
@@ -625,12 +625,12 @@ struct row_prebuilt_struct {
('semi-consistent read'). Then,
this field will be set to
ROW_READ_DID_SEMI_CONSISTENT to
indicate that. If the row does not
match the WHERE condition, MySQL will
invoke handler::unlock_row() to
clear the flag back to
ROW_READ_TRY_SEMI_CONSISTENT and
to simply skip the row. If
the row matches, the next call to
row_search_for_mysql() will lock
the row.
@@ -687,7 +687,6 @@ struct row_prebuilt_struct {
#define ROW_READ_TRY_SEMI_CONSISTENT 1
#define ROW_READ_DID_SEMI_CONSISTENT 2

#ifndef UNIV_NONINL
#include "row0mysql.ic"
#endif
@@ -67,9 +67,9 @@ dtuple_t*
row_build_index_entry(
/*==================*/
/* out: index entry which should be inserted */
dtuple_t* row, /* in: row which should be inserted to the
table */
dict_index_t* index, /* in: index on the table */
mem_heap_t* heap); /* in: memory heap from which the memory for
the index entry is allocated */
/***********************************************************************

@@ -135,7 +135,7 @@ row_set_rec_roll_ptr(
if (offset) {
trx_write_roll_ptr(rec + offset + DATA_TRX_ID_LEN, roll_ptr);
} else {
row_set_rec_sys_field(DATA_ROLL_PTR,
rec, index, offsets, roll_ptr);
}
}

@@ -335,7 +335,7 @@ struct row_printf_node_struct{
#define ROW_SEL_PREV 2

#define ROW_SEL_EXACT 1 /* search using a complete key value */
#define ROW_SEL_EXACT_PREFIX 2 /* search using a key prefix which
must match to rows: the prefix may
contain an incomplete field (the
last field in prefix may be just

@@ -27,7 +27,7 @@ typedef struct open_node_struct open_node_t;

typedef struct fetch_node_struct fetch_node_t;

typedef struct row_printf_node_struct row_printf_node_t;
typedef struct sel_buf_struct sel_buf_t;

typedef struct undo_node_struct undo_node_t;

@@ -410,7 +410,7 @@ struct upd_node_struct{
index record was changed, or this is
a delete operation: should update
all the secondary index records */
#define UPD_NODE_UPDATE_SOME_SEC 5 /* secondary index entries should be
looked at and updated if an ordering
field changed */