mirror of https://github.com/MariaDB/server.git
Automerge 5.2->5.3
@@ -1969,4 +1969,5 @@ plugin/handler_socket/client/hsclient
client/strings_def.h
libmysql/strings_def.h
libmysql_r/strings_def.h
storage/maria/aria_log_control
scripts/mytop
@@ -891,7 +891,8 @@ sub collect_one_test_case {
if ( -f "$testdir/$tname.slave-mi");

my @source_files = tags_from_test_file($tinfo,"$testdir/${tname}.test");
my ($master_opts, $slave_opts)=
tags_from_test_file($tinfo, "$testdir/${tname}.test", $suitedir);

# Get default storage engine from suite.opt file
@@ -1059,16 +1060,8 @@ sub collect_one_test_case {
# ----------------------------------------------------------------------
# Append mysqld extra options to master and slave, as appropriate
# ----------------------------------------------------------------------
for (@source_files) {
s/\.\w+$//;
push @{$tinfo->{master_opt}}, opts_from_file("$_.opt");
push @{$tinfo->{slave_opt}}, opts_from_file("$_.opt");
push @{$tinfo->{master_opt}}, opts_from_file("$_-master.opt");
push @{$tinfo->{slave_opt}}, opts_from_file("$_-slave.opt");
}

push(@{$tinfo->{'master_opt'}}, @::opt_extra_mysqld_opt);
push(@{$tinfo->{'slave_opt'}}, @::opt_extra_mysqld_opt);
push @{$tinfo->{'master_opt'}}, @$master_opts, @::opt_extra_mysqld_opt;
push @{$tinfo->{'slave_opt'}}, @$slave_opts, @::opt_extra_mysqld_opt;

process_opts($tinfo, 'master_opt');
process_opts($tinfo, 'slave_opt');
@@ -1077,74 +1070,113 @@ sub collect_one_test_case {
}

# List of tags in the .test files that if found should set
# the specified value in "tinfo"
my @tags=
(
["include/big_test.inc", "big_test", 1],
["include/have_debug.inc", "need_debug", 1],
["include/have_ndb.inc", "ndb_test", 1],
["include/have_multi_ndb.inc", "ndb_test", 1],
["include/master-slave.inc", "rpl_test", 1],
["include/ndb_master-slave.inc", "rpl_test", 1],
["include/ndb_master-slave.inc", "ndb_test", 1],
["include/not_embedded.inc", "not_embedded", 1],
["include/not_valgrind.inc", "not_valgrind", 1],
["include/have_example_plugin.inc", "example_plugin_test", 1],
["include/have_oqgraph_engine.inc", "oqgraph_test", 1],
["include/have_ssl.inc", "need_ssl", 1],
["include/long_test.inc", "long_test", 1],
);
my $tags_map= {'big_test' => ['big_test', 1],
'have_debug' => ['need_debug', 1],
'have_ndb' => ['ndb_test', 1],
'have_multi_ndb' => ['ndb_test', 1],
'master-slave' => ['rpl_test', 1],
'ndb_master-slave' => ['rpl_test', 1, 'ndb_test', 1],
'not_embedded' => ['not_embedded', 1],
'not_valgrind' => ['not_valgrind', 1],
'have_example_plugin' => ['example_plugin_test', 1],
'have_oqgraph_engine' => ['oqgraph_test', 1],
'have_ssl' => ['need_ssl', 1],
'long_test' => ['long_test', 1],
};
my $tags_regex_string= join('|', keys %$tags_map);
my $tags_regex= qr:include/($tags_regex_string)\.inc:o;

my $file_to_tags= { };
my $file_to_master_opts= { };
my $file_to_slave_opts= { };

sub tags_from_test_file {
my $tinfo= shift;
my $file= shift;
#mtr_verbose("$file");
my $F= IO::File->new($file) or mtr_error("can't open file \"$file\": $!");
my @all_files=($file);
# Get various tags from a file, recursively scanning also included files.
# And get options from .opt file, also recursively for included files.
# Return a list of [TAG_TO_SET, VALUE_TO_SET_TO] of found tags.
# Also returns lists of options for master and slave found in .opt files.
# Each include file is scanned only once, and subsequent calls just look up the
# cached result.
# We need to be a bit careful about speed here; previous version of this code
# took forever to scan the full test suite.
sub get_tags_from_file {
my ($file, $suitedir)= @_;

return ($file_to_tags->{$file}, $file_to_master_opts->{$file},
$file_to_slave_opts->{$file})
if exists($file_to_tags->{$file});

my $F= IO::File->new($file)
or mtr_error("can't open file \"$file\": $!");

my $tags= [];
my $master_opts= [];
my $slave_opts= [];

while (my $line= <$F>)
{
# Ignore comments.
next if $line =~ /^\#/;

# Skip line if it start's with #
next if ( $line =~ /^#/ );

# Match this line against tag in "tags" array
foreach my $tag (@tags)
# Add any tag we find.
if ($line =~ /$tags_regex/o)
{
if ( index($line, $tag->[0]) >= 0 )
my $to_set= $tags_map->{$1};
for (my $i= 0; $i < @$to_set; $i+= 2)
{
# Tag matched, assign value to "tinfo"
$tinfo->{"$tag->[1]"}= $tag->[2];
push @$tags, [$to_set->[$i], $to_set->[$i+1]];
}
}

# If test sources another file, open it as well
if ( $line =~ /^\-\-([[:space:]]*)source(.*)$/ or
$line =~ /^([[:space:]]*)source(.*);$/ )
# Check for a sourced include file.
if ($line =~ /^(--)?[[:space:]]*source[[:space:]]+([^;[:space:]]+)/)
{
my $value= $2;
$value =~ s/^\s+//; # Remove leading space
$value =~ s/[[:space:]]+$//; # Remove ending space

# Sourced file may exist relative to test or
# in global location
foreach my $sourced_file (dirname($file). "/$value",
"$::glob_mysql_test_dir/$value")
my $include= $2;
# Sourced file may exist relative to test file, or in global location.
# Note that for the purpose of tag collection we ignore
# non-existing files, and let mysqltest handle the error
# (e.g. mysqltest.test needs this)
for my $sourced_file (dirname($file) . '/' . $include,
$suitedir . '/' . $include,
$::glob_mysql_test_dir . '/' . $include)
{
if ( -f $sourced_file )
if (-e $sourced_file)
{
# Only source the file if it exists, we may get
# false positives in the regexes above if someone
# writes "source nnnn;" in a test case(such as mysqltest.test)
unshift @all_files, tags_from_test_file($tinfo, $sourced_file);
my ($sub_tags, $sub_master_opts, $sub_slave_opts)=
get_tags_from_file($sourced_file, $suitedir);
push @$tags, @$sub_tags;
push @$master_opts, @$sub_master_opts;
push @$slave_opts, @$sub_slave_opts;
last;
}
}
}
}
@all_files;

# Add options from main file _after_ those of any includes; this allows a
# test file to override options set by includes (eg. rpl.rpl_ddl uses this
# to enable innodb, then disable innodb in the slave.
my $file_no_ext= $file;
$file_no_ext =~ s/\.\w+$//;
my @common_opts= opts_from_file("$file_no_ext.opt");
push @$master_opts, @common_opts, opts_from_file("$file_no_ext-master.opt");
push @$slave_opts, @common_opts, opts_from_file("$file_no_ext-slave.opt");

# Save results so we can reuse without parsing if seen again.
$file_to_tags->{$file}= $tags;
$file_to_master_opts->{$file}= $master_opts;
$file_to_slave_opts->{$file}= $slave_opts;
return ($tags, $master_opts, $slave_opts);
}

sub tags_from_test_file {
my ($tinfo, $file, $suitedir)= @_;

my ($tags, $master_opts, $slave_opts)= get_tags_from_file($file, $suitedir);
for (@$tags)
{
$tinfo->{$_->[0]}= $_->[1];
}
return ($master_opts, $slave_opts);
}

sub unspace {
@@ -4459,6 +4459,7 @@ sub extract_warning_lines ($$) {
qr|mysqld: Table '\./mtr/test_suppressions' is marked as crashed and should be repaired|,
qr|Can't open shared library.*ha_archive|,
qr|InnoDB: Error: table 'test/bug39438'|,
qr|table.*is full|,
);

my $matched_lines= [];
@@ -1,5 +1,7 @@
--source include/have_innodb_plugin.inc

--source include/long_test.inc

set session transaction isolation level read committed;

create table innodb_bug52663 (what varchar(5), id integer, count integer, primary key
56  mysql-test/suite/maria/r/max_length.result  Normal file
@@ -0,0 +1,56 @@
drop table if exists t1,t2;
Warnings:
Note 1051 Unknown table 't1'
Note 1051 Unknown table 't2'
create table t1 (id int(10) unsigned not null auto_increment primary key, v varchar(1000), b blob) row_format=page max_rows=2 engine=aria;
create table t2 (id int(10) unsigned not null auto_increment primary key, v varchar(1000), b blob) row_format=page max_rows=20000000 engine=aria;
lock tables t1 write,t2 write;
show table status like "t_";
Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment
t1 MARIA 10 Page 0 0 8192 268320768 8192 0 1 # # # latin1_swedish_ci NULL max_rows=2 row_format=PAGE
t2 MARIA 10 Page 0 0 8192 17592186011648 8192 0 1 # # # latin1_swedish_ci NULL max_rows=20000000 row_format=PAGE
insert into t1 values(null, repeat("ab",100),repeat("def",1000));
insert into t1 values(null, repeat("de",200),repeat("ghi",2000));
insert into t1 values(null, repeat("fe",300),repeat("ghi",3000));
insert into t1 values(null, repeat("gh",400),repeat("jkl",10000));
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
unlock tables;
insert into t1 (v,b) select v,b from t2;
ERROR HY000: The table 't1' is full
check table t1;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 268230656 of 268320768 used
test.t1 check status OK
insert into t1 values(null, repeat("gh",400),repeat("jkl",10000));
ERROR HY000: The table 't1' is full
check table t1;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 268230656 of 268320768 used
test.t1 check status OK
truncate table t1;
insert into t1 (v,b) select v,b from t2;
ERROR HY000: The table 't1' is full
check table t1;
Table Op Msg_type Msg_text
test.t1 check warning Datafile is almost full, 268230656 of 268320768 used
test.t1 check status OK
drop table t1,t2;
52  mysql-test/suite/maria/t/max_length.test  Normal file
@@ -0,0 +1,52 @@
# Test max data length
# This test will use around 1.3G of disk space!

--source include/have_maria.inc
--source include/big_test.inc

drop table if exists t1,t2;

create table t1 (id int(10) unsigned not null auto_increment primary key, v varchar(1000), b blob) row_format=page max_rows=2 engine=aria;
create table t2 (id int(10) unsigned not null auto_increment primary key, v varchar(1000), b blob) row_format=page max_rows=20000000 engine=aria;
lock tables t1 write,t2 write;
--replace_column 12 # 13 # 14 #
show table status like "t_";
insert into t1 values(null, repeat("ab",100),repeat("def",1000));
insert into t1 values(null, repeat("de",200),repeat("ghi",2000));
insert into t1 values(null, repeat("fe",300),repeat("ghi",3000));
insert into t1 values(null, repeat("gh",400),repeat("jkl",10000));
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
insert into t1 (v,b) select v,b from t2;
insert into t2 (v,b) select v,b from t1;
unlock tables;

--error ER_RECORD_FILE_FULL
insert into t1 (v,b) select v,b from t2;
check table t1;
--error ER_RECORD_FILE_FULL
insert into t1 values(null, repeat("gh",400),repeat("jkl",10000));
check table t1;
# Test also with inserting into empty table (different code)
truncate table t1;
--error ER_RECORD_FILE_FULL
insert into t1 (v,b) select v,b from t2;
check table t1;

drop table t1,t2;
@@ -211,7 +211,10 @@ static inline my_bool write_changed_bitmap(MARIA_SHARE *share,
SYNOPSIS
_ma_bitmap_init()
share Share handler
file data file handler
file Data file handler
last_page Pointer to last page (max_file_size) that needs to be
mapped by the bitmap. This is adjusted to bitmap
alignment.

NOTES
This is called the first time a file is opened.

@@ -221,7 +224,8 @@ static inline my_bool write_changed_bitmap(MARIA_SHARE *share,
1 error
*/

my_bool _ma_bitmap_init(MARIA_SHARE *share, File file)
my_bool _ma_bitmap_init(MARIA_SHARE *share, File file,
pgcache_page_no_t *last_page)
{
uint aligned_bit_blocks;
uint max_page_size;

@@ -244,7 +248,7 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file)

/* Size needs to be aligned on 6 */
aligned_bit_blocks= (share->block_size - PAGE_SUFFIX_SIZE) / 6;
bitmap->total_size= aligned_bit_blocks * 6;
bitmap->max_total_size= bitmap->total_size= aligned_bit_blocks * 6;
/*
In each 6 bytes, we have 6*8/3 = 16 pages covered
The +1 is to add the bitmap page, as this doesn't have to be covered

@@ -272,6 +276,27 @@ my_bool _ma_bitmap_init(MARIA_SHARE *share, File file)
first_bitmap_with_space= share->state.first_bitmap_with_space;
_ma_bitmap_reset_cache(share);

/*
The bitmap used to map the file are aligned on 6 bytes. We now
calculate the max file size that can be used by the bitmap. This
is needed to get ma_info() give a true file size so that the user can
estimate if there is still space free for records in the file.
*/
{
pgcache_page_no_t last_bitmap_page;
ulong blocks, bytes;

last_bitmap_page= *last_page - *last_page % bitmap->pages_covered;
blocks= *last_page - last_bitmap_page;
bytes= (blocks * 3) / 8; /* 3 bit per page / 8 bits per byte */
/* Size needs to be aligned on 6 */
bytes/= 6;
bytes*= 6;
bitmap->last_bitmap_page= last_bitmap_page;
bitmap->last_total_size= bytes;
*last_page= ((last_bitmap_page + bytes*8/3));
}

/* Restore first_bitmap_with_space if it's resonable */
if (first_bitmap_with_space <= (share->state.state.data_file_length /
share->block_size))
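The arithmetic in the new block above is easier to check with concrete numbers. The following standalone sketch is not part of the commit; it only mirrors the same calculation, and the block size, PAGE_SUFFIX_SIZE and starting last_page values are assumptions picked for illustration.

#include <stdio.h>

/* Standalone sketch of the new last-page / last-bitmap-size math in
   _ma_bitmap_init().  All inputs below are example assumptions, not
   values taken from the patch. */
int main(void)
{
  unsigned long long block_size= 8192;        /* assumed Aria block size */
  unsigned long long page_suffix_size= 4;     /* assumed PAGE_SUFFIX_SIZE */
  unsigned long long last_page= 1ULL << 23;   /* example max page from caller */

  /* Usable bitmap bytes, aligned on 6, as in the patch */
  unsigned long long aligned_bit_blocks= (block_size - page_suffix_size) / 6;
  unsigned long long max_total_size= aligned_bit_blocks * 6;

  /* 3 bits per data page -> each 6 bytes cover 16 pages; the +1 is the
     bitmap page itself, which does not need to be covered */
  unsigned long long pages_covered= max_total_size * 8 / 3 + 1;

  /* Truncate the last (partial) bitmap so the file never exceeds last_page */
  unsigned long long last_bitmap_page= last_page - last_page % pages_covered;
  unsigned long long blocks= last_page - last_bitmap_page;
  unsigned long long bytes= blocks * 3 / 8;   /* 3 bits per page, 8 bits per byte */
  bytes= bytes / 6 * 6;                       /* keep the 6-byte alignment */
  unsigned long long adjusted_last_page= last_bitmap_page + bytes * 8 / 3;

  printf("max_total_size=%llu pages_covered=%llu\n", max_total_size, pages_covered);
  printf("last_bitmap_page=%llu last_total_size=%llu adjusted_last_page=%llu\n",
         last_bitmap_page, bytes, adjusted_last_page);
  return 0;
}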
@@ -322,7 +347,8 @@ my_bool _ma_bitmap_end(MARIA_SHARE *share)
and then mutex lock would happen in the wrong order.
*/

static inline void _ma_bitmap_mark_file_changed(MARIA_SHARE *share)
static inline void _ma_bitmap_mark_file_changed(MARIA_SHARE *share,
my_bool flush_translog)
{
/*
It's extremely unlikely that the following test is true as it

@@ -334,6 +360,14 @@ static inline void _ma_bitmap_mark_file_changed(MARIA_SHARE *share)
/* purecov: begin inspected */
/* unlock mutex as it can't be hold during _ma_mark_file_changed() */
pthread_mutex_unlock(&share->bitmap.bitmap_lock);

/*
We have to flush the translog to ensure we have registered that the
table is open.
*/
if (flush_translog && share->now_transactional)
(void) translog_flush(share->state.logrec_file_id);

_ma_mark_file_changed(share);
pthread_mutex_lock(&share->bitmap.bitmap_lock);
/* purecov: end */

@@ -375,6 +409,12 @@ my_bool _ma_bitmap_flush(MARIA_SHARE *share)
pthread_mutex_lock(&share->bitmap.bitmap_lock);
if (share->bitmap.changed)
{
/*
We have to mark the file changed here, as otherwise the following
write to pagecache may force a page out from this file, which would
cause _ma_mark_file_changed() to be called with bitmaplock hold!
*/
_ma_bitmap_mark_file_changed(share, 1);
res= write_changed_bitmap(share, &share->bitmap);
share->bitmap.changed= 0;
}

@@ -442,7 +482,7 @@ my_bool _ma_bitmap_flush_all(MARIA_SHARE *share)
DBUG_RETURN(0);
}

_ma_bitmap_mark_file_changed(share);
_ma_bitmap_mark_file_changed(share, 0);

/*
The following should be true as it was tested above. We have to test

@@ -634,7 +674,7 @@ void _ma_bitmap_delete_all(MARIA_SHARE *share)
bzero(bitmap->map, bitmap->block_size);
bitmap->changed= 1;
bitmap->page= 0;
bitmap->used_size= bitmap->total_size;
bitmap->used_size= bitmap->total_size= bitmap->max_total_size;
}
DBUG_VOID_RETURN;
}

@@ -671,7 +711,7 @@ void _ma_bitmap_reset_cache(MARIA_SHARE *share)
(This can only happen if writing to a bitmap page fails)
*/
bitmap->page= ((pgcache_page_no_t) 0) - bitmap->pages_covered;
bitmap->used_size= bitmap->total_size;
bitmap->used_size= bitmap->total_size= bitmap->max_total_size;
bfill(bitmap->map, share->block_size, 255);
#ifndef DBUG_OFF
memcpy(bitmap->map + bitmap->block_size, bitmap->map, bitmap->block_size);

@@ -946,6 +986,20 @@ void _ma_get_bitmap_description(MARIA_FILE_BITMAP *bitmap,
}

/*
Adjust bitmap->total_size to not go over max_data_file_size
*/

static void adjust_total_size(MARIA_HA *info, pgcache_page_no_t page)
{
MARIA_FILE_BITMAP *bitmap= &info->s->bitmap;

if (page < bitmap->last_bitmap_page)
bitmap->total_size= bitmap->max_total_size; /* Use all bits in bitmap */
else
bitmap->total_size= bitmap->last_total_size;
}

/***************************************************************************
Reading & writing bitmap pages
***************************************************************************/

@@ -982,12 +1036,16 @@ static my_bool _ma_read_bitmap_page(MARIA_HA *info,
DBUG_ASSERT(!bitmap->changed);

bitmap->page= page;
if (((page + 1) * bitmap->block_size) > share->state.state.data_file_length)
if ((page + 1) * bitmap->block_size > share->state.state.data_file_length)
{
/* Inexistent or half-created page */
res= _ma_bitmap_create_missing(info, bitmap, page);
if (!res)
adjust_total_size(info, page);
DBUG_RETURN(res);
}

adjust_total_size(info, page);
bitmap->used_size= bitmap->total_size;
DBUG_ASSERT(share->pagecache->block_size == bitmap->block_size);
res= pagecache_read(share->pagecache,

@@ -1036,7 +1094,12 @@ static my_bool _ma_change_bitmap_page(MARIA_HA *info,
{
DBUG_ENTER("_ma_change_bitmap_page");

_ma_bitmap_mark_file_changed(info->s);
/*
We have to mark the file changed here, as otherwise the following
read/write to pagecache may force a page out from this file, which would
cause _ma_mark_file_changed() to be called with bitmaplock hold!
*/
_ma_bitmap_mark_file_changed(info->s, 1);

if (bitmap->changed)
{

@@ -2978,6 +3041,11 @@ static my_bool _ma_bitmap_create_missing(MARIA_HA *info,
/* First (in offset order) bitmap page to create */
if (data_file_length < block_size)
goto err; /* corrupted, should have first bitmap page */
if (page * block_size >= share->base.max_data_file_length)
{
my_errno= HA_ERR_RECORD_FILE_FULL;
goto err;
}

from= (data_file_length / block_size - 1) / bitmap->pages_covered + 1;
from*= bitmap->pages_covered;
@@ -414,14 +414,29 @@ void _ma_init_block_record_data(void)

my_bool _ma_once_init_block_record(MARIA_SHARE *share, File data_file)
{
my_bool res;
pgcache_page_no_t last_page;

share->base.max_data_file_length=
(((ulonglong) 1 << ((share->base.rec_reflength-1)*8))-1) *
share->block_size;
/*
First calculate the max file length with can have with a pointer of size
rec_reflength.

The 'rec_reflength - 1' is because one byte is used for row
position withing the page.
The /2 comes from _ma_transaction_recpos_to_keypos() where we use
the lowest bit to mark if there is a transid following the rownr.
*/
last_page= ((ulonglong) 1 << ((share->base.rec_reflength-1)*8))/2;
if (!last_page) /* Overflow; set max size */
last_page= ~(pgcache_page_no_t) 0;

res= _ma_bitmap_init(share, data_file, &last_page);
share->base.max_data_file_length= _ma_safe_mul(last_page + 1,
share->block_size);
#if SIZEOF_OFF_T == 4
set_if_smaller(share->base.max_data_file_length, INT_MAX32);
#endif
return _ma_bitmap_init(share, data_file);
return res;
}
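A rough way to see what the new bound means in practice: with a saturating multiply standing in for _ma_safe_mul() (that clamp-on-overflow behaviour is an assumption made for this sketch, not the commit's definition), max_data_file_length follows directly from rec_reflength and the block size. The values below are examples only.

#include <stdio.h>

/* Sketch (not part of the commit) of how the new code bounds
   max_data_file_length from the row-pointer size. */
static unsigned long long safe_mul(unsigned long long a, unsigned long long b)
{
  if (b && a > ~0ULL / b)
    return ~0ULL;                      /* clamp instead of overflowing */
  return a * b;
}

int main(void)
{
  unsigned rec_reflength= 4;           /* example row pointer size in bytes */
  unsigned long long block_size= 8192; /* example Aria block size */

  /* One pointer byte addresses the row inside the page, and the lowest
     bit of the page part flags a following transid, hence the /2. */
  unsigned long long last_page=
    ((unsigned long long) 1 << ((rec_reflength - 1) * 8)) / 2;

  unsigned long long max_data_file_length= safe_mul(last_page + 1, block_size);

  /* rec_reflength 4, block_size 8192 -> last_page 2^23, roughly 64GiB */
  printf("last_page=%llu max_data_file_length=%llu\n",
         last_page, max_data_file_length);
  return 0;
}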
@@ -5187,7 +5202,7 @@ my_bool _ma_scan_init_block_record(MARIA_HA *info)
(uchar *) my_malloc(share->block_size * 2, MYF(MY_WME))))))
DBUG_RETURN(1);
info->scan.page_buff= info->scan.bitmap_buff + share->block_size;
info->scan.bitmap_end= info->scan.bitmap_buff + share->bitmap.total_size;
info->scan.bitmap_end= info->scan.bitmap_buff + share->bitmap.max_total_size;

/* Set scan variables to get _ma_scan_block() to start with reading bitmap */
info->scan.number_of_rows= 0;

@@ -187,7 +187,8 @@ maria_page_get_lsn(uchar *page, pgcache_page_no_t page_no, uchar* data_ptr);
/* ma_bitmap.c */
extern const char *bits_to_txt[];

my_bool _ma_bitmap_init(MARIA_SHARE *share, File file);
my_bool _ma_bitmap_init(MARIA_SHARE *share, File file,
pgcache_page_no_t *last_page);
my_bool _ma_bitmap_end(MARIA_SHARE *share);
my_bool _ma_bitmap_flush(MARIA_SHARE *share);
my_bool _ma_bitmap_flush_all(MARIA_SHARE *share);
@@ -408,7 +408,7 @@ int maria_chk_size(HA_CHECK *param, register MARIA_HA *info)
size= my_seek(share->kfile.file, 0L, MY_SEEK_END, MYF(MY_THREADSAFE));
if ((skr=(my_off_t) share->state.state.key_file_length) != size)
{
/* Don't give error if file generated by mariapack */
/* Don't give error if file generated by maria_pack */
if (skr > size && maria_is_any_key_active(share->state.key_map))
{
error=1;

@@ -422,13 +422,20 @@ int maria_chk_size(HA_CHECK *param, register MARIA_HA *info)
"Size of indexfile is: %-8s Expected: %s",
llstr(size,buff), llstr(skr,buff2));
}
if (!(param->testflag & T_VERY_SILENT) &&
if (size > share->base.max_key_file_length)
{
_ma_check_print_warning(param,
"Size of indexfile is: %-8s which is bigger than max indexfile size: %s",
ullstr(size,buff),
ullstr(share->base.max_key_file_length, buff2));
}
else if (!(param->testflag & T_VERY_SILENT) &&
! (share->options & HA_OPTION_COMPRESS_RECORD) &&
ulonglong2double(share->state.state.key_file_length) >
ulonglong2double(share->base.margin_key_file_length)*0.9)
_ma_check_print_warning(param,"Keyfile is almost full, %10s of %10s used",
llstr(share->state.state.key_file_length,buff),
llstr(share->base.max_key_file_length-1,buff));
llstr(share->base.max_key_file_length,buff));

size= my_seek(info->dfile.file, 0L, MY_SEEK_END, MYF(0));
skr=(my_off_t) share->state.state.data_file_length;

@@ -456,13 +463,19 @@ int maria_chk_size(HA_CHECK *param, register MARIA_HA *info)
llstr(size,buff), llstr(skr,buff2));
}
}
if (!(param->testflag & T_VERY_SILENT) &&
if (size > share->base.max_data_file_length)
{
_ma_check_print_warning(param,
"Size of datafile is: %-8s which is bigger than max datafile size: %s",
ullstr(size,buff),
ullstr(share->base.max_data_file_length, buff2));
} else if (!(param->testflag & T_VERY_SILENT) &&
!(share->options & HA_OPTION_COMPRESS_RECORD) &&
ulonglong2double(share->state.state.data_file_length) >
(ulonglong2double(share->base.max_data_file_length)*0.9))
_ma_check_print_warning(param, "Datafile is almost full, %10s of %10s used",
llstr(share->state.state.data_file_length,buff),
llstr(share->base.max_data_file_length-1,buff2));
llstr(share->base.max_data_file_length,buff2));
DBUG_RETURN(error);
} /* maria_chk_size */

@@ -1983,8 +1996,8 @@ static int check_block_record(HA_CHECK *param, MARIA_HA *info, int extend,
data= bitmap_buff + offset_page / 8;
bitmap_pattern= uint2korr(data);
if (((bitmap_pattern >> offset)) ||
(data + 2 < bitmap_buff + share->bitmap.total_size &&
_ma_check_if_zero(data+2, bitmap_buff + share->bitmap.total_size -
(data + 2 < bitmap_buff + share->bitmap.max_total_size &&
_ma_check_if_zero(data+2, bitmap_buff + share->bitmap.max_total_size -
data - 2)))
{
ulonglong bitmap_page;

@@ -2098,14 +2111,14 @@ int maria_chk_data_link(HA_CHECK *param, MARIA_HA *info, my_bool extend)
llstr(share->state.state.records,llbuff2));
error=1;
}
else if (param->record_checksum &&
if (param->record_checksum &&
param->record_checksum != param->tmp_record_checksum)
{
_ma_check_print_error(param,
"Key pointers and record positions doesn't match");
error=1;
}
else if (param->glob_crc != share->state.state.checksum &&
if (param->glob_crc != share->state.state.checksum &&
(share->options &
(HA_OPTION_CHECKSUM | HA_OPTION_COMPRESS_RECORD)))
{

@@ -2114,7 +2127,7 @@ int maria_chk_data_link(HA_CHECK *param, MARIA_HA *info, my_bool extend)
"stored in the index file");
error=1;
}
else if (!extend)
if (!extend)
{
uint key;
for (key=0 ; key < share->base.keys; key++)

@@ -5321,7 +5334,10 @@ int _ma_sort_write_record(MARIA_SORT_PARAM *sort_param)
if ((sort_param->current_filepos=
(*share->write_record_init)(info, sort_param->record)) ==
HA_OFFSET_ERROR)
{
_ma_check_print_error(param, "%d when writing to datafile", my_errno);
DBUG_RETURN(1);
}
/* Pointer to end of file */
sort_param->filepos= share->state.state.data_file_length;
break;

@@ -5943,6 +5959,9 @@ int maria_recreate_table(HA_CHECK *param, MARIA_HA **org_info, char *filename)
MARIA_CREATE_INFO create_info;
DBUG_ENTER("maria_recreate_table");

if ((!(param->testflag & T_SILENT)))
printf("Recreating table '%s'\n", param->isam_file_name);

error=1; /* Default error */
info= **org_info;
status_info= (*org_info)->state[0];
@@ -326,7 +326,15 @@ int maria_create(const char *name, enum data_file_type datafile_type,
(~(ulonglong) 0)/ci->max_rows < (ulonglong) pack_reclength)
ci->data_file_length= ~(ulonglong) 0;
else
ci->data_file_length=(ulonglong) ci->max_rows*pack_reclength;
{
ci->data_file_length= _ma_safe_mul(ci->max_rows, pack_reclength);
if (datafile_type == BLOCK_RECORD)
{
/* Assume that blocks are only half full (very pessimistic!) */
ci->data_file_length= _ma_safe_mul(ci->data_file_length, 2);
set_if_bigger(ci->data_file_length, maria_block_size*2);
}
}
}
else if (!ci->max_rows)
{

@@ -338,7 +346,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
ulonglong data_file_length= ci->data_file_length;
if (!data_file_length)
data_file_length= ((((ulonglong) 1 << ((BLOCK_RECORD_POINTER_SIZE-1) *
8)) -1) * maria_block_size);
8))/2 -1) * maria_block_size);
if (rows_per_page > 0)
{
set_if_smaller(rows_per_page, MAX_ROWS_PER_PAGE);

@@ -360,11 +368,11 @@ int maria_create(const char *name, enum data_file_type datafile_type,
{
/*
The + 1 is for record position withing page
The / 2 is because we need one bit for knowing if there is transid's
The * 2 is because we need one bit for knowing if there is transid's
after the row pointer
*/
pointer= maria_get_pointer_length((ci->data_file_length /
(maria_block_size * 2)), 3) + 1;
maria_block_size) * 2, 3) + 1;
set_if_smaller(pointer, BLOCK_RECORD_POINTER_SIZE);

|
@@ -8166,6 +8166,7 @@ int translog_assign_id_to_share(MARIA_HA *tbl_info, TRN *trn)
before it's written to the log.
*/
share->id= id;
share->state.logrec_file_id= lsn;
}
pthread_mutex_unlock(&share->intern_lock);
return 0;
@@ -509,7 +509,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
(ulonglong) 1 << (share->base.rec_reflength*8))-1);

max_key_file_length=
_ma_safe_mul(maria_block_size,
_ma_safe_mul(share->base.block_size,
((ulonglong) 1 << (share->base.key_reflength*8))-1);
#if SIZEOF_OFF_T == 4
set_if_smaller(max_data_file_length, INT_MAX32);

@@ -826,7 +826,7 @@ MARIA_HA *maria_open(const char *name, int mode, uint open_flags)
share->base.margin_key_file_length=(share->base.max_key_file_length -
(keys ? MARIA_INDEX_BLOCK_MARGIN *
share->block_size * keys : 0));
share->block_size= share->base.block_size;

my_free(disk_cache, MYF(0));
_ma_setup_functions(share);
if ((*share->once_init)(share, info.dfile.file))
@@ -1576,8 +1576,8 @@ static void descript(HA_CHECK *param, register MARIA_HA *info, char *name)
if (share->base.max_data_file_length != HA_OFFSET_ERROR ||
share->base.max_key_file_length != HA_OFFSET_ERROR)
printf("Max datafile length: %16s Max keyfile length: %18s\n",
llstr(share->base.max_data_file_length-1,llbuff),
llstr(share->base.max_key_file_length-1,llbuff2));
ullstr(share->base.max_data_file_length,llbuff),
ullstr(share->base.max_key_file_length,llbuff2));
}
}
printf("Block_size: %16d\n",(int) share->block_size);
@@ -126,6 +126,8 @@ typedef struct st_maria_state_info
increased.
*/
LSN skip_redo_lsn;
/* LSN when we wrote file id to the log */
LSN logrec_file_id;

/* the following isn't saved on disk */
uint state_diff_length; /* Should be 0 */

@@ -245,9 +247,10 @@ typedef struct st_maria_file_bitmap
{
uchar *map;
pgcache_page_no_t page; /* Page number for current bitmap */
uint used_size; /* Size of bitmap head that is not 0 */
pgcache_page_no_t last_bitmap_page; /* Last possible bitmap page */
my_bool changed; /* 1 if page needs to be written */
my_bool changed_not_flushed; /* 1 if some bitmap is not flushed */
uint used_size; /* Size of bitmap head that is not 0 */
uint flush_all_requested; /**< If _ma_bitmap_flush_all waiting */
uint waiting_for_flush_all_requested; /* If someone is waiting for above */
uint non_flushable; /**< 0 if bitmap and log are in sync */

@@ -261,6 +264,8 @@ typedef struct st_maria_file_bitmap
/* Constants, allocated when initiating bitmaps */
uint sizes[8]; /* Size per bit combination */
uint total_size; /* Total usable size of bitmap page */
uint max_total_size; /* Max value for total_size */
uint last_total_size; /* Size of bitmap on last_bitmap_page */
uint block_size; /* Block size of file */
ulong pages_covered; /* Pages covered by bitmap + 1 */
DYNAMIC_ARRAY pinned_pages; /**< not-yet-flushable bitmap pages */