
Fix for BUG#34114 "maria_chk reports false error when several tables on
command-line" and BUG#34062 "Maria table corruption on master".
Use 5 bytes (instead of 4) to store a page's number in the checkpoint
record, allowing bigger tables (1PB with maria-block-size=1kB).
Help pushbuild not run out of memory by moving the portion of
maria-recovery.test which generates lots of data into a -big.test.
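
For illustration, a minimal sketch of the 5-byte page number mentioned
above. The helper names are hypothetical (not the actual Maria macros),
but the arithmetic is the one from the commit message: 2^40 pages of
1kB each address 1PB, versus 4TB with a 4-byte page number.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helpers (not the real Maria macros): pack/unpack a page
   number into 5 little-endian bytes, i.e. 40 bits. */
static void page_store_5(unsigned char *dst, uint64_t pgno)
{
  assert(pgno < (1ULL << 40));                 /* must fit in 40 bits */
  for (int i= 0; i < 5; i++)
    dst[i]= (unsigned char) (pgno >> (8 * i));
}

static uint64_t page_fetch_5(const unsigned char *src)
{
  uint64_t pgno= 0;
  for (int i= 0; i < 5; i++)
    pgno|= (uint64_t) src[i] << (8 * i);
  return pgno;
}

int main(void)
{
  /* 2^40 pages * 2^10 bytes/page = 2^50 bytes = 1PB addressable,
     versus 2^32 * 2^10 = 4TB with only 4 bytes per page number. */
  unsigned char buf[5];
  page_store_5(buf, (1ULL << 40) - 1);
  printf("max 5-byte page number: %llu\n",
         (unsigned long long) page_fetch_5(buf));
  return 0;
}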


mysql-test/r/maria-recovery.result:
  result moved
mysql-test/t/maria-recovery.test:
  the part which generates a lot of data was moved to maria-recovery-big.test
mysys/my_pread.c:
  To fix BUG#34062, where a 1.1TB file was generated due to a wrong
  pwrite offset, it was useful to not lose precision on 'offset' in
  DBUG_PRINT, so that the crazy value is visible (see the precision
  sketch after this file list).
mysys/my_read.c:
  To fix BUG#34062, where a 1.1TB file was generated due to a wrong
  pwrite offset, it was useful to not lose precision on 'offset' in
  DBUG_PRINT, so that the crazy value is visible.
mysys/my_write.c:
  To fix BUG#34062, where a 1.1TB file was generated due to a wrong
  pwrite offset, it was useful to not lose precision on 'offset' in
  DBUG_PRINT, so that the crazy value is visible.
storage/maria/ha_maria.cc:
  When starting a bulk insert, we throw away dirty index pages from the
  cache. Unique (non-disabled) key insertions thus read out-of-date
  pages from disk, leading to BUG#34062 "Maria table corruption on
  master": a DELETE in procedure viewer_sp() had deleted all rows of
  viewer_tbl2 one by one, putting index page 1 into key_del; that page
  was thrown away at the start of the INSERT SELECT. The INSERT SELECT
  then needed a page to insert keys into, looked at key_del, found 1,
  read page 1 from disk, and used its out-of-date content to set the
  new value of key_del (a crazy value of 1TB). A later insertion needed
  another index page, tried to read a page at this crazy offset, and
  failed, leading to the corruption mark.
  The fix is to destroy the out-of-date pages and make the state
  consistent with that, i.e. call maria_delete_all_rows() (see the
  bulk-insert sketch after this file list).
storage/maria/ma_blockrec.c:
  Special hook for UNDO_BULK_INSERT
storage/maria/ma_blockrec.h:
  Special hook for UNDO_BULK_INSERT
storage/maria/ma_check.c:
  Fix for BUG#34114 "maria_chk reports false error when several tables
  on command-line": if the Nth table on the command line used
  BLOCK_RECORD, its check would start from the param->record_checksum
  computed while checking table N-1 (see the checksum sketch after this
  file list).
storage/maria/ma_delete_all.c:
  comment
storage/maria/ma_loghandler.c:
  Special hook for UNDO_BULK_INSERT
storage/maria/ma_page.c:
  comment
storage/maria/ma_pagecache.c:
  page number is 5 bytes in checkpoint record now (allows bigger tables)
storage/maria/ma_recovery.c:
  page number is 5 bytes in checkpoint record now
storage/maria/ma_recovery_util.c:
  page number is 5 bytes now
storage/maria/ma_write.c:
  typo
mysql-test/r/maria-recovery-big.result:
  result is correct
mysql-test/t/maria-recovery-big-master.opt:
  usual options for recovery tests
mysql-test/t/maria-recovery-big.test:
  Moving out the big blob test to a -big test (it exhausts memory when
  using /dev/shm on certain machines)
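
A small sketch of the precision point made for my_pread.c, my_read.c
and my_write.c above. The real change is in DBUG_PRINT format strings;
plain printf is used here to keep the sketch self-contained: truncating
a 64-bit offset for printing hides the bogus ~1.1TB value, while
printing it at full width keeps it visible in the trace.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* A "crazy" ~1.1TB offset like the one in BUG#34062. */
  uint64_t offset= 1210000000000ULL;

  /* Truncating cast: squeezed into 32 bits, the trace shows a
     harmless-looking small number instead of the real offset. */
  printf("truncated: %lu\n", (unsigned long) (uint32_t) offset);

  /* Full precision: the suspicious value stays visible in the trace. */
  printf("full:      %llu\n", (unsigned long long) offset);
  return 0;
}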
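
For the ha_maria.cc entry, a self-contained sketch with hypothetical
types and names (the real code is ha_maria::start_bulk_insert() and
maria_delete_all_rows()): the point is that index pages may only be
destroyed together with the state that references them, in particular
the key_del free-page pointer.

#include <stdio.h>

/* Hypothetical, simplified table state; the real structures are
   MARIA_HA / MARIA_SHARE in storage/maria. */
typedef struct
{
  unsigned long long key_del;    /* head of free index-page list */
  unsigned long long records;
  int has_dirty_index_pages;
} sketch_state;

/* Stand-in for maria_delete_all_rows(): empties data and index and
   resets the state so no stale key_del pointer can survive. */
static void delete_all_rows_sketch(sketch_state *s)
{
  s->key_del= 0;                /* no free pages left to reuse */
  s->records= 0;
  s->has_dirty_index_pages= 0;  /* pages gone together with the state */
}

/* Sketch of the start-of-bulk-insert decision.  Throwing dirty index
   pages away while keeping the old state is what led to BUG#34062: a
   later insert followed the stale key_del to an out-of-date on-disk
   page and got a bogus free-page offset. */
static void start_bulk_insert_sketch(sketch_state *s, int may_drop_all_rows)
{
  if (may_drop_all_rows)
    delete_all_rows_sketch(s);          /* state and pages reset together */
  else
    s->has_dirty_index_pages= 0;        /* flush (not discard) in real code */
}

int main(void)
{
  sketch_state s= { 1 /* page 1 in key_del */, 42, 1 };
  start_bulk_insert_sketch(&s, 1);
  printf("key_del=%llu records=%llu\n", s.key_del, s.records);
  return 0;
}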
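
For the ma_check.c entry, a sketch with hypothetical names (the real
accumulator is param->record_checksum in ma_check.c): per-table
accumulators must be reset before each table is checked, otherwise
table N inherits the value computed for table N-1.

#include <stdio.h>

/* Hypothetical stand-in for the check parameters. */
typedef struct { unsigned long long record_checksum; } check_param_sketch;

static unsigned long long check_one_table_sketch(check_param_sketch *param,
                                                 const char *name,
                                                 unsigned long long rows)
{
  /* Reset per-table accumulators; without this, table N would be
     judged against the checksum left over from table N-1. */
  param->record_checksum= 0;
  for (unsigned long long i= 0; i < rows; i++)
    param->record_checksum+= i;          /* pretend per-row checksum */
  printf("%s: checksum %llu\n", name, param->record_checksum);
  return param->record_checksum;
}

int main(void)
{
  check_param_sketch param= { 0 };
  check_one_table_sketch(&param, "t1", 10);
  check_one_table_sketch(&param, "t2", 10);  /* identical result expected */
  return 0;
}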
commit 2fcff8988a
parent e4e8418ced
Author: unknown
Date:   2008-01-29 22:20:59 +01:00
19 changed files with 224 additions and 144 deletions

mysql-test/r/maria-recovery-big.result

@@ -0,0 +1,81 @@
set global maria_log_file_size=4294967295;
drop database if exists mysqltest;
create database mysqltest;
use mysqltest;
* TEST of recovery with blobs
* shut down mysqld, removed logs, restarted it
use mysqltest;
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
* copied t1 for feeding_recovery
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
flush table t1;
* copied t1 for comparison
lock table t1 write;
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
select a,length(b) from t1;
a length(b)
1 31457280
2 20971520
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
select a,length(b) from t1;
a length(b)
1 8
2 5
SET SESSION debug="+d,maria_flush_whole_log,maria_crash";
* crashing mysqld intentionally
set global maria_checkpoint_interval=1;
ERROR HY000: Lost connection to MySQL server during query
* copied t1 back for feeding_recovery
* recovery happens
check table t1 extended;
Table Op Msg_type Msg_text
mysqltest.t1 check status OK
* testing that checksum after recovery is as expected
Checksum-check
ok
use mysqltest;
drop table t1;
drop database mysqltest_for_feeding_recovery;
drop database mysqltest_for_comparison;
drop database mysqltest;

mysql-test/r/maria-recovery.result

@@ -302,80 +302,6 @@ a
1
3
drop table t1;
* TEST of recovery with blobs
* shut down mysqld, removed logs, restarted it
use mysqltest;
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
* copied t1 for feeding_recovery
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
flush table t1;
* copied t1 for comparison
lock table t1 write;
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
update t1 set b=CONCAT(b,b);
select a,length(b) from t1;
a length(b)
1 31457280
2 20971520
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
update t1 set b=mid(b,1,length(b)/2);
select a,length(b) from t1;
a length(b)
1 8
2 5
SET SESSION debug="+d,maria_flush_whole_log,maria_crash";
* crashing mysqld intentionally
set global maria_checkpoint_interval=1;
ERROR HY000: Lost connection to MySQL server during query
* copied t1 back for feeding_recovery
* recovery happens
check table t1 extended;
Table Op Msg_type Msg_text
mysqltest.t1 check status OK
* testing that checksum after recovery is as expected
Checksum-check
ok
use mysqltest;
drop table t1;
* TEST of recovery when crash before bulk-insert-with-repair is committed
create table t1 (a varchar(100), key(a)) engine=maria;
create table t2 (a varchar(100)) engine=myisam;

mysql-test/t/maria-recovery-big-master.opt

@@ -0,0 +1,2 @@
--skip-stack-trace --skip-core-file --loose-debug-on=1

mysql-test/t/maria-recovery-big.test

@@ -0,0 +1,68 @@
# Maria recovery test which cannot run in shared memory
# because it generates too much data, or which takes a lot of time.
--source include/not_embedded.inc
# Don't test this under valgrind, memory leaks will occur as we crash
--source include/not_valgrind.inc
# Binary must be compiled with debug for crash to occur
--source include/have_debug.inc
--source include/have_maria.inc
set global maria_log_file_size=4294967295;
--disable_warnings
drop database if exists mysqltest;
--enable_warnings
create database mysqltest;
# Include scripts can perform SQL. For it to not influence the main test
# they use a separate connection. This way if they use a DDL it would
# not autocommit in the main test.
connect (admin, 127.0.0.1, root,,mysqltest,,);
--enable_reconnect
connection default;
use mysqltest;
--enable_reconnect
#
# Test with big blobs
#
--echo * TEST of recovery with blobs
-- source include/maria_empty_logs.inc
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
let $mms_tables=1;
-- source include/maria_make_snapshot_for_feeding_recovery.inc
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
-- source include/maria_make_snapshot_for_comparison.inc
lock table t1 write;
let $loop=20;
while ($loop)
{
update t1 set b=CONCAT(b,b);
dec $loop;
}
select a,length(b) from t1;
let $loop=22;
while ($loop)
{
update t1 set b=mid(b,1,length(b)/2);
dec $loop;
}
select a,length(b) from t1;
# we want recovery to run on the first snapshot made above
let $mvr_restore_old_snapshot=1;
let $mms_compare_physically=0;
let $mvr_debug_option="+d,maria_flush_whole_log,maria_crash";
let $mvr_crash_statement= set global maria_checkpoint_interval=1;
-- source include/maria_verify_recovery.inc
drop table t1;
# clean up everything
let $mms_purpose=feeding_recovery;
eval drop database mysqltest_for_$mms_purpose;
let $mms_purpose=comparison;
eval drop database mysqltest_for_$mms_purpose;
drop database mysqltest;

mysql-test/t/maria-recovery.test

@@ -256,41 +256,6 @@ select * from t1;
select * from t1;
drop table t1;
#
# Test with big blobs
#
--echo * TEST of recovery with blobs
-- source include/maria_empty_logs.inc
set @@max_allowed_packet=32000000;
create table t1 (a int, b longtext) engine=maria table_checksum=1;
let $mms_tables=1;
-- source include/maria_make_snapshot_for_feeding_recovery.inc
insert into t1 values (1,"123456789012345678901234567890"),(2,"09876543210987654321");
-- source include/maria_make_snapshot_for_comparison.inc
lock table t1 write;
let $loop=20;
while ($loop)
{
update t1 set b=CONCAT(b,b);
dec $loop;
}
select a,length(b) from t1;
let $loop=22;
while ($loop)
{
update t1 set b=mid(b,1,length(b)/2);
dec $loop;
}
select a,length(b) from t1;
# we want recovery to run on the first snapshot made above
let $mvr_restore_old_snapshot=1;
let $mms_compare_physically=0;
let $mvr_debug_option="+d,maria_flush_whole_log,maria_crash";
let $mvr_crash_statement= set global maria_checkpoint_interval=1;
-- source include/maria_verify_recovery.inc
drop table t1;
--echo * TEST of recovery when crash before bulk-insert-with-repair is committed
create table t1 (a varchar(100), key(a)) engine=maria;
create table t2 (a varchar(100)) engine=myisam;