HA_ERR_TABLE_DEF_CHANGED support in ha_archive
@@ -12815,9 +12815,8 @@ DROP TABLE t1;
 #
 create table t1 (a int, b char(50)) engine=archive;
 select * from t1;
-ERROR HY000: Table 't1' is marked as crashed and should be repaired
+a b
+1 aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
 show warnings;
 Level Code Message
-Warning 127 Got error 127 when reading table `test`.`t1`
-Error 1194 Table 't1' is marked as crashed and should be repaired
 drop table t1;
@@ -1736,7 +1736,7 @@ DROP TABLE t1;
 create table t1 (a int, b char(50)) engine=archive;
 --remove_file $MYSQLD_DATADIR/test/t1.ARZ
 copy_file std_data/t917689.ARZ $MYSQLD_DATADIR/test/t1.ARZ;
---error 1194
+# --error 1194
 select * from t1;
 show warnings;
 drop table t1;
@@ -62,6 +62,15 @@ Tables_in_test
 t0
 t1
 #
+# discover on HA_ERR_TABLE_DEF_CHANGED
+#
+alter table t1 modify a int default 5;
+show create table t1;
+Table Create Table
+t1 CREATE TABLE `t1` (
+  `a` int(11) DEFAULT NULL
+) ENGINE=ARCHIVE DEFAULT CHARSET=latin1
+#
 # discover on drop
 #
 flush tables;
@@ -48,6 +48,14 @@ flush tables;
 rename table t2 to t0;
 show tables;
 
+--echo #
+--echo # discover on HA_ERR_TABLE_DEF_CHANGED
+--echo #
+copy_file $mysqld_datadir/test/t1.ARZ $MYSQL_TMP_DIR/t1.ARZ;
+alter table t1 modify a int default 5;
+move_file $MYSQL_TMP_DIR/t1.ARZ $mysqld_datadir/test/t1.ARZ;
+show create table t1;
+
 --echo #
 --echo # discover on drop
 --echo #
@@ -159,12 +159,11 @@ static PSI_mutex_info all_archive_mutexes[]=
   { &az_key_mutex_ARCHIVE_SHARE_mutex, "ARCHIVE_SHARE::mutex", 0}
 };
 
-PSI_file_key arch_key_file_metadata, arch_key_file_data, arch_key_file_frm;
+PSI_file_key arch_key_file_metadata, arch_key_file_data;
 static PSI_file_info all_archive_files[]=
 {
   { &arch_key_file_metadata, "metadata", 0},
-  { &arch_key_file_data, "data", 0},
-  { &arch_key_file_frm, "FRM", 0}
+  { &arch_key_file_data, "data", 0}
 };
 
 static void init_archive_psi_keys(void)
@@ -438,6 +437,9 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, int *rc)
     */
     if (archive_tmp.version < ARCHIVE_VERSION)
       *rc= HA_ERR_TABLE_NEEDS_UPGRADE;
+    else if (frm_compare(&archive_tmp, table_name))
+      *rc= HA_ERR_TABLE_DEF_CHANGED;
+
     azclose(&archive_tmp);
 
     (void) my_hash_insert(&archive_open_tables, (uchar*) share);
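
For orientation, the two checks above form the decision ladder taken when an archive share is opened: an old on-disk format wins over everything else, and only a current-format file has its stored definition compared against the one on disk. A minimal sketch of that ladder in plain C++ (hypothetical names; stored_frm/disk_frm stand in for the frm blob inside the archive and the readfrm() output, this is not the handler API):

// Sketch only: order of checks when an archive share is first opened.
#include <vector>

enum share_check { SHARE_OK, SHARE_NEEDS_UPGRADE, SHARE_DEF_CHANGED };

share_check check_share(unsigned stored_version, unsigned current_version,
                        const std::vector<unsigned char> &stored_frm,
                        const std::vector<unsigned char> &disk_frm)
{
  if (stored_version < current_version)
    return SHARE_NEEDS_UPGRADE;       // reported as HA_ERR_TABLE_NEEDS_UPGRADE
  if (stored_frm != disk_frm)
    return SHARE_DEF_CHANGED;         // reported as HA_ERR_TABLE_DEF_CHANGED
  return SHARE_OK;
}
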
@@ -566,14 +568,8 @@ int ha_archive::open(const char *name, int mode, uint open_options)
   {
   case 0:
     break;
+  case HA_ERR_TABLE_DEF_CHANGED:
   case HA_ERR_CRASHED_ON_USAGE:
-    DBUG_PRINT("ha_archive", ("archive table was crashed"));
-    if (open_options & HA_OPEN_FOR_REPAIR)
-    {
-      rc= 0;
-      break;
-    }
-    /* fall through */
   case HA_ERR_TABLE_NEEDS_UPGRADE:
     if (open_options & HA_OPEN_FOR_REPAIR)
     {
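
With the new case label, a changed definition is treated at open() time just like a crashed or old-format file: the open is only tolerated when the table is opened for repair, otherwise the error code is returned so the caller can react (for HA_ERR_TABLE_DEF_CHANGED, by rediscovering the definition stored inside the archive, which the discover test above exercises). A compressed sketch of that dispatch, with placeholder names rather than the real handler interface:

// Sketch only: grouping the three "not usable as-is" open results.
enum open_rc { OPEN_OK= 0, DEF_CHANGED, CRASHED, NEEDS_UPGRADE };

int dispatch_open_rc(int rc, bool open_for_repair)
{
  switch (rc)
  {
  case OPEN_OK:
    break;
  case DEF_CHANGED:     // new: stored frm no longer matches the on-disk frm
  case CRASHED:         // damaged data file
  case NEEDS_UPGRADE:   // older ARCHIVE_VERSION
    if (open_for_repair)
      break;            // proceed so CHECK/REPAIR can run against the table
    return rc;          // otherwise fail the open and let the caller react
  default:
    return rc;
  }
  return 0;
}
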
@@ -654,7 +650,8 @@ int ha_archive::frm_copy(azio_stream *src, azio_stream *dst)
   int rc= 0;
   uchar *frm_ptr;
 
-  if (!(frm_ptr= (uchar *) my_malloc(src->frm_length, MYF(0))))
+  if (!(frm_ptr= (uchar *) my_malloc(src->frm_length,
+                                     MYF(MY_THREAD_SPECIFIC | MY_WME))))
     return HA_ERR_OUT_OF_MEM;
 
   /* Write file offset is set to the end of the file. */
@@ -668,6 +665,46 @@ int ha_archive::frm_copy(azio_stream *src, azio_stream *dst)
 }
 
 
+/**
+  Compare frm blob with the on-disk frm file
+
+  @param  s     The azio stream.
+  @param  path  A path for readfrm()
+
+  @return Zero if equal, non-zero otherwise.
+*/
+
+int ha_archive::frm_compare(azio_stream *s, const char *path)
+{
+  int rc= 0;
+  uchar *frm_ptr= 0, *azfrm_ptr= 0;
+  size_t frm_len;
+
+  /* no frm = no discovery. perhaps it's a partitioned table */
+  if (readfrm(path, &frm_ptr, &frm_len))
+    goto err;
+
+  if (!(azfrm_ptr= (uchar *) my_malloc(s->frm_length,
+                                       MYF(MY_THREAD_SPECIFIC | MY_WME))))
+    goto err;
+
+  rc= 1;
+
+  if (frm_len != s->frm_length)
+    goto err;
+
+  if (azread_frm(s, azfrm_ptr))
+    goto err;
+
+  rc= memcmp(frm_ptr, azfrm_ptr, frm_len);
+
+err:
+  my_free(frm_ptr);
+  my_free(azfrm_ptr);
+  return rc;
+}
+
+
 /*
   We create our data file here. The format is pretty simple.
   You can read about the format of the data file above.
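
The initialisation order in frm_compare() above is deliberate: rc starts at 0, so a table with no .frm on disk (for example a partitioned table, as the comment notes) compares as unchanged and no discovery is triggered; rc is then bumped to 1 before the length check and the azread_frm() call, so once an on-disk definition does exist, a length mismatch or a failed read both count as "changed". The same pattern in a standalone, plain-C++ sketch (read_file() and the vector types are stand-ins for readfrm()/azread_frm(), not MariaDB code):

// Sketch of frm_compare()'s return convention:
//   0  -> definitions considered equal (or nothing on disk to compare with)
//   !0 -> definitions differ, or the stored copy could not be read
#include <algorithm>
#include <fstream>
#include <iterator>
#include <string>
#include <vector>

static bool read_file(const std::string &path, std::vector<unsigned char> *out)
{
  std::ifstream in(path, std::ios::binary);
  if (!in)
    return false;
  out->assign(std::istreambuf_iterator<char>(in),
              std::istreambuf_iterator<char>());
  return true;
}

int frm_compare_sketch(const std::vector<unsigned char> &stored_frm,
                       const std::string &frm_path)
{
  std::vector<unsigned char> disk_frm;

  /* no frm on disk = nothing to compare against: report "equal" */
  if (!read_file(frm_path, &disk_frm))
    return 0;

  /* from here on, any mismatch or failure counts as "changed" */
  if (disk_frm.size() != stored_frm.size())
    return 1;

  return !std::equal(disk_frm.begin(), disk_frm.end(), stored_frm.begin());
}
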
@@ -684,9 +721,8 @@ int ha_archive::create(const char *name, TABLE *table_arg,
   char linkname[FN_REFLEN];
   int error;
   azio_stream create_stream;            /* Archive file we are working with */
-  File frm_file;                   /* File handler for readers */
-  MY_STAT file_stat;  // Stat information for the data file
   uchar *frm_ptr;
+  size_t frm_len;
 
   DBUG_ENTER("ha_archive::create");
 
@@ -735,56 +771,41 @@ int ha_archive::create(const char *name, TABLE *table_arg,
     There is a chance that the file was "discovered". In this case
     just use whatever file is there.
   */
-  if (!(mysql_file_stat(/* arch_key_file_data */ 0, name_buff, &file_stat, MYF(0))))
+  my_errno= 0;
+  if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY)))
   {
-    my_errno= 0;
-    if (!(azopen(&create_stream, name_buff, O_CREAT|O_RDWR|O_BINARY)))
-    {
-      error= errno;
-      goto error2;
-    }
+    error= errno;
+    goto error2;
+  }
 
-    if (linkname[0])
-      my_symlink(name_buff, linkname, MYF(0));
-    fn_format(name_buff, name, "", ".frm",
-              MY_REPLACE_EXT | MY_UNPACK_FILENAME);
+  if (linkname[0])
+    my_symlink(name_buff, linkname, MYF(0));
 
-    /*
-      Here is where we open up the frm and pass it to archive to store
-    */
-    if ((frm_file= mysql_file_open(arch_key_file_frm, name_buff, O_RDONLY, MYF(0))) >= 0)
-    {
-      if (!mysql_file_fstat(frm_file, &file_stat, MYF(MY_WME)))
-      {
-        frm_ptr= (uchar *)my_malloc(sizeof(uchar) * (size_t)file_stat.st_size, MYF(0));
-        if (frm_ptr)
-        {
-          mysql_file_read(frm_file, frm_ptr, (size_t)file_stat.st_size, MYF(0));
-          azwrite_frm(&create_stream, frm_ptr, (size_t)file_stat.st_size);
-          my_free(frm_ptr);
-        }
-      }
-      mysql_file_close(frm_file, MYF(0));
-    }
+  /*
+    Here is where we open up the frm and pass it to archive to store
+  */
+  readfrm(name, &frm_ptr, &frm_len);
+  if (frm_ptr)
+  {
+    azwrite_frm(&create_stream, frm_ptr, frm_len);
+    my_free(frm_ptr);
+  }
 
-    if (create_info->comment.str)
-      azwrite_comment(&create_stream, create_info->comment.str,
-                      create_info->comment.length);
+  if (create_info->comment.str)
+    azwrite_comment(&create_stream, create_info->comment.str,
+                    create_info->comment.length);
 
-    /*
-      Yes you need to do this, because the starting value
-      for the autoincrement may not be zero.
-    */
-    create_stream.auto_increment= stats.auto_increment_value ?
-                                    stats.auto_increment_value - 1 : 0;
-    if (azclose(&create_stream))
-    {
-      error= errno;
-      goto error2;
-    }
-  }
-  else
-    my_errno= 0;
+  /*
+    Yes you need to do this, because the starting value
+    for the autoincrement may not be zero.
+  */
+  create_stream.auto_increment= stats.auto_increment_value ?
+                                  stats.auto_increment_value - 1 : 0;
+  if (azclose(&create_stream))
+  {
+    error= errno;
+    goto error2;
+  }
 
   DBUG_PRINT("ha_archive", ("Creating File %s", name_buff));
   DBUG_PRINT("ha_archive", ("Creating Link %s", linkname));
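
The rewritten create() path above replaces the manual open/fstat/malloc/read/close of the .frm file (and with it the frm_file and file_stat locals and the arch_key_file_frm PSI key removed earlier) with a single readfrm() call that hands back the whole definition as one blob plus its length; the blob is then written into the archive with azwrite_frm() and freed. A small sketch of that shape (stream_write_frm is a hypothetical callback standing in for azwrite_frm(); this is not the azio API):

// Sketch only: read the whole table definition into one buffer and hand it to
// the stream writer, silently skipping when no .frm exists (mirrors the
// "if (frm_ptr)" guard in create() above).
#include <cstddef>
#include <fstream>
#include <functional>
#include <iterator>
#include <string>
#include <vector>

void store_definition(const std::string &frm_path,
                      const std::function<void(const unsigned char *,
                                               std::size_t)> &stream_write_frm)
{
  std::ifstream in(frm_path, std::ios::binary);
  std::vector<unsigned char> frm((std::istreambuf_iterator<char>(in)),
                                 std::istreambuf_iterator<char>());

  if (!frm.empty())                      // nothing read: nothing to store
    stream_write_frm(frm.data(), frm.size());
}
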
@@ -901,7 +922,6 @@ int ha_archive::write_row(uchar *buf)
   if (share->crashed)
     DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE);
 
-  ha_statistic_increment(&SSV::ha_write_count);
   mysql_mutex_lock(&share->mutex);
 
   if (!share->archive_write_open && init_archive_writer())
@@ -938,7 +958,8 @@ int ha_archive::write_row(uchar *buf)
     First we create a buffer that we can use for reading rows, and can pass
     to get_row().
   */
-  if (!(read_buf= (uchar*) my_malloc(table->s->reclength, MYF(MY_WME))))
+  if (!(read_buf= (uchar*) my_malloc(table->s->reclength,
+                                     MYF(MY_THREAD_SPECIFIC | MY_WME))))
   {
     rc= HA_ERR_OUT_OF_MEM;
     goto error;
@@ -1325,7 +1346,6 @@ int ha_archive::rnd_next(uchar *buf)
   }
   scan_rows--;
 
-  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
   current_position= aztell(&archive);
   rc= get_row(&archive, buf);
 
@@ -1361,7 +1381,6 @@ int ha_archive::rnd_pos(uchar * buf, uchar *pos)
 {
   int rc;
   DBUG_ENTER("ha_archive::rnd_pos");
-  ha_statistic_increment(&SSV::ha_read_rnd_next_count);
  current_position= (my_off_t)my_get_ptr(pos, ref_length);
  if (azseek(&archive, current_position, SEEK_SET) == (my_off_t)(-1L))
  {
@@ -51,7 +51,7 @@ typedef struct st_archive_share {
     Version for file format.
     1 - Initial Version (Never Released)
     2 - Stream Compression, seperate blobs, no packing
-    3 - One steam (row and blobs), with packing
+    3 - One stream (row and blobs), with packing
   */
 #define ARCHIVE_VERSION 3
 
@@ -76,6 +76,7 @@ class ha_archive: public handler
   archive_record_buffer *create_record_buffer(unsigned int length);
   void destroy_record_buffer(archive_record_buffer *r);
   int frm_copy(azio_stream *src, azio_stream *dst);
+  int frm_compare(azio_stream *src, const char *path);
 
 public:
   ha_archive(handlerton *hton, TABLE_SHARE *table_arg);