From c4a1c72b5ec935d4925ea9360bff47bb8a338358 Mon Sep 17 00:00:00 2001 From: "brian@zim.(none)" <> Date: Sat, 2 Dec 2006 00:30:49 -0800 Subject: [PATCH 1/4] Formailized the row buffer structure, implemented new streaming format. --- mysql-test/r/archive.result | 2 +- mysql-test/t/archive.test | 2 +- storage/archive/ha_archive.cc | 302 +++++++++++++++++++++++++--------- storage/archive/ha_archive.h | 26 ++- 4 files changed, 252 insertions(+), 80 deletions(-) diff --git a/mysql-test/r/archive.result b/mysql-test/r/archive.result index bf730908d35..0d931e83dce 100644 --- a/mysql-test/r/archive.result +++ b/mysql-test/r/archive.result @@ -1,4 +1,4 @@ -drop table if exists t1,t2,t3; +drop table if exists t1,t2,t3,t4,t5; CREATE TABLE t1 ( Period smallint(4) unsigned zerofill DEFAULT '0000' NOT NULL, Varor_period smallint(4) unsigned DEFAULT '0' NOT NULL diff --git a/mysql-test/t/archive.test b/mysql-test/t/archive.test index 497cd717ae2..7eaf12de358 100644 --- a/mysql-test/t/archive.test +++ b/mysql-test/t/archive.test @@ -6,7 +6,7 @@ -- source include/have_binlog_format_mixed_or_statement.inc --disable_warnings -drop table if exists t1,t2,t3; +drop table if exists t1,t2,t3,t4,t5; --enable_warnings CREATE TABLE t1 ( diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index cb701b07ed7..dec88ec4631 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -146,6 +146,11 @@ static handler *archive_create_handler(handlerton *hton, */ #define ARCHIVE_MIN_ROWS_TO_USE_BULK_INSERT 2 +/* + Size of header used for row +*/ +#define ARCHIVE_ROW_HEADER_SIZE 4 + static handler *archive_create_handler(handlerton *hton, TABLE_SHARE *table, MEM_ROOT *mem_root) @@ -248,6 +253,8 @@ int ha_archive::read_data_header(azio_stream *file_to_read) DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0])); DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1])); + + share->data_version= (uchar)data_buffer[1]; if 
((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) && (data_buffer[1] != (uchar)ARCHIVE_VERSION)) @@ -283,6 +290,7 @@ error: *rows will contain the current number of rows in the data file upon success. */ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, + uint *meta_version, ulonglong *auto_increment, ulonglong *forced_flushes, char *real_path) @@ -326,6 +334,8 @@ int ha_archive::read_meta_file(File meta_file, ha_rows *rows, DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path)); DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); + *meta_version= (uchar)meta_buffer[1]; + if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) || ((bool)(*ptr)== TRUE)) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -446,7 +456,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST)); if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) share->crashed= TRUE; - DBUG_PRINT("info", ("archive opening (1) up write at %s", + DBUG_PRINT("ha_archive", ("archive opening (1) up write at %s", share->data_file_name)); /* @@ -454,6 +464,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, a write. 
*/ if (read_meta_file(share->meta_file, &share->rows_recorded, + &share->meta_version, &share->auto_increment_value, &share->forced_flushes, share->real_path)) @@ -468,7 +479,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, thr_lock_init(&share->lock); } share->use_count++; - DBUG_PRINT("info", ("archive table %.*s has %d open handles now", + DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles now", share->table_name_length, share->table_name, share->use_count)); if (share->crashed) @@ -487,7 +498,7 @@ int ha_archive::free_share(ARCHIVE_SHARE *share) { int rc= 0; DBUG_ENTER("ha_archive::free_share"); - DBUG_PRINT("info", ("archive table %.*s has %d open handles on entrance", + DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance", share->table_name_length, share->table_name, share->use_count)); @@ -539,7 +550,7 @@ int ha_archive::init_archive_writer() if (!(azopen(&(share->archive_write), share->data_file_name, O_WRONLY|O_APPEND|O_BINARY))) { - DBUG_PRINT("info", ("Could not open archive write file")); + DBUG_PRINT("ha_archive", ("Could not open archive write file")); share->crashed= TRUE; DBUG_RETURN(1); } @@ -575,7 +586,7 @@ int ha_archive::open(const char *name, int mode, uint open_options) int rc= 0; DBUG_ENTER("ha_archive::open"); - DBUG_PRINT("info", ("archive table was opened for crash: %s", + DBUG_PRINT("ha_archive", ("archive table was opened for crash: %s", (open_options & HA_OPEN_FOR_REPAIR) ? 
"yes" : "no")); share= get_share(name, table, &rc); @@ -589,9 +600,17 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_RETURN(rc); } - thr_lock_data_init(&share->lock,&lock,NULL); + record_buffer= create_record_buffer(table->s->reclength); - DBUG_PRINT("info", ("archive data_file_name %s", share->data_file_name)); + if (!record_buffer) + { + free_share(share); + DBUG_RETURN(HA_ERR_OUT_OF_MEM); + } + + thr_lock_data_init(&share->lock, &lock, NULL); + + DBUG_PRINT("ha_archive", ("archive data_file_name %s", share->data_file_name)); if (!(azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY))) { if (errno == EROFS || errno == EACCES) @@ -599,7 +618,7 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); } - DBUG_PRINT("info", ("archive table was crashed %s", + DBUG_PRINT("ha_archive", ("archive table was crashed %s", rc == HA_ERR_CRASHED_ON_USAGE ? "yes" : "no")); if (rc == HA_ERR_CRASHED_ON_USAGE && open_options & HA_OPEN_FOR_REPAIR) { @@ -632,6 +651,8 @@ int ha_archive::close(void) int rc= 0; DBUG_ENTER("ha_archive::close"); + destroy_record_buffer(record_buffer); + /* First close stream */ if (azclose(&archive)) rc= 1; @@ -676,7 +697,7 @@ int ha_archive::create(const char *name, TABLE *table_arg, if (!(field->flags & AUTO_INCREMENT_FLAG)) { error= -1; - DBUG_PRINT("info", ("Index error in creating archive table")); + DBUG_PRINT("ha_archive", ("Index error in creating archive table")); goto error; } } @@ -701,7 +722,7 @@ int ha_archive::create(const char *name, TABLE *table_arg, if (create_info->data_file_name) { char linkname[FN_REFLEN]; - DBUG_PRINT("info", ("archive will create stream file %s", + DBUG_PRINT("ha_archive", ("archive will create stream file %s", create_info->data_file_name)); fn_format(name_buff, create_info->data_file_name, "", ARZ, @@ -762,37 +783,74 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer) { my_off_t written; uint *ptr, *end; + int 
r_pack_length; + byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; // Longest possible row length with blobs DBUG_ENTER("ha_archive::real_write_row"); - written= azwrite(writer, buf, table->s->reclength); + // We pack the row for writing + r_pack_length= pack_row(buf); + DBUG_PRINT("ha_archive",("Pack row length %d", r_pack_length)); + + // Store the size of the row before the row + bzero(size_buffer, ARCHIVE_ROW_HEADER_SIZE); + int4store(size_buffer, (int)r_pack_length); + DBUG_PRINT("ha_archive",("Pack %d %d %d %d", size_buffer[0], size_buffer[1], size_buffer[2], size_buffer[3])); + azwrite(writer, size_buffer, ARCHIVE_ROW_HEADER_SIZE); + + written= azwrite(writer, record_buffer->buffer, r_pack_length); DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", (uint32)written, - (uint32)table->s->reclength)); + (uint32)r_pack_length)); if (!delayed_insert || !bulk_insert) share->dirty= TRUE; - if (written != (my_off_t)table->s->reclength) + if (written != (my_off_t)r_pack_length) DBUG_RETURN(errno ? errno : -1); - /* - We should probably mark the table as damagaged if the record is written - but the blob fails. - */ - for (ptr= table->s->blob_field, end= ptr + table->s->blob_fields ; + + DBUG_RETURN(0); +} + + +/* Calculate max length needed for row */ + +int ha_archive::max_row_length(const byte *buf) +{ + ulonglong length= table->s->reclength + table->s->fields*2; + + uint *ptr, *end; + for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; ptr != end ; ptr++) { - char *data_ptr; - uint32 size= ((Field_blob*) table->field[*ptr])->get_length(); - - if (size) - { - ((Field_blob*) table->field[*ptr])->get_ptr(&data_ptr); - written= azwrite(writer, data_ptr, (unsigned)size); - if (written != (my_off_t)size) - DBUG_RETURN(errno ? 
errno : -1); - } + Field_blob *blob= ((Field_blob*) table->field[*ptr]); + length+= blob->get_length((char*) buf + blob->offset())+2; } - DBUG_RETURN(0); + + return length; +} + + +int ha_archive::pack_row(const byte *record) +{ + byte *ptr; + + DBUG_ENTER("ha_archive::pack_row"); + + if (table->s->blob_fields) + { + if (fix_rec_buff(max_row_length(record))) + DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */ + } + + /* Copy null bits */ + memcpy(record_buffer->buffer, record, table->s->null_bytes); + ptr= record_buffer->buffer + table->s->null_bytes; + + for (Field **field=table->field ; *field ; field++) + ptr=(byte*) (*field)->pack((char*) ptr, + (char*) record + (*field)->offset()); + + DBUG_RETURN((size_t) (ptr - record_buffer->buffer)); } @@ -809,7 +867,9 @@ int ha_archive::write_row(byte *buf) { int rc; byte *read_buf= NULL; + byte *ptr; ulonglong temp_auto; + DBUG_ENTER("ha_archive::write_row"); if (share->crashed) @@ -866,12 +926,6 @@ int ha_archive::write_row(byte *buf) goto error; } - /* - Now we read and check all of the rows. - if (!memcmp(table->next_number_field->ptr, mfield->ptr, mfield->max_length())) - if ((longlong)temp_auto == - mfield->val_int((char*)(read_buf + mfield->offset()))) - */ Field *mfield= table->next_number_field; while (!(get_row(&archive, read_buf))) @@ -899,37 +953,8 @@ int ha_archive::write_row(byte *buf) if (init_archive_writer()) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - /* - Varchar structures are constant in size but are not cleaned up request - to request. The following sets all unused space to null to improve - compression. - */ - for (Field **field=table->field ; *field ; field++) - { - /* - Pack length will report 256 when you have 255 bytes - of data plus the single byte for length. - - Probably could have added a method to say the number - of bytes taken up by field for the length data. - */ - uint32 actual_length= (*field)->data_length() + - ((*field)->pack_length() > 256 ? 
2 : 1); - - if ((*field)->real_type() == MYSQL_TYPE_VARCHAR) - { - char *ptr= (*field)->ptr + actual_length; - - DBUG_ASSERT(actual_length <= (*field)->pack_length()); - - uint32 to_free= (*field)->pack_length() - actual_length; - if (to_free > 0) - bzero(ptr, to_free); - } - } - share->rows_recorded++; - rc= real_write_row(buf, &(share->archive_write)); + rc= real_write_row(buf, &(share->archive_write)); error: pthread_mutex_unlock(&share->mutex); if (read_buf) @@ -1054,7 +1079,7 @@ int ha_archive::rnd_init(bool scan) if (scan) { scan_rows= share->rows_recorded; - DBUG_PRINT("info", ("archive will retrieve %llu rows", + DBUG_PRINT("ha_archive", ("archive will retrieve %llu rows", (unsigned long long)scan_rows)); stats.records= 0; @@ -1067,7 +1092,7 @@ int ha_archive::rnd_init(bool scan) pthread_mutex_lock(&share->mutex); if (share->dirty == TRUE) { - DBUG_PRINT("info", ("archive flushing out rows for scan")); + DBUG_PRINT("ha_archive", ("archive flushing out rows for scan")); azflush(&(share->archive_write), Z_SYNC_FLUSH); share->forced_flushes++; share->dirty= FALSE; @@ -1088,16 +1113,90 @@ int ha_archive::rnd_init(bool scan) positioned where you want it. */ int ha_archive::get_row(azio_stream *file_to_read, byte *buf) +{ + int rc; + DBUG_ENTER("ha_archive::get_row"); + if (share->data_version == ARCHIVE_VERSION) + rc= get_row_version3(file_to_read, buf); + else + rc= get_row_version2(file_to_read, buf); + + DBUG_PRINT("ha_archive", ("Return %d\n", rc)); + + DBUG_RETURN(rc); +} + +/* Reallocate buffer if needed */ +bool ha_archive::fix_rec_buff(int length) +{ + if (! 
record_buffer->buffer || length > record_buffer->length) + { + byte *newptr; + if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer, length, + MYF(MY_ALLOW_ZERO_PTR)))) + return 1; /* purecov: inspected */ + record_buffer->buffer= newptr; + record_buffer->length= length; + } + return 0; +} + +int ha_archive::unpack_row(azio_stream *file_to_read, char *record) +{ + DBUG_ENTER("ha_archive::unpack_row"); + + int read; // Bytes read, azread() returns int + byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; + int row_len; + + /* First we grab the length stored */ + read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE); + + if (read == Z_STREAM_ERROR) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + + /* If we read nothing we are at the end of the file */ + if (read == 0 || read != ARCHIVE_ROW_HEADER_SIZE) + DBUG_RETURN(HA_ERR_END_OF_FILE); + + row_len= sint4korr(size_buffer); + DBUG_PRINT("ha_archive",("Unpack row length %d -> %llu", row_len, + (unsigned long long)table->s->reclength)); + fix_rec_buff(row_len); + + if (azread(file_to_read, record_buffer->buffer, row_len) != row_len) + DBUG_RETURN(-1); + + /* Copy null bits */ + const char *ptr= (const char*) record_buffer->buffer; + memcpy(record, ptr, table->s->null_bytes); + ptr+= table->s->null_bytes; + for (Field **field=table->field ; *field ; field++) + ptr= (*field)->unpack(record + (*field)->offset(), ptr); + + DBUG_RETURN(0); +} + + +int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf) +{ + DBUG_ENTER("ha_archive::get_row_version3"); + int returnable= unpack_row(file_to_read, buf); + DBUG_RETURN(returnable); +} + + +int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) { int read; // Bytes read, azread() returns int uint *ptr, *end; char *last; size_t total_blob_length= 0; MY_BITMAP *read_set= table->read_set; - DBUG_ENTER("ha_archive::get_row"); + DBUG_ENTER("ha_archive::get_row_version2"); read= azread(file_to_read, buf, table->s->reclength); - 
DBUG_PRINT("ha_archive::get_row", ("Read %d bytes expected %lu", read, + DBUG_PRINT("ha_archive::get_row_version2", ("Read %d bytes expected %lu", read, (unsigned long)table->s->reclength)); if (read == Z_STREAM_ERROR) @@ -1266,8 +1365,11 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) if (check_opt->flags == T_EXTEND) { - DBUG_PRINT("info", ("archive extended rebuild")); + DBUG_PRINT("ha_archive", ("archive extended rebuild")); byte *buf; + archive_record_buffer *write_buffer, *read_buffer, *original_buffer; + + original_buffer= record_buffer; /* First we create a buffer that we can use for reading rows, and can pass @@ -1279,6 +1381,15 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) goto error; } + read_buffer= create_record_buffer(record_buffer->length); + write_buffer= create_record_buffer(record_buffer->length); + + if (!write_buffer || !read_buffer) + { + rc= HA_ERR_OUT_OF_MEM; + goto error; + } + /* Now we will rewind the archive file so that we are positioned at the start of the file. 
@@ -1300,8 +1411,11 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) { share->rows_recorded= 0; stats.auto_increment_value= share->auto_increment_value= 0; + record_buffer= read_buffer; + while (!(rc= get_row(&archive, buf))) { + record_buffer= write_buffer; real_write_row(buf, &writer); if (table->found_next_number_field) { @@ -1313,18 +1427,24 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) auto_value; } share->rows_recorded++; + record_buffer= read_buffer; } + } - DBUG_PRINT("info", ("recovered %llu archive rows", + + DBUG_PRINT("ha_archive", ("recovered %llu archive rows", (unsigned long long)share->rows_recorded)); + record_buffer= original_buffer; + destroy_record_buffer(read_buffer); + destroy_record_buffer(write_buffer); my_free((char*)buf, MYF(0)); if (rc && rc != HA_ERR_END_OF_FILE) goto error; } else { - DBUG_PRINT("info", ("archive quick rebuild")); + DBUG_PRINT("ha_archive", ("archive quick rebuild")); /* The quick method is to just read the data raw, and then compress it directly. */ @@ -1333,7 +1453,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) if (azrewind(&archive) == -1) { rc= HA_ERR_CRASHED_ON_USAGE; - DBUG_PRINT("info", ("archive HA_ERR_CRASHED_ON_USAGE")); + DBUG_PRINT("ha_archive", ("archive HA_ERR_CRASHED_ON_USAGE")); goto error; } @@ -1359,12 +1479,12 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) know it failed. We also need to reopen our read descriptor since it has changed. 
*/ - DBUG_PRINT("info", ("Reopening archive data file")); + DBUG_PRINT("ha_archive", ("Reopening archive data file")); if (!azopen(&(share->archive_write), share->data_file_name, O_WRONLY|O_APPEND|O_BINARY) || !azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)) { - DBUG_PRINT("info", ("Could not open archive write file")); + DBUG_PRINT("ha_archive", ("Could not open archive write file")); rc= HA_ERR_CRASHED_ON_USAGE; } @@ -1577,6 +1697,36 @@ bool ha_archive::check_and_repair(THD *thd) DBUG_RETURN(repair(thd, &check_opt)); } +archive_record_buffer *ha_archive::create_record_buffer(ulonglong length) +{ + DBUG_ENTER("ha_archive::create_record_buffer"); + archive_record_buffer *r; + if (!(r= + (archive_record_buffer*) my_malloc(sizeof(archive_record_buffer), + MYF(MY_WME)))) + { + DBUG_RETURN(NULL); /* purecov: inspected */ + } + r->length= (int)length; + + if (!(r->buffer= (byte*) my_malloc(r->length, + MYF(MY_WME)))) + { + my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_RETURN(NULL); /* purecov: inspected */ + } + + DBUG_RETURN(r); +} + +void ha_archive::destroy_record_buffer(archive_record_buffer *r) +{ + DBUG_ENTER("ha_archive::destroy_record_buffer"); + my_free((char*) r->buffer, MYF(MY_ALLOW_ZERO_PTR)); + my_free((char*) r, MYF(MY_ALLOW_ZERO_PTR)); + DBUG_VOID_RETURN; +} + struct st_mysql_storage_engine archive_storage_engine= { MYSQL_HANDLERTON_INTERFACE_VERSION }; @@ -1590,7 +1740,7 @@ mysql_declare_plugin(archive) PLUGIN_LICENSE_GPL, archive_db_init, /* Plugin Init */ archive_db_done, /* Plugin Deinit */ - 0x0100 /* 1.0 */, + 0x0300 /* 1.0 */, NULL, /* status variables */ NULL, /* system variables */ NULL /* config options */ diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 75ca29e640a..2e2e7a114f1 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -27,6 +27,12 @@ ha_example.h. 
*/ +typedef struct st_archive_record_buffer { + byte *buffer; + int length; +} archive_record_buffer; + + typedef struct st_archive_share { char *table_name; char data_file_name[FN_REFLEN]; @@ -43,18 +49,23 @@ typedef struct st_archive_share { ulonglong forced_flushes; ulonglong mean_rec_length; char real_path[FN_REFLEN]; + uint meta_version; + uint data_version; } ARCHIVE_SHARE; /* Version for file format. - 1 - Initial Version + 1 - Initial Version (Never Released) + 2 - Stream Compression, seperate blobs, no packing + 3 - One steam (row and blobs), with packing */ -#define ARCHIVE_VERSION 2 +#define ARCHIVE_VERSION 3 class ha_archive: public handler { THR_LOCK_DATA lock; /* MySQL lock */ ARCHIVE_SHARE *share; /* Shared lock info */ + azio_stream archive; /* Archive file we are working with */ my_off_t current_position; /* The position of the row we just read */ byte byte_buffer[IO_SIZE]; /* Initial buffer for our string */ @@ -65,6 +76,10 @@ class ha_archive: public handler const byte *current_key; uint current_key_len; uint current_k_offset; + archive_record_buffer *record_buffer; + + archive_record_buffer *create_record_buffer(ulonglong length); + void destroy_record_buffer(archive_record_buffer *r); public: ha_archive(handlerton *hton, TABLE_SHARE *table_arg); @@ -105,7 +120,10 @@ public: int rnd_next(byte *buf); int rnd_pos(byte * buf, byte *pos); int get_row(azio_stream *file_to_read, byte *buf); + int get_row_version2(azio_stream *file_to_read, byte *buf); + int get_row_version3(azio_stream *file_to_read, byte *buf); int read_meta_file(File meta_file, ha_rows *rows, + uint *meta_version, ulonglong *auto_increment, ulonglong *forced_flushes, char *real_path); @@ -137,5 +155,9 @@ public: bool is_crashed() const; int check(THD* thd, HA_CHECK_OPT* check_opt); bool check_and_repair(THD *thd); + int max_row_length(const byte *buf); + bool fix_rec_buff(int length); + int unpack_row(azio_stream *file_to_read, char *record); + int pack_row(const byte *record); }; 
From 4eba36ee3be195a42521fb257f18fc763b34dbd4 Mon Sep 17 00:00:00 2001 From: "brian@zim.(none)" <> Date: Sun, 3 Dec 2006 22:09:32 -0800 Subject: [PATCH 2/4] Fixed problems with original gzio() functions not being able to correctly return lengths of long rows. --- storage/archive/archive_test.c | 127 +++++++++++++++++++++++++----- storage/archive/azio.c | 50 ++++++++---- storage/archive/azlib.h | 4 +- storage/archive/ha_archive.cc | 136 ++++++++++++++++++++++++--------- storage/archive/ha_archive.h | 2 +- 5 files changed, 247 insertions(+), 72 deletions(-) diff --git a/storage/archive/archive_test.c b/storage/archive/archive_test.c index fd4b8385069..753fdb8287a 100644 --- a/storage/archive/archive_test.c +++ b/storage/archive/archive_test.c @@ -1,48 +1,133 @@ #include "azlib.h" +#include +#include #include -#define TEST_STRING "This is a test" +#define TEST_FILENAME "test.az" +#define TEST_STRING "YOU don't know about me without you have read a book by the name of The Adventures of Tom Sawyer; but that ain't no matter. That book was made by Mr. Mark Twain, and he told the truth, mainly. There was things which he stretched, but mainly he told the truth. That is nothing. I never seen anybody but lied one time or another, without it was Aunt Polly, or the widow, or maybe Mary. Aunt Polly--Tom's Aunt Polly, she is--and Mary, and the Widow Douglas is all told about in that book, which is mostly a true book, with some stretchers, as I said before. Now the way that the book winds up is this: Tom and me found the money that the robbers hid in the cave, and it made us rich. We got six thousand dollars apiece--all gold. It was an awful sight of money when it was piled up. Well, Judge Thatcher he took it and put it out at interest, and it fetched us a dollar a day apiece all the year round --more than a body could tell what to do with. The Widow Douglas she took me for her son, and allowed she would..." 
#define BUFFER_LEN 1024 +#define TWOGIG 2147483648 +#define FOURGIG 4294967296 int main(int argc __attribute__((unused)), char *argv[]) { - int ret; - azio_stream foo, foo1; + unsigned long ret; + int error; + azio_stream writer_handle, reader_handle; char buffer[BUFFER_LEN]; + unsigned long write_length; + unsigned long read_length= 0; MY_INIT(argv[0]); - if (!(ret= azopen(&foo, "test", O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) { printf("Could not create test file\n"); return 0; } - azwrite(&foo, TEST_STRING, sizeof(TEST_STRING)); - azflush(&foo, Z_FINISH); + ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); + assert(ret == BUFFER_LEN); + azflush(&writer_handle, Z_FINISH); - if (!(ret= azopen(&foo1, "test", O_RDONLY|O_BINARY))) + if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY))) { printf("Could not open test file\n"); return 0; } - ret= azread(&foo1, buffer, BUFFER_LEN); - printf("Read %d bytes\n", ret); - printf("%s\n", buffer); - azrewind(&foo1); - azclose(&foo); - if (!(ret= azopen(&foo, "test", O_APPEND|O_WRONLY|O_BINARY))) + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN); + + azrewind(&reader_handle); + azclose(&writer_handle); + + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_APPEND|O_WRONLY|O_BINARY))) + { + printf("Could not open file (%s) for appending\n", TEST_FILENAME); + return 0; + } + ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); + assert(ret == BUFFER_LEN); + azflush(&writer_handle, Z_FINISH); + + /* Read the original data */ + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN); + assert(ret == BUFFER_LEN); + assert(!error); + + /* Read the new data */ + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN); + assert(ret == BUFFER_LEN); 
+ assert(!error); + + azclose(&writer_handle); + azclose(&reader_handle); + unlink(TEST_FILENAME); + + /* Start size tests */ + printf("About to run 2gig and 4gig test now, you may want to hit CTRL-C\n"); + + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) { printf("Could not create test file\n"); return 0; } - azwrite(&foo, TEST_STRING, sizeof(TEST_STRING)); - azflush(&foo, Z_FINISH); - ret= azread(&foo1, buffer, BUFFER_LEN); - printf("Read %d bytes\n", ret); - printf("%s\n", buffer); - azclose(&foo); - azclose(&foo1); - /* unlink("test"); */ + for (write_length= 0; write_length < TWOGIG ; write_length+= ret) + { + ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); + assert(!error); + if (ret != BUFFER_LEN) + { + printf("Size %lu\n", ret); + assert(ret != BUFFER_LEN); + } + } + assert(write_length == TWOGIG); + printf("Read %lu bytes, expected %lu\n", write_length, TWOGIG); + azflush(&writer_handle, Z_FINISH); + + printf("Reading back data\n"); + + if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY))) + { + printf("Could not open test file\n"); + return 0; + } + + while ((ret= azread(&reader_handle, buffer, BUFFER_LEN, &error))) + { + read_length+= ret; + assert(!memcmp(buffer, TEST_STRING, ret)); + if (ret != BUFFER_LEN) + { + printf("Size %lu\n", ret); + assert(ret != BUFFER_LEN); + } + } + + assert(read_length == TWOGIG); + azclose(&writer_handle); + azclose(&reader_handle); + unlink(TEST_FILENAME); + + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) + { + printf("Could not create test file\n"); + return 0; + } + + for (write_length= 0; write_length < FOURGIG ; write_length+= ret) + { + ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); + assert(ret == BUFFER_LEN); + } + assert(write_length == FOURGIG); + printf("Read %lu bytes, expected %lu\n", write_length, FOURGIG); + azclose(&writer_handle); + azclose(&reader_handle); + unlink(TEST_FILENAME); + return 0; } 
diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 7cbe6a2a17d..8e111f4f50f 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -1,5 +1,6 @@ /* azio is a modified version of gzio. It makes use of mysys and removes mallocs. + -Brian Aker */ /* gzio.c -- IO on .gz files @@ -292,15 +293,28 @@ int destroy (s) Reads the given number of uncompressed bytes from the compressed file. azread returns the number of bytes actually read (0 for end of file). */ -int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len) +unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int *error) { Bytef *start = (Bytef*)buf; /* starting point for crc computation */ Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */ + *error= 0; - if (s->mode != 'r') return Z_STREAM_ERROR; + if (s->mode != 'r') + { + *error= Z_STREAM_ERROR; + return 0; + } - if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) return -1; - if (s->z_err == Z_STREAM_END) return 0; /* EOF */ + if (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO) + { + *error= s->z_err; + return 0; + } + + if (s->z_err == Z_STREAM_END) /* EOF */ + { + return 0; + } next_out = (Byte*)buf; s->stream.next_out = (Bytef*)buf; @@ -315,7 +329,9 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len) start++; if (s->last) { s->z_err = Z_STREAM_END; - return 1; + { + return 1; + } } } @@ -342,7 +358,9 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len) s->in += len; s->out += len; if (len == 0) s->z_eof = 1; - return (int)len; + { + return len; + } } if (s->stream.avail_in == 0 && !s->z_eof) { @@ -386,8 +404,13 @@ int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned len) if (len == s->stream.avail_out && (s->z_err == Z_DATA_ERROR || s->z_err == Z_ERRNO)) - return -1; - return (int)(len - s->stream.avail_out); + { + *error= s->z_err; + + return 0; + } + + return (len - s->stream.avail_out); } @@ -396,7 +419,7 @@ int ZEXPORT azread ( azio_stream *s, 
voidp buf, unsigned len) Writes the given number of uncompressed bytes into the compressed file. azwrite returns the number of bytes actually written (0 in case of error). */ -int azwrite (azio_stream *s, voidpc buf, unsigned len) +unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len) { s->stream.next_in = (Bytef*)buf; @@ -424,7 +447,7 @@ int azwrite (azio_stream *s, voidpc buf, unsigned len) } s->crc = crc32(s->crc, (const Bytef *)buf, len); - return (int)(len - s->stream.avail_in); + return (unsigned long)(len - s->stream.avail_in); } #endif @@ -580,11 +603,12 @@ my_off_t azseek (s, offset, whence) if (s->last) s->z_err = Z_STREAM_END; } while (offset > 0) { - int size = Z_BUFSIZE; + int error; + unsigned long size = Z_BUFSIZE; if (offset < Z_BUFSIZE) size = (int)offset; - size = azread(s, s->outbuf, (uInt)size); - if (size <= 0) return -1L; + size = azread(s, s->outbuf, size, &error); + if (error <= 0) return -1L; offset -= size; } return s->out; diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h index e63d1ed9997..7e20387ff0a 100644 --- a/storage/archive/azlib.h +++ b/storage/archive/azlib.h @@ -206,7 +206,7 @@ int azdopen(azio_stream *s,File fd, int Flags); */ -extern int azread(azio_stream *file, voidp buf, unsigned len); +extern unsigned long azread ( azio_stream *s, voidp buf, unsigned long len, int *error); /* Reads the given number of uncompressed bytes from the compressed file. If the input file was not in gzip format, gzread copies the given number @@ -214,7 +214,7 @@ extern int azread(azio_stream *file, voidp buf, unsigned len); gzread returns the number of uncompressed bytes actually read (0 for end of file, -1 for error). */ -extern int azwrite (azio_stream *file, voidpc buf, unsigned len); +extern unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len); /* Writes the given number of uncompressed bytes into the compressed file. 
gzwrite returns the number of uncompressed bytes actually written diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index dec88ec4631..87312331aae 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -242,19 +242,34 @@ ha_archive::ha_archive(handlerton *hton, TABLE_SHARE *table_arg) */ int ha_archive::read_data_header(azio_stream *file_to_read) { + int error; + unsigned long ret; uchar data_buffer[DATA_BUFFER_SIZE]; DBUG_ENTER("ha_archive::read_data_header"); if (azrewind(file_to_read) == -1) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - if (azread(file_to_read, data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE) - DBUG_RETURN(errno ? errno : -1); + ret= azread(file_to_read, data_buffer, DATA_BUFFER_SIZE, &error); + + if (ret != DATA_BUFFER_SIZE) + { + DBUG_PRINT("ha_archive", ("Reading, expected %lu got %lu", + DATA_BUFFER_SIZE, ret)); + DBUG_RETURN(1); + } + + if (error) + { + DBUG_PRINT("ha_archive", ("Compression error (%d)", error)); + DBUG_RETURN(1); + } - DBUG_PRINT("ha_archive::read_data_header", ("Check %u", data_buffer[0])); - DBUG_PRINT("ha_archive::read_data_header", ("Version %u", data_buffer[1])); + DBUG_PRINT("ha_archive", ("Check %u", data_buffer[0])); + DBUG_PRINT("ha_archive", ("Version %u", data_buffer[1])); share->data_version= (uchar)data_buffer[1]; + DBUG_PRINT("ha_archive", ("Set Version %u", share->data_version)); if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) && (data_buffer[1] != (uchar)ARCHIVE_VERSION)) @@ -277,11 +292,12 @@ int ha_archive::write_data_header(azio_stream *file_to_write) if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) != DATA_BUFFER_SIZE) goto error; - DBUG_PRINT("ha_archive::write_data_header", ("Check %u", (uint)data_buffer[0])); - DBUG_PRINT("ha_archive::write_data_header", ("Version %u", (uint)data_buffer[1])); + DBUG_PRINT("ha_archive", ("Check %u", (uint)data_buffer[0])); + DBUG_PRINT("ha_archive", ("Version %u", (uint)data_buffer[1])); DBUG_RETURN(0); 
error: + DBUG_PRINT("ha_archive", ("Could not write full data header")); DBUG_RETURN(errno); } @@ -783,30 +799,37 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer) { my_off_t written; uint *ptr, *end; - int r_pack_length; + unsigned long r_pack_length; byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; // Longest possible row length with blobs DBUG_ENTER("ha_archive::real_write_row"); // We pack the row for writing r_pack_length= pack_row(buf); - DBUG_PRINT("ha_archive",("Pack row length %d", r_pack_length)); + DBUG_PRINT("ha_archive",("Pack row length %lu", r_pack_length)); // Store the size of the row before the row bzero(size_buffer, ARCHIVE_ROW_HEADER_SIZE); int4store(size_buffer, (int)r_pack_length); - DBUG_PRINT("ha_archive",("Pack %d %d %d %d", size_buffer[0], size_buffer[1], size_buffer[2], size_buffer[3])); - azwrite(writer, size_buffer, ARCHIVE_ROW_HEADER_SIZE); + written= azwrite(writer, size_buffer, ARCHIVE_ROW_HEADER_SIZE); + + if (written != ARCHIVE_ROW_HEADER_SIZE) + { + DBUG_PRINT("ha_archive", ("Died writing row header")); + DBUG_RETURN(-1); + } written= azwrite(writer, record_buffer->buffer, r_pack_length); - DBUG_PRINT("ha_archive::real_write_row", ("Wrote %d bytes expected %d", - (uint32)written, - (uint32)r_pack_length)); + if (written != r_pack_length) + { + DBUG_PRINT("ha_archive", ("Wrote %llu bytes expected %lu", + (unsigned long long) written, + r_pack_length)); + DBUG_RETURN(-1); + } + if (!delayed_insert || !bulk_insert) share->dirty= TRUE; - if (written != (my_off_t)r_pack_length) - DBUG_RETURN(errno ? 
errno : -1); - DBUG_RETURN(0); } @@ -830,7 +853,7 @@ int ha_archive::max_row_length(const byte *buf) } -int ha_archive::pack_row(const byte *record) +unsigned long ha_archive::pack_row(const byte *record) { byte *ptr; @@ -850,7 +873,7 @@ int ha_archive::pack_row(const byte *record) ptr=(byte*) (*field)->pack((char*) ptr, (char*) record + (*field)->offset()); - DBUG_RETURN((size_t) (ptr - record_buffer->buffer)); + DBUG_RETURN((unsigned long) (ptr - record_buffer->buffer)); } @@ -1116,6 +1139,8 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf) { int rc; DBUG_ENTER("ha_archive::get_row"); + DBUG_PRINT("ha_archive", ("Picking version for get_row() %d -> %d", + share->data_version, ARCHIVE_VERSION)); if (share->data_version == ARCHIVE_VERSION) rc= get_row_version3(file_to_read, buf); else @@ -1145,14 +1170,15 @@ int ha_archive::unpack_row(azio_stream *file_to_read, char *record) { DBUG_ENTER("ha_archive::unpack_row"); - int read; // Bytes read, azread() returns int + unsigned long read; + int error; byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; - int row_len; + unsigned long row_len; /* First we grab the length stored */ - read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE); + read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error); - if (read == Z_STREAM_ERROR) + if (error == Z_STREAM_ERROR || (read && read < ARCHIVE_ROW_HEADER_SIZE)) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* If we read nothing we are at the end of the file */ @@ -1160,12 +1186,16 @@ int ha_archive::unpack_row(azio_stream *file_to_read, char *record) DBUG_RETURN(HA_ERR_END_OF_FILE); row_len= sint4korr(size_buffer); - DBUG_PRINT("ha_archive",("Unpack row length %d -> %llu", row_len, - (unsigned long long)table->s->reclength)); + DBUG_PRINT("ha_archive",("Unpack row length %lu -> %lu", row_len, + (unsigned long)table->s->reclength)); fix_rec_buff(row_len); - if (azread(file_to_read, record_buffer->buffer, row_len) != row_len) + read= 
azread(file_to_read, record_buffer->buffer, row_len, &error); + + if (read != row_len || error) + { DBUG_RETURN(-1); + } /* Copy null bits */ const char *ptr= (const char*) record_buffer->buffer; @@ -1188,18 +1218,25 @@ int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf) int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) { - int read; // Bytes read, azread() returns int + unsigned long read; + int error; uint *ptr, *end; char *last; size_t total_blob_length= 0; MY_BITMAP *read_set= table->read_set; DBUG_ENTER("ha_archive::get_row_version2"); - read= azread(file_to_read, buf, table->s->reclength); - DBUG_PRINT("ha_archive::get_row_version2", ("Read %d bytes expected %lu", read, - (unsigned long)table->s->reclength)); + read= azread(file_to_read, buf, table->s->reclength, &error); - if (read == Z_STREAM_ERROR) + if (read != table->s->reclength) + { + DBUG_PRINT("ha_archive::get_row_version2", ("Read %lu bytes expected %lu", + read, + (unsigned long)table->s->reclength)); + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + } + + if (error == Z_STREAM_ERROR || error == Z_DATA_ERROR ) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* If we read nothing we are at the end of the file */ @@ -1238,7 +1275,11 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) if (bitmap_is_set(read_set, ((Field_blob*) table->field[*ptr])->field_index)) { - read= azread(file_to_read, last, size); + read= azread(file_to_read, last, size, &error); + + if (error) + DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + if ((size_t) read != size) DBUG_RETURN(HA_ERR_END_OF_FILE); ((Field_blob*) table->field[*ptr])->set_ptr(size, last); @@ -1448,17 +1489,42 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) /* The quick method is to just read the data raw, and then compress it directly. 
*/ - int read; // Bytes read, azread() returns int + unsigned long read, written; + int error; char block[IO_SIZE]; if (azrewind(&archive) == -1) { rc= HA_ERR_CRASHED_ON_USAGE; - DBUG_PRINT("ha_archive", ("archive HA_ERR_CRASHED_ON_USAGE")); + DBUG_PRINT("ha_archive", ("crashed on rewinding file")); goto error; } - while ((read= azread(&archive, block, IO_SIZE)) > 0) - azwrite(&writer, block, read); + while ((read= azread(&archive, block, IO_SIZE, &error)) > 0) + { + if (error) + { + rc= HA_ERR_CRASHED_ON_USAGE; + DBUG_PRINT("ha_archive", ("azread error on read")); + goto error; + } + + written= azwrite(&writer, block, read); + if (written != read) + { + rc= HA_ERR_CRASHED_ON_USAGE; + DBUG_PRINT("ha_archive::real_write_row", + ("Crashed wrote %lu bytes expected %lu", + written, read)); + goto error; + } + } + + if (error) + { + rc= HA_ERR_CRASHED_ON_USAGE; + DBUG_PRINT("ha_archive", ("retrieved zero blocks and error'ed")); + goto error; + } } azclose(&writer); diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 2e2e7a114f1..d451cf69488 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -158,6 +158,6 @@ public: int max_row_length(const byte *buf); bool fix_rec_buff(int length); int unpack_row(azio_stream *file_to_read, char *record); - int pack_row(const byte *record); + unsigned long pack_row(const byte *record); }; From f85acb894ce204344005d33187bbcc1bd4ce6a86 Mon Sep 17 00:00:00 2001 From: "brian@zim.(none)" <> Date: Mon, 4 Dec 2006 22:01:48 -0800 Subject: [PATCH 3/4] This updates archive to use new header information in az files and makes way for combining the meta data file with the AZ file. 
--- storage/archive/azio.c | 151 +++++++++++++++++++++++----------- storage/archive/azlib.h | 13 +++ storage/archive/ha_archive.cc | 62 ++++---------- storage/archive/ha_archive.h | 2 - 4 files changed, 134 insertions(+), 94 deletions(-) diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 8e111f4f50f..8de5ab58702 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -18,6 +18,7 @@ #include static int const gz_magic[2] = {0x1f, 0x8b}; /* gzip magic header */ +static int const az_magic[2] = {0xfe, 0x03}; /* az magic header */ /* gzip flag byte */ #define ASCII_FLAG 0x01 /* bit 0 set: file probably ascii text */ @@ -66,6 +67,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) s->crc = crc32(0L, Z_NULL, 0); s->transparent = 0; s->mode = 'r'; + s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */ if (Flags & O_WRONLY || Flags & O_APPEND) s->mode = 'w'; @@ -112,20 +114,24 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) return Z_NULL; } if (s->mode == 'w') { - char buffer[10]; - /* Write a very simple .gz header: - */ - buffer[0] = gz_magic[0]; - buffer[1] = gz_magic[1]; - buffer[2] = Z_DEFLATED; - buffer[3] = 0 /*flags*/; - buffer[4] = 0; - buffer[5] = 0; - buffer[6] = 0; - buffer[7] = 0 /*time*/; - buffer[8] = 0 /*xflags*/; - buffer[9] = 0x03; - s->start = 10L; + char buffer[AZHEADER_SIZE]; + char *ptr; + /* Write a very simple .gz header: */ + bzero(buffer, AZHEADER_SIZE); + buffer[0] = az_magic[0]; + buffer[1] = az_magic[1]; + buffer[2] = (unsigned char)0; /* Reserved for block size */ + buffer[3] = (unsigned char)0; /* Compression Type */ + ptr= buffer + 4; + int4store(ptr, 0LL); /* FRM Block */ + ptr+= sizeof(unsigned long); + int4store(ptr, 0LL); /* Meta Block */ + ptr+= sizeof(unsigned long); + int4store(ptr, (unsigned long)AZHEADER_SIZE); /* Start of Data Block Index Block */ + ptr+= sizeof(unsigned long); + + s->start = AZHEADER_SIZE; + s->version = (unsigned 
char)az_magic[1]; my_write(s->file, buffer, (uint)s->start, MYF(0)); /* We use 10L instead of ftell(s->file) to because ftell causes an * fflush on some systems. This version of the library doesn't use @@ -218,41 +224,53 @@ void check_header(azio_stream *s) } /* Peek ahead to check the gzip magic header */ - if (s->stream.next_in[0] != gz_magic[0] || - s->stream.next_in[1] != gz_magic[1]) { + if ( s->stream.next_in[0] == gz_magic[0] && s->stream.next_in[1] == gz_magic[1]) + { + s->stream.avail_in -= 2; + s->stream.next_in += 2; + s->version= (unsigned char)2; + + /* Check the rest of the gzip header */ + method = get_byte(s); + flags = get_byte(s); + if (method != Z_DEFLATED || (flags & RESERVED) != 0) { + s->z_err = Z_DATA_ERROR; + return; + } + + /* Discard time, xflags and OS code: */ + for (len = 0; len < 6; len++) (void)get_byte(s); + + if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */ + len = (uInt)get_byte(s); + len += ((uInt)get_byte(s))<<8; + /* len is garbage if EOF but the loop below will quit anyway */ + while (len-- != 0 && get_byte(s) != EOF) ; + } + if ((flags & ORIG_NAME) != 0) { /* skip the original file name */ + while ((c = get_byte(s)) != 0 && c != EOF) ; + } + if ((flags & COMMENT) != 0) { /* skip the .gz file comment */ + while ((c = get_byte(s)) != 0 && c != EOF) ; + } + if ((flags & HEAD_CRC) != 0) { /* skip the header crc */ + for (len = 0; len < 2; len++) (void)get_byte(s); + } + s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; + } + else if ( s->stream.next_in[0] == az_magic[0] && s->stream.next_in[1] == az_magic[1]) + { + s->stream.avail_in -= 2; + s->stream.next_in += 2; + for (len = 0; len < (AZHEADER_SIZE-2); len++) (void)get_byte(s); + s->z_err = s->z_eof ? 
Z_DATA_ERROR : Z_OK; + } + else + { s->transparent = 1; + s->version = (unsigned char)0; return; } - s->stream.avail_in -= 2; - s->stream.next_in += 2; - - /* Check the rest of the gzip header */ - method = get_byte(s); - flags = get_byte(s); - if (method != Z_DEFLATED || (flags & RESERVED) != 0) { - s->z_err = Z_DATA_ERROR; - return; - } - - /* Discard time, xflags and OS code: */ - for (len = 0; len < 6; len++) (void)get_byte(s); - - if ((flags & EXTRA_FIELD) != 0) { /* skip the extra field */ - len = (uInt)get_byte(s); - len += ((uInt)get_byte(s))<<8; - /* len is garbage if EOF but the loop below will quit anyway */ - while (len-- != 0 && get_byte(s) != EOF) ; - } - if ((flags & ORIG_NAME) != 0) { /* skip the original file name */ - while ((c = get_byte(s)) != 0 && c != EOF) ; - } - if ((flags & COMMENT) != 0) { /* skip the .gz file comment */ - while ((c = get_byte(s)) != 0 && c != EOF) ; - } - if ((flags & HEAD_CRC) != 0) { /* skip the header crc */ - for (len = 0; len < 2; len++) (void)get_byte(s); - } - s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; } /* =========================================================================== @@ -668,10 +686,12 @@ int azclose (azio_stream *s) if (s == NULL) return Z_STREAM_ERROR; + if (s->mode == 'w') { #ifdef NO_GZCOMPRESS return Z_STREAM_ERROR; #else + if (do_flush (s, Z_FINISH) != Z_OK) return destroy(s); @@ -681,3 +701,42 @@ int azclose (azio_stream *s) } return destroy(s); } + +/* + This function reads the header of meta block and returns whether or not it was successful. + *rows will contain the current number of rows in the data file upon success. 
+*/ +int az_read_meta_block(char *meta_start, unsigned long *rows, + unsigned long long *auto_increment, + unsigned long long *forced_flushes) +{ + unsigned char *ptr= meta_start; + ulonglong check_point; + + DBUG_ENTER("ha_archive::read_meta_file"); + + /* + Parse out the meta data, we ignore version at the moment + */ + + *rows= (unsigned long long)uint8korr(ptr); + ptr+= sizeof(unsigned long long); // Move past rows + check_point= uint8korr(ptr); + ptr+= sizeof(unsigned long long); // Move past check_point + *auto_increment= uint8korr(ptr); + ptr+= sizeof(unsigned long long); // Move past auto_increment + *forced_flushes= uint8korr(ptr); + ptr+= sizeof(unsigned long long); // Move past forced_flush + + DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", + (long long unsigned)*rows)); + DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", + (long long unsigned) check_point)); + DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", + (long long unsigned)*auto_increment)); + DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", + (long long unsigned)*forced_flushes)); + DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); + + DBUG_RETURN(0); +} diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h index 7e20387ff0a..122e460d24c 100644 --- a/storage/archive/azlib.h +++ b/storage/archive/azlib.h @@ -1,6 +1,8 @@ /* This libary has been modified for use by the MySQL Archive Engine. 
+ -Brian Aker */ + /* zlib.h -- interface of the 'zlib' general purpose compression library version 1.2.3, July 18th, 2005 @@ -38,6 +40,16 @@ #ifdef __cplusplus extern "C" { #endif +/* Start of MySQL Specific Information */ + +/* + ulonglong + ulonglong + ulonglong + ulonglong + uchar +*/ +#define AZMETA_BUFFER_SIZE sizeof(ulonglong) \ + + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) \ + + sizeof(uchar) + +#define AZHEADER_SIZE 16 /* The 'zlib' compression library provides in-memory compression and @@ -171,6 +183,7 @@ typedef struct azio_stream { my_off_t out; /* bytes out of deflate or inflate */ int back; /* one character push-back */ int last; /* true if push-back is last character */ + unsigned char version; /* Version */ } azio_stream; /* basic functions */ diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index 87312331aae..e9d4116459a 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -250,11 +250,17 @@ int ha_archive::read_data_header(azio_stream *file_to_read) if (azrewind(file_to_read) == -1) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); + if (file_to_read->version >= 3) + DBUG_RETURN(0); + /* Everything below this is just legacy to version 2< */ + + DBUG_PRINT("ha_archive", ("Reading legacy data header")); + ret= azread(file_to_read, data_buffer, DATA_BUFFER_SIZE, &error); if (ret != DATA_BUFFER_SIZE) { - DBUG_PRINT("ha_archive", ("Reading, expected %lu got %lu", + DBUG_PRINT("ha_archive", ("Reading, expected %d got %lu", DATA_BUFFER_SIZE, ret)); DBUG_RETURN(1); } @@ -268,9 +274,6 @@ int ha_archive::read_data_header(azio_stream *file_to_read) DBUG_PRINT("ha_archive", ("Check %u", data_buffer[0])); DBUG_PRINT("ha_archive", ("Version %u", data_buffer[1])); - share->data_version= (uchar)data_buffer[1]; - DBUG_PRINT("ha_archive", ("Set Version %u", share->data_version)); - if ((data_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) && (data_buffer[1] != (uchar)ARCHIVE_VERSION)) 
DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); @@ -278,29 +281,6 @@ int ha_archive::read_data_header(azio_stream *file_to_read) DBUG_RETURN(0); } -/* - This method writes out the header of a datafile and returns whether or not it was successful. -*/ -int ha_archive::write_data_header(azio_stream *file_to_write) -{ - uchar data_buffer[DATA_BUFFER_SIZE]; - DBUG_ENTER("ha_archive::write_data_header"); - - data_buffer[0]= (uchar)ARCHIVE_CHECK_HEADER; - data_buffer[1]= (uchar)ARCHIVE_VERSION; - - if (azwrite(file_to_write, &data_buffer, DATA_BUFFER_SIZE) != - DATA_BUFFER_SIZE) - goto error; - DBUG_PRINT("ha_archive", ("Check %u", (uint)data_buffer[0])); - DBUG_PRINT("ha_archive", ("Version %u", (uint)data_buffer[1])); - - DBUG_RETURN(0); -error: - DBUG_PRINT("ha_archive", ("Could not write full data header")); - DBUG_RETURN(errno); -} - /* This method reads the header of a meta file and returns whether or not it was successful. *rows will contain the current number of rows in the data file upon success. @@ -616,6 +596,9 @@ int ha_archive::open(const char *name, int mode, uint open_options) DBUG_RETURN(rc); } + DBUG_ASSERT(share); + + record_buffer= create_record_buffer(table->s->reclength); if (!record_buffer) @@ -694,6 +677,7 @@ int ha_archive::create(const char *name, TABLE *table_arg, File create_file; // We use to create the datafile and the metafile char name_buff[FN_REFLEN]; int error; + azio_stream create_stream; /* Archive file we are working with */ DBUG_ENTER("ha_archive::create"); stats.auto_increment_value= (create_info->auto_increment_value ? 
@@ -762,18 +746,13 @@ int ha_archive::create(const char *name, TABLE *table_arg, goto error; } } - if (!azdopen(&archive, create_file, O_WRONLY|O_BINARY)) + if (!azdopen(&create_stream, create_file, O_WRONLY|O_BINARY)) { error= errno; goto error2; } - if (write_data_header(&archive)) - { - error= errno; - goto error3; - } - if (azclose(&archive)) + if (azclose(&create_stream)) { error= errno; goto error2; @@ -781,9 +760,6 @@ int ha_archive::create(const char *name, TABLE *table_arg, DBUG_RETURN(0); -error3: - /* We already have an error, so ignore results of azclose. */ - (void)azclose(&archive); error2: my_close(create_file, MYF(0)); delete_table(name); @@ -1140,8 +1116,9 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf) int rc; DBUG_ENTER("ha_archive::get_row"); DBUG_PRINT("ha_archive", ("Picking version for get_row() %d -> %d", - share->data_version, ARCHIVE_VERSION)); - if (share->data_version == ARCHIVE_VERSION) + (uchar)file_to_read->version, + ARCHIVE_VERSION)); + if (file_to_read->version == ARCHIVE_VERSION) rc= get_row_version3(file_to_read, buf); else rc= get_row_version2(file_to_read, buf); @@ -1436,13 +1413,6 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) start of the file. */ rc= read_data_header(&archive); - - /* - Assuming now error from rewinding the archive file, we now write out the - new header for out data file. 
- */ - if (!rc) - rc= write_data_header(&writer); /* On success of writing out the new header, we now fetch each row and diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index d451cf69488..3e3016cca1e 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -50,7 +50,6 @@ typedef struct st_archive_share { ulonglong mean_rec_length; char real_path[FN_REFLEN]; uint meta_version; - uint data_version; } ARCHIVE_SHARE; /* @@ -137,7 +136,6 @@ public: int init_archive_writer(); bool auto_repair() const { return 1; } // For the moment we just do this int read_data_header(azio_stream *file_to_read); - int write_data_header(azio_stream *file_to_write); void position(const byte *record); int info(uint); void update_create_info(HA_CREATE_INFO *create_info); From 9879f4363fe7ab4ff0d4387f59655157748a7616 Mon Sep 17 00:00:00 2001 From: "brian@zim.(none)" <> Date: Wed, 10 Jan 2007 23:53:08 -0800 Subject: [PATCH 4/4] New azio which keeps meta data in its own header. 
--- storage/archive/Makefile.am | 10 +- storage/archive/archive_reader.c | 39 +++ storage/archive/archive_test.c | 188 ++++++++++---- storage/archive/azio.c | 277 ++++++++++---------- storage/archive/azlib.h | 40 ++- storage/archive/ha_archive.cc | 422 +++++++++---------------------- storage/archive/ha_archive.h | 17 +- 7 files changed, 487 insertions(+), 506 deletions(-) create mode 100644 storage/archive/archive_reader.c diff --git a/storage/archive/Makefile.am b/storage/archive/Makefile.am index 5e91ffa83e8..7c24a962ffa 100644 --- a/storage/archive/Makefile.am +++ b/storage/archive/Makefile.am @@ -31,7 +31,7 @@ LDADD = DEFS = @DEFS@ noinst_HEADERS = ha_archive.h azlib.h -noinst_PROGRAMS = archive_test +noinst_PROGRAMS = archive_test archive_reader EXTRA_LTLIBRARIES = ha_archive.la pkglib_LTLIBRARIES = @plugin_archive_shared_target@ @@ -56,6 +56,14 @@ archive_test_LDADD = $(top_builddir)/mysys/libmysys.a \ @ZLIB_LIBS@ archive_test_LDFLAGS = @NOINST_LDFLAGS@ +archive_reader_SOURCES = archive_reader.c azio.c +archive_reader_CFLAGS = $(AM_CFLAGS) +archive_reader_LDADD = $(top_builddir)/mysys/libmysys.a \ + $(top_builddir)/dbug/libdbug.a \ + $(top_builddir)/strings/libmystrings.a \ + @ZLIB_LIBS@ +archive_reader_LDFLAGS = @NOINST_LDFLAGS@ + EXTRA_DIST = CMakeLists.txt plug.in # Don't update the files from bitkeeper diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c new file mode 100644 index 00000000000..9f83c11e8cb --- /dev/null +++ b/storage/archive/archive_reader.c @@ -0,0 +1,39 @@ +#include "azlib.h" +#include +#include +#include + +#define BUFFER_LEN 1024 + +int main(int argc, char *argv[]) +{ + unsigned int ret; + azio_stream reader_handle; + + MY_INIT(argv[0]); + + if (argc < 2) + { + printf("No file specified. 
\n"); + return 0; + } + + if (!(ret= azopen(&reader_handle, argv[1], O_RDONLY|O_BINARY))) + { + printf("Could not create test file\n"); + return 0; + } + + printf("Version :%u\n", reader_handle.version); + printf("Start position :%llu\n", (unsigned long long)reader_handle.start); + printf("Block size :%u\n", reader_handle.block_size); + printf("Rows: %llu\n", reader_handle.rows); + printf("Autoincrement: %llu\n", reader_handle.auto_increment); + printf("Check Point: %llu\n", reader_handle.check_point); + printf("Forced Flushes: %llu\n", reader_handle.forced_flushes); + printf("State: %s\n", ( reader_handle.dirty ? "dirty" : "clean")); + + azclose(&reader_handle); + + return 0; +} diff --git a/storage/archive/archive_test.c b/storage/archive/archive_test.c index 753fdb8287a..161084f9b69 100644 --- a/storage/archive/archive_test.c +++ b/storage/archive/archive_test.c @@ -5,88 +5,192 @@ #define TEST_FILENAME "test.az" #define TEST_STRING "YOU don't know about me without you have read a book by the name of The Adventures of Tom Sawyer; but that ain't no matter. That book was made by Mr. Mark Twain, and he told the truth, mainly. There was things which he stretched, but mainly he told the truth. That is nothing. I never seen anybody but lied one time or another, without it was Aunt Polly, or the widow, or maybe Mary. Aunt Polly--Tom's Aunt Polly, she is--and Mary, and the Widow Douglas is all told about in that book, which is mostly a true book, with some stretchers, as I said before. Now the way that the book winds up is this: Tom and me found the money that the robbers hid in the cave, and it made us rich. We got six thousand dollars apiece--all gold. It was an awful sight of money when it was piled up. Well, Judge Thatcher he took it and put it out at interest, and it fetched us a dollar a day apiece all the year round --more than a body could tell what to do with. The Widow Douglas she took me for her son, and allowed she would..." 
+#define TEST_LOOP_NUM 100 #define BUFFER_LEN 1024 #define TWOGIG 2147483648 #define FOURGIG 4294967296 +#define EIGHTGIG 8589934592 -int main(int argc __attribute__((unused)), char *argv[]) +/* prototypes */ +int size_test(unsigned long long length, unsigned long long rows_to_test_for); + + +int main(int argc, char *argv[]) { - unsigned long ret; + unsigned int ret; + int error; + unsigned int x; + int written_rows= 0; azio_stream writer_handle, reader_handle; char buffer[BUFFER_LEN]; - unsigned long write_length; - unsigned long read_length= 0; + + unlink(TEST_FILENAME); + + if (argc > 1) + return 0; MY_INIT(argv[0]); - if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_RDWR|O_BINARY))) { printf("Could not create test file\n"); return 0; } - ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); - assert(ret == BUFFER_LEN); - azflush(&writer_handle, Z_FINISH); if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY))) { printf("Could not open test file\n"); return 0; } - ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); - printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN); + assert(reader_handle.rows == 0); + assert(reader_handle.auto_increment == 0); + assert(reader_handle.check_point == 0); + assert(reader_handle.forced_flushes == 0); + assert(reader_handle.dirty == 1); + + for (x= 0; x < TEST_LOOP_NUM; x++) + { + ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); + assert(ret == BUFFER_LEN); + written_rows++; + } + azflush(&writer_handle, Z_SYNC_FLUSH); + + /* Lets test that our internal stats are good */ + assert(writer_handle.rows == TEST_LOOP_NUM); + + /* Reader needs to be flushed to make sure it is up to date */ + azflush(&reader_handle, Z_SYNC_FLUSH); + assert(reader_handle.rows == TEST_LOOP_NUM); + assert(reader_handle.auto_increment == 0); + assert(reader_handle.check_point == 0); + assert(reader_handle.forced_flushes == 1); + 
assert(reader_handle.dirty == 1); + + writer_handle.auto_increment= 4; + azflush(&writer_handle, Z_SYNC_FLUSH); + assert(writer_handle.rows == TEST_LOOP_NUM); + assert(writer_handle.auto_increment == 4); + assert(writer_handle.check_point == 0); + assert(writer_handle.forced_flushes == 2); + assert(writer_handle.dirty == 1); + + if (!(ret= azopen(&reader_handle, TEST_FILENAME, O_RDONLY|O_BINARY))) + { + printf("Could not open test file\n"); + return 0; + } + + /* Read the original data */ + for (x= 0; x < writer_handle.rows; x++) + { + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + assert(!error); + assert(ret == BUFFER_LEN); + assert(!memcmp(buffer, TEST_STRING, ret)); + } + assert(writer_handle.rows == TEST_LOOP_NUM); + + /* Test here for falling off the planet */ + + /* Final Write before closing */ + ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); + assert(ret == BUFFER_LEN); + + /* We don't use FINISH, but I want to have it tested */ + azflush(&writer_handle, Z_FINISH); + + assert(writer_handle.rows == TEST_LOOP_NUM+1); + + /* Read final write */ azrewind(&reader_handle); + for (x= 0; x < writer_handle.rows; x++) + { + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + assert(ret == BUFFER_LEN); + assert(!error); + assert(!memcmp(buffer, TEST_STRING, ret)); + } + + azclose(&writer_handle); - if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_APPEND|O_WRONLY|O_BINARY))) + /* Rewind and full test */ + azrewind(&reader_handle); + for (x= 0; x < writer_handle.rows; x++) + { + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + assert(ret == BUFFER_LEN); + assert(!error); + assert(!memcmp(buffer, TEST_STRING, ret)); + } + + printf("Finished reading\n"); + + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_RDWR|O_BINARY))) { printf("Could not open file (%s) for appending\n", TEST_FILENAME); return 0; } ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); assert(ret == BUFFER_LEN); - azflush(&writer_handle, Z_FINISH); + 
azflush(&writer_handle, Z_SYNC_FLUSH); - /* Read the original data */ - ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); - printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN); - assert(ret == BUFFER_LEN); - assert(!error); - - /* Read the new data */ - ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); - printf("Read %lu bytes, expected %d\n", ret, BUFFER_LEN); - assert(ret == BUFFER_LEN); - assert(!error); + /* Rewind and full test */ + azrewind(&reader_handle); + for (x= 0; x < writer_handle.rows; x++) + { + ret= azread(&reader_handle, buffer, BUFFER_LEN, &error); + assert(!error); + assert(ret == BUFFER_LEN); + assert(!memcmp(buffer, TEST_STRING, ret)); + } azclose(&writer_handle); azclose(&reader_handle); unlink(TEST_FILENAME); /* Start size tests */ - printf("About to run 2gig and 4gig test now, you may want to hit CTRL-C\n"); + printf("About to run 2/4/8 gig tests now, you may want to hit CTRL-C\n"); + size_test(TWOGIG, 2097152); + size_test(FOURGIG, 4194304); + size_test(EIGHTGIG, 8388608); - if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) + return 0; +} + +int size_test(unsigned long long length, unsigned long long rows_to_test_for) +{ + azio_stream writer_handle, reader_handle; + unsigned long long write_length; + unsigned long long read_length= 0; + unsigned int ret; + char buffer[BUFFER_LEN]; + int error; + + if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_RDWR|O_TRUNC|O_BINARY))) { printf("Could not create test file\n"); return 0; } - for (write_length= 0; write_length < TWOGIG ; write_length+= ret) + for (write_length= 0; write_length < length ; write_length+= ret) { ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); - assert(!error); if (ret != BUFFER_LEN) { - printf("Size %lu\n", ret); + printf("Size %u\n", ret); assert(ret != BUFFER_LEN); } + if ((write_length % 14031) == 0) + { + azflush(&writer_handle, Z_SYNC_FLUSH); + } } - assert(write_length == TWOGIG); - printf("Read %lu 
bytes, expected %lu\n", write_length, TWOGIG); - azflush(&writer_handle, Z_FINISH); + assert(write_length == length); + azflush(&writer_handle, Z_SYNC_FLUSH); printf("Reading back data\n"); @@ -102,29 +206,13 @@ int main(int argc __attribute__((unused)), char *argv[]) assert(!memcmp(buffer, TEST_STRING, ret)); if (ret != BUFFER_LEN) { - printf("Size %lu\n", ret); + printf("Size %u\n", ret); assert(ret != BUFFER_LEN); } } - assert(read_length == TWOGIG); - azclose(&writer_handle); - azclose(&reader_handle); - unlink(TEST_FILENAME); - - if (!(ret= azopen(&writer_handle, TEST_FILENAME, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) - { - printf("Could not create test file\n"); - return 0; - } - - for (write_length= 0; write_length < FOURGIG ; write_length+= ret) - { - ret= azwrite(&writer_handle, TEST_STRING, BUFFER_LEN); - assert(ret == BUFFER_LEN); - } - assert(write_length == FOURGIG); - printf("Read %lu bytes, expected %lu\n", write_length, FOURGIG); + assert(read_length == length); + assert(writer_handle.rows == rows_to_test_for); azclose(&writer_handle); azclose(&reader_handle); unlink(TEST_FILENAME); diff --git a/storage/archive/azio.c b/storage/archive/azio.c index 8de5ab58702..c96c689cf2b 100644 --- a/storage/archive/azio.c +++ b/storage/archive/azio.c @@ -7,7 +7,6 @@ * Copyright (C) 1995-2005 Jean-loup Gailly. * For conditions of distribution and use, see copyright notice in zlib.h * - * Compile this file with -DNO_GZCOMPRESS to avoid the compression code. 
*/ /* @(#) $Id$ */ @@ -32,9 +31,11 @@ int az_open(azio_stream *s, const char *path, int Flags, File fd); int do_flush(azio_stream *file, int flush); int get_byte(azio_stream *s); void check_header(azio_stream *s); +void write_header(azio_stream *s); int destroy(azio_stream *s); void putLong(File file, uLong x); uLong getLong(azio_stream *s); +void read_header(azio_stream *s, unsigned char *buffer); /* =========================================================================== Opens a gzip (.gz) file for reading or writing. The mode parameter @@ -48,14 +49,14 @@ uLong getLong(azio_stream *s); int az_open (azio_stream *s, const char *path, int Flags, File fd) { int err; - int level = Z_DEFAULT_COMPRESSION; /* compression level */ + int level = Z_NO_COMPRESSION; /* Z_DEFAULT_COMPRESSION;*/ /* compression level */ int strategy = Z_DEFAULT_STRATEGY; /* compression strategy */ s->stream.zalloc = (alloc_func)0; s->stream.zfree = (free_func)0; s->stream.opaque = (voidpf)0; - memset(s->inbuf, 0, Z_BUFSIZE); - memset(s->outbuf, 0, Z_BUFSIZE); + memset(s->inbuf, 0, AZ_BUFSIZE); + memset(s->outbuf, 0, AZ_BUFSIZE); s->stream.next_in = s->inbuf; s->stream.next_out = s->outbuf; s->stream.avail_in = s->stream.avail_out = 0; @@ -69,19 +70,23 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) s->mode = 'r'; s->version = (unsigned char)az_magic[1]; /* this needs to be a define to version */ - if (Flags & O_WRONLY || Flags & O_APPEND) + /* + We do our own version of append by nature. + We must always have write access to take care of the header.
+ */ + DBUG_ASSERT(Flags | O_APPEND); + DBUG_ASSERT(Flags | O_WRONLY); + + if (Flags & O_RDWR) s->mode = 'w'; - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - err = Z_STREAM_ERROR; -#else + if (s->mode == 'w') + { err = deflateInit2(&(s->stream), level, Z_DEFLATED, -MAX_WBITS, 8, strategy); /* windowBits is passed < 0 to suppress zlib header */ s->stream.next_out = s->outbuf; -#endif if (err != Z_OK) { destroy(s); @@ -103,7 +108,7 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) return Z_NULL; } } - s->stream.avail_out = Z_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE; errno = 0; s->file = fd < 0 ? my_open(path, Flags, MYF(0)) : fd; @@ -113,39 +118,64 @@ int az_open (azio_stream *s, const char *path, int Flags, File fd) destroy(s); return Z_NULL; } - if (s->mode == 'w') { - char buffer[AZHEADER_SIZE]; - char *ptr; - /* Write a very simple .gz header: */ - bzero(buffer, AZHEADER_SIZE); - buffer[0] = az_magic[0]; - buffer[1] = az_magic[1]; - buffer[2] = (unsigned char)0; /* Reserved for block size */ - buffer[3] = (unsigned char)0; /* Compression Type */ - ptr= buffer + 4; - int4store(ptr, 0LL); /* FRM Block */ - ptr+= sizeof(unsigned long); - int4store(ptr, 0LL); /* Meta Block */ - ptr+= sizeof(unsigned long); - int4store(ptr, (unsigned long)AZHEADER_SIZE); /* Start of Data Block Index Block */ - ptr+= sizeof(unsigned long); - - s->start = AZHEADER_SIZE; - s->version = (unsigned char)az_magic[1]; - my_write(s->file, buffer, (uint)s->start, MYF(0)); - /* We use 10L instead of ftell(s->file) to because ftell causes an - * fflush on some systems. This version of the library doesn't use - * start anyway in write mode, so this initialization is not - * necessary. 
- */ - } else { - check_header(s); /* skip the .gz header */ - s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in; + + if (Flags & O_CREAT || Flags & O_TRUNC) + { + s->rows= 0; + s->forced_flushes= 0; + s->auto_increment= 0; + s->check_point= 0; + s->dirty= 1; /* We create the file dirty */ + write_header(s); + my_seek(s->file, 0, MY_SEEK_END, MYF(0)); + } + else if (s->mode == 'w') + { + unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; + my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0)); + read_header(s, buffer); /* skip the .az header */ + my_seek(s->file, 0, MY_SEEK_END, MYF(0)); + } + else + { + check_header(s); /* skip the .az header */ } return 1; } + +void write_header(azio_stream *s) +{ + char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; + char *ptr= buffer; + + s->start = AZHEADER_SIZE + AZMETA_BUFFER_SIZE; + s->block_size= AZ_BUFSIZE; + s->version = (unsigned char)az_magic[1]; + + + /* Write a very simple .az header: */ + bzero(buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE); + *(ptr + AZ_MAGIC_POS)= az_magic[0]; + *(ptr + AZ_VERSION_POS)= (unsigned char)s->version; + *(ptr + AZ_BLOCK_POS)= (unsigned char)(s->block_size/1024); /* Reserved for block size */ + *(ptr + AZ_STRATEGY_POS)= (unsigned char)Z_DEFAULT_STRATEGY; /* Compression Type */ + + int4store(ptr + AZ_FRM_POS, 0); /* FRM Block */ + int4store(ptr + AZ_META_POS, 0); /* Meta Block */ + int8store(ptr + AZ_START_POS, (unsigned long long)s->start); /* Start of Data Block Index Block */ + printf("ROWS %llu\n", s->rows); + int8store(ptr + AZ_ROW_POS, (unsigned long long)s->rows); /* Start of Data Block Index Block */ + int8store(ptr + AZ_FLUSH_POS, (unsigned long long)s->forced_flushes); /* Start of Data Block Index Block */ + int8store(ptr + AZ_CHECK_POS, (unsigned long long)s->check_point); /* Start of Data Block Index Block */ + int8store(ptr + AZ_AUTOINCREMENT_POS, (unsigned long long)s->auto_increment); /* Start of Data Block Index Block */ + *(ptr + 
AZ_DIRTY_POS)= (unsigned char)s->dirty; /* Start of Data Block Index Block */ + + /* Always begin at the begining, and end there as well */ + my_pwrite(s->file, buffer, (uint)s->start, 0, MYF(0)); +} + /* =========================================================================== Opens a gzip (.gz) file for reading or writing. */ @@ -177,7 +207,7 @@ int get_byte(s) if (s->stream.avail_in == 0) { errno = 0; - s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0)); + s->stream.avail_in = my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0)); if (s->stream.avail_in == 0) { s->z_eof = 1; @@ -213,7 +243,7 @@ void check_header(azio_stream *s) if (len < 2) { if (len) s->inbuf[0] = s->stream.next_in[0]; errno = 0; - len = (uInt)my_read(s->file, (byte *)s->inbuf + len, Z_BUFSIZE >> len, MYF(0)); + len = (uInt)my_read(s->file, (byte *)s->inbuf + len, AZ_BUFSIZE >> len, MYF(0)); if (len == 0) s->z_err = Z_ERRNO; s->stream.avail_in += len; s->stream.next_in = s->inbuf; @@ -257,18 +287,41 @@ void check_header(azio_stream *s) for (len = 0; len < 2; len++) (void)get_byte(s); } s->z_err = s->z_eof ? Z_DATA_ERROR : Z_OK; + s->start = my_tell(s->file, MYF(0)) - s->stream.avail_in; } else if ( s->stream.next_in[0] == az_magic[0] && s->stream.next_in[1] == az_magic[1]) { - s->stream.avail_in -= 2; - s->stream.next_in += 2; - for (len = 0; len < (AZHEADER_SIZE-2); len++) (void)get_byte(s); + unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; + + for (len = 0; len < (AZHEADER_SIZE + AZMETA_BUFFER_SIZE); len++) + buffer[len]= get_byte(s); s->z_err = s->z_eof ? 
Z_DATA_ERROR : Z_OK; + read_header(s, buffer); } else { - s->transparent = 1; - s->version = (unsigned char)0; + s->z_err = Z_OK; + + return; + } +} + +void read_header(azio_stream *s, unsigned char *buffer) +{ + if (buffer[0] == az_magic[0] && buffer[1] == az_magic[1]) + { + s->version= (unsigned int)buffer[AZ_VERSION_POS]; + s->block_size= 1024 * buffer[AZ_BLOCK_POS]; + s->start= (unsigned long long)uint8korr(buffer + AZ_START_POS); + s->rows= (unsigned long long)uint8korr(buffer + AZ_ROW_POS); + s->check_point= (unsigned long long)uint8korr(buffer + AZ_CHECK_POS); + s->forced_flushes= (unsigned long long)uint8korr(buffer + AZ_FLUSH_POS); + s->auto_increment= (unsigned long long)uint8korr(buffer + AZ_AUTOINCREMENT_POS); + s->dirty= (unsigned int)buffer[AZ_DIRTY_POS]; + } + else + { + DBUG_ASSERT(buffer[0] == az_magic[0] && buffer[1] == az_magic[1]); return; } } @@ -284,11 +337,7 @@ int destroy (s) if (s->stream.state != NULL) { if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - err = Z_STREAM_ERROR; -#else err = deflateEnd(&(s->stream)); -#endif } else if (s->mode == 'r') { @@ -311,7 +360,7 @@ int destroy (s) Reads the given number of uncompressed bytes from the compressed file. azread returns the number of bytes actually read (0 for end of file). 
*/ -unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int *error) +unsigned int ZEXPORT azread ( azio_stream *s, voidp buf, unsigned int len, int *error) { Bytef *start = (Bytef*)buf; /* starting point for crc computation */ Byte *next_out; /* == stream.next_out but not forced far (for MSDOS) */ @@ -383,7 +432,7 @@ unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int if (s->stream.avail_in == 0 && !s->z_eof) { errno = 0; - s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, Z_BUFSIZE, MYF(0)); + s->stream.avail_in = (uInt)my_read(s->file, (byte *)s->inbuf, AZ_BUFSIZE, MYF(0)); if (s->stream.avail_in == 0) { s->z_eof = 1; @@ -410,7 +459,8 @@ unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int * Check for such files: */ check_header(s); - if (s->z_err == Z_OK) { + if (s->z_err == Z_OK) + { inflateReset(&(s->stream)); s->crc = crc32(0L, Z_NULL, 0); } @@ -432,29 +482,30 @@ unsigned long ZEXPORT azread ( azio_stream *s, voidp buf, unsigned long len, int } -#ifndef NO_GZCOMPRESS /* =========================================================================== Writes the given number of uncompressed bytes into the compressed file. azwrite returns the number of bytes actually written (0 in case of error). 
*/ -unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len) +unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len) { - s->stream.next_in = (Bytef*)buf; s->stream.avail_in = len; + + s->rows++; + while (s->stream.avail_in != 0) { if (s->stream.avail_out == 0) { s->stream.next_out = s->outbuf; - if (my_write(s->file, (byte *)s->outbuf, Z_BUFSIZE, MYF(0)) != Z_BUFSIZE) + if (my_write(s->file, (byte *)s->outbuf, AZ_BUFSIZE, MYF(0)) != AZ_BUFSIZE) { s->z_err = Z_ERRNO; break; } - s->stream.avail_out = Z_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE; } s->in += s->stream.avail_in; s->out += s->stream.avail_out; @@ -465,19 +516,15 @@ unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len) } s->crc = crc32(s->crc, (const Bytef *)buf, len); - return (unsigned long)(len - s->stream.avail_in); + return (unsigned int)(len - s->stream.avail_in); } -#endif - /* =========================================================================== Flushes all pending output into the compressed file. The parameter flush is as in the deflate() function. */ -int do_flush (s, flush) - azio_stream *s; - int flush; +int do_flush (azio_stream *s, int flush) { uInt len; int done = 0; @@ -486,8 +533,9 @@ int do_flush (s, flush) s->stream.avail_in = 0; /* should be zero already anyway */ - for (;;) { - len = Z_BUFSIZE - s->stream.avail_out; + for (;;) + { + len = AZ_BUFSIZE - s->stream.avail_out; if (len != 0) { if ((uInt)my_write(s->file, (byte *)s->outbuf, len, MYF(0)) != len) @@ -496,7 +544,7 @@ int do_flush (s, flush) return Z_ERRNO; } s->stream.next_out = s->outbuf; - s->stream.avail_out = Z_BUFSIZE; + s->stream.avail_out = AZ_BUFSIZE; } if (done) break; s->out += s->stream.avail_out; @@ -513,6 +561,11 @@ int do_flush (s, flush) if (s->z_err != Z_OK && s->z_err != Z_STREAM_END) break; } + + if (flush == Z_FINISH) + s->dirty= 0; /* Mark it clean, we should be good now */ + write_header(s); + return s->z_err == Z_STREAM_END ? 
Z_OK : s->z_err; } @@ -520,11 +573,25 @@ int ZEXPORT azflush (s, flush) azio_stream *s; int flush; { - int err = do_flush (s, flush); + int err; - if (err) return err; - my_sync(s->file, MYF(0)); - return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; + if (s->mode == 'r') + { + unsigned char buffer[AZHEADER_SIZE + AZMETA_BUFFER_SIZE]; + my_pread(s->file, buffer, AZHEADER_SIZE + AZMETA_BUFFER_SIZE, 0, MYF(0)); + read_header(s, buffer); /* skip the .az header */ + + return Z_OK; + } + else + { + s->forced_flushes++; + err= do_flush(s, flush); + + if (err) return err; + my_sync(s->file, MYF(0)); + return s->z_err == Z_STREAM_END ? Z_OK : s->z_err; + } } /* =========================================================================== @@ -566,19 +633,17 @@ my_off_t azseek (s, offset, whence) return -1L; } - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - return -1L; -#else - if (whence == SEEK_SET) { + if (s->mode == 'w') + { + if (whence == SEEK_SET) offset -= s->in; - } /* At this point, offset is the number of zero bytes to write. 
*/ /* There was a zmemzero here if inbuf was null -Brian */ - while (offset > 0) { - uInt size = Z_BUFSIZE; - if (offset < Z_BUFSIZE) size = (uInt)offset; + while (offset > 0) + { + uInt size = AZ_BUFSIZE; + if (offset < AZ_BUFSIZE) size = (uInt)offset; size = azwrite(s, s->inbuf, size); if (size == 0) return -1L; @@ -586,7 +651,6 @@ my_off_t azseek (s, offset, whence) offset -= size; } return s->in; -#endif } /* Rest of function is for reading only */ @@ -622,8 +686,8 @@ my_off_t azseek (s, offset, whence) } while (offset > 0) { int error; - unsigned long size = Z_BUFSIZE; - if (offset < Z_BUFSIZE) size = (int)offset; + unsigned int size = AZ_BUFSIZE; + if (offset < AZ_BUFSIZE) size = (int)offset; size = azread(s, s->outbuf, size, &error); if (error <= 0) return -1L; @@ -687,56 +751,13 @@ int azclose (azio_stream *s) if (s == NULL) return Z_STREAM_ERROR; - if (s->mode == 'w') { -#ifdef NO_GZCOMPRESS - return Z_STREAM_ERROR; -#else - + if (s->mode == 'w') + { if (do_flush (s, Z_FINISH) != Z_OK) return destroy(s); putLong(s->file, s->crc); putLong(s->file, (uLong)(s->in & 0xffffffff)); -#endif } return destroy(s); } - -/* - This function reads the header of meta block and returns whether or not it was successful. - *rows will contain the current number of rows in the data file upon success. 
-*/ -int az_read_meta_block(char *meta_start, unsigned long *rows, - unsigned long long *auto_increment, - unsigned long long *forced_flushes) -{ - unsigned char *ptr= meta_start; - ulonglong check_point; - - DBUG_ENTER("ha_archive::read_meta_file"); - - /* - Parse out the meta data, we ignore version at the moment - */ - - *rows= (unsigned long long)uint8korr(ptr); - ptr+= sizeof(unsigned long long); // Move past rows - check_point= uint8korr(ptr); - ptr+= sizeof(unsigned long long); // Move past check_point - *auto_increment= uint8korr(ptr); - ptr+= sizeof(unsigned long long); // Move past auto_increment - *forced_flushes= uint8korr(ptr); - ptr+= sizeof(unsigned long long); // Move past forced_flush - - DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", - (long long unsigned)*rows)); - DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", - (long long unsigned) check_point)); - DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", - (long long unsigned)*auto_increment)); - DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", - (long long unsigned)*forced_flushes)); - DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); - - DBUG_RETURN(0); -} diff --git a/storage/archive/azlib.h b/storage/archive/azlib.h index 122e460d24c..3bf82ce218e 100644 --- a/storage/archive/azlib.h +++ b/storage/archive/azlib.h @@ -36,6 +36,7 @@ #include #include "../../mysys/mysys_priv.h" +#include #ifdef __cplusplus extern "C" { @@ -45,11 +46,24 @@ extern "C" { /* ulonglong + ulonglong + ulonglong + ulonglong + uchar */ -#define AZMETA_BUFFER_SIZE sizeof(ulonglong) \ - + sizeof(ulonglong) + sizeof(ulonglong) + sizeof(ulonglong) \ - + sizeof(uchar) +#define AZMETA_BUFFER_SIZE sizeof(unsigned long long) \ + + sizeof(unsigned long long) + sizeof(unsigned long long) + sizeof(unsigned long long) \ + + sizeof(unsigned char) -#define AZHEADER_SIZE 16 +#define AZHEADER_SIZE 20 + +#define AZ_MAGIC_POS 0 +#define AZ_VERSION_POS 1 +#define 
AZ_BLOCK_POS 2 +#define AZ_STRATEGY_POS 3 +#define AZ_FRM_POS 4 +#define AZ_META_POS 8 +#define AZ_START_POS 12 +#define AZ_ROW_POS 20 +#define AZ_FLUSH_POS 28 +#define AZ_CHECK_POS 36 +#define AZ_AUTOINCREMENT_POS 44 +#define AZ_DIRTY_POS 52 /* The 'zlib' compression library provides in-memory compression and @@ -164,7 +178,7 @@ extern "C" { /* The deflate compression method (the only one supported in this version) */ #define Z_NULL 0 /* for initializing zalloc, zfree, opaque */ -#define Z_BUFSIZE 16384 +#define AZ_BUFSIZE 16384 typedef struct azio_stream { @@ -172,8 +186,8 @@ typedef struct azio_stream { int z_err; /* error code for last stream operation */ int z_eof; /* set if end of input file */ File file; /* .gz file */ - Byte inbuf[Z_BUFSIZE]; /* input buffer */ - Byte outbuf[Z_BUFSIZE]; /* output buffer */ + Byte inbuf[AZ_BUFSIZE]; /* input buffer */ + Byte outbuf[AZ_BUFSIZE]; /* output buffer */ uLong crc; /* crc32 of uncompressed data */ char *msg; /* error message */ int transparent; /* 1 if input file is not a .gz file */ @@ -184,6 +198,12 @@ typedef struct azio_stream { int back; /* one character push-back */ int last; /* true if push-back is last character */ unsigned char version; /* Version */ + unsigned int block_size; /* Block Size */ + unsigned long long check_point; /* Last position we checked */ + unsigned long long forced_flushes; /* Forced Flushes */ + unsigned long long rows; /* rows */ + unsigned long long auto_increment; /* auto increment field */ + unsigned char dirty; /* State of file */ } azio_stream; /* basic functions */ @@ -219,7 +239,7 @@ int azdopen(azio_stream *s,File fd, int Flags); */ -extern unsigned long azread ( azio_stream *s, voidp buf, unsigned long len, int *error); +extern unsigned int azread ( azio_stream *s, voidp buf, unsigned int len, int *error); /* Reads the given number of uncompressed bytes from the compressed file. 
If the input file was not in gzip format, gzread copies the given number @@ -227,10 +247,10 @@ extern unsigned long azread ( azio_stream *s, voidp buf, unsigned long len, int gzread returns the number of uncompressed bytes actually read (0 for end of file, -1 for error). */ -extern unsigned long azwrite (azio_stream *s, voidpc buf, unsigned long len); +extern unsigned int azwrite (azio_stream *s, voidpc buf, unsigned int len); /* Writes the given number of uncompressed bytes into the compressed file. - gzwrite returns the number of uncompressed bytes actually written + azwrite returns the number of uncompressed bytes actually written (0 in case of error). */ diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index e9d4116459a..a667d61889f 100644 --- a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -121,7 +121,7 @@ static HASH archive_open_tables; /* The file extension */ #define ARZ ".ARZ" // The data file #define ARN ".ARN" // Files used during an optimize call -#define ARM ".ARM" // Meta file +#define ARM ".ARM" // Meta file (deprecated) /* uchar + uchar + ulonglong + ulonglong + ulonglong + ulonglong + FN_REFLEN + uchar @@ -281,127 +281,6 @@ int ha_archive::read_data_header(azio_stream *file_to_read) DBUG_RETURN(0); } -/* - This method reads the header of a meta file and returns whether or not it was successful. - *rows will contain the current number of rows in the data file upon success. 
-*/ -int ha_archive::read_meta_file(File meta_file, ha_rows *rows, - uint *meta_version, - ulonglong *auto_increment, - ulonglong *forced_flushes, - char *real_path) -{ - uchar meta_buffer[META_BUFFER_SIZE]; - uchar *ptr= meta_buffer; - ulonglong check_point; - - DBUG_ENTER("ha_archive::read_meta_file"); - - VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); - if (my_read(meta_file, (byte*)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) - DBUG_RETURN(-1); - - /* - Parse out the meta data, we ignore version at the moment - */ - - ptr+= sizeof(uchar)*2; // Move past header - *rows= (ha_rows)uint8korr(ptr); - ptr+= sizeof(ulonglong); // Move past rows - check_point= uint8korr(ptr); - ptr+= sizeof(ulonglong); // Move past check_point - *auto_increment= uint8korr(ptr); - ptr+= sizeof(ulonglong); // Move past auto_increment - *forced_flushes= uint8korr(ptr); - ptr+= sizeof(ulonglong); // Move past forced_flush - memmove(real_path, ptr, FN_REFLEN); - ptr+= FN_REFLEN; // Move past the possible location of the file - - DBUG_PRINT("ha_archive::read_meta_file", ("Check %d", (uint)meta_buffer[0])); - DBUG_PRINT("ha_archive::read_meta_file", ("Version %d", (uint)meta_buffer[1])); - DBUG_PRINT("ha_archive::read_meta_file", ("Rows %llu", - (long long unsigned)*rows)); - DBUG_PRINT("ha_archive::read_meta_file", ("Checkpoint %llu", - (long long unsigned) check_point)); - DBUG_PRINT("ha_archive::read_meta_file", ("Auto-Increment %llu", - (long long unsigned)*auto_increment)); - DBUG_PRINT("ha_archive::read_meta_file", ("Forced Flushes %llu", - (long long unsigned)*forced_flushes)); - DBUG_PRINT("ha_archive::read_meta_file", ("Real Path %s", real_path)); - DBUG_PRINT("ha_archive::read_meta_file", ("Dirty %d", (int)(*ptr))); - - *meta_version= (uchar)meta_buffer[1]; - - if ((meta_buffer[0] != (uchar)ARCHIVE_CHECK_HEADER) || - ((bool)(*ptr)== TRUE)) - DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); - - my_sync(meta_file, MYF(MY_WME)); - - DBUG_RETURN(0); -} - -/* - This method writes out 
the header of a meta file and returns whether or not it was successful. - By setting dirty you say whether or not the file represents the actual state of the data file. - Upon ::open() we set to dirty, and upon ::close() we set to clean. -*/ -int ha_archive::write_meta_file(File meta_file, ha_rows rows, - ulonglong auto_increment, - ulonglong forced_flushes, - char *real_path, - bool dirty) -{ - uchar meta_buffer[META_BUFFER_SIZE]; - uchar *ptr= meta_buffer; - ulonglong check_point= 0; //Reserved for the future - - DBUG_ENTER("ha_archive::write_meta_file"); - - *ptr= (uchar)ARCHIVE_CHECK_HEADER; - ptr += sizeof(uchar); - *ptr= (uchar)ARCHIVE_VERSION; - ptr += sizeof(uchar); - int8store(ptr, (ulonglong)rows); - ptr += sizeof(ulonglong); - int8store(ptr, check_point); - ptr += sizeof(ulonglong); - int8store(ptr, auto_increment); - ptr += sizeof(ulonglong); - int8store(ptr, forced_flushes); - ptr += sizeof(ulonglong); - // No matter what, we pad with nulls - if (real_path) - strncpy((char *)ptr, real_path, FN_REFLEN); - else - bzero(ptr, FN_REFLEN); - ptr += FN_REFLEN; - *ptr= (uchar)dirty; - DBUG_PRINT("ha_archive::write_meta_file", ("Check %d", - (uint)ARCHIVE_CHECK_HEADER)); - DBUG_PRINT("ha_archive::write_meta_file", ("Version %d", - (uint)ARCHIVE_VERSION)); - DBUG_PRINT("ha_archive::write_meta_file", ("Rows %llu", - (unsigned long long)rows)); - DBUG_PRINT("ha_archive::write_meta_file", ("Checkpoint %llu", - (unsigned long long)check_point)); - DBUG_PRINT("ha_archive::write_meta_file", ("Auto Increment %llu", - (unsigned long long)auto_increment)); - DBUG_PRINT("ha_archive::write_meta_file", ("Forced Flushes %llu", - (unsigned long long)forced_flushes)); - DBUG_PRINT("ha_archive::write_meta_file", ("Real path %s", - real_path)); - DBUG_PRINT("ha_archive::write_meta_file", ("Dirty %d", (uint)dirty)); - - VOID(my_seek(meta_file, 0, MY_SEEK_SET, MYF(0))); - if (my_write(meta_file, (byte *)meta_buffer, META_BUFFER_SIZE, 0) != META_BUFFER_SIZE) - DBUG_RETURN(-1); - - 
my_sync(meta_file, MYF(MY_WME)); - - DBUG_RETURN(0); -} - /* We create the shared memory space that we will use for the open table. @@ -414,9 +293,7 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, TABLE *table, int *rc) { ARCHIVE_SHARE *share; - char meta_file_name[FN_REFLEN]; uint length; - char *tmp_name; DBUG_ENTER("ha_archive::get_share"); pthread_mutex_lock(&archive_mutex); @@ -426,6 +303,9 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, (byte*) table_name, length))) { + char *tmp_name; + char tmp_file_name[FN_REFLEN]; + azio_stream archive_tmp; if (!my_multi_malloc(MYF(MY_WME | MY_ZEROFILL), &share, sizeof(*share), &tmp_name, length+1, @@ -443,34 +323,29 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, share->archive_write_open= FALSE; fn_format(share->data_file_name, table_name, "", ARZ,MY_REPLACE_EXT|MY_UNPACK_FILENAME); - fn_format(meta_file_name, table_name, "", ARM, - MY_REPLACE_EXT|MY_UNPACK_FILENAME); strmov(share->table_name,table_name); /* We will use this lock for rows. */ VOID(pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST)); - if ((share->meta_file= my_open(meta_file_name, O_RDWR, MYF(0))) == -1) - share->crashed= TRUE; DBUG_PRINT("ha_archive", ("archive opening (1) up write at %s", share->data_file_name)); /* - We read the meta file, but do not mark it dirty unless we actually do - a write. + We read the meta file, but do not mark it dirty. Since we are not + doing a write we won't mark it dirty (and we won't open it for + anything but reading... open it for write and we will generate null + compression writes). */ - if (read_meta_file(share->meta_file, &share->rows_recorded, - &share->meta_version, - &share->auto_increment_value, - &share->forced_flushes, - share->real_path)) - share->crashed= TRUE; - /* - Since we now possibly no real_path, we will use it instead if it exists. 
- */ - if (*share->real_path) - fn_format(share->data_file_name, share->real_path, "", ARZ, - MY_REPLACE_EXT|MY_UNPACK_FILENAME); + if (!(azopen(&archive_tmp, share->data_file_name, O_RDONLY|O_BINARY))) + { + DBUG_RETURN(NULL); + } + stats.auto_increment_value= archive_tmp.auto_increment; + share->rows_recorded= archive_tmp.rows; + share->crashed= archive_tmp.dirty; + azclose(&archive_tmp); + VOID(my_hash_insert(&archive_open_tables, (byte*) share)); thr_lock_init(&share->lock); } @@ -490,20 +365,20 @@ ARCHIVE_SHARE *ha_archive::get_share(const char *table_name, Free the share. See ha_example.cc for a description. */ -int ha_archive::free_share(ARCHIVE_SHARE *share) +int ha_archive::free_share(ARCHIVE_SHARE *share_to_free) { int rc= 0; DBUG_ENTER("ha_archive::free_share"); DBUG_PRINT("ha_archive", ("archive table %.*s has %d open handles on entrance", - share->table_name_length, share->table_name, - share->use_count)); + share_to_free->table_name_length, share_to_free->table_name, + share_to_free->use_count)); pthread_mutex_lock(&archive_mutex); - if (!--share->use_count) + if (!--share_to_free->use_count) { - hash_delete(&archive_open_tables, (byte*) share); - thr_lock_delete(&share->lock); - VOID(pthread_mutex_destroy(&share->mutex)); + hash_delete(&archive_open_tables, (byte*) share_to_free); + thr_lock_delete(&share_to_free->lock); + VOID(pthread_mutex_destroy(&share_to_free->mutex)); /* We need to make sure we don't reset the crashed state. If we open a crashed file, wee need to close it as crashed unless @@ -511,18 +386,12 @@ int ha_archive::free_share(ARCHIVE_SHARE *share) Since we will close the data down after this, we go on and count the flush on close; */ - share->forced_flushes++; - (void)write_meta_file(share->meta_file, share->rows_recorded, - share->auto_increment_value, - share->forced_flushes, - share->real_path, - share->crashed ? 
TRUE :FALSE); - if (share->archive_write_open) - if (azclose(&(share->archive_write))) + if (share_to_free->archive_write_open) + { + if (azclose(&(share_to_free->archive_write))) rc= 1; - if (my_close(share->meta_file, MYF(0))) - rc= 1; - my_free((gptr) share, MYF(0)); + } + my_free((gptr) share_to_free, MYF(0)); } pthread_mutex_unlock(&archive_mutex); @@ -532,19 +401,13 @@ int ha_archive::free_share(ARCHIVE_SHARE *share) int ha_archive::init_archive_writer() { DBUG_ENTER("ha_archive::init_archive_writer"); - (void)write_meta_file(share->meta_file, share->rows_recorded, - share->auto_increment_value, - share->forced_flushes, - share->real_path, - TRUE); - /* It is expensive to open and close the data files and since you can't have a gzip file that can be both read and written we keep a writer open that is shared amoung all open tables. */ if (!(azopen(&(share->archive_write), share->data_file_name, - O_WRONLY|O_APPEND|O_BINARY))) + O_RDWR|O_BINARY))) { DBUG_PRINT("ha_archive", ("Could not open archive write file")); share->crashed= TRUE; @@ -561,7 +424,6 @@ int ha_archive::init_archive_writer() */ static const char *ha_archive_exts[] = { ARZ, - ARM, NullS }; @@ -674,15 +536,16 @@ int ha_archive::close(void) int ha_archive::create(const char *name, TABLE *table_arg, HA_CREATE_INFO *create_info) { - File create_file; // We use to create the datafile and the metafile char name_buff[FN_REFLEN]; + char linkname[FN_REFLEN]; int error; azio_stream create_stream; /* Archive file we are working with */ + DBUG_ENTER("ha_archive::create"); stats.auto_increment_value= (create_info->auto_increment_value ? 
create_info->auto_increment_value -1 : - (ulonglong) 0); + (ulonglong) 0); for (uint key= 0; key < table_arg->s->keys; key++) { @@ -703,25 +566,11 @@ int ha_archive::create(const char *name, TABLE *table_arg, } } - if ((create_file= my_create(fn_format(name_buff,name,"",ARM, - MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, - O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) - { - error= my_errno; - goto error; - } - - write_meta_file(create_file, 0, stats.auto_increment_value, 0, - (char *)create_info->data_file_name, - FALSE); - my_close(create_file,MYF(0)); - /* We reuse name_buff since it is available. */ if (create_info->data_file_name) { - char linkname[FN_REFLEN]; DBUG_PRINT("ha_archive", ("archive will create stream file %s", create_info->data_file_name)); @@ -729,29 +578,26 @@ int ha_archive::create(const char *name, TABLE *table_arg, MY_REPLACE_EXT|MY_UNPACK_FILENAME); fn_format(linkname, name, "", ARZ, MY_UNPACK_FILENAME | MY_APPEND_EXT); - if ((create_file= my_create_with_symlink(linkname, name_buff, 0, - O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) - { - error= my_errno; - goto error; - } } else { - if ((create_file= my_create(fn_format(name_buff, name,"", ARZ, - MY_REPLACE_EXT|MY_UNPACK_FILENAME),0, - O_RDWR | O_TRUNC,MYF(MY_WME))) < 0) - { - error= my_errno; - goto error; - } + fn_format(name_buff, name,"", ARZ, + MY_REPLACE_EXT|MY_UNPACK_FILENAME); + linkname[0]= 0; } - if (!azdopen(&create_stream, create_file, O_WRONLY|O_BINARY)) + + if (!(azopen(&create_stream, linkname[0] ? linkname : name_buff, O_CREAT|O_RDWR|O_BINARY))) { error= errno; goto error2; } + + /* + Yes you need to do this, because the starting value + for the autoincrement may not be zero. 
+ */ + create_stream.auto_increment= stats.auto_increment_value; if (azclose(&create_stream)) { error= errno; @@ -761,7 +607,7 @@ int ha_archive::create(const char *name, TABLE *table_arg, DBUG_RETURN(0); error2: - my_close(create_file, MYF(0)); + azclose(&create_stream); delete_table(name); error: /* Return error number, if we got one */ @@ -775,29 +621,16 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer) { my_off_t written; uint *ptr, *end; - unsigned long r_pack_length; - byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; // Longest possible row length with blobs + unsigned int r_pack_length; DBUG_ENTER("ha_archive::real_write_row"); // We pack the row for writing r_pack_length= pack_row(buf); - DBUG_PRINT("ha_archive",("Pack row length %lu", r_pack_length)); - - // Store the size of the row before the row - bzero(size_buffer, ARCHIVE_ROW_HEADER_SIZE); - int4store(size_buffer, (int)r_pack_length); - written= azwrite(writer, size_buffer, ARCHIVE_ROW_HEADER_SIZE); - - if (written != ARCHIVE_ROW_HEADER_SIZE) - { - DBUG_PRINT("ha_archive", ("Died writing row header")); - DBUG_RETURN(-1); - } written= azwrite(writer, record_buffer->buffer, r_pack_length); if (written != r_pack_length) { - DBUG_PRINT("ha_archive", ("Wrote %llu bytes expected %lu", + DBUG_PRINT("ha_archive", ("Wrote %llu bytes expected %u", (unsigned long long) written, r_pack_length)); DBUG_RETURN(-1); @@ -815,6 +648,7 @@ int ha_archive::real_write_row(byte *buf, azio_stream *writer) int ha_archive::max_row_length(const byte *buf) { ulonglong length= table->s->reclength + table->s->fields*2; + length+= ARCHIVE_ROW_HEADER_SIZE; uint *ptr, *end; for (ptr= table->s->blob_field, end=ptr + table->s->blob_fields ; @@ -829,7 +663,7 @@ int ha_archive::max_row_length(const byte *buf) } -unsigned long ha_archive::pack_row(const byte *record) +unsigned int ha_archive::pack_row(const byte *record) { byte *ptr; @@ -841,15 +675,23 @@ unsigned long ha_archive::pack_row(const byte *record) 
DBUG_RETURN(HA_ERR_OUT_OF_MEM); /* purecov: inspected */ } + /* Copy null bits */ - memcpy(record_buffer->buffer, record, table->s->null_bytes); - ptr= record_buffer->buffer + table->s->null_bytes; + memcpy(record_buffer->buffer+ARCHIVE_ROW_HEADER_SIZE, + record, table->s->null_bytes); + ptr= record_buffer->buffer + table->s->null_bytes + ARCHIVE_ROW_HEADER_SIZE; for (Field **field=table->field ; *field ; field++) ptr=(byte*) (*field)->pack((char*) ptr, (char*) record + (*field)->offset()); - DBUG_RETURN((unsigned long) (ptr - record_buffer->buffer)); + int4store(record_buffer->buffer, (int)(ptr - record_buffer->buffer - + ARCHIVE_ROW_HEADER_SIZE)); + DBUG_PRINT("ha_archive",("Pack row length %u", (unsigned int) + (ptr - record_buffer->buffer - + ARCHIVE_ROW_HEADER_SIZE))); + + DBUG_RETURN((unsigned int) (ptr - record_buffer->buffer)); } @@ -886,19 +728,23 @@ int ha_archive::write_row(byte *buf) temp_auto= table->next_number_field->val_int(); /* - Bad news, this will cause a search for the unique value which is very - expensive since we will have to do a table scan which will lock up - all other writers during this period. This could perhaps be optimized - in the future. + Simple optimization to see if we fail for duplicate key immediatly + because we have just given out this value. */ - if (temp_auto == share->auto_increment_value && + if (temp_auto == share->archive_write.auto_increment && mkey->flags & HA_NOSAME) { rc= HA_ERR_FOUND_DUPP_KEY; goto error; } - if (temp_auto < share->auto_increment_value && + /* + Bad news, this will cause a search for the unique value which is very + expensive since we will have to do a table scan which will lock up + all other writers during this period. This could perhaps be optimized + in the future. 
+ */ + if (temp_auto < share->archive_write.auto_increment && mkey->flags & HA_NOSAME) { /* @@ -915,7 +761,6 @@ int ha_archive::write_row(byte *buf) data */ azflush(&(share->archive_write), Z_SYNC_FLUSH); - share->forced_flushes++; /* Set the position of the local read thread to the beginning postion. */ @@ -939,8 +784,8 @@ int ha_archive::write_row(byte *buf) } else { - if (temp_auto > share->auto_increment_value) - stats.auto_increment_value= share->auto_increment_value= temp_auto; + if (temp_auto > share->archive_write.auto_increment) + stats.auto_increment_value= share->archive_write.auto_increment= temp_auto; } } @@ -969,7 +814,7 @@ void ha_archive::get_auto_increment(ulonglong offset, ulonglong increment, ulonglong *nb_reserved_values) { *nb_reserved_values= 1; - *first_value= share->auto_increment_value + 1; + *first_value= share->archive_write.auto_increment + 1; } /* Initialized at each key walk (called multiple times unlike rnd_init()) */ @@ -1014,7 +859,6 @@ int ha_archive::index_read_idx(byte *buf, uint index, const byte *key, */ pthread_mutex_lock(&share->mutex); azflush(&(share->archive_write), Z_SYNC_FLUSH); - share->forced_flushes++; pthread_mutex_unlock(&share->mutex); /* @@ -1093,7 +937,6 @@ int ha_archive::rnd_init(bool scan) { DBUG_PRINT("ha_archive", ("archive flushing out rows for scan")); azflush(&(share->archive_write), Z_SYNC_FLUSH); - share->forced_flushes++; share->dirty= FALSE; } pthread_mutex_unlock(&share->mutex); @@ -1131,10 +974,12 @@ int ha_archive::get_row(azio_stream *file_to_read, byte *buf) /* Reallocate buffer if needed */ bool ha_archive::fix_rec_buff(int length) { - if (! record_buffer->buffer || length > record_buffer->length) + if (! 
record_buffer->buffer || + length > (record_buffer->length + ARCHIVE_ROW_HEADER_SIZE)) { byte *newptr; - if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer, length, + if (!(newptr=(byte*) my_realloc((gptr) record_buffer->buffer, + length + ARCHIVE_ROW_HEADER_SIZE, MYF(MY_ALLOW_ZERO_PTR)))) return 1; /* purecov: inspected */ record_buffer->buffer= newptr; @@ -1147,10 +992,10 @@ int ha_archive::unpack_row(azio_stream *file_to_read, char *record) { DBUG_ENTER("ha_archive::unpack_row"); - unsigned long read; + unsigned int read; int error; byte size_buffer[ARCHIVE_ROW_HEADER_SIZE]; - unsigned long row_len; + unsigned int row_len; /* First we grab the length stored */ read= azread(file_to_read, (byte *)size_buffer, ARCHIVE_ROW_HEADER_SIZE, &error); @@ -1162,13 +1007,15 @@ int ha_archive::unpack_row(azio_stream *file_to_read, char *record) if (read == 0 || read != ARCHIVE_ROW_HEADER_SIZE) DBUG_RETURN(HA_ERR_END_OF_FILE); - row_len= sint4korr(size_buffer); - DBUG_PRINT("ha_archive",("Unpack row length %lu -> %lu", row_len, - (unsigned long)table->s->reclength)); + row_len= uint4korr(size_buffer); + DBUG_PRINT("ha_archive",("Unpack row length %u -> %u", row_len, + (unsigned int)table->s->reclength)); fix_rec_buff(row_len); read= azread(file_to_read, record_buffer->buffer, row_len, &error); + DBUG_ASSERT(row_len == read); + if (read != row_len || error) { DBUG_RETURN(-1); @@ -1195,7 +1042,7 @@ int ha_archive::get_row_version3(azio_stream *file_to_read, byte *buf) int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) { - unsigned long read; + unsigned int read; int error; uint *ptr, *end; char *last; @@ -1207,9 +1054,9 @@ int ha_archive::get_row_version2(azio_stream *file_to_read, byte *buf) if (read != table->s->reclength) { - DBUG_PRINT("ha_archive::get_row_version2", ("Read %lu bytes expected %lu", + DBUG_PRINT("ha_archive::get_row_version2", ("Read %u bytes expected %u", read, - (unsigned long)table->s->reclength)); + (unsigned 
int)table->s->reclength)); DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); } @@ -1362,26 +1209,31 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) char writer_filename[FN_REFLEN]; /* Open up the writer if we haven't yet */ - if (!share->archive_write_open) + if (share->archive_write_open) + { + /* Flush any waiting data */ + azflush(&(share->archive_write), Z_SYNC_FLUSH); + } + else + { init_archive_writer(); - - /* Flush any waiting data */ - azflush(&(share->archive_write), Z_SYNC_FLUSH); - share->forced_flushes++; + } /* Lets create a file to contain the new data */ fn_format(writer_filename, share->table_name, "", ARN, MY_REPLACE_EXT|MY_UNPACK_FILENAME); - if (!(azopen(&writer, writer_filename, O_CREAT|O_WRONLY|O_TRUNC|O_BINARY))) + if (!(azopen(&writer, writer_filename, O_CREAT|O_RDWR|O_BINARY))) DBUG_RETURN(HA_ERR_CRASHED_ON_USAGE); /* An extended rebuild is a lot more effort. We open up each row and re-record it. Any dead rows are removed (aka rows that may have been partially recorded). - */ - if (check_opt->flags == T_EXTEND) + As of Archive format 3, this is the only type that is performed, before this + version it was just done on T_EXTEND + */ + if (1) { DBUG_PRINT("ha_archive", ("archive extended rebuild")); byte *buf; @@ -1421,26 +1273,35 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) if (!rc) { share->rows_recorded= 0; - stats.auto_increment_value= share->auto_increment_value= 0; + stats.auto_increment_value= share->archive_write.auto_increment= 0; record_buffer= read_buffer; while (!(rc= get_row(&archive, buf))) { record_buffer= write_buffer; real_write_row(buf, &writer); + /* + Long term it should be possible to optimize this so that + it is not called on each row. 
+ */ if (table->found_next_number_field) { Field *field= table->found_next_number_field; ulonglong auto_value= - (ulonglong) field->val_int((char*)(buf + field->offset())); - if (share->auto_increment_value < auto_value) - stats.auto_increment_value= share->auto_increment_value= + (ulonglong) field->val_int((char*) + (buf + field->offset())); + DBUG_PRINT("ha_archive::optimize", ("Value %llu\n", (unsigned long long)auto_value)); + if (share->archive_write.auto_increment < auto_value) + stats.auto_increment_value= share->archive_write.auto_increment= auto_value; } - share->rows_recorded++; record_buffer= read_buffer; } + share->rows_recorded= archive.rows; + stats.auto_increment_value= share->archive_write.auto_increment= + writer.auto_increment= archive.auto_increment; + DBUG_PRINT("ha_archive", ("auto to save %llu", writer.auto_increment)); } DBUG_PRINT("ha_archive", ("recovered %llu archive rows", @@ -1453,53 +1314,9 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) if (rc && rc != HA_ERR_END_OF_FILE) goto error; } - else - { - DBUG_PRINT("ha_archive", ("archive quick rebuild")); - /* - The quick method is to just read the data raw, and then compress it directly. 
- */ - unsigned long read, written; - int error; - char block[IO_SIZE]; - if (azrewind(&archive) == -1) - { - rc= HA_ERR_CRASHED_ON_USAGE; - DBUG_PRINT("ha_archive", ("crashed on rewinding file")); - goto error; - } - - while ((read= azread(&archive, block, IO_SIZE, &error)) > 0) - { - if (error) - { - rc= HA_ERR_CRASHED_ON_USAGE; - DBUG_PRINT("ha_archive", ("azread error on read")); - goto error; - } - - written= azwrite(&writer, block, read); - if (written != read) - { - rc= HA_ERR_CRASHED_ON_USAGE; - DBUG_PRINT("ha_archive::real_write_row", - ("Crashed wrote %lu bytes expected %lu", - written, read)); - goto error; - } - } - - if (error) - { - rc= HA_ERR_CRASHED_ON_USAGE; - DBUG_PRINT("ha_archive", ("retrieved zero blocks and error'ed")); - goto error; - } - } azclose(&writer); share->dirty= FALSE; - share->forced_flushes= 0; // now we close both our writer and our reader for the rename azclose(&(share->archive_write)); @@ -1517,7 +1334,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) */ DBUG_PRINT("ha_archive", ("Reopening archive data file")); if (!azopen(&(share->archive_write), share->data_file_name, - O_WRONLY|O_APPEND|O_BINARY) || + O_RDWR|O_BINARY) || !azopen(&archive, share->data_file_name, O_RDONLY|O_BINARY)) { DBUG_PRINT("ha_archive", ("Could not open archive write file")); @@ -1583,8 +1400,10 @@ void ha_archive::update_create_info(HA_CREATE_INFO *create_info) { create_info->auto_increment_value= stats.auto_increment_value; } - if (*share->real_path) +#ifdef DISABLED + if (share->real_path) create_info->data_file_name= share->real_path; +#endif } @@ -1616,8 +1435,10 @@ int ha_archive::info(uint flag) stats.delete_length= 0; stats.index_file_length=0; + /* if (flag & HA_STATUS_AUTO) - stats.auto_increment_value= share->auto_increment_value; + stats.auto_increment_value= share->archive_write.auto_increment; +*/ DBUG_RETURN(0); } @@ -1685,7 +1506,6 @@ int ha_archive::check(THD* thd, HA_CHECK_OPT* check_opt) old_proc_info= 
thd_proc_info(thd, "Checking table"); /* Flush any waiting data */ azflush(&(share->archive_write), Z_SYNC_FLUSH); - share->forced_flushes++; /* First we create a buffer that we can use for reading rows, and can pass diff --git a/storage/archive/ha_archive.h b/storage/archive/ha_archive.h index 3e3016cca1e..14c048b4bb9 100644 --- a/storage/archive/ha_archive.h +++ b/storage/archive/ha_archive.h @@ -39,17 +39,12 @@ typedef struct st_archive_share { uint table_name_length,use_count; pthread_mutex_t mutex; THR_LOCK lock; - File meta_file; /* Meta file we use */ azio_stream archive_write; /* Archive file we are working with */ bool archive_write_open; bool dirty; /* Flag for if a flush should occur */ bool crashed; /* Meta file is crashed */ ha_rows rows_recorded; /* Number of rows in tables */ - ulonglong auto_increment_value; - ulonglong forced_flushes; ulonglong mean_rec_length; - char real_path[FN_REFLEN]; - uint meta_version; } ARCHIVE_SHARE; /* @@ -121,16 +116,6 @@ public: int get_row(azio_stream *file_to_read, byte *buf); int get_row_version2(azio_stream *file_to_read, byte *buf); int get_row_version3(azio_stream *file_to_read, byte *buf); - int read_meta_file(File meta_file, ha_rows *rows, - uint *meta_version, - ulonglong *auto_increment, - ulonglong *forced_flushes, - char *real_path); - int write_meta_file(File meta_file, ha_rows rows, - ulonglong auto_increment, - ulonglong forced_flushes, - char *real_path, - bool dirty); ARCHIVE_SHARE *get_share(const char *table_name, TABLE *table, int *rc); int free_share(ARCHIVE_SHARE *share); int init_archive_writer(); @@ -156,6 +141,6 @@ public: int max_row_length(const byte *buf); bool fix_rec_buff(int length); int unpack_row(azio_stream *file_to_read, char *record); - unsigned long pack_row(const byte *record); + unsigned int pack_row(const byte *record); };