MDEV-371 Unique Index for long columns
post-merge fixes
@@ -1387,13 +1387,13 @@ create table t1(a blob unique) partition by hash(a);
 ERROR HY000: A BLOB field is not allowed in partition function
 #key length > 2^16 -1
 create table t1(a blob, unique(a(65536)));
-ERROR HY000: Max key segment length is 65535
+ERROR 42000: Specified key part was too long; max key part length is 65535 bytes
 create table t1(a blob, unique(a(65535)));
 show create table t1;
 Table	Create Table
 t1	CREATE TABLE `t1` (
   `a` blob DEFAULT NULL,
-  UNIQUE KEY `a` (`a`) USING HASH
+  UNIQUE KEY `a` (`a`(65535)) USING HASH
 ) ENGINE=MyISAM DEFAULT CHARSET=latin1
 drop table t1;
 #64 indexes
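With this change, SHOW CREATE TABLE prints an explicit prefix length for a long-hash unique key, and an over-long prefix fails with the renamed, parameterized error. A minimal illustration, as exercised by the updated result file (output abridged):

    create table t1(a blob, unique(a(65535)));
    show create table t1;
    -- ... UNIQUE KEY `a` (`a`(65535)) USING HASH ...
    drop table t1;
    create table t1(a blob, unique(a(65536)));
    -- ERROR 42000: Specified key part was too long; max key part length is 65535 bytes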
@@ -1,6 +1,10 @@
 let datadir=`select @@datadir`;
 --source include/have_partition.inc
 
+#
+# MDEV-371 Unique indexes for blobs
+#
+
 --echo #Structure of tests
 --echo #First we will check all option for
 --echo #table containing single unique column
@@ -475,7 +479,7 @@ drop table t1;
 --error ER_BLOB_FIELD_IN_PART_FUNC_ERROR
 create table t1(a blob unique) partition by hash(a);
 --echo #key length > 2^16 -1
---error ER_TOO_LONG_HASH_KEYSEG
+--error ER_TOO_LONG_KEYPART
 create table t1(a blob, unique(a(65536)));
 create table t1(a blob, unique(a(65535)));
 show create table t1;
@@ -1,5 +1,9 @@
 --source include/have_debug.inc
---source include/have_innodb.inc
+
+#
+# MDEV-371 Unique indexes for blobs
+#
+
 --echo #In this test case we will check what will happen in the case of hash collision
 
 SET debug_dbug="d,same_long_unique_hash";
@@ -3,6 +3,16 @@ insert into t1 values('RUC');
 insert into t1 values ('RUC');
 ERROR 23000: Duplicate entry 'RUC' for key 'a'
 drop table t1;
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+Table	Create Table
+t1	CREATE TABLE `t1` (
+  `a` blob DEFAULT NULL,
+  `c` int(11) DEFAULT NULL,
+  UNIQUE KEY `a` (`a`) USING HASH,
+  UNIQUE KEY `c` (`c`)
+) ENGINE=InnoDB DEFAULT CHARSET=latin1
+drop table t1;
 #test for concurrent insert of long unique in innodb
 create table t1(a blob unique) engine= InnoDB;
 show create table t1;
@@ -1,11 +1,19 @@
 --source include/have_innodb.inc
 
+#
+# MDEV-371 Unique indexes for blobs
+#
+
 create table t1(a blob unique) engine= InnoDB;
 insert into t1 values('RUC');
 --error ER_DUP_ENTRY
 insert into t1 values ('RUC');
 drop table t1;
 
+create table t1 (a blob unique , c int unique) engine=innodb;
+show create table t1;
+drop table t1;
+
 --echo #test for concurrent insert of long unique in innodb
 create table t1(a blob unique) engine= InnoDB;
 show create table t1;
@@ -33,7 +41,6 @@ insert into t1 values('RC');
 commit;
 set transaction isolation level READ COMMITTED;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('RC');
 commit;
@@ -47,7 +54,6 @@ insert into t1 values('RR');
 commit;
 set transaction isolation level REPEATABLE READ;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('RR');
 
@@ -60,7 +66,6 @@ insert into t1 values('S');
 commit;
 set transaction isolation level SERIALIZABLE;
 start transaction;
---error ER_DUP_ENTRY
 --error ER_LOCK_WAIT_TIMEOUT
 insert into t1 values ('S');
 commit;
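The three hunks above drop ER_DUP_ENTRY as an accepted outcome: with a long-hash unique key in InnoDB, the second transaction blocks on the lock held by the first, uncommitted insert and fails with a lock wait timeout rather than a duplicate-key error. A hedged two-connection sketch of the scenario the test drives (the table `t1(a blob unique) engine=InnoDB`, connection names, and a short lock wait timeout are assumed setup):

    --connect (con1,localhost,root,,test)
    start transaction;
    insert into t1 values ('RC');          # holds a lock on the conflicting key
    --connect (con2,localhost,root,,test)
    set transaction isolation level READ COMMITTED;
    start transaction;
    --error ER_LOCK_WAIT_TIMEOUT
    insert into t1 values ('RC');          # waits for con1, then times out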
@@ -1,3 +1,6 @@
+#
+# MDEV-371 Unique indexes for blobs
+#
 --echo #structure of tests;
 --echo #1 test of table containing single unique blob column;
 --echo #2 test of table containing another unique int/ varchar etc column;
@@ -1,3 +1,6 @@
+#
+# MDEV-371 Unique indexes for blobs
+#
 
 create table t1(a blob , unique(a) using hash);
 --query_vertical show keys from t1;
@@ -7950,8 +7950,7 @@ ER_PERIOD_NOT_FOUND
         eng "Period %`s is not found in table"
 ER_PERIOD_COLUMNS_UPDATED
         eng "Column %`s used in period %`s specified in update SET list"
-
 ER_PERIOD_CONSTRAINT_DROP
         eng "Can't DROP CONSTRAINT `%s`. Use DROP PERIOD `%s` for this"
-ER_TOO_LONG_HASH_KEYSEG
-        eng "Max key segment length is 65535"
+ER_TOO_LONG_KEYPART 42000 S1009
+        eng "Specified key part was too long; max key part length is %u bytes"
@@ -2352,9 +2352,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet,
     if (key_part->field &&
         (key_part->length !=
          table->field[key_part->fieldnr-1]->key_length() &&
-         !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))) &&
-        (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
-         key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+         !(key_info->flags & (HA_FULLTEXT | HA_SPATIAL))))
     {
       packet->append_parenthesized((long) key_part->length /
                                    key_part->field->charset()->mbmaxlen);
@@ -6644,9 +6642,7 @@ static int get_schema_stat_record(THD *thd, TABLE_LIST *tables,
         if (!(key_info->flags & HA_FULLTEXT) &&
             (key_part->field &&
              key_part->length !=
-             show_table->s->field[key_part->fieldnr-1]->key_length()) &&
-            (key_info->algorithm != HA_KEY_ALG_LONG_HASH ||
-             key_info->algorithm == HA_KEY_ALG_LONG_HASH && key_part->length))
+             show_table->s->field[key_part->fieldnr-1]->key_length()))
         {
           table->field[10]->store((longlong) key_part->length /
                                   key_part->field->charset()->mbmaxlen, TRUE);
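The same simplification applies to INFORMATION_SCHEMA.STATISTICS: a prefix on a long-hash key is now reported in SUB_PART whenever the key part is shorter than the full column. A sketch of the expected effect (output abridged and not verified here):

    create table t1(a blob, unique(a(100)));
    select index_name, sub_part from information_schema.statistics
      where table_name = 't1';
    -- index_name  sub_part
    -- a           100
    drop table t1;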
@@ -2778,23 +2778,26 @@ bool quick_rm_table(THD *thd, handlerton *base, const LEX_CSTRING *db,
 
   This will make checking for duplicated keys faster and ensure that
   PRIMARY keys are prioritized.
-  This will not reorder LONG_HASH indexes, because they must match the
-  order of their LONG_UNIQUE_HASH_FIELD's.
 */
 
 static int sort_keys(KEY *a, KEY *b)
 {
   ulong a_flags= a->flags, b_flags= b->flags;
 
+  /*
+    Do not reorder LONG_HASH indexes, because they must match the order
+    of their LONG_UNIQUE_HASH_FIELD's.
+  */
+  if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
+      b->algorithm == HA_KEY_ALG_LONG_HASH)
+    return a->usable_key_parts - b->usable_key_parts;
+
   if (a_flags & HA_NOSAME)
   {
     if (!(b_flags & HA_NOSAME))
       return -1;
     if ((a_flags ^ b_flags) & HA_NULL_PART_KEY)
     {
-      if (a->algorithm == HA_KEY_ALG_LONG_HASH &&
-          b->algorithm == HA_KEY_ALG_LONG_HASH)
-        return a->usable_key_parts - b->usable_key_parts;
       /* Sort NOT NULL keys before other keys */
       return (a_flags & HA_NULL_PART_KEY) ? 1 : -1;
     }
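The guard is hoisted to the top of the comparator because each long-hash key is backed by a hidden DB_ROW_HASH_n column appended to the table in key-creation order; if sort_keys() reordered two such keys relative to each other, they would pair with the wrong hash fields. Roughly (the hidden columns are shown for illustration only and are not user-visible):

    create table t1(a blob unique, b blob unique, c int not null unique);
    -- hidden layout is approximately: a, b, c, DB_ROW_HASH_1, DB_ROW_HASH_2
    -- the key on `c` may be sorted ahead of or behind the hash keys, but the
    -- keys on `a` and `b` must keep their relative order to match their
    -- DB_ROW_HASH_* fields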
@@ -2817,9 +2820,7 @@ static int sort_keys(KEY *a, KEY *b)
     Prefer original key order. usable_key_parts contains here
     the original key position.
   */
-  return ((a->usable_key_parts < b->usable_key_parts) ? -1 :
-          (a->usable_key_parts > b->usable_key_parts) ? 1 :
-          0);
+  return a->usable_key_parts - b->usable_key_parts;
 }
 
 /*
@@ -3302,6 +3303,7 @@ static inline void make_long_hash_field_name(LEX_CSTRING *buf, uint num)
   buf->length= my_snprintf((char *)buf->str,
                            LONG_HASH_FIELD_NAME_LENGTH, "DB_ROW_HASH_%u", num);
 }
+
 /**
   Add fully invisible hash field to table in case of long
   unique column
@@ -3313,7 +3315,6 @@ static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
                                      KEY *key_info)
 {
   List_iterator<Create_field> it(*create_list);
-  // CHARSET_INFO *field_cs;
   Create_field *dup_field, *cf= new (thd->mem_root) Create_field();
   cf->flags|= UNSIGNED_FLAG | LONG_UNIQUE_HASH_FIELD;
   cf->decimals= 0;
@@ -3336,18 +3337,6 @@ static Create_field * add_hash_field(THD * thd, List<Create_field> *create_list,
       it.rewind();
     }
   }
-  /* for (uint i= 0; i < key_info->user_defined_key_parts; i++)
-  {
-    dup_field= create_list->elem(key_info->key_part[i].fieldnr);
-    if (!i)
-      field_cs= dup_field->charset;
-    else if(field_cs != dup_field->charset)
-    {
-      my_error(ER_MULTIPLE_CS_HASH_KEY, MYF(0));
-      return NULL;
-    }
-  }
-  cf->charset= field_cs;*/
   cf->field_name= field_name;
   cf->set_handler(&type_handler_longlong);
   key_info->algorithm= HA_KEY_ALG_LONG_HASH;
@@ -4107,19 +4096,14 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info,
         }
       }
     }
-    /* We can not store key_part_length more then 2^16 - 1 in frm
-       So we will simply make it zero */
-    if (is_hash_field_needed && column->length > (1<<16) - 1)
+    /* We can not store key_part_length more then 2^16 - 1 in frm */
+    if (is_hash_field_needed && column->length > UINT16_MAX)
     {
-      my_error(ER_TOO_LONG_HASH_KEYSEG, MYF(0));
+      my_error(ER_TOO_LONG_KEYPART, MYF(0), UINT16_MAX);
       DBUG_RETURN(TRUE);
     }
     else
      key_part_info->length= (uint16) key_part_length;
-    if (is_hash_field_needed &&
-        (key_part_info->length == sql_field->char_length * sql_field->charset->mbmaxlen ||
-         key_part_info->length == (1<<16) -1))
-      key_part_info->length= 0;
     /* Use packed keys for long strings on the first column */
     if (!((*db_options) & HA_OPTION_NO_PACK_KEYS) &&
         !((create_info->table_options & HA_OPTION_NO_PACK_KEYS)) &&
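The limit comes from the .frm format, which stores each key part length as a 16-bit value; instead of silently zeroing an oversized length as the old code did, the server now rejects it outright. The boundary, as exercised by the updated tests:

    create table t1(a blob, unique(a(65535)));   -- largest length that fits in a uint16
    drop table t1;
    create table t1(a blob, unique(a(65536)));
    -- ERROR 42000: Specified key part was too long; max key part length is 65535 bytes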
@@ -8385,13 +8369,6 @@ mysql_prepare_alter_table(THD *thd, TABLE *table,
       if (cfield->field) // Not new field
       {
         /*
-          if (key_info->algorithm == HA_KEY_ALG_LONG_HASH)
-          {
-            Field *fld= cfield->field;
-            if (fld->max_display_length() == cfield->length*fld->charset()->mbmaxlen
-                && fld->max_data_length() != key_part->length)
-              cfield->length= cfield->char_length= key_part->length;
-          }
           If the field can't have only a part used in a key according to its
           new type, or should not be used partially according to its
           previous type, or the field length is less than the key part
|
34
sql/table.cc
34
sql/table.cc
@@ -2443,8 +2443,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       if (keyinfo->algorithm == HA_KEY_ALG_LONG_HASH)
       {
         share->long_unique_table= 1;
-        if (share->frm_version < FRM_VER_EXPRESSSIONS)
-          share->frm_version= FRM_VER_EXPRESSSIONS;
         hash_keypart= keyinfo->key_part + keyinfo->user_defined_key_parts;
         hash_keypart->length= HA_HASH_KEY_LENGTH_WITHOUT_NULL;
         hash_keypart->store_length= hash_keypart->length;
@@ -2453,8 +2451,6 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
         hash_keypart->key_type= 32834;
         /* Last n fields are unique_index_hash fields*/
         hash_keypart->offset= offset;
-        // hash_keypart->offset= share->reclength
-        //                      - HA_HASH_FIELD_LENGTH*(share->fields - hash_field_used_no);
         hash_keypart->fieldnr= hash_field_used_no + 1;
         hash_field= share->field[hash_field_used_no];
         hash_field->flags|= LONG_UNIQUE_HASH_FIELD;//Used in parse_vcol_defs
@@ -2566,7 +2562,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
       for (i= 0; i < keyinfo->user_defined_key_parts; i++)
       {
         uint fieldnr= keyinfo->key_part[i].fieldnr;
-        field= share->field[keyinfo->key_part[i].fieldnr-1];
+        field= share->field[fieldnr-1];
 
         if (field->null_ptr)
           len_null_byte= HA_KEY_NULL_LENGTH;
@@ -2581,8 +2577,7 @@ int TABLE_SHARE::init_from_binary_frm_image(THD *thd, bool write,
 
         ext_key_length+= keyinfo->key_part[i].length + len_null_byte
                           + length_bytes;
-        if (share->field[fieldnr-1]->key_length() !=
-            keyinfo->key_part[i].length)
+        if (field->key_length() != keyinfo->key_part[i].length)
         {
           add_keyparts_for_this_key= 0;
           break;
@@ -4258,6 +4253,8 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
 {
   size_t key_comment_total_bytes= 0;
   uint i;
+  uchar frm_format= create_info->expression_length ? FRM_VER_EXPRESSSIONS
+                                                   : FRM_VER_TRUE_VARCHAR;
   DBUG_ENTER("prepare_frm_header");
 
   /* Fix this when we have new .frm files; Current limit is 4G rows (TODO) */
@@ -4266,17 +4263,6 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
   if (create_info->min_rows > UINT_MAX32)
     create_info->min_rows= UINT_MAX32;
 
-  size_t key_length, tmp_key_length, tmp, csid;
-  bzero((char*) fileinfo, FRM_HEADER_SIZE);
-  /* header */
-  fileinfo[0]=(uchar) 254;
-  fileinfo[1]= 1;
-  fileinfo[2]= (create_info->expression_length == 0 ? FRM_VER_TRUE_VARCHAR :
-                FRM_VER_EXPRESSSIONS);
-
-  DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
-  fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
-
   /*
     Keep in sync with pack_keys() in unireg.cc
     For each key:
@@ -4295,8 +4281,20 @@ void prepare_frm_header(THD *thd, uint reclength, uchar *fileinfo,
                                 (key_info[i].comment.length > 0));
     if (key_info[i].flags & HA_USES_COMMENT)
       key_comment_total_bytes += 2 + key_info[i].comment.length;
+    if (key_info[i].algorithm == HA_KEY_ALG_LONG_HASH)
+      frm_format= FRM_VER_EXPRESSSIONS;
   }
 
+  size_t key_length, tmp_key_length, tmp, csid;
+  bzero((char*) fileinfo, FRM_HEADER_SIZE);
+  /* header */
+  fileinfo[0]=(uchar) 254;
+  fileinfo[1]= 1;
+  fileinfo[2]= frm_format;
+
+  DBUG_ASSERT(ha_storage_engine_is_enabled(create_info->db_type));
+  fileinfo[3]= (uchar) ha_legacy_type(create_info->db_type);
+
   key_length= keys * (8 + MAX_REF_PARTS * 9 + NAME_LEN + 1) + 16
               + key_comment_total_bytes;
 
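Net effect of the last three hunks: the .frm version is now decided once, at write time, in prepare_frm_header(): FRM_VER_EXPRESSSIONS when the table uses expressions or any long-hash key, FRM_VER_TRUE_VARCHAR otherwise. The read-time bump removed from init_from_binary_frm_image() therefore becomes unnecessary. A quick round-trip that should exercise the new path (a sketch, assuming a server with this patch):

    create table t1(a blob unique) engine=MyISAM;  -- long-hash key selects FRM_VER_EXPRESSSIONS
    flush tables;
    show create table t1;                          -- reopens t1 from the .frm just written
    drop table t1;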