MDEV-18889 Long unique on virtual fields crashes server
Use table->record[0] for ha_index_read_map so that the virtual fields (vfields) are updated automatically.
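The crash scenario, taken from the regression test added below, is a long unique constraint over a virtual blob column: the duplicate-key check in check_duplicate_long_entry_key reads the conflicting row back through the index, and that read must also refresh the virtual fields. A minimal reproduction:

create table t1(a blob, b blob as (a) unique);
insert into t1 values(1, default);
# before this fix the second insert crashed the server; with it, the
# statement fails with ERROR 23000: Duplicate entry '1' for key 'b'
insert into t1 values(1, default);
drop table t1;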
@@ -158,3 +158,16 @@ insert into t1 values(1,1);
 ERROR 23000: Duplicate entry '1-1' for key 'a'
 alter table t1 add column c int;
 drop table t1;
+create table t1(a blob , b blob as (a) unique);
+insert into t1 values(1, default);
+insert into t1 values(1, default);
+ERROR 23000: Duplicate entry '1' for key 'b'
+drop table t1;
+create table t1(a blob, b blob, c blob as (left(a, 5000)) virtual, d blob as (left(b, 5000)) persistent, unique(a,b(4000)));
+insert into t1(a,b) values(10,11);
+insert into t1(a,b) values(10,11);
+ERROR 23000: Duplicate entry '10-11' for key 'a'
+insert into t1(a,b) values(2,2);
+insert into t1(a,b) values(2,3);
+insert into t1(a,b) values(3,2);
+drop table t1;
@@ -178,3 +178,21 @@ insert into t1 values(1,1);
 insert into t1 values(1,1);
 alter table t1 add column c int;
 drop table t1;
+
+#
+# MDEV-18889 Long unique on virtual fields crashes server
+#
+create table t1(a blob , b blob as (a) unique);
+insert into t1 values(1, default);
+--error ER_DUP_ENTRY
+insert into t1 values(1, default);
+drop table t1;
+
+create table t1(a blob, b blob, c blob as (left(a, 5000)) virtual, d blob as (left(b, 5000)) persistent, unique(a,b(4000)));
+insert into t1(a,b) values(10,11);
+--error ER_DUP_ENTRY
+insert into t1(a,b) values(10,11);
+insert into t1(a,b) values(2,2);
+insert into t1(a,b) values(2,3);
+insert into t1(a,b) values(3,2);
+drop table t1;
@@ -6547,7 +6547,8 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
   result= h->ha_index_init(key_no, 0);
   if (result)
     return result;
-  result= h->ha_index_read_map(table->check_unique_buf,
+  store_record(table, check_unique_buf);
+  result= h->ha_index_read_map(table->record[0],
                                ptr, HA_WHOLE_KEY, HA_READ_KEY_EXACT);
   if (!result)
   {
@@ -6583,7 +6584,7 @@ static int check_duplicate_long_entry_key(TABLE *table, handler *h,
       }
     }
   }
-  while (!is_same && !(result= h->ha_index_next_same(table->check_unique_buf,
+  while (!is_same && !(result= h->ha_index_next_same(table->record[0],
                                   ptr, key_info->key_length)));
   if (is_same)
     error= HA_ERR_FOUND_DUPP_KEY;
@@ -6597,10 +6598,11 @@ exit:
     table->file->errkey= key_no;
     if (h->ha_table_flags() & HA_DUPLICATE_POS)
     {
-      h->position(table->check_unique_buf);
+      h->position(table->record[0]);
       memcpy(table->file->dup_ref, h->ref, h->ref_length);
     }
   }
+  restore_record(table, check_unique_buf);
   h->ha_index_end();
   return error;
 }