MDEV-22218 InnoDB: Failing assertion: node->pcur->rel_pos == BTR_PCUR_ON upon LOAD DATA with NO_BACKSLASH_ESCAPES in SQL_MODE and unique blob in table
`inited == NONE` at the initialization time does not always mean that it'll be `NONE` later, at the execution time. Use a more complex caller-specific logic to decide whether to create a cloned lookup handler.

Besides LOAD (as in the original bug report) make sure that all prepare_for_insert() invocations are covered by tests. Add tests for CREATE ... SELECT, multi-UPDATE, and multi-DELETE.

Don't enable write cache with long uniques.
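For context, a minimal SQL sketch of the reported scenario, assembled from the bug title and the regression test added below (the sql_mode line follows the title; the committed test itself does not set it, and before this fix the LOAD DATA statement triggered the reported InnoDB assertion failure):

set sql_mode='NO_BACKSLASH_ESCAPES';  -- from the bug title; not required by the committed test
create table t1 (pk int primary key, f blob, unique(f)) engine=innodb;
insert t1 values (1, null);
select * into outfile 't1.data' from t1;            -- writes t1.data into the datadir
load data infile 't1.data' replace into table t1;   -- asserted before this fix
select * from t1;                                   -- expected: 1  NULL
drop table t1;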
@@ -279,3 +279,61 @@ start transaction;
 alter table tmp alter column a set default 8;
 unlock tables;
 drop table t2;
+create table t1 (pk int primary key, f blob, unique(f)) engine=innodb;
+insert t1 values (1, null);
+select * into outfile 't1.data' from t1;
+load data infile 't1.data' replace into table t1;
+select * from t1;
+pk	f
+1	NULL
+drop table t1;
+create table t1 (a int, b blob) engine=myisam;
+insert t1 values (1,'foo'),(2,'bar'), (3, 'bar');
+create table t2 (c int, d blob, unique(d)) engine=myisam;
+insert t2 select * from t1;
+ERROR 23000: Duplicate entry 'bar' for key 'd'
+select * from t2;
+c	d
+1	foo
+2	bar
+insert ignore t2 select * from t1;
+Warnings:
+Warning	1062	Duplicate entry 'foo' for key 'd'
+Warning	1062	Duplicate entry 'bar' for key 'd'
+Warning	1062	Duplicate entry 'bar' for key 'd'
+select * from t2;
+c	d
+1	foo
+2	bar
+replace t2 select * from t1;
+select * from t2;
+c	d
+1	foo
+3	bar
+update t1, t2 set t2.d='off' where t1.a=t2.c and t1.b='foo';
+select * from t2;
+c	d
+1	off
+3	bar
+alter table t2 add system versioning;
+delete from t2 using t1, t2 where t1.a=t2.c and t1.b='foo';
+select * from t2;
+c	d
+3	bar
+create or replace table t2 (a int, b blob, unique(b)) as select * from t1;
+ERROR 23000: Duplicate entry 'bar' for key 'b'
+select * from t2;
+ERROR 42S02: Table 'test.t2' doesn't exist
+create or replace table t2 (a int, b blob, unique(b)) ignore as select * from t1;
+Warnings:
+Warning	1062	Duplicate entry 'bar' for key 'b'
+select * from t2;
+a	b
+1	foo
+2	bar
+create or replace table t2 (a int, b blob, unique(b)) replace as select * from t1;
+select * from t2;
+a	b
+1	foo
+3	bar
+drop table if exists t1, t2;
@@ -353,3 +353,52 @@ start transaction;
 alter table tmp alter column a set default 8;
 unlock tables;
 drop table t2;
+--source include/have_innodb.inc
+
+#
+# MDEV-22218 InnoDB: Failing assertion: node->pcur->rel_pos == BTR_PCUR_ON upon LOAD DATA with NO_BACKSLASH_ESCAPES in SQL_MODE and unique blob in table
+#
+create table t1 (pk int primary key, f blob, unique(f)) engine=innodb;
+insert t1 values (1, null);
+select * into outfile 't1.data' from t1;
+load data infile 't1.data' replace into table t1;
+select * from t1;
+drop table t1;
+--let $datadir= `SELECT @@datadir`
+--remove_file $datadir/test/t1.data
+
+# more tests:
+
+create table t1 (a int, b blob) engine=myisam;
+insert t1 values (1,'foo'),(2,'bar'), (3, 'bar');
+create table t2 (c int, d blob, unique(d)) engine=myisam;
+
+# INSERT...SELECT
+--error ER_DUP_ENTRY
+insert t2 select * from t1;
+select * from t2;
+insert ignore t2 select * from t1;
+select * from t2;
+replace t2 select * from t1;
+select * from t2;
+
+# multi-UPDATE
+update t1, t2 set t2.d='off' where t1.a=t2.c and t1.b='foo';
+select * from t2;
+
+# multi-DELETE
+alter table t2 add system versioning;
+delete from t2 using t1, t2 where t1.a=t2.c and t1.b='foo';
+select * from t2;
+
+# CREATE...SELECT
+--error ER_DUP_ENTRY
+create or replace table t2 (a int, b blob, unique(b)) as select * from t1;
+--error ER_NO_SUCH_TABLE
+select * from t2;
+create or replace table t2 (a int, b blob, unique(b)) ignore as select * from t1;
+select * from t2;
+create or replace table t2 (a int, b blob, unique(b)) replace as select * from t1;
+select * from t2;
+
+drop table if exists t1, t2;
@@ -6909,16 +6909,12 @@ bool handler::prepare_for_row_logging()
   Do all initialization needed for insert
 */
 
-int handler::prepare_for_insert()
+int handler::prepare_for_insert(bool do_create)
 {
   /* Preparation for unique of blob's */
   if (table->s->long_unique_table || table->s->period.unique_keys)
   {
-    /*
-      When doing a scan we can't use the same handler to check
-      duplicate rows. Create a new temporary one
-    */
-    if (inited != NONE && create_lookup_handler())
+    if (do_create && create_lookup_handler())
       return 1;
     alloc_lookup_buffer();
   }
@@ -4650,7 +4650,7 @@ protected:
 public:
   bool check_table_binlog_row_based();
   bool prepare_for_row_logging();
-  int prepare_for_insert();
+  int prepare_for_insert(bool do_create);
   int binlog_log_row(TABLE *table,
                      const uchar *before_record,
                      const uchar *after_record,
@@ -753,7 +753,8 @@ bool mysql_delete(THD *thd, TABLE_LIST *table_list, COND *conds,
 
   if (table->versioned(VERS_TIMESTAMP) ||
       (table_list->has_period() && !portion_of_time_through_update))
-    table->file->prepare_for_insert();
+    table->file->prepare_for_insert(1);
+  DBUG_ASSERT(table->file->inited != handler::NONE);
 
   THD_STAGE_INFO(thd, stage_updating);
   while (likely(!(error=info.read_record())) && likely(!thd->killed) &&
@@ -1243,7 +1244,7 @@ multi_delete::initialize_tables(JOIN *join)
       tbl->prepare_for_position();
 
       if (tbl->versioned(VERS_TIMESTAMP))
-        tbl->file->prepare_for_insert();
+        tbl->file->prepare_for_insert(1);
     }
     else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
              walk == delete_tables)
@@ -879,8 +879,10 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
   if (lock_type != TL_WRITE_DELAYED)
 #endif /* EMBEDDED_LIBRARY */
   {
+    bool create_lookup_handler= duplic != DUP_ERROR;
     if (duplic != DUP_ERROR || ignore)
     {
+      create_lookup_handler= true;
       table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
       if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
       {
@@ -888,7 +890,7 @@ bool mysql_insert(THD *thd, TABLE_LIST *table_list,
          goto abort;
      }
    }
-    table->file->prepare_for_insert();
+    table->file->prepare_for_insert(create_lookup_handler);
    /**
      This is a simple check for the case when the table has a trigger
      that reads from it, or when the statement invokes a stored function
@@ -3446,7 +3448,7 @@ bool Delayed_insert::handle_inserts(void)
     handler_writes() will not have called decide_logging_format.
   */
   table->file->prepare_for_row_logging();
-  table->file->prepare_for_insert();
+  table->file->prepare_for_insert(1);
   using_bin_log= table->file->row_logging;
 
   /*
@@ -3931,8 +3933,10 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
 #endif
 
   thd->cuted_fields=0;
+  bool create_lookup_handler= info.handle_duplicates != DUP_ERROR;
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
   {
+    create_lookup_handler= true;
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
     if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
     {
@@ -3940,7 +3944,7 @@ select_insert::prepare(List<Item> &values, SELECT_LEX_UNIT *u)
       DBUG_RETURN(1);
     }
   }
-  table->file->prepare_for_insert();
+  table->file->prepare_for_insert(create_lookup_handler);
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
@@ -4698,8 +4702,10 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
 
   restore_record(table,s->default_values);      // Get empty record
   thd->cuted_fields=0;
+  bool create_lookup_handler= info.handle_duplicates != DUP_ERROR;
   if (info.ignore || info.handle_duplicates != DUP_ERROR)
   {
+    create_lookup_handler= true;
     table->file->extra(HA_EXTRA_IGNORE_DUP_KEY);
     if (table->file->ha_table_flags() & HA_DUPLICATE_POS)
     {
@@ -4707,7 +4713,7 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
       DBUG_RETURN(1);
     }
   }
-  table->file->prepare_for_insert();
+  table->file->prepare_for_insert(create_lookup_handler);
   if (info.handle_duplicates == DUP_REPLACE &&
       (!table->triggers || !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
@@ -651,12 +651,14 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
 
   thd->abort_on_warning= !ignore && thd->is_strict_mode();
 
+  bool create_lookup_handler= handle_duplicates != DUP_ERROR;
   if ((table_list->table->file->ha_table_flags() & HA_DUPLICATE_POS))
   {
+    create_lookup_handler= true;
     if ((error= table_list->table->file->ha_rnd_init_with_error(0)))
       goto err;
   }
-  table->file->prepare_for_insert();
+  table->file->prepare_for_insert(create_lookup_handler);
   thd_progress_init(thd, 2);
   if (table_list->table->validate_default_values_of_unset_fields(thd))
   {
@@ -11113,7 +11113,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
 
   from->file->column_bitmaps_signal();
 
-  to->file->prepare_for_insert();
+  to->file->prepare_for_insert(0);
+  DBUG_ASSERT(to->file->inited == handler::NONE);
 
   /* Tell handler that we have values for all columns in the to table */
   to->use_all_columns();
@@ -969,7 +969,8 @@ update_begin:
   can_compare_record= records_are_comparable(table);
   explain->tracker.on_scan_init();
 
-  table->file->prepare_for_insert();
+  table->file->prepare_for_insert(1);
+  DBUG_ASSERT(table->file->inited != handler::NONE);
 
   THD_STAGE_INFO(thd, stage_updating);
   while (!(error=info.read_record()) && !thd->killed)
@@ -2028,7 +2029,7 @@ int multi_update::prepare(List<Item> &not_used_values,
     {
       table->read_set= &table->def_read_set;
       bitmap_union(table->read_set, &table->tmp_set);
-      table->file->prepare_for_insert();
+      table->file->prepare_for_insert(1);
     }
   }
   if (unlikely(error))
@@ -2096,7 +2096,7 @@ void ha_maria::start_bulk_insert(ha_rows rows, uint flags)
   DBUG_PRINT("info", ("start_bulk_insert: rows %lu", (ulong) rows));
 
   /* don't enable row cache if too few rows */
-  if (!rows || (rows > MARIA_MIN_ROWS_TO_USE_WRITE_CACHE))
+  if ((!rows || rows > MARIA_MIN_ROWS_TO_USE_WRITE_CACHE) && !has_long_unique())
   {
     ulonglong size= thd->variables.read_buff_size, tmp;
     if (rows)
@@ -1727,7 +1727,7 @@ void ha_myisam::start_bulk_insert(ha_rows rows, uint flags)
                       (ulong) rows, size));
 
   /* don't enable row cache if too few rows */
-  if (! rows || (rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE))
+  if ((!rows || rows > MI_MIN_ROWS_TO_USE_WRITE_CACHE) && !has_long_unique())
     mi_extra(file, HA_EXTRA_WRITE_CACHE, (void*) &size);
 
   can_enable_indexes= mi_is_all_keys_active(file->s->state.key_map,