Mirror of https://github.com/MariaDB/server.git, synced 2025-07-30 16:24:05 +03:00
MDEV-32015 insert into an empty table fails with hash unique
don't enable bulk insert when table->s->long_unique_table
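A minimal reproduction, taken from the test case added in this commit: a multi-row insert into a freshly created, empty table that has a hash-based (long) unique key. With the fix, the distinct rows are inserted and only the expected duplicate-key warning is reported, instead of the statement failing.

create table t1 (f1 varchar(25), unique (f1) using hash);
insert ignore t1 (f1) values ('new york'),('virginia'),('spouse'),(null),
  ('zqekmqpwutxnzddrbjycyo'),('nebraska'),('illinois'),('qe'),('ekmqpwut'),
  ('arizona'),('arizona');
-- expected: Warning 1062 Duplicate entry 'arizona' for key 'f1'
check table t1 extended;
drop table t1;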
@@ -478,5 +478,16 @@ Table Op Msg_type Msg_text
 test.t1 check status OK
 drop table t1;
+#
+# MDEV-32015 insert into an empty table fails with hash unique
+#
+create table t1 (f1 varchar(25), unique (f1) using hash);
+insert ignore t1 (f1) values ('new york'),('virginia'),('spouse'),(null),('zqekmqpwutxnzddrbjycyo'),('nebraska'),('illinois'),('qe'),('ekmqpwut'),('arizona'),('arizona');
+Warnings:
+Warning 1062 Duplicate entry 'arizona' for key 'f1'
+check table t1 extended;
+Table Op Msg_type Msg_text
+test.t1 check status OK
+drop table t1;
 #
 # End of 10.4 tests
 #
@@ -479,6 +479,14 @@ update t1 set f1=9599 where f1=100;
 check table t1 extended;
 drop table t1;
 
+--echo #
+--echo # MDEV-32015 insert into an empty table fails with hash unique
+--echo #
+create table t1 (f1 varchar(25), unique (f1) using hash);
+insert ignore t1 (f1) values ('new york'),('virginia'),('spouse'),(null),('zqekmqpwutxnzddrbjycyo'),('nebraska'),('illinois'),('qe'),('ekmqpwut'),('arizona'),('arizona');
+check table t1 extended;
+drop table t1;
+
 --echo #
 --echo # End of 10.4 tests
 --echo #
@@ -13529,7 +13529,8 @@ Rows_log_event::write_row(rpl_group_info *rgi,
     DBUG_RETURN(error);
   }
 
-  if (m_curr_row == m_rows_buf && !invoke_triggers)
+  if (m_curr_row == m_rows_buf && !invoke_triggers &&
+      !table->s->long_unique_table)
   {
     /*
       This table has no triggers so we can do bulk insert.
@@ -901,7 +901,7 @@ bool mysql_insert(THD *thd,TABLE_LIST *table_list,
       same table in the same connection.
     */
     if (thd->locked_tables_mode <= LTM_LOCK_TABLES &&
-        values_list.elements > 1)
+        !table->s->long_unique_table && values_list.elements > 1)
     {
       using_bulk_insert= 1;
       table->file->ha_start_bulk_insert(values_list.elements);
@@ -3930,7 +3930,7 @@ int select_insert::prepare2(JOIN *)
   DBUG_ENTER("select_insert::prepare2");
   if (thd->lex->current_select->options & OPTION_BUFFER_RESULT &&
       thd->locked_tables_mode <= LTM_LOCK_TABLES &&
-      !thd->lex->describe)
+      !table->s->long_unique_table && !thd->lex->describe)
     table->file->ha_start_bulk_insert((ha_rows) 0);
   if (table->validate_default_values_of_unset_fields(thd))
     DBUG_RETURN(1);
@@ -4664,7 +4664,8 @@ select_create::prepare(List<Item> &_values, SELECT_LEX_UNIT *u)
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
   if (info.handle_duplicates == DUP_UPDATE)
     table->file->extra(HA_EXTRA_INSERT_WITH_UPDATE);
-  if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
+  if (thd->locked_tables_mode <= LTM_LOCK_TABLES &&
+      !table->s->long_unique_table)
     table->file->ha_start_bulk_insert((ha_rows) 0);
   thd->abort_on_warning= !info.ignore && thd->is_strict_mode();
   if (check_that_all_fields_are_given_values(thd, table, table_list))
@@ -641,7 +641,8 @@ int mysql_load(THD *thd, const sql_exchange *ex, TABLE_LIST *table_list,
       (!table->triggers ||
        !table->triggers->has_delete_triggers()))
     table->file->extra(HA_EXTRA_WRITE_CAN_REPLACE);
-  if (thd->locked_tables_mode <= LTM_LOCK_TABLES)
+  if (thd->locked_tables_mode <= LTM_LOCK_TABLES &&
+      !table->s->long_unique_table)
     table->file->ha_start_bulk_insert((ha_rows) 0);
   table->copy_blobs=1;
 
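The guard is the same at every call site: ha_start_bulk_insert() is skipped whenever table->s->long_unique_table is set. For orientation only, here is a sketch of statement forms that reach the functions changed in this commit, using hypothetical objects t, t2, s and data.txt:

insert into t (f1) values ('a'),('b'),('c');                -- mysql_insert()
insert into t select f1 from s;                             -- select_insert::prepare2()
create table t2 (unique (f1) using hash) select f1 from s;  -- select_create::prepare()
load data infile 'data.txt' into table t;                   -- mysql_load()
alter table t force, algorithm=copy;                        -- copy_data_between_tables()

Rows_log_event::write_row() is the corresponding path for applying row-based replication events rather than a SQL statement. In copy_data_between_tables(), the hunks below keep bulk insert enabled for tables with hash uniques only when the ALTER neither adds an index nor changes or drops a column.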
@@ -72,11 +72,9 @@ static bool make_unique_constraint_name(THD *, LEX_CSTRING *, const char *,
                                         List<Virtual_column_info> *, uint *);
 static const char *make_unique_invisible_field_name(THD *, const char *,
                                                     List<Create_field> *);
-static int copy_data_between_tables(THD *, TABLE *,TABLE *,
-                                    List<Create_field> &, bool, uint, ORDER *,
-                                    ha_rows *, ha_rows *,
-                                    Alter_info::enum_enable_or_disable,
-                                    Alter_table_ctx *);
+static int copy_data_between_tables(THD *, TABLE *,TABLE *, bool, uint,
+                                    ORDER *, ha_rows *, ha_rows *,
+                                    Alter_info *, Alter_table_ctx *);
 static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *,
                                       uint *, handler *, KEY **, uint *, int,
                                       const LEX_CSTRING db,
@@ -10548,10 +10546,8 @@ do_continue:;
                         my_error(ER_LOCK_WAIT_TIMEOUT, MYF(0));
                         goto err_new_table_cleanup;
                       });
-    if (copy_data_between_tables(thd, table, new_table,
-                                 alter_info->create_list, ignore,
-                                 order_num, order, &copied, &deleted,
-                                 alter_info->keys_onoff,
+    if (copy_data_between_tables(thd, table, new_table, ignore, order_num,
+                                 order, &copied, &deleted, alter_info,
                                  &alter_ctx))
     {
       goto err_new_table_cleanup;
@@ -10877,11 +10873,9 @@ bool mysql_trans_commit_alter_copy_data(THD *thd)
 
 
 static int
-copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
-                         List<Create_field> &create, bool ignore,
-                         uint order_num, ORDER *order,
-                         ha_rows *copied, ha_rows *deleted,
-                         Alter_info::enum_enable_or_disable keys_onoff,
+copy_data_between_tables(THD *thd, TABLE *from, TABLE *to, bool ignore,
+                         uint order_num, ORDER *order, ha_rows *copied,
+                         ha_rows *deleted, Alter_info *alter_info,
                          Alter_table_ctx *alter_ctx)
 {
   int error= 1;
@@ -10930,7 +10924,8 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
 
   backup_set_alter_copy_lock(thd, from);
 
-  alter_table_manage_keys(to, from->file->indexes_are_disabled(), keys_onoff);
+  alter_table_manage_keys(to, from->file->indexes_are_disabled(),
+                          alter_info->keys_onoff);
 
   from->default_column_bitmaps();
 
@@ -10939,10 +10934,14 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
 
   from->file->info(HA_STATUS_VARIABLE);
   to->file->extra(HA_EXTRA_PREPARE_FOR_ALTER_TABLE);
-  to->file->ha_start_bulk_insert(from->file->stats.records,
-                                 ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
-  bulk_insert_started= 1;
-  List_iterator<Create_field> it(create);
+  if (!to->s->long_unique_table || !(alter_info->flags &
+      (ALTER_ADD_INDEX|ALTER_CHANGE_COLUMN|ALTER_PARSER_DROP_COLUMN)))
+  {
+    to->file->ha_start_bulk_insert(from->file->stats.records,
+                                   ignore ? 0 : HA_CREATE_UNIQUE_INDEX_BY_SORT);
+    bulk_insert_started= 1;
+  }
+  List_iterator<Create_field> it(alter_info->create_list);
   Create_field *def;
   copy_end=copy;
   to->s->default_fields= 0;
@@ -11197,7 +11196,7 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
     /* We are going to drop the temporary table */
     to->file->extra(HA_EXTRA_PREPARE_FOR_DROP);
   }
-  if (unlikely(to->file->ha_end_bulk_insert()) && error <= 0)
+  if (bulk_insert_started && to->file->ha_end_bulk_insert() && error <= 0)
   {
     /* Give error, if not already given */
     if (!thd->is_error())
@@ -11238,7 +11237,6 @@ copy_data_between_tables(THD *thd, TABLE *from, TABLE *to,
   {
     /* This happens if we get an error during initialization of data */
     DBUG_ASSERT(error);
-    to->file->ha_end_bulk_insert();
     ha_enable_transaction(thd, TRUE);
   }
 