
MDEV-35848, MDEV-35568 Reintroduce delete_while_scanning for multi_delete

Reintroduces the delete_while_scanning optimization for multi_delete.
Reverts some test changes from the initial feature development now
that rows are once again deleted on the fly.
Dave Gosselin
2025-01-29 15:21:03 -05:00
parent 8ec275da16
commit 5e07d1abd4
7 changed files with 337 additions and 70 deletions
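In rough terms, delete_while_scanning lets multi_delete remove rows from the first table of the join while the join scan is still running, instead of buffering row ids and deleting everything afterwards in do_deletes(); it is switched off when the target table is referenced again in the join or is not the table being scanned. A minimal sketch of the kind of statement this affects (table and column names are illustrative, not taken from this commit's tests):

create table parent (id int primary key, v int);
create table child (id int primary key, parent_id int);
insert into parent values (1,10),(2,20);
insert into child values (1,1),(2,2);
delete parent, child from parent join child on child.parent_id = parent.id where parent.v > 10;
drop table parent, child;

If parent happens to be the first table scanned by the join and is not referenced again, its rows can be deleted on the fly, while matching child rows are still collected by row id and removed after the scan.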

View File

@@ -237,3 +237,157 @@ update t2, t1 set t2.field=t1.field where t1.id1=t2.id2 and 0=1;
delete t1, t2 from t2 inner join t1 on t1.id1=t2.id2 where 0=1;
drop table t1, t2;
set session sql_buffer_result=default;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select * from t1;
id v
2 3
1 4
select * from t2;
id v
5 5
6 6
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 1;
id v id v
1 4 5 5
delete t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 1;
select * from t1;
id v
2 3
select * from t2;
id v
6 6
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select * from t1;
id v
2 3
1 4
select * from t2;
id v
5 5
6 6
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
id v id v
1 4 5 5
1 4 6 6
delete t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
select * from t1;
id v
2 3
select * from t2;
id v
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select * from t1;
id v
2 3
1 4
select * from t2;
id v
5 5
6 6
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 3;
id v id v
1 4 5 5
1 4 6 6
2 3 5 5
delete t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 3;
select * from t1;
id v
select * from t2;
id v
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
create table t3 (id int primary key, v int);
insert into t1 (id, v) values (1, 1000), (2, 2000), (3, 3000), (4, 4000), (5, 5000);
insert into t2 (id, v) values (10, 100), (20, 200), (30, 300), (40, 400), (50, 500);
insert into t3 (id, v) values (11, 111), (22, 222), (33, 333), (44, 444), (55, 555);
select * from t1;
id v
1 1000
2 2000
3 3000
4 4000
5 5000
select * from t2;
id v
10 100
20 200
30 300
40 400
50 500
select * from t3;
id v
11 111
22 222
33 333
44 444
55 555
select t1.*, t2.*, t3.* from t1, t2, t3 order by t1.id, t2.id, t3.id limit 3;
id v id v id v
1 1000 10 100 11 111
1 1000 10 100 22 222
1 1000 10 100 33 333
delete t1.*, t2.*, t3.* from t1, t2, t3 order by t1.id, t2.id, t3.id limit 3;
select * from t1;
id v
2 2000
3 3000
4 4000
5 5000
select * from t2;
id v
20 200
30 300
40 400
50 500
select * from t3;
id v
44 444
55 555
drop table t1, t2, t3;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
create table t3 (id int primary key, v int);
insert into t1 (id, v) values (1, 1000), (2, 2000), (3, 3000), (4, 4000), (5, 5000);
insert into t2 (id, v) values (10, 100), (20, 200), (30, 300), (40, 400), (50, 500);
insert into t3 (id, v) values (11, 111), (22, 222), (33, 333), (44, 444), (55, 555);
select * from t1;
id v
1 1000
2 2000
3 3000
4 4000
5 5000
select * from t2;
id v
10 100
20 200
30 300
40 400
50 500
select * from t3;
id v
11 111
22 222
33 333
44 444
55 555
delete t1.*, t2.*, t3.* from t1, t2, t3;
select * from t1;
id v
select * from t2;
id v
select * from t3;
id v
drop table t1, t2, t3;

View File

@@ -1,6 +1,9 @@
#
# MDEV-30469 Support ORDER BY and LIMIT for multi-table DELETE, index hints for single-table DELETE.
#
--source include/have_innodb.inc
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (4,1),(3,2),(2,3),(1,4);
@@ -129,3 +132,74 @@ delete t1, t2 from t2 inner join t1 on t1.id1=t2.id2 where 0=1;
drop table t1, t2;
set session sql_buffer_result=default;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select * from t1;
select * from t2;
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 1;
delete t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 1;
select * from t1;
select * from t2;
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select * from t1;
select * from t2;
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
delete t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 2;
select * from t1;
select * from t2;
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
insert into t1 (id, v) values (2,3),(1,4);
insert into t2 (id, v) values (5,5),(6,6);
select * from t1;
select * from t2;
select t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 3;
delete t1.*, t2.* from t1, t2 order by t1.id, t2.id limit 3;
select * from t1;
select * from t2;
drop table t1, t2;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
create table t3 (id int primary key, v int);
insert into t1 (id, v) values (1, 1000), (2, 2000), (3, 3000), (4, 4000), (5, 5000);
insert into t2 (id, v) values (10, 100), (20, 200), (30, 300), (40, 400), (50, 500);
insert into t3 (id, v) values (11, 111), (22, 222), (33, 333), (44, 444), (55, 555);
select * from t1;
select * from t2;
select * from t3;
select t1.*, t2.*, t3.* from t1, t2, t3 order by t1.id, t2.id, t3.id limit 3;
delete t1.*, t2.*, t3.* from t1, t2, t3 order by t1.id, t2.id, t3.id limit 3;
select * from t1;
select * from t2;
select * from t3;
drop table t1, t2, t3;
create table t1 (id int primary key, v int);
create table t2 (id int primary key, v int);
create table t3 (id int primary key, v int);
insert into t1 (id, v) values (1, 1000), (2, 2000), (3, 3000), (4, 4000), (5, 5000);
insert into t2 (id, v) values (10, 100), (20, 200), (30, 300), (40, 400), (50, 500);
insert into t3 (id, v) values (11, 111), (22, 222), (33, 333), (44, 444), (55, 555);
select * from t1;
select * from t2;
select * from t3;
delete t1.*, t2.*, t3.* from t1, t2, t3;
select * from t1;
select * from t2;
select * from t3;
drop table t1, t2, t3;

View File

@@ -3394,12 +3394,12 @@ o_custkey in (select c_custkey from customer
where c_nationkey in (1,2));
select o_orderkey, o_totalprice from t;
o_orderkey o_totalprice
1856 189361.42
324 26868.85
1856 189361.42
1221 117397.16
3139 40975.96
1925 146382.71
1344 43809.37
1925 146382.71
4903 34363.63
5607 24660.06
delete from orders where o_orderDATE between '1992-01-01' and '1992-06-30' and

View File

@@ -120,8 +120,7 @@ Handler_read_rnd_next 4
Variable_name Value
Handler_delete 1
Handler_read_key 2
- Handler_read_rnd 1
- Handler_read_rnd_next 6
+ Handler_read_rnd_next 4
DROP TABLE t1;
#4
@@ -928,9 +927,9 @@ Variable_name Value
Handler_delete 8
Handler_read_key 19
Handler_read_next 3
- Handler_read_rnd 8
+ Handler_read_rnd 5
Handler_read_rnd_deleted 1
- Handler_read_rnd_next 15
+ Handler_read_rnd_next 11
DROP TABLE t1, t2, t3;
#20
@@ -1066,8 +1065,7 @@ Handler_read_rnd_next 12
Variable_name Value
Handler_delete 3
Handler_read_key 4
- Handler_read_rnd 3
- Handler_read_rnd_next 34
+ Handler_read_rnd_next 30
DROP TABLE t1, t2;
#22

View File

@@ -69,7 +69,6 @@ wait/io/table/sql/handler TABLE test1 t2 update 1
wait/io/table/sql/handler TABLE test marker insert 1
wait/io/table/sql/handler TABLE test t1 fetch 1
wait/io/table/sql/handler TABLE test1 t2 fetch 1
- wait/io/table/sql/handler TABLE test t1 fetch 1
wait/io/table/sql/handler TABLE test t1 delete 1
wait/io/table/sql/handler TABLE test1 t2 fetch 1
wait/io/table/sql/handler TABLE test1 t2 delete 1

View File

@@ -7564,7 +7564,6 @@ class multi_delete :public select_result_interceptor
  TABLE_LIST *delete_tables, *table_being_deleted;
  TMP_TABLE_PARAM *tmp_table_param;
  TABLE **tmp_tables, *main_table;
- Unique **tempfiles;
  ha_rows deleted, found;
  uint table_count;
  int error;
@@ -7573,6 +7572,7 @@ class multi_delete :public select_result_interceptor
  bool transactional_tables;
  /* True if at least one table we delete from is not transactional */
  bool normal_tables;
+ bool delete_while_scanning;
  /*
    error handling (rollback and binlogging) can happen in send_eof()
    so that afterward abort_result_set() needs to find out that.

View File

@@ -1112,7 +1112,6 @@ multi_delete::multi_delete(THD *thd_arg,
  normal_tables(0),
  error_handled(0)
{
- tempfiles= thd_arg->calloc<Unique*>(table_count);
  tmp_tables = thd->calloc<TABLE*>(table_count);
  tmp_table_param = thd->calloc<TMP_TABLE_PARAM>(table_count);
}
@@ -1154,7 +1153,7 @@ int multi_delete::prepare2(JOIN *join)
{
  if (!join->need_tmp || !join->tmp_table_keep_current_rowid)
    return 0;
+ delete_while_scanning= false;
  JOIN_TAB *tmptab= join->join_tab + join->exec_join_tab_cnt();
  for (Item **it= tmptab->tmp_table_param->items_to_copy; *it ; it++)
@@ -1195,7 +1194,6 @@ bool
multi_delete::initialize_tables(JOIN *join)
{
  TABLE_LIST *walk;
- Unique **tempfiles_ptr;
  DBUG_ENTER("initialize_tables");
  if (unlikely((thd->variables.option_bits & OPTION_SAFE_UPDATES) &&
@@ -1203,7 +1201,8 @@ multi_delete::initialize_tables(JOIN *join)
    DBUG_RETURN(1);
  main_table=join->join_tab->table;
- table_map tables_to_delete_from=0;
+ table_map tables_to_delete_from= 0;
+ delete_while_scanning= true;
  for (walk= delete_tables; walk; walk= walk->next_local)
  {
    TABLE_LIST *tbl= walk->correspondent_table->find_table_for_update();
@@ -1214,13 +1213,24 @@ multi_delete::initialize_tables(JOIN *join)
      delete is called.
    */
    join->map2table[tbl->table->tablenr]->keep_current_rowid= true;
+   if (delete_while_scanning &&
+       unique_table(thd, tbl, join->tables_list, 0))
+   {
+     /*
+       If the table we are going to delete from appears again
+       in the join, we need to defer the delete so that it does
+       not interfere with the scanning of results.
+     */
+     delete_while_scanning= false;
+   }
  }
  walk= delete_tables;
  uint index= 0;
  for (JOIN_TAB *tab= first_linear_tab(join, WITHOUT_BUSH_ROOTS,
                                       WITH_CONST_TABLES);
       tab;
       tab= next_linear_tab(join, tab, WITHOUT_BUSH_ROOTS))
  {
    if (!tab->bush_children && tab->table->map & tables_to_delete_from)
@@ -1273,16 +1283,19 @@ multi_delete::initialize_tables(JOIN *join)
      tmp_tables[index]->file->extra(HA_EXTRA_WRITE_CACHE);
      ++index;
    }
+   else if ((tab->type != JT_SYSTEM && tab->type != JT_CONST) &&
+            walk == delete_tables)
+   {
+     /*
+       We are not deleting from the table we are scanning. In this
+       case send_data() shouldn't delete any rows, as we may touch
+       the rows in the deleted table many times.
+     */
+     delete_while_scanning= false;
+   }
  }
- walk= delete_tables;
- tempfiles_ptr= tempfiles;
- for (;walk ;walk= walk->next_local)
- {
-   TABLE *table=walk->table;
-   *tempfiles_ptr++= new (thd->mem_root) Unique (refpos_order_cmp, table->file,
-                                                 table->file->ref_length,
-                                                 MEM_STRIP_BUF_SIZE);
- }
+ if (delete_while_scanning)
+   table_being_deleted= delete_tables;
  if (init_ftfuncs(thd, thd->lex->current_select, 1))
    DBUG_RETURN(true);
@@ -1293,23 +1306,15 @@ multi_delete::initialize_tables(JOIN *join)
multi_delete::~multi_delete()
{
- for (table_being_deleted= delete_tables;
-      table_being_deleted;
-      table_being_deleted= table_being_deleted->next_local)
+ for (TABLE_LIST *walk= delete_tables; walk; walk= walk->next_local)
  {
-   TABLE *table= table_being_deleted->table;
+   TABLE *table= walk->table;
    if (!table)
      continue;
    table->no_keyread=0;
    table->no_cache= 0;
  }
- for (uint counter= 0; counter < table_count; counter++)
- {
-   if (tempfiles[counter])
-     delete tempfiles[counter];
- }
  if (tmp_tables)
  {
    for (uint cnt = 0; cnt < table_count; cnt++)
@@ -1326,12 +1331,15 @@ multi_delete::~multi_delete()
int multi_delete::send_data(List<Item> &values)
{
+ int secure_counter= delete_while_scanning ? -1 : 0;
  TABLE_LIST *del_table;
  DBUG_ENTER("multi_delete::send_data");
+ bool ignore= thd->lex->ignore;
  for (del_table= delete_tables;
       del_table;
-      del_table= del_table->next_local)
+      del_table= del_table->next_local, ++secure_counter)
  {
    TABLE *table= del_table->table;
    // DELETE and TRUNCATE don't affect SEQUENCE, so bail early
@@ -1342,31 +1350,68 @@ int multi_delete::send_data(List<Item> &values)
    if (table->status & (STATUS_NULL_ROW | STATUS_DELETED))
      continue;
-   found++;
-   const uint offset= del_table->shared;
-   TABLE *tmp_table= tmp_tables[offset];
-   if (copy_funcs(tmp_table_param[offset].items_to_copy, thd))
-     DBUG_RETURN(1);
-   /* rowid field is NULL if join tmp table has null row from outer join */
-   if (tmp_table->field[0]->is_null())
-     continue;
-   error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
-   if (error)
-   {
-     --found;
-     if (error != HA_ERR_FOUND_DUPP_KEY &&
-         error != HA_ERR_FOUND_DUPP_UNIQUE)
-     {
-       if (create_internal_tmp_table_from_heap(thd, tmp_table,
-                                               tmp_table_param[offset].start_recinfo,
-                                               &tmp_table_param[offset].recinfo,
-                                               error, 1, NULL))
-       {
-         do_delete= 0;
-         DBUG_RETURN(1); // Not a table_is_full error
-       }
-       found++;
-     }
-   }
+   table->file->position(table->record[0]);
+   ++found;
+   if (secure_counter < 0)
+   {
+     /* We are scanning the current table */
+     DBUG_ASSERT(del_table == table_being_deleted);
+     if (table->triggers &&
+         table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
+                                           TRG_ACTION_BEFORE, false))
+       DBUG_RETURN(1);
+     table->status|= STATUS_DELETED;
+     error= table->delete_row();
+     if (likely(!error))
+     {
+       deleted++;
+       if (!table->file->has_transactions())
+         thd->transaction->stmt.modified_non_trans_table= TRUE;
+       if (table->triggers &&
+           table->triggers->process_triggers(thd, TRG_EVENT_DELETE,
+                                             TRG_ACTION_AFTER, false))
+         DBUG_RETURN(1);
+     }
+     else if (!ignore)
+     {
+       /*
+         If the IGNORE option is used errors caused by ha_delete_row don't
+         have to stop the iteration.
+       */
+       table->file->print_error(error,MYF(0));
+       DBUG_RETURN(1);
+     }
+   }
+   else
+   {
+     const uint offset= del_table->shared;
+     TABLE *tmp_table= tmp_tables[offset];
+     if (copy_funcs(tmp_table_param[offset].items_to_copy, thd))
+       DBUG_RETURN(1);
+     /* rowid field is NULL if join tmp table has null row from outer join */
+     if (tmp_table->field[0]->is_null())
+       continue;
+     error= tmp_table->file->ha_write_tmp_row(tmp_table->record[0]);
+     if (error)
+     {
+       --found;
+       if (error != HA_ERR_FOUND_DUPP_KEY &&
+           error != HA_ERR_FOUND_DUPP_UNIQUE)
+       {
+         if (create_internal_tmp_table_from_heap(thd, tmp_table,
+                                                 tmp_table_param[offset].start_recinfo,
+                                                 &tmp_table_param[offset].recinfo,
+                                                 error, 1, NULL))
+         {
+           do_delete= 0;
+           DBUG_RETURN(1); // Not a table_is_full error
+         }
+         found++;
+       }
+     }
+   }
  }
  DBUG_RETURN(0);
@@ -1452,21 +1497,18 @@ int multi_delete::do_deletes()
  if (!found)
    DBUG_RETURN(0);
- table_being_deleted= delete_tables;
- for (uint counter= 0; table_being_deleted;
-      table_being_deleted= table_being_deleted->next_local, counter++)
+ table_being_deleted= (delete_while_scanning ? delete_tables->next_local :
+                       delete_tables);
+ for (; table_being_deleted;
+      table_being_deleted= table_being_deleted->next_local)
  {
    TABLE *table = table_being_deleted->table;
    // DELETE and TRUNCATE don't affect SEQUENCE, so bail early
    if (table->file->ht->db_type == DB_TYPE_SEQUENCE)
      continue;
-   int local_error;
-   if (tempfiles[counter] && unlikely(tempfiles[counter]->get(table)))
-     DBUG_RETURN(1);
-   local_error= rowid_table_deletes(table, thd->lex->ignore);
+   int local_error= rowid_table_deletes(table, thd->lex->ignore);
    if (unlikely(thd->killed) && likely(!local_error))
      DBUG_RETURN(1);
@@ -1556,7 +1598,7 @@ int multi_delete::rowid_table_deletes(TABLE *table, bool ignore)
      table->file->print_error(local_error, MYF(0));
      break;
    }
    /*
      Increase the reported number of deleted rows only if no error occurred
      during ha_delete_row.