1
0
mirror of https://github.com/MariaDB/server.git synced 2025-07-29 05:21:33 +03:00

MDEV-18479 Assertion `join->best_read < double(1.79769313486231570815e+308L)'

or server crashes in JOIN::fix_all_splittings_in_plan after EXPLAIN

This patch resolves the problem of overflowing when performing
calculations to estimate the cost of an evaluated query execution plan.
The overflow in a non-debug build could cause different kinds of
problems, including crashes of the server.
This commit is contained in:
Igor Babaev
2019-05-27 19:08:00 -07:00
parent 4584c18631
commit 0955462d0a
5 changed files with 772 additions and 79 deletions

View File

@ -5676,7 +5676,7 @@ best_access_path(JOIN *join,
else
tmp= table->file->read_time(key, 1,
(ha_rows) min(tmp,s->worst_seeks));
tmp*= record_count;
tmp= COST_MULT(tmp, record_count);
}
}
else
@ -5841,18 +5841,18 @@ best_access_path(JOIN *join,
else
tmp= table->file->read_time(key, 1,
(ha_rows) min(tmp,s->worst_seeks));
tmp*= record_count;
tmp= COST_MULT(tmp, record_count);
}
else
tmp= best_time; // Do nothing
tmp= best_time; // Do nothing
}
tmp += s->startup_cost;
tmp= COST_ADD(tmp, s->startup_cost);
loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
} /* not ft_key */
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
{
best_time= tmp + records/(double) TIME_FOR_COMPARE;
best_time= COST_ADD(tmp, records/(double) TIME_FOR_COMPARE);
best= tmp;
best_records= records;
best_key= start_key;
@ -5885,14 +5885,18 @@ best_access_path(JOIN *join,
ha_rows rnd_records= matching_candidates_in_table(s, found_constraint);
tmp= s->quick ? s->quick->read_time : s->scan_time();
tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
tmp= COST_ADD(tmp, cmp_time);
/* We read the table as many times as join buffer becomes full. */
tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
record_count /
(double) thd->variables.join_buff_size));
best_time= tmp +
(record_count*join_sel) / TIME_FOR_COMPARE * rnd_records;
double refills= (1.0 + floor((double) cache_record_length(join,idx) *
record_count /
(double) thd->variables.join_buff_size));
tmp= COST_MULT(tmp, refills);
best_time= COST_ADD(tmp,
COST_MULT((record_count*join_sel) / TIME_FOR_COMPARE,
rnd_records));
best= tmp;
records= rows2double(rnd_records);
best_key= hj_start_key;
@ -5960,9 +5964,9 @@ best_access_path(JOIN *join,
access (see first else-branch below), but we don't take it into
account here for range/index_merge access. Find out why this is so.
*/
tmp= record_count *
(s->quick->read_time +
(s->found_records - rnd_records)/(double) TIME_FOR_COMPARE);
double cmp_time= (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE;
tmp= COST_MULT(record_count,
COST_ADD(s->quick->read_time, cmp_time));
loose_scan_opt.check_range_access(join, idx, s->quick);
}
@ -5977,16 +5981,15 @@ best_access_path(JOIN *join,
- read the whole table record
- skip rows which does not satisfy join condition
*/
tmp= record_count *
(tmp +
(s->records - rnd_records)/(double) TIME_FOR_COMPARE);
double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
tmp= COST_MULT(record_count, COST_ADD(tmp,cmp_time));
}
else
{
/* We read the table as many times as join buffer becomes full. */
tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
record_count /
(double) thd->variables.join_buff_size));
double refills= (1.0 + floor((double) cache_record_length(join,idx) *
(record_count /
(double) thd->variables.join_buff_size)));
tmp= COST_MULT(tmp, refills);
/*
We don't make full cartesian product between rows in the scanned
table and existing records because we skip all rows from the
@ -5994,7 +5997,8 @@ best_access_path(JOIN *join,
we read the table (see flush_cached_records for details). Here we
take into account cost to read and skip these records.
*/
tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
tmp= COST_ADD(tmp, cmp_time);
}
}
@ -6005,9 +6009,9 @@ best_access_path(JOIN *join,
tmp give us total cost of using TABLE SCAN
*/
if (best == DBL_MAX ||
(tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records <
COST_ADD(tmp, record_count/(double) TIME_FOR_COMPARE*rnd_records) <
(best_key->is_for_hash_join() ? best_time :
best + record_count/(double) TIME_FOR_COMPARE*records)))
COST_ADD(best, record_count/(double) TIME_FOR_COMPARE*records)))
{
/*
If the table has a range (s->quick is set) make_join_select()
@ -6526,9 +6530,10 @@ optimize_straight_join(JOIN *join, table_map join_tables)
join->positions + idx, &loose_scan_pos);
/* compute the cost of the new plan extended with 's' */
record_count*= join->positions[idx].records_read;
read_time+= join->positions[idx].read_time +
record_count / (double) TIME_FOR_COMPARE;
record_count= COST_MULT(record_count, join->positions[idx].records_read);
read_time= COST_ADD(read_time,
COST_ADD(join->positions[idx].read_time,
record_count / (double) TIME_FOR_COMPARE));
advance_sj_state(join, join_tables, idx, &record_count, &read_time,
&loose_scan_pos);
@ -6710,9 +6715,10 @@ greedy_search(JOIN *join,
swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]);
/* compute the cost of the new plan extended with 'best_table' */
record_count*= join->positions[idx].records_read;
read_time+= join->positions[idx].read_time +
record_count / (double) TIME_FOR_COMPARE;
record_count= COST_MULT(record_count, join->positions[idx].records_read);
read_time= COST_ADD(read_time,
COST_ADD(join->positions[idx].read_time,
record_count / (double) TIME_FOR_COMPARE));
remaining_tables&= ~(best_table->table->map);
--size_remain;
@ -6819,11 +6825,13 @@ void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
}
if (tab->records_read && (cur_table_map & filter_map))
{
record_count *= tab->records_read;
read_time += tab->read_time + record_count / (double) TIME_FOR_COMPARE;
record_count= COST_MULT(record_count, tab->records_read);
read_time= COST_ADD(read_time,
COST_ADD(tab->read_time,
record_count / (double) TIME_FOR_COMPARE));
if (tab->emb_sj_nest)
sj_inner_fanout *= tab->records_read;
}
sj_inner_fanout= COST_MULT(sj_inner_fanout, tab->records_read);
}
if (i == last_sj_table)
{
@ -6861,8 +6869,8 @@ void JOIN::get_prefix_cost_and_fanout(uint n_tables,
{
if (best_positions[i].records_read)
{
record_count *= best_positions[i].records_read;
read_time += best_positions[i].read_time;
record_count= COST_MULT(record_count, best_positions[i].records_read);
read_time= COST_ADD(read_time, best_positions[i].read_time);
}
}
*read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
@ -7068,10 +7076,11 @@ best_extension_by_limited_search(JOIN *join,
record_count, join->positions + idx, &loose_scan_pos);
/* Compute the cost of extending the plan with 's' */
current_record_count= record_count * position->records_read;
current_read_time=read_time + position->read_time +
current_record_count / (double) TIME_FOR_COMPARE;
current_record_count= COST_MULT(record_count, position->records_read);
current_read_time=COST_ADD(read_time,
COST_ADD(position->read_time,
current_record_count /
(double) TIME_FOR_COMPARE));
advance_sj_state(join, remaining_tables, idx, &current_record_count,
&current_read_time, &loose_scan_pos);
@ -7145,7 +7154,7 @@ best_extension_by_limited_search(JOIN *join,
join->sort_by_table !=
join->positions[join->const_tables].table->table)
/* We have to make a temp table */
current_read_time+= current_record_count;
current_read_time= COST_ADD(current_read_time, current_record_count);
if (current_read_time < join->best_read)
{
memcpy((uchar*) join->best_positions, (uchar*) join->positions,
@ -7189,11 +7198,11 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
DBUG_PRINT("best",("read_time: %g record_count: %g",read_time,
record_count));
read_time+=record_count/(double) TIME_FOR_COMPARE;
read_time= COST_ADD(read_time, record_count/(double) TIME_FOR_COMPARE);
if (join->sort_by_table &&
join->sort_by_table !=
join->positions[join->const_tables].table->table)
read_time+=record_count; // We have to make a temp table
read_time= COST_ADD(read_time, record_count); // We have to make a temp table
if (read_time < join->best_read)
{
memcpy((uchar*) join->best_positions,(uchar*) join->positions,
@ -7202,7 +7211,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
}
DBUG_RETURN(FALSE);
}
if (read_time+record_count/(double) TIME_FOR_COMPARE >= join->best_read)
if (COST_ADD(read_time, record_count/(double) TIME_FOR_COMPARE)
>= join->best_read)
DBUG_RETURN(FALSE); /* Found better before */
JOIN_TAB *s;
@ -7224,8 +7234,8 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
Go to the next level only if there hasn't been a better key on
this level! This will cut down the search for a lot simple cases!
*/
double current_record_count=record_count*records;
double current_read_time=read_time+best;
double current_record_count= COST_MULT(record_count, records);
double current_read_time= COST_ADD(read_time, best);
advance_sj_state(join, rest_tables, idx, &current_record_count,
&current_read_time, &loose_scan_pos);
@ -7552,8 +7562,8 @@ prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
#max_nested_outer_joins=64-1) will not make it any more precise.
*/
if (pos->records_read)
found*= pos->records_read;
}
found= COST_MULT(found, pos->records_read);
}
}
return found;
}
@ -13835,11 +13845,12 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
pos= loose_scan_pos;
reopt_remaining_tables &= ~rs->table->map;
rec_count *= pos.records_read;
cost += pos.read_time;
rec_count= COST_MULT(rec_count, pos.records_read);
cost= COST_ADD(cost, pos.read_time);
if (!rs->emb_sj_nest)
*outer_rec_count *= pos.records_read;
*outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
}
join->cur_sj_inner_tables= save_cur_sj_inner_tables;