MDEV-20371: Invalid reads at plan refinement stage: join->positions...

best_access_path() is called from two optimization phases:

1. Plan choice phase, in choose_plan(). Here, the join prefix being
   considered is in join->positions[]

2. Plan refinement stage, in fix_semijoin_strategies_for_picked_join_order().
   Here, the join prefix is in join->best_positions[]

best_access_path() used to access join->positions[] from stage #2 as well.
This didn't cause any valgrind or ASAN failures (as join->positions[] had
been written to before), but the effect was similar to reading random data:
the join prefix we've picked (in join->best_positions) could have nothing
in common with the join prefix that was last considered (in join->positions).
Author: Sergei Petrunia
Date:   2019-09-10 23:51:42 +03:00
parent  863a951731
commit  c8dc866fde
4 changed files with 52 additions and 28 deletions
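
The shape of the fix, as a minimal standalone sketch (POSITION, JOIN and
prefix_fanout below are simplified stand-ins, not the server's definitions):
the costing routine takes the join prefix as an explicit const POSITION*
argument, so the plan-choice phase passes join->positions while the
refinement phase passes join->best_positions, instead of the routine always
dereferencing join->positions internally.

// Illustrative sketch only, not MariaDB code.
#include <cstdio>

struct POSITION { double records_read; };

struct JOIN
{
  static const unsigned MAX_TABLES= 64;
  POSITION positions[MAX_TABLES];       // prefix currently being explored (plan choice)
  POSITION best_positions[MAX_TABLES];  // prefix of the plan already picked (refinement)
};

/*
  Before the fix, a routine like this read join->positions internally no
  matter which phase called it. Taking the prefix as a parameter lets each
  caller say which array applies.
*/
static double prefix_fanout(const POSITION *join_positions, unsigned idx)
{
  double fanout= 1.0;
  for (unsigned i= 0; i < idx; i++)
    fanout*= join_positions[i].records_read;
  return fanout;
}

int main()
{
  JOIN join;
  for (unsigned i= 0; i < 3; i++)
  {
    join.positions[i].records_read= 10.0;      // last prefix that happened to be considered
    join.best_positions[i].records_read= 2.0;  // prefix that was actually chosen
  }
  // Plan choice (choose_plan and friends) passes join.positions:
  std::printf("explored prefix fanout: %g\n", prefix_fanout(join.positions, 3));
  // Plan refinement must pass join.best_positions:
  std::printf("chosen prefix fanout:   %g\n", prefix_fanout(join.best_positions, 3));
  return 0;
}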

@@ -97,10 +97,6 @@ static int sort_keyuse(KEYUSE *a,KEYUSE *b);
static bool are_tables_local(JOIN_TAB *jtab, table_map used_tables);
static bool create_ref_for_key(JOIN *join, JOIN_TAB *j, KEYUSE *org_keyuse,
bool allow_full_scan, table_map used_tables);
-void best_access_path(JOIN *join, JOIN_TAB *s,
-table_map remaining_tables, uint idx,
-bool disable_jbuf, double record_count,
-POSITION *pos, POSITION *loose_scan_pos);
static void optimize_straight_join(JOIN *join, table_map join_tables);
static bool greedy_search(JOIN *join, table_map remaining_tables,
uint depth, uint prune_level,
@@ -4571,6 +4567,13 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
{
if (choose_plan(join, all_table_map & ~join->const_table_map))
goto error;
+#ifdef HAVE_valgrind
+// JOIN::positions holds the current query plan. We've already
+// made the plan choice, so we should only use JOIN::best_positions
+for (uint k=join->const_tables; k < join->table_count; k++)
+MEM_UNDEFINED(&join->positions[k], sizeof(join->positions[k]));
+#endif
}
else
{
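
The HAVE_valgrind hunk above uses a general debugging trick: once a scratch
array must no longer be read, poison it so that valgrind's Memcheck reports
any later use of it. Below is a self-contained sketch of the same pattern
using valgrind's client request directly (MariaDB's MEM_UNDEFINED macro is
essentially a wrapper around it); Position, finish_plan_choice and the array
names are illustrative, not server code.

// Build with the valgrind headers installed and run under
// "valgrind --tool=memcheck" to see the report.
#include <cstring>
#include <valgrind/memcheck.h>   // VALGRIND_MAKE_MEM_UNDEFINED(addr, len)

struct Position { double records_read; };

static void finish_plan_choice(Position *scratch, Position *chosen, unsigned n)
{
  // Keep the chosen plan; the scratch prefix must not be consulted again.
  std::memcpy(chosen, scratch, n * sizeof(Position));

  // Mark the scratch entries as undefined. Outside valgrind this client
  // request does nothing; under Memcheck, any later use of these values is
  // reported as depending on uninitialised memory.
  for (unsigned i= 0; i < n; i++)
    VALGRIND_MAKE_MEM_UNDEFINED(&scratch[i], sizeof(scratch[i]));
}

int main()
{
  Position scratch[4]= {{1.0}, {2.0}, {3.0}, {4.0}};
  Position chosen[4];
  finish_plan_choice(scratch, chosen, 4);

  double bad= scratch[2].records_read;   // stale read: Memcheck flags its use below
  return bad > chosen[0].records_read ? 0 : 1;
}
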
@@ -6285,6 +6288,7 @@
best_access_path(JOIN *join,
JOIN_TAB *s,
table_map remaining_tables,
+const POSITION *join_positions,
uint idx,
bool disable_jbuf,
double record_count,
@@ -6388,7 +6392,7 @@ best_access_path(JOIN *join,
if (!(keyuse->used_tables & ~join->const_table_map))
const_part|= keyuse->keypart_map;
-double tmp2= prev_record_reads(join->positions, idx,
+double tmp2= prev_record_reads(join_positions, idx,
(found_ref | keyuse->used_tables));
if (tmp2 < best_prev_record_reads)
{
@@ -6429,7 +6433,7 @@ best_access_path(JOIN *join,
Really, there should be records=0.0 (yes!)
but 1.0 would be probably safer
*/
-tmp= prev_record_reads(join->positions, idx, found_ref);
+tmp= prev_record_reads(join_positions, idx, found_ref);
records= 1.0;
}
else
@@ -6445,7 +6449,7 @@ best_access_path(JOIN *join,
if ((key_flags & (HA_NOSAME | HA_NULL_PART_KEY)) == HA_NOSAME ||
MY_TEST(key_flags & HA_EXT_NOSAME))
{
-tmp = prev_record_reads(join->positions, idx, found_ref);
+tmp = prev_record_reads(join_positions, idx, found_ref);
records=1.0;
}
else
@@ -6689,7 +6693,8 @@ best_access_path(JOIN *join,
}
tmp= COST_ADD(tmp, s->startup_cost);
-loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
+loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp,
+found_ref);
} /* not ft_key */
if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
{
@@ -7367,7 +7372,8 @@ optimize_straight_join(JOIN *join, table_map join_tables)
for (JOIN_TAB **pos= join->best_ref + idx ; (s= *pos) ; pos++)
{
/* Find the best access method from 's' to the current partial plan */
-best_access_path(join, s, join_tables, idx, disable_jbuf, record_count,
+best_access_path(join, s, join_tables, join->positions, idx,
+disable_jbuf, record_count,
join->positions + idx, &loose_scan_pos);
/* compute the cost of the new plan extended with 's' */
@@ -8285,8 +8291,9 @@ best_extension_by_limited_search(JOIN *join,
/* Find the best access method from 's' to the current partial plan */
POSITION loose_scan_pos;
-best_access_path(join, s, remaining_tables, idx, disable_jbuf,
-record_count, join->positions + idx, &loose_scan_pos);
+best_access_path(join, s, remaining_tables, join->positions, idx,
+disable_jbuf, record_count, join->positions + idx,
+&loose_scan_pos);
/* Compute the cost of extending the plan with 's' */
current_record_count= COST_MULT(record_count, position->records_read);
@@ -8672,11 +8679,11 @@ cache_record_length(JOIN *join,uint idx)
*/
double
-prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
+prev_record_reads(const POSITION *positions, uint idx, table_map found_ref)
{
double found=1.0;
-POSITION *pos_end= positions - 1;
-for (POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
+const POSITION *pos_end= positions - 1;
+for (const POSITION *pos= positions + idx - 1; pos != pos_end; pos--)
{
if (pos->table->table->map & found_ref)
{
@@ -15400,7 +15407,8 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
if ((i == first_tab && first_alt) || join->positions[i].use_join_buffer)
{
/* Find the best access method that would not use join buffering */
-best_access_path(join, rs, reopt_remaining_tables, i,
+best_access_path(join, rs, reopt_remaining_tables,
+join->positions, i,
TRUE, rec_count,
&pos, &loose_scan_pos);
}