Mirror of https://github.com/MariaDB/server.git (synced 2025-07-29 05:21:33 +03:00)
Merge 10.3 into 10.4
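Most of this merge replaces raw *= / += arithmetic on optimizer cost estimates
(best_access_path(), optimize_straight_join(), greedy_search(),
best_extension_by_limited_search() and friends) with the COST_MULT()/COST_ADD()
helpers, so that chained cost calculations saturate at a large finite value
instead of overflowing a double and ruining later plan comparisons. It also
brings in two smaller 10.3 changes: preferring an already-chosen hash join over
a table scan in best_access_path(), and walking the embedding list when
computing table->dep_tables in simplify_joins(). A minimal sketch of the
saturating-arithmetic idea (illustrative helpers only, not the actual MariaDB
macros; the ceiling constant is an assumption):

    // Sketch: clamp cost arithmetic at a large finite ceiling so that
    // accumulating per-table costs can never overflow to +inf.
    #include <cfloat>                                   // DBL_MAX
    static const double COST_CEILING= DBL_MAX / 1e30;   // assumed ceiling

    inline double cost_add(double c, double d)
    {
      return (COST_CEILING - d > c) ? c + d : COST_CEILING;
    }
    inline double cost_mult(double c, double f)
    {
      return (COST_CEILING / f > c) ? c * f : COST_CEILING;
    }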
@@ -1,5 +1,5 @@
-/* Copyright (c) 2000, 2016 Oracle and/or its affiliates.
-   Copyright (c) 2009, 2019 MariaDB Corporation
+/* Copyright (c) 2000, 2016, Oracle and/or its affiliates.
+   Copyright (c) 2009, 2019, MariaDB Corporation.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -7451,7 +7451,7 @@ best_access_path(JOIN *join,
           else
             tmp= table->file->read_time(key, 1,
                                         (ha_rows) MY_MIN(tmp,s->worst_seeks));
-          tmp*= record_count;
+          tmp= COST_MULT(tmp, record_count);
         }
       }
       else
@@ -7632,7 +7632,7 @@ best_access_path(JOIN *join,
           else
             tmp= table->file->read_time(key, 1,
                                         (ha_rows) MY_MIN(tmp,s->worst_seeks));
-          tmp*= record_count;
+          tmp= COST_MULT(tmp, record_count);
         }
         else
         {
@@ -7642,7 +7642,7 @@ best_access_path(JOIN *join,
         }
       }
 
-      tmp += s->startup_cost;
+      tmp= COST_ADD(tmp, s->startup_cost);
       loose_scan_opt.check_ref_access_part2(key, start_key, records, tmp);
     } /* not ft_key */
 
@@ -7665,7 +7665,7 @@ best_access_path(JOIN *join,
       if (tmp + 0.0001 < best_time - records/(double) TIME_FOR_COMPARE)
       {
         trace_access_idx.add("chosen", true);
-        best_time= tmp + records/(double) TIME_FOR_COMPARE;
+        best_time= COST_ADD(tmp, records/(double) TIME_FOR_COMPARE);
         best= tmp;
         best_records= records;
         best_key= start_key;
@@ -7707,14 +7707,18 @@ best_access_path(JOIN *join,
                                                   use_cond_selectivity);
 
       tmp= s->quick ? s->quick->read_time : s->scan_time();
-      tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
+      double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
+      tmp= COST_ADD(tmp, cmp_time);
 
       /* We read the table as many times as join buffer becomes full. */
-      tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
-                         record_count /
-                         (double) thd->variables.join_buff_size));
-      best_time= tmp +
-                 (record_count*join_sel) / TIME_FOR_COMPARE * rnd_records;
+
+      double refills= (1.0 + floor((double) cache_record_length(join,idx) *
+                           record_count /
+                           (double) thd->variables.join_buff_size));
+      tmp= COST_MULT(tmp, refills);
+      best_time= COST_ADD(tmp,
+                          COST_MULT((record_count*join_sel) / TIME_FOR_COMPARE,
+                                    rnd_records));
       best= tmp;
       records= rnd_records;
       best_key= hj_start_key;
@@ -7746,7 +7750,8 @@ best_access_path(JOIN *join,
         'range' access using index IDX, and the best way to perform 'ref'
         access is to use the same index IDX, with the same or more key parts.
         (note: it is not clear how this rule is/should be extended to
-        index_merge quick selects)
+        index_merge quick selects). Also if we have a hash join we prefer that
+        over a table scan
     (3) See above note about InnoDB.
     (4) NOT ("FORCE INDEX(...)" is used for table and there is 'ref' access
         path, but there is no quick select)
@@ -7763,6 +7768,7 @@ best_access_path(JOIN *join,
   */
   Json_writer_object trace_access_scan(thd);
   if ((records >= s->found_records || best > s->read_time) &&            // (1)
+      !(best_key && best_key->key == MAX_KEY) &&                         // (2)
       !(s->quick && best_key && s->quick->index == best_key->key &&      // (2)
         best_max_key_part >= s->table->quick_key_parts[best_key->key]) &&// (2)
       !((s->table->file->ha_table_flags() & HA_TABLE_SCAN_ON_INDEX) &&   // (3)
@@ -7795,9 +7801,9 @@ best_access_path(JOIN *join,
         access (see first else-branch below), but we don't take it into
         account here for range/index_merge access. Find out why this is so.
       */
-      tmp= record_count *
-           (s->quick->read_time +
-            (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE);
+      double cmp_time= (s->found_records - rnd_records)/(double) TIME_FOR_COMPARE;
+      tmp= COST_MULT(record_count,
+                     COST_ADD(s->quick->read_time, cmp_time));
 
       if ( s->quick->get_type() == QUICK_SELECT_I::QS_TYPE_RANGE)
       {
@@ -7836,16 +7842,15 @@ best_access_path(JOIN *join,
           - read the whole table record
           - skip rows which does not satisfy join condition
         */
-        tmp= record_count *
-             (tmp +
-              (s->records - rnd_records)/(double) TIME_FOR_COMPARE);
+        double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
+        tmp= COST_MULT(record_count, COST_ADD(tmp,cmp_time));
       }
       else
       {
         /* We read the table as many times as join buffer becomes full. */
-        tmp*= (1.0 + floor((double) cache_record_length(join,idx) *
-                           record_count /
-                           (double) thd->variables.join_buff_size));
+        double refills= (1.0 + floor((double) cache_record_length(join,idx) *
+                        (record_count /
+                        (double) thd->variables.join_buff_size)));
+        tmp= COST_MULT(tmp, refills);
         /*
             We don't make full cartesian product between rows in the scanned
            table and existing records because we skip all rows from the
@@ -7853,7 +7858,8 @@ best_access_path(JOIN *join,
            we read the table (see flush_cached_records for details). Here we
            take into account cost to read and skip these records.
         */
-        tmp+= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
+        double cmp_time= (s->records - rnd_records)/(double) TIME_FOR_COMPARE;
+        tmp= COST_ADD(tmp, cmp_time);
       }
     }
 
@@ -7869,19 +7875,17 @@ best_access_path(JOIN *join,
       tmp give us total cost of using TABLE SCAN
     */
 
-    double best_filter_cmp_gain= 0;
-    if (best_filter)
-    {
-      best_filter_cmp_gain= best_filter->get_cmp_gain(record_count * records);
-    }
+    const double best_filter_cmp_gain= best_filter
+      ? best_filter->get_cmp_gain(record_count * records)
+      : 0;
     trace_access_scan.add("resulting_rows", rnd_records);
     trace_access_scan.add("cost", tmp);
 
     if (best == DBL_MAX ||
-        (tmp + record_count/(double) TIME_FOR_COMPARE*rnd_records <
+        COST_ADD(tmp, record_count/(double) TIME_FOR_COMPARE*rnd_records) <
         (best_key->is_for_hash_join() ? best_time :
-         best + record_count/(double) TIME_FOR_COMPARE*records -
-         best_filter_cmp_gain)))
+         COST_ADD(best - best_filter_cmp_gain,
+                  record_count/(double) TIME_FOR_COMPARE*records)))
     {
       /*
         If the table has a range (s->quick is set) make_join_select()
@@ -8420,16 +8424,13 @@ optimize_straight_join(JOIN *join, table_map join_tables)
                      position, &loose_scan_pos);
 
     /* compute the cost of the new plan extended with 's' */
-    record_count*= position->records_read;
-    double filter_cmp_gain= 0;
-    if (position->range_rowid_filter_info)
-    {
-      filter_cmp_gain=
-        position->range_rowid_filter_info->get_cmp_gain(record_count);
-    }
-    read_time+= position->read_time +
-                record_count / (double) TIME_FOR_COMPARE -
-                filter_cmp_gain;
+    record_count= COST_MULT(record_count, position->records_read);
+    const double filter_cmp_gain= position->range_rowid_filter_info
+      ? position->range_rowid_filter_info->get_cmp_gain(record_count)
+      : 0;
+    read_time+= COST_ADD(read_time - filter_cmp_gain,
+                         COST_ADD(position->read_time,
+                                  record_count / (double) TIME_FOR_COMPARE));
     advance_sj_state(join, join_tables, idx, &record_count, &read_time,
                      &loose_scan_pos);
 
@@ -8619,9 +8620,10 @@ greedy_search(JOIN *join,
     swap_variables(JOIN_TAB*, join->best_ref[idx], join->best_ref[best_idx]);
 
     /* compute the cost of the new plan extended with 'best_table' */
-    record_count*= join->positions[idx].records_read;
-    read_time+= join->positions[idx].read_time +
-                record_count / (double) TIME_FOR_COMPARE;
+    record_count= COST_MULT(record_count, join->positions[idx].records_read);
+    read_time= COST_ADD(read_time,
+                        COST_ADD(join->positions[idx].read_time,
+                                 record_count / (double) TIME_FOR_COMPARE));
 
     remaining_tables&= ~(best_table->table->map);
     --size_remain;
@@ -8728,11 +8730,13 @@ void JOIN::get_partial_cost_and_fanout(int end_tab_idx,
       }
       if (tab->records_read && (cur_table_map & filter_map))
       {
-        record_count *= tab->records_read;
-        read_time += tab->read_time + record_count / (double) TIME_FOR_COMPARE;
+        record_count= COST_MULT(record_count, tab->records_read);
+        read_time= COST_ADD(read_time,
+                            COST_ADD(tab->read_time,
+                                     record_count / (double) TIME_FOR_COMPARE));
         if (tab->emb_sj_nest)
-          sj_inner_fanout *= tab->records_read;
-      }
+          sj_inner_fanout= COST_MULT(sj_inner_fanout, tab->records_read);
+      }
 
       if (i == last_sj_table)
       {
@@ -8770,8 +8774,8 @@ void JOIN::get_prefix_cost_and_fanout(uint n_tables,
   {
     if (best_positions[i].records_read)
     {
-      record_count *= best_positions[i].records_read;
-      read_time += best_positions[i].read_time;
+      record_count= COST_MULT(record_count, best_positions[i].records_read);
+      read_time= COST_ADD(read_time, best_positions[i].read_time);
     }
   }
   *read_time_arg= read_time;// + record_count / TIME_FOR_COMPARE;
@@ -9351,20 +9355,16 @@ best_extension_by_limited_search(JOIN *join,
       best_access_path(join, s, remaining_tables, idx, disable_jbuf,
                        record_count, position, &loose_scan_pos);
 
-      /* Compute the cost of extending the plan with 's', avoid overflow */
-      if (position->records_read < DBL_MAX / record_count)
-        current_record_count= record_count * position->records_read;
-      else
-        current_record_count= DBL_MAX;
-      double filter_cmp_gain= 0;
-      if (position->range_rowid_filter_info)
-      {
-        filter_cmp_gain=
-          position->range_rowid_filter_info->get_cmp_gain(current_record_count);
-      }
-      current_read_time=read_time + position->read_time +
-                        current_record_count / (double) TIME_FOR_COMPARE -
-                        filter_cmp_gain;
+      /* Compute the cost of extending the plan with 's' */
+      current_record_count= COST_MULT(record_count, position->records_read);
+      const double filter_cmp_gain= position->range_rowid_filter_info
+        ? position->range_rowid_filter_info->get_cmp_gain(current_record_count)
+        : 0;
+      current_read_time=COST_ADD(read_time,
+                                 COST_ADD(position->read_time -
+                                          filter_cmp_gain,
+                                          current_record_count /
+                                          (double) TIME_FOR_COMPARE));
 
       advance_sj_state(join, remaining_tables, idx, &current_record_count,
                        &current_read_time, &loose_scan_pos);
@@ -9449,12 +9449,12 @@ best_extension_by_limited_search(JOIN *join,
       if (join->sort_by_table &&
           join->sort_by_table !=
           join->positions[join->const_tables].table->table)
-        /*
-          We may have to make a temp table, note that this is only a
-          heuristic since we cannot know for sure at this point.
+        /*
+          We may have to make a temp table, note that this is only a
+          heuristic since we cannot know for sure at this point.
           Hence it may be wrong.
         */
-        current_read_time+= current_record_count;
+        current_read_time= COST_ADD(current_read_time, current_record_count);
       if (current_read_time < join->best_read)
       {
         memcpy((uchar*) join->best_positions, (uchar*) join->positions,
@@ -9772,8 +9772,8 @@ prev_record_reads(POSITION *positions, uint idx, table_map found_ref)
         #max_nested_outer_joins=64-1) will not make it any more precise.
       */
       if (pos->records_read)
-        found*= pos->records_read;
-    }
+        found= COST_MULT(found, pos->records_read);
+    }
   }
   return found;
 }
@@ -11353,8 +11353,16 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
           /*
             We plan to scan all rows.
             Check again if we should use an index.
-            We could have used an column from a previous table in
-            the index if we are using limit and this is the first table
+
+            There are two cases:
+            1) There could be an index usage the refers to a previous
+               table that we didn't consider before, but could be consider
+               now as a "last resort". For example
+               SELECT * from t1,t2 where t1.a between t2.a and t2.b;
+            2) If the current table is the first non const table
+               and there is a limit it still possibly beneficial
+               to use the index even if the index range is big as
+               we can stop when we've found limit rows.
 
             (1) - Don't switch the used index if we are using semi-join
                   LooseScan on this table. Using different index will not
@@ -16049,8 +16057,20 @@ simplify_joins(JOIN *join, List<TABLE_LIST> *join_list, COND *conds, bool top,
       table->table->maybe_null= FALSE;
       table->outer_join= 0;
       if (!(straight_join || table->straight))
-        table->dep_tables= table->embedding && !table->embedding->sj_subq_pred ?
-                             table->embedding->dep_tables : 0;
+      {
+        table->dep_tables= 0;
+        TABLE_LIST *embedding= table->embedding;
+        while (embedding)
+        {
+          if (embedding->nested_join->join_list.head()->outer_join)
+          {
+            if (!embedding->sj_subq_pred)
+              table->dep_tables= embedding->dep_tables;
+            break;
+          }
+          embedding= embedding->embedding;
+        }
+      }
       if (table->on_expr)
       {
         /* Add ON expression to the WHERE or upper-level ON condition. */
@@ -16575,11 +16595,12 @@ void optimize_wo_join_buffering(JOIN *join, uint first_tab, uint last_tab,
       pos= loose_scan_pos;
 
     reopt_remaining_tables &= ~rs->table->map;
-    rec_count *= pos.records_read;
-    cost += pos.read_time;
+    rec_count= COST_MULT(rec_count, pos.records_read);
+    cost= COST_ADD(cost, pos.read_time);
+
 
     if (!rs->emb_sj_nest)
-      *outer_rec_count *= pos.records_read;
+      *outer_rec_count= COST_MULT(*outer_rec_count, pos.records_read);
   }
   join->cur_sj_inner_tables= save_cur_sj_inner_tables;
 
@@ -21039,7 +21060,6 @@ join_read_first(JOIN_TAB *tab)
   tab->table->status=0;
   tab->read_record.read_record_func= join_read_next;
   tab->read_record.table=table;
   tab->read_record.index=tab->index;
-  tab->read_record.record=table->record[0];
   if (!table->file->inited)
     error= table->file->ha_index_init(tab->index, tab->sorted);
@@ -21080,7 +21100,6 @@ join_read_last(JOIN_TAB *tab)
   tab->table->status=0;
   tab->read_record.read_record_func= join_read_prev;
   tab->read_record.table=table;
   tab->read_record.index=tab->index;
-  tab->read_record.record=table->record[0];
   if (!table->file->inited)
     error= table->file->ha_index_init(tab->index, 1);
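For reference, the accumulation pattern used above in optimize_straight_join()
and greedy_search() composes the two helpers once per join position. A
self-contained toy version of that loop (hypothetical names and values; the
cost_add/cost_mult helpers and the TIME_FOR_COMPARE constant are stand-ins for
illustration, not MariaDB's definitions):

    // Toy demonstration: with plain '*=' the running row estimate would hit
    // +inf after a few 1e80 fan-out steps; with the clamped helpers it
    // saturates and the plan costs stay finite and comparable.
    #include <cstdio>
    #include <cfloat>

    static const double COST_CEILING= DBL_MAX / 1e30;    // assumed ceiling
    static const double TIME_FOR_COMPARE= 5.0;            // stand-in value

    static double cost_add(double c, double d)
    { return (COST_CEILING - d > c) ? c + d : COST_CEILING; }
    static double cost_mult(double c, double f)
    { return (COST_CEILING / f > c) ? c * f : COST_CEILING; }

    struct PlanStep { double records_read, read_time; };

    int main()
    {
      // Deliberately explosive fan-out per joined table.
      PlanStep steps[]= { {1e80, 100.0}, {1e80, 100.0}, {1e80, 100.0},
                          {1e80, 100.0}, {1e80, 100.0} };
      double record_count= 1.0, read_time= 0.0;
      for (const PlanStep &pos : steps)
      {
        record_count= cost_mult(record_count, pos.records_read);
        read_time= cost_add(read_time,
                            cost_add(pos.read_time,
                                     record_count / TIME_FOR_COMPARE));
      }
      std::printf("record_count=%g read_time=%g\n", record_count, read_time);
      return 0;
    }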