Optimizer code cleanups, no logic changes

- Updated comments
- Added some extra DEBUG
- Indentation changes and break long lines
- Trivial code changes like:
  - Combining 2 statements in one
  - Reorder DBUG lines
  - Use a variable to store a pointer that is used multiple times
- Moved declaration of variables to start of loop/function
- Removed dead or commented code
- Removed wrong DBUG_EXECUTE code in best_extension_by_limited_search()
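One of the bullets above, "use a variable to store a pointer that is used multiple times", is exactly the pattern applied to param->table->opt_range in check_quick_select() further down in this diff. A minimal standalone sketch of the idea (hypothetical names, not code from the patch):

  // Before: the same array element is looked up on every statement, e.g.
  //   table->opt_range[keynr].rows= rows;
  //   table->opt_range[keynr].cost= cost;
  // After: fetch the pointer once and reuse it.
  struct Range_info { double rows, cost; };

  void update_range_stats(Range_info *opt_range, unsigned keynr,
                          double rows, double cost)
  {
    Range_info *range= opt_range + keynr;   // computed once
    range->rows= rows;
    range->cost= cost;
  }

A compiler can usually perform this hoisting on its own; as the commit message says, the gain is readability and shorter lines rather than changed behaviour.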
@@ -1544,7 +1544,7 @@ static bool check_if_pq_applicable(Sort_param *param,
  DBUG_RETURN(false);
  }

- if (param->max_rows + 2 >= UINT_MAX)
+ if (param->max_rows >= UINT_MAX - 2)
  {
  DBUG_PRINT("info", ("Too large LIMIT"));
  DBUG_RETURN(false);
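The only textual change in this hunk swaps `param->max_rows + 2 >= UINT_MAX` for `param->max_rows >= UINT_MAX - 2`. The two forms test the same bound except when the counter sits near the top of its unsigned type, where the addition can wrap around; keeping the arithmetic on the constant side makes the comparison safe by construction. A self-contained C++ illustration of the general point (not code from the patch, and whether the overflow was reachable here in practice is not stated in the commit):

  #include <cstdint>
  #include <cstdio>

  int main()
  {
    uint64_t max_rows= UINT64_MAX - 1;            // counter near the top of its range
    bool old_form= max_rows + 2 >= UINT32_MAX;    // addition wraps to 0: false
    bool new_form= max_rows >= UINT32_MAX - 2;    // no overflow possible: true
    printf("%d %d\n", old_form, new_form);        // prints "0 1"
    return 0;
  }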
@@ -2205,8 +2205,8 @@ Type_handler_decimal_result::sort_length(THD *thd,
  @param thd Thread handler
  @param sortorder Order of items to sort
  @param s_length Number of items to sort
- @param allow_packing_for_sortkeys [out] set to false if packing sort keys is not
- allowed
+ @param allow_packing_for_sortkeys [out] set to false if packing sort keys
+ is not allowed

  @note
  * sortorder->length and other members are updated for each sort item.
@@ -37,8 +37,8 @@
  @param n_ranges_arg Number of ranges in the sequence, or 0 if the caller
  can't efficiently determine it
  @param bufsz INOUT IN: Size of the buffer available for use
- OUT: Size of the buffer that is expected to be actually
- used, or 0 if buffer is not needed.
+ OUT: Size of the buffer that is expected to be
+ actually used, or 0 if buffer is not needed.
  @param flags INOUT A combination of HA_MRR_* flags
  @param cost OUT Estimated cost of MRR access

@@ -286,11 +286,15 @@ handler::multi_range_read_info_const(uint keyno, RANGE_SEQ_IF *seq,
  (single_point_ranges - assigned_single_point_ranges).

  We don't add these to io_blocks as we don't want to penalize equal
- readss (if we did, a range that would read 5 rows would be
+ reads (if we did, a range that would read 5 rows would be
  regarded as better than one equal read).

  Better to assume we have done a records_in_range() for the equal
  range and it's also cached.
+
+ One effect of this is that io_blocks for simple ranges are often 0,
+ as the blocks where already read by records_in_range and we assume
+ that we don't have to read it again.
  */
  io_blocks= (range_blocks_cnt - edge_blocks_cnt);
  unassigned_single_point_ranges+= (single_point_ranges -
@@ -1991,9 +1995,10 @@ bool DsMrr_impl::get_disk_sweep_mrr_cost(uint keynr, ha_rows rows, uint flags,
  else
  {
  cost->reset();
- *buffer_size= (uint)MY_MAX(*buffer_size,
+ *buffer_size= ((uint) MY_MAX(*buffer_size,
  (size_t)(1.2*rows_in_last_step) * elem_size +
- primary_file->ref_length + table->key_info[keynr].key_length);
+ primary_file->ref_length +
+ table->key_info[keynr].key_length));
  }

  Cost_estimate last_step_cost;
sql/opt_range.cc (111 changed lines)
@@ -19,8 +19,8 @@
  Fix that MAYBE_KEY are stored in the tree so that we can detect use
  of full hash keys for queries like:

- select s.id, kws.keyword_id from sites as s,kws where s.id=kws.site_id and kws.keyword_id in (204,205);
+ select s.id, kws.keyword_id from sites as s,kws where s.id=kws.site_id and
+ kws.keyword_id in (204,205);
  */

  /*
@@ -2626,7 +2626,8 @@ static int fill_used_fields_bitmap(PARAM *param)
  In the table struct the following information is updated:
  quick_keys - Which keys can be used
  quick_rows - How many rows the key matches
- opt_range_condition_rows - E(# rows that will satisfy the table condition)
+ opt_range_condition_rows - E(# rows that will satisfy the table
+ condition)

  IMPLEMENTATION
  opt_range_condition_rows value is obtained as follows:
@@ -2774,7 +2775,8 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,

  thd->no_errors=1; // Don't warn about NULL
  init_sql_alloc(key_memory_quick_range_select_root, &alloc,
- thd->variables.range_alloc_block_size, 0, MYF(MY_THREAD_SPECIFIC));
+ thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
  if (!(param.key_parts=
  (KEY_PART*) alloc_root(&alloc,
  sizeof(KEY_PART) *
@@ -2931,6 +2933,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
  TRP_INDEX_INTERSECT *intersect_trp;
  bool can_build_covering= FALSE;
  Json_writer_object trace_range(thd, "analyzing_range_alternatives");
+ TABLE_READ_PLAN *range_trp;

  backup_keys= (SEL_ARG**) alloca(sizeof(backup_keys[0])*param.keys);
  memcpy(&backup_keys[0], &tree->keys[0],
@@ -2939,9 +2942,9 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
  remove_nonrange_trees(&param, tree);

  /* Get best 'range' plan and prepare data for making other plans */
- if (auto range_trp= get_key_scans_params(&param, tree,
+ if ((range_trp= get_key_scans_params(&param, tree,
  only_single_index_range_scan,
- true, best_read_time))
+ true, best_read_time)))
  {
  best_trp= range_trp;
  best_read_time= best_trp->read_cost;
@@ -3048,7 +3051,6 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
  {
  grp_summary.add("chosen", true);
  best_trp= group_trp;
- best_read_time= best_trp->read_cost;
  }
  else
  grp_summary.add("chosen", false).add("cause", "cost");
@@ -3209,7 +3211,8 @@ bool create_key_parts_for_pseudo_indexes(RANGE_OPT_PARAM *param,
  SYNOPSIS
  records_in_column_ranges()
  param the data structure to access descriptors of pseudo indexes
- built over columns used in the condition of the processed query
+ built over columns used in the condition of the processed
+ query
  idx the index of the descriptor of interest in param
  tree the tree representing ranges built for the interesting column

@@ -3334,7 +3337,8 @@ int cmp_quick_ranges(TABLE *table, uint *a, uint *b)
  DESCRIPTION
  This function calculates the selectivity of range conditions cond imposed
  on the rows of 'table' in the processed query.
- The calculated selectivity is assigned to the field table->cond_selectivity.
+ The calculated selectivity is assigned to the field
+ table->cond_selectivity.

  Selectivity is calculated as a product of selectivities imposed by:

@@ -3346,6 +3350,8 @@ int cmp_quick_ranges(TABLE *table, uint *a, uint *b)
  3. Reading a few records from the table pages and checking the condition
  selectivity (this is used for conditions like "column LIKE '%val%'"
  where approaches #1 and #2 do not provide selectivity data).
+ 4. If the selectivity calculated by get_best_ror_intersect() is smaller,
+ use this instead.

  NOTE
  Currently the selectivities of range conditions over different columns are
@@ -3362,6 +3368,9 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
  MY_BITMAP *used_fields= &table->cond_set;
  double table_records= (double)table->stat_records();
  uint optimal_key_order[MAX_KEY];
+ MY_BITMAP handled_columns;
+ my_bitmap_map *buf;
+ QUICK_SELECT_I *quick;
  DBUG_ENTER("calculate_cond_selectivity_for_table");

  table->cond_selectivity= 1.0;
@@ -3369,7 +3378,6 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
  if (table_records == 0)
  DBUG_RETURN(FALSE);

- QUICK_SELECT_I *quick;
  if ((quick=table->reginfo.join_tab->quick) &&
  quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)
  {
@@ -3377,14 +3385,14 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
  DBUG_RETURN(FALSE);
  }

- if (!*cond)
+ if (!*cond || table->pos_in_table_list->schema_table)
  DBUG_RETURN(FALSE);

- if (table->pos_in_table_list->schema_table)
- DBUG_RETURN(FALSE);
-
- MY_BITMAP handled_columns;
- my_bitmap_map* buf;
+ /*
+ This should be pre-alloced so that we could use the same bitmap for all
+ tables. Would also avoid extra memory allocations if this function would
+ be called multiple times per query.
+ */
  if (!(buf= (my_bitmap_map*)thd->alloc(table->s->column_bitmap_size)))
  DBUG_RETURN(TRUE);
  my_bitmap_init(&handled_columns, buf, table->s->fields);
@@ -3512,7 +3520,8 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
  double rows;

  init_sql_alloc(key_memory_quick_range_select_root, &alloc,
- thd->variables.range_alloc_block_size, 0, MYF(MY_THREAD_SPECIFIC));
+ thd->variables.range_alloc_block_size, 0,
+ MYF(MY_THREAD_SPECIFIC));
  param.thd= thd;
  param.mem_root= &alloc;
  param.old_root= thd->mem_root;
@@ -3531,9 +3540,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)

  thd->no_errors=1;

- tree= cond[0]->get_mm_tree(&param, cond);
-
- if (!tree)
+ if (!(tree= cond[0]->get_mm_tree(&param, cond)))
  goto free_alloc;

  table->reginfo.impossible_range= 0;
@@ -3557,7 +3564,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond)
  for (uint idx= 0; idx < param.keys; idx++)
  {
  SEL_ARG *key= tree->keys[idx];
- if (key)
+ if (key) // Quick range found for key
  {
  Json_writer_object selectivity_for_column(thd);
  selectivity_for_column.add("column_name", key->field->field_name);
@@ -5671,8 +5678,7 @@ bool create_fields_bitmap(PARAM *param, MY_BITMAP *fields_bitmap)
  static
  int cmp_intersect_index_scan(INDEX_SCAN_INFO **a, INDEX_SCAN_INFO **b)
  {
- return (*a)->records < (*b)->records ?
- -1 : (*a)->records == (*b)->records ? 0 : 1;
+ return CMP_NUM((*a)->records, (*b)->records);
  }


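cmp_intersect_index_scan() is a qsort-style comparator; the patch replaces the hand-written nested ternary with the server's CMP_NUM helper, which yields the usual -1/0/1 three-way comparison. A standalone sketch of the same shape (the macro body is written out here only for illustration, assuming the usual definition):

  #include <cstdio>

  // Three-way compare: -1, 0 or 1, the shape CMP_NUM provides in the server tree.
  #define CMP_NUM(a, b) (((a) < (b)) ? -1 : ((a) == (b)) ? 0 : 1)

  static int cmp_records(const void *a, const void *b)
  {
    unsigned long long ra= *(const unsigned long long *) a;
    unsigned long long rb= *(const unsigned long long *) b;
    return CMP_NUM(ra, rb);
  }

  int main()
  {
    unsigned long long x= 1, y= 2;
    printf("%d\n", cmp_records(&x, &y));                               // -1
    printf("%d %d %d\n", CMP_NUM(1, 2), CMP_NUM(2, 2), CMP_NUM(3, 2)); // -1 0 1
    return 0;
  }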
@@ -6269,7 +6275,8 @@ bool check_index_intersect_extension(PARTIAL_INDEX_INTERSECT_INFO *curr,
  size_t max_memory_size= common_info->max_memory_size;

  records_sent_to_unique+= ext_index_scan_records;
- cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique, key_size,
+ cost= Unique::get_use_cost(buff_elems, (size_t) records_sent_to_unique,
+ key_size,
  max_memory_size, compare_factor, TRUE,
  &next->in_memory);
  if (records_filtered_out_by_cpk)
@@ -6584,6 +6591,11 @@ ROR_SCAN_INFO *make_ror_scan(const PARAM *param, int idx, SEL_ARG *sel_arg)
  if (bitmap_is_set(&param->needed_fields, key_part->fieldnr-1))
  bitmap_set_bit(&ror_scan->covered_fields, key_part->fieldnr-1);
  }
+
+ /*
+ Cost of reading the keys for the rows, which are later stored in the
+ ror queue.
+ */
  ror_scan->index_read_cost=
  param->table->file->keyread_time(ror_scan->keynr, 1, ror_scan->records);
  DBUG_RETURN(ror_scan);
@@ -6895,7 +6907,7 @@ static double ror_scan_selectivity(const ROR_INTERSECT_INFO *info,
  avoid duplicating the inference code)

  NOTES
- Adding a ROR scan to ROR-intersect "makes sense" iff the cost of ROR-
+ Adding a ROR scan to ROR-intersect "makes sense" if the cost of ROR-
  intersection decreases. The cost of ROR-intersection is calculated as
  follows:

@@ -7057,8 +7069,12 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
  {
  uint idx;
  double min_cost= DBL_MAX;
- DBUG_ENTER("get_best_ror_intersect");
  THD *thd= param->thd;
+ DBUG_ENTER("get_best_ror_intersect");
+ DBUG_PRINT("enter", ("opt_range_condition_rows: %llu cond_selectivity: %g",
+ (ulonglong) param->table->opt_range_condition_rows,
+ param->table->cond_selectivity));

  Json_writer_object trace_ror(thd, "analyzing_roworder_intersect");

  if ((tree->n_ror_scans < 2) || !param->table->stat_records() ||
@@ -7267,6 +7283,8 @@ TRP_ROR_INTERSECT *get_best_ror_intersect(const PARAM *param, SEL_TREE *tree,
  ? "too few indexes to merge"
  : "cost");
  }
+ DBUG_PRINT("enter", ("opt_range_condition_rows: %llu",
+ (ulonglong) param->table->opt_range_condition_rows));
  DBUG_RETURN(trp);
  }

@@ -11535,7 +11553,8 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
  bool *is_ror_scan)
  {
  SEL_ARG_RANGE_SEQ seq;
- RANGE_SEQ_IF seq_if = {NULL, sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
+ RANGE_SEQ_IF seq_if=
+ {NULL, sel_arg_range_seq_init, sel_arg_range_seq_next, 0, 0};
  handler *file= param->table->file;
  ha_rows rows= HA_POS_ERROR;
  uint keynr= param->real_keynr[idx];
@@ -11603,24 +11622,24 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only,
  This check is needed as sometimes that table statistics or range
  estimates may be slightly out of sync.
  */
- rows= table_records;
- set_if_bigger(rows, 1);
+ rows= MY_MAX(table_records, 1);
  param->quick_rows[keynr]= rows;
  }
  param->possible_keys.set_bit(keynr);
  if (update_tbl_stats)
  {
+ TABLE::OPT_RANGE *range= param->table->opt_range + keynr;
  param->table->opt_range_keys.set_bit(keynr);
- param->table->opt_range[keynr].key_parts= param->max_key_parts;
- param->table->opt_range[keynr].ranges= param->range_count;
+ range->key_parts= param->max_key_parts;
+ range->ranges= param->range_count;
  param->table->opt_range_condition_rows=
  MY_MIN(param->table->opt_range_condition_rows, rows);
- param->table->opt_range[keynr].rows= rows;
- param->table->opt_range[keynr].cost= cost->total_cost();
+ range->rows= rows;
+ range->cost= cost->total_cost();
  if (param->table->file->is_clustering_key(keynr))
- param->table->opt_range[keynr].index_only_cost= 0;
+ range->index_only_cost= 0;
  else
- param->table->opt_range[keynr].index_only_cost= cost->index_only_cost();
+ range->index_only_cost= cost->index_only_cost();
  }
  }

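This hunk shows two of the commit-message bullets at once: the two-statement clamp `rows= table_records; set_if_bigger(rows, 1);` is folded into a single MY_MAX() expression, and the repeated `param->table->opt_range[keynr]` lookups are replaced by a cached `range` pointer. Assuming the usual shapes of these helper macros, the two clamp forms are equivalent; a small sketch (not code from the patch):

  #include <cstdio>

  // Usual shapes of the helpers, written out here only for illustration.
  #define MY_MAX(a, b)        ((a) > (b) ? (a) : (b))
  #define set_if_bigger(a, b) do { if ((a) < (b)) (a)= (b); } while (0)

  int main()
  {
    unsigned long long table_records= 0;

    // Old form: assign, then clamp in a second statement.
    unsigned long long rows_old= table_records;
    set_if_bigger(rows_old, 1);

    // New form: one expression.
    unsigned long long rows_new= MY_MAX(table_records, 1);

    printf("%llu %llu\n", rows_old, rows_new);   // prints "1 1"
    return 0;
  }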
@@ -14821,11 +14840,13 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
  set_if_smaller(num_groups, table_records);

  if (used_key_parts > group_key_parts)
- { /*
+ {
+ /*
  Compute the probability that two ends of a subgroup are inside
  different blocks.
  */
- keys_per_subgroup= (ha_rows) index_info->actual_rec_per_key(used_key_parts - 1);
+ keys_per_subgroup= (ha_rows) index_info->actual_rec_per_key(used_key_parts -
+ 1);
  if (keys_per_subgroup >= keys_per_block) /* If a subgroup is bigger than */
  p_overlap= 1.0; /* a block, it will overlap at least two blocks. */
  else
@@ -14849,10 +14870,13 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
  reads the next record without having to re-position to it on every
  group. To make the CPU cost reflect this, we estimate the CPU cost
  as the sum of:
- 1. Cost for evaluating the condition (similarly as for index scan).
+ 1. Cost for evaluating the condition for each num_group
+ (1/TIME_FOR_COMPARE) (similarly as for index scan).
  2. Cost for navigating the index structure (assuming a b-tree).
- Note: We only add the cost for one comparision per block. For a
- b-tree the number of comparisons will be larger.
+ Note: We only add the cost for one index comparision per block. For a
+ b-tree the number of comparisons will be larger. However the cost
+ is low as all of the upper level b-tree blocks should be in
+ memory.
  TODO: This cost should be provided by the storage engine.
  */
  const double tree_traversal_cost=
@@ -14860,8 +14884,8 @@ void cost_group_min_max(TABLE* table, KEY *index_info, uint used_key_parts,
  log(static_cast<double>(keys_per_block))) *
  1/(2*TIME_FOR_COMPARE);

- const double cpu_cost= num_groups *
- (tree_traversal_cost + 1/TIME_FOR_COMPARE_IDX);
+ const double cpu_cost= (num_groups *
+ (tree_traversal_cost + 1/TIME_FOR_COMPARE_IDX));

  *read_cost= io_cost + cpu_cost;
  *records= num_groups;
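The cpu_cost expression above (only reformatted by the patch, not changed) charges roughly one b-tree descent per group: the traversal term scales with log(table rows)/log(keys per block), i.e. the expected height of the index tree. A rough numeric sketch of that shape with made-up inputs (the real TIME_FOR_COMPARE constants live in the server headers and are deliberately omitted here):

  #include <cmath>
  #include <cstdio>

  int main()
  {
    double table_records= 2e6;     // made-up inputs, for shape only
    double keys_per_block= 100;
    double num_groups= 500;

    // Expected height of a b-tree with ~100 keys per node over 2M rows.
    double tree_height= std::ceil(std::log(table_records) /
                                  std::log(keys_per_block));   // 4 levels

    // One descent per group; constant factors omitted.
    double cpu_cost_shape= num_groups * tree_height;

    printf("height=%.0f cost~%.0f\n", tree_height, cpu_cost_shape); // height=4 cost~2000
    return 0;
  }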
@@ -14910,7 +14934,8 @@ TRP_GROUP_MIN_MAX::make_quick(PARAM *param, bool retrieve_full_rows,
  group_prefix_len, group_key_parts,
  used_key_parts, index_info, index,
  read_cost, records, key_infix_len,
- key_infix, parent_alloc, is_index_scan);
+ key_infix, parent_alloc,
+ is_index_scan);
  if (!quick)
  DBUG_RETURN(NULL);

@@ -2507,12 +2507,6 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
  sjm->is_used= FALSE;
  double subjoin_out_rows, subjoin_read_time;

- /*
- join->get_partial_cost_and_fanout(n_tables + join->const_tables,
- table_map(-1),
- &subjoin_read_time,
- &subjoin_out_rows);
- */
  join->get_prefix_cost_and_fanout(n_tables,
  &subjoin_read_time,
  &subjoin_out_rows);
@@ -2520,11 +2514,6 @@ bool optimize_semijoin_nests(JOIN *join, table_map all_table_map)
  sjm->materialization_cost.convert_from_cost(subjoin_read_time);
  sjm->rows_with_duplicates= sjm->rows= subjoin_out_rows;

- // Don't use the following list because it has "stale" items. use
- // ref_pointer_array instead:
- //
- //List<Item> &right_expr_list=
- // sj_nest->sj_subq_pred->unit->first_select()->item_list;
  /*
  Adjust output cardinality estimates. If the subquery has form

@@ -3432,8 +3421,8 @@ bool Firstmatch_picker::check_qep(JOIN *join,
  optimizer_flag(join->thd, OPTIMIZER_SWITCH_SEMIJOIN_WITH_CACHE))
  {
  /*
- An important special case: only one inner table, and @@optimizer_switch
- allows join buffering.
+ An important special case: only one inner table, and
+ @@optimizer_switch allows join buffering.
  - read_time is the same (i.e. FirstMatch doesn't add any cost
  - remove fanout added by the last table
  */
@@ -3584,7 +3573,6 @@ bool Duplicate_weedout_picker::check_qep(JOIN *join,
  records, and we will make
  - sj_outer_fanout table writes
  - sj_inner_fanout*sj_outer_fanout lookups.
-
  */
  double one_lookup_cost= get_tmp_table_lookup_cost(join->thd,
  sj_outer_fanout,
@@ -3661,31 +3649,35 @@ void restore_prev_sj_state(const table_map remaining_tables,
  {
  TABLE_LIST *emb_sj_nest;

- if (tab->emb_sj_nest)
+ if ((emb_sj_nest= tab->emb_sj_nest))
  {
- table_map subq_tables= tab->emb_sj_nest->sj_inner_tables;
+ table_map subq_tables= emb_sj_nest->sj_inner_tables;
  tab->join->sjm_lookup_tables &= ~subq_tables;
- }

- if (!tab->join->emb_sjm_nest && (emb_sj_nest= tab->emb_sj_nest))
+ if (!tab->join->emb_sjm_nest)
  {
- table_map subq_tables= emb_sj_nest->sj_inner_tables &
- ~tab->join->const_table_map;
+ table_map subq_tables= (emb_sj_nest->sj_inner_tables &
+ ~tab->join->const_table_map);
  /* If we're removing the last SJ-inner table, remove the sj-nest */
  if ((remaining_tables & subq_tables) == subq_tables)
  {
- // All non-const tables of the SJ nest are in the remaining_tables.
- // we are not in the nest anymore.
+ /*
+ All non-const tables of the SJ nest are in the remaining_tables.
+ we are not in the nest anymore.
+ */
  tab->join->cur_sj_inner_tables &= ~emb_sj_nest->sj_inner_tables;
  }
  else
  {
- // Semi-join nest has:
- // - a table being removed (not in the prefix)
- // - some tables in the prefix.
+ /*
+ Semi-join nest has:
+ - a table being removed (not in the prefix)
+ - some tables in the prefix.
+ */
  tab->join->cur_sj_inner_tables |= emb_sj_nest->sj_inner_tables;
  }
  }
  }
+ }

  #ifndef DBUG_OFF
  /* positions[idx] has been removed. Verify the state for [0...idx-1] */
@@ -6634,7 +6626,6 @@ bool JOIN::choose_subquery_plan(table_map join_tables)

  /* Get the cost of the modified IN-EXISTS plan. */
  inner_read_time_2= inner_join->best_read;
-
  }
  else
  {
@@ -568,10 +568,10 @@ void Opt_trace_stmt::set_allowed_mem_size(size_t mem_size)

  void Json_writer::add_table_name(const JOIN_TAB *tab)
  {
- DBUG_ASSERT(tab->join->thd->trace_started());
- if (tab != NULL)
- {
  char table_name_buffer[SAFE_NAME_LEN];
+ DBUG_ASSERT(tab != NULL);
+ DBUG_ASSERT(tab->join->thd->trace_started());
+
  if (tab->table && tab->table->derived_select_number)
  {
  /* Derived table name generation */
@@ -594,9 +594,6 @@ void Json_writer::add_table_name(const JOIN_TAB *tab)
  TABLE_LIST *real_table= tab->table->pos_in_table_list;
  add_str(real_table->alias.str, real_table->alias.length);
  }
- }
- else
- DBUG_ASSERT(0);
  }

  void Json_writer::add_table_name(const TABLE *table)
@@ -39,7 +39,8 @@ double Range_rowid_filter_cost_info::lookup_cost(

  /**
  @brief
- The average gain in cost per row to use the range filter with this cost info
+ The average gain in cost per row to use the range filter with this cost
+ info
  */

  inline
@@ -58,8 +59,9 @@ double Range_rowid_filter_cost_info::avg_access_and_eval_gain_per_row(
  @param access_cost_factor the adjusted cost of access a row

  @details
- The current code to estimate the cost of a ref access is quite inconsistent:
- in some cases the effect of page buffers is taken into account, for others
+ The current code to estimate the cost of a ref access is quite
+ inconsistent:
+ In some cases the effect of page buffers is taken into account, for others
  just the engine dependent read_time() is employed. That's why the average
  cost of one random seek might differ from 1.
  The parameter access_cost_factor can be considered as the cost of a random
@@ -5956,7 +5956,7 @@ unit_common_op st_select_lex_unit::common_op()
  else
  {
  if (operation != op)
- operation= OP_MIX;
+ return OP_MIX;
  }
  }
  }
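In common_op() the patch turns `operation= OP_MIX;` into an immediate `return OP_MIX;`: once two different set operators have been seen the classification cannot change, so scanning the remaining selects is pointless. The same early-exit pattern in a self-contained sketch (hypothetical types, not the server's):

  #include <vector>

  enum op_kind { OP_UNION, OP_INTERSECT, OP_EXCEPT, OP_MIX };

  // Classify a sequence of set operators: one common kind, or OP_MIX.
  static op_kind common_op(const std::vector<op_kind> &ops)
  {
    op_kind operation= ops.empty() ? OP_UNION : ops[0];
    for (op_kind op : ops)
    {
      if (operation != op)
        return OP_MIX;          // result can no longer change: stop scanning
    }
    return operation;
  }

  int main()
  {
    return common_op({OP_UNION, OP_INTERSECT}) == OP_MIX ? 0 : 1;
  }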
@@ -5966,12 +5966,13 @@ unit_common_op st_select_lex_unit::common_op()
  Save explain structures of a UNION. The only variable member is whether the
  union has "Using filesort".

- There is also save_union_explain_part2() function, which is called before we read
- UNION's output.
+ There is also save_union_explain_part2() function, which is called before we
+ read UNION's output.

  The reason for it is examples like this:

- SELECT col1 FROM t1 UNION SELECT col2 FROM t2 ORDER BY (select ... from t3 ...)
+ SELECT col1 FROM t1 UNION SELECT col2 FROM t2
+ ORDER BY (select ... from t3 ...)

  Here, the (select ... from t3 ...) subquery must be a child of UNION's
  st_select_lex. However, it is not connected as child until a very late
@@ -10191,7 +10192,7 @@ SELECT_LEX_UNIT *LEX::parsed_select_expr_cont(SELECT_LEX_UNIT *unit,

  /**
  Add primary expression as the next term in a given query expression body
- pruducing a new query expression body
+ producing a new query expression body
  */

  SELECT_LEX_UNIT *
@@ -1990,9 +1990,8 @@ bool JOIN::make_range_rowid_filters()
  bool
  JOIN::init_range_rowid_filters()
  {
- DBUG_ENTER("init_range_rowid_filters");
-
  JOIN_TAB *tab;
+ DBUG_ENTER("init_range_rowid_filters");

  for (tab= first_linear_tab(this, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
  tab;
@@ -2248,7 +2247,8 @@ JOIN::optimize_inner()
  (see build_equal_items() below) because it can be not rebuilt
  at second invocation.
  */
- if (!thd->stmt_arena->is_conventional() && thd->mem_root != thd->stmt_arena->mem_root)
+ if (!thd->stmt_arena->is_conventional() &&
+ thd->mem_root != thd->stmt_arena->mem_root)
  for (TABLE_LIST *tbl= tables_list; tbl; tbl= tbl->next_local)
  if (tbl->table && tbl->on_expr && tbl->table->versioned())
  {
@@ -5318,6 +5318,7 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
  s->tab_list= tables;
  table->pos_in_table_list= tables;
  error= tables->fetch_number_of_rows();
+ /* Calculate table->use_stat_records */
  set_statistics_for_table(join->thd, table);
  bitmap_clear_all(&table->cond_set);

@@ -5987,7 +5988,6 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
  DBUG_RETURN(TRUE); /* purecov: inspected */

  {
- double records= 1;
  SELECT_LEX_UNIT *unit= join->select_lex->master_unit();

  /* Find an optimal join order of the non-constant tables. */
@@ -6018,10 +6018,12 @@ make_join_statistics(JOIN *join, List<TABLE_LIST> &tables_list,
  Calculate estimated number of rows for materialized derived
  table/view.
  */
+ double records= 1.0;
+ ha_rows rows;
  for (i= 0; i < join->table_count ; i++)
  if (double rr= join->best_positions[i].records_read)
  records= COST_MULT(records, rr);
- ha_rows rows= records > (double) HA_ROWS_MAX ? HA_ROWS_MAX : (ha_rows) records;
+ rows= records > (double) HA_ROWS_MAX ? HA_ROWS_MAX : (ha_rows) records;
  set_if_smaller(rows, unit->lim.get_select_limit());
  join->select_lex->increase_derived_records(rows);
  }
@@ -7690,15 +7692,25 @@ void set_position(JOIN *join,uint idx,JOIN_TAB *table,KEYUSE *key)
  Estimate how many records we will get if we read just this table and apply
  a part of WHERE that can be checked for it.

- @detail
+ @param s Current JOIN_TAB
+ @param with_found_constraint There is a filtering condition on the
+ current table. See more below
+ @param use_cond_selectivity Value of optimizer_use_condition_selectivity.
+ If > 1 then use table->cond_selecitivity.
+
+ @return 0.0 No matching rows
+ @return >= 1.0 Number of expected matching rows
+
+ @details
  Estimate how many records we will get if we
  - read the given table with its "independent" access method (either quick
  select or full table/index scan),
  - apply the part of WHERE that refers only to this table.
+ - The result cannot be bigger than table records

- @seealso
- table_cond_selectivity() produces selectivity of condition that is checked
- after joining rows from this table to rows from preceding tables.
+ @see also
+ table_after_join_selectivity() produces selectivity of condition that is
+ checked after joining rows from this table to rows from preceding tables.
  */

  inline
@@ -7804,7 +7816,7 @@ double adjust_quick_cost(double quick_cost, ha_rows records)

  The function finds the best access path to table 's' from the passed
  partial plan where an access path is the general term for any means to
- access the data in 's'. An access path may use either an index or a scan,
+ cacess the data in 's'. An access path may use either an index or a scan,
  whichever is cheaper. The input partial plan is passed via the array
  'join->positions' of length 'idx'. The chosen access method for 's' and its
  cost are stored in 'join->positions[idx]'.
@@ -7838,14 +7850,16 @@ best_access_path(JOIN *join,
  POSITION *loose_scan_pos)
  {
  THD *thd= join->thd;
- uint use_cond_selectivity= thd->variables.optimizer_use_condition_selectivity;
+ uint use_cond_selectivity=
+ thd->variables.optimizer_use_condition_selectivity;
  KEYUSE *best_key= 0;
  uint best_max_key_part= 0;
  my_bool found_constraint= 0;
  double best= DBL_MAX;
  double best_time= DBL_MAX;
  double records= DBL_MAX;
- ha_rows records_for_key= 0;
+ ha_rows records_for_key;
+ double best_filter_cmp_gain;
  table_map best_ref_depends_map= 0;
  /*
  key_dependent is 0 if all key parts could be used or if there was an
@@ -7947,7 +7961,7 @@ best_access_path(JOIN *join,
  do /* For each way to access the keypart */
  {
  /*
- if 1. expression doesn't refer to forward tables
+ If 1. expression does not refer to forward tables
  2. we won't get two ref-or-null's
  */
  all_parts|= keyuse->keypart_map;
@@ -7970,7 +7984,8 @@ best_access_path(JOIN *join,
  (found_ref | keyuse->used_tables));
  if (tmp2 < best_prev_record_reads)
  {
- best_part_found_ref= keyuse->used_tables & ~join->const_table_map;
+ best_part_found_ref= (keyuse->used_tables &
+ ~join->const_table_map);
  best_prev_record_reads= tmp2;
  }
  if (rec > keyuse->ref_table_rows)
@@ -8243,7 +8258,8 @@ best_access_path(JOIN *join,
  if (!found_ref && // (1)
  records < rows) // (3)
  {
- trace_access_idx.add("used_range_estimates", "clipped up");
+ trace_access_idx.add("used_range_estimates",
+ "clipped up");
  records= rows;
  }
  }
@@ -8483,6 +8499,7 @@ best_access_path(JOIN *join,
  join->allowed_outer_join_with_cache)) // (2)
  {
  double join_sel= 0.1;
+ double refills;
  /* Estimate the cost of the hash join access to the table */
  double rnd_records= matching_candidates_in_table(s, found_constraint,
  use_cond_selectivity);
@@ -8492,8 +8509,7 @@ best_access_path(JOIN *join,
  tmp= COST_ADD(tmp, cmp_time);

  /* We read the table as many times as join buffer becomes full. */
- double refills= (1.0 + floor((double) cache_record_length(join,idx) *
+ refills= (1.0 + floor((double) cache_record_length(join,idx) *
  record_count /
  (double) thd->variables.join_buff_size));
  tmp= COST_MULT(tmp, refills);
@@ -8578,6 +8594,10 @@ best_access_path(JOIN *join,
  For each record we:
  - read record range through 'quick'
  - skip rows which does not satisfy WHERE constraints
+
+ Note that s->quick->read_time includes the cost of comparing
+ the row with the where clause (TIME_FOR_COMPARE)
+
  TODO:
  We take into account possible use of join cache for ALL/index
  access (see first else-branch below), but we don't take it into
@@ -8638,9 +8658,13 @@ best_access_path(JOIN *join,
  if ((s->table->map & join->outer_join) || disable_jbuf) // Can't use join cache
  {
  /*
+ Simple scan
+
  For each record we have to:
- - read the whole table record
- - skip rows which does not satisfy join condition
+ - Read the whole table record
+ - Compare with the current where clause with only fields for the table
+ - Compare with the full where and skip rows which does not satisfy
+ the join condition
  */
  double cmp_time= (s->records - rnd_records)/TIME_FOR_COMPARE;
  tmp= COST_MULT(record_count, COST_ADD(tmp,cmp_time));
@@ -8678,9 +8702,9 @@ best_access_path(JOIN *join,
  tmp give us total cost of using TABLE SCAN
  */

- const double best_filter_cmp_gain= best_filter
- ? best_filter->get_cmp_gain(record_count * records)
- : 0;
+ best_filter_cmp_gain= (best_filter ?
+ best_filter->get_cmp_gain(record_count * records) :
+ 0);
  trace_access_scan.add("resulting_rows", rnd_records);
  trace_access_scan.add("cost", tmp);

@@ -8792,6 +8816,7 @@ static void choose_initial_table_order(JOIN *join)
  JOIN_TAB **tab= join->best_ref + join->const_tables;
  JOIN_TAB **tabs_end= tab + join->table_count - join->const_tables;
  DBUG_ENTER("choose_initial_table_order");
+
  /* Find where the top-level JOIN_TABs end and subquery JOIN_TABs start */
  for (; tab != tabs_end; tab++)
  {
@@ -8910,8 +8935,8 @@ choose_plan(JOIN *join, table_map join_tables)
  reorder tables so dependent tables come after tables they depend
  on, otherwise keep tables in the order they were specified in the query
  else
- Apply heuristic: pre-sort all access plans with respect to the number of
- records accessed.
+ Apply heuristic: pre-sort all access plans with respect to the number
+ of records accessed.
  */
  jtab_sort_func= straight_join ? join_tab_cmp_straight : join_tab_cmp;
  }
@@ -9992,7 +10017,7 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
  Selectivity of COND(table) is already accounted for in
  matching_candidates_in_table().
  */
- sel= 1;
+ sel= 1.0;
  }

  /*
@@ -10015,7 +10040,8 @@ double table_cond_selectivity(JOIN *join, uint idx, JOIN_TAB *s,
  next_field != field;
  next_field= next_field->next_equal_field)
  {
- if (!(next_field->table->map & rem_tables) && next_field->table != table)
+ if (!(next_field->table->map & rem_tables) &&
+ next_field->table != table)
  {
  if (field->cond_selectivity > 0)
  {
@@ -10329,7 +10355,6 @@ best_extension_by_limited_search(JOIN *join,
  SORT_POSITION *sort= (SORT_POSITION*) alloca(sizeof(SORT_POSITION)*tables_left);
  SORT_POSITION *sort_end;
  DBUG_ENTER("best_extension_by_limited_search");
-
  DBUG_EXECUTE_IF("show_explain_probe_best_ext_lim_search",
  if (dbug_user_var_equals_int(thd,
  "show_explain_probe_select_id",
@@ -10434,6 +10459,8 @@ best_extension_by_limited_search(JOIN *join,
  double current_record_count, current_read_time;
  double partial_join_cardinality;
  POSITION *position= join->positions + idx, *loose_scan_pos;
+ double filter_cmp_gain;
+ double pushdown_cond_selectivity;
  Json_writer_object trace_one_table(thd);

  if (unlikely(thd->trace_started()))
@@ -10448,9 +10475,9 @@ best_extension_by_limited_search(JOIN *join,

  /* Compute the cost of the new plan extended with 's' */
  current_record_count= COST_MULT(record_count, position->records_read);
- const double filter_cmp_gain= position->range_rowid_filter_info
- ? position->range_rowid_filter_info->get_cmp_gain(current_record_count)
- : 0;
+ filter_cmp_gain= position->range_rowid_filter_info ?
+ position->range_rowid_filter_info->get_cmp_gain(current_record_count) :
+ 0;
  current_read_time= COST_ADD(read_time,
  COST_ADD(position->read_time -
  filter_cmp_gain,
@@ -10574,7 +10601,7 @@ best_extension_by_limited_search(JOIN *join,
  }
  }

- double pushdown_cond_selectivity= 1.0;
+ pushdown_cond_selectivity= 1.0;
  if (use_cond_selectivity > 1)
  pushdown_cond_selectivity= table_cond_selectivity(join, idx, s,
  remaining_tables &
@@ -11397,14 +11424,15 @@ bool JOIN::get_best_combination()
  j->table= NULL; //temporary way to tell SJM tables from others.
  j->ref.key = -1;
  j->on_expr_ref= (Item**) &null_ptr;
- j->keys= key_map(1); /* The unique index is always in 'possible keys' in EXPLAIN */
+ /* The unique index is always in 'possible keys' in EXPLAIN */
+ j->keys= key_map(1);

  /*
  2. Proceed with processing SJM nest's join tabs, putting them into the
  sub-order
  */
  SJ_MATERIALIZATION_INFO *sjm= cur_pos->table->emb_sj_nest->sj_mat_info;
- j->records_read= (sjm->is_sj_scan? sjm->rows : 1);
+ j->records_read= (sjm->is_sj_scan? sjm->rows : 1.0);
  j->records= (ha_rows) j->records_read;
  j->cond_selectivity= 1.0;
  JOIN_TAB *jt;
@@ -11447,18 +11475,12 @@ bool JOIN::get_best_combination()
  full_join= 1;
  }

- /*if (best_positions[tablenr].sj_strategy == SJ_OPT_LOOSE_SCAN)
- {
- DBUG_ASSERT(!keyuse || keyuse->key ==
- best_positions[tablenr].loosescan_picker.loosescan_key);
- j->index= best_positions[tablenr].loosescan_picker.loosescan_key;
- }*/
-
  if ((j->type == JT_REF || j->type == JT_EQ_REF) &&
  is_hash_join_key_no(j->ref.key))
  hash_join= TRUE;

- j->range_rowid_filter_info= best_positions[tablenr].range_rowid_filter_info;
+ j->range_rowid_filter_info=
+ best_positions[tablenr].range_rowid_filter_info;

  loop_end:
  /*
@@ -12583,7 +12605,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
  tab->type == JT_EQ_REF || first_inner_tab)
  {
  DBUG_EXECUTE("where",print_where(tmp,
- tab->table? tab->table->alias.c_ptr() :"sjm-nest",
+ tab->table ?
+ tab->table->alias.c_ptr() :"sjm-nest",
  QT_ORDINARY););
  SQL_SELECT *sel= tab->select= ((SQL_SELECT*)
  thd->memdup((uchar*) select,
@@ -14104,10 +14127,12 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)

  if (tab->bush_root_tab && tab->bush_root_tab->bush_children->start == tab)
  prev_tab= NULL;
- DBUG_ASSERT(tab->bush_children || tab->table == join->best_positions[i].table->table);
+ DBUG_ASSERT(tab->bush_children ||
+ tab->table == join->best_positions[i].table->table);

  tab->partial_join_cardinality= join->best_positions[i].records_read *
- (prev_tab? prev_tab->partial_join_cardinality : 1);
+ (prev_tab ?
+ prev_tab->partial_join_cardinality : 1);
  if (!tab->bush_children)
  i++;
  }
@@ -14115,7 +14140,8 @@ make_join_readinfo(JOIN *join, ulonglong options, uint no_jbuf_after)
  check_join_cache_usage_for_tables(join, options, no_jbuf_after);

  JOIN_TAB *first_tab;
- for (tab= first_tab= first_linear_tab(join, WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
+ for (tab= first_tab= first_linear_tab(join,
+ WITH_BUSH_ROOTS, WITHOUT_CONST_TABLES);
  tab;
  tab= next_linear_tab(join, tab, WITH_BUSH_ROOTS))
  {
@@ -17887,7 +17913,8 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
  Do update counters for "pairs of brackets" that we've left (marked as
  X,Y,Z in the above picture)
  */
- for (;next_emb && next_emb != join->emb_sjm_nest; next_emb= next_emb->embedding)
+ for (;next_emb && next_emb != join->emb_sjm_nest;
+ next_emb= next_emb->embedding)
  {
  if (!next_emb->sj_on_expr)
  {
@@ -17896,8 +17923,8 @@ static bool check_interleaving_with_nj(JOIN_TAB *next_tab)
  {
  /*
  next_emb is the first table inside a nested join we've "entered". In
- the picture above, we're looking at the 'X' bracket. Don't exit yet as
- X bracket might have Y pair bracket.
+ the picture above, we're looking at the 'X' bracket. Don't exit yet
+ as X bracket might have Y pair bracket.
  */
  join->cur_embedding_map |= next_emb->nested_join->nj_map;
  }
@@ -19911,7 +19938,7 @@ bool Create_tmp_table::finalize(THD *thd,
   bool save_abort_on_warning;
   uchar *pos;
   uchar *null_flags;
-  KEY *keyinfo;
+  KEY *keyinfo= param->keyinfo;
   TMP_ENGINE_COLUMNDEF *recinfo;
   TABLE_SHARE *share= table->s;
   Copy_field *copy= param->copy_field;
@@ -20103,8 +20130,6 @@ bool Create_tmp_table::finalize(THD *thd,
   set_if_smaller(share->max_rows, m_rows_limit);
   param->end_write_records= m_rows_limit;
 
-  keyinfo= param->keyinfo;
-
   if (m_group)
   {
     DBUG_PRINT("info",("Creating group key in temporary table"));
@@ -20118,7 +20143,8 @@ bool Create_tmp_table::finalize(THD *thd,
     keyinfo->key_part= m_key_part_info;
     keyinfo->flags=HA_NOSAME | HA_BINARY_PACK_KEY | HA_PACK_KEY;
     keyinfo->ext_key_flags= keyinfo->flags;
-    keyinfo->usable_key_parts=keyinfo->user_defined_key_parts= param->group_parts;
+    keyinfo->usable_key_parts=keyinfo->user_defined_key_parts=
+      param->group_parts;
     keyinfo->ext_key_parts= keyinfo->user_defined_key_parts;
     keyinfo->key_length=0;
     keyinfo->rec_per_key=NULL;
@@ -23748,10 +23774,10 @@ bool test_if_ref(Item *root_cond, Item_field *left_item,Item *right_item)
 
   @param cond           Condition to analyze
   @param tables         Tables for which "current field values" are available
-  @param used_table     Table that we're extracting the condition for
-    tables              Tables for which "current field values" are available (this
+                        Tables for which "current field values" are available (this
                         includes used_table)
                         (may also include PSEUDO_TABLE_BITS, and may be zero)
+  @param used_table     Table that we're extracting the condition for
   @param join_tab_idx_arg
                         The index of the JOIN_TAB this Item is being extracted
                         for. MAX_TABLES if there is no corresponding JOIN_TAB.
@@ -27788,7 +27814,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
                                  table_map prefix_tables,
                                  bool distinct_arg, JOIN_TAB *first_top_tab)
 {
-  int quick_type;
+  int quick_type= -1;
   CHARSET_INFO *cs= system_charset_info;
   THD *thd= join->thd;
   TABLE_LIST *table_list= table->pos_in_table_list;
@@ -27797,7 +27823,6 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta,
   char table_name_buffer[SAFE_NAME_LEN];
   KEY *key_info= 0;
   uint key_len= 0;
-  quick_type= -1;
 
   explain_plan= eta;
   eta->key.clear();
@@ -29351,8 +29376,8 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
 
   @param added_where  An extra conjunct to the WHERE clause to reoptimize with
   @param join_tables  The set of tables to reoptimize
-  @param save_to      If != NULL, save here the state of the current query plan,
-                      otherwise reuse the existing query plan structures.
+  @param save_to      If != NULL, save here the state of the current query
+                      plan, otherwise reuse the existing query plan structures.
 
   @notes
   Given a query plan that was already optimized taking into account some WHERE
@@ -29369,7 +29394,8 @@ void JOIN::restore_query_plan(Join_plan_state *restore_from)
 
   @retval REOPT_NEW_PLAN there is a new plan.
   @retval REOPT_OLD_PLAN no new improved plan was produced, use the old one.
-  @retval REOPT_ERROR    an irrecovarable error occurred during reoptimization.
+  @retval REOPT_ERROR    an irrecovarable error occurred during
+                         reoptimization.
 */
 
 JOIN::enum_reopt_result
@@ -29381,8 +29407,8 @@ JOIN::reoptimize(Item *added_where, table_map join_tables,
   size_t org_keyuse_elements;
 
   /* Re-run the REF optimizer to take into account the new conditions. */
-  if (update_ref_and_keys(thd, &added_keyuse, join_tab, table_count, added_where,
-                          ~outer_join, select_lex, &sargables))
+  if (update_ref_and_keys(thd, &added_keyuse, join_tab, table_count,
+                          added_where, ~outer_join, select_lex, &sargables))
   {
     delete_dynamic(&added_keyuse);
     return REOPT_ERROR;
@@ -30062,7 +30088,7 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
     if (select && select->quick)
       return select->quick->index; // index or MAX_KEY, use quick select as is
     else
-      return table->file->key_used_on_scan; // MAX_KEY or index for some engines
+      return table->file->key_used_on_scan; // MAX_KEY or index for some engine
   }
 
   if (!is_simple_order(order)) // just to cut further expensive checks
@@ -30110,11 +30136,13 @@ uint get_index_for_order(ORDER *order, TABLE *table, SQL_SELECT *select,
       DBUG_ASSERT(0);
   }
   else if (limit != HA_POS_ERROR)
-  { // check if some index scan & LIMIT is more efficient than filesort
+  {
+    // check if some index scan & LIMIT is more efficient than filesort
 
     /*
-      Update opt_range_condition_rows since single table UPDATE/DELETE procedures
-      don't call make_join_statistics() and leave this variable uninitialized.
+      Update opt_range_condition_rows since single table UPDATE/DELETE
+      procedures don't call make_join_statistics() and leave this
+      variable uninitialized.
     */
     table->opt_range_condition_rows= table->stat_records();
 
@@ -30692,20 +30720,22 @@ bool build_notnull_conds_for_range_scans(JOIN *join, Item *cond,
   @brief
     Build not null conditions for inner nest tables of an outer join
 
-  @param join     the join for whose table nest not null conditions are to be built
+  @param join     the join for whose table nest not null conditions are to be
+                  built
   @param nest_tbl the nest of the inner tables of an outer join
 
   @details
-    The function assumes that nest_tbl is the nest of the inner tables of an
-    outer join and so an ON expression for this outer join is attached to
-    nest_tbl.
-    The function selects the tables of the nest_tbl that are not inner tables of
-    embedded outer joins and then it calls build_notnull_conds_for_range_scans()
-    for nest_tbl->on_expr and the bitmap for the selected tables. This call
-    finds all fields belonging to the selected tables whose null-rejectedness
-    can be inferred from the null-rejectedness of nest_tbl->on_expr. After this
-    the function recursively finds all null_rejected fields for the remaining
-    tables from the nest of nest_tbl.
+    The function assumes that nest_tbl is the nest of the inner tables
+    of an outer join and so an ON expression for this outer join is
+    attached to nest_tbl.
+    The function selects the tables of the nest_tbl that are not inner
+    tables of embedded outer joins and then it calls
+    build_notnull_conds_for_range_scans() for nest_tbl->on_expr and
+    the bitmap for the selected tables. This call finds all fields
+    belonging to the selected tables whose null-rejectedness can be
+    inferred from the null-rejectedness of nest_tbl->on_expr. After
+    this the function recursively finds all null_rejected fields for
+    the remaining tables from the nest of nest_tbl.
 */
 
 static
@@ -1343,7 +1343,9 @@ public:
   int dbug_join_tab_array_size;
 #endif
 
-  /* We also maintain a stack of join optimization states in * join->positions[] */
+  /*
+    We also maintain a stack of join optimization states in join->positions[]
+  */
   /******* Join optimization state members end *******/
 
   /*