Merge of mysql-trunk-bugfixing into mysql-trunk-merge.
sql/opt_range.cc: 129 changed lines

@@ -1091,7 +1091,7 @@ SQL_SELECT *make_select(TABLE *head, table_map const_tables,
     select->file= *head->sort.io_cache;
     select->records=(ha_rows) (select->file.end_of_file/
                                head->file->ref_length);
-    my_free(head->sort.io_cache, MYF(0));
+    my_free(head->sort.io_cache);
     head->sort.io_cache=0;
   }
   DBUG_RETURN(select);
@@ -1216,11 +1216,11 @@ QUICK_RANGE_SELECT::~QUICK_RANGE_SELECT()
     }
     delete_dynamic(&ranges); /* ranges are allocated in alloc */
     free_root(&alloc,MYF(0));
-    my_free((char*) column_bitmap.bitmap, MYF(MY_ALLOW_ZERO_PTR));
+    my_free(column_bitmap.bitmap);
   }
   head->column_bitmaps_set(save_read_set, save_write_set);
-  x_free(multi_range);
-  x_free(multi_range_buff);
+  my_free(multi_range);
+  my_free(multi_range_buff);
   DBUG_VOID_RETURN;
 }
 
@@ -1842,96 +1842,6 @@ SEL_ARG *SEL_ARG::clone_tree(RANGE_OPT_PARAM *param)
 }
 
 
-/*
-  Find the best index to retrieve first N records in given order
-
-  SYNOPSIS
-    get_index_for_order()
-      table  Table to be accessed
-      order  Required ordering
-      limit  Number of records that will be retrieved
-
-  DESCRIPTION
-    Find the best index that allows to retrieve first #limit records in the
-    given order cheaper then one would retrieve them using full table scan.
-
-  IMPLEMENTATION
-    Run through all table indexes and find the shortest index that allows
-    records to be retrieved in given order. We look for the shortest index
-    as we will have fewer index pages to read with it.
-
-    This function is used only by UPDATE/DELETE, so we take into account how
-    the UPDATE/DELETE code will work:
-     * index can only be scanned in forward direction
-     * HA_EXTRA_KEYREAD will not be used
-    Perhaps these assumptions could be relaxed.
-
-  RETURN
-    Number of the index that produces the required ordering in the cheapest way
-    MAX_KEY if no such index was found.
-*/
-
-uint get_index_for_order(TABLE *table, ORDER *order, ha_rows limit)
-{
-  uint idx;
-  uint match_key= MAX_KEY, match_key_len= MAX_KEY_LENGTH + 1;
-  ORDER *ord;
-
-  for (ord= order; ord; ord= ord->next)
-    if (!ord->asc)
-      return MAX_KEY;
-
-  for (idx= 0; idx < table->s->keys; idx++)
-  {
-    if (!(table->keys_in_use_for_query.is_set(idx)))
-      continue;
-    KEY_PART_INFO *keyinfo= table->key_info[idx].key_part;
-    uint n_parts= table->key_info[idx].key_parts;
-    uint partno= 0;
-
-    /*
-      The below check is sufficient considering we now have either BTREE
-      indexes (records are returned in order for any index prefix) or HASH
-      indexes (records are not returned in order for any index prefix).
-    */
-    if (!(table->file->index_flags(idx, 0, 1) & HA_READ_ORDER))
-      continue;
-    for (ord= order; ord && partno < n_parts; ord= ord->next, partno++)
-    {
-      Item *item= order->item[0];
-      if (!(item->type() == Item::FIELD_ITEM &&
-            ((Item_field*)item)->field->eq(keyinfo[partno].field)))
-        break;
-    }
-
-    if (!ord && table->key_info[idx].key_length < match_key_len)
-    {
-      /*
-        Ok, the ordering is compatible and this key is shorter then
-        previous match (we want shorter keys as we'll have to read fewer
-        index pages for the same number of records)
-      */
-      match_key= idx;
-      match_key_len= table->key_info[idx].key_length;
-    }
-  }
-
-  if (match_key != MAX_KEY)
-  {
-    /*
-      Found an index that allows records to be retrieved in the requested
-      order. Now we'll check if using the index is cheaper then doing a table
-      scan.
-    */
-    double full_scan_time= table->file->scan_time();
-    double index_scan_time= table->file->read_time(match_key, 1, limit);
-    if (index_scan_time > full_scan_time)
-      match_key= MAX_KEY;
-  }
-  return match_key;
-}
-
-
 /*
   Table rows retrieval plan. Range optimizer creates QUICK_SELECT_I-derived
   objects from table read plans.
@@ -8682,7 +8592,7 @@ int QUICK_RANGE_SELECT::reset()
   }
   if (! multi_range_buff)
   {
-    my_free((char*) multi_range, MYF(0));
+    my_free(multi_range);
     multi_range= NULL;
     multi_range_length= 0;
     DBUG_RETURN(HA_ERR_OUT_OF_MEM);
@@ -8980,7 +8890,6 @@ QUICK_SELECT_DESC::QUICK_SELECT_DESC(QUICK_RANGE_SELECT *q,
   }
   rev_it.rewind();
   q->dont_free=1;                              // Don't free shared mem
-  delete q;
 }
 
 
@@ -9070,6 +8979,27 @@ int QUICK_SELECT_DESC::get_next()
 }
 
 
+/**
+  Create a compatible quick select with the result ordered in an opposite way
+
+  @param used_key_parts_arg  Number of used key parts
+
+  @retval NULL in case of errors (OOM etc)
+  @retval pointer to a newly created QUICK_SELECT_DESC if success
+*/
+
+QUICK_SELECT_I *QUICK_RANGE_SELECT::make_reverse(uint used_key_parts_arg)
+{
+  QUICK_SELECT_DESC *new_quick= new QUICK_SELECT_DESC(this, used_key_parts_arg);
+  if (new_quick == NULL || new_quick->error != 0)
+  {
+    delete new_quick;
+    return NULL;
+  }
+  return new_quick;
+}
+
+
 /*
   Compare if found key is over max-value
   Returns 0 if key <= range->max_key
@@ -9643,8 +9573,8 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree, double read_time)
              first Item? If so, then why? What is the array for?
         */
         /* Above we already checked that all group items are fields. */
-        DBUG_ASSERT((*tmp_group->item)->type() == Item::FIELD_ITEM);
-        Item_field *group_field= (Item_field *) (*tmp_group->item);
+        DBUG_ASSERT((*tmp_group->item)->real_item()->type() == Item::FIELD_ITEM);
+        Item_field *group_field= (Item_field *) (*tmp_group->item)->real_item();
         if (group_field->field->eq(cur_part->field))
         {
           cur_group_prefix_len+= cur_part->store_length;
@@ -11675,6 +11605,7 @@ void QUICK_RANGE_SELECT::dbug_dump(int indent, bool verbose)
   /* purecov: end */
 }
 
+
 void QUICK_INDEX_MERGE_SELECT::dbug_dump(int indent, bool verbose)
 {
   List_iterator_fast<QUICK_RANGE_SELECT> it(quick_selects);
@@ -11763,7 +11694,7 @@ void QUICK_GROUP_MIN_MAX_SELECT::dbug_dump(int indent, bool verbose)
 }
 
 
-#endif /* NOT_USED */
+#endif /* !DBUG_OFF */
 
 /*****************************************************************************
 ** Instantiate templates