Mirror of https://github.com/MariaDB/server.git
Merge 10.2 to 10.3
@@ -2434,6 +2434,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
     KEY_PART *key_parts;
     KEY *key_info;
     PARAM param;
+    bool force_group_by = false;

     if (check_stack_overrun(thd, 2*STACK_MIN_SIZE + sizeof(PARAM), buff))
       DBUG_RETURN(0);                           // Fatal error flag is set
@@ -2562,15 +2563,20 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
       Try to construct a QUICK_GROUP_MIN_MAX_SELECT.
       Notice that it can be constructed no matter if there is a range tree.
     */
+    DBUG_EXECUTE_IF("force_group_by", force_group_by = true; );
     group_trp= get_best_group_min_max(&param, tree, best_read_time);
     if (group_trp)
     {
       param.table->quick_condition_rows= MY_MIN(group_trp->records,
                                                 head->stat_records());
-      if (group_trp->read_cost < best_read_time)
+      if (group_trp->read_cost < best_read_time || force_group_by)
       {
         best_trp= group_trp;
         best_read_time= best_trp->read_cost;
+        if (force_group_by)
+        {
+          goto force_plan;
+        }
       }
     }

@@ -2670,6 +2676,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
       }
     }

+force_plan:
     thd->mem_root= param.old_root;

     /* If we got a read plan, create a quick select from it. */
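
The three hunks above add a debug-only escape hatch: when the DBUG keyword force_group_by is active (on debug builds such keywords are typically enabled with SET debug_dbug='+d,force_group_by'), the GROUP_MIN_MAX plan is chosen even if its cost estimate loses, and the remaining plan candidates are skipped via the new force_plan: label. Below is a minimal, self-contained sketch of the same control flow; PlanChoice and pick_plan are illustrative stand-ins, not the optimizer's actual types or API.

#include <cstdio>

struct PlanChoice
{
  const char *name;
  double read_cost;
};

/*
  Mirrors the modified cost check: the group-by plan wins either when it is
  genuinely cheaper or when the debug flag forces it (DBUG_EXECUTE_IF in the
  real code). When forced, the remaining candidates are skipped, which is
  what "goto force_plan" achieves in test_quick_select().
*/
static PlanChoice pick_plan(PlanChoice group_plan, PlanChoice range_plan,
                            bool force_group_by)
{
  double best_read_time= range_plan.read_cost;
  if (group_plan.read_cost < best_read_time || force_group_by)
    return group_plan;                       // forced: skip other candidates
  return range_plan;
}

int main()
{
  PlanChoice group_min_max= {"group_min_max", 12.0};
  PlanChoice range= {"range", 3.0};

  printf("%s\n", pick_plan(group_min_max, range, false).name); // "range"
  printf("%s\n", pick_plan(group_min_max, range, true).name);  // "group_min_max"
  return 0;
}
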
@@ -11534,13 +11541,28 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
       DBUG_ASSERT(cur_prefix != NULL);
       result= file->ha_index_read_map(record, cur_prefix, keypart_map,
                                       HA_READ_AFTER_KEY);
-      if (result || last_range->max_keypart_map == 0)
-        DBUG_RETURN(result);
-
-      key_range previous_endpoint;
-      last_range->make_max_endpoint(&previous_endpoint, prefix_length, keypart_map);
-      if (file->compare_key(&previous_endpoint) <= 0)
-        DBUG_RETURN(0);
+      if (result || last_range->max_keypart_map == 0) {
+        /*
+          Only return if actual failure occurred. For HA_ERR_KEY_NOT_FOUND
+          or HA_ERR_END_OF_FILE, we just want to continue to reach the next
+          set of ranges. It is possible for the storage engine to return
+          HA_ERR_KEY_NOT_FOUND/HA_ERR_END_OF_FILE even when there are more
+          keys if it respects the end range set by the read_range_first call
+          below.
+        */
+        if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
+          DBUG_RETURN(result);
+      } else {
+        /*
+          For storage engines that don't respect end range, check if we've
+          moved past the current range.
+        */
+        key_range previous_endpoint;
+        last_range->make_max_endpoint(&previous_endpoint, prefix_length,
+                                      keypart_map);
+        if (file->compare_key(&previous_endpoint) <= 0)
+          DBUG_RETURN(0);
+      }
     }

     uint count= ranges.elements - (uint)(cur_range - (QUICK_RANGE**) ranges.buffer);
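
The rewritten block changes how get_next_prefix() reacts to a read that reports no rows: HA_ERR_KEY_NOT_FOUND and HA_ERR_END_OF_FILE no longer abort the scan, since an engine that honours the end range set by the later read_range_first call can legitimately report them while more ranges remain; only genuine errors are returned, and for engines that ignore the end range the old endpoint comparison still decides whether the current row lies inside the range. A minimal sketch of just that decision logic follows; the status codes, FakeRange and handle_read_result are illustrative stand-ins, not MariaDB's handler API.

#include <cstdio>

enum handler_status
{
  STATUS_OK= 0,
  STATUS_KEY_NOT_FOUND= 120,   // stands in for HA_ERR_KEY_NOT_FOUND
  STATUS_END_OF_FILE= 137,     // stands in for HA_ERR_END_OF_FILE
  STATUS_IO_ERROR= 1           // any other, "real" error
};

struct FakeRange
{
  bool has_max_endpoint;       // stands in for last_range->max_keypart_map != 0
};

/*
  Returns -1 when the caller should advance to the next range (the outer loop
  in the real function simply continues); any other value is handed back to
  the caller, as DBUG_RETURN() does in get_next_prefix().
*/
static int handle_read_result(int result, const FakeRange &range,
                              bool row_is_past_end_of_range)
{
  if (result || !range.has_max_endpoint)
  {
    /* Hard errors abort; "no rows" statuses fall through to the next range,
       since the engine may just have honoured the end range. */
    if (result != STATUS_KEY_NOT_FOUND && result != STATUS_END_OF_FILE)
      return result;
  }
  else if (!row_is_past_end_of_range)
  {
    /* Engine ignores end range, but the row is still inside the range. */
    return STATUS_OK;
  }
  return -1;                   // try the next range
}

int main()
{
  FakeRange bounded= {true};
  FakeRange unbounded= {false};

  printf("%d\n", handle_read_result(STATUS_KEY_NOT_FOUND, bounded, false)); // -1
  printf("%d\n", handle_read_result(STATUS_IO_ERROR, unbounded, false));    // 1
  printf("%d\n", handle_read_result(STATUS_OK, bounded, false));            // 0
  printf("%d\n", handle_read_result(STATUS_OK, bounded, true));             // -1
  return 0;
}
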