
Post-merge fixes for rocksdb.group_min_max test

- Fix the LooseScan code to support storage engines that return
  HA_ERR_END_OF_FILE if the index scan goes out of the provided range
  bounds (the toy sketch after the commit metadata illustrates the
  two engine behaviours involved)
- Add a DBUG_EXECUTE_IF("force_group_by",...) to allow a test to
  force a LooseScan
- Adjust the rocksdb.group_min_max test not to use features not present
  in MariaDB 10.2 (e.g. optimizer_trace; in MariaDB 10.4 it's present,
  but it doesn't meet the assumptions that the test makes about it)
- Adjust the test result file:
  = MariaDB doesn't support "Enhanced Loose Scan" that FB/MySQL has
  = MariaDB has different cost calculations.
Sergei Petrunia
2019-08-03 23:15:44 +03:00
parent 05b35cf4c1
commit 09a85692a6
4 changed files with 395 additions and 360 deletions
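
A minimal self-contained toy sketch of the engine behaviour the first fix
point is about (toy_cursor and its fields are made up for illustration, not
MariaDB code): some engines keep returning rows past the requested end key
and leave the bounds check to the SQL layer, while MyRocks-style engines
enforce the end bound themselves and answer HA_ERR_END_OF_FILE as soon as
the scan leaves the range.

#include <cstdio>

static const int HA_ERR_END_OF_FILE= 137;    // same value as in my_base.h

struct toy_cursor                             // hypothetical, not a real handler
{
  int pos, end_key;
  bool respects_end_range;                    // MyRocks-like when true

  int index_next(int *row)
  {
    ++pos;
    if (pos > 10)                             // physical end of the index
      return HA_ERR_END_OF_FILE;
    if (respects_end_range && pos > end_key)  // engine enforces the end bound
      return HA_ERR_END_OF_FILE;
    *row= pos;                                // lax engine: caller must compare
    return 0;
  }
};

int main()
{
  int row;
  toy_cursor lax= {0, 5, false};
  toy_cursor strict= {0, 5, true};
  while (lax.index_next(&row) == 0 && row <= 5)   // SQL-layer bound check
    printf("lax: %d\n", row);
  while (strict.index_next(&row) == 0)            // engine already stopped
    printf("strict: %d\n", row);
  return 0;
}

Before this commit the LooseScan prefix-read code assumed only the first
behaviour; the get_next_prefix() hunk below makes it handle both.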

sql/opt_range.cc

@@ -2428,6 +2428,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
   KEY_PART *key_parts;
   KEY *key_info;
   PARAM param;
+  bool force_group_by = false;
 
   if (check_stack_overrun(thd, 2*STACK_MIN_SIZE + sizeof(PARAM), buff))
     DBUG_RETURN(0);                           // Fatal error flag is set
@@ -2555,15 +2556,20 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
       Try to construct a QUICK_GROUP_MIN_MAX_SELECT.
       Notice that it can be constructed no matter if there is a range tree.
     */
+    DBUG_EXECUTE_IF("force_group_by", force_group_by = true; );
     group_trp= get_best_group_min_max(&param, tree, best_read_time);
     if (group_trp)
     {
       param.table->quick_condition_rows= MY_MIN(group_trp->records,
                                                 head->stat_records());
-      if (group_trp->read_cost < best_read_time)
+      if (group_trp->read_cost < best_read_time || force_group_by)
       {
         best_trp= group_trp;
         best_read_time= best_trp->read_cost;
+        if (force_group_by)
+        {
+          goto force_plan;
+        }
       }
     }
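
A hedged sketch of how the new injection point can be driven in a debug
build (the helper name run_with_forced_loose_scan is made up; DBUG_SET() is
the dbug library's programmatic counterpart of a test script running
SET debug_dbug='+d,force_group_by';):

#include <my_global.h>
#include <my_dbug.h>

static void run_with_forced_loose_scan()      // hypothetical helper
{
  DBUG_SET("+d,force_group_by");   // DBUG_EXECUTE_IF("force_group_by", ...) fires
  /*
    ... run the GROUP BY query here: test_quick_select() takes the
    goto force_plan path and keeps the QUICK_GROUP_MIN_MAX plan even
    when its cost estimate loses to another access method ...
  */
  DBUG_SET("-d,force_group_by");   // back to normal cost-based choice
}

The keyword has no effect in release builds, where DBUG_EXECUTE_IF compiles
to nothing, so the test can force a LooseScan without changing production
plan choice.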
@@ -2663,6 +2669,7 @@ int SQL_SELECT::test_quick_select(THD *thd, key_map keys_to_use,
     }
   }
 
+force_plan:
   thd->mem_root= param.old_root;
 
   /* If we got a read plan, create a quick select from it. */
@@ -11509,13 +11516,28 @@ int QUICK_RANGE_SELECT::get_next_prefix(uint prefix_length,
       DBUG_ASSERT(cur_prefix != NULL);
       result= file->ha_index_read_map(record, cur_prefix, keypart_map,
                                       HA_READ_AFTER_KEY);
-      if (result || last_range->max_keypart_map == 0)
-        DBUG_RETURN(result);
-
-      key_range previous_endpoint;
-      last_range->make_max_endpoint(&previous_endpoint, prefix_length, keypart_map);
-      if (file->compare_key(&previous_endpoint) <= 0)
-        DBUG_RETURN(0);
+      if (result || last_range->max_keypart_map == 0) {
+        /*
+          Only return if an actual failure occurred. For HA_ERR_KEY_NOT_FOUND
+          or HA_ERR_END_OF_FILE, we just want to continue to reach the next
+          set of ranges. It is possible for the storage engine to return
+          HA_ERR_KEY_NOT_FOUND/HA_ERR_END_OF_FILE even when there are more
+          keys if it respects the end range set by the read_range_first call
+          below.
+        */
+        if (result != HA_ERR_KEY_NOT_FOUND && result != HA_ERR_END_OF_FILE)
+          DBUG_RETURN(result);
+      } else {
+        /*
+          For storage engines that don't respect the end range, check if we
+          have moved past the current range.
+        */
+        key_range previous_endpoint;
+        last_range->make_max_endpoint(&previous_endpoint, prefix_length,
+                                      keypart_map);
+        if (file->compare_key(&previous_endpoint) <= 0)
+          DBUG_RETURN(0);
+      }
     }
 
     uint count= ranges.elements - (uint)(cur_range - (QUICK_RANGE**) ranges.buffer);
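
For context, a hedged sketch (simplified, not the verbatim code further down
in get_next_prefix()) of the read_range_first() call that the new comment
refers to: the range's max endpoint is handed to the handler as the end key,
which is what entitles an end-range-respecting engine to answer
HA_ERR_END_OF_FILE on a later ha_index_read_map() call even though more
ranges remain.

// Sketch only: relies on the surrounding function's locals (last_range,
// prefix_length, keypart_map, file, result); TRUE for sorted is assumed.
key_range start_key, end_key;
last_range->make_min_endpoint(&start_key, prefix_length, keypart_map);
last_range->make_max_endpoint(&end_key, prefix_length, keypart_map);
result= file->read_range_first(last_range->min_keypart_map ? &start_key : NULL,
                               last_range->max_keypart_map ? &end_key : NULL,
                               MY_TEST(last_range->flag & EQ_RANGE),
                               TRUE /* sorted */);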