Mirror of https://github.com/MariaDB/server.git, synced 2025-08-01 03:47:19 +03:00
Creation of mysql-trunk = {summit + "Innodb plugin replacing the builtin"}:
bzr branch mysql-5.1-performance-version mysql-trunk  # Summit
cd mysql-trunk
bzr merge mysql-5.1-innodb_plugin                     # which is 5.1 + Innodb plugin
bzr rm innobase                                       # remove the builtin

Next step: build, test fixes.
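For the "build, test fixes" step, a minimal sketch of the follow-up, assuming the in-tree BUILD/ wrapper scripts and mysql-test-run.pl that shipped with the server sources of that era (the exact script name is an assumption, not part of this commit):

    cd mysql-trunk
    # assumed wrapper around configure + make; any BUILD/compile-* script would do
    BUILD/compile-pentium64-debug-max
    # run the regression suite against the merged tree, then fix failures
    cd mysql-test && perl mysql-test-run.pl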
@@ -1353,7 +1353,7 @@ JOIN::optimize()
  join_tab[const_tables].type != JT_ALL &&
  join_tab[const_tables].type != JT_FT &&
  join_tab[const_tables].type != JT_REF_OR_NULL &&
- (order && simple_order || group_list && simple_group))
+ ((order && simple_order) || (group_list && simple_group)))
  {
  if (add_ref_to_table_cond(thd,&join_tab[const_tables])) {
  DBUG_RETURN(1);
@@ -1871,9 +1871,9 @@ JOIN::exec()
  like SEC_TO_TIME(SUM(...)).
  */

- if (curr_join->group_list && (!test_if_subpart(curr_join->group_list,
+ if ((curr_join->group_list && (!test_if_subpart(curr_join->group_list,
  curr_join->order) ||
- curr_join->select_distinct) ||
+ curr_join->select_distinct)) ||
  (curr_join->select_distinct &&
  curr_join->tmp_table_param.using_indirect_summary_function))
  { /* Must copy to another table */
@@ -2251,6 +2251,14 @@ JOIN::destroy()
  cond_equal= 0;

  cleanup(1);
+ /* Cleanup items referencing temporary table columns */
+ if (!tmp_all_fields3.is_empty())
+ {
+ List_iterator_fast<Item> it(tmp_all_fields3);
+ Item *item;
+ while ((item= it++))
+ item->cleanup();
+ }
  if (exec_tmp_table1)
  free_tmp_table(thd, exec_tmp_table1);
  if (exec_tmp_table2)
@@ -2339,9 +2347,10 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
  }
  else
  {
- if (err= join->prepare(rref_pointer_array, tables, wild_num,
- conds, og_num, order, group, having, proc_param,
- select_lex, unit))
+ err= join->prepare(rref_pointer_array, tables, wild_num,
+ conds, og_num, order, group, having, proc_param,
+ select_lex, unit);
+ if (err)
  {
  goto err;
  }
@@ -2356,9 +2365,10 @@ mysql_select(THD *thd, Item ***rref_pointer_array,
  DBUG_RETURN(TRUE);
  thd_proc_info(thd, "init");
  thd->used_tables=0; // Updated by setup_fields
- if (err= join->prepare(rref_pointer_array, tables, wild_num,
- conds, og_num, order, group, having, proc_param,
- select_lex, unit))
+ err= join->prepare(rref_pointer_array, tables, wild_num,
+ conds, og_num, order, group, having, proc_param,
+ select_lex, unit);
+ if (err)
  {
  goto err;
  }
@@ -2425,7 +2435,6 @@ static ha_rows get_quick_record_count(THD *thd, SQL_SELECT *select,
  if (select)
  {
  select->head=table;
- table->reginfo.impossible_range=0;
  if ((error= select->test_quick_select(thd, *(key_map *)keys,(table_map) 0,
  limit, 0)) == 1)
  DBUG_RETURN(select->quick->records);
@@ -3836,7 +3845,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab,
  if (use->key == prev->key && use->table == prev->table)
  {
  if (prev->keypart+1 < use->keypart ||
- prev->keypart == use->keypart && found_eq_constant)
+ (prev->keypart == use->keypart && found_eq_constant))
  continue; /* remove */
  }
  else if (use->keypart != 0) // First found must be 0
@@ -5141,8 +5150,8 @@ best_extension_by_limited_search(JOIN *join,
  {
  if (best_record_count > current_record_count ||
  best_read_time > current_read_time ||
- idx == join->const_tables && // 's' is the first table in the QEP
- s->table == join->sort_by_table)
+ (idx == join->const_tables && // 's' is the first table in the QEP
+ s->table == join->sort_by_table))
  {
  if (best_record_count >= current_record_count &&
  best_read_time >= current_read_time &&
@@ -5268,7 +5277,7 @@ find_best(JOIN *join,table_map rest_tables,uint idx,double record_count,
  double current_read_time=read_time+best;
  if (best_record_count > current_record_count ||
  best_read_time > current_read_time ||
- idx == join->const_tables && s->table == join->sort_by_table)
+ (idx == join->const_tables && s->table == join->sort_by_table))
  {
  if (best_record_count >= current_record_count &&
  best_read_time >= current_read_time &&
@@ -6215,8 +6224,8 @@ make_join_select(JOIN *join,SQL_SELECT *select,COND *cond)
  the index if we are using limit and this is the first table
  */

- if (cond &&
- (!tab->keys.is_subset(tab->const_keys) && i > 0) ||
+ if ((cond &&
+ !tab->keys.is_subset(tab->const_keys) && i > 0) ||
  (!tab->const_keys.is_clear_all() && i == join->const_tables &&
  join->unit->select_limit_cnt <
  join->best_positions[i].records_read &&
@@ -7087,15 +7096,17 @@ return_zero_rows(JOIN *join, select_result *result,TABLE_LIST *tables,
  if (!(result->send_fields(fields,
  Protocol::SEND_NUM_ROWS | Protocol::SEND_EOF)))
  {
+ bool send_error= FALSE;
  if (send_row)
  {
  List_iterator_fast<Item> it(fields);
  Item *item;
  while ((item= it++))
  item->no_rows_in_result();
- result->send_data(fields);
+ send_error= result->send_data(fields);
  }
- result->send_eof(); // Should be safe
+ if (!send_error)
+ result->send_eof(); // Should be safe
  }
  /* Update results for FOUND_ROWS */
  join->thd->limit_found_rows= join->thd->examined_row_count= 0;
@@ -7347,7 +7358,7 @@ static bool check_simple_equality(Item *left_item, Item *right_item,
  left_item_equal->merge(right_item_equal);
  /* Remove the merged multiple equality from the list */
  List_iterator<Item_equal> li(cond_equal->current_level);
- while ((li++) != right_item_equal);
+ while ((li++) != right_item_equal) ;
  li.remove();
  }
  }
@@ -9383,13 +9394,17 @@ static Field *create_tmp_field_from_item(THD *thd, Item *item, TABLE *table,
  +1: for decimal point
  */

- overflow= my_decimal_precision_to_length(intg + dec, dec,
- item->unsigned_flag) - len;
+ const int required_length=
+ my_decimal_precision_to_length(intg + dec, dec,
+ item->unsigned_flag);
+
+ overflow= required_length - len;

  if (overflow > 0)
  dec= max(0, dec - overflow); // too long, discard fract
  else
- len -= item->decimals - dec; // corrected value fits
+ /* Corrected value fits. */
+ len= required_length;
  }

  new_field= new Field_new_decimal(len, maybe_null, item->name,
@@ -10025,9 +10040,9 @@ create_tmp_table(THD *thd,TMP_TABLE_PARAM *param,List<Item> &fields,
  reclength=1; // Dummy select
  /* Use packed rows if there is blobs or a lot of space to gain */
  if (blob_count ||
- string_total_length >= STRING_TOTAL_LENGTH_TO_PACK_ROWS &&
+ (string_total_length >= STRING_TOTAL_LENGTH_TO_PACK_ROWS &&
  (reclength / string_total_length <= RATIO_TO_PACK_ROWS ||
- string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS))
+ string_total_length / string_count >= AVG_STRING_LENGTH_TO_PACK_ROWS)))
  use_packed_rows= 1;

  share->reclength= reclength;
@@ -10828,9 +10843,8 @@ do_select(JOIN *join,List<Item> *fields,TABLE *table,Procedure *procedure)
  {
  int rc= 0;
  enum_nested_loop_state error= NESTED_LOOP_OK;
- JOIN_TAB *join_tab;
+ JOIN_TAB *join_tab= NULL;
  DBUG_ENTER("do_select");
- LINT_INIT(join_tab);

  join->procedure=procedure;
  join->tmp_table= table; /* Save for easy recursion */
@@ -12651,7 +12665,10 @@ static int test_if_order_by_key(ORDER *order, TABLE *table, uint idx,
  one row). The sorting doesn't matter.
  */
  if (key_part == key_part_end && reverse == 0)
+ {
+ *used_key_parts= 0;
  DBUG_RETURN(1);
+ }
  }
  else
  DBUG_RETURN(0);
@@ -13066,9 +13083,9 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
  */
  uint nr;
  key_map keys;
- uint best_key_parts;
- int best_key_direction;
- ha_rows best_records;
+ uint best_key_parts= 0;
+ int best_key_direction= 0;
+ ha_rows best_records= 0;
  double read_time;
  int best_key= -1;
  bool is_best_covering= FALSE;
@@ -13078,9 +13095,6 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
  ha_rows table_records= table->file->stats.records;
  bool group= join->group && order == join->group_list;
  ha_rows ref_key_quick_rows= HA_POS_ERROR;
- LINT_INIT(best_key_parts);
- LINT_INIT(best_key_direction);
- LINT_INIT(best_records);

  /*
  If not used with LIMIT, only use keys if the whole query can be
@@ -13121,12 +13135,20 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
  for (nr=0; nr < table->s->keys ; nr++)
  {
  int direction;
+
  if (keys.is_set(nr) &&
  (direction= test_if_order_by_key(order, table, nr, &used_key_parts)))
  {
+ /*
+ At this point we are sure that ref_key is a non-ordering
+ key (where "ordering key" is a key that will return rows
+ in the order required by ORDER BY).
+ */
+ DBUG_ASSERT (ref_key != (int) nr);
+
  bool is_covering= table->covering_keys.is_set(nr) ||
- nr == table->s->primary_key &&
- table->file->primary_key_is_clustered();
+ (nr == table->s->primary_key &&
+ table->file->primary_key_is_clustered());

  /*
  Don't use an index scan with ORDER BY without limit.
@@ -13139,7 +13161,7 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
  */
  if (is_covering ||
  select_limit != HA_POS_ERROR ||
- ref_key < 0 && (group || table->force_index))
+ (ref_key < 0 && (group || table->force_index)))
  {
  double rec_per_key;
  double index_scan_time;
@@ -13148,7 +13170,8 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
  select_limit= table_records;
  if (group)
  {
- rec_per_key= keyinfo->rec_per_key[used_key_parts-1];
+ rec_per_key= used_key_parts ? keyinfo->rec_per_key[used_key_parts-1]
+ : 1;
  set_if_bigger(rec_per_key, 1);
  /*
  With a grouping query each group containing on average
@@ -13203,13 +13226,13 @@ test_if_skip_sort_order(JOIN_TAB *tab,ORDER *order,ha_rows select_limit,
  */
  index_scan_time= select_limit/rec_per_key *
  min(rec_per_key, table->file->scan_time());
- if (is_covering ||
- ref_key < 0 && (group || table->force_index) ||
+ if ((ref_key < 0 && is_covering) ||
+ (ref_key < 0 && (group || table->force_index)) ||
  index_scan_time < read_time)
  {
  ha_rows quick_records= table_records;
- if (is_best_covering && !is_covering ||
- is_covering && ref_key_quick_rows < select_limit)
+ if ((is_best_covering && !is_covering) ||
+ (is_covering && ref_key_quick_rows < select_limit))
  continue;
  if (table->quick_keys.is_set(nr))
  quick_records= table->quick_rows[nr];
@@ -13417,8 +13440,8 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
  */
  if ((order != join->group_list ||
  !(join->select_options & SELECT_BIG_RESULT) ||
- select && select->quick &&
- select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX) &&
+ (select && select->quick &&
+ select->quick->get_type() == QUICK_SELECT_I::QS_TYPE_GROUP_MIN_MAX)) &&
  test_if_skip_sort_order(tab,order,select_limit,0,
  is_order_by ? &table->keys_in_use_for_order_by :
  &table->keys_in_use_for_group_by))
@@ -13479,9 +13502,24 @@ create_sort_index(THD *thd, JOIN *join, ORDER *order,
  tab->records= table->sort.found_records; // For SQL_CALC_ROWS
  if (select)
  {
+ /*
+ We need to preserve tablesort's output resultset here, because
+ QUICK_INDEX_MERGE_SELECT::~QUICK_INDEX_MERGE_SELECT (called by
+ SQL_SELECT::cleanup()) may free it assuming it's the result of the quick
+ select operation that we no longer need. Note that all the other parts of
+ this data structure are cleaned up when
+ QUICK_INDEX_MERGE_SELECT::get_next encounters end of data, so the next
+ SQL_SELECT::cleanup() call changes sort.io_cache alone.
+ */
+ IO_CACHE *tablesort_result_cache;
+
+ tablesort_result_cache= table->sort.io_cache;
+ table->sort.io_cache= NULL;
+
  select->cleanup(); // filesort did select
  tab->select= 0;
  table->quick_keys.clear_all(); // as far as we cleanup select->quick
+ table->sort.io_cache= tablesort_result_cache;
  }
  tab->select_cond=0;
  tab->last_inner= 0;
@@ -13855,7 +13893,7 @@ SORT_FIELD *make_unireg_sortorder(ORDER *order, uint *length,
  pos->field= ((Item_sum*) item)->get_tmp_table_field();
  else if (item->type() == Item::COPY_STR_ITEM)
  { // Blob patch
- pos->item= ((Item_copy_string*) item)->item;
+ pos->item= ((Item_copy*) item)->get_item();
  }
  else
  pos->item= *order->item;
@@ -14244,8 +14282,8 @@ find_order_in_list(THD *thd, Item **ref_pointer_array, TABLE_LIST *tables,
  /* Lookup the current GROUP field in the FROM clause. */
  order_item_type= order_item->type();
  from_field= (Field*) not_found_field;
- if (is_group_field &&
- order_item_type == Item::FIELD_ITEM ||
+ if ((is_group_field &&
+ order_item_type == Item::FIELD_ITEM) ||
  order_item_type == Item::REF_ITEM)
  {
  from_field= find_field_in_tables(thd, (Item_ident*) order_item, tables,
@@ -14680,7 +14718,7 @@ get_sort_by_table(ORDER *a,ORDER *b,TABLE_LIST *tables)
  if (!map || (map & (RAND_TABLE_BIT | OUTER_REF_TABLE_BIT)))
  DBUG_RETURN(0);

- for (; !(map & tables->table->map); tables= tables->next_leaf);
+ for (; !(map & tables->table->map); tables= tables->next_leaf) ;
  if (map != tables->table->map)
  DBUG_RETURN(0); // More than one table
  DBUG_PRINT("exit",("sort by table: %d",tables->table->tablenr));
@@ -14926,7 +14964,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
  pos= item;
  if (item->field->flags & BLOB_FLAG)
  {
- if (!(pos= new Item_copy_string(pos)))
+ if (!(pos= Item_copy::create(pos)))
  goto err;
  /*
  Item_copy_string::copy for function can call
@@ -14980,7 +15018,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
  on how the value is to be used: In some cases this may be an
  argument in a group function, like: IF(ISNULL(col),0,COUNT(*))
  */
- if (!(pos=new Item_copy_string(pos)))
+ if (!(pos= Item_copy::create(pos)))
  goto err;
  if (i < border) // HAVING, ORDER and GROUP BY
  {
@@ -15033,8 +15071,8 @@ copy_fields(TMP_TABLE_PARAM *param)
  (*ptr->do_copy)(ptr);

  List_iterator_fast<Item> it(param->copy_funcs);
- Item_copy_string *item;
- while ((item = (Item_copy_string*) it++))
+ Item_copy *item;
+ while ((item = (Item_copy*) it++))
  item->copy();
  }