mirror of https://github.com/MariaDB/server.git

10.0-base merge

Sergei Golubchik
2013-07-18 16:46:57 +02:00
1818 changed files with 11042 additions and 3689 deletions


@@ -1,5 +1,5 @@
/* Copyright (c) 2000, 2011, Oracle and/or its affiliates.
Copyright (c) 2008-2011 Monty Program Ab
/* Copyright (c) 2000, 2013, Oracle and/or its affiliates.
Copyright (c) 2008, 2013, Monty Program Ab.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@@ -844,8 +844,17 @@ public:
/* Number of SEL_ARG objects allocated by SEL_ARG::clone_tree operations */
uint alloced_sel_args;
bool force_default_mrr;
KEY_PART *key[MAX_KEY]; /* First key parts of keys used in the query */
bool statement_should_be_aborted() const
{
return
thd->is_fatal_error ||
thd->is_error() ||
alloced_sel_args > SEL_ARG::MAX_SEL_ARGS;
}
};
class PARAM : public RANGE_OPT_PARAM
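The new RANGE_OPT_PARAM::statement_should_be_aborted() helper bundles the three bail-out checks (fatal error, any pending error, too many allocated SEL_ARG objects) that the get_mm_tree() hunks further down used to spell out inline at each call site. A minimal standalone sketch of the same idea, illustration only and not part of the patch, with mock members standing in for THD state and an assumed value for SEL_ARG::MAX_SEL_ARGS:

#include <cstddef>

/* Toy stand-in for RANGE_OPT_PARAM: plain flags replace THD state. */
struct MockRangeOptParam
{
  bool fatal_error= false;             /* mirrors thd->is_fatal_error      */
  bool other_error= false;             /* mirrors thd->is_error()          */
  std::size_t alloced_sel_args= 0;     /* SEL_ARG objects allocated so far */
  static const std::size_t MAX_SEL_ARGS= 16000;   /* assumed limit         */

  /* One place to answer "should range analysis give up now?" */
  bool statement_should_be_aborted() const
  {
    return fatal_error || other_error || alloced_sel_args > MAX_SEL_ARGS;
  }
};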
@@ -2074,30 +2083,16 @@ end:
org_key_read= head->key_read;
head->file= file;
head->key_read= 0;
head->mark_columns_used_by_index_no_reset(index, head->read_set);
if (!head->no_keyread)
{
doing_key_read= 1;
head->mark_columns_used_by_index_no_reset(index, head->read_set);
head->enable_keyread();
}
head->prepare_for_position();
if (head->no_keyread)
{
/*
We can get here when doing multi-table delete and having index_merge
condition on a table that we're deleting from. It probably doesn't make
sense to use index_merge, but de-facto it is used.
When it is used, we need to index columns to be read (before maria-5.3,
read_multi_range_first() would set it).
We shouldn't call mark_columns_used_by_index(), because it calls
enable_keyread(), which is not allowed.
*/
head->mark_columns_used_by_index_no_reset(index, head->read_set);
}
head->file= org_file;
head->key_read= org_key_read;
@@ -3609,6 +3604,44 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item *cond)
/****************************************************************************
* Partition pruning module
****************************************************************************/
/*
Store field key image to table record
SYNOPSIS
store_key_image_to_rec()
field Field which key image should be stored
ptr Field value in key format
len Length of the value, in bytes
DESCRIPTION
Copy the field value from its key image to the table record. The source
is the value in key image format, occupying len bytes in buffer pointed
by ptr. The destination is table record, in "field value in table record"
format.
*/
void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
{
/* Do the same as print_key() does */
my_bitmap_map *old_map;
if (field->real_maybe_null())
{
if (*ptr)
{
field->set_null();
return;
}
field->set_notnull();
ptr++;
}
old_map= dbug_tmp_use_all_columns(field->table,
field->table->write_set);
field->set_key_image(ptr, len);
dbug_tmp_restore_column_map(field->table->write_set, old_map);
}
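The DESCRIPTION above explains the "key image" layout this function consumes: for a nullable field, a one-byte NULL indicator comes first (non-zero means NULL) and the value bytes follow. A self-contained toy decoder of that layout, illustration only; the 4-byte little-endian integer encoding is an assumption here, and the real code delegates to Field::set_key_image() instead:

#include <cstdint>
#include <cstring>
#include <cstdio>

/* Decode a "NULL byte + 4-byte value" key image into *out.
   Returns false when the leading byte says the value is NULL. */
static bool decode_int_key_image(const unsigned char *ptr, int32_t *out)
{
  if (*ptr)                                 /* non-zero flag byte => NULL  */
    return false;
  std::memcpy(out, ptr + 1, sizeof(*out));  /* value bytes follow the flag */
  return true;
}

int main()
{
  const unsigned char image[5]= {0, 42, 0, 0, 0};  /* not NULL, 42 (LE)    */
  int32_t v;
  if (decode_int_key_image(image, &v))
    std::printf("decoded %d\n", v);
  return 0;
}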
#ifdef WITH_PARTITION_STORAGE_ENGINE
/*
@@ -3943,44 +3976,6 @@ end:
}
/*
Store field key image to table record
SYNOPSIS
store_key_image_to_rec()
field Field which key image should be stored
ptr Field value in key format
len Length of the value, in bytes
DESCRIPTION
Copy the field value from its key image to the table record. The source
is the value in key image format, occupying len bytes in buffer pointed
by ptr. The destination is table record, in "field value in table record"
format.
*/
void store_key_image_to_rec(Field *field, uchar *ptr, uint len)
{
/* Do the same as print_key() does */
my_bitmap_map *old_map;
if (field->real_maybe_null())
{
if (*ptr)
{
field->set_null();
return;
}
field->set_notnull();
ptr++;
}
old_map= dbug_tmp_use_all_columns(field->table,
field->table->write_set);
field->set_key_image(ptr, len);
dbug_tmp_restore_column_map(field->table->write_set, old_map);
}
/*
For SEL_ARG* array, store sel_arg->min values into table record buffer
@@ -5326,6 +5321,8 @@ TABLE_READ_PLAN *merge_same_index_scans(PARAM *param, SEL_IMERGE *imerge,
bzero((*changed_tree)->keys,
sizeof((*changed_tree)->keys[0])*param->keys);
(*changed_tree)->keys_map.clear_all();
key->incr_refs();
(*tree)->keys[key_idx]->incr_refs();
if (((*changed_tree)->keys[key_idx]=
key_or(param, key, (*tree)->keys[key_idx])))
(*changed_tree)->keys_map.set_bit(key_idx);
@@ -7531,6 +7528,34 @@ static SEL_TREE *get_func_mm_tree(RANGE_OPT_PARAM *param, Item_func *cond_func,
{
new_interval->min_value= last_val->max_value;
new_interval->min_flag= NEAR_MIN;
/*
If the interval is over a partial keypart, the
interval must be "c_{i-1} <= X < c_i" instead of
"c_{i-1} < X < c_i". Reason:
Consider a table with a column "my_col VARCHAR(3)",
and an index with definition
"INDEX my_idx my_col(1)". If the table contains rows
with my_col values "f" and "foo", the index will not
distinguish the two rows.
Note that tree_or() below will effectively merge
this range with the range created for c_{i-1} and
we'll eventually end up with only one range:
"NULL < X".
Partitioning indexes are never partial.
*/
if (param->using_real_indexes)
{
const KEY key=
param->table->key_info[param->real_keynr[idx]];
const KEY_PART_INFO *kpi= key.key_part + new_interval->part;
if (kpi->key_part_flag & HA_PART_KEY_SEG)
new_interval->min_flag= 0;
}
}
}
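The comment's VARCHAR(3) example can be checked in isolation: with only the first character indexed, 'f' and 'foo' have identical index images, so an exclusive lower bound "> f" evaluated on the index could wrongly skip 'foo', while clearing NEAR_MIN makes the bound ">= f". A self-contained sketch, illustration only, with std::string prefixes standing in for a my_col(1) prefix index:

#include <cassert>
#include <cstddef>
#include <string>

/* What a length-1 prefix index would store for a given column value. */
static std::string index_image(const std::string &val, std::size_t prefix_len)
{
  return val.substr(0, prefix_len);
}

int main()
{
  /* The prefix index cannot tell "f" and "foo" apart: both store "f".    */
  assert(index_image("f", 1) == index_image("foo", 1));

  /* An exclusive bound on the image ("> f") rejects both rows, while the
     inclusive bound (">= f") keeps them and lets later filtering decide. */
  assert(!(index_image("foo", 1) > index_image("f", 1)));
  assert(index_image("foo", 1) >= index_image("f", 1));
  return 0;
}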
/*
@@ -7743,34 +7768,35 @@ static SEL_TREE *get_mm_tree(RANGE_OPT_PARAM *param,COND *cond)
if (((Item_cond*) cond)->functype() == Item_func::COND_AND_FUNC)
{
tree=0;
tree= NULL;
Item *item;
while ((item=li++))
{
SEL_TREE *new_tree=get_mm_tree(param,item);
if (param->thd->is_fatal_error ||
param->alloced_sel_args > SEL_ARG::MAX_SEL_ARGS)
DBUG_RETURN(0); // out of memory
tree=tree_and(param,tree,new_tree);
if (tree && tree->type == SEL_TREE::IMPOSSIBLE)
break;
SEL_TREE *new_tree= get_mm_tree(param,item);
if (param->statement_should_be_aborted())
DBUG_RETURN(NULL);
tree= tree_and(param,tree,new_tree);
if (tree && tree->type == SEL_TREE::IMPOSSIBLE)
break;
}
}
else
{ // COND OR
tree=get_mm_tree(param,li++);
{ // COND OR
tree= get_mm_tree(param,li++);
if (param->statement_should_be_aborted())
DBUG_RETURN(NULL);
if (tree)
{
Item *item;
while ((item=li++))
{
SEL_TREE *new_tree=get_mm_tree(param,item);
if (!new_tree)
DBUG_RETURN(0); // out of memory
tree=tree_or(param,tree,new_tree);
if (!tree || tree->type == SEL_TREE::ALWAYS)
break;
}
Item *item;
while ((item=li++))
{
SEL_TREE *new_tree=get_mm_tree(param,item);
if (new_tree == NULL || param->statement_should_be_aborted())
DBUG_RETURN(NULL);
tree= tree_or(param,tree,new_tree);
if (tree == NULL || tree->type == SEL_TREE::ALWAYS)
break;
}
}
}
DBUG_RETURN(tree);
@@ -8024,6 +8050,7 @@ get_mm_leaf(RANGE_OPT_PARAM *param, COND *conf_func, Field *field,
if (key_part->image_type == Field::itMBR)
{
// @todo: use is_spatial_operator() instead?
switch (type) {
case Item_func::SP_EQUALS_FUNC:
case Item_func::SP_DISJOINT_FUNC:
@@ -10993,12 +11020,13 @@ int read_keys_and_merge_scans(THD *thd,
Unique *unique= *unique_ptr;
handler *file= head->file;
bool with_cpk_filter= pk_quick_select != NULL;
bool enabled_keyread= 0;
DBUG_ENTER("read_keys_and_merge");
/* We're going to just read rowids. */
if (!head->key_read)
{
enabled_keyread= 1;
head->enable_keyread();
}
head->prepare_for_position();
@@ -11092,13 +11120,15 @@ int read_keys_and_merge_scans(THD *thd,
/*
index merge currently doesn't support "using index" at all
*/
head->disable_keyread();
if (enabled_keyread)
head->disable_keyread();
if (init_read_record(read_record, thd, head, (SQL_SELECT*) 0, 1 , 1, TRUE))
result= 1;
DBUG_RETURN(result);
err:
head->disable_keyread();
if (enabled_keyread)
head->disable_keyread();
DBUG_RETURN(1);
}
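The new enabled_keyread flag makes read_keys_and_merge_scans() undo only what it changed: both the normal exit and the error path call disable_keyread() only if this function was the one that enabled key reads. The same "restore only if you flipped it" pattern, sketched standalone with a hypothetical guard class rather than the server's handler/TABLE API:

/* Illustration only: a scope guard that turns a mode on and restores it on
   every exit path, but only when this guard actually changed it. */
struct ToyTable
{
  bool key_read;
  ToyTable() : key_read(false) {}
  void enable_keyread()  { key_read= true; }
  void disable_keyread() { key_read= false; }
};

class KeyreadGuard
{
  ToyTable &table;
  bool enabled_here;                       /* did *we* turn keyread on?   */
public:
  explicit KeyreadGuard(ToyTable &t) : table(t), enabled_here(!t.key_read)
  {
    if (enabled_here)
      table.enable_keyread();
  }
  ~KeyreadGuard()
  {
    if (enabled_here)                      /* otherwise leave it alone    */
      table.disable_keyread();
  }
};

int main()
{
  ToyTable t;
  {
    KeyreadGuard guard(t);                 /* keyread on inside the scope */
  }                                        /* restored automatically here */
  return t.key_read ? 1 : 0;               /* 0: state was put back       */
}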
@@ -13584,7 +13614,11 @@ QUICK_GROUP_MIN_MAX_SELECT::~QUICK_GROUP_MIN_MAX_SELECT()
DBUG_ASSERT(file == head->file);
if (doing_key_read)
head->disable_keyread();
file->ha_index_end();
/*
There may be a code path when the same table was first accessed by index,
then the index is closed, and the table is scanned (order by + loose scan).
*/
file->ha_index_or_rnd_end();
}
if (min_max_arg_part)
delete_dynamic(&min_max_ranges);