Mirror of https://github.com/MariaDB/server.git
Merge zim.(none):/home/brian/mysql/mysql-5.0
into zim.(none):/home/brian/mysql/mysql-5.1

Resolved two minor issues.

BitKeeper/etc/config: Auto merged
Makefile.am: Auto merged
VC++Files/sql/mysqld.vcproj: Auto merged
extra/perror.c: Auto merged
include/my_global.h: Auto merged
mysql-test/mysql-test-run.pl: Auto merged
mysql-test/mysql-test-run.sh: Auto merged
mysql-test/t/alter_table.test: Auto merged
mysql-test/t/disabled.def: Auto merged
mysys/Makefile.am: Auto merged
scripts/mysql_fix_privilege_tables.sql: Auto merged
sql/Makefile.am: Auto merged
sql/ha_innodb.cc: Auto merged
sql/ha_innodb.h: Auto merged
sql/handler.h: Auto merged
sql/item.cc: Auto merged
sql/lex.h: Auto merged
sql/log.cc: Auto merged
sql/mysql_priv.h: Auto merged
sql/mysqld.cc: Auto merged
sql/opt_range.cc: Auto merged
sql/set_var.cc: Auto merged
sql/sp.cc: Auto merged
sql/sql_class.h: Auto merged
sql/sql_delete.cc: Auto merged
sql/sql_insert.cc: Auto merged
sql/sql_lex.cc: Auto merged
sql/sql_lex.h: Auto merged
sql/sql_parse.cc: Auto merged
sql/sql_select.cc: Auto merged
sql/sql_show.cc: Auto merged
sql/sql_table.cc: Auto merged
sql/sql_update.cc: Auto merged
sql/sql_yacc.yy: Auto merged
sql/examples/ha_tina.cc: Auto merged
sql/table.h: Auto merged
sql/unireg.h: Auto merged
storage/innobase/lock/lock0lock.c: Auto merged
storage/myisam/mi_check.c: Auto merged
storage/myisam/mi_search.c: Auto merged
storage/myisam/myisamchk.c: Auto merged
storage/myisam/myisamlog.c: Auto merged
storage/myisam/myisampack.c: Auto merged
storage/ndb/src/kernel/blocks/dbdict/Dbdict.cpp: Auto merged
configure.in: Fixed the version resolution.
mysql-test/r/alter_table.result: Fixed results.
@@ -6048,6 +6048,7 @@ int QUICK_RANGE_SELECT::reset()
   DBUG_ENTER("QUICK_RANGE_SELECT::reset");
   next=0;
   range= NULL;
+  in_range= FALSE;
   cur_range= (QUICK_RANGE**) ranges.buffer;
 
   if (file->inited == handler::NONE && (error= file->ha_index_init(index,1)))
@@ -7021,6 +7022,7 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
   ha_rows cur_quick_prefix_records= 0;
   uint cur_param_idx;
   key_map cur_used_key_parts;
+  uint pk= param->table->s->primary_key;
 
   for (uint cur_index= 0 ; cur_index_info != cur_index_info_end ;
        cur_index_info++, cur_index++)
@@ -7029,6 +7031,45 @@ get_best_group_min_max(PARAM *param, SEL_TREE *tree)
     if (!table->used_keys.is_set(cur_index))
       goto next_index;
 
+    /*
+      If the current storage manager is such that it appends the primary key to
+      each index, then the above condition is insufficient to check if the
+      index is covering. In such cases it may happen that some fields are
+      covered by the PK index, but not by the current index. Since we can't
+      use the concatenation of both indexes for index lookup, such an index
+      does not qualify as covering in our case. If this is the case, below
+      we check that all query fields are indeed covered by 'cur_index'.
+    */
+    if (pk < MAX_KEY && cur_index != pk &&
+        (table->file->table_flags() & HA_PRIMARY_KEY_IN_READ_INDEX))
+    {
+      /* For each table field */
+      for (uint i= 0; i < table->s->fields; i++)
+      {
+        Field *cur_field= table->field[i];
+        /*
+          If the field is used in the current query, check that the
+          field is covered by some keypart of the current index.
+        */
+        if (thd->query_id == cur_field->query_id)
+        {
+          bool is_covered= FALSE;
+          KEY_PART_INFO *key_part= cur_index_info->key_part;
+          KEY_PART_INFO *key_part_end= key_part + cur_index_info->key_parts;
+          for (; key_part != key_part_end ; key_part++)
+          {
+            if (key_part->field == cur_field)
+            {
+              is_covered= TRUE;
+              break;
+            }
+          }
+          if (!is_covered)
+            goto next_index;
+        }
+      }
+    }
+
     /*
       Check (GA1) for GROUP BY queries.
     */
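For illustration, a minimal standalone C++ sketch of the covering check introduced in the last hunk. The types FieldRef and KeyPartRef and the helper index_covers_query_fields() are hypothetical stand-ins for the server's Field, KEY_PART_INFO, and THD query-id bookkeeping; the sketch mirrors only the shape of the loops, not the actual server API.

/*
  Hypothetical simplified stand-ins (not the real server structures).
  FieldRef:   which column it is and whether the query references it.
  KeyPartRef: the column a key part of the candidate index is built on.
*/
#include <cstdio>
#include <vector>

struct FieldRef   { int field_id; bool used_in_query; };
struct KeyPartRef { int field_id; };

/*
  Returns true when every field referenced by the query appears among the
  candidate index's own key parts.  A used field that is not found in any
  key part rejects the index (the "goto next_index" case in the hunk),
  because the implicitly appended primary key cannot be used for the lookup.
*/
static bool index_covers_query_fields(const std::vector<FieldRef> &table_fields,
                                      const std::vector<KeyPartRef> &index_parts)
{
  for (const FieldRef &field : table_fields)
  {
    if (!field.used_in_query)
      continue;                                 /* only query fields matter */
    bool is_covered= false;
    for (const KeyPartRef &part : index_parts)
    {
      if (part.field_id == field.field_id)
      {
        is_covered= true;                       /* field backed by a key part */
        break;
      }
    }
    if (!is_covered)
      return false;                             /* reject this index */
  }
  return true;
}

int main()
{
  /* Table with columns 1..3; the query touches columns 1 and 3. */
  std::vector<FieldRef> fields= { {1, true}, {2, false}, {3, true} };
  /* Secondary index declared only on column 3. */
  std::vector<KeyPartRef> idx_c3= { {3} };
  /* Index declared on columns 3 and 1. */
  std::vector<KeyPartRef> idx_c3_c1= { {3}, {1} };

  printf("index(c3)    covering: %d\n", index_covers_query_fields(fields, idx_c3));
  printf("index(c3,c1) covering: %d\n", index_covers_query_fields(fields, idx_c3_c1));
  return 0;
}

In an engine that appends the primary key (say column 1) to every secondary index, used_keys could mark index(c3) as covering for this query, yet the explicit key-part check rejects it while accepting index(c3,c1), which is exactly the situation the comment added in the hunk describes.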