Mirror of https://github.com/MariaDB/server.git
Merge of 5.1-main into 5.1-maria. There were no changes to storage/myisam or mysql-test/t/*myisam*.
However, there were three new tests, mysql-test/suite/parts/t/partition*myisam.test, of which I make copies here for Maria.
Changed file: sql/sql_base.cc (135 lines)
@@ -489,12 +489,20 @@ static TABLE_SHARE
       "no such table" errors.
       @todo Rework the alternative ways to deal with ER_NO_SUCH TABLE.
     */
-    if (thd->is_error() && table_list->belong_to_view)
+    if (thd->is_error())
     {
-      TABLE_LIST *view= table_list->belong_to_view;
-      thd->clear_error();
-      my_error(ER_VIEW_INVALID, MYF(0),
-               view->view_db.str, view->view_name.str);
+      if (table_list->parent_l)
+      {
+        thd->clear_error();
+        my_error(ER_WRONG_MRG_TABLE, MYF(0));
+      }
+      else if (table_list->belong_to_view)
+      {
+        TABLE_LIST *view= table_list->belong_to_view;
+        thd->clear_error();
+        my_error(ER_VIEW_INVALID, MYF(0),
+                 view->view_db.str, view->view_name.str);
+      }
     }
     DBUG_RETURN(0);
   }
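
A minimal standalone sketch of the error remapping this hunk introduces, using hypothetical stand-in types rather than the server's THD/TABLE_LIST: when opening an underlying table fails with "no such table", a missing MERGE child is reported as ER_WRONG_MRG_TABLE, a missing view base table as ER_VIEW_INVALID, and a plain table keeps the original error.

```cpp
// Hypothetical, simplified model of the error remapping in this hunk; the
// real server works on TABLE_LIST and reports via my_error(), not these types.
#include <iostream>
#include <string>

struct TableRef {                    // stand-in for TABLE_LIST
  bool is_merge_child;               // real code: table_list->parent_l != NULL
  bool belongs_to_view;              // real code: table_list->belong_to_view != NULL
  std::string view_db, view_name;
};

// Decide how a "no such table" failure should be reported to the client.
std::string remap_open_error(const TableRef &t)
{
  if (t.is_merge_child)              // missing child of a MERGE table
    return "ER_WRONG_MRG_TABLE";
  if (t.belongs_to_view)             // missing base table of a view
    return "ER_VIEW_INVALID: " + t.view_db + "." + t.view_name;
  return "ER_NO_SUCH_TABLE";         // plain table: keep the original error
}

int main()
{
  std::cout << remap_open_error({true, false, "", ""}) << "\n";
  std::cout << remap_open_error({false, true, "db1", "v1"}) << "\n";
  std::cout << remap_open_error({false, false, "", ""}) << "\n";
}
```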
@@ -3731,6 +3739,20 @@ void assign_new_table_id(TABLE_SHARE *share)
   DBUG_VOID_RETURN;
 }
 
+#ifndef DBUG_OFF
+/* Cause a spurious statement reprepare for debug purposes. */
+static bool inject_reprepare(THD *thd)
+{
+  if (thd->m_reprepare_observer && thd->stmt_arena->is_reprepared == FALSE)
+  {
+    thd->m_reprepare_observer->report_error(thd);
+    return TRUE;
+  }
+
+  return FALSE;
+}
+#endif
+
 /**
   Compare metadata versions of an element obtained from the table
   definition cache and its corresponding node in the parse tree.
@@ -3784,13 +3806,7 @@ check_and_update_table_version(THD *thd,
     tables->set_table_ref_id(table_share);
   }
 
-  DBUG_EXECUTE_IF("reprepare_each_statement",
-                  if (thd->m_reprepare_observer &&
-                      thd->stmt_arena->is_reprepared == FALSE)
-                  {
-                    thd->m_reprepare_observer->report_error(thd);
-                    return TRUE;
-                  });
+  DBUG_EXECUTE_IF("reprepare_each_statement", return inject_reprepare(thd););
 
   return FALSE;
 }
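
The two hunks above move the multi-line injection body out of DBUG_EXECUTE_IF into the inject_reprepare() helper so the call site stays a one-liner. A rough sketch of that keyword-gated injection pattern, assuming simplified stand-ins (keyword_enabled, Session) rather than the server's DBUG facility and Reprepare_observer:

```cpp
// Illustrative stand-in for the DBUG_EXECUTE_IF + helper pattern; the real
// server gates injection on debug keywords and an observer hanging off THD.
#include <iostream>
#include <set>
#include <string>

static std::set<std::string> active_keywords;          // e.g. set via --debug
static bool keyword_enabled(const std::string &kw)
{ return active_keywords.count(kw) != 0; }

struct Session {                                        // stand-in for THD state
  bool has_observer= true;                              // m_reprepare_observer != NULL
  bool already_reprepared= false;                       // stmt_arena->is_reprepared
};

// Counterpart of inject_reprepare(): report a fake metadata change once.
static bool inject_reprepare(Session &s)
{
  if (s.has_observer && !s.already_reprepared)
  {
    std::cout << "reprepare requested\n";               // observer->report_error(thd)
    return true;                                        // caller must report "changed"
  }
  return false;
}

int main()
{
  active_keywords.insert("reprepare_each_statement");
  Session s;
  // One-line call site, mirroring DBUG_EXECUTE_IF("...", return inject_reprepare(thd););
  if (keyword_enabled("reprepare_each_statement") && inject_reprepare(s))
    std::cout << "statement will be re-prepared\n";
}
```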
@@ -4364,6 +4380,38 @@ bool fix_merge_after_open(TABLE_LIST *old_child_list, TABLE_LIST **old_last,
 }
 
 
+/*
+  Return a appropriate read lock type given a table object.
+
+  @param thd Thread context
+  @param table TABLE object for table to be locked
+
+  @remark Due to a statement-based replication limitation, statements such as
+          INSERT INTO .. SELECT FROM .. and CREATE TABLE .. SELECT FROM need
+          to grab a TL_READ_NO_INSERT lock on the source table in order to
+          prevent the replication of a concurrent statement that modifies the
+          source table. If such a statement gets applied on the slave before
+          the INSERT .. SELECT statement finishes, data on the master could
+          differ from data on the slave and end-up with a discrepancy between
+          the binary log and table state. Furthermore, this does not apply to
+          I_S and log tables as it's always unsafe to replicate such tables
+          under statement-based replication as the table on the slave might
+          contain other data (ie: general_log is enabled on the slave). The
+          statement will be marked as unsafe for SBR in decide_logging_format().
+*/
+
+thr_lock_type read_lock_type_for_table(THD *thd, TABLE *table)
+{
+  bool log_on= mysql_bin_log.is_open() && (thd->options & OPTION_BIN_LOG);
+  ulong binlog_format= thd->variables.binlog_format;
+  if ((log_on == FALSE) || (binlog_format == BINLOG_FORMAT_ROW) ||
+      (table->s->table_category == TABLE_CATEGORY_PERFORMANCE))
+    return TL_READ;
+  else
+    return TL_READ_NO_INSERT;
+}
+
+
 /*
   Open all tables in list
 
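
A simplified model of the decision read_lock_type_for_table() makes, using stand-in enums and flags rather than the server's thr_lock and binlog types: with statement-based binlogging in effect, the source table of an INSERT ... SELECT gets TL_READ_NO_INSERT so a concurrent insert cannot slip between the statement and its binlog entry, while row format or log/I_S-category tables fall back to plain TL_READ. A later hunk wires this into open_tables() for tables opened with TL_READ_DEFAULT.

```cpp
// Simplified model of the decision in read_lock_type_for_table(); the enum
// values and flags below are stand-ins, not the server's thr_lock definitions.
#include <iostream>

enum lock_type { TL_READ, TL_READ_NO_INSERT };
enum binlog_fmt { FMT_STATEMENT, FMT_MIXED, FMT_ROW };

struct Ctx {
  bool binlog_open_and_enabled;     // mysql_bin_log.is_open() && OPTION_BIN_LOG
  binlog_fmt format;                // thd->variables.binlog_format
  bool log_or_info_schema_table;    // TABLE_CATEGORY_PERFORMANCE in the patch
};

// Under statement-based binlogging, reads that feed INSERT ... SELECT must
// block concurrent inserts on the source table, hence TL_READ_NO_INSERT.
lock_type read_lock_type_for_table(const Ctx &c)
{
  if (!c.binlog_open_and_enabled || c.format == FMT_ROW ||
      c.log_or_info_schema_table)
    return TL_READ;
  return TL_READ_NO_INSERT;
}

int main()
{
  Ctx sbr{true, FMT_STATEMENT, false};
  Ctx row{true, FMT_ROW, false};
  std::cout << (read_lock_type_for_table(sbr) == TL_READ_NO_INSERT) << "\n"; // 1
  std::cout << (read_lock_type_for_table(row) == TL_READ) << "\n";           // 1
}
```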
@@ -4638,6 +4686,9 @@ int open_tables(THD *thd, TABLE_LIST **start, uint *counter, uint flags)
     {
       if (tables->lock_type == TL_WRITE_DEFAULT)
         tables->table->reginfo.lock_type= thd->update_lock_default;
+      else if (tables->lock_type == TL_READ_DEFAULT)
+        tables->table->reginfo.lock_type=
+          read_lock_type_for_table(thd, tables->table);
       else if (tables->table->s->tmp_table == NO_TMP_TABLE)
         tables->table->reginfo.lock_type= tables->lock_type;
     }
@@ -5045,7 +5096,11 @@ int decide_logging_format(THD *thd, TABLE_LIST *tables)
     void* prev_ht= NULL;
     for (TABLE_LIST *table= tables; table; table= table->next_global)
     {
-      if (!table->placeholder() && table->lock_type >= TL_WRITE_ALLOW_WRITE)
+      if (table->placeholder())
+        continue;
+      if (table->table->s->table_category == TABLE_CATEGORY_PERFORMANCE)
+        thd->lex->set_stmt_unsafe();
+      if (table->lock_type >= TL_WRITE_ALLOW_WRITE)
       {
         ulonglong const flags= table->table->file->ha_table_flags();
         DBUG_PRINT("info", ("table: %s; ha_table_flags: %s%s",
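
A trimmed-down sketch of the loop structure this hunk introduces in decide_logging_format(), with hypothetical Table/Stmt stand-ins for TABLE_LIST/THD: placeholders are skipped, any log-category table marks the statement unsafe for statement-based replication, and only write-locked tables feed the rest of the format decision.

```cpp
// Hypothetical, trimmed-down version of the loop structure in this hunk;
// Table and Stmt are stand-ins for TABLE_LIST/THD, not server types.
#include <iostream>
#include <vector>

struct Table {
  bool placeholder;             // view or schema-only reference, no real handler
  bool performance_category;    // general_log / slow_log style tables
  bool write_locked;            // lock_type >= TL_WRITE_ALLOW_WRITE
};

struct Stmt { bool unsafe_for_sbr= false; int write_tables= 0; };

void classify(Stmt &stmt, const std::vector<Table> &tables)
{
  for (const Table &t : tables)
  {
    if (t.placeholder)
      continue;                      // nothing to inspect
    if (t.performance_category)
      stmt.unsafe_for_sbr= true;     // lex->set_stmt_unsafe() in the patch
    if (t.write_locked)
      ++stmt.write_tables;           // only these affect the logging format
  }
}

int main()
{
  Stmt s;
  classify(s, {{false, true, false}, {false, false, true}});
  std::cout << s.unsafe_for_sbr << " " << s.write_tables << "\n"; // 1 1
}
```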
@@ -5709,8 +5764,21 @@ find_field_in_natural_join(THD *thd, TABLE_LIST *table_ref, const char *name,
   {
     /* This is a base table. */
     DBUG_ASSERT(nj_col->view_field == NULL);
-    DBUG_ASSERT(nj_col->table_ref->table == nj_col->table_field->table);
-    found_field= nj_col->table_field;
+    /*
+      This fix_fields is not necessary (initially this item is fixed by
+      the Item_field constructor; after reopen_tables the Item_func_eq
+      calls fix_fields on that item), it's just a check during table
+      reopening for columns that was dropped by the concurrent connection.
+    */
+    if (!nj_col->table_field->fixed &&
+        nj_col->table_field->fix_fields(thd, (Item **)&nj_col->table_field))
+    {
+      DBUG_PRINT("info", ("column '%s' was dropped by the concurrent connection",
+                          nj_col->table_field->name));
+      DBUG_RETURN(NULL);
+    }
+    DBUG_ASSERT(nj_col->table_ref->table == nj_col->table_field->field->table);
+    found_field= nj_col->table_field->field;
     update_field_dependencies(thd, found_field, nj_col->table_ref->table);
   }
 
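
A stand-in sketch of the dropped-column check added here: after tables are reopened, a cached natural-join column is re-resolved, and a failure means the column was dropped by a concurrent connection, so the lookup returns NULL instead of a stale pointer. Column/Table below are illustrative placeholders, not the server's Item_field/TABLE.

```cpp
// Stand-in sketch of the "column dropped during reopen" check; Column and
// Table below are simplified placeholders for Item_field/TABLE.
#include <iostream>
#include <set>
#include <string>

struct Table { std::set<std::string> columns; };   // current columns after reopen

struct Column {
  std::string name;
  bool fixed= false;                               // resolved against the table?
  bool fix(const Table &t)                         // re-resolve after reopen
  {
    fixed= t.columns.count(name) != 0;
    return fixed;
  }
};

// Return the column if it still exists, NULL if it was dropped concurrently.
const Column *find_after_reopen(Column &col, const Table &t)
{
  if (!col.fixed && !col.fix(t))
    return nullptr;                                // emulates DBUG_RETURN(NULL)
  return &col;
}

int main()
{
  Table t{{"a", "b"}};
  Column kept{"a"}, dropped{"c"};
  std::cout << (find_after_reopen(kept, t) != nullptr) << "\n";    // 1
  std::cout << (find_after_reopen(dropped, t) != nullptr) << "\n"; // 0
}
```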
@@ -6635,7 +6703,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
     const char *field_name_1;
     /* true if field_name_1 is a member of using_fields */
     bool is_using_column_1;
-    if (!(nj_col_1= it_1.get_or_create_column_ref(leaf_1)))
+    if (!(nj_col_1= it_1.get_or_create_column_ref(thd, leaf_1)))
       goto err;
     field_name_1= nj_col_1->name();
     is_using_column_1= using_fields &&
@@ -6656,7 +6724,7 @@ mark_common_columns(THD *thd, TABLE_LIST *table_ref_1, TABLE_LIST *table_ref_2,
     {
       Natural_join_column *cur_nj_col_2;
       const char *cur_field_name_2;
-      if (!(cur_nj_col_2= it_2.get_or_create_column_ref(leaf_2)))
+      if (!(cur_nj_col_2= it_2.get_or_create_column_ref(thd, leaf_2)))
         goto err;
       cur_field_name_2= cur_nj_col_2->name();
       DBUG_PRINT ("info", ("cur_field_name_2=%s.%s",
@@ -7146,15 +7214,24 @@ static bool setup_natural_join_row_types(THD *thd,
   TABLE_LIST *left_neighbor;
   /* Table reference to the right of the current. */
   TABLE_LIST *right_neighbor= NULL;
+  bool save_first_natural_join_processing=
+    context->select_lex->first_natural_join_processing;
+
+  context->select_lex->first_natural_join_processing= FALSE;
 
   /* Note that tables in the list are in reversed order */
   for (left_neighbor= table_ref_it++; left_neighbor ; )
   {
     table_ref= left_neighbor;
     left_neighbor= table_ref_it++;
-    /* For stored procedures do not redo work if already done. */
-    if (context->select_lex->first_execution)
+    /*
+      Do not redo work if already done:
+      1) for stored procedures,
+      2) for multitable update after lock failure and table reopening.
+    */
+    if (save_first_natural_join_processing)
     {
+      context->select_lex->first_natural_join_processing= FALSE;
       if (store_top_level_join_columns(thd, table_ref,
                                        left_neighbor, right_neighbor))
         return TRUE;
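
A small sketch of the run-once pattern this hunk switches to: the first_natural_join_processing flag is saved and cleared so the expensive natural-join column matching runs only once per statement, whether the repeat comes from stored-procedure re-execution or from reopening tables after a lock failure. SelectLex below is a stand-in holding just that flag.

```cpp
// Stand-in sketch of the run-once flag handling in this hunk; SelectLex here
// only models the single first_natural_join_processing flag, nothing more.
#include <iostream>

struct SelectLex { bool first_natural_join_processing= true; };

// Returns true when the costly natural-join column setup actually ran.
bool setup_natural_join_once(SelectLex &sl)
{
  bool save_first= sl.first_natural_join_processing;  // remember the old value
  sl.first_natural_join_processing= false;            // later calls become no-ops
  if (!save_first)
    return false;                                     // already done earlier
  // ... store_top_level_join_columns() would run here ...
  return true;
}

int main()
{
  SelectLex sl;
  std::cout << setup_natural_join_once(sl) << "\n";  // 1: first execution
  std::cout << setup_natural_join_once(sl) << "\n";  // 0: reopen / re-execution
}
```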
@@ -7299,6 +7376,22 @@ bool setup_fields(THD *thd, Item **ref_pointer_array,
   if (ref_pointer_array)
     bzero(ref_pointer_array, sizeof(Item *) * fields.elements);
 
+  /*
+    We call set_entry() there (before fix_fields() of the whole list of field
+    items) because:
+    1) the list of field items has same order as in the query, and the
+       Item_func_get_user_var item may go before the Item_func_set_user_var:
+          SELECT @a, @a := 10 FROM t;
+    2) The entry->update_query_id value controls constantness of
+       Item_func_get_user_var items, so in presence of Item_func_set_user_var
+       items we have to refresh their entries before fixing of
+       Item_func_get_user_var items.
+  */
+  List_iterator<Item_func_set_user_var> li(thd->lex->set_var_list);
+  Item_func_set_user_var *var;
+  while ((var= li++))
+    var->set_entry(thd, FALSE);
+
   Item **ref= ref_pointer_array;
   thd->lex->current_select->cur_pos_in_select_list= 0;
   while ((item= it++))
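
A rough model of the pre-pass the added comment describes: every Item_func_set_user_var collected in set_var_list refreshes its variable entry before any select-list item is fixed, so the earlier plain @a read in SELECT @a, @a := 10 FROM t is not mistakenly treated as constant. The map below is a stand-in for the session's user-variable hash.

```cpp
// Rough model of the pre-pass over Item_func_set_user_var items; a plain map
// stands in for the session's user_var_entry hash.
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct VarEntry { long update_query_id= 0; };           // constness control
static std::map<std::string, VarEntry> session_vars;    // stand-in for THD hash

struct SetUserVar {                                     // @x := expr in the list
  std::string name;
  void set_entry(long query_id) { session_vars[name].update_query_id= query_id; }
};

int main()
{
  long query_id= 42;
  // Select list: SELECT @a, @a := 10 FROM t;  only the assignment is collected.
  std::vector<SetUserVar> set_var_list{{"a"}};

  // Pre-pass (the added loop): refresh entries before any item is "fixed",
  // so the earlier plain @a read is not treated as a constant.
  for (SetUserVar &v : set_var_list)
    v.set_entry(query_id);

  std::cout << session_vars["a"].update_query_id << "\n";  // 42
}
```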
@@ -7680,6 +7773,10 @@ insert_fields(THD *thd, Name_resolution_context *context, const char *db_name,
 
       if (!(item= field_iterator.create_item(thd)))
         DBUG_RETURN(TRUE);
+      DBUG_ASSERT(item->fixed);
+      /* cache the table for the Item_fields inserted by expanding stars */
+      if (item->type() == Item::FIELD_ITEM && tables->cacheable_table)
+        ((Item_field *)item)->cached_table= tables;
 
       if (!found)
       {
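
A minimal stand-in for the caching added here: items produced by expanding t.* remember the table reference that resolved them (cached_table), so a later execution of a cacheable reference can skip the name lookup. FieldItem/TableRef are illustrative types, not the server's Item_field/TABLE_LIST.

```cpp
// Minimal stand-in for caching the originating table on a star-expanded
// field item; FieldItem/TableRef are illustrative, not the server's classes.
#include <iostream>
#include <string>

struct TableRef { std::string alias; bool cacheable; };

struct FieldItem {
  std::string name;
  const TableRef *cached_table= nullptr;   // set once during expansion
};

// Expand "t.*": create one item per column and remember the table reference.
FieldItem expand_column(const std::string &col, const TableRef &t)
{
  FieldItem item{col};
  if (t.cacheable)                         // mirrors tables->cacheable_table
    item.cached_table= &t;
  return item;
}

int main()
{
  TableRef t{"t1", true};
  FieldItem f= expand_column("c1", t);
  std::cout << (f.cached_table ? f.cached_table->alias : "none") << "\n"; // t1
}
```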