Mirror of https://github.com/MariaDB/server.git

Commit: 5.5-merge
@@ -1,4 +1,4 @@
-/* Copyright 2000-2008 MySQL AB, 2008-2009 Sun Microsystems, Inc.
+/* Copyright (c) 2000, 2010, Oracle and/or its affiliates. All rights reserved.
 
    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
@@ -42,11 +42,68 @@
   // mysql_handle_derived,
   // mysql_derived_filling

-/* Return 0 if row hasn't changed */
-
-bool compare_record(TABLE *table)
+/**
+  True if the table's input and output record buffers are comparable using
+  compare_records(TABLE*).
+*/
+bool records_are_comparable(const TABLE *table) {
+  return ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) == 0) ||
+    bitmap_is_subset(table->write_set, table->read_set);
+}
+
+
+/**
+  Compares the input and output record buffers of the table to see if a row
+  has changed. The algorithm iterates over updated columns and if they are
+  nullable compares NULL bits in the buffer before comparing actual
+  data. Special care must be taken to compare only the relevant NULL bits and
+  mask out all others as they may be undefined. The storage engine will not
+  and should not touch them.
+
+  @param table The table to evaluate.
+
+  @return true if row has changed.
+  @return false otherwise.
+*/
+bool compare_records(const TABLE *table)
+{
+  DBUG_ASSERT(records_are_comparable(table));
+
+  if ((table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ) != 0)
+  {
+    /*
+      Storage engine may not have read all columns of the record. Fields
+      (including NULL bits) not in the write_set may not have been read and
+      can therefore not be compared.
+    */
+    for (Field **ptr= table->field ; *ptr != NULL; ptr++)
+    {
+      Field *field= *ptr;
+      if (bitmap_is_set(table->write_set, field->field_index))
+      {
+        if (field->real_maybe_null())
+        {
+          uchar null_byte_index= field->null_ptr - table->record[0];
+
+          if (((table->record[0][null_byte_index]) & field->null_bit) !=
+              ((table->record[1][null_byte_index]) & field->null_bit))
+            return TRUE;
+        }
+        if (field->cmp_binary_offset(table->s->rec_buff_length))
+          return TRUE;
+      }
+    }
+    return FALSE;
+  }
+
+  /*
+    The storage engine has read all columns, so it's safe to compare all bits
+    including those not in the write_set. This is cheaper than the field-by-field
+    comparison done above.
+  */
   if (table->s->can_cmp_whole_record)
+    // Fixed-size record: do bitwise comparison of the records
     return cmp_record(table,record[1]);
   /* Compare null bits */
   if (memcmp(table->null_flags,
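A minimal, self-contained sketch (not part of the patch) of the NULL-bit rule described in the compare_records() comment above: after a partial read, NULL-indicator bits of unread columns share the same byte and may be undefined, so only the bit belonging to the inspected field may be compared between the two record buffers.

    #include <cassert>
    #include <cstdint>

    int main()
    {
      const uint8_t null_bit= 0x04;  // NULL-indicator bit of the field under inspection
      uint8_t before= 0x05;          // field's bit set, plus an unrelated (possibly undefined) bit
      uint8_t after = 0x06;          // field's bit still set, unrelated bits happen to differ

      assert((before & null_bit) == (after & null_bit)); // field's NULL state is unchanged
      assert(before != after);  // a naive whole-byte compare would report a spurious change
      return 0;
    }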
@@ -204,7 +261,6 @@ int mysql_update(THD *thd,
   bool using_limit= limit != HA_POS_ERROR;
   bool safe_update= test(thd->variables.option_bits & OPTION_SAFE_UPDATES);
   bool used_key_is_modified= FALSE, transactional_table, will_batch;
-  bool can_compare_record;
   int res;
   int error, loc_error;
   uint used_index, dup_key_found;
@@ -240,11 +296,17 @@
   if (lock_tables(thd, table_list, table_count, 0))
     DBUG_RETURN(1);
 
-  if (mysql_handle_derived(thd->lex, &mysql_derived_prepare) ||
-      (thd->fill_derived_tables() &&
-       mysql_handle_derived(thd->lex, &mysql_derived_filling)))
+  if (mysql_handle_derived(thd->lex, &mysql_derived_prepare))
     DBUG_RETURN(1);
 
+  if (thd->fill_derived_tables() &&
+      mysql_handle_derived(thd->lex, &mysql_derived_filling))
+  {
+    mysql_handle_derived(thd->lex, &mysql_derived_cleanup);
+    DBUG_RETURN(1);
+  }
+  mysql_handle_derived(thd->lex, &mysql_derived_cleanup);
+
   thd_proc_info(thd, "init");
   table= table_list->table;
 
@@ -582,15 +644,6 @@ int mysql_update(THD *thd,
   if (table->file->ha_table_flags() & HA_PARTIAL_COLUMN_READ)
     table->prepare_for_position();
 
-  /*
-    We can use compare_record() to optimize away updates if
-    the table handler is returning all columns OR if
-    if all updated columns are read
-  */
-  can_compare_record= (!(table->file->ha_table_flags() &
-                         HA_PARTIAL_COLUMN_READ) ||
-                       bitmap_is_subset(table->write_set, table->read_set));
-
   while (!(error=info.read_record(&info)) && !thd->killed)
   {
     update_virtual_fields(thd, table);
@@ -608,7 +661,7 @@ int mysql_update(THD *thd,
 
     found++;
 
-    if (!can_compare_record || compare_record(table))
+    if (!records_are_comparable(table) || compare_records(table))
     {
       if ((res= table_list->view_check_option(thd, ignore)) !=
           VIEW_CHECK_OK)
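For orientation, a sketch (a simplification, not code from the patch) of the replacement pattern used in the hunk above and again in the multi-update hunks below: records_are_comparable() acts as a cheap gate, and whenever the record buffers cannot be compared the row is conservatively treated as changed so the update is always applied.

    // Sketch only; relies on the TABLE type and the two functions added above.
    static bool row_has_changed(const TABLE *table)
    {
      if (!records_are_comparable(table))
        return true;                  // cannot prove "unchanged": perform the update
      return compare_records(table);  // buffers comparable: skip no-op updates
    }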
@@ -948,6 +1001,108 @@ static table_map get_table_map(List<Item> *items)
   return map;
 }
 
+/**
+  If one row is updated through two different aliases and the first
+  update physically moves the row, the second update will error
+  because the row is no longer located where expected. This function
+  checks if the multiple-table update is about to do that and if so
+  returns with an error.
+
+  The following update operations physically move rows:
+  1) Update of a column in a clustered primary key
+  2) Update of a column used to calculate which partition the row belongs to
+
+  This function returns with an error if both of the following are
+  true:
+
+  a) A table in the multiple-table update statement is updated
+     through multiple aliases (including views)
+  b) At least one of the updates on the table from a) may physically
+     move the row. Note: Updating a column used to calculate which
+     partition a row belongs to does not necessarily mean that the
+     row is moved. The new value may or may not belong to the same
+     partition.
+
+  @param leaves First leaf table
+  @param tables_for_update Map of tables that are updated
+
+  @return
+    true if the update is unsafe, in which case an error message is also set,
+    false otherwise.
+*/
+static
+bool unsafe_key_update(TABLE_LIST *leaves, table_map tables_for_update)
+{
+  TABLE_LIST *tl= leaves;
+
+  for (tl= leaves; tl ; tl= tl->next_leaf)
+  {
+    if (tl->table->map & tables_for_update)
+    {
+      TABLE *table1= tl->table;
+      bool primkey_clustered= (table1->file->primary_key_is_clustered() &&
+                               table1->s->primary_key != MAX_KEY);
+
+      bool table_partitioned= false;
+#ifdef WITH_PARTITION_STORAGE_ENGINE
+      table_partitioned= (table1->part_info != NULL);
+#endif
+
+      if (!table_partitioned && !primkey_clustered)
+        continue;
+
+      for (TABLE_LIST* tl2= tl->next_leaf; tl2 ; tl2= tl2->next_leaf)
+      {
+        /*
+          Look at "next" tables only since all previous tables have
+          already been checked
+        */
+        TABLE *table2= tl2->table;
+        if (table2->map & tables_for_update && table1->s == table2->s)
+        {
+          // A table is updated through two aliases
+          if (table_partitioned &&
+              (partition_key_modified(table1, table1->write_set) ||
+               partition_key_modified(table2, table2->write_set)))
+          {
+            // Partitioned key is updated
+            my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
+                     tl->belong_to_view ? tl->belong_to_view->alias
+                                        : tl->alias,
+                     tl2->belong_to_view ? tl2->belong_to_view->alias
+                                         : tl2->alias);
+            return true;
+          }
+
+          if (primkey_clustered)
+          {
+            // The primary key can cover multiple columns
+            KEY key_info= table1->key_info[table1->s->primary_key];
+            KEY_PART_INFO *key_part= key_info.key_part;
+            KEY_PART_INFO *key_part_end= key_part + key_info.key_parts;
+
+            for (;key_part != key_part_end; ++key_part)
+            {
+              if (bitmap_is_set(table1->write_set, key_part->fieldnr-1) ||
+                  bitmap_is_set(table2->write_set, key_part->fieldnr-1))
+              {
+                // Clustered primary key is updated
+                my_error(ER_MULTI_UPDATE_KEY_CONFLICT, MYF(0),
+                         tl->belong_to_view ? tl->belong_to_view->alias
+                                            : tl->alias,
+                         tl2->belong_to_view ? tl2->belong_to_view->alias
+                                             : tl2->alias);
+                return true;
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+  return false;
+}
+
+
 /*
   make update specific preparation and checks after opening tables
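As a reading aid, a sketch (hypothetical helper, not part of the patch) of the pairwise test unsafe_key_update() applies to each later alias tl2 for a given alias tl: both aliases must be targets of the UPDATE, and they must resolve to the same physical table, detected by comparing their TABLE_SHARE pointers.

    // Sketch only; uses the server's TABLE type and table_map as in the code above.
    static bool same_table_through_two_aliases(const TABLE *t1, const TABLE *t2,
                                               table_map tables_for_update)
    {
      // Both aliases are update targets ...
      if (!(t1->map & tables_for_update) || !(t2->map & tables_for_update))
        return false;
      // ... and were opened from the same definition, i.e. they share one TABLE_SHARE.
      return t1->s == t2->s;
    }

Only when such a pair exists are the partition-key and clustered-primary-key conditions evaluated; a statement of the rejected shape would be, for example, UPDATE t1 AS a, t1 AS b SET a.pk_col= a.pk_col+1, b.other_col= 1 WHERE a.pk_col= b.pk_col, assuming pk_col belongs to a clustered primary key (table and column names are hypothetical).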
@@ -982,9 +1137,17 @@ int mysql_multi_update_prepare(THD *thd)
   /* following need for prepared statements, to run next time multi-update */
   thd->lex->sql_command= SQLCOM_UPDATE_MULTI;
 
-  /* open tables and create derived ones, but do not lock and fill them */
+  /*
+    Open tables and create derived ones, but do not lock and fill them yet.
+
+    During prepare phase acquire only S metadata locks instead of SW locks to
+    keep prepare of multi-UPDATE compatible with concurrent LOCK TABLES WRITE
+    and global read lock.
+  */
   if ((original_multiupdate &&
-       open_tables(thd, &table_list, &table_count, 0)) ||
+       open_tables(thd, &table_list, &table_count,
+                   (thd->stmt_arena->is_stmt_prepare() ?
+                    MYSQL_OPEN_FORCE_SHARED_MDL : 0))) ||
       mysql_handle_derived(lex, &mysql_derived_prepare))
     DBUG_RETURN(TRUE);
   /*
@@ -1019,10 +1182,14 @@ int mysql_multi_update_prepare(THD *thd)
 
   thd->table_map_for_update= tables_for_update= get_table_map(fields);
 
+  leaves= lex->select_lex.leaf_tables;
+
+  if (unsafe_key_update(leaves, tables_for_update))
+    DBUG_RETURN(true);
+
   /*
     Setup timestamp handling and locking mode
   */
-  leaves= lex->select_lex.leaf_tables;
   for (tl= leaves; tl; tl= tl->next_leaf)
   {
     TABLE *table= tl->table;
@@ -1142,7 +1309,11 @@ int mysql_multi_update_prepare(THD *thd)
 
   if (thd->fill_derived_tables() &&
       mysql_handle_derived(lex, &mysql_derived_filling))
+  {
+    mysql_handle_derived(lex, &mysql_derived_cleanup);
     DBUG_RETURN(TRUE);
+  }
+  mysql_handle_derived(lex, &mysql_derived_cleanup);
 
   DBUG_RETURN (FALSE);
 }
@@ -1647,18 +1818,8 @@ bool multi_update::send_data(List<Item> &not_used_values)
     if (table->status & (STATUS_NULL_ROW | STATUS_UPDATED))
       continue;
 
-    /*
-      We can use compare_record() to optimize away updates if
-      the table handler is returning all columns OR if
-      if all updated columns are read
-    */
     if (table == table_to_update)
     {
-      bool can_compare_record;
-      can_compare_record= (!(table->file->ha_table_flags() &
-                             HA_PARTIAL_COLUMN_READ) ||
-                           bitmap_is_subset(table->write_set,
-                                            table->read_set));
       table->status|= STATUS_UPDATED;
       store_record(table,record[1]);
       if (fill_record_n_invoke_before_triggers(thd, *fields_for_table[offset],
@@ -1673,7 +1834,7 @@ bool multi_update::send_data(List<Item> &not_used_values)
       */
       table->auto_increment_field_not_null= FALSE;
       found++;
-      if (!can_compare_record || compare_record(table))
+      if (!records_are_comparable(table) || compare_records(table))
       {
         int error;
         if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -1864,7 +2025,6 @@ int multi_update::do_updates()
     DBUG_RETURN(0);
   for (cur_table= update_tables; cur_table; cur_table= cur_table->next_local)
   {
-    bool can_compare_record;
     uint offset= cur_table->shared;
 
     table = cur_table->table;
@@ -1913,11 +2073,6 @@ int multi_update::do_updates()
       goto err;
     }
 
-    can_compare_record= (!(table->file->ha_table_flags() &
-                           HA_PARTIAL_COLUMN_READ) ||
-                         bitmap_is_subset(table->write_set,
-                                          table->read_set));
-
     for (;;)
     {
       if (thd->killed && trans_safe)
@@ -1965,7 +2120,7 @@ int multi_update::do_updates()
                                             TRG_ACTION_BEFORE, TRUE))
         goto err2;
 
-      if (!can_compare_record || compare_record(table))
+      if (!records_are_comparable(table) || compare_records(table))
       {
         int error;
         if ((error= cur_table->view_check_option(thd, ignore)) !=
@@ -2062,7 +2217,9 @@ bool multi_update::send_eof()
     Does updates for the last n - 1 tables, returns 0 if ok;
     error takes into account killed status gained in do_updates()
   */
-  int local_error = (table_count) ? do_updates() : 0;
+  int local_error= thd->is_error();
+  if (!local_error)
+    local_error = (table_count) ? do_updates() : 0;
   /*
     if local_error is not set ON until after do_updates() then
     later carried out killing should not affect binlogging.