mirror of https://github.com/MariaDB/server.git
synced 2025-08-31 22:22:30 +03:00
merged 5.1-5.1.29-rc -> 5.1-bugteam
@@ -160,7 +160,8 @@ const uint ha_partition::NO_CURRENT_PART_ID= 0xFFFFFFFF;

 ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
   :handler(hton, share), m_part_info(NULL), m_create_handler(FALSE),
-   m_is_sub_partitioned(0), is_clone(FALSE)
+   m_is_sub_partitioned(0), is_clone(FALSE), auto_increment_lock(FALSE),
+   auto_increment_safe_stmt_log_lock(FALSE)
 {
   DBUG_ENTER("ha_partition::ha_partition(table)");
   init_handler_variables();
@@ -182,7 +183,8 @@ ha_partition::ha_partition(handlerton *hton, TABLE_SHARE *share)
 ha_partition::ha_partition(handlerton *hton, partition_info *part_info)
   :handler(hton, NULL), m_part_info(part_info),
    m_create_handler(TRUE),
-   m_is_sub_partitioned(m_part_info->is_sub_partitioned()), is_clone(FALSE)
+   m_is_sub_partitioned(m_part_info->is_sub_partitioned()), is_clone(FALSE),
+   auto_increment_lock(FALSE), auto_increment_safe_stmt_log_lock(FALSE)
 {
   DBUG_ENTER("ha_partition::ha_partition(part_info)");
   init_handler_variables();
@@ -1248,7 +1250,7 @@ int ha_partition::prepare_new_partition(TABLE *tbl,
     assumes that external_lock() is last call that may fail here.
     Otherwise see description for cleanup_new_partition().
   */
-  if ((error= file->ha_external_lock(current_thd, m_lock_type)))
+  if ((error= file->ha_external_lock(ha_thd(), m_lock_type)))
     goto error;

   DBUG_RETURN(0);
@@ -1336,8 +1338,8 @@ void ha_partition::cleanup_new_partition(uint part_count)

 int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
                                     const char *path,
-                                    ulonglong *copied,
-                                    ulonglong *deleted,
+                                    ulonglong * const copied,
+                                    ulonglong * const deleted,
                                     const uchar *pack_frm_data
                                     __attribute__((unused)),
                                     size_t pack_frm_len
@@ -1354,7 +1356,7 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
   int error= 1;
   bool first;
   uint temp_partitions= m_part_info->temp_partitions.elements;
-  THD *thd= current_thd;
+  THD *thd= ha_thd();
   DBUG_ENTER("ha_partition::change_partitions");

   /*
@@ -1628,7 +1630,8 @@ int ha_partition::change_partitions(HA_CREATE_INFO *create_info,
     partitions.
 */

-int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
+int ha_partition::copy_partitions(ulonglong * const copied,
+                                  ulonglong * const deleted)
 {
   uint reorg_part= 0;
   int result= 0;
@@ -1674,13 +1677,13 @@ int ha_partition::copy_partitions(ulonglong *copied, ulonglong *deleted)
         table since it doesn't fit into any partition any longer due to
         changed partitioning ranges or list values.
       */
-      deleted++;
+      (*deleted)++;
     }
     else
     {
       THD *thd= ha_thd();
       /* Copy record to new handler */
-      copied++;
+      (*copied)++;
       tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
       result= m_new_file[new_part]->ha_write_row(m_rec0);
       reenable_binlog(thd);
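
The `(*copied)++`/`(*deleted)++` change above fixes a classic out-parameter bug: `copied++` advanced the handler's local pointer and left the caller's counter untouched, so the row counts reported for the ALTER were wrong. A minimal standalone sketch of the difference (hypothetical function names, not MySQL code):

#include <cstdio>
typedef unsigned long long ulonglong;

static void copy_rows_broken(ulonglong *copied)
{
  copied++;          /* advances the local pointer; caller's counter stays 0 */
}

static void copy_rows_fixed(ulonglong * const copied)
{
  (*copied)++;       /* increments the value the caller passed in */
}

int main()
{
  ulonglong n= 0;
  copy_rows_broken(&n);
  printf("broken: %llu\n", n);   /* prints 0 */
  copy_rows_fixed(&n);
  printf("fixed:  %llu\n", n);   /* prints 1 */
  return 0;
}

Declaring the parameters `ulonglong * const`, as the patch does throughout, makes the compiler reject a stray `copied++` outright.
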
@@ -1714,6 +1717,14 @@ error:

 void ha_partition::update_create_info(HA_CREATE_INFO *create_info)
 {
+  /*
+    Fix for bug#38751, some engines needs info-calls in ALTER.
+    Archive need this since it flushes in ::info.
+    HA_STATUS_AUTO is optimized so it will not always be forwarded
+    to all partitions, but HA_STATUS_VARIABLE will.
+  */
+  info(HA_STATUS_VARIABLE);
+
+  info(HA_STATUS_AUTO);
+
   if (!(create_info->used_fields & HA_CREATE_USED_AUTO))
@@ -1804,7 +1815,7 @@ uint ha_partition::del_ren_cre_table(const char *from,
   handler **file, **abort_file;
   DBUG_ENTER("del_ren_cre_table()");

-  if (get_from_handler_file(from, current_thd->mem_root))
+  if (get_from_handler_file(from, ha_thd()->mem_root))
     DBUG_RETURN(TRUE);
   DBUG_ASSERT(m_file_buffer);
   DBUG_PRINT("enter", ("from: (%s) to: (%s)", from, to));
@@ -1931,7 +1942,7 @@ int ha_partition::set_up_table_before_create(TABLE *tbl,
 {
   int error= 0;
   const char *partition_name;
-  THD *thd= current_thd;
+  THD *thd= ha_thd();
   DBUG_ENTER("set_up_table_before_create");

   if (!part_elem)
@@ -2327,7 +2338,7 @@ bool ha_partition::get_from_handler_file(const char *name, MEM_ROOT *mem_root)
   tot_partition_words= (m_tot_parts + 3) / 4;
   engine_array= (handlerton **) my_alloca(m_tot_parts * sizeof(handlerton*));
   for (i= 0; i < m_tot_parts; i++)
-    engine_array[i]= ha_resolve_by_legacy_type(current_thd,
+    engine_array[i]= ha_resolve_by_legacy_type(ha_thd(),
                                                (enum legacy_db_type)
                                                 *(uchar *) ((file_buffer) + 12 + i));
   address_tot_name_len= file_buffer + 12 + 4 * tot_partition_words;
@@ -2398,8 +2409,10 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
   uint alloc_len;
   handler **file;
   char name_buff[FN_REFLEN];
+  bool is_not_tmp_table= (table_share->tmp_table == NO_TMP_TABLE);
   DBUG_ENTER("ha_partition::open");

+  DBUG_ASSERT(table->s == table_share);
   ref_length= 0;
   m_mode= mode;
   m_open_test_lock= test_if_locked;
@@ -2408,9 +2421,9 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
     DBUG_RETURN(1);
   m_start_key.length= 0;
   m_rec0= table->record[0];
-  m_rec_length= table->s->reclength;
+  m_rec_length= table_share->reclength;
   alloc_len= m_tot_parts * (m_rec_length + PARTITION_BYTES_IN_POS);
-  alloc_len+= table->s->max_key_length;
+  alloc_len+= table_share->max_key_length;
   if (!m_ordered_rec_buffer)
   {
     if (!(m_ordered_rec_buffer= (uchar*)my_malloc(alloc_len, MYF(MY_WME))))
@@ -2482,6 +2495,30 @@ int ha_partition::open(const char *name, int mode, uint test_if_locked)
                          0, key_rec_cmp, (void*)this)))
     goto err_handler;

+  /*
+    Use table_share->ha_data to share auto_increment_value among all handlers
+    for the same table.
+  */
+  if (is_not_tmp_table)
+    pthread_mutex_lock(&table_share->mutex);
+  if (!table_share->ha_data)
+  {
+    HA_DATA_PARTITION *ha_data;
+    /* currently only needed for auto_increment */
+    table_share->ha_data= ha_data= (HA_DATA_PARTITION*)
+                                   alloc_root(&table_share->mem_root,
+                                              sizeof(HA_DATA_PARTITION));
+    if (!ha_data)
+    {
+      if (is_not_tmp_table)
+        pthread_mutex_unlock(&table_share->mutex);
+      goto err_handler;
+    }
+    DBUG_PRINT("info", ("table_share->ha_data 0x%p", ha_data));
+    bzero(ha_data, sizeof(HA_DATA_PARTITION));
+  }
+  if (is_not_tmp_table)
+    pthread_mutex_unlock(&table_share->mutex);
   /*
     Some handlers update statistics as part of the open call. This will in
     some cases corrupt the statistics of the partition handler and thus
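
The block added above creates exactly one HA_DATA_PARTITION per TABLE_SHARE: every opener takes table_share->mutex (skipped for temporary tables, which are single-threaded and whose mutex may not be initialized), and only the first opener allocates. A condensed sketch of the pattern, with simplified stand-in types (Share/PartData are illustrative, not the real TABLE_SHARE):

#include <pthread.h>
#include <cstdlib>

struct PartData { unsigned long long next_auto_inc_val; bool auto_inc_initialized; };
struct Share { pthread_mutex_t mutex; PartData *ha_data; bool is_tmp_table; };

/* Every opener calls this; only the first allocates. Returns false on OOM. */
static bool init_part_data(Share *share)
{
  if (!share->is_tmp_table)
    pthread_mutex_lock(&share->mutex);
  if (!share->ha_data)  /* first opener: allocate and zero-fill (cf. bzero) */
    share->ha_data= (PartData*) calloc(1, sizeof(PartData));
  bool ok= share->ha_data != NULL;
  if (!share->is_tmp_table)
    pthread_mutex_unlock(&share->mutex);
  return ok;
}

In the patch the allocation comes from the share's mem_root, so the data is freed together with the share rather than individually.
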
@@ -2539,6 +2576,7 @@ int ha_partition::close(void)
   handler **file;
   DBUG_ENTER("ha_partition::close");

+  DBUG_ASSERT(table->s == table_share);
   delete_queue(&m_queue);
   if (!is_clone)
     bitmap_free(&(m_part_info->used_partitions));
@@ -2607,6 +2645,7 @@ int ha_partition::external_lock(THD *thd, int lock_type)
   handler **file;
   DBUG_ENTER("ha_partition::external_lock");

+  DBUG_ASSERT(!auto_increment_lock && !auto_increment_safe_stmt_log_lock);
   file= m_file;
   m_lock_type= lock_type;

@@ -2825,8 +2864,9 @@ int ha_partition::write_row(uchar * buf)
   uint32 part_id;
   int error;
   longlong func_value;
-  bool autoincrement_lock= FALSE;
+  bool have_auto_increment= table->next_number_field && buf == table->record[0];
   my_bitmap_map *old_map;
+  HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
   THD *thd= ha_thd();
   timestamp_auto_set_type orig_timestamp_type= table->timestamp_field_type;
 #ifdef NOT_NEEDED
@@ -2844,28 +2884,16 @@ int ha_partition::write_row(uchar * buf)
     If we have an auto_increment column and we are writing a changed row
     or a new row, then update the auto_increment value in the record.
   */
-  if (table->next_number_field && buf == table->record[0])
+  if (have_auto_increment)
   {
-    /*
-      Some engines (InnoDB for example) can change autoincrement
-      counter only after 'table->write_row' operation.
-      So if another thread gets inside the ha_partition::write_row
-      before it is complete, it gets same auto_increment value,
-      which means DUP_KEY error (bug #27405)
-      Here we separate the access using table_share->mutex, and
-      use autoincrement_lock variable to avoid unnecessary locks.
-      Probably not an ideal solution.
-    */
-    if (table_share->tmp_table == NO_TMP_TABLE)
+    if (!ha_data->auto_inc_initialized &&
+        !table->s->next_number_keypart)
    {
      /*
-        Bug#30878 crash when alter table from non partitioned table
-        to partitioned.
-        Checking if tmp table then there is no need to lock,
-        and the table_share->mutex may not be initialised.
+        If auto_increment in table_share is not initialized, start by
+        initializing it.
      */
-      autoincrement_lock= TRUE;
-      pthread_mutex_lock(&table_share->mutex);
+      info(HA_STATUS_AUTO);
    }
    error= update_auto_increment();

@@ -2903,11 +2931,11 @@ int ha_partition::write_row(uchar * buf)
     DBUG_PRINT("info", ("Insert in partition %d", part_id));
     tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
     error= m_file[part_id]->ha_write_row(buf);
+    if (have_auto_increment && !table->s->next_number_keypart)
+      set_auto_increment_if_higher(table->next_number_field->val_int());
     reenable_binlog(thd);
 exit:
   table->timestamp_field_type= orig_timestamp_type;
-  if (autoincrement_lock)
-    pthread_mutex_unlock(&table_share->mutex);
   DBUG_RETURN(error);
 }

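
After a successful engine write, the wrapper now bumps the shared counter so the next reservation starts above any value the underlying engine generated. This is the job of set_auto_increment_if_higher(), whose definition appears in the ha_partition.h hunk further down; in effect it maintains the invariant sketched here (locking elided, PartData standing in for HA_DATA_PARTITION):

struct PartData { unsigned long long next_auto_inc_val; bool auto_inc_initialized; };

/* Invariant after every stored row: next_auto_inc_val > every stored value. */
static void note_written_value(PartData *d, unsigned long long nr)
{
  if (nr >= d->next_auto_inc_val)
    d->next_auto_inc_val= nr + 1;   /* next reservation starts after nr */
  d->auto_inc_initialized= true;
}
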
@@ -2931,13 +2959,6 @@ exit:
   Keep in mind that the server can do updates based on ordering if an
   ORDER BY clause was used. Consecutive ordering is not guarenteed.
-
-  Currently new_data will not have an updated auto_increament record, or
-  and updated timestamp field. You can do these for partition by doing these:
-    if (table->timestamp_field_type & TIMESTAMP_AUTO_SET_ON_UPDATE)
-      table->timestamp_field->set_time();
-    if (table->next_number_field && record == table->record[0])
-      update_auto_increment();

   Called from sql_select.cc, sql_acl.cc, sql_update.cc, and sql_insert.cc.
   new_data is always record[0]
   old_data is normally record[1] but may be anything
@@ -2969,17 +2990,23 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
     goto exit;
   }

-  /*
-    TODO:
-      set_internal_auto_increment=
-        max(set_internal_auto_increment, new_data->auto_increment)
-  */
   m_last_part= new_part_id;
   if (new_part_id == old_part_id)
   {
     DBUG_PRINT("info", ("Update in partition %d", new_part_id));
     tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
     error= m_file[new_part_id]->ha_update_row(old_data, new_data);
+    /*
+      if updating an auto_increment column, update
+      table_share->ha_data->next_auto_inc_val if needed.
+      (not to be used if auto_increment on secondary field in a multi-
+      column index)
+      mysql_update does not set table->next_number_field, so we use
+      table->found_next_number_field instead.
+    */
+    if (table->found_next_number_field && new_data == table->record[0] &&
+        !table->s->next_number_keypart)
+      set_auto_increment_if_higher(table->found_next_number_field->val_int());
     reenable_binlog(thd);
     goto exit;
   }
@@ -2989,6 +3016,9 @@ int ha_partition::update_row(const uchar *old_data, uchar *new_data)
                         old_part_id, new_part_id));
     tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */
     error= m_file[new_part_id]->ha_write_row(new_data);
+    if (table->found_next_number_field && new_data == table->record[0] &&
+        !table->s->next_number_keypart)
+      set_auto_increment_if_higher(table->found_next_number_field->val_int());
     reenable_binlog(thd);
     if (error)
       goto exit;
@@ -3084,8 +3114,17 @@ int ha_partition::delete_all_rows()
 {
   int error;
   handler **file;
+  THD *thd= ha_thd();
   DBUG_ENTER("ha_partition::delete_all_rows");

+  if (thd->lex->sql_command == SQLCOM_TRUNCATE)
+  {
+    HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+    lock_auto_increment();
+    ha_data->next_auto_inc_val= 0;
+    ha_data->auto_inc_initialized= FALSE;
+    unlock_auto_increment();
+  }
   file= m_file;
   do
   {
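
The delete_all_rows() hunk distinguishes TRUNCATE from a plain DELETE: only TRUNCATE may reset the generator, and the reset happens under the auto_increment lock before the delete is forwarded to the partitions. The semantic difference, as a sketch (same illustrative PartData stand-in as above):

struct PartData { unsigned long long next_auto_inc_val; bool auto_inc_initialized; };

static void on_delete_all_rows(PartData *d, bool is_truncate)
{
  if (is_truncate)                  /* thd->lex->sql_command == SQLCOM_TRUNCATE */
  {
    d->next_auto_inc_val= 0;        /* next INSERT restarts numbering */
    d->auto_inc_initialized= false; /* forces re-init from the partitions */
  }
  /* plain DELETE FROM t: counter deliberately left untouched */
}
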
@@ -4251,6 +4290,17 @@ int ha_partition::handle_unordered_scan_next_partition(uchar * buf)
       break;
     case partition_index_first:
       DBUG_PRINT("info", ("index_first on partition %d", i));
+      /* MyISAM engine can fail if we call index_first() when indexes disabled */
+      /* that happens if the table is empty. */
+      /* Here we use file->stats.records instead of file->records() because */
+      /* file->records() is supposed to return an EXACT count, and it can be */
+      /* possibly slow. We don't need an exact number, an approximate one- from*/
+      /* the last ::info() call - is sufficient. */
+      if (file->stats.records == 0)
+      {
+        error= HA_ERR_END_OF_FILE;
+        break;
+      }
       error= file->index_first(buf);
       break;
     case partition_index_first_unordered:
@@ -4338,10 +4388,32 @@ int ha_partition::handle_ordered_index_scan(uchar *buf, bool reverse_order)
                            m_start_key.flag);
       break;
     case partition_index_first:
+      /* MyISAM engine can fail if we call index_first() when indexes disabled */
+      /* that happens if the table is empty. */
+      /* Here we use file->stats.records instead of file->records() because */
+      /* file->records() is supposed to return an EXACT count, and it can be */
+      /* possibly slow. We don't need an exact number, an approximate one- from*/
+      /* the last ::info() call - is sufficient. */
+      if (file->stats.records == 0)
+      {
+        error= HA_ERR_END_OF_FILE;
+        break;
+      }
       error= file->index_first(rec_buf_ptr);
       reverse_order= FALSE;
       break;
     case partition_index_last:
+      /* MyISAM engine can fail if we call index_last() when indexes disabled */
+      /* that happens if the table is empty. */
+      /* Here we use file->stats.records instead of file->records() because */
+      /* file->records() is supposed to return an EXACT count, and it can be */
+      /* possibly slow. We don't need an exact number, an approximate one- from*/
+      /* the last ::info() call - is sufficient. */
+      if (file->stats.records == 0)
+      {
+        error= HA_ERR_END_OF_FILE;
+        break;
+      }
       error= file->index_last(rec_buf_ptr);
       reverse_order= TRUE;
       break;
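
Both scan paths now guard index_first()/index_last() with the same emptiness check. The point of using file->stats.records is spelled out in the comments: an estimate from the last ::info() call is enough to decide "empty or not", whereas records() promises an exact, potentially expensive count. A sketch of the guard with a hypothetical handler-like type:

/* Guarded positioning: skip the engine call when the cached estimate says
   the partition is empty (MyISAM would fail with indexes disabled). */
template <class Handler>
int index_first_guarded(Handler *file, unsigned char *buf, int ha_err_eof)
{
  if (file->stats.records == 0)
    return ha_err_eof;              /* HA_ERR_END_OF_FILE in the real code */
  return file->index_first(buf);
}
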
@@ -4596,21 +4668,54 @@ int ha_partition::handle_ordered_prev(uchar *buf)

 int ha_partition::info(uint flag)
 {
-  handler *file, **file_array;
-  DBUG_ENTER("ha_partition:info");
+  DBUG_ENTER("ha_partition::info");

   if (flag & HA_STATUS_AUTO)
   {
-    ulonglong auto_increment_value= 0;
+    bool auto_inc_is_first_in_idx= (table_share->next_number_keypart == 0);
+    HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
     DBUG_PRINT("info", ("HA_STATUS_AUTO"));
-    file_array= m_file;
-    do
+    if (!table->found_next_number_field)
+      stats.auto_increment_value= 0;
+    else if (ha_data->auto_inc_initialized)
     {
-      file= *file_array;
-      file->info(HA_STATUS_AUTO);
-      set_if_bigger(auto_increment_value, file->stats.auto_increment_value);
-    } while (*(++file_array));
-    stats.auto_increment_value= auto_increment_value;
+      lock_auto_increment();
+      stats.auto_increment_value= ha_data->next_auto_inc_val;
+      unlock_auto_increment();
+    }
+    else
+    {
+      lock_auto_increment();
+      /* to avoid two concurrent initializations, check again when locked */
+      if (ha_data->auto_inc_initialized)
+        stats.auto_increment_value= ha_data->next_auto_inc_val;
+      else
+      {
+        handler *file, **file_array;
+        ulonglong auto_increment_value= 0;
+        file_array= m_file;
+        DBUG_PRINT("info",
+                   ("checking all partitions for auto_increment_value"));
+        do
+        {
+          file= *file_array;
+          file->info(HA_STATUS_AUTO);
+          set_if_bigger(auto_increment_value,
+                        file->stats.auto_increment_value);
+        } while (*(++file_array));
+
+        DBUG_ASSERT(auto_increment_value);
+        stats.auto_increment_value= auto_increment_value;
+        if (auto_inc_is_first_in_idx)
+        {
+          set_if_bigger(ha_data->next_auto_inc_val, auto_increment_value);
+          ha_data->auto_inc_initialized= TRUE;
+          DBUG_PRINT("info", ("initializing next_auto_inc_val to %lu",
+                              (ulong) ha_data->next_auto_inc_val));
+        }
+      }
+      unlock_auto_increment();
+    }
   }
   if (flag & HA_STATUS_VARIABLE)
   {
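
The HA_STATUS_AUTO branch above is a textbook check/lock/re-check sequence: the flag is tested once cheaply, then re-tested after taking the lock so that two racing threads neither scan the partitions twice nor overwrite each other's result. Stripped of the MySQL types, the shape is as follows (lock_auto_inc, unlock_auto_inc, and scan_partitions_for_max are assumed helpers; PartData is the same illustrative stand-in used earlier):

struct PartData { unsigned long long next_auto_inc_val; bool auto_inc_initialized; };

unsigned long long current_auto_inc(PartData *d)
{
  if (d->auto_inc_initialized)       /* fast path, no partition scan        */
    return d->next_auto_inc_val;     /* (read under the lock in the patch)  */
  lock_auto_inc();                   /* assumed helper */
  if (!d->auto_inc_initialized)      /* re-check: another thread may have won */
  {
    unsigned long long max_seen= scan_partitions_for_max();  /* assumed helper */
    if (max_seen > d->next_auto_inc_val)
      d->next_auto_inc_val= max_seen;
    d->auto_inc_initialized= true;
  }
  unsigned long long v= d->next_auto_inc_val;
  unlock_auto_inc();                 /* assumed helper */
  return v;
}
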
@@ -4634,6 +4739,7 @@ int ha_partition::info(uint flag)
       check_time: Time of last check (only applicable to MyISAM)
       We report last time of all underlying handlers
     */
+    handler *file, **file_array;
     stats.records= 0;
     stats.deleted= 0;
     stats.data_file_length= 0;
@@ -4715,6 +4821,7 @@ int ha_partition::info(uint flag)
       So we calculate these constants by using the variables on the first
       handler.
     */
+    handler *file;

     file= m_file[0];
     file->info(HA_STATUS_CONST);
@@ -4736,6 +4843,7 @@ int ha_partition::info(uint flag)
   }
   if (flag & HA_STATUS_TIME)
   {
+    handler *file, **file_array;
     DBUG_PRINT("info", ("info: HA_STATUS_TIME"));
     /*
       This flag is used to set the latest update time of the table.
@@ -5796,19 +5904,33 @@ int ha_partition::cmp_ref(const uchar *ref1, const uchar *ref2)
     MODULE auto increment
 ****************************************************************************/

-void ha_partition::restore_auto_increment(ulonglong)
-{
-  DBUG_ENTER("ha_partition::restore_auto_increment");
-
-  DBUG_VOID_RETURN;
+int ha_partition::reset_auto_increment(ulonglong value)
+{
+  handler **file= m_file;
+  int res;
+  HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+  DBUG_ENTER("ha_partition::reset_auto_increment");
+  lock_auto_increment();
+  ha_data->auto_inc_initialized= FALSE;
+  ha_data->next_auto_inc_val= 0;
+  do
+  {
+    if ((res= (*file)->ha_reset_auto_increment(value)) != 0)
+      break;
+  } while (*(++file));
+  unlock_auto_increment();
+  DBUG_RETURN(res);
 }


-/*
+/**
   This method is called by update_auto_increment which in turn is called
-  by the individual handlers as part of write_row. We will always let
-  the first handler keep track of the auto increment value for all
-  partitions.
+  by the individual handlers as part of write_row. We use the
+  table_share->ha_data->next_auto_inc_val, or search all
+  partitions for the highest auto_increment_value if not initialized or
+  if auto_increment field is a secondary part of a key, we must search
+  every partition when holding a mutex to be sure of correctness.
 */

 void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
@@ -5816,59 +5938,88 @@ void ha_partition::get_auto_increment(ulonglong offset, ulonglong increment,
                                       ulonglong *first_value,
                                       ulonglong *nb_reserved_values)
 {
-  ulonglong first_value_part, last_value_part, nb_reserved_values_part,
-            last_value= ~ (ulonglong) 0;
-  handler **pos, **end;
-  bool retry= TRUE;
   DBUG_ENTER("ha_partition::get_auto_increment");
+  DBUG_PRINT("info", ("offset: %lu inc: %lu desired_values: %lu "
+                      "first_value: %lu", (ulong) offset, (ulong) increment,
+                      (ulong) nb_desired_values, (ulong) *first_value));
+  DBUG_ASSERT(increment && nb_desired_values);
+  *first_value= 0;
+  if (table->s->next_number_keypart)
+  {
+    /*
+      next_number_keypart is != 0 if the auto_increment column is a secondary
+      column in the index (it is allowed in MyISAM)
+    */
+    DBUG_PRINT("info", ("next_number_keypart != 0"));
+    ulonglong nb_reserved_values_part;
+    ulonglong first_value_part, max_first_value;
+    handler **file= m_file;
+    first_value_part= max_first_value= *first_value;
+    /* Must lock and find highest value among all partitions. */
+    lock_auto_increment();
+    do
+    {
+      /* Only nb_desired_values = 1 makes sense */
+      (*file)->get_auto_increment(offset, increment, 1,
+                                  &first_value_part, &nb_reserved_values_part);
+      if (first_value_part == ~(ulonglong)(0)) // error in one partition
+      {
+        *first_value= first_value_part;
+        /* log that the error was between table/partition handler */
+        sql_print_error("Partition failed to reserve auto_increment value");
+        unlock_auto_increment();
+        DBUG_VOID_RETURN;
+      }
+      DBUG_PRINT("info", ("first_value_part: %lu", (ulong) first_value_part));
+      set_if_bigger(max_first_value, first_value_part);
+    } while (*(++file));
+    *first_value= max_first_value;
+    *nb_reserved_values= 1;
+    unlock_auto_increment();
+  }
+  else
+  {
+    THD *thd= ha_thd();
+    HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+    /*
+      This is initialized in the beginning of the first write_row call.
+    */
+    DBUG_ASSERT(ha_data->auto_inc_initialized);
+    /*
+      Get a lock for handling the auto_increment in table_share->ha_data
+      for avoiding two concurrent statements getting the same number.
+    */

-again:
-  for (pos=m_file, end= m_file+ m_tot_parts; pos != end ; pos++)
-  {
-    first_value_part= *first_value;
-    (*pos)->get_auto_increment(offset, increment, nb_desired_values,
-                               &first_value_part, &nb_reserved_values_part);
-    if (first_value_part == ~(ulonglong)(0)) // error in one partition
-    {
-      *first_value= first_value_part;
-      sql_print_error("Partition failed to reserve auto_increment value");
-      DBUG_VOID_RETURN;
-    }
-    /*
-      Partition has reserved an interval. Intersect it with the intervals
-      already reserved for the previous partitions.
-    */
-    last_value_part= (nb_reserved_values_part == ULONGLONG_MAX) ?
-      ULONGLONG_MAX : (first_value_part + nb_reserved_values_part * increment);
-    set_if_bigger(*first_value, first_value_part);
-    set_if_smaller(last_value, last_value_part);
-  }
-  if (last_value < *first_value) /* empty intersection, error */
-  {
-    /*
-      When we have an empty intersection, it means that one or more
-      partitions may have a significantly different autoinc next value.
-      We should not fail here - it just means that we should try to
-      find a new reservation making use of the current *first_value
-      wbich should now be compatible with all partitions.
-    */
-    if (retry)
-    {
-      retry= FALSE;
-      last_value= ~ (ulonglong) 0;
-      release_auto_increment();
-      goto again;
-    }
-    /*
-      We should not get here.
-    */
-    sql_print_error("Failed to calculate auto_increment value for partition");
-
-    *first_value= ~(ulonglong)(0);
-  }
-  if (increment)                                // If not check for values
-    *nb_reserved_values= (last_value == ULONGLONG_MAX) ?
-      ULONGLONG_MAX : ((last_value - *first_value) / increment);
+    lock_auto_increment();
+
+    /*
+      In a multi-row insert statement like INSERT SELECT and LOAD DATA
+      where the number of candidate rows to insert is not known in advance
+      we must hold a lock/mutex for the whole statement if we have statement
+      based replication. Because the statement-based binary log contains
+      only the first generated value used by the statement, and slaves assumes
+      all other generated values used by this statement were consecutive to
+      this first one, we must exclusively lock the generator until the statement
+      is done.
+    */
+    if (!auto_increment_safe_stmt_log_lock &&
+        thd->lex->sql_command != SQLCOM_INSERT &&
+        mysql_bin_log.is_open() &&
+        !thd->current_stmt_binlog_row_based &&
+        (thd->options & OPTION_BIN_LOG))
+    {
+      DBUG_PRINT("info", ("locking auto_increment_safe_stmt_log_lock"));
+      auto_increment_safe_stmt_log_lock= TRUE;
+    }
+
+    /* this gets corrected (for offset/increment) in update_auto_increment */
+    *first_value= ha_data->next_auto_inc_val;
+    ha_data->next_auto_inc_val+= nb_desired_values * increment;
+
+    unlock_auto_increment();
+    DBUG_PRINT("info", ("*first_value: %lu", (ulong) *first_value));
+    *nb_reserved_values= nb_desired_values;
+  }
   DBUG_VOID_RETURN;
 }
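
The payoff of the shared counter is visible above: the old code asked every partition for an interval and intersected the answers, retrying once on an empty intersection, while the new non-keypart path reserves in O(1) with two arithmetic operations under the lock. The core of the reservation, reduced to its arithmetic (locking and the statement-log lock elided; PartData as before):

struct PartData { unsigned long long next_auto_inc_val; bool auto_inc_initialized; };

void reserve_interval(PartData *d, unsigned long long nb_desired_values,
                      unsigned long long increment,
                      unsigned long long *first_value,
                      unsigned long long *nb_reserved_values)
{
  *first_value= d->next_auto_inc_val;                    /* start of our interval */
  d->next_auto_inc_val+= nb_desired_values * increment;  /* claim the whole range */
  *nb_reserved_values= nb_desired_values;
}

Offset/increment rounding is deliberately left to handler::update_auto_increment(), as the in-code comment notes.
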
@@ -5876,9 +6027,31 @@ void ha_partition::release_auto_increment()
 {
   DBUG_ENTER("ha_partition::release_auto_increment");

-  for (uint i= 0; i < m_tot_parts; i++)
+  if (table->s->next_number_keypart)
   {
-    m_file[i]->ha_release_auto_increment();
+    for (uint i= 0; i < m_tot_parts; i++)
+      m_file[i]->ha_release_auto_increment();
   }
+  else if (next_insert_id)
+  {
+    HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+    ulonglong next_auto_inc_val;
+    lock_auto_increment();
+    next_auto_inc_val= ha_data->next_auto_inc_val;
+    if (next_insert_id < next_auto_inc_val &&
+        auto_inc_interval_for_cur_row.maximum() >= next_auto_inc_val)
+      ha_data->next_auto_inc_val= next_insert_id;
+    DBUG_PRINT("info", ("ha_data->next_auto_inc_val: %lu",
+                        (ulong) ha_data->next_auto_inc_val));
+
+    /* Unlock the multi row statement lock taken in get_auto_increment */
+    if (auto_increment_safe_stmt_log_lock)
+    {
+      auto_increment_safe_stmt_log_lock= FALSE;
+      DBUG_PRINT("info", ("unlocking auto_increment_safe_stmt_log_lock"));
+    }
+
+    unlock_auto_increment();
+  }
   DBUG_VOID_RETURN;
 }
@@ -37,6 +37,15 @@ typedef struct st_partition_share
 } PARTITION_SHARE;
 #endif

+/**
+  Partition specific ha_data struct.
+  @todo: move all partition specific data from TABLE_SHARE here.
+*/
+typedef struct st_ha_data_partition
+{
+  ulonglong next_auto_inc_val;               /**< first non reserved value */
+  bool auto_inc_initialized;
+} HA_DATA_PARTITION;
+
 #define PARTITION_BYTES_IN_POS 2
 class ha_partition :public handler
@@ -140,6 +149,12 @@ private:
     "own" the m_part_info structure.
   */
   bool is_clone;
+  bool auto_increment_lock;             /**< lock reading/updating auto_inc */
+  /**
+    Flag to keep the auto_increment lock through out the statement.
+    This to ensure it will work with statement based replication.
+  */
+  bool auto_increment_safe_stmt_log_lock;
 public:
   handler *clone(MEM_ROOT *mem_root);
   virtual void set_part_info(partition_info *part_info)
@@ -196,8 +211,8 @@ public:
   virtual char *update_table_comment(const char *comment);
   virtual int change_partitions(HA_CREATE_INFO *create_info,
                                 const char *path,
-                                ulonglong *copied,
-                                ulonglong *deleted,
+                                ulonglong * const copied,
+                                ulonglong * const deleted,
                                 const uchar *pack_frm_data,
                                 size_t pack_frm_len);
   virtual int drop_partitions(const char *path);
@@ -211,7 +226,7 @@ public:
   virtual void change_table_ptr(TABLE *table_arg, TABLE_SHARE *share);
 private:
   int prepare_for_rename();
-  int copy_partitions(ulonglong *copied, ulonglong *deleted);
+  int copy_partitions(ulonglong * const copied, ulonglong * const deleted);
   void cleanup_new_partition(uint part_count);
   int prepare_new_partition(TABLE *table, HA_CREATE_INFO *create_info,
                             handler *file, const char *part_name,
@@ -826,12 +841,51 @@ public:
     auto_increment_column_changed
     -------------------------------------------------------------------------
   */
   virtual void restore_auto_increment(ulonglong prev_insert_id);
   virtual void get_auto_increment(ulonglong offset, ulonglong increment,
                                   ulonglong nb_desired_values,
                                   ulonglong *first_value,
                                   ulonglong *nb_reserved_values);
   virtual void release_auto_increment();
+private:
+  virtual int reset_auto_increment(ulonglong value);
+  virtual void lock_auto_increment()
+  {
+    /* lock already taken */
+    if (auto_increment_safe_stmt_log_lock)
+      return;
+    DBUG_ASSERT(table_share->ha_data && !auto_increment_lock);
+    if(table_share->tmp_table == NO_TMP_TABLE)
+    {
+      auto_increment_lock= TRUE;
+      pthread_mutex_lock(&table_share->mutex);
+    }
+  }
+  virtual void unlock_auto_increment()
+  {
+    DBUG_ASSERT(table_share->ha_data);
+    /*
+      If auto_increment_safe_stmt_log_lock is true, we have to keep the lock.
+      It will be set to false and thus unlocked at the end of the statement by
+      ha_partition::release_auto_increment.
+    */
+    if(auto_increment_lock && !auto_increment_safe_stmt_log_lock)
+    {
+      pthread_mutex_unlock(&table_share->mutex);
+      auto_increment_lock= FALSE;
+    }
+  }
+  virtual void set_auto_increment_if_higher(const ulonglong nr)
+  {
+    HA_DATA_PARTITION *ha_data= (HA_DATA_PARTITION*) table_share->ha_data;
+    lock_auto_increment();
+    /* must check when the mutex is taken */
+    if (nr >= ha_data->next_auto_inc_val)
+      ha_data->next_auto_inc_val= nr + 1;
+    ha_data->auto_inc_initialized= TRUE;
+    unlock_auto_increment();
+  }
+
+public:

   /*
     -------------------------------------------------------------------------
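
The three private helpers above are the whole locking protocol: lock_auto_increment() is a no-op re-entry while the statement-long auto_increment_safe_stmt_log_lock is held, and unlock_auto_increment() refuses to release in that case, leaving the final release to release_auto_increment() at statement end. A typical caller, condensed from the write_row() hunk earlier in this commit:

/* inside ha_partition::write_row(), after the engine write succeeded: */
if (have_auto_increment && !table->s->next_number_keypart)
  set_auto_increment_if_higher(table->next_number_field->val_int());

/* which, per the inline definition above, expands in effect to: */
lock_auto_increment();
if (nr >= ha_data->next_auto_inc_val)
  ha_data->next_auto_inc_val= nr + 1;
ha_data->auto_inc_initialized= TRUE;
unlock_auto_increment();
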
@@ -2202,7 +2202,12 @@ prev_insert_id(ulonglong nr, struct system_variables *variables)
   - In both cases, the reserved intervals are remembered in
     thd->auto_inc_intervals_in_cur_stmt_for_binlog if statement-based
     binlogging; the last reserved interval is remembered in
-    auto_inc_interval_for_cur_row.
+    auto_inc_interval_for_cur_row. The number of reserved intervals is
+    remembered in auto_inc_intervals_count. It differs from the number of
+    elements in thd->auto_inc_intervals_in_cur_stmt_for_binlog() because the
+    latter list is cumulative over all statements forming one binlog event
+    (when stored functions and triggers are used), and collapses two
+    contiguous intervals in one (see its append() method).

   The idea is that generated auto_increment values are predictable and
   independent of the column values in the table. This is needed to be
|
||||
handler::estimation_rows_to_insert was set by
|
||||
handler::ha_start_bulk_insert(); if 0 it means "unknown".
|
||||
*/
|
||||
uint nb_already_reserved_intervals=
|
||||
thd->auto_inc_intervals_in_cur_stmt_for_binlog.nb_elements();
|
||||
ulonglong nb_desired_values;
|
||||
/*
|
||||
If an estimation was given to the engine:
|
||||
@@ -2299,17 +2302,17 @@ int handler::update_auto_increment()
|
||||
start, starting from AUTO_INC_DEFAULT_NB_ROWS.
|
||||
Don't go beyond a max to not reserve "way too much" (because
|
||||
reservation means potentially losing unused values).
|
||||
Note that in prelocked mode no estimation is given.
|
||||
*/
|
||||
if (nb_already_reserved_intervals == 0 &&
|
||||
(estimation_rows_to_insert > 0))
|
||||
if ((auto_inc_intervals_count == 0) && (estimation_rows_to_insert > 0))
|
||||
nb_desired_values= estimation_rows_to_insert;
|
||||
else /* go with the increasing defaults */
|
||||
{
|
||||
/* avoid overflow in formula, with this if() */
|
||||
if (nb_already_reserved_intervals <= AUTO_INC_DEFAULT_NB_MAX_BITS)
|
||||
if (auto_inc_intervals_count <= AUTO_INC_DEFAULT_NB_MAX_BITS)
|
||||
{
|
||||
nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
|
||||
(1 << nb_already_reserved_intervals);
|
||||
nb_desired_values= AUTO_INC_DEFAULT_NB_ROWS *
|
||||
(1 << auto_inc_intervals_count);
|
||||
set_if_smaller(nb_desired_values, AUTO_INC_DEFAULT_NB_MAX);
|
||||
}
|
||||
else
|
||||
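
Replacing nb_already_reserved_intervals (the length of the cumulative binlog list) with the per-statement auto_inc_intervals_count keeps the doubling heuristic correct inside stored functions and triggers, where the binlog list spans several statements. The growth rule itself is unchanged; as a worked example, with the two constants from handler.h left as parameters since their values are not shown in this hunk:

typedef unsigned long long ulonglong;

ulonglong next_reservation_size(unsigned int auto_inc_intervals_count,
                                ulonglong default_nb_rows, /* AUTO_INC_DEFAULT_NB_ROWS */
                                ulonglong default_nb_max)  /* AUTO_INC_DEFAULT_NB_MAX  */
{
  ulonglong n= default_nb_rows * (1ULL << auto_inc_intervals_count);
  return n < default_nb_max ? n : default_nb_max;  /* set_if_smaller(...) */
}
/* counts 0,1,2,3,... reserve default_nb_rows * {1,2,4,8,...}, then cap */
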
@@ -2322,7 +2325,7 @@ int handler::update_auto_increment()
                       &nb_reserved_values);
     if (nr == ~(ulonglong) 0)
       DBUG_RETURN(HA_ERR_AUTOINC_READ_FAILED);  // Mark failure

     /*
       That rounding below should not be needed when all engines actually
       respect offset and increment in get_auto_increment(). But they don't
||||
*/
|
||||
nr= compute_next_insert_id(nr-1, variables);
|
||||
}
|
||||
|
||||
|
||||
if (table->s->next_number_keypart == 0)
|
||||
{
|
||||
/* We must defer the appending until "nr" has been possibly truncated */
|
||||
@@ -2377,8 +2380,9 @@ int handler::update_auto_increment()
   {
     auto_inc_interval_for_cur_row.replace(nr, nb_reserved_values,
                                           variables->auto_increment_increment);
+    auto_inc_intervals_count++;
     /* Row-based replication does not need to store intervals in binlog */
-    if (!thd->current_stmt_binlog_row_based)
+    if (mysql_bin_log.is_open() && !thd->current_stmt_binlog_row_based)
       thd->auto_inc_intervals_in_cur_stmt_for_binlog.append(auto_inc_interval_for_cur_row.minimum(),
                                                             auto_inc_interval_for_cur_row.values(),
                                                             variables->auto_increment_increment);
@@ -2498,6 +2502,7 @@ void handler::ha_release_auto_increment()
   release_auto_increment();
   insert_id_for_cur_row= 0;
   auto_inc_interval_for_cur_row.replace(0, 0, 0);
+  auto_inc_intervals_count= 0;
   if (next_insert_id > 0)
   {
     next_insert_id= 0;
@@ -1129,6 +1129,13 @@ public:
     inserter.
   */
   Discrete_interval auto_inc_interval_for_cur_row;
+  /**
+    Number of reserved auto-increment intervals. Serves as a heuristic
+    when we have no estimation of how many records the statement will insert:
+    the more intervals we have reserved, the bigger the next one. Reset in
+    handler::ha_release_auto_increment().
+  */
+  uint auto_inc_intervals_count;

   handler(handlerton *ht_arg, TABLE_SHARE *share_arg)
     :table_share(share_arg), table(0),
@@ -1137,7 +1144,8 @@ public:
     ref_length(sizeof(my_off_t)),
     ft_handler(0), inited(NONE),
     locked(FALSE), implicit_emptied(0),
-    pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0)
+    pushed_cond(0), next_insert_id(0), insert_id_for_cur_row(0),
+    auto_inc_intervals_count(0)
   {}
   virtual ~handler(void)
   {
@@ -1241,8 +1249,8 @@ public:

   int ha_change_partitions(HA_CREATE_INFO *create_info,
                            const char *path,
-                           ulonglong *copied,
-                           ulonglong *deleted,
+                           ulonglong * const copied,
+                           ulonglong * const deleted,
                            const uchar *pack_frm_data,
                            size_t pack_frm_len);
   int ha_drop_partitions(const char *path);
@@ -1859,7 +1867,8 @@ private:
     This is called to delete all rows in a table
     If the handler don't support this, then this function will
     return HA_ERR_WRONG_COMMAND and MySQL will delete the rows one
-    by one.
+    by one. It should reset auto_increment if
+    thd->lex->sql_command == SQLCOM_TRUNCATE.
   */
   virtual int delete_all_rows()
   { return (my_errno=HA_ERR_WRONG_COMMAND); }
@@ -1898,8 +1907,8 @@ private:

   virtual int change_partitions(HA_CREATE_INFO *create_info,
                                 const char *path,
-                                ulonglong *copied,
-                                ulonglong *deleted,
+                                ulonglong * const copied,
+                                ulonglong * const deleted,
                                 const uchar *pack_frm_data,
                                 size_t pack_frm_len)
   { return HA_ERR_WRONG_COMMAND; }
@@ -1248,10 +1248,12 @@ Item_name_const::Item_name_const(Item *name_arg, Item *val):
   if (!(valid_args= name_item->basic_const_item() &&
         (value_item->basic_const_item() ||
          ((value_item->type() == FUNC_ITEM) &&
-          (((Item_func *) value_item)->functype() ==
-             Item_func::NEG_FUNC) &&
+          ((((Item_func *) value_item)->functype() ==
+             Item_func::COLLATE_FUNC) ||
+           ((((Item_func *) value_item)->functype() ==
+              Item_func::NEG_FUNC) &&
           (((Item_func *) value_item)->key_item()->type() !=
-            FUNC_ITEM)))))
+            FUNC_ITEM)))))))
     my_error(ER_WRONG_ARGUMENTS, MYF(0), "NAME_CONST");
   Item::maybe_null= TRUE;
 }
@@ -1336,6 +1338,7 @@ public:
     else
       Item_ident::print(str, query_type);
   }
+  virtual Ref_Type ref_type() { return AGGREGATE_REF; }
 };

@@ -2126,7 +2126,7 @@ class Item_ref :public Item_ident
 protected:
   void set_properties();
 public:
-  enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF };
+  enum Ref_Type { REF, DIRECT_REF, VIEW_REF, OUTER_REF, AGGREGATE_REF };
   Field *result_field;                          /* Save result here */
   Item **ref;
   Item_ref(Name_resolution_context *context_arg,
@@ -4011,11 +4011,6 @@ bool MYSQL_BIN_LOG::write(Log_event *event_info)
       DBUG_PRINT("info",("number of auto_inc intervals: %u",
                          thd->auto_inc_intervals_in_cur_stmt_for_binlog.
                          nb_elements()));
-      /*
-        If the auto_increment was second in a table's index (possible with
-        MyISAM or BDB) (table->next_number_keypart != 0), such event is
-        in fact not necessary. We could avoid logging it.
-      */
       Intvar_log_event e(thd, (uchar) INSERT_ID_EVENT,
                          thd->auto_inc_intervals_in_cur_stmt_for_binlog.
                          minimum());
@@ -3350,6 +3350,17 @@ int Start_log_event_v3::do_apply_event(Relay_log_info const *rli)
       close_temporary_tables(thd);
       cleanup_load_tmpdir();
     }
+    else
+    {
+      /*
+        Set all temporary tables thread references to the current thread
+        as they may point to the "old" SQL slave thread in case of its
+        restart.
+      */
+      TABLE *table;
+      for (table= thd->temporary_tables; table; table= table->next)
+        table->in_use= thd;
+    }
     break;

   /*
@@ -8613,10 +8624,10 @@ int Rows_log_event::find_row(const Relay_log_info *rli)
       the necessary bits on the bytes and don't set the filler bits
       correctly.
     */
-    my_ptrdiff_t const pos=
-      table->s->null_bytes > 0 ? table->s->null_bytes - 1 : 0;
-    table->record[0][pos]= 0xFF;
+    if (table->s->null_bytes > 0)
+      table->record[0][table->s->null_bytes - 1]|=
+        256U - (1U << table->s->last_null_bit_pos);

     if ((error= table->file->index_read_map(table->record[0], m_key,
                                             HA_WHOLE_KEY,
                                             HA_READ_KEY_EXACT)))
@@ -3384,7 +3384,7 @@ static int init_common_variables(const char *conf_file_name, int argc,
   if (opt_slow_log && opt_slow_logname && !(log_output_options & LOG_FILE)
       && !(log_output_options & LOG_NONE))
     sql_print_warning("Although a path was specified for the "
-                      "--log-slow-queries option, log tables are used. "
+                      "--log_slow_queries option, log tables are used. "
                       "To enable logging to files use the --log-output=file option.");

   s= opt_logname ? opt_logname : make_default_log_name(buff, ".log");
@@ -3753,23 +3753,25 @@ with --log-bin instead.");
     unireg_abort(1);
   }
   if (!opt_bin_log)
   {
     if (opt_binlog_format_id != BINLOG_FORMAT_UNSPEC)
     {
       sql_print_error("You need to use --log-bin to make "
                       "--binlog-format work.");
       unireg_abort(1);
     }
     else
     {
-      global_system_variables.binlog_format= BINLOG_FORMAT_MIXED;
+      global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
     }
   }
   else
     if (opt_binlog_format_id == BINLOG_FORMAT_UNSPEC)
-      global_system_variables.binlog_format= BINLOG_FORMAT_MIXED;
+      global_system_variables.binlog_format= BINLOG_FORMAT_STMT;
     else
     {
      DBUG_ASSERT(global_system_variables.binlog_format != BINLOG_FORMAT_UNSPEC);
     }
   }

   /* Check that we have not let the format to unspecified at this point */
   DBUG_ASSERT((uint)global_system_variables.binlog_format <=
@@ -5530,7 +5532,9 @@ enum options_mysqld
   OPT_MIN_EXAMINED_ROW_LIMIT,
   OPT_LOG_SLOW_SLAVE_STATEMENTS,
   OPT_OLD_MODE,
-  OPT_SLAVE_EXEC_MODE
+  OPT_SLAVE_EXEC_MODE,
+  OPT_GENERAL_LOG_FILE,
+  OPT_SLOW_QUERY_LOG_FILE
 };

@@ -5727,7 +5731,7 @@ struct my_option my_long_options[] =
    "Set up signals usable for debugging",
    (uchar**) &opt_debugging, (uchar**) &opt_debugging,
    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
-  {"general-log", OPT_GENERAL_LOG,
+  {"general_log", OPT_GENERAL_LOG,
    "Enable|disable general log", (uchar**) &opt_log,
    (uchar**) &opt_log, 0, GET_BOOL, OPT_ARG, 0, 0, 0, 0, 0, 0},
 #ifdef HAVE_LARGE_PAGES
@@ -5763,8 +5767,12 @@ Disable with --skip-large-pages.",
    (uchar**) &opt_local_infile,
    (uchar**) &opt_local_infile, 0, GET_BOOL, OPT_ARG,
    1, 0, 0, 0, 0, 0},
-  {"log", 'l', "Log connections and queries to file.", (uchar**) &opt_logname,
+  {"log", 'l', "Log connections and queries to file (deprecated option, use "
+   "--general_log/--general_log_file instead).", (uchar**) &opt_logname,
    (uchar**) &opt_logname, 0, GET_STR, OPT_ARG, 0, 0, 0, 0, 0, 0},
+  {"general_log_file", OPT_GENERAL_LOG_FILE,
+   "Log connections and queries to given file.", (uchar**) &opt_logname,
+   (uchar**) &opt_logname, 0, GET_STR, REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   {"log-bin", OPT_BIN_LOG,
    "Log update queries in binary format. Optional (but strongly recommended "
    "to avoid replication problems if server's hostname changes) argument "
@@ -5838,10 +5846,17 @@ Disable with --skip-large-pages.",
    (uchar**) &opt_log_slow_slave_statements,
    (uchar**) &opt_log_slow_slave_statements,
    0, GET_BOOL, NO_ARG, 0, 0, 0, 0, 0, 0},
-  {"log-slow-queries", OPT_SLOW_QUERY_LOG,
-   "Log slow queries to a table or log file. Defaults logging to table mysql.slow_log or hostname-slow.log if --log-output=file is used. Must be enabled to activate other slow log options.",
+  {"log_slow_queries", OPT_SLOW_QUERY_LOG,
+   "Log slow queries to a table or log file. Defaults logging to table "
+   "mysql.slow_log or hostname-slow.log if --log-output=file is used. "
+   "Must be enabled to activate other slow log options. "
+   "(deprecated option, use --slow_query_log/--slow_query_log_file instead)",
    (uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR, OPT_ARG,
    0, 0, 0, 0, 0, 0},
+  {"slow_query_log_file", OPT_SLOW_QUERY_LOG_FILE,
+   "Log slow queries to given log file. Defaults logging to hostname-slow.log. Must be enabled to activate other slow log options.",
+   (uchar**) &opt_slow_logname, (uchar**) &opt_slow_logname, 0, GET_STR,
+   REQUIRED_ARG, 0, 0, 0, 0, 0, 0},
   {"log-tc", OPT_LOG_TC,
    "Path to transaction coordinator log (used for transactions that affect "
    "more than one storage engine, when binary log is disabled)",
|
||||
{"skip-symlink", OPT_SKIP_SYMLINKS, "Don't allow symlinking of tables. Deprecated option. Use --skip-symbolic-links instead.",
|
||||
0, 0, 0, GET_NO_ARG, NO_ARG, 0, 0, 0, 0, 0, 0},
|
||||
{"skip-thread-priority", OPT_SKIP_PRIOR,
|
||||
"Don't give threads different priorities.", 0, 0, 0, GET_NO_ARG, NO_ARG,
|
||||
"Don't give threads different priorities. Deprecated option.", 0, 0, 0, GET_NO_ARG, NO_ARG,
|
||||
DEFAULT_SKIP_THREAD_PRIORITY, 0, 0, 0, 0, 0},
|
||||
#ifdef HAVE_REPLICATION
|
||||
{"slave-load-tmpdir", OPT_SLAVE_LOAD_TMPDIR,
|
||||
@@ -7535,13 +7550,13 @@ static void mysql_init_variables(void)
   have_community_features = SHOW_OPTION_YES;
 #else
   have_community_features = SHOW_OPTION_NO;
 #endif
   global_system_variables.ndb_index_stat_enable=FALSE;
   max_system_variables.ndb_index_stat_enable=TRUE;
   global_system_variables.ndb_index_stat_cache_entries=32;
   max_system_variables.ndb_index_stat_cache_entries=~0L;
   global_system_variables.ndb_index_stat_update_freq=20;
   max_system_variables.ndb_index_stat_update_freq=~0L;
 #endif
 #ifdef HAVE_OPENSSL
   have_ssl=SHOW_OPTION_YES;
 #else
@@ -7649,6 +7664,9 @@ mysqld_get_one_option(int optid,
       default_collation_name= 0;
       break;
     case 'l':
+      WARN_DEPRECATED(NULL, "7.0", "--log", "'--general_log'/'--general_log_file'");
+      /* FALL-THROUGH */
+    case OPT_GENERAL_LOG_FILE:
       opt_log=1;
       break;
     case 'h':
@@ -7818,6 +7836,9 @@ mysqld_get_one_option(int optid,
     }
 #endif /* HAVE_REPLICATION */
     case (int) OPT_SLOW_QUERY_LOG:
+      WARN_DEPRECATED(NULL, "7.0", "--log_slow_queries", "'--slow_query_log'/'--slow_query_log_file'");
+      /* FALL-THROUGH */
+    case (int) OPT_SLOW_QUERY_LOG_FILE:
       opt_slow_log= 1;
       break;
 #ifdef WITH_CSV_STORAGE_ENGINE
@@ -7865,6 +7886,9 @@ mysqld_get_one_option(int optid,
       break;
     case (int) OPT_SKIP_PRIOR:
       opt_specialflag|= SPECIAL_NO_PRIOR;
+      sql_print_warning("The --skip-thread-priority startup option is deprecated "
+                        "and will be removed in MySQL 7.0. MySQL 6.0 and up do not "
+                        "give threads different priorities.");
       break;
     case (int) OPT_SKIP_LOCK:
       opt_external_locking=0;
@@ -8207,7 +8231,7 @@ static void get_options(int *argc,char **argv)
   if ((opt_log_slow_admin_statements || opt_log_queries_not_using_indexes ||
        opt_log_slow_slave_statements) &&
      !opt_slow_log)
-    sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log-slow-queries is not set");
+    sql_print_warning("options --log-slow-admin-statements, --log-queries-not-using-indexes and --log-slow-slave-statements have no effect if --log_slow_queries is not set");

 #if defined(HAVE_BROKEN_REALPATH)
   my_use_symdir=0;
@@ -2391,6 +2391,12 @@ end:
 bool sys_var_log_state::update(THD *thd, set_var *var)
 {
   bool res;
+
+  if (this == &sys_var_log)
+    WARN_DEPRECATED(thd, "7.0", "@@log", "'@@general_log'");
+  else if (this == &sys_var_log_slow)
+    WARN_DEPRECATED(thd, "7.0", "@@log_slow_queries", "'@@slow_query_log'");
+
   pthread_mutex_lock(&LOCK_global_system_variables);
   if (!var->save_result.ulong_value)
   {
||||
|
||||
void sys_var_log_state::set_default(THD *thd, enum_var_type type)
|
||||
{
|
||||
if (this == &sys_var_log)
|
||||
WARN_DEPRECATED(thd, "7.0", "@@log", "'@@general_log'");
|
||||
else if (this == &sys_var_log_slow)
|
||||
WARN_DEPRECATED(thd, "7.0", "@@log_slow_queries", "'@@slow_query_log'");
|
||||
|
||||
pthread_mutex_lock(&LOCK_global_system_variables);
|
||||
logger.deactivate_log_handler(thd, log_type);
|
||||
pthread_mutex_unlock(&LOCK_global_system_variables);
|
||||
@@ -3711,7 +3722,7 @@ bool sys_var_thd_storage_engine::update(THD *thd, set_var *var)

 void sys_var_thd_table_type::warn_deprecated(THD *thd)
 {
-  WARN_DEPRECATED(thd, "5.2", "table_type", "'storage_engine'");
+  WARN_DEPRECATED(thd, "5.2", "@@table_type", "'@@storage_engine'");
 }

 void sys_var_thd_table_type::set_default(THD *thd, enum_var_type type)
@@ -3973,8 +3984,8 @@ bool process_key_caches(process_key_cache_t func)

 void sys_var_trust_routine_creators::warn_deprecated(THD *thd)
 {
-  WARN_DEPRECATED(thd, "5.2", "log_bin_trust_routine_creators",
-                  "'log_bin_trust_function_creators'");
+  WARN_DEPRECATED(thd, "5.2", "@@log_bin_trust_routine_creators",
+                  "'@@log_bin_trust_function_creators'");
 }

 void sys_var_trust_routine_creators::set_default(THD *thd, enum_var_type type)
@@ -126,6 +126,9 @@ sp_get_item_value(THD *thd, Item *item, String *str)
       if (cs->escape_with_backslash_is_dangerous)
         buf.append(' ');
       append_query_string(cs, result, &buf);
+      buf.append(" COLLATE '");
+      buf.append(item->collation.collation->name);
+      buf.append('\'');
       str->copy(buf);

       return str;
@@ -1524,6 +1524,9 @@ public:
     then the latter INSERT will insert no rows
     (first_successful_insert_id_in_cur_stmt == 0), but storing "INSERT_ID=3"
     in the binlog is still needed; the list's minimum will contain 3.
+    This variable is cumulative: if several statements are written to binlog
+    as one (stored functions or triggers are used) this list is the
+    concatenation of all intervals reserved by all statements.
   */
   Discrete_intervals_list auto_inc_intervals_in_cur_stmt_for_binlog;
   /* Used by replication and SET INSERT_ID */
@@ -2999,6 +2999,8 @@ int get_partition_id_range_sub_hash(partition_info *part_info,
   longlong local_func_value;
   int error;
   DBUG_ENTER("get_partition_id_range_sub_hash");
+  LINT_INIT(loc_part_id);
+  LINT_INIT(sub_part_id);

   if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                               func_value))))
@@ -3026,6 +3028,8 @@ int get_partition_id_range_sub_linear_hash(partition_info *part_info,
   longlong local_func_value;
   int error;
   DBUG_ENTER("get_partition_id_range_sub_linear_hash");
+  LINT_INIT(loc_part_id);
+  LINT_INIT(sub_part_id);

   if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                               func_value))))
@@ -3055,6 +3059,7 @@ int get_partition_id_range_sub_key(partition_info *part_info,
   longlong local_func_value;
   int error;
   DBUG_ENTER("get_partition_id_range_sub_key");
+  LINT_INIT(loc_part_id);

   if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
                                               func_value))))
|
||||
longlong local_func_value;
|
||||
int error;
|
||||
DBUG_ENTER("get_partition_id_range_sub_linear_key");
|
||||
LINT_INIT(loc_part_id);
|
||||
|
||||
if (unlikely((error= get_partition_id_range(part_info, &loc_part_id,
|
||||
func_value))))
|
||||
@@ -3102,6 +3108,7 @@ int get_partition_id_list_sub_hash(partition_info *part_info,
   longlong local_func_value;
   int error;
   DBUG_ENTER("get_partition_id_list_sub_hash");
+  LINT_INIT(sub_part_id);

   if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
                                              func_value))))
@@ -3129,6 +3136,7 @@ int get_partition_id_list_sub_linear_hash(partition_info *part_info,
   longlong local_func_value;
   int error;
   DBUG_ENTER("get_partition_id_list_sub_linear_hash");
+  LINT_INIT(sub_part_id);

   if (unlikely((error= get_partition_id_list(part_info, &loc_part_id,
                                              func_value))))
@@ -14804,6 +14804,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
   Item *pos;
   List_iterator_fast<Item> li(all_fields);
   Copy_field *copy= NULL;
+  IF_DBUG(Copy_field *copy_start);
   res_selected_fields.empty();
   res_all_fields.empty();
   List_iterator_fast<Item> itr(res_all_fields);
@@ -14816,12 +14817,19 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
     goto err2;

   param->copy_funcs.empty();
+  IF_DBUG(copy_start= copy);
   for (i= 0; (pos= li++); i++)
   {
     Field *field;
     uchar *tmp;
     Item *real_pos= pos->real_item();
-    if (real_pos->type() == Item::FIELD_ITEM)
+    /*
+      Aggregate functions can be substituted for fields (by e.g. temp tables).
+      We need to filter those substituted fields out.
+    */
+    if (real_pos->type() == Item::FIELD_ITEM &&
+        !(real_pos != pos &&
+          ((Item_ref *)pos)->ref_type() == Item_ref::AGGREGATE_REF))
     {
       Item_field *item;
       if (!(item= new Item_field(thd, ((Item_field*) real_pos))))
@@ -14868,6 +14876,7 @@ setup_copy_fields(THD *thd, TMP_TABLE_PARAM *param,
       goto err;
     if (copy)
     {
+      DBUG_ASSERT (param->field_count > (uint) (copy - copy_start));
       copy->set(tmp, item->result_field);
       item->result_field->move_field(copy->to_ptr,copy->to_null_ptr,1);
 #ifdef HAVE_purify
@@ -410,6 +410,7 @@ typedef struct st_table_share
   int cached_row_logging_check;

 #ifdef WITH_PARTITION_STORAGE_ENGINE
+  /** @todo: Move into *ha_data for partitioning */
   bool auto_partitioned;
   const char *partition_info;
   uint partition_info_len;
@@ -419,6 +420,9 @@ typedef struct st_table_share
   handlerton *default_part_db_type;
 #endif

+  /** place to store storage engine specific data */
+  void *ha_data;
+

   /*
     Set share's table cache key and update its db and table name appropriately.